/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "basic-block.h"
#include "target-def.h"
#include "langhooks.h"
#include "tm-constrs.h"
#include "dwarf2out.h"
static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif
/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode)				\
  ((mode) == QImode ? 0					\
   : (mode) == HImode ? 1				\
   : (mode) == SImode ? 2				\
   : (mode) == DImode ? 3				\
   : 4)
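/* A minimal usage sketch (illustrative, not part of the original file):
   assuming the per-mode cost arrays below are laid out QI, HI, SI, DI,
   "other", a lookup of the SImode multiply startup cost reads

     ix86_cost->mult_init[MODE_INDEX (SImode)]	i.e.  ...->mult_init[2]

   where mult_init is the struct processor_costs field documented below as
   "cost of starting multiply".  */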
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)
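/* Worked example of the assumption above: with COSTS_N_INSNS (N) == (N) * 4,
   a one-insn operation costs 4 units, and under COSTS_N_BYTES a 4-byte
   instruction also costs 4 units.  A 2-byte add thus scores
   COSTS_N_BYTES (2) == 4 == COSTS_N_INSNS (1), keeping the size-tuned and
   speed-tuned tables on a comparable scale.  */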
#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
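/* How to read the stringop descriptors used throughout the cost tables
   below (a sketch; see struct stringop_algs in i386.h for the
   authoritative layout): each {alg, {{max, alg}, ...}} entry gives the
   algorithm used when the block size is unknown at compile time, followed
   by {max, alg} pairs for known sizes, where max == -1 means "everything
   larger".  For example

     {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
      DUMMY_STRINGOP_ALGS}

   says: in 32bit code, copy blocks of at most 256 bytes with a 4-byte rep
   prefix and larger ones via a libcall; the second element is the 64bit
   variant, here the "always libcall" placeholder.  */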
struct processor_costs ix86_size_cost = {/* costs for tuning for size */
  COSTS_N_BYTES (2),			/* cost of an add instruction */
  COSTS_N_BYTES (3),			/* cost of a lea instruction */
  COSTS_N_BYTES (2),			/* variable shift costs */
  COSTS_N_BYTES (3),			/* constant shift costs */
  {COSTS_N_BYTES (3),			/* cost of starting multiply for QI */
   COSTS_N_BYTES (3),			/* HI */
   COSTS_N_BYTES (3),			/* SI */
   COSTS_N_BYTES (3),			/* DI */
   COSTS_N_BYTES (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),			/* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),			/* HI */
   COSTS_N_BYTES (3),			/* SI */
   COSTS_N_BYTES (3),			/* DI */
   COSTS_N_BYTES (5)},			/* other */
  COSTS_N_BYTES (3),			/* cost of movsx */
  COSTS_N_BYTES (3),			/* cost of movzx */
  2,					/* cost for loading QImode using movzbl */
  {2, 2, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 2},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {2, 2, 2},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  3,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {3, 3},				/* cost of storing MMX registers
					   in SImode and DImode */
  3,					/* cost of moving SSE register */
  {3, 3, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {3, 3, 3},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of l1 cache  */
  0,					/* size of l2 cache  */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_BYTES (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),			/* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),			/* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),			/* cost of FABS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  1,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  1,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
/* Processor costs (relative to an add) */
struct processor_costs i386_cost = {	/* 386 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (6),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (6),			/* HI */
   COSTS_N_INSNS (6),			/* SI */
   COSTS_N_INSNS (6),			/* DI */
   COSTS_N_INSNS (6)},			/* other */
  COSTS_N_INSNS (1),			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/* HI */
   COSTS_N_INSNS (23),			/* SI */
   COSTS_N_INSNS (23),			/* DI */
   COSTS_N_INSNS (23)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of l1 cache  */
  0,					/* size of l2 cache  */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_INSNS (23),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (24),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
struct processor_costs i486_cost = {	/* 486 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (12),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (12),			/* HI */
   COSTS_N_INSNS (12),			/* SI */
   COSTS_N_INSNS (12),			/* DI */
   COSTS_N_INSNS (12)},			/* other */
  1,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),			/* HI */
   COSTS_N_INSNS (40),			/* SI */
   COSTS_N_INSNS (40),			/* DI */
   COSTS_N_INSNS (40)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  4,					/* size of l1 cache.  486 has 8kB cache
					   shared for code and data, so 4kB is
					   not really precise.  */
  4,					/* size of l2 cache  */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),			/* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (11),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (11),			/* HI */
   COSTS_N_INSNS (11),			/* SI */
   COSTS_N_INSNS (11),			/* DI */
   COSTS_N_INSNS (11)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),			/* HI */
   COSTS_N_INSNS (25),			/* SI */
   COSTS_N_INSNS (25),			/* DI */
   COSTS_N_INSNS (25)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  6,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  8,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  8,					/* size of l2 cache  */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (4),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (4)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),			/* HI */
   COSTS_N_INSNS (17),			/* SI */
   COSTS_N_INSNS (17),			/* DI */
   COSTS_N_INSNS (17)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  2,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  256,					/* size of l2 cache  */
  32,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks the inline loop is still a
     noticeable win, for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb apparently has a more expensive startup time on the
     CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (2),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (7),			/* SI */
   COSTS_N_INSNS (7),			/* DI */
   COSTS_N_INSNS (7)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/* HI */
   COSTS_N_INSNS (39),			/* SI */
   COSTS_N_INSNS (39),			/* DI */
   COSTS_N_INSNS (39)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  1,					/* cost for loading QImode using movzbl */
  {1, 1, 1},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {1, 1, 1},				/* cost of storing integer registers */
  1,					/* cost of reg,reg fld/fst */
  {1, 1, 1},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 6, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */

  1,					/* cost of moving MMX register */
  {1, 1},				/* cost of loading MMX registers
					   in SImode and DImode */
  {1, 1},				/* cost of storing MMX registers
					   in SImode and DImode */
  1,					/* cost of moving SSE register */
  {1, 1, 1},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {1, 1, 1},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  1,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  128,					/* size of l2 cache.  */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (3),			/* DI */
   COSTS_N_INSNS (3)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),			/* HI */
   COSTS_N_INSNS (18),			/* SI */
   COSTS_N_INSNS (18),			/* DI */
   COSTS_N_INSNS (18)},			/* other */
  COSTS_N_INSNS (2),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  3,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {6, 6, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  6,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  32,					/* size of l2 cache.  Some models
					   have integrated l2 cache, but
					   optimizing for k6 is not important
					   enough to worry about that.  */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  COSTS_N_INSNS (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (5),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (5),			/* HI */
   COSTS_N_INSNS (5),			/* SI */
   COSTS_N_INSNS (5),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with REP prefix (relative to loops)
     compared to K8.  Alignment becomes important after 8 bytes for memcpy and
     128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 3, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
  /* K8 has optimized REP instruction for medium sized blocks, but for very
     small blocks it is better to use a loop.  For large blocks, a libcall
     can do non-temporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  5,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  3,					/* vec_unalign_load_cost.  */
  3,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  2,					/* cond_not_taken_branch_cost.  */
};
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
					/* MOVD reg64, xmmreg  Double  FSTORE 4
					   MOVD reg32, xmmreg  Double  FSTORE 4
					   MOVD reg64, xmmreg  Double  FADD   3
					   MOVD reg32, xmmreg  Double  FADD   3 */
  64,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */

  /* AMDFAM10 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use a loop.  For large blocks, a
     libcall can do non-temporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  2,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
struct processor_costs bdver1_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
					/* MOVD reg64, xmmreg  Double  FSTORE 4
					   MOVD reg32, xmmreg  Double  FSTORE 4
					   MOVD reg64, xmmreg  Double  FADD   3
					   MOVD reg32, xmmreg  Double  FADD   3 */
  64,					/* size of l1 cache.  */
  1024,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */

  /* BDVER1 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use a loop.  For large blocks, a
     libcall can do non-temporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  2,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (3),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (4),			/* constant shift costs */
  {COSTS_N_INSNS (15),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (15),			/* HI */
   COSTS_N_INSNS (15),			/* SI */
   COSTS_N_INSNS (15),			/* DI */
   COSTS_N_INSNS (15)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),			/* HI */
   COSTS_N_INSNS (56),			/* SI */
   COSTS_N_INSNS (56),			/* DI */
   COSTS_N_INSNS (56)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  2,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  12,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  10,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  COSTS_N_INSNS (5),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
   {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (10),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (10),			/* HI */
   COSTS_N_INSNS (10),			/* SI */
   COSTS_N_INSNS (10),			/* DI */
   COSTS_N_INSNS (10)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),			/* HI */
   COSTS_N_INSNS (66),			/* SI */
   COSTS_N_INSNS (66),			/* DI */
   COSTS_N_INSNS (66)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  3,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  6,					/* cost of moving MMX register */
  {12, 12},				/* cost of loading MMX registers
					   in SImode and DImode */
  {12, 12},				/* cost of storing MMX registers
					   in SImode and DImode */
  6,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {12, 12, 12},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  8,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  1024,					/* size of l2 cache.  */
  128,					/* size of prefetch block */
  8,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
	      {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
   {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
struct processor_costs core2_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (3),			/* DI */
   COSTS_N_INSNS (3)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (22),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (22),			/* HI */
   COSTS_N_INSNS (22),			/* SI */
   COSTS_N_INSNS (22),			/* DI */
   COSTS_N_INSNS (22)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  16,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {6, 6, 6},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {6, 6, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {6, 6},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {6, 6, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 4},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  2,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  2048,					/* size of l2 cache.  */
  128,					/* size of prefetch block */
  8,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (32),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (58),			/* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
struct processor_costs atom_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (2)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  17,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {8, 8, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {8, 8, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
/* Generic64 should produce code tuned for Nocona and K8.  */

struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  /* On all chips taken into consideration lea is 2 cycles or more.  With
     this cost, however, our current implementation of synth_mult results in
     the use of unnecessary temporary registers, causing regressions on
     several SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (2)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  17,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {8, 8, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {8, 8, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
     value is increased to a perhaps more appropriate value of 5.  */
  3,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
/* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona
   and K8.  */

struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (2)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  17,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {8, 8, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {8, 8, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
const struct processor_costs *ix86_cost = &pentium_cost;
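/* Sketch of how the active table is consulted (illustrative; the real
   consumers are ix86_rtx_costs and friends): an add reduces to

     *total = ix86_cost->add;

   and a multiply by a constant with n set bits costs roughly

     ix86_cost->mult_init[MODE_INDEX (mode)] + n * ix86_cost->mult_bit;

   which is why the tables above record both a startup cost and a
   per-bit cost.  */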
/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
#define m_CORE2 (1<<PROCESSOR_CORE2)
#define m_ATOM (1<<PROCESSOR_ATOM)

#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
#define m_BDVER1 (1<<PROCESSOR_BDVER1)
#define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10 | m_BDVER1)

#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)

/* Generic instruction choice should be a common subset of supported CPUs
   (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
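/* Illustrative sketch of how these masks gate the tunable flags (the real
   loop lives in the options override code; written out here only for
   exposition):

     unsigned int ix86_tune_mask = 1u << ix86_tune;
     for (i = 0; i < X86_TUNE_LAST; ++i)
       ix86_tune_features[i]
	 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);

   so a feature such as X86_TUNE_USE_LEAVE below is enabled when tuning for
   CORE2 (m_CORE2 is in its mask) but not when tuning for GENERIC32.  */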
/* Feature tests against the various tunings.  */
unsigned char ix86_tune_features[X86_TUNE_LAST];

/* Feature tests against the various tunings used to create ix86_tune_features
   based on the processor mask.  */
static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
  /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
     negatively, so enabling it for Generic64 seems like a good code size
     tradeoff.  We can't enable it for 32bit generic because it does not
     work well with PPro based chips.  */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,

  /* X86_TUNE_PUSH_MEMORY */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,
  /* X86_TUNE_ZERO_EXTEND_WITH_AND */
  m_486 | m_PENT,
  /* X86_TUNE_UNROLL_STRLEN */
  m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
  | m_CORE2 | m_GENERIC,

  /* X86_TUNE_DEEP_BRANCH_PREDICTION */
  m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
  /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
     on simulation results.  But after P4 was made, no performance benefit
     was observed with branch hints.  It also increases the code size.
     As a result, icc never generates branch hints.  */
  0,

  /* X86_TUNE_DOUBLE_WITH_ADD */
  ~m_386,
  /* X86_TUNE_USE_SAHF */
  m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER1 | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
     partial dependencies.  */
  m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
  | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
  /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
     register stalls on the Generic32 compilation setting as well.  However,
     in the current implementation partial register stalls are not eliminated
     very well - they can be introduced via subregs synthesized by combine
     and can happen in caller/callee saving sequences.  Because this option
     pays back little on PPro based chips and is in conflict with partial reg
     dependencies used by Athlon/P4 based chips, it is better to leave it off
     for generic32 for now.  */
  m_PPRO,
  /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
  m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_HIMODE_FIOP */
  m_386 | m_486 | m_K6_GEODE,

  /* X86_TUNE_USE_SIMODE_FIOP */
  ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),
  /* X86_TUNE_USE_MOV0 */
  m_K6,

  /* X86_TUNE_USE_CLTD */
  ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx.  */
  m_PENT4,

  /* X86_TUNE_SPLIT_LONG_MOVES */
  m_PPRO,

  /* X86_TUNE_READ_MODIFY_WRITE */
  ~m_PENT,

  /* X86_TUNE_READ_MODIFY */
  ~(m_PENT | m_PPRO),
  /* X86_TUNE_PROMOTE_QIMODE */
  m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
  | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,

  /* X86_TUNE_FAST_PREFIX */
  ~(m_PENT | m_486 | m_386),

  /* X86_TUNE_SINGLE_STRINGOP */
  m_386 | m_PENT4 | m_NOCONA,
  /* X86_TUNE_QIMODE_MATH */
  ~0,
  /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
     register stalls.  Just like X86_TUNE_PARTIAL_REG_STALL this option
     might be considered for Generic32 if our scheme for avoiding partial
     stalls was more effective.  */
  ~m_PPRO,
1475 /* X86_TUNE_PROMOTE_QI_REGS */
1478 /* X86_TUNE_PROMOTE_HI_REGS */
1481 /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over one or two push/pop insns. */
1482 m_ATOM | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA
1483 | m_CORE2 | m_GENERIC,
1485 /* X86_TUNE_ADD_ESP_8 */
1486 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_K6_GEODE | m_386
1487 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1489 /* X86_TUNE_SUB_ESP_4 */
1490 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2
1493 /* X86_TUNE_SUB_ESP_8 */
1494 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_386 | m_486
1495 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1497 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1498 for DFmode copies */
1499 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1500 | m_GENERIC | m_GEODE),
1502 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1503 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1505 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1506 conflict here between PPro/Pentium4 based chips that treat 128bit
1507 SSE registers as single units and K8 based chips that divide SSE
1508 registers into two 64bit halves. This knob promotes all store
1509 destinations to be 128bit to allow register renaming on 128bit SSE
1510 units, but usually results in one extra microop on 64bit SSE units.
1511 Experimental results show that disabling this option on P4 brings over
1512 a 20% SPECfp regression, while enabling it on K8 brings roughly a 2.4%
1513 regression that can be partly masked by careful scheduling of moves. */
1514 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
1515 | m_AMDFAM10 | m_BDVER1,
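/* Illustrative asm (an assumption about the generated code, not taken from
   this file): with the knob above set, a scalar load that would be emitted as
       movsd  mem, %xmm0        ; writes only the low 64 bits
   is instead emitted so the whole register is written first, e.g.
       pxor   %xmm0, %xmm0
       movsd  mem, %xmm0
   breaking the false dependency on the old upper half at the cost of one
   extra instruction. */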
1517 /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL */
1518 m_AMDFAM10 | m_BDVER1,
1520 /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL */
1523 /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */
1526 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where types and dependencies
1527 are resolved on SSE register parts instead of whole registers, so we may
1528 maintain just the lower part of scalar values in the proper format,
1529 leaving the upper part undefined. */
1532 /* X86_TUNE_SSE_TYPELESS_STORES */
1535 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1536 m_PPRO | m_PENT4 | m_NOCONA,
1538 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1539 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1541 /* X86_TUNE_PROLOGUE_USING_MOVE */
1542 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1544 /* X86_TUNE_EPILOGUE_USING_MOVE */
1545 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1547 /* X86_TUNE_SHIFT1 */
1550 /* X86_TUNE_USE_FFREEP */
1553 /* X86_TUNE_INTER_UNIT_MOVES */
1554 ~(m_AMD_MULTIPLE | m_GENERIC),
1556 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
1557 ~(m_AMDFAM10 | m_BDVER1),
1559 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
1560 than 4 branch instructions in a 16-byte window. */
1561 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
1564 /* X86_TUNE_SCHEDULE */
1565 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
1568 /* X86_TUNE_USE_BT */
1569 m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,
1571 /* X86_TUNE_USE_INCDEC */
1572 ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),
1574 /* X86_TUNE_PAD_RETURNS */
1575 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
1577 /* X86_TUNE_EXT_80387_CONSTANTS */
1578 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
1579 | m_CORE2 | m_GENERIC,
1581 /* X86_TUNE_SHORTEN_X87_SSE */
1584 /* X86_TUNE_AVOID_VECTOR_DECODE */
1587 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for HImode
1588 and SImode multiply, but the 386 and 486 do HImode multiply faster. */
1591 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of a 32-bit constant and memory takes
1592 the vector path on AMD machines. */
1593 m_K8 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1,
1595 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of an 8-bit constant takes the vector path on AMD machines. */
1597 m_K8 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1,
1599 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
1603 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1604 but one byte longer. */
1607 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with a memory
1608 operand that cannot be represented using a modRM byte. The XOR
1609 replacement is long decoded, so this split helps here as well. */
1612 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion from FP to FP. */
1614 m_AMDFAM10 | m_GENERIC,
1616 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1617 from integer to FP. */
1620 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1621 with a subsequent conditional jump instruction into a single
1622 compare-and-branch uop. */
1625 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
1626 will impact LEA instruction selection. */
1630 /* Feature tests against the various architecture variations. */
1631 unsigned char ix86_arch_features[X86_ARCH_LAST];
1633 /* Feature tests against the various architecture variations, used to create
1634 ix86_arch_features based on the processor mask. */
1635 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1636 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1637 ~(m_386 | m_486 | m_PENT | m_K6),
1639 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1642 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1645 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1648 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1652 static const unsigned int x86_accumulate_outgoing_args
1653 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1656 static const unsigned int x86_arch_always_fancy_math_387
1657 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1658 | m_NOCONA | m_CORE2 | m_GENERIC;
1660 static enum stringop_alg stringop_alg = no_stringop;
1662 /* In case the average insn count for a single function invocation is
1663 lower than this constant, emit fast (but longer) prologue and epilogue code. */
1665 #define FAST_PROLOGUE_INSN_COUNT 20
1667 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
1668 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1669 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1670 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1672 /* Array of the smallest class containing reg number REGNO, indexed by
1673 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1675 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1677 /* ax, dx, cx, bx */
1678 AREG, DREG, CREG, BREG,
1679 /* si, di, bp, sp */
1680 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1682 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1683 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1686 /* flags, fpsr, fpcr, frame */
1687 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1689 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1692 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1695 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1696 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1697 /* SSE REX registers */
1698 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1702 /* The "default" register map used in 32bit mode. */
1704 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1706 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1707 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1708 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1709 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1710 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1711 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1712 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1715 /* The "default" register map used in 64bit mode. */
1717 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1719 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1720 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1721 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1722 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1723 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1724 8, 9, 10, 11, 12, 13, 14, 15, /* extended integer registers */
1725 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1728 /* Define the register numbers to be used in Dwarf debugging information.
1729 The SVR4 reference port C compiler uses the following register numbers
1730 in its Dwarf output code:
1731 0 for %eax (gcc regno = 0)
1732 1 for %ecx (gcc regno = 2)
1733 2 for %edx (gcc regno = 1)
1734 3 for %ebx (gcc regno = 3)
1735 4 for %esp (gcc regno = 7)
1736 5 for %ebp (gcc regno = 6)
1737 6 for %esi (gcc regno = 4)
1738 7 for %edi (gcc regno = 5)
1739 The following three DWARF register numbers are never generated by
1740 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1741 believes these numbers have these meanings.
1742 8 for %eip (no gcc equivalent)
1743 9 for %eflags (gcc regno = 17)
1744 10 for %trapno (no gcc equivalent)
1745 It is not at all clear how we should number the FP stack registers
1746 for the x86 architecture. If the version of SDB on x86/svr4 were
1747 a bit less brain dead with respect to floating-point, then we would
1748 have a precedent to follow with respect to DWARF register numbers
1749 for x86 FP registers, but the SDB on x86/svr4 is so completely
1750 broken with respect to FP registers that it is hardly worth thinking
1751 of it as something to strive for compatibility with.
1752 The version of x86/svr4 SDB I have at the moment does (partially)
1753 seem to believe that DWARF register number 11 is associated with
1754 the x86 register %st(0), but that's about all. Higher DWARF
1755 register numbers don't seem to be associated with anything in
1756 particular, and even for DWARF regno 11, SDB only seems to
1757 understand that it should say that a variable lives in %st(0) (when
1758 asked via an `=' command) if we said it was in DWARF regno 11,
1759 but SDB still prints garbage when asked for the value of the
1760 variable in question (via a `/' command).
1761 (Also note that the labels SDB prints for various FP stack regs
1762 when doing an `x' command are all wrong.)
1763 Note that these problems generally don't affect the native SVR4
1764 C compiler because it doesn't allow the use of -O with -g and
1765 because when it is *not* optimizing, it allocates a memory
1766 location for each floating-point variable, and the memory
1767 location is what gets described in the DWARF AT_location
1768 attribute for the variable in question.
1769 Regardless of the severe mental illness of the x86/svr4 SDB, we
1770 do something sensible here and we use the following DWARF
1771 register numbers. Note that these are all stack-top-relative numbers:
1773 11 for %st(0) (gcc regno = 8)
1774 12 for %st(1) (gcc regno = 9)
1775 13 for %st(2) (gcc regno = 10)
1776 14 for %st(3) (gcc regno = 11)
1777 15 for %st(4) (gcc regno = 12)
1778 16 for %st(5) (gcc regno = 13)
1779 17 for %st(6) (gcc regno = 14)
1780 18 for %st(7) (gcc regno = 15) */
1782 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1784 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1785 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1786 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1787 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1788 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1789 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1790 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1793 /* Test and compare insns in i386.md store the information needed to
1794 generate branch and scc insns here. */
1796 rtx ix86_compare_op0 = NULL_RTX;
1797 rtx ix86_compare_op1 = NULL_RTX;
1799 /* Define parameter passing and return registers. */
1801 static int const x86_64_int_parameter_registers[6] =
1803 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
1806 static int const x86_64_ms_abi_int_parameter_registers[4] =
1808 CX_REG, DX_REG, R8_REG, R9_REG
1811 static int const x86_64_int_return_registers[4] =
1813 AX_REG, DX_REG, DI_REG, SI_REG
1816 /* Define the structure for the machine field in struct function. */
1818 struct GTY(()) stack_local_entry {
1819 unsigned short mode;
1822 struct stack_local_entry *next;
1825 /* Structure describing stack frame layout.
1826 Stack grows downward:
1832 saved frame pointer if frame_pointer_needed
1833 <- HARD_FRAME_POINTER
1842 [va_arg registers] \
1843 ...                 > to_allocate <- FRAME_POINTER
1856 HOST_WIDE_INT frame;
1858 int outgoing_arguments_size;
1860 HOST_WIDE_INT to_allocate;
1861 /* The offsets relative to ARG_POINTER. */
1862 HOST_WIDE_INT frame_pointer_offset;
1863 HOST_WIDE_INT hard_frame_pointer_offset;
1864 HOST_WIDE_INT stack_pointer_offset;
1866 /* When save_regs_using_mov is set, emit prologue using
1867 move instead of push instructions. */
1868 bool save_regs_using_mov;
1871 /* Code model option. */
1872 enum cmodel ix86_cmodel;
1874 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1876 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1878 /* Which unit we are generating floating point math for. */
1879 enum fpmath_unit ix86_fpmath;
1881 /* Which CPU we are scheduling for. */
1882 enum attr_cpu ix86_schedule;
1884 /* Which CPU we are optimizing for. */
1885 enum processor_type ix86_tune;
1887 /* Which instruction set architecture to use. */
1888 enum processor_type ix86_arch;
1890 /* Nonzero if the SSE prefetch instruction is not a NOP. */
1891 int x86_prefetch_sse;
1893 /* ix86_regparm_string as a number */
1894 static int ix86_regparm;
1896 /* -mstackrealign option */
1897 extern int ix86_force_align_arg_pointer;
1898 static const char ix86_force_align_arg_pointer_string[]
1899 = "force_align_arg_pointer";
1901 static rtx (*ix86_gen_leave) (void);
1902 static rtx (*ix86_gen_pop1) (rtx);
1903 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1904 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1905 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
1906 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1907 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1908 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1910 /* Preferred alignment for stack boundary in bits. */
1911 unsigned int ix86_preferred_stack_boundary;
1913 /* Alignment for incoming stack boundary in bits specified at the command line. */
1915 static unsigned int ix86_user_incoming_stack_boundary;
1917 /* Default alignment for incoming stack boundary in bits. */
1918 static unsigned int ix86_default_incoming_stack_boundary;
1920 /* Alignment for incoming stack boundary in bits. */
1921 unsigned int ix86_incoming_stack_boundary;
1923 /* The ABI used by the target. */
1924 enum calling_abi ix86_abi;
1926 /* Values 1-5: see jump.c */
1927 int ix86_branch_cost;
1929 /* Calling ABI specific va_list type nodes. */
1930 static GTY(()) tree sysv_va_list_type_node;
1931 static GTY(()) tree ms_va_list_type_node;
1933 /* Variables which are this size or smaller are put in the data/bss
1934 or ldata/lbss sections. */
1936 int ix86_section_threshold = 65536;
1938 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1939 char internal_label_prefix[16];
1940 int internal_label_prefix_len;
1942 /* Fence to use after loop using movnt. */
1945 /* Register class used for passing a given 64bit part of the argument.
1946 These represent classes as documented by the PS ABI, with the exception
1947 of the SSESF and SSEDF classes, which are basically the SSE class;
1948 gcc just uses SF or DFmode moves instead of DImode to avoid reformatting penalties.
1950 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
1951 whenever possible (the upper half then contains only padding). */
1952 enum x86_64_reg_class
1955 X86_64_INTEGER_CLASS,
1956 X86_64_INTEGERSI_CLASS,
1963 X86_64_COMPLEX_X87_CLASS,
1967 #define MAX_CLASSES 4
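/* Worked example (standard x86-64 psABI classification, sketched here for
   illustration):
     struct s { double d; int i; };   -- 16 bytes = two eightbytes
   classifies as { X86_64_SSEDF_CLASS, X86_64_INTEGERSI_CLASS }: the first
   eightbyte holds only the double, and the second holds a 4-byte int whose
   upper half is padding - hence the cheaper SImode class. */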
1969 /* Table of constants used by fldpi, fldln2, etc. */
1970 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1971 static bool ext_80387_constants_init = false;
1974 static struct machine_function * ix86_init_machine_status (void);
1975 static rtx ix86_function_value (const_tree, const_tree, bool);
1976 static bool ix86_function_value_regno_p (const unsigned int);
1977 static rtx ix86_static_chain (const_tree, bool);
1978 static int ix86_function_regparm (const_tree, const_tree);
1979 static void ix86_compute_frame_layout (struct ix86_frame *);
1980 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1982 static void ix86_add_new_builtins (int);
1983 static rtx ix86_expand_vec_perm_builtin (tree);
1984 static tree ix86_canonical_va_list_type (tree);
1986 enum ix86_function_specific_strings
1988 IX86_FUNCTION_SPECIFIC_ARCH,
1989 IX86_FUNCTION_SPECIFIC_TUNE,
1990 IX86_FUNCTION_SPECIFIC_FPMATH,
1991 IX86_FUNCTION_SPECIFIC_MAX
1994 static char *ix86_target_string (int, int, const char *, const char *,
1995 const char *, bool);
1996 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1997 static void ix86_function_specific_save (struct cl_target_option *);
1998 static void ix86_function_specific_restore (struct cl_target_option *);
1999 static void ix86_function_specific_print (FILE *, int,
2000 struct cl_target_option *);
2001 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
2002 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
2003 static bool ix86_can_inline_p (tree, tree);
2004 static void ix86_set_current_function (tree);
2005 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
2007 static enum calling_abi ix86_function_abi (const_tree);
2010 #ifndef SUBTARGET32_DEFAULT_CPU
2011 #define SUBTARGET32_DEFAULT_CPU "i386"
2014 /* The svr4 ABI for the i386 says that records and unions are returned in memory. */
2016 #ifndef DEFAULT_PCC_STRUCT_RETURN
2017 #define DEFAULT_PCC_STRUCT_RETURN 1
2020 /* Whether -mtune= or -march= were specified */
2021 static int ix86_tune_defaulted;
2022 static int ix86_arch_specified;
2024 /* Bit flags that specify the ISA we are compiling for. */
2025 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
2027 /* A mask of ix86_isa_flags that includes bit X if X
2028 was set or cleared on the command line. */
2029 static int ix86_isa_flags_explicit;
2031 /* Define a set of ISAs which are available when a given ISA is
2032 enabled. MMX and SSE ISAs are handled separately. */
2034 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
2035 #define OPTION_MASK_ISA_3DNOW_SET \
2036 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
2038 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
2039 #define OPTION_MASK_ISA_SSE2_SET \
2040 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
2041 #define OPTION_MASK_ISA_SSE3_SET \
2042 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
2043 #define OPTION_MASK_ISA_SSSE3_SET \
2044 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
2045 #define OPTION_MASK_ISA_SSE4_1_SET \
2046 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
2047 #define OPTION_MASK_ISA_SSE4_2_SET \
2048 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
2049 #define OPTION_MASK_ISA_AVX_SET \
2050 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
2051 #define OPTION_MASK_ISA_FMA_SET \
2052 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
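/* For example, the *_SET macros chain, so enabling -mfma pulls in the whole
   stack it depends on:
     OPTION_MASK_ISA_FMA_SET
       == OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX
        | OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1
        | OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3
        | OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE. */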
2054 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same as -msse4.2. */
2056 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
2058 #define OPTION_MASK_ISA_SSE4A_SET \
2059 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
2060 #define OPTION_MASK_ISA_FMA4_SET \
2061 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
2062 | OPTION_MASK_ISA_AVX_SET)
2063 #define OPTION_MASK_ISA_XOP_SET \
2064 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
2065 #define OPTION_MASK_ISA_LWP_SET \
2068 /* AES and PCLMUL need SSE2 because they use xmm registers. */
2069 #define OPTION_MASK_ISA_AES_SET \
2070 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
2071 #define OPTION_MASK_ISA_PCLMUL_SET \
2072 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
2074 #define OPTION_MASK_ISA_ABM_SET \
2075 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
2077 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
2078 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
2079 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
2080 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
2081 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
2083 /* Define a set of ISAs which aren't available when a given ISA is
2084 disabled. MMX and SSE ISAs are handled separately. */
2086 #define OPTION_MASK_ISA_MMX_UNSET \
2087 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
2088 #define OPTION_MASK_ISA_3DNOW_UNSET \
2089 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
2090 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
2092 #define OPTION_MASK_ISA_SSE_UNSET \
2093 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
2094 #define OPTION_MASK_ISA_SSE2_UNSET \
2095 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
2096 #define OPTION_MASK_ISA_SSE3_UNSET \
2097 (OPTION_MASK_ISA_SSE3 \
2098 | OPTION_MASK_ISA_SSSE3_UNSET \
2099 | OPTION_MASK_ISA_SSE4A_UNSET )
2100 #define OPTION_MASK_ISA_SSSE3_UNSET \
2101 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2102 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2103 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2104 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2105 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2106 #define OPTION_MASK_ISA_AVX_UNSET \
2107 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2108 | OPTION_MASK_ISA_FMA4_UNSET)
2109 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
2111 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same as -mno-sse4.1. */
2113 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2115 #define OPTION_MASK_ISA_SSE4A_UNSET \
2116 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2118 #define OPTION_MASK_ISA_FMA4_UNSET \
2119 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2120 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2121 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2123 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2124 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2125 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2126 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2127 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2128 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2129 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2130 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
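/* The *_UNSET chains run in the opposite direction: disabling an ISA also
   disables everything that depends on it. Expanding the macros above,
     OPTION_MASK_ISA_SSE2_UNSET
       == SSE2 | SSE3 | SSSE3 | SSE4A | SSE4_1 | SSE4_2
        | AVX | FMA | FMA4 | XOP
   so -mno-sse2 leaves no extension enabled that requires SSE2. */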
2132 /* Vectorization library interface and handlers. */
2133 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
2134 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2135 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2137 /* Processor target table, indexed by processor number */
2140 const struct processor_costs *cost; /* Processor costs */
2141 const int align_loop; /* Default alignments. */
2142 const int align_loop_max_skip;
2143 const int align_jump;
2144 const int align_jump_max_skip;
2145 const int align_func;
2148 static const struct ptt processor_target_table[PROCESSOR_max] =
2150 {&i386_cost, 4, 3, 4, 3, 4},
2151 {&i486_cost, 16, 15, 16, 15, 16},
2152 {&pentium_cost, 16, 7, 16, 7, 16},
2153 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2154 {&geode_cost, 0, 0, 0, 0, 0},
2155 {&k6_cost, 32, 7, 32, 7, 32},
2156 {&athlon_cost, 16, 7, 16, 7, 16},
2157 {&pentium4_cost, 0, 0, 0, 0, 0},
2158 {&k8_cost, 16, 7, 16, 7, 16},
2159 {&nocona_cost, 0, 0, 0, 0, 0},
2160 {&core2_cost, 16, 10, 16, 10, 16},
2161 {&generic32_cost, 16, 7, 16, 7, 16},
2162 {&generic64_cost, 16, 10, 16, 10, 16},
2163 {&amdfam10_cost, 32, 24, 32, 7, 32},
2164 {&bdver1_cost, 32, 24, 32, 7, 32},
2165 {&atom_cost, 16, 7, 16, 7, 16}
2168 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2195 /* Implement TARGET_HANDLE_OPTION. */
2198 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
2205 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2206 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2210 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2211 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2218 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2219 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2223 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2224 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2234 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2235 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2239 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2240 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2247 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2248 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2252 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2253 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2260 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2261 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2265 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2266 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2273 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2274 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2278 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2279 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2286 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2287 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2291 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2292 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2299 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2300 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2304 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2305 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2312 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2313 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2317 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2318 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2325 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2326 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2330 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2331 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2336 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2337 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2341 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2342 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2348 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2349 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2353 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2354 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2361 ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2362 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2366 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2367 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2374 ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2375 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2379 ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2380 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2387 ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2388 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2392 ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2393 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2400 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2401 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2405 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2406 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2413 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2414 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2418 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2419 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2426 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2427 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2431 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2432 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2439 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2440 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2444 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2445 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2452 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2453 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2457 ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2458 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2465 ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2466 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2470 ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2471 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2478 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2479 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2483 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2484 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2491 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2492 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2496 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2497 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
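/* Every option case above follows the same shape; schematically (a sketch
   with a hypothetical OPT_mfoo, not an actual case from this switch):

     case OPT_mfoo:
       if (value)
         {
           ix86_isa_flags |= OPTION_MASK_ISA_FOO_SET;
           ix86_isa_flags_explicit |= OPTION_MASK_ISA_FOO_SET;
         }
       else
         {
           ix86_isa_flags &= ~OPTION_MASK_ISA_FOO_UNSET;
           ix86_isa_flags_explicit |= OPTION_MASK_ISA_FOO_UNSET;
         }
       return true;

   Recording the choice in ix86_isa_flags_explicit lets the defaulting code
   in override_options avoid overriding an explicit user request. */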
2506 /* Return a string that documents the current -m options. The caller is
2507 responsible for freeing the string. */
2510 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2511 const char *fpmath, bool add_nl_p)
2513 struct ix86_target_opts
2515 const char *option; /* option string */
2516 int mask; /* isa mask options */
2519 /* This table is ordered so that options like -msse4.2, which imply
2520 preceding options, are matched first. */
2521 static struct ix86_target_opts isa_opts[] =
2523 { "-m64", OPTION_MASK_ISA_64BIT },
2524 { "-mfma4", OPTION_MASK_ISA_FMA4 },
2525 { "-mfma", OPTION_MASK_ISA_FMA },
2526 { "-mxop", OPTION_MASK_ISA_XOP },
2527 { "-mlwp", OPTION_MASK_ISA_LWP },
2528 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2529 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2530 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2531 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2532 { "-msse3", OPTION_MASK_ISA_SSE3 },
2533 { "-msse2", OPTION_MASK_ISA_SSE2 },
2534 { "-msse", OPTION_MASK_ISA_SSE },
2535 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2536 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2537 { "-mmmx", OPTION_MASK_ISA_MMX },
2538 { "-mabm", OPTION_MASK_ISA_ABM },
2539 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2540 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2541 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2542 { "-maes", OPTION_MASK_ISA_AES },
2543 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2547 static struct ix86_target_opts flag_opts[] =
2549 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2550 { "-m80387", MASK_80387 },
2551 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2552 { "-malign-double", MASK_ALIGN_DOUBLE },
2553 { "-mcld", MASK_CLD },
2554 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2555 { "-mieee-fp", MASK_IEEE_FP },
2556 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2557 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2558 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2559 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2560 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2561 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2562 { "-mno-red-zone", MASK_NO_RED_ZONE },
2563 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2564 { "-mrecip", MASK_RECIP },
2565 { "-mrtd", MASK_RTD },
2566 { "-msseregparm", MASK_SSEREGPARM },
2567 { "-mstack-arg-probe", MASK_STACK_PROBE },
2568 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2571 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2574 char target_other[40];
2583 memset (opts, '\0', sizeof (opts));
2585 /* Add -march= option. */
2588 opts[num][0] = "-march=";
2589 opts[num++][1] = arch;
2592 /* Add -mtune= option. */
2595 opts[num][0] = "-mtune=";
2596 opts[num++][1] = tune;
2599 /* Pick out the enabled ISA options. */
2600 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2602 if ((isa & isa_opts[i].mask) != 0)
2604 opts[num++][0] = isa_opts[i].option;
2605 isa &= ~ isa_opts[i].mask;
2609 if (isa && add_nl_p)
2611 opts[num++][0] = isa_other;
2612 sprintf (isa_other, "(other isa: %#x)", isa);
2615 /* Add flag options. */
2616 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2618 if ((flags & flag_opts[i].mask) != 0)
2620 opts[num++][0] = flag_opts[i].option;
2621 flags &= ~ flag_opts[i].mask;
2625 if (flags && add_nl_p)
2627 opts[num++][0] = target_other;
2628 sprintf (target_other, "(other flags: %#x)", flags);
2631 /* Add -mfpmath= option. */
2634 opts[num][0] = "-mfpmath=";
2635 opts[num++][1] = fpmath;
2642 gcc_assert (num < ARRAY_SIZE (opts));
2644 /* Size the string. */
2646 sep_len = (add_nl_p) ? 3 : 1;
2647 for (i = 0; i < num; i++)
2650 for (j = 0; j < 2; j++)
2652 len += strlen (opts[i][j]);
2655 /* Build the string. */
2656 ret = ptr = (char *) xmalloc (len);
2659 for (i = 0; i < num; i++)
2663 for (j = 0; j < 2; j++)
2664 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2671 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2679 for (j = 0; j < 2; j++)
2682 memcpy (ptr, opts[i][j], len2[j]);
2684 line_len += len2[j];
2689 gcc_assert (ret + len >= ptr);
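/* Usage example (hypothetical arguments): a call such as
     ix86_target_string (OPTION_MASK_ISA_64BIT | OPTION_MASK_ISA_SSE2,
                         MASK_80387, "k8", "generic", "sse", true)
   would build something like
     "-march=k8 -mtune=generic -m64 -msse2 -m80387 -mfpmath=sse"
   wrapping onto a new line, when add_nl_p is true, whenever a line would
   exceed roughly 70 columns. */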
2694 /* Return TRUE if software prefetching is beneficial for the function being compiled. */
2698 software_prefetching_beneficial_p (void)
2702 case PROCESSOR_GEODE:
2704 case PROCESSOR_ATHLON:
2706 case PROCESSOR_AMDFAM10:
2714 /* Function that is callable from the debugger to print the current options. */
2717 ix86_debug_options (void)
2719 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2720 ix86_arch_string, ix86_tune_string,
2721 ix86_fpmath_string, true);
2725 fprintf (stderr, "%s\n\n", opts);
2729 fputs ("<no options>\n\n", stderr);
2734 /* Sometimes certain combinations of command options do not make
2735 sense on a particular target machine. You can define a macro
2736 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2737 defined, is executed once just after all the command options have been parsed.
2740 Don't use this macro to turn on various extra optimizations for
2741 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2744 override_options (bool main_args_p)
2747 unsigned int ix86_arch_mask, ix86_tune_mask;
2748 const bool ix86_tune_specified = (ix86_tune_string != NULL);
2753 /* Comes from final.c -- no real reason to change it. */
2754 #define MAX_CODE_ALIGN 16
2762 PTA_PREFETCH_SSE = 1 << 4,
2764 PTA_3DNOW_A = 1 << 6,
2768 PTA_POPCNT = 1 << 10,
2770 PTA_SSE4A = 1 << 12,
2771 PTA_NO_SAHF = 1 << 13,
2772 PTA_SSE4_1 = 1 << 14,
2773 PTA_SSE4_2 = 1 << 15,
2775 PTA_PCLMUL = 1 << 17,
2778 PTA_MOVBE = 1 << 20,
2786 const char *const name; /* processor name or nickname. */
2787 const enum processor_type processor;
2788 const enum attr_cpu schedule;
2789 const unsigned /*enum pta_flags*/ flags;
2791 const processor_alias_table[] =
2793 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2794 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2795 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2796 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2797 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2798 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2799 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2800 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2801 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2802 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2803 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2804 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2805 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2807 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2809 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2810 PTA_MMX | PTA_SSE | PTA_SSE2},
2811 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2812 PTA_MMX | PTA_SSE | PTA_SSE2},
2813 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2814 PTA_MMX | PTA_SSE | PTA_SSE2},
2815 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2816 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2817 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2818 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2819 | PTA_CX16 | PTA_NO_SAHF},
2820 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2821 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2822 | PTA_SSSE3 | PTA_CX16},
2823 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2824 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2825 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2826 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2827 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2828 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2829 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2830 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2831 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2832 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2833 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2834 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2835 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2836 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2837 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2838 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2839 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2840 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2841 {"x86-64", PROCESSOR_K8, CPU_K8,
2842 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2843 {"k8", PROCESSOR_K8, CPU_K8,
2844 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2845 | PTA_SSE2 | PTA_NO_SAHF},
2846 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2847 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2848 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2849 {"opteron", PROCESSOR_K8, CPU_K8,
2850 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2851 | PTA_SSE2 | PTA_NO_SAHF},
2852 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2853 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2854 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2855 {"athlon64", PROCESSOR_K8, CPU_K8,
2856 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2857 | PTA_SSE2 | PTA_NO_SAHF},
2858 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2859 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2860 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2861 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2862 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2863 | PTA_SSE2 | PTA_NO_SAHF},
2864 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2865 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2866 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2867 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2868 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2869 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2870 {"bdver1", PROCESSOR_BDVER1, CPU_BDVER1,
2871 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2872 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM
2873 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AES
2874 | PTA_PCLMUL | PTA_AVX | PTA_FMA4 | PTA_XOP | PTA_LWP},
2875 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2876 0 /* flags are only used for -march switch. */ },
2877 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2878 PTA_64BIT /* flags are only used for -march switch. */ },
2881 int const pta_size = ARRAY_SIZE (processor_alias_table);
2883 /* Set up prefix/suffix so the error messages refer to either the command
2884 line argument or the attribute(target). */
2893 prefix = "option(\"";
2898 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2899 SUBTARGET_OVERRIDE_OPTIONS;
2902 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2903 SUBSUBTARGET_OVERRIDE_OPTIONS;
2906 /* -fPIC is the default for x86_64. */
2907 if (TARGET_MACHO && TARGET_64BIT)
2910 /* Set the default values for switches whose default depends on TARGET_64BIT
2911 in case they weren't overridden by command line options. */
2916 /* Mach-O doesn't support omitting the frame pointer for now. */
2917 if (flag_omit_frame_pointer == 2)
2918 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2919 if (flag_asynchronous_unwind_tables == 2)
2920 flag_asynchronous_unwind_tables = 1;
2921 if (flag_pcc_struct_return == 2)
2922 flag_pcc_struct_return = 0;
2928 if (flag_omit_frame_pointer == 2)
2929 flag_omit_frame_pointer = 0;
2930 if (flag_asynchronous_unwind_tables == 2)
2931 flag_asynchronous_unwind_tables = 0;
2932 if (flag_pcc_struct_return == 2)
2933 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2936 /* Need to check -mtune=generic first. */
2937 if (ix86_tune_string)
2939 if (!strcmp (ix86_tune_string, "generic")
2940 || !strcmp (ix86_tune_string, "i686")
2941 /* As special support for cross compilers we read -mtune=native
2942 as -mtune=generic. With native compilers we won't see
2943 -mtune=native, as it will have been rewritten by the driver. */
2944 || !strcmp (ix86_tune_string, "native"))
2947 ix86_tune_string = "generic64";
2949 ix86_tune_string = "generic32";
2951 /* If this call is for setting the option attribute, allow the
2952 generic32/generic64 that was previously set. */
2953 else if (!main_args_p
2954 && (!strcmp (ix86_tune_string, "generic32")
2955 || !strcmp (ix86_tune_string, "generic64")))
2957 else if (!strncmp (ix86_tune_string, "generic", 7))
2958 error ("bad value (%s) for %stune=%s %s",
2959 ix86_tune_string, prefix, suffix, sw);
2960 else if (!strcmp (ix86_tune_string, "x86-64"))
2961 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2962 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2963 prefix, suffix, prefix, suffix, prefix, suffix);
2967 if (ix86_arch_string)
2968 ix86_tune_string = ix86_arch_string;
2969 if (!ix86_tune_string)
2971 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2972 ix86_tune_defaulted = 1;
2975 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2976 need to use a sensible tune option. */
2977 if (!strcmp (ix86_tune_string, "generic")
2978 || !strcmp (ix86_tune_string, "x86-64")
2979 || !strcmp (ix86_tune_string, "i686"))
2982 ix86_tune_string = "generic64";
2984 ix86_tune_string = "generic32";
2988 if (ix86_stringop_string)
2990 if (!strcmp (ix86_stringop_string, "rep_byte"))
2991 stringop_alg = rep_prefix_1_byte;
2992 else if (!strcmp (ix86_stringop_string, "libcall"))
2993 stringop_alg = libcall;
2994 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2995 stringop_alg = rep_prefix_4_byte;
2996 else if (!strcmp (ix86_stringop_string, "rep_8byte")
2998 /* rep; movq isn't available in 32-bit code. */
2999 stringop_alg = rep_prefix_8_byte;
3000 else if (!strcmp (ix86_stringop_string, "byte_loop"))
3001 stringop_alg = loop_1_byte;
3002 else if (!strcmp (ix86_stringop_string, "loop"))
3003 stringop_alg = loop;
3004 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
3005 stringop_alg = unrolled_loop;
3007 error ("bad value (%s) for %sstringop-strategy=%s %s",
3008 ix86_stringop_string, prefix, suffix, sw);
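/* For example, -mstringop-strategy=rep_8byte selects rep_prefix_8_byte,
   which expands string operations with a "rep movsq"/"rep stosq" style
   sequence. Because of the TARGET_64BIT guard (rep movsq does not exist
   in 32-bit code), requesting it in 32-bit mode falls through to the
   "bad value" error above. */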
3011 if (!ix86_arch_string)
3012 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
3014 ix86_arch_specified = 1;
3016 /* Validate -mabi= value. */
3017 if (ix86_abi_string)
3019 if (strcmp (ix86_abi_string, "sysv") == 0)
3020 ix86_abi = SYSV_ABI;
3021 else if (strcmp (ix86_abi_string, "ms") == 0)
3024 error ("unknown ABI (%s) for %sabi=%s %s",
3025 ix86_abi_string, prefix, suffix, sw);
3028 ix86_abi = DEFAULT_ABI;
3030 if (ix86_cmodel_string != 0)
3032 if (!strcmp (ix86_cmodel_string, "small"))
3033 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3034 else if (!strcmp (ix86_cmodel_string, "medium"))
3035 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
3036 else if (!strcmp (ix86_cmodel_string, "large"))
3037 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
3039 error ("code model %s does not support PIC mode", ix86_cmodel_string);
3040 else if (!strcmp (ix86_cmodel_string, "32"))
3041 ix86_cmodel = CM_32;
3042 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
3043 ix86_cmodel = CM_KERNEL;
3045 error ("bad value (%s) for %scmodel=%s %s",
3046 ix86_cmodel_string, prefix, suffix, sw);
3050 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
3051 use of rip-relative addressing. This eliminates fixups that
3052 would otherwise be needed if this object is to be placed in a
3053 DLL, and is essentially just as efficient as direct addressing. */
3054 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
3055 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
3056 else if (TARGET_64BIT)
3057 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3059 ix86_cmodel = CM_32;
3061 if (ix86_asm_string != 0)
3064 && !strcmp (ix86_asm_string, "intel"))
3065 ix86_asm_dialect = ASM_INTEL;
3066 else if (!strcmp (ix86_asm_string, "att"))
3067 ix86_asm_dialect = ASM_ATT;
3069 error ("bad value (%s) for %sasm=%s %s",
3070 ix86_asm_string, prefix, suffix, sw);
3072 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
3073 error ("code model %qs not supported in the %s bit mode",
3074 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
3075 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
3076 sorry ("%i-bit mode not compiled in",
3077 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
3079 for (i = 0; i < pta_size; i++)
3080 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
3082 ix86_schedule = processor_alias_table[i].schedule;
3083 ix86_arch = processor_alias_table[i].processor;
3084 /* Default cpu tuning to the architecture. */
3085 ix86_tune = ix86_arch;
3087 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3088 error ("CPU you selected does not support x86-64 "
3091 if (processor_alias_table[i].flags & PTA_MMX
3092 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
3093 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
3094 if (processor_alias_table[i].flags & PTA_3DNOW
3095 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
3096 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
3097 if (processor_alias_table[i].flags & PTA_3DNOW_A
3098 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
3099 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
3100 if (processor_alias_table[i].flags & PTA_SSE
3101 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
3102 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
3103 if (processor_alias_table[i].flags & PTA_SSE2
3104 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
3105 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
3106 if (processor_alias_table[i].flags & PTA_SSE3
3107 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
3108 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
3109 if (processor_alias_table[i].flags & PTA_SSSE3
3110 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
3111 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
3112 if (processor_alias_table[i].flags & PTA_SSE4_1
3113 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
3114 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
3115 if (processor_alias_table[i].flags & PTA_SSE4_2
3116 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
3117 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
3118 if (processor_alias_table[i].flags & PTA_AVX
3119 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
3120 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
3121 if (processor_alias_table[i].flags & PTA_FMA
3122 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
3123 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
3124 if (processor_alias_table[i].flags & PTA_SSE4A
3125 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3126 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3127 if (processor_alias_table[i].flags & PTA_FMA4
3128 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3129 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3130 if (processor_alias_table[i].flags & PTA_XOP
3131 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3132 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3133 if (processor_alias_table[i].flags & PTA_LWP
3134 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3135 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3136 if (processor_alias_table[i].flags & PTA_ABM
3137 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3138 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3139 if (processor_alias_table[i].flags & PTA_CX16
3140 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3141 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3142 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3143 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3144 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3145 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3146 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3147 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3148 if (processor_alias_table[i].flags & PTA_MOVBE
3149 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3150 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3151 if (processor_alias_table[i].flags & PTA_AES
3152 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3153 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3154 if (processor_alias_table[i].flags & PTA_PCLMUL
3155 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3156 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3157 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3158 x86_prefetch_sse = true;
3163 if (!strcmp (ix86_arch_string, "generic"))
3164 error ("generic CPU can be used only for %stune=%s %s",
3165 prefix, suffix, sw);
3166 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3167 error ("bad value (%s) for %sarch=%s %s",
3168 ix86_arch_string, prefix, suffix, sw);
3170 ix86_arch_mask = 1u << ix86_arch;
3171 for (i = 0; i < X86_ARCH_LAST; ++i)
3172 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3174 for (i = 0; i < pta_size; i++)
3175 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3177 ix86_schedule = processor_alias_table[i].schedule;
3178 ix86_tune = processor_alias_table[i].processor;
3179 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3181 if (ix86_tune_defaulted)
3183 ix86_tune_string = "x86-64";
3184 for (i = 0; i < pta_size; i++)
3185 if (! strcmp (ix86_tune_string,
3186 processor_alias_table[i].name))
3188 ix86_schedule = processor_alias_table[i].schedule;
3189 ix86_tune = processor_alias_table[i].processor;
3192 error ("CPU you selected does not support x86-64 "
3195 /* Intel CPUs have always interpreted SSE prefetch instructions as
3196 NOPs, so we can enable SSE prefetch instructions even when
3197 -mtune (rather than -march) points us to a processor that has them.
3198 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3199 higher processors. */
3201 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3202 x86_prefetch_sse = true;
3206 if (ix86_tune_specified && i == pta_size)
3207 error ("bad value (%s) for %stune=%s %s",
3208 ix86_tune_string, prefix, suffix, sw);
3210 ix86_tune_mask = 1u << ix86_tune;
3211 for (i = 0; i < X86_TUNE_LAST; ++i)
3212 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3215 ix86_cost = &ix86_size_cost;
3217 ix86_cost = processor_target_table[ix86_tune].cost;
3219 /* Arrange to set up i386_stack_locals for all functions. */
3220 init_machine_status = ix86_init_machine_status;
3222 /* Validate -mregparm= value. */
3223 if (ix86_regparm_string)
3226 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3227 i = atoi (ix86_regparm_string);
3228 if (i < 0 || i > REGPARM_MAX)
3229 error ("%sregparm=%d%s is not between 0 and %d",
3230 prefix, i, suffix, REGPARM_MAX);
3235 ix86_regparm = REGPARM_MAX;
3237 /* If the user has provided any of the -malign-* options,
3238 warn and use that value only if -falign-* is not set.
3239 Remove this code in GCC 3.2 or later. */
3240 if (ix86_align_loops_string)
3242 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3243 prefix, suffix, suffix);
3244 if (align_loops == 0)
3246 i = atoi (ix86_align_loops_string);
3247 if (i < 0 || i > MAX_CODE_ALIGN)
3248 error ("%salign-loops=%d%s is not between 0 and %d",
3249 prefix, i, suffix, MAX_CODE_ALIGN);
3251 align_loops = 1 << i;
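/* Worked example: the obsolete option takes a log2 value, so
   -malign-loops=4 yields align_loops = 1 << 4 = 16, matching the
   byte-count spelling -falign-loops=16. */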
3255 if (ix86_align_jumps_string)
3257 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3258 prefix, suffix, suffix);
3259 if (align_jumps == 0)
3261 i = atoi (ix86_align_jumps_string);
3262 if (i < 0 || i > MAX_CODE_ALIGN)
3263 error ("%salign-loops=%d%s is not between 0 and %d",
3264 prefix, i, suffix, MAX_CODE_ALIGN);
3266 align_jumps = 1 << i;
3270 if (ix86_align_funcs_string)
3272 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3273 prefix, suffix, suffix);
3274 if (align_functions == 0)
3276 i = atoi (ix86_align_funcs_string);
3277 if (i < 0 || i > MAX_CODE_ALIGN)
3278 error ("%salign-loops=%d%s is not between 0 and %d",
3279 prefix, i, suffix, MAX_CODE_ALIGN);
3281 align_functions = 1 << i;
3285 /* Default align_* from the processor table. */
3286 if (align_loops == 0)
3288 align_loops = processor_target_table[ix86_tune].align_loop;
3289 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3291 if (align_jumps == 0)
3293 align_jumps = processor_target_table[ix86_tune].align_jump;
3294 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3296 if (align_functions == 0)
3298 align_functions = processor_target_table[ix86_tune].align_func;
3301 /* Validate -mbranch-cost= value, or provide default. */
3302 ix86_branch_cost = ix86_cost->branch_cost;
3303 if (ix86_branch_cost_string)
3305 i = atoi (ix86_branch_cost_string);
3307 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3309 ix86_branch_cost = i;
3311 if (ix86_section_threshold_string)
3313 i = atoi (ix86_section_threshold_string);
3315 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3317 ix86_section_threshold = i;
3320 if (ix86_tls_dialect_string)
3322 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3323 ix86_tls_dialect = TLS_DIALECT_GNU;
3324 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3325 ix86_tls_dialect = TLS_DIALECT_GNU2;
3327 error ("bad value (%s) for %stls-dialect=%s %s",
3328 ix86_tls_dialect_string, prefix, suffix, sw);
3331 if (ix87_precision_string)
3333 i = atoi (ix87_precision_string);
3334 if (i != 32 && i != 64 && i != 80)
3335 error ("pc%d is not valid precision setting (32, 64 or 80)", i);
3340 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3342 /* Enable by default the SSE and MMX builtins. Do allow the user to
3343 explicitly disable any of these. In particular, disabling SSE and
3344 MMX for kernel code is extremely useful. */
3345 if (!ix86_arch_specified)
3347 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3348 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3351 warning (0, "%srtd%s is ignored in 64bit mode", prefix, suffix);
3355 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3357 if (!ix86_arch_specified)
3359 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3361 /* The i386 ABI does not specify a red zone. It still makes sense to use
3362 it when the programmer takes care to keep the stack from being destroyed. */
3363 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3364 target_flags |= MASK_NO_RED_ZONE;
3367 /* Keep nonleaf frame pointers. */
3368 if (flag_omit_frame_pointer)
3369 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3370 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3371 flag_omit_frame_pointer = 1;
3373 /* If we're doing fast math, we don't care about comparison order
3374 wrt NaNs. This lets us use a shorter comparison sequence. */
3375 if (flag_finite_math_only)
3376 target_flags &= ~MASK_IEEE_FP;
3378 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3379 since the insns won't need emulation. */
3380 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3381 target_flags &= ~MASK_NO_FANCY_MATH_387;
3383 /* Likewise, if the target doesn't have a 387, or we've specified
3384 software floating point, don't use 387 inline intrinsics. */
3386 target_flags |= MASK_NO_FANCY_MATH_387;
3388 /* Turn on MMX builtins for -msse. */
3391 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3392 x86_prefetch_sse = true;
3395 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3396 if (TARGET_SSE4_2 || TARGET_ABM)
3397 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3399 /* Validate -mpreferred-stack-boundary= value or default it to
3400 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3401 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3402 if (ix86_preferred_stack_boundary_string)
3404 i = atoi (ix86_preferred_stack_boundary_string);
3405 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3406 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3407 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3409 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
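/* Worked example: -mpreferred-stack-boundary=4 gives
   (1 << 4) * BITS_PER_UNIT = 16 * 8 = 128 bits, i.e. the 16-byte
   stack alignment that the 64-bit psABI expects at calls. */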
3412 /* Set the default value for -mstackrealign. */
3413 if (ix86_force_align_arg_pointer == -1)
3414 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3416 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3418 /* Validate -mincoming-stack-boundary= value or default it to
3419 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3420 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3421 if (ix86_incoming_stack_boundary_string)
3423 i = atoi (ix86_incoming_stack_boundary_string);
3424 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3425 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3426 i, TARGET_64BIT ? 4 : 2);
3429 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3430 ix86_incoming_stack_boundary
3431 = ix86_user_incoming_stack_boundary;
3435 /* Accept -msseregparm only if at least SSE support is enabled. */
3436 if (TARGET_SSEREGPARM
3438 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3440 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3441 if (ix86_fpmath_string != 0)
3443 if (! strcmp (ix86_fpmath_string, "387"))
3444 ix86_fpmath = FPMATH_387;
3445 else if (! strcmp (ix86_fpmath_string, "sse"))
3449 warning (0, "SSE instruction set disabled, using 387 arithmetic");
3450 ix86_fpmath = FPMATH_387;
3453 ix86_fpmath = FPMATH_SSE;
3455 else if (! strcmp (ix86_fpmath_string, "387,sse")
3456 || ! strcmp (ix86_fpmath_string, "387+sse")
3457 || ! strcmp (ix86_fpmath_string, "sse,387")
3458 || ! strcmp (ix86_fpmath_string, "sse+387")
3459 || ! strcmp (ix86_fpmath_string, "both"))
3463 warning (0, "SSE instruction set disabled, using 387 arithmetic");
3464 ix86_fpmath = FPMATH_387;
3466 else if (!TARGET_80387)
3468 warning (0, "387 instruction set disabled, using SSE arithmetic");
3469 ix86_fpmath = FPMATH_SSE;
3472 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3475 error ("bad value (%s) for %sfpmath=%s %s",
3476 ix86_fpmath_string, prefix, suffix, sw);
3479 /* If the i387 is disabled, then do not return values in it. */
3481 target_flags &= ~MASK_FLOAT_RETURNS;
3483 /* Use an external vectorized library when vectorizing intrinsics. */
3484 if (ix86_veclibabi_string)
3486 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3487 ix86_veclib_handler = ix86_veclibabi_svml;
3488 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3489 ix86_veclib_handler = ix86_veclibabi_acml;
3491 error ("unknown vectorization library ABI type (%s) for "
3492 "%sveclibabi=%s %s", ix86_veclibabi_string,
3493 prefix, suffix, sw);
3496 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3497 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3499 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3501 /* ??? Unwind info is not correct around the CFG unless either a frame
3502 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3503 unwind info generation to be aware of the CFG and propagating states
3504 around edges. */
3505 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3506 || flag_exceptions || flag_non_call_exceptions)
3507 && flag_omit_frame_pointer
3508 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3510 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3511 warning (0, "unwind tables currently require either a frame pointer "
3512 "or %saccumulate-outgoing-args%s for correctness",
3514 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3517 /* If stack probes are required, the space used for large function
3518 arguments on the stack must also be probed, so enable
3519 -maccumulate-outgoing-args so this happens in the prologue. */
3520 if (TARGET_STACK_PROBE
3521 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3523 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3524 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3525 "for correctness", prefix, suffix);
3526 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3529 /* For sane SSE instruction set generation we need the fcomi instruction.
3530 It is safe to enable all CMOVE instructions. */
3534 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3537 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3538 p = strchr (internal_label_prefix, 'X');
3539 internal_label_prefix_len = p - internal_label_prefix;
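/* For instance (editorial note; the exact spelling is target-specific):
   on a typical ELF target ASM_GENERATE_INTERNAL_LABEL fills the buffer
   with "*.LX0", strchr finds the 'X' at index 3, and the recorded
   prefix is "*.L" with internal_label_prefix_len == 3.  */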
3543 /* When no scheduling description is available, disable the scheduler pass
3544 so it won't slow down compilation and make x87 code slower. */
3545 if (!TARGET_SCHEDULE)
3546 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3548 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3549 set_param_value ("simultaneous-prefetches",
3550 ix86_cost->simultaneous_prefetches);
3551 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3552 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3553 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3554 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3555 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3556 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
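/* Editorial sketch of the effect: a cost table whose prefetch_block is
   64 and whose l2_cache_size is 512 makes the defaults here behave like
   --param l1-cache-line-size=64 --param l2-cache-size=512, unless the
   user has already set those params explicitly.  */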
3558 /* Enable software prefetching at -O3 for CPUs where prefetching is helpful. */
3559 if (flag_prefetch_loop_arrays < 0
3561 && (optimize >= 3 || flag_profile_use)
3562 && software_prefetching_beneficial_p ())
3563 flag_prefetch_loop_arrays = 1;
3565 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3566 can be optimized to ap = __builtin_next_arg (0). */
3568 targetm.expand_builtin_va_start = NULL;
3572 ix86_gen_leave = gen_leave_rex64;
3573 ix86_gen_pop1 = gen_popdi1;
3574 ix86_gen_add3 = gen_adddi3;
3575 ix86_gen_sub3 = gen_subdi3;
3576 ix86_gen_sub3_carry = gen_subdi3_carry;
3577 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3578 ix86_gen_monitor = gen_sse3_monitor64;
3579 ix86_gen_andsp = gen_anddi3;
3583 ix86_gen_leave = gen_leave;
3584 ix86_gen_pop1 = gen_popsi1;
3585 ix86_gen_add3 = gen_addsi3;
3586 ix86_gen_sub3 = gen_subsi3;
3587 ix86_gen_sub3_carry = gen_subsi3_carry;
3588 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3589 ix86_gen_monitor = gen_sse3_monitor;
3590 ix86_gen_andsp = gen_andsi3;
3594 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3596 target_flags |= MASK_CLD & ~target_flags_explicit;
3599 /* Save the initial options in case the user uses function-specific options. */
3601 target_option_default_node = target_option_current_node
3602 = build_target_option_node ();
3605 /* Update register usage after having seen the compiler flags. */
3608 ix86_conditional_register_usage (void)
3613 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3615 if (fixed_regs[i] > 1)
3616 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3617 if (call_used_regs[i] > 1)
3618 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3621 /* The PIC register, if it exists, is fixed. */
3622 j = PIC_OFFSET_TABLE_REGNUM;
3623 if (j != INVALID_REGNUM)
3624 fixed_regs[j] = call_used_regs[j] = 1;
3626 /* The MS_ABI changes the set of call-used registers. */
3627 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
3629 call_used_regs[SI_REG] = 0;
3630 call_used_regs[DI_REG] = 0;
3631 call_used_regs[XMM6_REG] = 0;
3632 call_used_regs[XMM7_REG] = 0;
3633 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3634 call_used_regs[i] = 0;
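/* Editorial note: this mirrors the MS x64 calling convention, in which
   rsi, rdi and xmm6-xmm15 are callee-saved, unlike the SysV ABI where
   they are call-clobbered.  */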
3637 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3638 other call-clobbered regs for 64-bit. */
3641 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3643 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3644 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3645 && call_used_regs[i])
3646 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3649 /* If MMX is disabled, squash the registers. */
3651 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3652 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3653 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3655 /* If SSE is disabled, squash the registers. */
3657 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3658 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3659 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3661 /* If the FPU is disabled, squash the registers. */
3662 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3663 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3664 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3665 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3667 /* If 32-bit, squash the 64-bit registers. */
3670 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3672 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3678 /* Save the current options */
3681 ix86_function_specific_save (struct cl_target_option *ptr)
3683 ptr->arch = ix86_arch;
3684 ptr->schedule = ix86_schedule;
3685 ptr->tune = ix86_tune;
3686 ptr->fpmath = ix86_fpmath;
3687 ptr->branch_cost = ix86_branch_cost;
3688 ptr->tune_defaulted = ix86_tune_defaulted;
3689 ptr->arch_specified = ix86_arch_specified;
3690 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3691 ptr->target_flags_explicit = target_flags_explicit;
3693 /* The fields are char but the variables are not; make sure the
3694 values fit in the fields. */
3695 gcc_assert (ptr->arch == ix86_arch);
3696 gcc_assert (ptr->schedule == ix86_schedule);
3697 gcc_assert (ptr->tune == ix86_tune);
3698 gcc_assert (ptr->fpmath == ix86_fpmath);
3699 gcc_assert (ptr->branch_cost == ix86_branch_cost);
3702 /* Restore the current options */
3705 ix86_function_specific_restore (struct cl_target_option *ptr)
3707 enum processor_type old_tune = ix86_tune;
3708 enum processor_type old_arch = ix86_arch;
3709 unsigned int ix86_arch_mask, ix86_tune_mask;
3712 ix86_arch = (enum processor_type) ptr->arch;
3713 ix86_schedule = (enum attr_cpu) ptr->schedule;
3714 ix86_tune = (enum processor_type) ptr->tune;
3715 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
3716 ix86_branch_cost = ptr->branch_cost;
3717 ix86_tune_defaulted = ptr->tune_defaulted;
3718 ix86_arch_specified = ptr->arch_specified;
3719 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3720 target_flags_explicit = ptr->target_flags_explicit;
3722 /* Recreate the arch feature tests if the arch changed */
3723 if (old_arch != ix86_arch)
3725 ix86_arch_mask = 1u << ix86_arch;
3726 for (i = 0; i < X86_ARCH_LAST; ++i)
3727 ix86_arch_features[i]
3728 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3731 /* Recreate the tune optimization tests */
3732 if (old_tune != ix86_tune)
3734 ix86_tune_mask = 1u << ix86_tune;
3735 for (i = 0; i < X86_TUNE_LAST; ++i)
3736 ix86_tune_features[i]
3737 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3741 /* Print the current options */
3744 ix86_function_specific_print (FILE *file, int indent,
3745 struct cl_target_option *ptr)
3748 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3749 NULL, NULL, NULL, false);
3751 fprintf (file, "%*sarch = %d (%s)\n",
3754 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3755 ? cpu_names[ptr->arch]
3758 fprintf (file, "%*stune = %d (%s)\n",
3761 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3762 ? cpu_names[ptr->tune]
3765 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3766 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3767 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3768 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3772 fprintf (file, "%*s%s\n", indent, "", target_string);
3773 free (target_string);
3778 /* Inner function to process the attribute((target(...))); take an argument
3779 and set the current options from that argument. If we have a list,
3780 recursively go over the list. */
3783 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3788 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3789 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3790 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3791 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
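/* Expansion example (editorial): IX86_ATTR_ISA ("sse", OPT_msse)
   expands to the initializer { "sse", 3, ix86_opt_isa, OPT_msse, 0 },
   since sizeof ("sse") - 1 == 3.  */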
3806 enum ix86_opt_type type;
3811 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3812 IX86_ATTR_ISA ("abm", OPT_mabm),
3813 IX86_ATTR_ISA ("aes", OPT_maes),
3814 IX86_ATTR_ISA ("avx", OPT_mavx),
3815 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3816 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3817 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3818 IX86_ATTR_ISA ("sse", OPT_msse),
3819 IX86_ATTR_ISA ("sse2", OPT_msse2),
3820 IX86_ATTR_ISA ("sse3", OPT_msse3),
3821 IX86_ATTR_ISA ("sse4", OPT_msse4),
3822 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3823 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3824 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3825 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3826 IX86_ATTR_ISA ("fma4", OPT_mfma4),
3827 IX86_ATTR_ISA ("xop", OPT_mxop),
3828 IX86_ATTR_ISA ("lwp", OPT_mlwp),
3830 /* string options */
3831 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3832 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3833 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3836 IX86_ATTR_YES ("cld",
3840 IX86_ATTR_NO ("fancy-math-387",
3841 OPT_mfancy_math_387,
3842 MASK_NO_FANCY_MATH_387),
3844 IX86_ATTR_YES ("ieee-fp",
3848 IX86_ATTR_YES ("inline-all-stringops",
3849 OPT_minline_all_stringops,
3850 MASK_INLINE_ALL_STRINGOPS),
3852 IX86_ATTR_YES ("inline-stringops-dynamically",
3853 OPT_minline_stringops_dynamically,
3854 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3856 IX86_ATTR_NO ("align-stringops",
3857 OPT_mno_align_stringops,
3858 MASK_NO_ALIGN_STRINGOPS),
3860 IX86_ATTR_YES ("recip",
3866 /* If this is a list, recurse to get the options. */
3867 if (TREE_CODE (args) == TREE_LIST)
3871 for (; args; args = TREE_CHAIN (args))
3872 if (TREE_VALUE (args)
3873 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3879 else if (TREE_CODE (args) != STRING_CST)
3882 /* Handle multiple arguments separated by commas. */
3883 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3885 while (next_optstr && *next_optstr != '\0')
3887 char *p = next_optstr;
3889 char *comma = strchr (next_optstr, ',');
3890 const char *opt_string;
3891 size_t len, opt_len;
3896 enum ix86_opt_type type = ix86_opt_unknown;
3902 len = comma - next_optstr;
3903 next_optstr = comma + 1;
3911 /* Recognize no-xxx. */
3912 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3921 /* Find the option. */
3924 for (i = 0; i < ARRAY_SIZE (attrs); i++)
3926 type = attrs[i].type;
3927 opt_len = attrs[i].len;
3928 if (ch == attrs[i].string[0]
3929 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3930 && memcmp (p, attrs[i].string, opt_len) == 0)
3933 mask = attrs[i].mask;
3934 opt_string = attrs[i].string;
3939 /* Process the option. */
3942 error ("attribute(target(\"%s\")) is unknown", orig_p);
3946 else if (type == ix86_opt_isa)
3947 ix86_handle_option (opt, p, opt_set_p);
3949 else if (type == ix86_opt_yes || type == ix86_opt_no)
3951 if (type == ix86_opt_no)
3952 opt_set_p = !opt_set_p;
3955 target_flags |= mask;
3957 target_flags &= ~mask;
3960 else if (type == ix86_opt_str)
3964 error ("option(\"%s\") was already specified", opt_string);
3968 p_strings[opt] = xstrdup (p + opt_len);
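/* Worked example (editorial sketch): given
     __attribute__((target("sse4.1,no-fancy-math-387,arch=core2")))
   this parsing loop sees three comma-separated pieces: "sse4.1"
   matches an ix86_opt_isa entry and is handed to ix86_handle_option;
   "no-fancy-math-387" has its "no-" prefix stripped, matches the
   ix86_opt_no entry and sets MASK_NO_FANCY_MATH_387 in target_flags;
   "arch=core2" matches an ix86_opt_str entry, so "core2" is saved in
   p_strings for later use.  */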
3978 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3981 ix86_valid_target_attribute_tree (tree args)
3983 const char *orig_arch_string = ix86_arch_string;
3984 const char *orig_tune_string = ix86_tune_string;
3985 const char *orig_fpmath_string = ix86_fpmath_string;
3986 int orig_tune_defaulted = ix86_tune_defaulted;
3987 int orig_arch_specified = ix86_arch_specified;
3988 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3991 struct cl_target_option *def
3992 = TREE_TARGET_OPTION (target_option_default_node);
3994 /* Process each of the options on the chain. */
3995 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3998 /* If the changed options are different from the default, rerun override_options,
3999 and then save the options away. The string options are attribute options,
4000 and will be undone when we copy the save structure. */
4001 if (ix86_isa_flags != def->ix86_isa_flags
4002 || target_flags != def->target_flags
4003 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
4004 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
4005 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
4007 /* If we are using the default tune= or arch=, undo the string assigned,
4008 and use the default. */
4009 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
4010 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
4011 else if (!orig_arch_specified)
4012 ix86_arch_string = NULL;
4014 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
4015 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
4016 else if (orig_tune_defaulted)
4017 ix86_tune_string = NULL;
4019 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
4020 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
4021 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
4022 else if (!TARGET_64BIT && TARGET_SSE)
4023 ix86_fpmath_string = "sse,387";
4025 /* Do any overrides, such as arch=xxx or tune=xxx. */
4026 override_options (false);
4028 /* Add any builtin functions available with the new ISA. */
4029 ix86_add_new_builtins (ix86_isa_flags);
4031 /* Save the current options unless we are validating options for
4032 #pragma. */
4033 t = build_target_option_node ();
4035 ix86_arch_string = orig_arch_string;
4036 ix86_tune_string = orig_tune_string;
4037 ix86_fpmath_string = orig_fpmath_string;
4039 /* Free up memory allocated to hold the strings */
4040 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
4041 if (option_strings[i])
4042 free (option_strings[i]);
4048 /* Hook to validate attribute((target("string"))). */
4051 ix86_valid_target_attribute_p (tree fndecl,
4052 tree ARG_UNUSED (name),
4054 int ARG_UNUSED (flags))
4056 struct cl_target_option cur_target;
4058 tree old_optimize = build_optimization_node ();
4059 tree new_target, new_optimize;
4060 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
4062 /* If the function changed the optimization levels as well as setting target
4063 options, start with the optimizations specified. */
4064 if (func_optimize && func_optimize != old_optimize)
4065 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
4067 /* The target attributes may also change some optimization flags, so update
4068 the optimization options if necessary. */
4069 cl_target_option_save (&cur_target);
4070 new_target = ix86_valid_target_attribute_tree (args);
4071 new_optimize = build_optimization_node ();
4078 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
4080 if (old_optimize != new_optimize)
4081 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
4084 cl_target_option_restore (&cur_target);
4086 if (old_optimize != new_optimize)
4087 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
4093 /* Hook to determine if one function can safely inline another. */
4096 ix86_can_inline_p (tree caller, tree callee)
4099 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
4100 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
4102 /* If callee has no option attributes, then it is ok to inline. */
4106 /* If the caller has no option attributes but the callee does, then it is
4107 not ok to inline. */
4108 else if (!caller_tree)
4113 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
4114 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
4116 /* Callee's ISA options should be a subset of the caller's, i.e. an SSE4
4117 function can inline an SSE2 function, but an SSE2 function can't inline
4118 an SSE4 function. */
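/* Example (editorial): a caller compiled with
   __attribute__((target("sse4.2"))) may inline a callee restricted to
   "sse2", since the callee's ISA bits are a subset of the caller's;
   the reverse is rejected by the subset test below.  */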
4119 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
4120 != callee_opts->ix86_isa_flags)
4123 /* See if we have the same non-isa options. */
4124 else if (caller_opts->target_flags != callee_opts->target_flags)
4127 /* See if arch, tune, etc. are the same. */
4128 else if (caller_opts->arch != callee_opts->arch)
4131 else if (caller_opts->tune != callee_opts->tune)
4134 else if (caller_opts->fpmath != callee_opts->fpmath)
4137 else if (caller_opts->branch_cost != callee_opts->branch_cost)
4148 /* Remember the last target of ix86_set_current_function. */
4149 static GTY(()) tree ix86_previous_fndecl;
4151 /* Establish appropriate back-end context for processing the function
4152 FNDECL. The argument might be NULL to indicate processing at top
4153 level, outside of any function scope. */
4155 ix86_set_current_function (tree fndecl)
4157 /* Only change the context if the function changes. This hook is called
4158 several times in the course of compiling a function, and we don't want to
4159 slow things down too much or call target_reinit when it isn't safe. */
4160 if (fndecl && fndecl != ix86_previous_fndecl)
4162 tree old_tree = (ix86_previous_fndecl
4163 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4166 tree new_tree = (fndecl
4167 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4170 ix86_previous_fndecl = fndecl;
4171 if (old_tree == new_tree)
4176 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
4182 struct cl_target_option *def
4183 = TREE_TARGET_OPTION (target_option_current_node);
4185 cl_target_option_restore (def);
4192 /* Return true if this goes in large data/bss. */
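/* Editorial example: with -mcmodel=medium and the default
   -mlarge-data-threshold of 65536, a 100000-byte array is treated as
   large data and ends up in .ldata/.lbss, while an 8-byte scalar stays
   in the ordinary sections.  */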
4195 ix86_in_large_data_p (tree exp)
4197 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4200 /* Functions are never large data. */
4201 if (TREE_CODE (exp) == FUNCTION_DECL)
4204 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4206 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4207 if (strcmp (section, ".ldata") == 0
4208 || strcmp (section, ".lbss") == 0)
4214 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4216 /* If this is an incomplete type with size 0, then we can't put it
4217 in data because it might be too big when completed. */
4218 if (!size || size > ix86_section_threshold)
4225 /* Switch to the appropriate section for output of DECL.
4226 DECL is either a `VAR_DECL' node or a constant of some sort.
4227 RELOC indicates whether forming the initial value of DECL requires
4228 link-time relocations. */
4230 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4234 x86_64_elf_select_section (tree decl, int reloc,
4235 unsigned HOST_WIDE_INT align)
4237 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4238 && ix86_in_large_data_p (decl))
4240 const char *sname = NULL;
4241 unsigned int flags = SECTION_WRITE;
4242 switch (categorize_decl_for_section (decl, reloc))
4247 case SECCAT_DATA_REL:
4248 sname = ".ldata.rel";
4250 case SECCAT_DATA_REL_LOCAL:
4251 sname = ".ldata.rel.local";
4253 case SECCAT_DATA_REL_RO:
4254 sname = ".ldata.rel.ro";
4256 case SECCAT_DATA_REL_RO_LOCAL:
4257 sname = ".ldata.rel.ro.local";
4261 flags |= SECTION_BSS;
4264 case SECCAT_RODATA_MERGE_STR:
4265 case SECCAT_RODATA_MERGE_STR_INIT:
4266 case SECCAT_RODATA_MERGE_CONST:
4270 case SECCAT_SRODATA:
4277 /* We don't split these for the medium model. Place them into
4278 default sections and hope for the best. */
4280 case SECCAT_EMUTLS_VAR:
4281 case SECCAT_EMUTLS_TMPL:
4286 /* We might get called with string constants, but get_named_section
4287 doesn't like them as they are not DECLs. Also, we need to set
4288 flags in that case. */
4290 return get_section (sname, flags, NULL);
4291 return get_named_section (decl, sname, reloc);
4294 return default_elf_select_section (decl, reloc, align);
4297 /* Build up a unique section name, expressed as a
4298 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4299 RELOC indicates whether the initial value of EXP requires
4300 link-time relocations. */
4302 static void ATTRIBUTE_UNUSED
4303 x86_64_elf_unique_section (tree decl, int reloc)
4305 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4306 && ix86_in_large_data_p (decl))
4308 const char *prefix = NULL;
4309 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4310 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4312 switch (categorize_decl_for_section (decl, reloc))
4315 case SECCAT_DATA_REL:
4316 case SECCAT_DATA_REL_LOCAL:
4317 case SECCAT_DATA_REL_RO:
4318 case SECCAT_DATA_REL_RO_LOCAL:
4319 prefix = one_only ? ".ld" : ".ldata";
4322 prefix = one_only ? ".lb" : ".lbss";
4325 case SECCAT_RODATA_MERGE_STR:
4326 case SECCAT_RODATA_MERGE_STR_INIT:
4327 case SECCAT_RODATA_MERGE_CONST:
4328 prefix = one_only ? ".lr" : ".lrodata";
4330 case SECCAT_SRODATA:
4337 /* We don't split these for the medium model. Place them into
4338 default sections and hope for the best. */
4340 case SECCAT_EMUTLS_VAR:
4341 prefix = targetm.emutls.var_section;
4343 case SECCAT_EMUTLS_TMPL:
4344 prefix = targetm.emutls.tmpl_section;
4349 const char *name, *linkonce;
4352 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4353 name = targetm.strip_name_encoding (name);
4355 /* If we're using one_only, then there needs to be a .gnu.linkonce
4356 prefix to the section name. */
4357 linkonce = one_only ? ".gnu.linkonce" : "";
4359 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4361 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
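/* Naming example (editorial): a one-only variable "foo" categorized as
   writable data gets the prefix ".ld" and the section name
   ".gnu.linkonce.ld.foo"; without one_only the same variable is placed
   in ".ldata.foo".  */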
4365 default_unique_section (decl, reloc);
4368 #ifdef COMMON_ASM_OP
4369 /* This says how to output assembler code to declare an
4370 uninitialized external linkage data object.
4372 For medium model x86-64 we need to use the .largecomm directive for
4373 large objects. */
4375 x86_elf_aligned_common (FILE *file,
4376 const char *name, unsigned HOST_WIDE_INT size,
4379 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4380 && size > (unsigned int)ix86_section_threshold)
4381 fputs (".largecomm\t", file);
4383 fputs (COMMON_ASM_OP, file);
4384 assemble_name (file, name);
4385 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4386 size, align / BITS_PER_UNIT);
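/* Output example (editorial): for a 100000-byte common symbol "bigarr"
   with 32-byte alignment under -mcmodel=medium this emits

	.largecomm	bigarr,100000,32

   whereas small objects keep the ordinary COMMON_ASM_OP form.  */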
4390 /* Utility function for targets to use in implementing
4391 ASM_OUTPUT_ALIGNED_BSS. */
4394 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4395 const char *name, unsigned HOST_WIDE_INT size,
4398 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4399 && size > (unsigned int)ix86_section_threshold)
4400 switch_to_section (get_named_section (decl, ".lbss", 0));
4402 switch_to_section (bss_section);
4403 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4404 #ifdef ASM_DECLARE_OBJECT_NAME
4405 last_assemble_variable_decl = decl;
4406 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4408 /* The standard thing is just to output a label for the object. */
4409 ASM_OUTPUT_LABEL (file, name);
4410 #endif /* ASM_DECLARE_OBJECT_NAME */
4411 ASM_OUTPUT_SKIP (file, size ? size : 1);
4415 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4417 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4418 make the problem of having too few registers even worse. */
4419 #ifdef INSN_SCHEDULING
4421 flag_schedule_insns = 0;
4425 /* The Darwin libraries never set errno, so we might as well
4426 avoid calling them when that's the only reason we would. */
4427 flag_errno_math = 0;
4429 /* The default values of these switches depend on TARGET_64BIT, which is
4430 not known at this moment. Mark these values with 2 and let the user
4431 override them. In case there is no command-line option specifying them,
4432 we will set the defaults in override_options. */
4434 flag_omit_frame_pointer = 2;
4436 /* For -O2 and beyond, turn on -fzee for x86_64 target. */
4440 flag_pcc_struct_return = 2;
4441 flag_asynchronous_unwind_tables = 2;
4442 flag_vect_cost_model = 1;
4443 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4444 SUBTARGET_OPTIMIZATION_OPTIONS;
4448 /* Decide whether we can make a sibling call to a function. DECL is the
4449 declaration of the function being targeted by the call and EXP is the
4450 CALL_EXPR representing the call. */
4453 ix86_function_ok_for_sibcall (tree decl, tree exp)
4455 tree type, decl_or_type;
4458 /* If we are generating position-independent code, we cannot sibcall
4459 optimize any indirect call, or a direct call to a global function,
4460 as the PLT requires %ebx be live. */
4461 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4464 /* If we need to align the outgoing stack, then sibcalling would
4465 unalign the stack, which may break the called function. */
4466 if (ix86_minimum_incoming_stack_boundary (true)
4467 < PREFERRED_STACK_BOUNDARY)
4472 decl_or_type = decl;
4473 type = TREE_TYPE (decl);
4477 /* We're looking at the CALL_EXPR, we need the type of the function. */
4478 type = CALL_EXPR_FN (exp); /* pointer expression */
4479 type = TREE_TYPE (type); /* pointer type */
4480 type = TREE_TYPE (type); /* function type */
4481 decl_or_type = type;
4484 /* Check that the return value locations are the same. For example,
4485 if we are returning floats on the 80387 register stack, we cannot
4486 make a sibcall from a function that doesn't return a float to a
4487 function that does or, conversely, from a function that does return
4488 a float to a function that doesn't; the necessary stack adjustment
4489 would not be executed. This is also the place we notice
4490 differences in the return value ABI. Note that it is ok for one
4491 of the functions to have void return type as long as the return
4492 value of the other is passed in a register. */
4493 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4494 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4496 if (STACK_REG_P (a) || STACK_REG_P (b))
4498 if (!rtx_equal_p (a, b))
4501 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4503 else if (!rtx_equal_p (a, b))
4508 /* The SYSV ABI has more call-clobbered registers;
4509 disallow sibcalls from MS to SYSV. */
4510 if (cfun->machine->call_abi == MS_ABI
4511 && ix86_function_type_abi (type) == SYSV_ABI)
4516 /* If this call is indirect, we'll need to be able to use a
4517 call-clobbered register for the address of the target function.
4518 Make sure that all such registers are not used for passing
4519 parameters. Note that DLLIMPORT functions are indirect. */
4521 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4523 if (ix86_function_regparm (type, NULL) >= 3)
4525 /* ??? Need to count the actual number of registers to be used,
4526 not the possible number of registers. Fix later. */
4532 /* Otherwise okay. That also includes certain types of indirect calls. */
4536 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
4537 and "sseregparm" calling convention attributes;
4538 arguments as in struct attribute_spec.handler. */
4541 ix86_handle_cconv_attribute (tree *node, tree name,
4543 int flags ATTRIBUTE_UNUSED,
4546 if (TREE_CODE (*node) != FUNCTION_TYPE
4547 && TREE_CODE (*node) != METHOD_TYPE
4548 && TREE_CODE (*node) != FIELD_DECL
4549 && TREE_CODE (*node) != TYPE_DECL)
4551 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4553 *no_add_attrs = true;
4557 /* Can combine regparm with all attributes but fastcall. */
4558 if (is_attribute_p ("regparm", name))
4562 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4564 error ("fastcall and regparm attributes are not compatible");
4567 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4569 error ("regparm and thiscall attributes are not compatible");
4572 cst = TREE_VALUE (args);
4573 if (TREE_CODE (cst) != INTEGER_CST)
4575 warning (OPT_Wattributes,
4576 "%qE attribute requires an integer constant argument",
4578 *no_add_attrs = true;
4580 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4582 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4584 *no_add_attrs = true;
4592 /* Do not warn when emulating the MS ABI. */
4593 if ((TREE_CODE (*node) != FUNCTION_TYPE
4594 && TREE_CODE (*node) != METHOD_TYPE)
4595 || ix86_function_type_abi (*node) != MS_ABI)
4596 warning (OPT_Wattributes, "%qE attribute ignored",
4598 *no_add_attrs = true;
4602 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4603 if (is_attribute_p ("fastcall", name))
4605 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4607 error ("fastcall and cdecl attributes are not compatible");
4609 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4611 error ("fastcall and stdcall attributes are not compatible");
4613 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4615 error ("fastcall and regparm attributes are not compatible");
4617 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4619 error ("fastcall and thiscall attributes are not compatible");
4623 /* Can combine stdcall with fastcall (redundant), regparm and
4624 sseregparm. */
4625 else if (is_attribute_p ("stdcall", name))
4627 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4629 error ("stdcall and cdecl attributes are not compatible");
4631 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4633 error ("stdcall and fastcall attributes are not compatible");
4635 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4637 error ("stdcall and thiscall attributes are not compatible");
4641 /* Can combine cdecl with regparm and sseregparm. */
4642 else if (is_attribute_p ("cdecl", name))
4644 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4646 error ("stdcall and cdecl attributes are not compatible");
4648 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4650 error ("fastcall and cdecl attributes are not compatible");
4652 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4654 error ("cdecl and thiscall attributes are not compatible");
4657 else if (is_attribute_p ("thiscall", name))
4659 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
4660 warning (OPT_Wattributes, "%qE attribute is used for non-class method",
4662 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4664 error ("stdcall and thiscall attributes are not compatible");
4666 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4668 error ("fastcall and thiscall attributes are not compatible");
4670 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4672 error ("cdecl and thiscall attributes are not compatible");
4676 /* Can combine sseregparm with all attributes. */
4681 /* Return 0 if the attributes for two types are incompatible, 1 if they
4682 are compatible, and 2 if they are nearly compatible (which causes a
4683 warning to be generated). */
4686 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4688 /* Check for mismatch of non-default calling convention. */
4689 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4691 if (TREE_CODE (type1) != FUNCTION_TYPE
4692 && TREE_CODE (type1) != METHOD_TYPE)
4695 /* Check for mismatched fastcall/regparm types. */
4696 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4697 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4698 || (ix86_function_regparm (type1, NULL)
4699 != ix86_function_regparm (type2, NULL)))
4702 /* Check for mismatched sseregparm types. */
4703 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4704 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4707 /* Check for mismatched thiscall types. */
4708 if (!lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type1))
4709 != !lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type2)))
4712 /* Check for mismatched return types (cdecl vs stdcall). */
4713 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4714 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4720 /* Return the regparm value for a function with the indicated TYPE and DECL.
4721 DECL may be NULL when calling function indirectly
4722 or considering a libcall. */
4725 ix86_function_regparm (const_tree type, const_tree decl)
4731 return (ix86_function_type_abi (type) == SYSV_ABI
4732 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4734 regparm = ix86_regparm;
4735 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4738 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4742 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4745 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
4748 /* Use register calling convention for local functions when possible. */
4750 && TREE_CODE (decl) == FUNCTION_DECL
4754 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4755 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
4758 int local_regparm, globals = 0, regno;
4760 /* Make sure no regparm register is taken by a
4761 fixed register variable. */
4762 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4763 if (fixed_regs[local_regparm])
4766 /* We don't want to use regparm(3) for nested functions as
4767 these use a static chain pointer in the third argument. */
4768 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
4771 /* Each fixed register usage increases register pressure, so fewer
4772 registers should be used for argument passing. This functionality
4773 can be overridden by an explicit regparm value. */
4775 for (regno = 0; regno <= DI_REG; regno++)
4776 if (fixed_regs[regno])
4780 = globals < local_regparm ? local_regparm - globals : 0;
4782 if (local_regparm > regparm)
4783 regparm = local_regparm;
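/* Worked example (editorial): for a local 32-bit function with no
   explicit regparm attribute, local_regparm starts at REGPARM_MAX (3).
   If the user fixes a general register, e.g. with -ffixed-ebx, the
   loop above counts globals == 1 and the result drops to 3 - 1 == 2
   registers available for argument passing.  */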
4790 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4791 DFmode (2) arguments in SSE registers for a function with the
4792 indicated TYPE and DECL. DECL may be NULL when calling function
4793 indirectly or considering a libcall. Otherwise return 0. */
4796 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4798 gcc_assert (!TARGET_64BIT);
4800 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4801 by the sseregparm attribute. */
4802 if (TARGET_SSEREGPARM
4803 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4810 error ("calling %qD with attribute sseregparm without "
4811 "SSE/SSE2 enabled", decl);
4813 error ("calling %qT with attribute sseregparm without "
4814 "SSE/SSE2 enabled", type);
4822 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4823 (and DFmode for SSE2) arguments in SSE registers. */
4824 if (decl && TARGET_SSE_MATH && optimize && !profile_flag)
4826 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4827 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4829 return TARGET_SSE2 ? 2 : 1;
4835 /* Return true if EAX is live at the start of the function. Used by
4836 ix86_expand_prologue to determine if we need special help before
4837 calling allocate_stack_worker. */
4840 ix86_eax_live_at_start_p (void)
4842 /* Cheat. Don't bother working forward from ix86_function_regparm
4843 to the function type to whether an actual argument is located in
4844 eax. Instead just look at cfg info, which is still close enough
4845 to correct at this point. This gives false positives for broken
4846 functions that might use uninitialized data that happens to be
4847 allocated in eax, but who cares? */
4848 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4851 /* Value is the number of bytes of arguments automatically
4852 popped when returning from a subroutine call.
4853 FUNDECL is the declaration node of the function (as a tree),
4854 FUNTYPE is the data type of the function (as a tree),
4855 or for a library call it is an identifier node for the subroutine name.
4856 SIZE is the number of bytes of arguments passed on the stack.
4858 On the 80386, the RTD insn may be used to pop them if the number
4859 of args is fixed, but if the number is variable then the caller
4860 must pop them all. RTD can't be used for library calls now
4861 because the library is compiled with the Unix compiler.
4862 Use of RTD is a selectable option, since it is incompatible with
4863 standard Unix calling sequences. If the option is not selected,
4864 the caller must always pop the args.
4866 The attribute stdcall is equivalent to RTD on a per module basis. */
4869 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4873 /* None of the 64-bit ABIs pop arguments. */
4877 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4879 /* Cdecl functions override -mrtd, and never pop the stack. */
4880 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4882 /* Stdcall and fastcall functions will pop the stack if not
4883 variable args. */
4884 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4885 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype))
4886 || lookup_attribute ("thiscall", TYPE_ATTRIBUTES (funtype)))
4889 if (rtd && ! stdarg_p (funtype))
4893 /* Lose any fake structure return argument if it is passed on the stack. */
4894 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4895 && !KEEP_AGGREGATE_RETURN_POINTER)
4897 int nregs = ix86_function_regparm (funtype, fundecl);
4899 return GET_MODE_SIZE (Pmode);
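/* Example (editorial): a 32-bit function declared as

	void __attribute__((stdcall)) f (int a, int b, int c);

   pops its own 12 bytes of arguments, so this hook returns 12 and the
   compiler emits "ret $12" instead of a caller-side adjustment.  */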
4905 /* Argument support functions. */
4907 /* Return true when register may be used to pass function parameters. */
4909 ix86_function_arg_regno_p (int regno)
4912 const int *parm_regs;
4917 return (regno < REGPARM_MAX
4918 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4920 return (regno < REGPARM_MAX
4921 || (TARGET_MMX && MMX_REGNO_P (regno)
4922 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4923 || (TARGET_SSE && SSE_REGNO_P (regno)
4924 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4929 if (SSE_REGNO_P (regno) && TARGET_SSE)
4934 if (TARGET_SSE && SSE_REGNO_P (regno)
4935 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4939 /* TODO: The function should depend on the current function ABI, but
4940 builtins.c would need updating then. Therefore we use the
4941 default ABI. */
4943 /* RAX is used as hidden argument to va_arg functions. */
4944 if (ix86_abi == SYSV_ABI && regno == AX_REG)
4947 if (ix86_abi == MS_ABI)
4948 parm_regs = x86_64_ms_abi_int_parameter_registers;
4950 parm_regs = x86_64_int_parameter_registers;
4951 for (i = 0; i < (ix86_abi == MS_ABI
4952 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
4953 if (regno == parm_regs[i])
4958 /* Return true if we do not know how to pass TYPE solely in registers. */
4961 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4963 if (must_pass_in_stack_var_size_or_pad (mode, type))
4966 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4967 The layout_type routine is crafty and tries to trick us into passing
4968 currently unsupported vector types on the stack by using TImode. */
4969 return (!TARGET_64BIT && mode == TImode
4970 && type && TREE_CODE (type) != VECTOR_TYPE);
4973 /* Return the size, in bytes, of the area reserved for arguments passed
4974 in registers for the function represented by FNDECL, depending on the
4975 ABI format used. */
4977 ix86_reg_parm_stack_space (const_tree fndecl)
4979 enum calling_abi call_abi = SYSV_ABI;
4980 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
4981 call_abi = ix86_function_abi (fndecl);
4983 call_abi = ix86_function_type_abi (fndecl);
4984 if (call_abi == MS_ABI)
4989 /* Returns SYSV_ABI or MS_ABI, dependent on fntype, specifying the
4990 call ABI used. */
4992 ix86_function_type_abi (const_tree fntype)
4994 if (TARGET_64BIT && fntype != NULL)
4996 enum calling_abi abi = ix86_abi;
4997 if (abi == SYSV_ABI)
4999 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
5002 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
5010 ix86_function_ms_hook_prologue (const_tree fntype)
5014 if (lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fntype)))
5016 if (decl_function_context (fntype) != NULL_TREE)
5018 error_at (DECL_SOURCE_LOCATION (fntype),
5019 "ms_hook_prologue is not compatible with nested function");
5028 static enum calling_abi
5029 ix86_function_abi (const_tree fndecl)
5033 return ix86_function_type_abi (TREE_TYPE (fndecl));
5036 /* Returns SYSV_ABI or MS_ABI, dependent on cfun, specifying the
5037 call ABI used. */
5039 ix86_cfun_abi (void)
5041 if (! cfun || ! TARGET_64BIT)
5043 return cfun->machine->call_abi;
5047 extern void init_regs (void);
5049 /* Implementation of the call ABI switching target hook. The call
5050 register sets specific to FNDECL are set up. See also
5051 CONDITIONAL_REGISTER_USAGE for more details. */
5053 ix86_call_abi_override (const_tree fndecl)
5055 if (fndecl == NULL_TREE)
5056 cfun->machine->call_abi = ix86_abi;
5058 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
5061 /* MS and SYSV ABIs have different sets of call-used registers. Avoid
5062 expensive re-initialization via init_regs each time we switch function
5063 context, since it is needed only during RTL expansion. */
5065 ix86_maybe_switch_abi (void)
5068 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
5072 /* Initialize a variable CUM of type CUMULATIVE_ARGS
5073 for a call to a function whose data type is FNTYPE.
5074 For a library call, FNTYPE is 0. */
5077 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
5078 tree fntype, /* tree ptr for function decl */
5079 rtx libname, /* SYMBOL_REF of library name or 0 */
5082 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
5083 memset (cum, 0, sizeof (*cum));
5086 cum->call_abi = ix86_function_abi (fndecl);
5088 cum->call_abi = ix86_function_type_abi (fntype);
5089 /* Set up the number of registers to use for passing arguments. */
5091 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
5092 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
5093 "or subtarget optimization implying it");
5094 cum->nregs = ix86_regparm;
5097 cum->nregs = (cum->call_abi == SYSV_ABI
5098 ? X86_64_REGPARM_MAX
5099 : X86_64_MS_REGPARM_MAX);
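/* Editorial note: in 64-bit mode this selects 6 integer argument
   registers (rdi, rsi, rdx, rcx, r8, r9) for the SysV ABI versus 4
   (rcx, rdx, r8, r9) for the MS ABI.  */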
5103 cum->sse_nregs = SSE_REGPARM_MAX;
5106 cum->sse_nregs = (cum->call_abi == SYSV_ABI
5107 ? X86_64_SSE_REGPARM_MAX
5108 : X86_64_MS_SSE_REGPARM_MAX);
5112 cum->mmx_nregs = MMX_REGPARM_MAX;
5113 cum->warn_avx = true;
5114 cum->warn_sse = true;
5115 cum->warn_mmx = true;
5117 /* Because the type might mismatch between caller and callee, we need to
5118 use the actual type of the function for local calls.
5119 FIXME: cgraph_analyze can be told to actually record if a function uses
5120 va_start, so for local functions maybe_vaarg can be made more aggressive,
5121 helping K&R code.
5122 FIXME: once the type system is fixed, we won't need this code anymore. */
5124 fntype = TREE_TYPE (fndecl);
5125 cum->maybe_vaarg = (fntype
5126 ? (!prototype_p (fntype) || stdarg_p (fntype))
5131 /* If there are variable arguments, then we won't pass anything
5132 in registers in 32-bit mode. */
5133 if (stdarg_p (fntype))
5144 /* Use ecx and edx registers if function has fastcall attribute,
5145 else look for regparm information. */
5148 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
5151 cum->fastcall = 1; /* Same first register as in fastcall. */
5153 else if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
5159 cum->nregs = ix86_function_regparm (fntype, fndecl);
5162 /* Set up the number of SSE registers used for passing SFmode
5163 and DFmode arguments. Warn for mismatching ABI. */
5164 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
5168 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
5169 But in the case of vector types, it is some vector mode.
5171 When we have only some of our vector isa extensions enabled, then there
5172 are some modes for which vector_mode_supported_p is false. For these
5173 modes, the generic vector support in gcc will choose some non-vector mode
5174 in order to implement the type. By computing the natural mode, we'll
5175 select the proper ABI location for the operand and not depend on whatever
5176 the middle-end decides to do with these vector types.
5178 The middle-end can't deal with vector types > 16 bytes. In this
5179 case, we return the original mode and warn about the ABI change if
5180 CUM isn't NULL. */
5182 static enum machine_mode
5183 type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
5185 enum machine_mode mode = TYPE_MODE (type);
5187 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
5189 HOST_WIDE_INT size = int_size_in_bytes (type);
5190 if ((size == 8 || size == 16 || size == 32)
5191 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5192 && TYPE_VECTOR_SUBPARTS (type) > 1)
5194 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5196 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5197 mode = MIN_MODE_VECTOR_FLOAT;
5199 mode = MIN_MODE_VECTOR_INT;
5201 /* Get the mode which has this inner mode and number of units. */
5202 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5203 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5204 && GET_MODE_INNER (mode) == innermode)
5206 if (size == 32 && !TARGET_AVX)
5208 static bool warnedavx;
5215 warning (0, "AVX vector argument without AVX "
5216 "enabled changes the ABI");
5218 return TYPE_MODE (type);
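/* Behavior example (editorial): for a 16-byte vector type such as

	typedef float v4sf __attribute__ ((vector_size (16)));

   whose TYPE_MODE was not a vector mode, the search above recovers
   V4SFmode.  A 32-byte float vector compiled without -mavx instead
   triggers the one-time ABI warning and falls back to TYPE_MODE.  */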
5231 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5232 this may not agree with the mode that the type system has chosen for the
5233 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5234 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5237 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5242 if (orig_mode != BLKmode)
5243 tmp = gen_rtx_REG (orig_mode, regno);
5246 tmp = gen_rtx_REG (mode, regno);
5247 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5248 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
5254 /* x86-64 register passing implementation. See the x86-64 ABI for details.
5255 The goal of this code is to classify each 8-byte chunk of the incoming
5256 argument by register class and assign registers accordingly. */
5258 /* Return the union class of CLASS1 and CLASS2.
5259 See the x86-64 PS ABI for details. */
5261 static enum x86_64_reg_class
5262 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5264 /* Rule #1: If both classes are equal, this is the resulting class. */
5265 if (class1 == class2)
5268 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
5270 if (class1 == X86_64_NO_CLASS)
5272 if (class2 == X86_64_NO_CLASS)
5275 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5276 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5277 return X86_64_MEMORY_CLASS;
5279 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5280 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5281 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5282 return X86_64_INTEGERSI_CLASS;
5283 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5284 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5285 return X86_64_INTEGER_CLASS;
5287 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
5289 if (class1 == X86_64_X87_CLASS
5290 || class1 == X86_64_X87UP_CLASS
5291 || class1 == X86_64_COMPLEX_X87_CLASS
5292 || class2 == X86_64_X87_CLASS
5293 || class2 == X86_64_X87UP_CLASS
5294 || class2 == X86_64_COMPLEX_X87_CLASS)
5295 return X86_64_MEMORY_CLASS;
5297 /* Rule #6: Otherwise class SSE is used. */
5298 return X86_64_SSE_CLASS;
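/* Worked example (editorial): for

	struct s { int i; float f; };

   both fields share one eightbyte; merging the int's
   X86_64_INTEGERSI_CLASS with the float's X86_64_SSESF_CLASS hits rule
   #4 and yields X86_64_INTEGERSI_CLASS, so the whole struct travels in
   a single integer register.  */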
5301 /* Classify the argument of type TYPE and mode MODE.
5302 CLASSES will be filled by the register class used to pass each word
5303 of the operand. The number of words is returned. In case the parameter
5304 should be passed in memory, 0 is returned. As a special case for zero
5305 sized containers, classes[0] will be NO_CLASS and 1 is returned.
5307 BIT_OFFSET is used internally for handling records and specifies the
5308 offset in bits modulo 256 to avoid overflow cases.
5310 See the x86-64 PS ABI for details.
5314 classify_argument (enum machine_mode mode, const_tree type,
5315 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5317 HOST_WIDE_INT bytes =
5318 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5319 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5321 /* Variable sized entities are always passed/returned in memory. */
5325 if (mode != VOIDmode
5326 && targetm.calls.must_pass_in_stack (mode, type))
5329 if (type && AGGREGATE_TYPE_P (type))
5333 enum x86_64_reg_class subclasses[MAX_CLASSES];
5335 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5339 for (i = 0; i < words; i++)
5340 classes[i] = X86_64_NO_CLASS;
5342 /* Zero-sized arrays or structures are NO_CLASS. We return 0 to
5343 signal the memory class, so handle this as a special case. */
5346 classes[0] = X86_64_NO_CLASS;
5350 /* Classify each field of record and merge classes. */
5351 switch (TREE_CODE (type))
5354 /* And now merge the fields of structure. */
5355 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5357 if (TREE_CODE (field) == FIELD_DECL)
5361 if (TREE_TYPE (field) == error_mark_node)
5364 /* Bitfields are always classified as integer. Handle them
5365 early, since later code would consider them to be
5366 misaligned integers. */
5367 if (DECL_BIT_FIELD (field))
5369 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5370 i < ((int_bit_position (field) + (bit_offset % 64))
5371 + tree_low_cst (DECL_SIZE (field), 0)
5374 merge_classes (X86_64_INTEGER_CLASS,
5381 type = TREE_TYPE (field);
5383 /* Flexible array member is ignored. */
5384 if (TYPE_MODE (type) == BLKmode
5385 && TREE_CODE (type) == ARRAY_TYPE
5386 && TYPE_SIZE (type) == NULL_TREE
5387 && TYPE_DOMAIN (type) != NULL_TREE
5388 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5393 if (!warned && warn_psabi)
5396 inform (input_location,
5397 "The ABI of passing struct with"
5398 " a flexible array member has"
5399 " changed in GCC 4.4");
5403 num = classify_argument (TYPE_MODE (type), type,
5405 (int_bit_position (field)
5406 + bit_offset) % 256);
5409 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5410 for (i = 0; i < num && (i + pos) < words; i++)
5412 merge_classes (subclasses[i], classes[i + pos]);
5419 /* Arrays are handled as small records. */
5422 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5423 TREE_TYPE (type), subclasses, bit_offset);
5427 /* The partial classes are now full classes. */
5428 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5429 subclasses[0] = X86_64_SSE_CLASS;
5430 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5431 && !((bit_offset % 64) == 0 && bytes == 4))
5432 subclasses[0] = X86_64_INTEGER_CLASS;
5434 for (i = 0; i < words; i++)
5435 classes[i] = subclasses[i % num];
5440 case QUAL_UNION_TYPE:
5441 /* Unions are similar to RECORD_TYPE but the offset is always 0. */
5443 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5445 if (TREE_CODE (field) == FIELD_DECL)
5449 if (TREE_TYPE (field) == error_mark_node)
5452 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5453 TREE_TYPE (field), subclasses,
5457 for (i = 0; i < num; i++)
5458 classes[i] = merge_classes (subclasses[i], classes[i]);
5469 /* When size > 16 bytes, if the first one isn't
5470 X86_64_SSE_CLASS or any other ones aren't
5471 X86_64_SSEUP_CLASS, everything should be passed in
5472 memory. */
5473 if (classes[0] != X86_64_SSE_CLASS)
5476 for (i = 1; i < words; i++)
5477 if (classes[i] != X86_64_SSEUP_CLASS)
5481 /* Final merger cleanup. */
5482 for (i = 0; i < words; i++)
5484 /* If one class is MEMORY, everything should be passed in
5485 memory. */
5486 if (classes[i] == X86_64_MEMORY_CLASS)
5489 /* The X86_64_SSEUP_CLASS should always be preceded by
5490 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5491 if (classes[i] == X86_64_SSEUP_CLASS
5492 && classes[i - 1] != X86_64_SSE_CLASS
5493 && classes[i - 1] != X86_64_SSEUP_CLASS)
5495 /* The first one should never be X86_64_SSEUP_CLASS. */
5496 gcc_assert (i != 0);
5497 classes[i] = X86_64_SSE_CLASS;
5500 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5501 everything should be passed in memory. */
5502 if (classes[i] == X86_64_X87UP_CLASS
5503 && (classes[i - 1] != X86_64_X87_CLASS))
5507 /* The first one should never be X86_64_X87UP_CLASS. */
5508 gcc_assert (i != 0);
5509 if (!warned && warn_psabi)
5512 inform (input_location,
5513 "The ABI of passing union with long double"
5514 " has changed in GCC 4.4");
5522 /* Compute the alignment needed. We align all types to their natural
5523 boundaries, with the exception of XFmode, which is aligned to 64 bits. */
5524 if (mode != VOIDmode && mode != BLKmode)
5526 int mode_alignment = GET_MODE_BITSIZE (mode);
5529 mode_alignment = 128;
5530 else if (mode == XCmode)
5531 mode_alignment = 256;
5532 if (COMPLEX_MODE_P (mode))
5533 mode_alignment /= 2;
5534 /* Misaligned fields are always returned in memory. */
5535 if (bit_offset % mode_alignment)
5539 /* For V1xx modes, just use the base mode. */
5540 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
5541 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5542 mode = GET_MODE_INNER (mode);
5544 /* Classification of atomic types. */
5549 classes[0] = X86_64_SSE_CLASS;
5552 classes[0] = X86_64_SSE_CLASS;
5553 classes[1] = X86_64_SSEUP_CLASS;
5563 int size = (bit_offset % 64)+ (int) GET_MODE_BITSIZE (mode);
5567 classes[0] = X86_64_INTEGERSI_CLASS;
5570 else if (size <= 64)
5572 classes[0] = X86_64_INTEGER_CLASS;
5575 else if (size <= 64+32)
5577 classes[0] = X86_64_INTEGER_CLASS;
5578 classes[1] = X86_64_INTEGERSI_CLASS;
5581 else if (size <= 64+64)
5583 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5591 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5595 /* OImode shouldn't be used directly. */
5600 if (!(bit_offset % 64))
5601 classes[0] = X86_64_SSESF_CLASS;
5603 classes[0] = X86_64_SSE_CLASS;
5606 classes[0] = X86_64_SSEDF_CLASS;
5609 classes[0] = X86_64_X87_CLASS;
5610 classes[1] = X86_64_X87UP_CLASS;
5613 classes[0] = X86_64_SSE_CLASS;
5614 classes[1] = X86_64_SSEUP_CLASS;
5617 classes[0] = X86_64_SSE_CLASS;
5618 if (!(bit_offset % 64))
5624 if (!warned && warn_psabi)
5627 inform (input_location,
5628 "The ABI of passing structure with complex float"
5629 " member has changed in GCC 4.4");
5631 classes[1] = X86_64_SSESF_CLASS;
5635 classes[0] = X86_64_SSEDF_CLASS;
5636 classes[1] = X86_64_SSEDF_CLASS;
5639 classes[0] = X86_64_COMPLEX_X87_CLASS;
5642 /* These modes are larger than 16 bytes. */
5650 classes[0] = X86_64_SSE_CLASS;
5651 classes[1] = X86_64_SSEUP_CLASS;
5652 classes[2] = X86_64_SSEUP_CLASS;
5653 classes[3] = X86_64_SSEUP_CLASS;
5661 classes[0] = X86_64_SSE_CLASS;
5662 classes[1] = X86_64_SSEUP_CLASS;
5670 classes[0] = X86_64_SSE_CLASS;
5676 gcc_assert (VECTOR_MODE_P (mode));
5681 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5683 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5684 classes[0] = X86_64_INTEGERSI_CLASS;
5686 classes[0] = X86_64_INTEGER_CLASS;
5687 classes[1] = X86_64_INTEGER_CLASS;
5688 return 1 + (bytes > 8);
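/* Illustrative sketch, not part of the original source: the pairwise
   eightbyte merge performed during classification follows the x86-64
   psABI rules.  A minimal standalone model of that lattice (the enum
   and names below are stand-ins for the real x86_64_reg_class):  */

enum demo_class { DEMO_NO_CLASS, DEMO_INTEGER, DEMO_SSE, DEMO_X87, DEMO_MEMORY };

static enum demo_class
demo_merge (enum demo_class c1, enum demo_class c2)
{
  if (c1 == c2)
    return c1;                                 /* Equal classes merge to themselves.  */
  if (c1 == DEMO_NO_CLASS)
    return c2;                                 /* NO_CLASS is the identity.  */
  if (c2 == DEMO_NO_CLASS)
    return c1;
  if (c1 == DEMO_MEMORY || c2 == DEMO_MEMORY)
    return DEMO_MEMORY;                        /* MEMORY beats everything.  */
  if (c1 == DEMO_INTEGER || c2 == DEMO_INTEGER)
    return DEMO_INTEGER;                       /* INTEGER beats SSE and X87.  */
  if (c1 == DEMO_X87 || c2 == DEMO_X87)
    return DEMO_MEMORY;                        /* X87 mixed with SSE goes to memory.  */
  return DEMO_SSE;                             /* Only SSE-like classes remain.  */
}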
5692 /* Examine the argument and return the number of registers required in each
5693 class. Return 0 iff the parameter should be passed in memory. */
5695 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5696 int *int_nregs, int *sse_nregs)
5698 enum x86_64_reg_class regclass[MAX_CLASSES];
5699 int n = classify_argument (mode, type, regclass, 0);
5705 for (n--; n >= 0; n--)
5706 switch (regclass[n])
5708 case X86_64_INTEGER_CLASS:
5709 case X86_64_INTEGERSI_CLASS:
5712 case X86_64_SSE_CLASS:
5713 case X86_64_SSESF_CLASS:
5714 case X86_64_SSEDF_CLASS:
5717 case X86_64_NO_CLASS:
5718 case X86_64_SSEUP_CLASS:
5720 case X86_64_X87_CLASS:
5721 case X86_64_X87UP_CLASS:
5725 case X86_64_COMPLEX_X87_CLASS:
5726 return in_return ? 2 : 0;
5727 case X86_64_MEMORY_CLASS:
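/* Worked example (illustrative addition): the struct below occupies two
   eightbytes; the first classifies as SSE (the double), the second as
   INTEGER (the long), so examine_argument reports *sse_nregs == 1 and
   *int_nregs == 1.  A struct larger than 16 bytes with no SSEUP tail
   classifies as MEMORY and examine_argument returns 0, i.e. the
   parameter goes on the stack.  */

struct demo_pair { double d; long l; };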
5733 /* Construct container for the argument used by GCC interface. See
5734 FUNCTION_ARG for the detailed description. */
5737 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5738 const_tree type, int in_return, int nintregs, int nsseregs,
5739 const int *intreg, int sse_regno)
5741 /* The following variables hold the static issued_error state. */
5742 static bool issued_sse_arg_error;
5743 static bool issued_sse_ret_error;
5744 static bool issued_x87_ret_error;
5746 enum machine_mode tmpmode;
5748 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5749 enum x86_64_reg_class regclass[MAX_CLASSES];
5753 int needed_sseregs, needed_intregs;
5754 rtx exp[MAX_CLASSES];
5757 n = classify_argument (mode, type, regclass, 0);
5760 if (!examine_argument (mode, type, in_return, &needed_intregs,
5763 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5766 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5767 some less clueful developer tries to use floating-point anyway. */
5768 if (needed_sseregs && !TARGET_SSE)
5772 if (!issued_sse_ret_error)
5774 error ("SSE register return with SSE disabled");
5775 issued_sse_ret_error = true;
5778 else if (!issued_sse_arg_error)
5780 error ("SSE register argument with SSE disabled");
5781 issued_sse_arg_error = true;
5786 /* Likewise, error if the ABI requires us to return values in the
5787 x87 registers and the user specified -mno-80387. */
5788 if (!TARGET_80387 && in_return)
5789 for (i = 0; i < n; i++)
5790 if (regclass[i] == X86_64_X87_CLASS
5791 || regclass[i] == X86_64_X87UP_CLASS
5792 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5794 if (!issued_x87_ret_error)
5796 error ("x87 register return with x87 disabled");
5797 issued_x87_ret_error = true;
5802 /* First construct simple cases. Avoid SCmode, since we want to use a
5803 single register to pass this type. */
5804 if (n == 1 && mode != SCmode)
5805 switch (regclass[0])
5807 case X86_64_INTEGER_CLASS:
5808 case X86_64_INTEGERSI_CLASS:
5809 return gen_rtx_REG (mode, intreg[0]);
5810 case X86_64_SSE_CLASS:
5811 case X86_64_SSESF_CLASS:
5812 case X86_64_SSEDF_CLASS:
5813 if (mode != BLKmode)
5814 return gen_reg_or_parallel (mode, orig_mode,
5815 SSE_REGNO (sse_regno));
5817 case X86_64_X87_CLASS:
5818 case X86_64_COMPLEX_X87_CLASS:
5819 return gen_rtx_REG (mode, FIRST_STACK_REG);
5820 case X86_64_NO_CLASS:
5821 /* Zero sized array, struct or class. */
5826 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5827 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5828 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5830 && regclass[0] == X86_64_SSE_CLASS
5831 && regclass[1] == X86_64_SSEUP_CLASS
5832 && regclass[2] == X86_64_SSEUP_CLASS
5833 && regclass[3] == X86_64_SSEUP_CLASS
5835 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5838 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5839 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5840 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5841 && regclass[1] == X86_64_INTEGER_CLASS
5842 && (mode == CDImode || mode == TImode || mode == TFmode)
5843 && intreg[0] + 1 == intreg[1])
5844 return gen_rtx_REG (mode, intreg[0]);
5846 /* Otherwise figure out the entries of the PARALLEL. */
5847 for (i = 0; i < n; i++)
5851 switch (regclass[i])
5853 case X86_64_NO_CLASS:
5855 case X86_64_INTEGER_CLASS:
5856 case X86_64_INTEGERSI_CLASS:
5857 /* Merge TImodes on aligned occasions here too. */
5858 if (i * 8 + 8 > bytes)
5859 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5860 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5864 /* We've requested 24 bytes for which we don't have a mode. Use DImode. */
5865 if (tmpmode == BLKmode)
5867 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5868 gen_rtx_REG (tmpmode, *intreg),
5872 case X86_64_SSESF_CLASS:
5873 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5874 gen_rtx_REG (SFmode,
5875 SSE_REGNO (sse_regno)),
5879 case X86_64_SSEDF_CLASS:
5880 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5881 gen_rtx_REG (DFmode,
5882 SSE_REGNO (sse_regno)),
5886 case X86_64_SSE_CLASS:
5894 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
5904 && regclass[1] == X86_64_SSEUP_CLASS
5905 && regclass[2] == X86_64_SSEUP_CLASS
5906 && regclass[3] == X86_64_SSEUP_CLASS);
5913 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5914 gen_rtx_REG (tmpmode,
5915 SSE_REGNO (sse_regno)),
5924 /* Empty aligned struct, union or class. */
5928 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5929 for (i = 0; i < nexps; i++)
5930 XVECEXP (ret, 0, i) = exp [i];
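/* Illustrative addition: for struct demo_pair above, passed as the first
   argument, the PARALLEL built here looks roughly like
       (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                  (expr_list (reg:DI di)   (const_int 8))])
   i.e. each EXPR_LIST pairs a hard register with the byte offset of the
   piece it carries.  This is a sketch of the shape, not verbatim RTL
   dump output.  */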
5934 /* Update the data in CUM to advance over an argument of mode MODE
5935 and data type TYPE. (TYPE is null for libcalls where that information
5936 may not be available.) */
5939 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5940 const_tree type, HOST_WIDE_INT bytes,
5941 HOST_WIDE_INT words)
5957 cum->words += words;
5958 cum->nregs -= words;
5959 cum->regno += words;
5961 if (cum->nregs <= 0)
5969 /* OImode shouldn't be used directly. */
5973 if (cum->float_in_sse < 2)
5976 if (cum->float_in_sse < 1)
5993 if (!type || !AGGREGATE_TYPE_P (type))
5995 cum->sse_words += words;
5996 cum->sse_nregs -= 1;
5997 cum->sse_regno += 1;
5998 if (cum->sse_nregs <= 0)
6012 if (!type || !AGGREGATE_TYPE_P (type))
6014 cum->mmx_words += words;
6015 cum->mmx_nregs -= 1;
6016 cum->mmx_regno += 1;
6017 if (cum->mmx_nregs <= 0)
6028 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6029 const_tree type, HOST_WIDE_INT words, bool named)
6031 int int_nregs, sse_nregs;
6033 /* Unnamed 256bit vector mode parameters are passed on stack. */
6034 if (!named && VALID_AVX256_REG_MODE (mode))
6037 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
6038 cum->words += words;
6039 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
6041 cum->nregs -= int_nregs;
6042 cum->sse_nregs -= sse_nregs;
6043 cum->regno += int_nregs;
6044 cum->sse_regno += sse_nregs;
6047 cum->words += words;
6051 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
6052 HOST_WIDE_INT words)
6054 /* Otherwise, this should be passed indirectly. */
6055 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
6057 cum->words += words;
6065 /* Update the data in CUM to advance over an argument of mode MODE and
6066 data type TYPE. (TYPE is null for libcalls where that information
6067 may not be available.) */
6070 ix86_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6071 const_tree type, bool named)
6073 HOST_WIDE_INT bytes, words;
6075 if (mode == BLKmode)
6076 bytes = int_size_in_bytes (type);
6078 bytes = GET_MODE_SIZE (mode);
6079 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6082 mode = type_natural_mode (type, NULL);
6084 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6085 function_arg_advance_ms_64 (cum, bytes, words);
6086 else if (TARGET_64BIT)
6087 function_arg_advance_64 (cum, mode, type, words, named);
6089 function_arg_advance_32 (cum, mode, type, bytes, words);
6092 /* Define where to put the arguments to a function.
6093 Value is zero to push the argument on the stack,
6094 or a hard register in which to store the argument.
6096 MODE is the argument's machine mode.
6097 TYPE is the data type of the argument (as a tree).
6098 This is null for libcalls where that information may not be available.
6100 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6101 the preceding args and about the function being called.
6102 NAMED is nonzero if this argument is a named parameter
6103 (otherwise it is an extra parameter matching an ellipsis). */
6106 function_arg_32 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6107 enum machine_mode orig_mode, const_tree type,
6108 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
6110 static bool warnedsse, warnedmmx;
6112 /* Avoid the AL settings for the Unix64 ABI. */
6113 if (mode == VOIDmode)
6129 if (words <= cum->nregs)
6131 int regno = cum->regno;
6133 /* Fastcall allocates the first two DWORD (SImode) or
6134 smaller arguments to ECX and EDX if it isn't an aggregate type. */
6140 || (type && AGGREGATE_TYPE_P (type)))
6143 /* ECX, not EAX, is the first allocated register. */
6144 if (regno == AX_REG)
6147 return gen_rtx_REG (mode, regno);
6152 if (cum->float_in_sse < 2)
6155 if (cum->float_in_sse < 1)
6159 /* In 32bit, we pass TImode in xmm registers. */
6166 if (!type || !AGGREGATE_TYPE_P (type))
6168 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
6171 warning (0, "SSE vector argument without SSE enabled "
6175 return gen_reg_or_parallel (mode, orig_mode,
6176 cum->sse_regno + FIRST_SSE_REG);
6181 /* OImode shouldn't be used directly. */
6190 if (!type || !AGGREGATE_TYPE_P (type))
6193 return gen_reg_or_parallel (mode, orig_mode,
6194 cum->sse_regno + FIRST_SSE_REG);
6204 if (!type || !AGGREGATE_TYPE_P (type))
6206 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6209 warning (0, "MMX vector argument without MMX enabled "
6213 return gen_reg_or_parallel (mode, orig_mode,
6214 cum->mmx_regno + FIRST_MMX_REG);
6223 function_arg_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6224 enum machine_mode orig_mode, const_tree type, bool named)
6226 /* Handle a hidden AL argument containing number of registers
6227 for varargs x86-64 functions. */
6228 if (mode == VOIDmode)
6229 return GEN_INT (cum->maybe_vaarg
6230 ? (cum->sse_nregs < 0
6231 ? X86_64_SSE_REGPARM_MAX
6246 /* Unnamed 256bit vector mode parameters are passed on stack. */
6252 return construct_container (mode, orig_mode, type, 0, cum->nregs,
6254 &x86_64_int_parameter_registers [cum->regno],
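/* Illustrative addition: under the SysV x86-64 ABI the integer argument
   registers are consumed in the order RDI, RSI, RDX, RCX, R8, R9, and
   SSE arguments use XMM0-XMM7.  The hypothetical declaration below is
   only for demonstration:  */

extern void demo_sysv (int a, int b, double x, int c);
/* a -> %edi, b -> %esi, x -> %xmm0, c -> %edx; a seventh integer
   argument would already spill to the stack.  */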
6259 function_arg_ms_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6260 enum machine_mode orig_mode, bool named,
6261 HOST_WIDE_INT bytes)
6265 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
6266 We use the value -2 to specify that the current function call is MS ABI. */
6267 if (mode == VOIDmode)
6268 return GEN_INT (-2);
6270 /* If we've run out of registers, it goes on the stack. */
6271 if (cum->nregs == 0)
6274 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6276 /* Only floating point modes are passed in anything but integer regs. */
6277 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6280 regno = cum->regno + FIRST_SSE_REG;
6285 /* Unnamed floating parameters are passed in both the
6286 SSE and integer registers. */
6287 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6288 t2 = gen_rtx_REG (mode, regno);
6289 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6290 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6291 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
6294 /* Handle aggregate types passed in registers. */
6295 if (orig_mode == BLKmode)
6297 if (bytes > 0 && bytes <= 8)
6298 mode = (bytes > 4 ? DImode : SImode);
6299 if (mode == BLKmode)
6303 return gen_reg_or_parallel (mode, orig_mode, regno);
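/* Illustrative addition: the Microsoft x64 convention handled above is
   positional -- the Nth argument owns the Nth slot whatever its type:
   RCX, RDX, R8, R9 for integers, XMM0-XMM3 for floats.  A hypothetical
   example:  */

extern void demo_ms (int a, double x, int b);
/* a -> ECX, x -> XMM1 (not XMM0, since slot 2 is taken), b -> R8D.
   For an unnamed floating argument the code above builds a PARALLEL so
   the value is present in both the SSE and the integer register.  */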
6306 /* Return where to put the arguments to a function.
6307 Return zero to push the argument on the stack, or a hard register in which to store the argument.
6309 MODE is the argument's machine mode. TYPE is the data type of the
6310 argument. It is null for libcalls where that information may not be
6311 available. CUM gives information about the preceding args and about
6312 the function being called. NAMED is nonzero if this argument is a
6313 named parameter (otherwise it is an extra parameter matching an ellipsis). */
6317 ix86_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode omode,
6318 const_tree type, bool named)
6320 enum machine_mode mode = omode;
6321 HOST_WIDE_INT bytes, words;
6323 if (mode == BLKmode)
6324 bytes = int_size_in_bytes (type);
6326 bytes = GET_MODE_SIZE (mode);
6327 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6329 /* To simplify the code below, represent vector types with a vector mode
6330 even if MMX/SSE are not active. */
6331 if (type && TREE_CODE (type) == VECTOR_TYPE)
6332 mode = type_natural_mode (type, cum);
6334 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6335 return function_arg_ms_64 (cum, mode, omode, named, bytes);
6336 else if (TARGET_64BIT)
6337 return function_arg_64 (cum, mode, omode, type, named);
6339 return function_arg_32 (cum, mode, omode, type, bytes, words);
6342 /* A C expression that indicates when an argument must be passed by
6343 reference. If nonzero for an argument, a copy of that argument is
6344 made in memory and a pointer to the argument is passed instead of
6345 the argument itself. The pointer is passed in whatever way is
6346 appropriate for passing a pointer to that type. */
6349 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6350 enum machine_mode mode ATTRIBUTE_UNUSED,
6351 const_tree type, bool named ATTRIBUTE_UNUSED)
6353 /* See Windows x64 Software Convention. */
6354 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6356 int msize = (int) GET_MODE_SIZE (mode);
6359 /* Arrays are passed by reference. */
6360 if (TREE_CODE (type) == ARRAY_TYPE)
6363 if (AGGREGATE_TYPE_P (type))
6365 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6366 are passed by reference. */
6367 msize = int_size_in_bytes (type);
6371 /* __m128 is passed by reference. */
6373 case 1: case 2: case 4: case 8:
6379 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
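/* Illustrative addition: under the Microsoft x64 ABI only objects of
   exactly 1, 2, 4 or 8 bytes travel by value; everything else goes by
   reference, e.g.:
       struct s3 { char c[3]; };    size  3 -> by reference
       struct s8 { long l; };       size  8 -> by value in a GPR
       __m128                       size 16 -> by reference  */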
6385 /* Return true when TYPE should be 128bit aligned for 32bit argument passing. */
6388 contains_aligned_value_p (tree type)
6390 enum machine_mode mode = TYPE_MODE (type);
6391 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6395 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6397 if (TYPE_ALIGN (type) < 128)
6400 if (AGGREGATE_TYPE_P (type))
6402 /* Walk the aggregates recursively. */
6403 switch (TREE_CODE (type))
6407 case QUAL_UNION_TYPE:
6411 /* Walk all the structure fields. */
6412 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6414 if (TREE_CODE (field) == FIELD_DECL
6415 && contains_aligned_value_p (TREE_TYPE (field)))
6422 /* Just for use if some languages pass arrays by value. */
6423 if (contains_aligned_value_p (TREE_TYPE (type)))
6434 /* Gives the alignment boundary, in bits, of an argument with the
6435 specified mode and type. */
6438 ix86_function_arg_boundary (enum machine_mode mode, tree type)
6443 /* Since the main variant type is used for the call, we convert TYPE
6444 to its main variant. */
6445 type = TYPE_MAIN_VARIANT (type);
6446 align = TYPE_ALIGN (type);
6449 align = GET_MODE_ALIGNMENT (mode);
6450 if (align < PARM_BOUNDARY)
6451 align = PARM_BOUNDARY;
6452 /* In 32bit, only _Decimal128 and __float128 are aligned to their
6453 natural boundaries. */
6454 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
6456 /* The i386 ABI defines all arguments to be 4 byte aligned. We have to
6457 make an exception for SSE modes since these require 128bit alignment.
6460 The handling here differs from field_alignment. ICC aligns MMX
6461 arguments to 4 byte boundaries, while structure fields are aligned
6462 to 8 byte boundaries. */
6465 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6466 align = PARM_BOUNDARY;
6470 if (!contains_aligned_value_p (type))
6471 align = PARM_BOUNDARY;
6474 if (align > BIGGEST_ALIGNMENT)
6475 align = BIGGEST_ALIGNMENT;
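/* Worked example (illustrative): in 32-bit code an int or a double
   argument lands on the 4-byte PARM_BOUNDARY, while an __m128 argument
   keeps its natural 128-bit boundary, so the caller pads the outgoing
   argument area before such an argument when needed.  */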
6479 /* Return true if REGNO is a possible register number of a function value. */
6482 ix86_function_value_regno_p (const unsigned int regno)
6489 case FIRST_FLOAT_REG:
6490 /* TODO: The function should depend on the current function ABI but
6491 builtins.c would need updating then. Therefore we use the default ABI. */
6493 if (TARGET_64BIT && ix86_abi == MS_ABI)
6495 return TARGET_FLOAT_RETURNS_IN_80387;
6501 if (TARGET_MACHO || TARGET_64BIT)
6509 /* Define how to find the value returned by a function.
6510 VALTYPE is the data type of the value (as a tree).
6511 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6512 otherwise, FUNC is 0. */
6515 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6516 const_tree fntype, const_tree fn)
6520 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
6521 we normally prevent this case when mmx is not available. However
6522 some ABIs may require the result to be returned like DImode. */
6523 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6524 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
6526 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
6527 we prevent this case when sse is not available. However some ABIs
6528 may require the result to be returned like integer TImode. */
6529 else if (mode == TImode
6530 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6531 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6533 /* 32-byte vector modes in %ymm0. */
6534 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6535 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6537 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6538 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6539 regno = FIRST_FLOAT_REG;
6541 /* Most things go in %eax. */
6544 /* Override FP return register with %xmm0 for local functions when
6545 SSE math is enabled or for functions with sseregparm attribute. */
6546 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6548 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6549 if ((sse_level >= 1 && mode == SFmode)
6550 || (sse_level == 2 && mode == DFmode))
6551 regno = FIRST_SSE_REG;
6554 /* OImode shouldn't be used directly. */
6555 gcc_assert (mode != OImode);
6557 return gen_rtx_REG (orig_mode, regno);
6561 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6566 /* Handle libcalls, which don't provide a type node. */
6567 if (valtype == NULL)
6579 return gen_rtx_REG (mode, FIRST_SSE_REG);
6582 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6586 return gen_rtx_REG (mode, AX_REG);
6590 ret = construct_container (mode, orig_mode, valtype, 1,
6591 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6592 x86_64_int_return_registers, 0);
6594 /* For zero sized structures, construct_container returns NULL, but we
6595 need to keep the rest of the compiler happy by returning a meaningful value. */
6597 ret = gen_rtx_REG (orig_mode, AX_REG);
6603 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6605 unsigned int regno = AX_REG;
6609 switch (GET_MODE_SIZE (mode))
6612 if((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6613 && !COMPLEX_MODE_P (mode))
6614 regno = FIRST_SSE_REG;
6618 if (mode == SFmode || mode == DFmode)
6619 regno = FIRST_SSE_REG;
6625 return gen_rtx_REG (orig_mode, regno);
6629 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6630 enum machine_mode orig_mode, enum machine_mode mode)
6632 const_tree fn, fntype;
6635 if (fntype_or_decl && DECL_P (fntype_or_decl))
6636 fn = fntype_or_decl;
6637 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6639 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6640 return function_value_ms_64 (orig_mode, mode);
6641 else if (TARGET_64BIT)
6642 return function_value_64 (orig_mode, mode, valtype);
6644 return function_value_32 (orig_mode, mode, fntype, fn);
6648 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6649 bool outgoing ATTRIBUTE_UNUSED)
6651 enum machine_mode mode, orig_mode;
6653 orig_mode = TYPE_MODE (valtype);
6654 mode = type_natural_mode (valtype, NULL);
6655 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6659 ix86_libcall_value (enum machine_mode mode)
6661 return ix86_function_value_1 (NULL, NULL, mode, mode);
6664 /* Return true iff type is returned in memory. */
6666 static int ATTRIBUTE_UNUSED
6667 return_in_memory_32 (const_tree type, enum machine_mode mode)
6671 if (mode == BLKmode)
6674 size = int_size_in_bytes (type);
6676 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6679 if (VECTOR_MODE_P (mode) || mode == TImode)
6681 /* User-created vectors small enough to fit in EAX. */
6685 /* MMX/3dNow values are returned in MM0,
6686 except when it doesn't exist. */
6688 return (TARGET_MMX ? 0 : 1);
6690 /* SSE values are returned in XMM0, except when it doesn't exist. */
6692 return (TARGET_SSE ? 0 : 1);
6694 /* AVX values are returned in YMM0, except when it doesn't exist. */
6696 return TARGET_AVX ? 0 : 1;
6705 /* OImode shouldn't be used directly. */
6706 gcc_assert (mode != OImode);
6711 static int ATTRIBUTE_UNUSED
6712 return_in_memory_64 (const_tree type, enum machine_mode mode)
6714 int needed_intregs, needed_sseregs;
6715 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6718 static int ATTRIBUTE_UNUSED
6719 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6721 HOST_WIDE_INT size = int_size_in_bytes (type);
6723 /* __m128 is returned in xmm0. */
6724 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6725 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6728 /* Otherwise, the size must be exactly 1, 2, 4, or 8 bytes. */
6729 return (size != 1 && size != 2 && size != 4 && size != 8);
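/* Illustrative addition: consequences of the rule above for the
   Microsoft x64 ABI:
       struct r8  { int a, b; }       size  8 -> returned in RAX
       struct r12 { int a, b, c; }    size 12 -> returned in memory
       __m128                         size 16 -> returned in XMM0  */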
6733 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6735 #ifdef SUBTARGET_RETURN_IN_MEMORY
6736 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6738 const enum machine_mode mode = type_natural_mode (type, NULL);
6742 if (ix86_function_type_abi (fntype) == MS_ABI)
6743 return return_in_memory_ms_64 (type, mode);
6745 return return_in_memory_64 (type, mode);
6748 return return_in_memory_32 (type, mode);
6752 /* Return true iff TYPE is returned in memory. This version is used
6753 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6754 but differs notably in that when MMX is available, 8-byte vectors
6755 are returned in memory, rather than in MMX registers. */
6758 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6761 enum machine_mode mode = type_natural_mode (type, NULL);
6764 return return_in_memory_64 (type, mode);
6766 if (mode == BLKmode)
6769 size = int_size_in_bytes (type);
6771 if (VECTOR_MODE_P (mode))
6773 /* Return in memory only if MMX registers *are* available. This
6774 seems backwards, but it is consistent with the existing Solaris x86 ABI. */
6781 else if (mode == TImode)
6783 else if (mode == XFmode)
6789 /* When returning SSE vector types, we have a choice of either
6790 (1) being abi incompatible with a -march switch, or
6791 (2) generating an error.
6792 Given no good solution, I think the safest thing is one warning.
6793 The user won't be able to use -Werror, but....
6795 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6796 called in response to actually generating a caller or callee that
6797 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6798 via aggregate_value_p for general type probing from tree-ssa. */
6801 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6803 static bool warnedsse, warnedmmx;
6805 if (!TARGET_64BIT && type)
6807 /* Look at the return type of the function, not the function type. */
6808 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6810 if (!TARGET_SSE && !warnedsse)
6813 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6816 warning (0, "SSE vector return without SSE enabled "
6821 if (!TARGET_MMX && !warnedmmx)
6823 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6826 warning (0, "MMX vector return without MMX enabled "
6836 /* Create the va_list data type. */
6838 /* Returns the calling convention specific va_list data type.
6839 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6842 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6844 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6846 /* For i386 we use plain pointer to argument area. */
6847 if (!TARGET_64BIT || abi == MS_ABI)
6848 return build_pointer_type (char_type_node);
6850 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6851 type_decl = build_decl (BUILTINS_LOCATION,
6852 TYPE_DECL, get_identifier ("__va_list_tag"), record);
6854 f_gpr = build_decl (BUILTINS_LOCATION,
6855 FIELD_DECL, get_identifier ("gp_offset"),
6856 unsigned_type_node);
6857 f_fpr = build_decl (BUILTINS_LOCATION,
6858 FIELD_DECL, get_identifier ("fp_offset"),
6859 unsigned_type_node);
6860 f_ovf = build_decl (BUILTINS_LOCATION,
6861 FIELD_DECL, get_identifier ("overflow_arg_area"),
6863 f_sav = build_decl (BUILTINS_LOCATION,
6864 FIELD_DECL, get_identifier ("reg_save_area"),
6867 va_list_gpr_counter_field = f_gpr;
6868 va_list_fpr_counter_field = f_fpr;
6870 DECL_FIELD_CONTEXT (f_gpr) = record;
6871 DECL_FIELD_CONTEXT (f_fpr) = record;
6872 DECL_FIELD_CONTEXT (f_ovf) = record;
6873 DECL_FIELD_CONTEXT (f_sav) = record;
6875 TREE_CHAIN (record) = type_decl;
6876 TYPE_NAME (record) = type_decl;
6877 TYPE_FIELDS (record) = f_gpr;
6878 TREE_CHAIN (f_gpr) = f_fpr;
6879 TREE_CHAIN (f_fpr) = f_ovf;
6880 TREE_CHAIN (f_ovf) = f_sav;
6882 layout_type (record);
6884 /* The correct type is an array type of one element. */
6885 return build_array_type (record, build_index_type (size_zero_node));
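/* Illustrative addition, not in the original file: the record built
   above matches the well-known psABI picture of va_list.  A rough
   hand-written equivalent (names are hypothetical):  */

typedef struct demo_va_list_tag
{
  unsigned int gp_offset;    /* Byte offset of the next GPR slot in reg_save_area.  */
  unsigned int fp_offset;    /* Byte offset of the next SSE slot (starts at 48).  */
  void *overflow_arg_area;   /* Next argument passed on the stack.  */
  void *reg_save_area;       /* Base of the register save area.  */
} demo_va_list_tag;
/* The actual va_list is an array of one such element, as returned above.  */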
6888 /* Set up the builtin va_list data type and, for 64-bit, the additional
6889 calling convention specific va_list data types. */
6892 ix86_build_builtin_va_list (void)
6894 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
6896 /* Initialize ABI-specific va_list builtin types. */
6900 if (ix86_abi == MS_ABI)
6902 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6903 if (TREE_CODE (t) != RECORD_TYPE)
6904 t = build_variant_type_copy (t);
6905 sysv_va_list_type_node = t;
6910 if (TREE_CODE (t) != RECORD_TYPE)
6911 t = build_variant_type_copy (t);
6912 sysv_va_list_type_node = t;
6914 if (ix86_abi != MS_ABI)
6916 t = ix86_build_builtin_va_list_abi (MS_ABI);
6917 if (TREE_CODE (t) != RECORD_TYPE)
6918 t = build_variant_type_copy (t);
6919 ms_va_list_type_node = t;
6924 if (TREE_CODE (t) != RECORD_TYPE)
6925 t = build_variant_type_copy (t);
6926 ms_va_list_type_node = t;
6933 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6936 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6945 /* GPR size of varargs save area. */
6946 if (cfun->va_list_gpr_size)
6947 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6949 ix86_varargs_gpr_size = 0;
6951 /* FPR size of varargs save area. We don't need it if we don't pass
6952 anything in SSE registers. */
6953 if (cum->sse_nregs && cfun->va_list_fpr_size)
6954 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6956 ix86_varargs_fpr_size = 0;
6958 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6961 save_area = frame_pointer_rtx;
6962 set = get_varargs_alias_set ();
6964 for (i = cum->regno;
6965 i < X86_64_REGPARM_MAX
6966 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6969 mem = gen_rtx_MEM (Pmode,
6970 plus_constant (save_area, i * UNITS_PER_WORD));
6971 MEM_NOTRAP_P (mem) = 1;
6972 set_mem_alias_set (mem, set);
6973 emit_move_insn (mem, gen_rtx_REG (Pmode,
6974 x86_64_int_parameter_registers[i]));
6977 if (ix86_varargs_fpr_size)
6979 /* Now emit code to save SSE registers. The AX parameter contains the
6980 number of SSE parameter registers used to call this function. We use
6981 the sse_prologue_save insn template, which produces a computed jump across
6982 the SSE saves. We need some preparation work to get this working. */
6984 label = gen_label_rtx ();
6986 nsse_reg = gen_reg_rtx (Pmode);
6987 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6989 /* Compute address of memory block we save into. We always use pointer
6990 pointing 127 bytes after first byte to store - this is needed to keep
6991 instruction size limited by 4 bytes (5 bytes for AVX) with one
6992 byte displacement. */
6993 tmp_reg = gen_reg_rtx (Pmode);
6994 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6995 plus_constant (save_area,
6996 ix86_varargs_gpr_size + 127)));
6997 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6998 MEM_NOTRAP_P (mem) = 1;
6999 set_mem_alias_set (mem, set);
7000 set_mem_align (mem, 64);
7002 /* And finally do the dirty job! */
7003 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
7004 GEN_INT (cum->sse_regno), label,
7005 gen_reg_rtx (Pmode)));
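/* Illustrative layout of the varargs register save area built above,
   assuming X86_64_REGPARM_MAX == 6 and X86_64_SSE_REGPARM_MAX == 8:
       bytes   0..47    six 8-byte GPR slots (rdi, rsi, rdx, rcx, r8, r9)
       bytes  48..175   eight 16-byte SSE slots (xmm0..xmm7)
   The fp_offset field of va_list therefore starts at byte 48.  */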
7010 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
7012 alias_set_type set = get_varargs_alias_set ();
7015 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
7019 mem = gen_rtx_MEM (Pmode,
7020 plus_constant (virtual_incoming_args_rtx,
7021 i * UNITS_PER_WORD));
7022 MEM_NOTRAP_P (mem) = 1;
7023 set_mem_alias_set (mem, set);
7025 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
7026 emit_move_insn (mem, reg);
7031 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7032 tree type, int *pretend_size ATTRIBUTE_UNUSED,
7035 CUMULATIVE_ARGS next_cum;
7038 /* This argument doesn't appear to be used anymore, which is good,
7039 because the old code here didn't suppress rtl generation. */
7040 gcc_assert (!no_rtl);
7045 fntype = TREE_TYPE (current_function_decl);
7047 /* For varargs, we do not want to skip the dummy va_dcl argument.
7048 For stdargs, we do want to skip the last named argument. */
7050 if (stdarg_p (fntype))
7051 ix86_function_arg_advance (&next_cum, mode, type, true);
7053 if (cum->call_abi == MS_ABI)
7054 setup_incoming_varargs_ms_64 (&next_cum);
7056 setup_incoming_varargs_64 (&next_cum);
7059 /* Check whether TYPE is a va_list of kind char *. */
7062 is_va_list_char_pointer (tree type)
7066 /* For 32-bit it is always true. */
7069 canonic = ix86_canonical_va_list_type (type);
7070 return (canonic == ms_va_list_type_node
7071 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
7074 /* Implement va_start. */
7077 ix86_va_start (tree valist, rtx nextarg)
7079 HOST_WIDE_INT words, n_gpr, n_fpr;
7080 tree f_gpr, f_fpr, f_ovf, f_sav;
7081 tree gpr, fpr, ovf, sav, t;
7084 /* Only the 64-bit target needs something special. */
7085 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7087 std_expand_builtin_va_start (valist, nextarg);
7091 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7092 f_fpr = TREE_CHAIN (f_gpr);
7093 f_ovf = TREE_CHAIN (f_fpr);
7094 f_sav = TREE_CHAIN (f_ovf);
7096 valist = build_simple_mem_ref (valist);
7097 TREE_TYPE (valist) = TREE_TYPE (sysv_va_list_type_node);
7098 /* The following should be folded into the MEM_REF offset. */
7099 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), unshare_expr (valist),
7101 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
7103 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
7105 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
7108 /* Count number of gp and fp argument registers used. */
7109 words = crtl->args.info.words;
7110 n_gpr = crtl->args.info.regno;
7111 n_fpr = crtl->args.info.sse_regno;
7113 if (cfun->va_list_gpr_size)
7115 type = TREE_TYPE (gpr);
7116 t = build2 (MODIFY_EXPR, type,
7117 gpr, build_int_cst (type, n_gpr * 8));
7118 TREE_SIDE_EFFECTS (t) = 1;
7119 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7122 if (TARGET_SSE && cfun->va_list_fpr_size)
7124 type = TREE_TYPE (fpr);
7125 t = build2 (MODIFY_EXPR, type, fpr,
7126 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
7127 TREE_SIDE_EFFECTS (t) = 1;
7128 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7131 /* Find the overflow area. */
7132 type = TREE_TYPE (ovf);
7133 t = make_tree (type, crtl->args.internal_arg_pointer);
7135 t = build2 (POINTER_PLUS_EXPR, type, t,
7136 size_int (words * UNITS_PER_WORD));
7137 t = build2 (MODIFY_EXPR, type, ovf, t);
7138 TREE_SIDE_EFFECTS (t) = 1;
7139 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7141 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
7143 /* Find the register save area.
7144 The function prologue saves it right above the stack frame. */
7145 type = TREE_TYPE (sav);
7146 t = make_tree (type, frame_pointer_rtx);
7147 if (!ix86_varargs_gpr_size)
7148 t = build2 (POINTER_PLUS_EXPR, type, t,
7149 size_int (-8 * X86_64_REGPARM_MAX));
7150 t = build2 (MODIFY_EXPR, type, sav, t);
7151 TREE_SIDE_EFFECTS (t) = 1;
7152 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
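/* Worked example (illustrative): in
       void f (const char *fmt, ...);
   FMT consumes one GPR, so va_start stores gp_offset = 1 * 8 = 8 and,
   with no named SSE arguments, fp_offset = 48 + 0 * 16 = 48;
   overflow_arg_area points just past the named stack words and
   reg_save_area at the block saved by the prologue above.  */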
7156 /* Implement va_arg. */
7159 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7162 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
7163 tree f_gpr, f_fpr, f_ovf, f_sav;
7164 tree gpr, fpr, ovf, sav, t;
7166 tree lab_false, lab_over = NULL_TREE;
7171 enum machine_mode nat_mode;
7172 unsigned int arg_boundary;
7174 /* Only the 64-bit target needs something special. */
7175 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7176 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7178 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7179 f_fpr = TREE_CHAIN (f_gpr);
7180 f_ovf = TREE_CHAIN (f_fpr);
7181 f_sav = TREE_CHAIN (f_ovf);
7183 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7184 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7185 valist = build_va_arg_indirect_ref (valist);
7186 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7187 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7188 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7190 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7192 type = build_pointer_type (type);
7193 size = int_size_in_bytes (type);
7194 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7196 nat_mode = type_natural_mode (type, NULL);
7205 /* Unnamed 256bit vector mode parameters are passed on stack. */
7206 if (ix86_cfun_abi () == SYSV_ABI)
7213 container = construct_container (nat_mode, TYPE_MODE (type),
7214 type, 0, X86_64_REGPARM_MAX,
7215 X86_64_SSE_REGPARM_MAX, intreg,
7220 /* Pull the value out of the saved registers. */
7222 addr = create_tmp_var (ptr_type_node, "addr");
7226 int needed_intregs, needed_sseregs;
7228 tree int_addr, sse_addr;
7230 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7231 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7233 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7235 need_temp = (!REG_P (container)
7236 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7237 || TYPE_ALIGN (type) > 128));
7239 /* When passing a structure, verify that it forms a consecutive block
7240 in the register save area. If not, we need to do moves. */
7241 if (!need_temp && !REG_P (container))
7243 /* Verify that all registers are strictly consecutive. */
7244 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7248 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7250 rtx slot = XVECEXP (container, 0, i);
7251 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7252 || INTVAL (XEXP (slot, 1)) != i * 16)
7260 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7262 rtx slot = XVECEXP (container, 0, i);
7263 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7264 || INTVAL (XEXP (slot, 1)) != i * 8)
7276 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7277 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7280 /* First ensure that we fit completely in registers. */
7283 t = build_int_cst (TREE_TYPE (gpr),
7284 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7285 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7286 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7287 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7288 gimplify_and_add (t, pre_p);
7292 t = build_int_cst (TREE_TYPE (fpr),
7293 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7294 + X86_64_REGPARM_MAX * 8);
7295 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7296 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7297 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7298 gimplify_and_add (t, pre_p);
7301 /* Compute index to start of area used for integer regs. */
7304 /* int_addr = gpr + sav; */
7305 t = fold_convert (sizetype, gpr);
7306 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7307 gimplify_assign (int_addr, t, pre_p);
7311 /* sse_addr = fpr + sav; */
7312 t = fold_convert (sizetype, fpr);
7313 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7314 gimplify_assign (sse_addr, t, pre_p);
7318 int i, prev_size = 0;
7319 tree temp = create_tmp_var (type, "va_arg_tmp");
7322 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
7323 gimplify_assign (addr, t, pre_p);
7325 for (i = 0; i < XVECLEN (container, 0); i++)
7327 rtx slot = XVECEXP (container, 0, i);
7328 rtx reg = XEXP (slot, 0);
7329 enum machine_mode mode = GET_MODE (reg);
7335 tree dest_addr, dest;
7336 int cur_size = GET_MODE_SIZE (mode);
7338 if (prev_size + cur_size > size)
7340 cur_size = size - prev_size;
7341 mode = mode_for_size (cur_size * BITS_PER_UNIT, MODE_INT, 1);
7342 if (mode == BLKmode)
7345 piece_type = lang_hooks.types.type_for_mode (mode, 1);
7346 if (mode == GET_MODE (reg))
7347 addr_type = build_pointer_type (piece_type);
7349 addr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
7351 daddr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
7354 if (SSE_REGNO_P (REGNO (reg)))
7356 src_addr = sse_addr;
7357 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
7361 src_addr = int_addr;
7362 src_offset = REGNO (reg) * 8;
7364 src_addr = fold_convert (addr_type, src_addr);
7365 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
7366 size_int (src_offset));
7368 dest_addr = fold_convert (daddr_type, addr);
7369 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
7370 size_int (INTVAL (XEXP (slot, 1))));
7371 if (cur_size == GET_MODE_SIZE (mode))
7373 src = build_va_arg_indirect_ref (src_addr);
7374 dest = build_va_arg_indirect_ref (dest_addr);
7376 gimplify_assign (dest, src, pre_p);
7381 = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
7382 3, dest_addr, src_addr,
7383 size_int (cur_size));
7384 gimplify_and_add (copy, pre_p);
7386 prev_size += cur_size;
7392 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
7393 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7394 gimplify_assign (gpr, t, pre_p);
7399 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7400 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7401 gimplify_assign (fpr, t, pre_p);
7404 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7406 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7409 /* ... otherwise out of the overflow area. */
7411 /* When we align a parameter on the stack for the caller, if its
7412 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
7413 aligned at MAX_SUPPORTED_STACK_ALIGNMENT. We match the callee
7414 here with the caller. */
7415 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
7416 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7417 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7419 /* Care for on-stack alignment if needed. */
7420 if (arg_boundary <= 64
7421 || integer_zerop (TYPE_SIZE (type)))
7425 HOST_WIDE_INT align = arg_boundary / 8;
7426 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
7427 size_int (align - 1));
7428 t = fold_convert (sizetype, t);
7429 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7431 t = fold_convert (TREE_TYPE (ovf), t);
7432 if (crtl->stack_alignment_needed < arg_boundary)
7433 crtl->stack_alignment_needed = arg_boundary;
7435 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7436 gimplify_assign (addr, t, pre_p);
7438 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
7439 size_int (rsize * UNITS_PER_WORD));
7440 gimplify_assign (unshare_expr (ovf), t, pre_p);
7443 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7445 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
7446 addr = fold_convert (ptrtype, addr);
7449 addr = build_va_arg_indirect_ref (addr);
7450 return build_va_arg_indirect_ref (addr);
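/* Illustrative sketch of what the gimplification above produces for
   va_arg (ap, int) -- a hand-written equivalent, assuming the psABI
   va_list layout sketched earlier (names are hypothetical):  */

static void *
demo_va_arg_int_addr (unsigned int *gp_offset, void **overflow_arg_area,
                      void *reg_save_area)
{
  void *addr;
  if (*gp_offset < 6 * 8)               /* A GPR slot is still unused.  */
    {
      addr = (char *) reg_save_area + *gp_offset;
      *gp_offset += 8;
    }
  else                                  /* Fetch from the stack instead.  */
    {
      addr = *overflow_arg_area;
      *overflow_arg_area = (char *) *overflow_arg_area + 8;
    }
  return addr;                          /* The int lives at *addr.  */
}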
7453 /* Return nonzero if OPNUM's MEM should be matched
7454 in movabs* patterns. */
7457 ix86_check_movabs (rtx insn, int opnum)
7461 set = PATTERN (insn);
7462 if (GET_CODE (set) == PARALLEL)
7463 set = XVECEXP (set, 0, 0);
7464 gcc_assert (GET_CODE (set) == SET);
7465 mem = XEXP (set, opnum);
7466 while (GET_CODE (mem) == SUBREG)
7467 mem = SUBREG_REG (mem);
7468 gcc_assert (MEM_P (mem));
7469 return (volatile_ok || !MEM_VOLATILE_P (mem));
7472 /* Initialize the table of extra 80387 mathematical constants. */
7475 init_ext_80387_constants (void)
7477 static const char * cst[5] =
7479 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7480 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7481 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7482 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7483 "3.1415926535897932385128089594061862044", /* 4: fldpi */
7487 for (i = 0; i < 5; i++)
7489 real_from_string (&ext_80387_constants_table[i], cst[i]);
7490 /* Ensure each constant is rounded to XFmode precision. */
7491 real_convert (&ext_80387_constants_table[i],
7492 XFmode, &ext_80387_constants_table[i]);
7495 ext_80387_constants_init = 1;
7498 /* Return true if the constant is something that can be loaded with
7499 a special instruction. */
7502 standard_80387_constant_p (rtx x)
7504 enum machine_mode mode = GET_MODE (x);
7508 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7511 if (x == CONST0_RTX (mode))
7513 if (x == CONST1_RTX (mode))
7516 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7518 /* For XFmode constants, try to find a special 80387 instruction when
7519 optimizing for size or on those CPUs that benefit from them. */
7521 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7525 if (! ext_80387_constants_init)
7526 init_ext_80387_constants ();
7528 for (i = 0; i < 5; i++)
7529 if (real_identical (&r, &ext_80387_constants_table[i]))
7533 /* A load of the constant -0.0 or -1.0 will be split into an
7534 fldz;fchs or fld1;fchs sequence. */
7535 if (real_isnegzero (&r))
7537 if (real_identical (&r, &dconstm1))
7543 /* Return the opcode of the special instruction to be used to load the constant X. */
7547 standard_80387_constant_opcode (rtx x)
7549 switch (standard_80387_constant_p (x))
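/* The elided cases map the classification above to x87 load opcodes,
   roughly (an illustrative summary based on the constant table in
   init_ext_80387_constants; not verbatim source):
       1 -> fldz     2 -> fld1     3 -> fldlg2   4 -> fldln2
       5 -> fldl2e   6 -> fldl2t   7 -> fldpi
   with -0.0 and -1.0 handled by a following fchs.  */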
7573 /* Return the CONST_DOUBLE representing the 80387 constant that is
7574 loaded by the specified special instruction. The argument IDX
7575 matches the return value from standard_80387_constant_p. */
7578 standard_80387_constant_rtx (int idx)
7582 if (! ext_80387_constants_init)
7583 init_ext_80387_constants ();
7599 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
7603 /* Return 1 if X is all 0s and 2 if X is all 1s
7604 in a supported SSE vector mode. */
7607 standard_sse_constant_p (rtx x)
7609 enum machine_mode mode = GET_MODE (x);
7611 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7613 if (vector_all_ones_operand (x, mode))
7629 /* Return the opcode of the special instruction to be used to load the constant X. */
7633 standard_sse_constant_opcode (rtx insn, rtx x)
7635 switch (standard_sse_constant_p (x))
7638 switch (get_attr_mode (insn))
7641 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7643 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7644 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7646 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7648 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7649 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7651 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7653 return "vxorps\t%x0, %x0, %x0";
7655 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7656 return "vxorps\t%x0, %x0, %x0";
7658 return "vxorpd\t%x0, %x0, %x0";
7660 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7661 return "vxorps\t%x0, %x0, %x0";
7663 return "vpxor\t%x0, %x0, %x0";
7668 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
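/* Illustrative addition: hand-written intrinsic equivalents of the two
   idioms used above -- XORing a register with itself yields all-zero
   bits, and PCMPEQD of a register with itself yields all-one bits
   (every lane compares equal to itself), which is why exactly these
   two vectors are "standard" constants:  */

#include <emmintrin.h>

static __m128i demo_sse_zero (void)
{
  return _mm_setzero_si128 ();          /* Compiles to pxor/xorps.  */
}

static __m128i demo_sse_ones (void)
{
  __m128i x = _mm_setzero_si128 ();
  return _mm_cmpeq_epi32 (x, x);        /* pcmpeqd: 0 == 0 in every lane.  */
}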
7675 /* Returns 1 if OP contains a symbol reference. */
7678 symbolic_reference_mentioned_p (rtx op)
7683 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7686 fmt = GET_RTX_FORMAT (GET_CODE (op));
7687 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7693 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7694 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7698 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7705 /* Return 1 if it is appropriate to emit `ret' instructions in the
7706 body of a function. Do this only if the epilogue is simple, needing a
7707 couple of insns. Prior to reloading, we can't tell how many registers
7708 must be saved, so return 0 then. Return 0 if there is no frame
7709 marker to de-allocate. */
7712 ix86_can_use_return_insn_p (void)
7714 struct ix86_frame frame;
7716 if (! reload_completed || frame_pointer_needed)
7719 /* Don't allow more than 32k of pops, since that's all we can do
7720 with one instruction. */
7721 if (crtl->args.pops_args
7722 && crtl->args.size >= 32768)
7725 ix86_compute_frame_layout (&frame);
7726 return frame.to_allocate == 0 && frame.padding0 == 0
7727 && (frame.nregs + frame.nsseregs) == 0;
7730 /* Value should be nonzero if functions must have frame pointers.
7731 Zero means the frame pointer need not be set up (and parms may
7732 be accessed via the stack pointer) in functions that seem suitable. */
7735 ix86_frame_pointer_required (void)
7737 /* If we accessed previous frames, then the generated code expects
7738 to be able to access the saved ebp value in our frame. */
7739 if (cfun->machine->accesses_prev_frame)
7742 /* Several x86 OSes need a frame pointer for other reasons,
7743 usually pertaining to setjmp. */
7744 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7747 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7748 the frame pointer by default. Turn it back on now if we've not
7749 got a leaf function. */
7750 if (TARGET_OMIT_LEAF_FRAME_POINTER
7751 && (!current_function_is_leaf
7752 || ix86_current_function_calls_tls_descriptor))
7761 /* Record that the current function accesses previous call frames. */
7764 ix86_setup_frame_addresses (void)
7766 cfun->machine->accesses_prev_frame = 1;
7769 #ifndef USE_HIDDEN_LINKONCE
7770 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7771 # define USE_HIDDEN_LINKONCE 1
7773 # define USE_HIDDEN_LINKONCE 0
7777 static int pic_labels_used;
7779 /* Fills in the label name that should be used for a pc thunk for
7780 the given register. */
7783 get_pc_thunk_name (char name[32], unsigned int regno)
7785 gcc_assert (!TARGET_64BIT);
7787 if (USE_HIDDEN_LINKONCE)
7788 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7790 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
7794 /* This function generates code for -fpic that loads %ebx with
7795 the return address of the caller and then returns. */
7798 ix86_code_end (void)
7803 for (regno = 0; regno < 8; ++regno)
7808 if (! ((pic_labels_used >> regno) & 1))
7811 get_pc_thunk_name (name, regno);
7813 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
7814 get_identifier (name),
7815 build_function_type (void_type_node, void_list_node));
7816 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
7817 NULL_TREE, void_type_node);
7818 TREE_PUBLIC (decl) = 1;
7819 TREE_STATIC (decl) = 1;
7824 switch_to_section (darwin_sections[text_coal_section]);
7825 fputs ("\t.weak_definition\t", asm_out_file);
7826 assemble_name (asm_out_file, name);
7827 fputs ("\n\t.private_extern\t", asm_out_file);
7828 assemble_name (asm_out_file, name);
7829 fputs ("\n", asm_out_file);
7830 ASM_OUTPUT_LABEL (asm_out_file, name);
7831 DECL_WEAK (decl) = 1;
7835 if (USE_HIDDEN_LINKONCE)
7837 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
7839 (*targetm.asm_out.unique_section) (decl, 0);
7840 switch_to_section (get_named_section (decl, NULL, 0));
7842 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7843 fputs ("\t.hidden\t", asm_out_file);
7844 assemble_name (asm_out_file, name);
7845 putc ('\n', asm_out_file);
7846 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7850 switch_to_section (text_section);
7851 ASM_OUTPUT_LABEL (asm_out_file, name);
7854 DECL_INITIAL (decl) = make_node (BLOCK);
7855 current_function_decl = decl;
7856 init_function_start (decl);
7857 first_function_block_is_cold = false;
7858 /* Make sure unwind info is emitted for the thunk if needed. */
7859 final_start_function (emit_barrier (), asm_out_file, 1);
7861 xops[0] = gen_rtx_REG (Pmode, regno);
7862 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7863 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7864 output_asm_insn ("ret", xops);
7865 final_end_function ();
7866 init_insn_lengths ();
7867 free_after_compilation (cfun);
7869 current_function_decl = NULL;
7873 /* Emit code for the SET_GOT patterns. */
7876 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7882 if (TARGET_VXWORKS_RTP && flag_pic)
7884 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7885 xops[2] = gen_rtx_MEM (Pmode,
7886 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7887 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7889 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7890 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7891 an unadorned address. */
7892 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7893 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7894 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7898 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7900 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7902 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7905 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7908 output_asm_insn ("call\t%a2", xops);
7909 #ifdef DWARF2_UNWIND_INFO
7910 /* The call to the next label acts as a push. */
7911 if (dwarf2out_do_frame ())
7915 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7916 gen_rtx_PLUS (Pmode,
7919 RTX_FRAME_RELATED_P (insn) = 1;
7920 dwarf2out_frame_debug (insn, true);
7927 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7928 is what will be referenced by the Mach-O PIC subsystem. */
7930 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7933 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7934 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7938 output_asm_insn ("pop%z0\t%0", xops);
7939 #ifdef DWARF2_UNWIND_INFO
7940 /* The pop is a pop and clobbers dest, but doesn't restore it
7941 for unwind info purposes. */
7942 if (dwarf2out_do_frame ())
7946 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
7947 dwarf2out_frame_debug (insn, true);
7948 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7949 gen_rtx_PLUS (Pmode,
7952 RTX_FRAME_RELATED_P (insn) = 1;
7953 dwarf2out_frame_debug (insn, true);
7962 get_pc_thunk_name (name, REGNO (dest));
7963 pic_labels_used |= 1 << REGNO (dest);
7965 #ifdef DWARF2_UNWIND_INFO
7966 /* Ensure all queued register saves are flushed before the call. */
7968 if (dwarf2out_do_frame ())
7972 insn = emit_barrier ();
7974 dwarf2out_frame_debug (insn, false);
7977 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7978 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7979 output_asm_insn ("call\t%X2", xops);
7980 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7981 is what will be referenced by the Mach-O PIC subsystem. */
7984 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7986 targetm.asm_out.internal_label (asm_out_file, "L",
7987 CODE_LABEL_NUMBER (label));
7994 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7995 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7997 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
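/* Illustrative addition: for -fpic on 32-bit with deep branch
   prediction the sequence emitted here is the classic
       call  __i686.get_pc_thunk.bx
       addl  $_GLOBAL_OFFSET_TABLE_, %ebx
   where the thunk body (generated in ix86_code_end above) is just
       movl  (%esp), %ebx
       ret
   which keeps the return-address predictor balanced, unlike the
   call/pop trick emitted on the other path.  */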
8002 /* Generate a "push" pattern for input ARG. */
8007 if (ix86_cfa_state->reg == stack_pointer_rtx)
8008 ix86_cfa_state->offset += UNITS_PER_WORD;
8010 return gen_rtx_SET (VOIDmode,
8012 gen_rtx_PRE_DEC (Pmode,
8013 stack_pointer_rtx)),
8017 /* Return >= 0 if there is an unused call-clobbered register available
8018 for the entire function. */
8021 ix86_select_alt_pic_regnum (void)
8023 if (current_function_is_leaf && !crtl->profile
8024 && !ix86_current_function_calls_tls_descriptor)
8027 /* Can't use the same register for both PIC and DRAP. */
8029 drap = REGNO (crtl->drap_reg);
8032 for (i = 2; i >= 0; --i)
8033 if (i != drap && !df_regs_ever_live_p (i))
8037 return INVALID_REGNUM;
8040 /* Return 1 if we need to save REGNO. */
8042 ix86_save_reg (unsigned int regno, int maybe_eh_return)
8044 if (pic_offset_table_rtx
8045 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
8046 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8048 || crtl->calls_eh_return
8049 || crtl->uses_const_pool))
8051 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
8056 if (crtl->calls_eh_return && maybe_eh_return)
8061 unsigned test = EH_RETURN_DATA_REGNO (i);
8062 if (test == INVALID_REGNUM)
8069 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
8072 return (df_regs_ever_live_p (regno)
8073 && !call_used_regs[regno]
8074 && !fixed_regs[regno]
8075 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
8078 /* Return the number of saved general purpose registers. */
8081 ix86_nsaved_regs (void)
8086 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8087 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8092 /* Return the number of saved SSE registers. */
8095 ix86_nsaved_sseregs (void)
8100 if (ix86_cfun_abi () != MS_ABI)
8102 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8103 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8108 /* Given FROM and TO register numbers, say whether this elimination is
8109 allowed. If stack alignment is needed, we can only replace argument
8110 pointer with hard frame pointer, or replace frame pointer with stack
8111 pointer. Otherwise, frame pointer elimination is automatically
8112 handled and all other eliminations are valid. */
8115 ix86_can_eliminate (const int from, const int to)
8117 if (stack_realign_fp)
8118 return ((from == ARG_POINTER_REGNUM
8119 && to == HARD_FRAME_POINTER_REGNUM)
8120 || (from == FRAME_POINTER_REGNUM
8121 && to == STACK_POINTER_REGNUM));
8123 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
8126 /* Return the offset between two registers, one to be eliminated, and the other
8127 its replacement, at the start of a routine. */
8130 ix86_initial_elimination_offset (int from, int to)
8132 struct ix86_frame frame;
8133 ix86_compute_frame_layout (&frame);
8135 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
8136 return frame.hard_frame_pointer_offset;
8137 else if (from == FRAME_POINTER_REGNUM
8138 && to == HARD_FRAME_POINTER_REGNUM)
8139 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
8142 gcc_assert (to == STACK_POINTER_REGNUM);
8144 if (from == ARG_POINTER_REGNUM)
8145 return frame.stack_pointer_offset;
8147 gcc_assert (from == FRAME_POINTER_REGNUM);
8148 return frame.stack_pointer_offset - frame.frame_pointer_offset;
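/* Illustrative example (not part of the original source): with assumed
   frame values hard_frame_pointer_offset == 8, frame_pointer_offset == 16
   and stack_pointer_offset == 56, the eliminations above resolve to:

       ARG_POINTER   -> HARD_FRAME_POINTER :  8
       FRAME_POINTER -> HARD_FRAME_POINTER :  8 - 16 == -8
       ARG_POINTER   -> STACK_POINTER      : 56
       FRAME_POINTER -> STACK_POINTER      : 56 - 16 == 40

   The numbers are hypothetical; only the arithmetic mirrors the code.  */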
8152 /* In a dynamically-aligned function, we can't know the offset from
8153 stack pointer to frame pointer, so we must ensure that setjmp
8154 eliminates fp against the hard fp (%ebp) rather than trying to
8155 index from %esp up to the top of the frame across a gap that is
8156 of unknown (at compile-time) size. */
8158 ix86_builtin_setjmp_frame_value (void)
8160 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
8163 /* Fill the structure ix86_frame describing the frame of the currently compiled function. */
8166 ix86_compute_frame_layout (struct ix86_frame *frame)
8168 unsigned int stack_alignment_needed;
8169 HOST_WIDE_INT offset;
8170 unsigned int preferred_alignment;
8171 HOST_WIDE_INT size = get_frame_size ();
8173 frame->nregs = ix86_nsaved_regs ();
8174 frame->nsseregs = ix86_nsaved_sseregs ();
8176 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
8177 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
8179 /* The MS ABI seems to require stack alignment to always be 16, except
8180 for function prologues and leaf functions. */
8181 if ((ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
8182 && (!current_function_is_leaf || cfun->calls_alloca != 0
8183 || ix86_current_function_calls_tls_descriptor))
8185 preferred_alignment = 16;
8186 stack_alignment_needed = 16;
8187 crtl->preferred_stack_boundary = 128;
8188 crtl->stack_alignment_needed = 128;
8191 gcc_assert (!size || stack_alignment_needed);
8192 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
8193 gcc_assert (preferred_alignment <= stack_alignment_needed);
8195 /* During a reload iteration the number of registers saved can change.
8196 Recompute the value as needed. Do not recompute when the number of
8197 registers didn't change, as reload makes multiple calls to this function
8198 and does not expect the decision to change within a single iteration. */
8199 if (!optimize_function_for_size_p (cfun)
8200 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
8202 int count = frame->nregs;
8203 struct cgraph_node *node = cgraph_node (current_function_decl);
8205 cfun->machine->use_fast_prologue_epilogue_nregs = count;
8206 /* The fast prologue uses move instead of push to save registers. This
8207 is significantly longer, but also executes faster as modern hardware
8208 can execute the moves in parallel, but can't do that for push/pop.
8210 Be careful about choosing which prologue to emit: when the function
8211 takes many instructions to execute, we may use the slow version, as
8212 well as when the function is known to be outside a hot spot (known
8213 with feedback only). Weight the size of the function by the number of
8214 registers to save, as it is cheap to use one or two push instructions
8215 but very slow to use many of them. */
8217 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
8218 if (node->frequency < NODE_FREQUENCY_NORMAL
8219 || (flag_branch_probabilities
8220 && node->frequency < NODE_FREQUENCY_HOT))
8221 cfun->machine->use_fast_prologue_epilogue = false;
8223 cfun->machine->use_fast_prologue_epilogue
8224 = !expensive_function_p (count);
8226 if (TARGET_PROLOGUE_USING_MOVE
8227 && cfun->machine->use_fast_prologue_epilogue)
8228 frame->save_regs_using_mov = true;
8230 frame->save_regs_using_mov = false;
8232 /* Skip return address. */
8233 offset = UNITS_PER_WORD;
8235 /* Skip pushed static chain. */
8236 if (ix86_static_chain_on_stack)
8237 offset += UNITS_PER_WORD;
8239 /* Skip saved base pointer. */
8240 if (frame_pointer_needed)
8241 offset += UNITS_PER_WORD;
8243 frame->hard_frame_pointer_offset = offset;
8245 /* Set offset to aligned because the realigned frame starts from
8246 here. */
8247 if (stack_realign_fp)
8248 offset = (offset + stack_alignment_needed - 1) & -stack_alignment_needed;
8250 /* Register save area */
8251 offset += frame->nregs * UNITS_PER_WORD;
8253 /* Align SSE reg save area. */
8254 if (frame->nsseregs)
8255 frame->padding0 = ((offset + 16 - 1) & -16) - offset;
8257 frame->padding0 = 0;
8259 /* SSE register save area. */
8260 offset += frame->padding0 + frame->nsseregs * 16;
8263 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
8264 offset += frame->va_arg_size;
8266 /* Align start of frame for local function. */
8267 frame->padding1 = ((offset + stack_alignment_needed - 1)
8268 & -stack_alignment_needed) - offset;
8270 offset += frame->padding1;
8272 /* Frame pointer points here. */
8273 frame->frame_pointer_offset = offset;
8277 /* Add outgoing arguments area. Can be skipped if we eliminated
8278 all the function calls as dead code.
8279 Skipping is however impossible when the function calls alloca. The
8280 alloca expander assumes that the last crtl->outgoing_args_size
8281 bytes of the stack frame are unused. */
8282 if (ACCUMULATE_OUTGOING_ARGS
8283 && (!current_function_is_leaf || cfun->calls_alloca
8284 || ix86_current_function_calls_tls_descriptor))
8286 offset += crtl->outgoing_args_size;
8287 frame->outgoing_arguments_size = crtl->outgoing_args_size;
8290 frame->outgoing_arguments_size = 0;
8292 /* Align stack boundary. Only needed if we're calling another function
8293 or using alloca. */
8294 if (!current_function_is_leaf || cfun->calls_alloca
8295 || ix86_current_function_calls_tls_descriptor)
8296 frame->padding2 = ((offset + preferred_alignment - 1)
8297 & -preferred_alignment) - offset;
8299 frame->padding2 = 0;
8301 offset += frame->padding2;
8303 /* We've reached end of stack frame. */
8304 frame->stack_pointer_offset = offset;
8306 /* Size prologue needs to allocate. */
8307 frame->to_allocate =
8308 (size + frame->padding1 + frame->padding2
8309 + frame->outgoing_arguments_size + frame->va_arg_size);
8311 if ((!frame->to_allocate && frame->nregs <= 1)
8312 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
8313 frame->save_regs_using_mov = false;
8315 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8316 && current_function_sp_is_unchanging
8317 && current_function_is_leaf
8318 && !ix86_current_function_calls_tls_descriptor)
8320 frame->red_zone_size = frame->to_allocate;
8321 if (frame->save_regs_using_mov)
8322 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
8323 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
8324 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
8327 frame->red_zone_size = 0;
8328 frame->to_allocate -= frame->red_zone_size;
8329 frame->stack_pointer_offset -= frame->red_zone_size;
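/* A minimal standalone sketch (not GCC code) of the rounding idiom used
   repeatedly above to align offsets; the helper name is hypothetical and
   ALIGN is assumed to be a power of two.  */

static inline HOST_WIDE_INT
example_round_up (HOST_WIDE_INT offset, HOST_WIDE_INT align)
{
  /* Adding ALIGN - 1 and masking with -ALIGN (all ones above the
     alignment bits) rounds up, e.g. example_round_up (9, 16) == 16
     and example_round_up (32, 16) == 32.  */
  return (offset + align - 1) & -align;
}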
8332 /* Emit code to save registers in the prologue. */
8335 ix86_emit_save_regs (void)
8340 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
8341 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8343 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
8344 RTX_FRAME_RELATED_P (insn) = 1;
8348 /* Emit code to save registers using MOV insns. First register
8349 is stored at POINTER + OFFSET. */
8351 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8356 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8357 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8359 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
8361 gen_rtx_REG (Pmode, regno));
8362 RTX_FRAME_RELATED_P (insn) = 1;
8363 offset += UNITS_PER_WORD;
8367 /* Emit code to save SSE registers using MOV insns. First register
8368 is stored at POINTER + OFFSET. */
8370 ix86_emit_save_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8376 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8377 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8379 mem = adjust_address (gen_rtx_MEM (TImode, pointer), TImode, offset);
8380 set_mem_align (mem, 128);
8381 insn = emit_move_insn (mem, gen_rtx_REG (TImode, regno));
8382 RTX_FRAME_RELATED_P (insn) = 1;
8387 static GTY(()) rtx queued_cfa_restores;
8389 /* Add a REG_CFA_RESTORE REG note to INSN, or queue it until the next
8390 stack manipulation insn. Don't add it if the previously
8391 saved value will be left untouched within the stack red zone till
8392 return, as unwinders can find the same value in the register and
8393 on the stack. */
8396 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT red_offset)
8399 && !TARGET_64BIT_MS_ABI
8400 && red_offset + RED_ZONE_SIZE >= 0
8401 && crtl->args.pops_args < 65536)
8406 add_reg_note (insn, REG_CFA_RESTORE, reg);
8407 RTX_FRAME_RELATED_P (insn) = 1;
8411 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
8414 /* Add queued REG_CFA_RESTORE notes, if any, to INSN. */
8417 ix86_add_queued_cfa_restore_notes (rtx insn)
8420 if (!queued_cfa_restores)
8422 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
8424 XEXP (last, 1) = REG_NOTES (insn);
8425 REG_NOTES (insn) = queued_cfa_restores;
8426 queued_cfa_restores = NULL_RTX;
8427 RTX_FRAME_RELATED_P (insn) = 1;
8430 /* Expand prologue or epilogue stack adjustment.
8431 The pattern exists to put a dependency on all ebp-based memory accesses.
8432 STYLE should be negative if instructions should be marked as frame related,
8433 zero if the %r11 register is live and cannot be freely used, and positive
8434 otherwise. */
8437 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
8438 int style, bool set_cfa)
8443 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
8444 else if (x86_64_immediate_operand (offset, DImode))
8445 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
8449 /* r11 is used by indirect sibcall return as well, set before the
8450 epilogue and used after the epilogue. */
8452 tmp = gen_rtx_REG (DImode, R11_REG);
8455 gcc_assert (src != hard_frame_pointer_rtx
8456 && dest != hard_frame_pointer_rtx);
8457 tmp = hard_frame_pointer_rtx;
8459 insn = emit_insn (gen_rtx_SET (DImode, tmp, offset));
8461 RTX_FRAME_RELATED_P (insn) = 1;
8462 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, tmp,
8467 ix86_add_queued_cfa_restore_notes (insn);
8473 gcc_assert (ix86_cfa_state->reg == src);
8474 ix86_cfa_state->offset += INTVAL (offset);
8475 ix86_cfa_state->reg = dest;
8477 r = gen_rtx_PLUS (Pmode, src, offset);
8478 r = gen_rtx_SET (VOIDmode, dest, r);
8479 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
8480 RTX_FRAME_RELATED_P (insn) = 1;
8483 RTX_FRAME_RELATED_P (insn) = 1;
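/* Illustrative usage (hedged; it mirrors the prologue call site further
   below): allocating the frame is done with

       pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                                  GEN_INT (-allocate), -1,
                                  ix86_cfa_state->reg == stack_pointer_rtx);

   which emits the equivalent of "sub $allocate, %sp", while the epilogue
   passes a positive offset to deallocate.  */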
8486 /* Find an available register to be used as a dynamic realign argument
8487 pointer register. Such a register will be written in the prologue and
8488 used at the beginning of the body, so it must not be
8489 1. parameter passing register.
8491 We reuse the static-chain register if it is available. Otherwise, we
8492 use DI for i386 and R13 for x86-64. We chose R13 since it has a
8493 longer encoding.
8495 Return: the regno of chosen register. */
8498 find_drap_reg (void)
8500 tree decl = cfun->decl;
8504 /* Use R13 for a nested function or a function that needs a static
8505 chain. Since a function with a tail call may use any caller-saved
8506 register in the epilogue, DRAP must not use a caller-saved
8507 register in that case. */
8508 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8515 /* Use DI for a nested function or a function that needs a static
8516 chain. Since a function with a tail call may use any caller-saved
8517 register in the epilogue, DRAP must not use a caller-saved
8518 register in that case. */
8519 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8522 /* Reuse the static chain register if it isn't used for parameter
8523 passing. */
8524 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
8525 && !lookup_attribute ("fastcall",
8526 TYPE_ATTRIBUTES (TREE_TYPE (decl)))
8527 && !lookup_attribute ("thiscall",
8528 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
8535 /* Return minimum incoming stack alignment. */
8538 ix86_minimum_incoming_stack_boundary (bool sibcall)
8540 unsigned int incoming_stack_boundary;
8542 /* Prefer the one specified at command line. */
8543 if (ix86_user_incoming_stack_boundary)
8544 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
8545 /* In 32bit, use MIN_STACK_BOUNDARY for the incoming stack boundary
8546 if -mstackrealign is used, this isn't the sibcall check, and the
8547 estimated stack alignment is 128 bits. */
8550 && ix86_force_align_arg_pointer
8551 && crtl->stack_alignment_estimated == 128)
8552 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8554 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
8556 /* Incoming stack alignment can be changed on individual functions
8557 via force_align_arg_pointer attribute. We use the smallest
8558 incoming stack boundary. */
8559 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
8560 && lookup_attribute (ix86_force_align_arg_pointer_string,
8561 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
8562 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8564 /* The incoming stack frame has to be aligned at least at
8565 parm_stack_boundary. */
8566 if (incoming_stack_boundary < crtl->parm_stack_boundary)
8567 incoming_stack_boundary = crtl->parm_stack_boundary;
8569 /* Stack at entrance of main is aligned by runtime. We use the
8570 smallest incoming stack boundary. */
8571 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
8572 && DECL_NAME (current_function_decl)
8573 && MAIN_NAME_P (DECL_NAME (current_function_decl))
8574 && DECL_FILE_SCOPE_P (current_function_decl))
8575 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
8577 return incoming_stack_boundary;
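/* A condensed sketch (not GCC code) of the precedence applied above;
   all parameter and function names here are hypothetical.  The real
   function additionally lowers the boundary for main () and for the
   force_align_arg_pointer / -mstackrealign cases shown.  */

static unsigned int
example_incoming_boundary (unsigned int user_specified,
			   unsigned int target_default,
			   unsigned int parm_boundary)
{
  /* A value given on the command line wins over the target default.  */
  unsigned int b = user_specified ? user_specified : target_default;

  /* The incoming frame must be aligned at least at parm_boundary.  */
  if (b < parm_boundary)
    b = parm_boundary;
  return b;
}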
8580 /* Update incoming stack boundary and estimated stack alignment. */
8583 ix86_update_stack_boundary (void)
8585 ix86_incoming_stack_boundary
8586 = ix86_minimum_incoming_stack_boundary (false);
8588 /* x86_64 varargs need 16-byte stack alignment for the register save
8589 area. */
8592 && crtl->stack_alignment_estimated < 128)
8593 crtl->stack_alignment_estimated = 128;
8596 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
8597 needed or an rtx for DRAP otherwise. */
8600 ix86_get_drap_rtx (void)
8602 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
8603 crtl->need_drap = true;
8605 if (stack_realign_drap)
8607 /* Assign DRAP to vDRAP and return vDRAP. */
8608 unsigned int regno = find_drap_reg ();
8613 arg_ptr = gen_rtx_REG (Pmode, regno);
8614 crtl->drap_reg = arg_ptr;
8617 drap_vreg = copy_to_reg (arg_ptr);
8621 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
8624 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
8625 RTX_FRAME_RELATED_P (insn) = 1;
8633 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
8636 ix86_internal_arg_pointer (void)
8638 return virtual_incoming_args_rtx;
8641 /* Finalize stack_realign_needed flag, which will guide prologue/epilogue
8642 to be generated in correct form. */
8644 ix86_finalize_stack_realign_flags (void)
8646 /* Check if stack realign is really needed after reload, and
8647 store the result in cfun. */
8648 unsigned int incoming_stack_boundary
8649 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
8650 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
8651 unsigned int stack_realign = (incoming_stack_boundary
8652 < (current_function_is_leaf
8653 ? crtl->max_used_stack_slot_alignment
8654 : crtl->stack_alignment_needed));
8656 if (crtl->stack_realign_finalized)
8658 /* After stack_realign_needed is finalized, we can no longer
8659 change it. */
8660 gcc_assert (crtl->stack_realign_needed == stack_realign);
8664 crtl->stack_realign_needed = stack_realign;
8665 crtl->stack_realign_finalized = true;
8669 /* Expand the prologue into a bunch of separate insns. */
8672 ix86_expand_prologue (void)
8676 struct ix86_frame frame;
8677 HOST_WIDE_INT allocate;
8678 int gen_frame_pointer = frame_pointer_needed;
8680 ix86_finalize_stack_realign_flags ();
8682 /* DRAP should not coexist with stack_realign_fp */
8683 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
8685 /* Initialize CFA state for before the prologue. */
8686 ix86_cfa_state->reg = stack_pointer_rtx;
8687 ix86_cfa_state->offset = INCOMING_FRAME_SP_OFFSET;
8689 ix86_compute_frame_layout (&frame);
8691 if (ix86_function_ms_hook_prologue (current_function_decl))
8695 /* Make sure the function starts with
8696 8b ff movl.s %edi,%edi
8697 55 push %ebp
8698 8b ec movl.s %esp,%ebp
8700 This matches the hookable function prologue in Win32 API
8701 functions in Microsoft Windows XP Service Pack 2 and newer.
8702 Wine uses this to enable Windows apps to hook the Win32 API
8703 functions provided by Wine. */
8704 insn = emit_insn (gen_vswapmov (gen_rtx_REG (SImode, DI_REG),
8705 gen_rtx_REG (SImode, DI_REG)));
8706 push = emit_insn (gen_push (hard_frame_pointer_rtx));
8707 mov = emit_insn (gen_vswapmov (hard_frame_pointer_rtx,
8708 stack_pointer_rtx));
8710 if (frame_pointer_needed && !(crtl->drap_reg
8711 && crtl->stack_realign_needed))
8713 /* The push %ebp and movl.s %esp, %ebp already set up
8714 the frame pointer. No need to do this again. */
8715 gen_frame_pointer = 0;
8716 RTX_FRAME_RELATED_P (push) = 1;
8717 RTX_FRAME_RELATED_P (mov) = 1;
8718 if (ix86_cfa_state->reg == stack_pointer_rtx)
8719 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8722 /* If the frame pointer is not needed, pop %ebp again. This
8723 could be optimized for cases where ebp needs to be backed up
8724 for some other reason. If stack realignment is needed, pop
8725 the base pointer again, align the stack, and later regenerate
8726 the frame pointer setup. The frame pointer generated by the
8727 hook prologue is not aligned, so it can't be used. */
8728 insn = emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8731 /* The first insn of a function that accepts its static chain on the
8732 stack is to push the register that would be filled in by a direct
8733 call. This insn will be skipped by the trampoline. */
8734 if (ix86_static_chain_on_stack)
8738 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
8739 emit_insn (gen_blockage ());
8741 /* We don't want to interpret this push insn as a register save,
8742 only as a stack adjustment. The real copy of the register as
8743 a save will be done later, if needed. */
8744 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
8745 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8746 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8747 RTX_FRAME_RELATED_P (insn) = 1;
8750 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
8751 DRAP is needed and stack realignment is really needed after reload. */
8752 if (crtl->drap_reg && crtl->stack_realign_needed)
8755 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8756 int param_ptr_offset = UNITS_PER_WORD;
8758 if (ix86_static_chain_on_stack)
8759 param_ptr_offset += UNITS_PER_WORD;
8760 if (!call_used_regs[REGNO (crtl->drap_reg)])
8761 param_ptr_offset += UNITS_PER_WORD;
8763 gcc_assert (stack_realign_drap);
8765 /* Grab the argument pointer. */
8766 x = plus_constant (stack_pointer_rtx, param_ptr_offset);
8769 /* Only need to push the parameter pointer reg if it is caller
8770 saved. */
8771 if (!call_used_regs[REGNO (crtl->drap_reg)])
8773 /* Push arg pointer reg */
8774 insn = emit_insn (gen_push (y));
8775 RTX_FRAME_RELATED_P (insn) = 1;
8778 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
8779 RTX_FRAME_RELATED_P (insn) = 1;
8780 ix86_cfa_state->reg = crtl->drap_reg;
8782 /* Align the stack. */
8783 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8785 GEN_INT (-align_bytes)));
8786 RTX_FRAME_RELATED_P (insn) = 1;
8788 /* Replicate the return address on the stack so that the return
8789 address can be reached via the (argp - 1) slot. This is needed
8790 to implement the macro RETURN_ADDR_RTX and the intrinsic function
8791 expand_builtin_return_addr, etc. */
8793 x = gen_frame_mem (Pmode,
8794 plus_constant (x, -UNITS_PER_WORD));
8795 insn = emit_insn (gen_push (x));
8796 RTX_FRAME_RELATED_P (insn) = 1;
8799 /* Note: AT&T enter does NOT have reversed args. Enter is probably
8800 slower on all targets. Also sdb doesn't like it. */
8802 if (gen_frame_pointer)
8804 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
8805 RTX_FRAME_RELATED_P (insn) = 1;
8807 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8808 RTX_FRAME_RELATED_P (insn) = 1;
8810 if (ix86_cfa_state->reg == stack_pointer_rtx)
8811 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8814 if (stack_realign_fp)
8816 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8817 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
8819 /* Align the stack. */
8820 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8822 GEN_INT (-align_bytes)));
8823 RTX_FRAME_RELATED_P (insn) = 1;
8826 allocate = frame.to_allocate + frame.nsseregs * 16 + frame.padding0;
8828 if (!frame.save_regs_using_mov)
8829 ix86_emit_save_regs ();
8831 allocate += frame.nregs * UNITS_PER_WORD;
8833 /* When using the red zone we may start register saving before allocating
8834 the stack frame, saving one cycle of the prologue. However I will
8835 avoid doing this if I am going to have to probe the stack since
8836 at least on x86_64 the stack probe can turn into a call that clobbers
8837 a red zone location. */
8838 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
8839 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
8840 ix86_emit_save_regs_using_mov ((frame_pointer_needed
8841 && !crtl->stack_realign_needed)
8842 ? hard_frame_pointer_rtx
8843 : stack_pointer_rtx,
8844 -frame.nregs * UNITS_PER_WORD);
8848 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
8849 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8850 GEN_INT (-allocate), -1,
8851 ix86_cfa_state->reg == stack_pointer_rtx);
8854 rtx eax = gen_rtx_REG (Pmode, AX_REG);
8858 if (cfun->machine->call_abi == MS_ABI)
8861 eax_live = ix86_eax_live_at_start_p ();
8865 emit_insn (gen_push (eax));
8866 allocate -= UNITS_PER_WORD;
8869 emit_move_insn (eax, GEN_INT (allocate));
8872 insn = gen_allocate_stack_worker_64 (eax, eax);
8874 insn = gen_allocate_stack_worker_32 (eax, eax);
8875 insn = emit_insn (insn);
8877 if (ix86_cfa_state->reg == stack_pointer_rtx)
8879 ix86_cfa_state->offset += allocate;
8880 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
8881 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8882 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8883 RTX_FRAME_RELATED_P (insn) = 1;
8888 if (frame_pointer_needed)
8889 t = plus_constant (hard_frame_pointer_rtx,
8892 - frame.nregs * UNITS_PER_WORD);
8894 t = plus_constant (stack_pointer_rtx, allocate);
8895 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8899 if (frame.save_regs_using_mov
8900 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8901 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8903 if (!frame_pointer_needed
8904 || !(frame.to_allocate + frame.padding0)
8905 || crtl->stack_realign_needed)
8906 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8908 + frame.nsseregs * 16 + frame.padding0);
8910 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8911 -frame.nregs * UNITS_PER_WORD);
8913 if (!frame_pointer_needed
8914 || !(frame.to_allocate + frame.padding0)
8915 || crtl->stack_realign_needed)
8916 ix86_emit_save_sse_regs_using_mov (stack_pointer_rtx,
8919 ix86_emit_save_sse_regs_using_mov (hard_frame_pointer_rtx,
8920 - frame.nregs * UNITS_PER_WORD
8921 - frame.nsseregs * 16
8924 pic_reg_used = false;
8925 if (pic_offset_table_rtx
8926 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8929 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8931 if (alt_pic_reg_used != INVALID_REGNUM)
8932 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8934 pic_reg_used = true;
8941 if (ix86_cmodel == CM_LARGE_PIC)
8943 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8944 rtx label = gen_label_rtx ();
8946 LABEL_PRESERVE_P (label) = 1;
8947 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8948 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8949 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8950 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8951 pic_offset_table_rtx, tmp_reg));
8954 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8957 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
8960 /* In the pic_reg_used case, make sure that the got load isn't deleted
8961 when mcount needs it. Blockage to avoid call movement across mcount
8962 call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
8963 note. */
8964 if (crtl->profile && pic_reg_used)
8965 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8967 if (crtl->drap_reg && !crtl->stack_realign_needed)
8969 /* vDRAP is set up, but after reload it turns out stack realignment
8970 isn't necessary; here we emit prologue code to set up DRAP
8971 without the stack realign adjustment. */
8973 int drap_bp_offset = UNITS_PER_WORD * 2;
8975 if (ix86_static_chain_on_stack)
8976 drap_bp_offset += UNITS_PER_WORD;
8977 x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8978 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8981 /* Prevent instructions from being scheduled into register save push
8982 sequence when access to the redzone area is done through frame pointer.
8983 The offset between the frame pointer and the stack pointer is calculated
8984 relative to the value of the stack pointer at the end of the function
8985 prologue, and moving instructions that access redzone area via frame
8986 pointer inside push sequence violates this assumption. */
8987 if (frame_pointer_needed && frame.red_zone_size)
8988 emit_insn (gen_memory_blockage ());
8990 /* Emit cld instruction if stringops are used in the function. */
8991 if (TARGET_CLD && ix86_current_function_needs_cld)
8992 emit_insn (gen_cld ());
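/* Schematic result of the above (hedged, assuming a simple 32-bit
   function with a frame pointer, one call-saved register, and no
   realignment, DRAP, PIC setup or stack probing):

       pushl  %ebp
       movl   %esp, %ebp
       subl   $allocate, %esp
       movl   %ebx, -4(%ebp)        ; when save_regs_using_mov,
                                    ; else "pushl %ebx" before the sub

   The register and offset are illustrative only.  */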
8995 /* Emit code to restore REG using a POP insn. */
8998 ix86_emit_restore_reg_using_pop (rtx reg, HOST_WIDE_INT red_offset)
9000 rtx insn = emit_insn (ix86_gen_pop1 (reg));
9002 if (ix86_cfa_state->reg == crtl->drap_reg
9003 && REGNO (reg) == REGNO (crtl->drap_reg))
9005 /* Previously we'd represented the CFA as an expression
9006 like *(%ebp - 8). We've just popped that value from
9007 the stack, which means we need to reset the CFA to
9008 the drap register. This will remain until we restore
9009 the stack pointer. */
9010 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
9011 RTX_FRAME_RELATED_P (insn) = 1;
9015 if (ix86_cfa_state->reg == stack_pointer_rtx)
9017 ix86_cfa_state->offset -= UNITS_PER_WORD;
9018 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9019 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
9020 RTX_FRAME_RELATED_P (insn) = 1;
9023 /* When the frame pointer is the CFA, and we pop it, we are
9024 swapping back to the stack pointer as the CFA. This happens
9025 for stack frames that don't allocate other data, so we assume
9026 the stack pointer is now pointing at the return address, i.e.
9027 the function entry state, which makes the offset be 1 word. */
9028 else if (ix86_cfa_state->reg == hard_frame_pointer_rtx
9029 && reg == hard_frame_pointer_rtx)
9031 ix86_cfa_state->reg = stack_pointer_rtx;
9032 ix86_cfa_state->offset -= UNITS_PER_WORD;
9034 add_reg_note (insn, REG_CFA_DEF_CFA,
9035 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
9036 GEN_INT (ix86_cfa_state->offset)));
9037 RTX_FRAME_RELATED_P (insn) = 1;
9040 ix86_add_cfa_restore_note (insn, reg, red_offset);
9043 /* Emit code to restore saved registers using POP insns. */
9046 ix86_emit_restore_regs_using_pop (HOST_WIDE_INT red_offset)
9050 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9051 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
9053 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno),
9055 red_offset += UNITS_PER_WORD;
9059 /* Emit code and notes for the LEAVE instruction. */
9062 ix86_emit_leave (HOST_WIDE_INT red_offset)
9064 rtx insn = emit_insn (ix86_gen_leave ());
9066 ix86_add_queued_cfa_restore_notes (insn);
9068 if (ix86_cfa_state->reg == hard_frame_pointer_rtx)
9070 ix86_cfa_state->reg = stack_pointer_rtx;
9071 ix86_cfa_state->offset -= UNITS_PER_WORD;
9073 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9074 copy_rtx (XVECEXP (PATTERN (insn), 0, 0)));
9075 RTX_FRAME_RELATED_P (insn) = 1;
9076 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx, red_offset);
9080 /* Emit code to restore saved registers using MOV insns. First register
9081 is restored from POINTER + OFFSET. */
9083 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
9084 HOST_WIDE_INT red_offset,
9085 int maybe_eh_return)
9088 rtx base_address = gen_rtx_MEM (Pmode, pointer);
9091 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9092 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
9094 rtx reg = gen_rtx_REG (Pmode, regno);
9096 /* Ensure that adjust_address won't be forced to produce pointer
9097 out of range allowed by x86-64 instruction set. */
9098 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
9102 r11 = gen_rtx_REG (DImode, R11_REG);
9103 emit_move_insn (r11, GEN_INT (offset));
9104 emit_insn (gen_adddi3 (r11, r11, pointer));
9105 base_address = gen_rtx_MEM (Pmode, r11);
9108 insn = emit_move_insn (reg,
9109 adjust_address (base_address, Pmode, offset));
9110 offset += UNITS_PER_WORD;
9112 if (ix86_cfa_state->reg == crtl->drap_reg
9113 && regno == REGNO (crtl->drap_reg))
9115 /* Previously we'd represented the CFA as an expression
9116 like *(%ebp - 8). We've just popped that value from
9117 the stack, which means we need to reset the CFA to
9118 the drap register. This will remain until we restore
9119 the stack pointer. */
9120 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
9121 RTX_FRAME_RELATED_P (insn) = 1;
9124 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
9126 red_offset += UNITS_PER_WORD;
9130 /* Emit code to restore saved registers using MOV insns. First register
9131 is restored from POINTER + OFFSET. */
9133 ix86_emit_restore_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
9134 HOST_WIDE_INT red_offset,
9135 int maybe_eh_return)
9138 rtx base_address = gen_rtx_MEM (TImode, pointer);
9141 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9142 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
9144 rtx reg = gen_rtx_REG (TImode, regno);
9146 /* Ensure that adjust_address won't be forced to produce pointer
9147 out of range allowed by x86-64 instruction set. */
9148 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
9152 r11 = gen_rtx_REG (DImode, R11_REG);
9153 emit_move_insn (r11, GEN_INT (offset));
9154 emit_insn (gen_adddi3 (r11, r11, pointer));
9155 base_address = gen_rtx_MEM (TImode, r11);
9158 mem = adjust_address (base_address, TImode, offset);
9159 set_mem_align (mem, 128);
9160 emit_move_insn (reg, mem);
9163 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
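/* Illustrative note on the r11 fallback used in both restore loops
   above: x86-64 memory operands take a sign-extended 32-bit
   displacement, so an offset outside [-2^31, 2^31) cannot be encoded
   directly and is first materialized in a register, schematically:

       movabs $offset, %r11
       add    %rsp, %r11
       mov    (%r11), %reg

   (hedged pseudo-assembly; the actual insns come from the move
   patterns and gen_adddi3 as shown above).  */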
9169 /* Restore function stack, frame, and registers. */
9172 ix86_expand_epilogue (int style)
9175 struct ix86_frame frame;
9176 HOST_WIDE_INT offset, red_offset;
9177 struct machine_cfa_state cfa_state_save = *ix86_cfa_state;
9180 ix86_finalize_stack_realign_flags ();
9182 /* When stack is realigned, SP must be valid. */
9183 sp_valid = (!frame_pointer_needed
9184 || current_function_sp_is_unchanging
9185 || stack_realign_fp);
9187 ix86_compute_frame_layout (&frame);
9189 /* See the comment about red zone and frame
9190 pointer usage in ix86_expand_prologue. */
9191 if (frame_pointer_needed && frame.red_zone_size)
9192 emit_insn (gen_memory_blockage ());
9194 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
9195 gcc_assert (!using_drap || ix86_cfa_state->reg == crtl->drap_reg);
9197 /* Calculate start of saved registers relative to ebp. Special care
9198 must be taken for the normal return case of a function using
9199 eh_return: the eax and edx registers are marked as saved, but not
9200 restored along this path. */
9201 offset = frame.nregs;
9202 if (crtl->calls_eh_return && style != 2)
9204 offset *= -UNITS_PER_WORD;
9205 offset -= frame.nsseregs * 16 + frame.padding0;
9207 /* Calculate start of saved registers relative to esp on entry of the
9208 function. When realigning stack, this needs to be the most negative
9209 value possible at runtime. */
9210 red_offset = offset;
9212 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
9214 else if (stack_realign_fp)
9215 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
9217 if (ix86_static_chain_on_stack)
9218 red_offset -= UNITS_PER_WORD;
9219 if (frame_pointer_needed)
9220 red_offset -= UNITS_PER_WORD;
9222 /* If we're only restoring one register and sp is not valid then
9223 use a move instruction to restore the register, since it's
9224 less work than reloading sp and popping the register.
9226 The default code results in a stack adjustment using an add/lea
9227 instruction, while this code results in a LEAVE instruction (or
9228 discrete equivalent), so it is profitable in some other cases as
9229 well, especially when there are no registers to restore. We also
9230 use this code when TARGET_USE_LEAVE and there is exactly one
9231 register to pop. This heuristic may need some tuning in future. */
9232 if ((!sp_valid && (frame.nregs + frame.nsseregs) <= 1)
9233 || (TARGET_EPILOGUE_USING_MOVE
9234 && cfun->machine->use_fast_prologue_epilogue
9235 && ((frame.nregs + frame.nsseregs) > 1
9236 || (frame.to_allocate + frame.padding0) != 0))
9237 || (frame_pointer_needed && !(frame.nregs + frame.nsseregs)
9238 && (frame.to_allocate + frame.padding0) != 0)
9239 || (frame_pointer_needed && TARGET_USE_LEAVE
9240 && cfun->machine->use_fast_prologue_epilogue
9241 && (frame.nregs + frame.nsseregs) == 1)
9242 || crtl->calls_eh_return)
9244 /* Restore registers. We can use ebp or esp to address the memory
9245 locations. If both are available, default to ebp, since offsets
9246 are known to be small. The only exception is esp pointing directly
9247 to the end of the block of saved registers, where we may simplify
9248 the addressing mode.
9250 If we are realigning the stack with bp and sp, the register restores
9251 can't be addressed by bp; sp must be used instead. */
9253 if (!frame_pointer_needed
9254 || (sp_valid && !(frame.to_allocate + frame.padding0))
9255 || stack_realign_fp)
9257 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9258 frame.to_allocate, red_offset,
9260 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
9262 + frame.nsseregs * 16
9265 + frame.nsseregs * 16
9266 + frame.padding0, style == 2);
9270 ix86_emit_restore_sse_regs_using_mov (hard_frame_pointer_rtx,
9273 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
9275 + frame.nsseregs * 16
9278 + frame.nsseregs * 16
9279 + frame.padding0, style == 2);
9282 red_offset -= offset;
9284 /* eh_return epilogues need %ecx added to the stack pointer. */
9287 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
9289 /* Stack align doesn't work with eh_return. */
9290 gcc_assert (!crtl->stack_realign_needed);
9291 /* Neither do regparm nested functions. */
9292 gcc_assert (!ix86_static_chain_on_stack);
9294 if (frame_pointer_needed)
9296 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
9297 tmp = plus_constant (tmp, UNITS_PER_WORD);
9298 tmp = emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
9300 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
9301 tmp = emit_move_insn (hard_frame_pointer_rtx, tmp);
9303 /* Note that we use SA as a temporary CFA, as the return
9304 address is at the proper place relative to it. We
9305 pretend this happens at the FP restore insn because
9306 prior to this insn the FP would be stored at the wrong
9307 offset relative to SA, and after this insn we have no
9308 other reasonable register to use for the CFA. We don't
9309 bother resetting the CFA to the SP for the duration of
9311 add_reg_note (tmp, REG_CFA_DEF_CFA,
9312 plus_constant (sa, UNITS_PER_WORD));
9313 ix86_add_queued_cfa_restore_notes (tmp);
9314 add_reg_note (tmp, REG_CFA_RESTORE, hard_frame_pointer_rtx);
9315 RTX_FRAME_RELATED_P (tmp) = 1;
9316 ix86_cfa_state->reg = sa;
9317 ix86_cfa_state->offset = UNITS_PER_WORD;
9319 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
9320 const0_rtx, style, false);
9324 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
9325 tmp = plus_constant (tmp, (frame.to_allocate
9326 + frame.nregs * UNITS_PER_WORD
9327 + frame.nsseregs * 16
9329 tmp = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
9330 ix86_add_queued_cfa_restore_notes (tmp);
9332 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9333 if (ix86_cfa_state->offset != UNITS_PER_WORD)
9335 ix86_cfa_state->offset = UNITS_PER_WORD;
9336 add_reg_note (tmp, REG_CFA_DEF_CFA,
9337 plus_constant (stack_pointer_rtx,
9339 RTX_FRAME_RELATED_P (tmp) = 1;
9343 else if (!frame_pointer_needed)
9344 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9345 GEN_INT (frame.to_allocate
9346 + frame.nregs * UNITS_PER_WORD
9347 + frame.nsseregs * 16
9349 style, !using_drap);
9350 /* If not an i386, mov & pop is faster than "leave". */
9351 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
9352 || !cfun->machine->use_fast_prologue_epilogue)
9353 ix86_emit_leave (red_offset);
9356 pro_epilogue_adjust_stack (stack_pointer_rtx,
9357 hard_frame_pointer_rtx,
9358 const0_rtx, style, !using_drap);
9360 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx, red_offset);
9365 /* First step is to deallocate the stack frame so that we can
9366 restore the registers.
9368 If we realign the stack with the frame pointer, then the stack
9369 pointer won't be recoverable via lea $offset(%bp), %sp, because
9370 there is a padding area between bp and sp for the realignment;
9371 "add $to_allocate, %sp" must be used instead. */
9374 gcc_assert (frame_pointer_needed);
9375 gcc_assert (!stack_realign_fp);
9376 pro_epilogue_adjust_stack (stack_pointer_rtx,
9377 hard_frame_pointer_rtx,
9378 GEN_INT (offset), style, false);
9379 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9382 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9383 GEN_INT (frame.nsseregs * 16
9387 else if (frame.to_allocate || frame.padding0 || frame.nsseregs)
9389 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9390 frame.to_allocate, red_offset,
9392 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9393 GEN_INT (frame.to_allocate
9394 + frame.nsseregs * 16
9395 + frame.padding0), style,
9396 !using_drap && !frame_pointer_needed);
9399 ix86_emit_restore_regs_using_pop (red_offset + frame.nsseregs * 16
9401 red_offset -= offset;
9403 if (frame_pointer_needed)
9405 /* Leave results in shorter dependency chains on CPUs that are
9406 able to grok it fast. */
9407 if (TARGET_USE_LEAVE)
9408 ix86_emit_leave (red_offset);
9411 /* If stack realignment really happened, recovering the stack
9412 pointer via the hard frame pointer is a must if we are not
9413 using leave. */
9414 if (stack_realign_fp)
9415 pro_epilogue_adjust_stack (stack_pointer_rtx,
9416 hard_frame_pointer_rtx,
9417 const0_rtx, style, !using_drap);
9418 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx,
9426 int param_ptr_offset = UNITS_PER_WORD;
9429 gcc_assert (stack_realign_drap);
9431 if (ix86_static_chain_on_stack)
9432 param_ptr_offset += UNITS_PER_WORD;
9433 if (!call_used_regs[REGNO (crtl->drap_reg)])
9434 param_ptr_offset += UNITS_PER_WORD;
9436 insn = emit_insn (gen_rtx_SET
9437 (VOIDmode, stack_pointer_rtx,
9438 gen_rtx_PLUS (Pmode,
9440 GEN_INT (-param_ptr_offset))));
9441 ix86_cfa_state->reg = stack_pointer_rtx;
9442 ix86_cfa_state->offset = param_ptr_offset;
9444 add_reg_note (insn, REG_CFA_DEF_CFA,
9445 gen_rtx_PLUS (Pmode, ix86_cfa_state->reg,
9446 GEN_INT (ix86_cfa_state->offset)));
9447 RTX_FRAME_RELATED_P (insn) = 1;
9449 if (!call_used_regs[REGNO (crtl->drap_reg)])
9450 ix86_emit_restore_reg_using_pop (crtl->drap_reg, -UNITS_PER_WORD);
9453 /* Remove the saved static chain from the stack. The use of ECX is
9454 merely as a scratch register, not as the actual static chain. */
9455 if (ix86_static_chain_on_stack)
9459 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9460 ix86_cfa_state->offset += UNITS_PER_WORD;
9462 r = gen_rtx_REG (Pmode, CX_REG);
9463 insn = emit_insn (ix86_gen_pop1 (r));
9465 r = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
9466 r = gen_rtx_SET (VOIDmode, stack_pointer_rtx, r);
9467 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9468 RTX_FRAME_RELATED_P (insn) = 1;
9471 /* Sibcall epilogues don't want a return instruction. */
9474 *ix86_cfa_state = cfa_state_save;
9478 if (crtl->args.pops_args && crtl->args.size)
9480 rtx popc = GEN_INT (crtl->args.pops_args);
9482 /* i386 can only pop 64K bytes. If asked to pop more, pop return
9483 address, do explicit add, and jump indirectly to the caller. */
9485 if (crtl->args.pops_args >= 65536)
9487 rtx ecx = gen_rtx_REG (SImode, CX_REG);
9490 /* There is no "pascal" calling convention in any 64bit ABI. */
9491 gcc_assert (!TARGET_64BIT);
9493 insn = emit_insn (gen_popsi1 (ecx));
9494 ix86_cfa_state->offset -= UNITS_PER_WORD;
9496 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9497 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
9498 add_reg_note (insn, REG_CFA_REGISTER,
9499 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
9500 RTX_FRAME_RELATED_P (insn) = 1;
9502 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9504 emit_jump_insn (gen_return_indirect_internal (ecx));
9507 emit_jump_insn (gen_return_pop_internal (popc));
9510 emit_jump_insn (gen_return_internal ());
9512 /* Restore the state back to the state from the prologue,
9513 so that it's correct for the next epilogue. */
9514 *ix86_cfa_state = cfa_state_save;
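/* Schematic result of the above (hedged, for the common frame-pointer
   case on a 32-bit target with one saved register):

       movl  -4(%ebp), %ebx        ; restore via mov, or "popl %ebx"
       leave                       ; when TARGET_USE_LEAVE
       ret                         ; or "ret $N" when pops_args != 0

   The register and offset are illustrative only.  */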
9517 /* Reset from the function's potential modifications. */
9520 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9521 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9523 if (pic_offset_table_rtx)
9524 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
9526 /* Mach-O doesn't support labels at the end of objects, so if
9527 it looks like we might want one, insert a NOP. */
9529 rtx insn = get_last_insn ();
9532 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
9533 insn = PREV_INSN (insn);
9537 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
9538 fputs ("\tnop\n", file);
9544 /* Extract the parts of an RTL expression that is a valid memory address
9545 for an instruction. Return 0 if the structure of the address is
9546 grossly off. Return -1 if the address contains ASHIFT, so it is not
9547 strictly valid, but is still used for computing the length of an lea
9548 instruction. */
9550 ix86_decompose_address (rtx addr, struct ix86_address *out)
9552 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
9553 rtx base_reg, index_reg;
9554 HOST_WIDE_INT scale = 1;
9555 rtx scale_rtx = NULL_RTX;
9558 enum ix86_address_seg seg = SEG_DEFAULT;
9560 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
9562 else if (GET_CODE (addr) == PLUS)
9572 addends[n++] = XEXP (op, 1);
9575 while (GET_CODE (op) == PLUS);
9580 for (i = n; i >= 0; --i)
9583 switch (GET_CODE (op))
9588 index = XEXP (op, 0);
9589 scale_rtx = XEXP (op, 1);
9595 index = XEXP (op, 0);
9597 if (!CONST_INT_P (tmp))
9599 scale = INTVAL (tmp);
9600 if ((unsigned HOST_WIDE_INT) scale > 3)
9606 if (XINT (op, 1) == UNSPEC_TP
9607 && TARGET_TLS_DIRECT_SEG_REFS
9608 && seg == SEG_DEFAULT)
9609 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
9638 else if (GET_CODE (addr) == MULT)
9640 index = XEXP (addr, 0); /* index*scale */
9641 scale_rtx = XEXP (addr, 1);
9643 else if (GET_CODE (addr) == ASHIFT)
9645 /* We're called for lea too, which implements ashift on occasion. */
9646 index = XEXP (addr, 0);
9647 tmp = XEXP (addr, 1);
9648 if (!CONST_INT_P (tmp))
9650 scale = INTVAL (tmp);
9651 if ((unsigned HOST_WIDE_INT) scale > 3)
9657 disp = addr; /* displacement */
9659 /* Extract the integral value of scale. */
9662 if (!CONST_INT_P (scale_rtx))
9664 scale = INTVAL (scale_rtx);
9667 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
9668 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
9670 /* Avoid useless 0 displacement. */
9671 if (disp == const0_rtx && (base || index))
9674 /* Allow arg pointer and stack pointer as index if there is no scaling. */
9675 if (base_reg && index_reg && scale == 1
9676 && (index_reg == arg_pointer_rtx
9677 || index_reg == frame_pointer_rtx
9678 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
9681 tmp = base, base = index, index = tmp;
9682 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
9685 /* Special case: %ebp cannot be encoded as a base without a displacement.
9686 Similarly %r13. */
9689 && (base_reg == hard_frame_pointer_rtx
9690 || base_reg == frame_pointer_rtx
9691 || base_reg == arg_pointer_rtx
9692 || (REG_P (base_reg)
9693 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
9694 || REGNO (base_reg) == R13_REG))))
9697 /* Special case: on K6, [%esi] causes the instruction to be vector
9698 decoded. Avoid this by transforming to [%esi+0].
9699 Reload calls address legitimization without cfun defined, so we need
9700 to test cfun for being non-NULL. */
9701 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
9702 && base_reg && !index_reg && !disp
9704 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
9707 /* Special case: encode reg+reg instead of reg*2. */
9708 if (!base && index && scale == 2)
9709 base = index, base_reg = index_reg, scale = 1;
9711 /* Special case: scaling cannot be encoded without base or displacement. */
9712 if (!base && !disp && index && scale != 1)
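/* Worked example (not part of the original source): for the operand
   12(%ebx,%ecx,4), i.e.

       (plus (plus (reg %ebx) (mult (reg %ecx) (const_int 4)))
             (const_int 12))

   the decomposition above yields base = %ebx, index = %ecx, scale = 4
   and disp = 12, with seg = SEG_DEFAULT.  */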
9724 /* Return the cost of the memory address x.
9725 For i386, it is better to use a complex address than let gcc copy
9726 the address into a reg and make a new pseudo. But not if the address
9727 requires two regs - that would mean more pseudos with longer
9728 lifetimes. */
9730 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
9732 struct ix86_address parts;
9734 int ok = ix86_decompose_address (x, &parts);
9738 if (parts.base && GET_CODE (parts.base) == SUBREG)
9739 parts.base = SUBREG_REG (parts.base);
9740 if (parts.index && GET_CODE (parts.index) == SUBREG)
9741 parts.index = SUBREG_REG (parts.index);
9743 /* Attempt to minimize number of registers in the address. */
9745 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
9747 && (!REG_P (parts.index)
9748 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
9752 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
9754 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
9755 && parts.base != parts.index)
9758 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
9759 since its predecode logic can't detect the length of instructions
9760 and they degenerate to vector decoding. Increase the cost of such
9761 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
9762 to split such addresses or even refuse such addresses at all.
9764 The following addressing modes are affected:
9765 [base + scale*index]
9766 [scale*index + disp]
9767 [base + index]
9769 The first and last case may be avoidable by explicitly coding the zero in
9770 the memory address, but I don't have an AMD-K6 machine handy to check this
9771 theory. */
9774 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
9775 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
9776 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
9782 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
9783 this is used to form addresses to local data when -fPIC is in
9784 effect. */
9787 darwin_local_data_pic (rtx disp)
9789 return (GET_CODE (disp) == UNSPEC
9790 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
9793 /* Determine if a given RTX is a valid constant. We already know this
9794 satisfies CONSTANT_P. */
9797 legitimate_constant_p (rtx x)
9799 switch (GET_CODE (x))
9804 if (GET_CODE (x) == PLUS)
9806 if (!CONST_INT_P (XEXP (x, 1)))
9811 if (TARGET_MACHO && darwin_local_data_pic (x))
9814 /* Only some unspecs are valid as "constants". */
9815 if (GET_CODE (x) == UNSPEC)
9816 switch (XINT (x, 1))
9821 return TARGET_64BIT;
9824 x = XVECEXP (x, 0, 0);
9825 return (GET_CODE (x) == SYMBOL_REF
9826 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9828 x = XVECEXP (x, 0, 0);
9829 return (GET_CODE (x) == SYMBOL_REF
9830 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
9835 /* We must have drilled down to a symbol. */
9836 if (GET_CODE (x) == LABEL_REF)
9838 if (GET_CODE (x) != SYMBOL_REF)
9843 /* TLS symbols are never valid. */
9844 if (SYMBOL_REF_TLS_MODEL (x))
9847 /* DLLIMPORT symbols are never valid. */
9848 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
9849 && SYMBOL_REF_DLLIMPORT_P (x))
9854 if (GET_MODE (x) == TImode
9855 && x != CONST0_RTX (TImode)
9861 if (!standard_sse_constant_p (x))
9868 /* Otherwise we handle everything else in the move patterns. */
9872 /* Determine if it's legal to put X into the constant pool. This
9873 is not possible for the address of thread-local symbols, which
9874 is checked above. */
9877 ix86_cannot_force_const_mem (rtx x)
9879 /* We can always put integral constants and vectors in memory. */
9880 switch (GET_CODE (x))
9890 return !legitimate_constant_p (x);
9894 /* Nonzero if the constant value X is a legitimate general operand
9895 when generating PIC code. It is given that flag_pic is on and
9896 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
9899 legitimate_pic_operand_p (rtx x)
9903 switch (GET_CODE (x))
9906 inner = XEXP (x, 0);
9907 if (GET_CODE (inner) == PLUS
9908 && CONST_INT_P (XEXP (inner, 1)))
9909 inner = XEXP (inner, 0);
9911 /* Only some unspecs are valid as "constants". */
9912 if (GET_CODE (inner) == UNSPEC)
9913 switch (XINT (inner, 1))
9918 return TARGET_64BIT;
9920 x = XVECEXP (inner, 0, 0);
9921 return (GET_CODE (x) == SYMBOL_REF
9922 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9923 case UNSPEC_MACHOPIC_OFFSET:
9924 return legitimate_pic_address_disp_p (x);
9932 return legitimate_pic_address_disp_p (x);
9939 /* Determine if a given CONST RTX is a valid memory displacement
9940 in PIC mode. */
9943 legitimate_pic_address_disp_p (rtx disp)
9947 /* In 64bit mode we can allow direct addresses of symbols and labels
9948 when they are not dynamic symbols. */
9951 rtx op0 = disp, op1;
9953 switch (GET_CODE (disp))
9959 if (GET_CODE (XEXP (disp, 0)) != PLUS)
9961 op0 = XEXP (XEXP (disp, 0), 0);
9962 op1 = XEXP (XEXP (disp, 0), 1);
9963 if (!CONST_INT_P (op1)
9964 || INTVAL (op1) >= 16*1024*1024
9965 || INTVAL (op1) < -16*1024*1024)
9967 if (GET_CODE (op0) == LABEL_REF)
9969 if (GET_CODE (op0) != SYMBOL_REF)
9974 /* TLS references should always be enclosed in UNSPEC. */
9975 if (SYMBOL_REF_TLS_MODEL (op0))
9977 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
9978 && ix86_cmodel != CM_LARGE_PIC)
9986 if (GET_CODE (disp) != CONST)
9988 disp = XEXP (disp, 0);
9992 /* It is unsafe to allow PLUS expressions; this limits the allowed
9993 distance of GOT table references. We should not need these anyway. */
9994 if (GET_CODE (disp) != UNSPEC
9995 || (XINT (disp, 1) != UNSPEC_GOTPCREL
9996 && XINT (disp, 1) != UNSPEC_GOTOFF
9997 && XINT (disp, 1) != UNSPEC_PLTOFF))
10000 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
10001 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
10007 if (GET_CODE (disp) == PLUS)
10009 if (!CONST_INT_P (XEXP (disp, 1)))
10011 disp = XEXP (disp, 0);
10015 if (TARGET_MACHO && darwin_local_data_pic (disp))
10018 if (GET_CODE (disp) != UNSPEC)
10021 switch (XINT (disp, 1))
10026 /* We need to check for both symbols and labels because VxWorks loads
10027 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
10028 details. */
10029 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
10030 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
10031 case UNSPEC_GOTOFF:
10032 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
10033 While the ABI also specifies a 32bit relocation, we don't produce it
10034 in the small PIC model at all. */
10035 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
10036 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
10038 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
10040 case UNSPEC_GOTTPOFF:
10041 case UNSPEC_GOTNTPOFF:
10042 case UNSPEC_INDNTPOFF:
10045 disp = XVECEXP (disp, 0, 0);
10046 return (GET_CODE (disp) == SYMBOL_REF
10047 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
10048 case UNSPEC_NTPOFF:
10049 disp = XVECEXP (disp, 0, 0);
10050 return (GET_CODE (disp) == SYMBOL_REF
10051 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
10052 case UNSPEC_DTPOFF:
10053 disp = XVECEXP (disp, 0, 0);
10054 return (GET_CODE (disp) == SYMBOL_REF
10055 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
10061 /* Recognizes RTL expressions that are valid memory addresses for an
10062 instruction. The MODE argument is the machine mode for the MEM
10063 expression that wants to use this address.
10065 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS
10066 should convert common non-canonical forms to canonical form so that
10067 they will be recognized. */
10070 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
10071 rtx addr, bool strict)
10073 struct ix86_address parts;
10074 rtx base, index, disp;
10075 HOST_WIDE_INT scale;
10077 if (ix86_decompose_address (addr, &parts) <= 0)
10078 /* Decomposition failed. */
10082 index = parts.index;
10084 scale = parts.scale;
10086 /* Validate base register.
10088 Don't allow SUBREG's that span more than a word here. It can lead to spill
10089 failures when the base is one word out of a two word structure, which is
10090 represented internally as a DImode int. */
10098 else if (GET_CODE (base) == SUBREG
10099 && REG_P (SUBREG_REG (base))
10100 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
10102 reg = SUBREG_REG (base);
10104 /* Base is not a register. */
10107 if (GET_MODE (base) != Pmode)
10108 /* Base is not in Pmode. */
10111 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
10112 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
10113 /* Base is not valid. */
10117 /* Validate index register.
10119 Don't allow SUBREG's that span more than a word here -- same as above. */
10127 else if (GET_CODE (index) == SUBREG
10128 && REG_P (SUBREG_REG (index))
10129 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
10131 reg = SUBREG_REG (index);
10133 /* Index is not a register. */
10136 if (GET_MODE (index) != Pmode)
10137 /* Index is not in Pmode. */
10140 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
10141 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
10142 /* Index is not valid. */
10146 /* Validate scale factor. */
10150 /* Scale without index. */
10153 if (scale != 2 && scale != 4 && scale != 8)
10154 /* Scale is not a valid multiplier. */
10158 /* Validate displacement. */
10161 if (GET_CODE (disp) == CONST
10162 && GET_CODE (XEXP (disp, 0)) == UNSPEC
10163 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
10164 switch (XINT (XEXP (disp, 0), 1))
10166 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
10167 used. While the ABI also specifies 32bit relocations, we don't
10168 produce them at all and use IP-relative addressing instead. */
10170 case UNSPEC_GOTOFF:
10171 gcc_assert (flag_pic);
10173 goto is_legitimate_pic;
10175 /* 64bit address unspec. */
10178 case UNSPEC_GOTPCREL:
10179 gcc_assert (flag_pic);
10180 goto is_legitimate_pic;
10182 case UNSPEC_GOTTPOFF:
10183 case UNSPEC_GOTNTPOFF:
10184 case UNSPEC_INDNTPOFF:
10185 case UNSPEC_NTPOFF:
10186 case UNSPEC_DTPOFF:
10190 /* Invalid address unspec. */
10194 else if (SYMBOLIC_CONST (disp)
10198 && MACHOPIC_INDIRECT
10199 && !machopic_operand_p (disp)
10205 if (TARGET_64BIT && (index || base))
10207 /* foo@dtpoff(%rX) is ok. */
10208 if (GET_CODE (disp) != CONST
10209 || GET_CODE (XEXP (disp, 0)) != PLUS
10210 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
10211 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
10212 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
10213 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
10214 /* Non-constant pic memory reference. */
10217 else if (! legitimate_pic_address_disp_p (disp))
10218 /* Displacement is an invalid pic construct. */
10221 /* This code used to verify that a symbolic pic displacement
10222 includes the pic_offset_table_rtx register.
10224 While this is a good idea, unfortunately these constructs may
10225 be created by the "adds using lea" optimization for incorrect
10234 This code is nonsensical, but results in addressing the
10235 GOT table with a pic_offset_table_rtx base. We can't
10236 just refuse it easily, since it gets matched by
10237 the "addsi3" pattern, which later gets split to lea in the
10238 case the output register differs from the input. While this
10239 can be handled by a separate addsi pattern for this case
10240 that never results in lea, disabling this test seems to be the
10241 easier and correct fix for the crash. */
10243 else if (GET_CODE (disp) != LABEL_REF
10244 && !CONST_INT_P (disp)
10245 && (GET_CODE (disp) != CONST
10246 || !legitimate_constant_p (disp))
10247 && (GET_CODE (disp) != SYMBOL_REF
10248 || !legitimate_constant_p (disp)))
10249 /* Displacement is not constant. */
10251 else if (TARGET_64BIT
10252 && !x86_64_immediate_operand (disp, VOIDmode))
10253 /* Displacement is out of range. */
10257 /* Everything looks valid. */
10261 /* Determine if a given RTX is a valid constant address. */
10264 constant_address_p (rtx x)
10266 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
10269 /* Return a unique alias set for the GOT. */
10271 static alias_set_type
10272 ix86_GOT_alias_set (void)
10274 static alias_set_type set = -1;
10276 if (set == -1)
    set = new_alias_set ();
  return set;
10280 /* Return a legitimate reference for ORIG (an address) using the
10281 register REG. If REG is 0, a new pseudo is generated.
10283 There are two types of references that must be handled:
10285 1. Global data references must load the address from the GOT, via
10286 the PIC reg. An insn is emitted to do this load, and the reg is
10289 2. Static data references, constant pool addresses, and code labels
10290 compute the address as an offset from the GOT, whose base is in
10291 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
10292 differentiate them from global data objects. The returned
10293 address is the PIC reg + an unspec constant.
10295 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
10296 reg also appears in the address. */
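/* An illustrative sketch of the two result shapes (assuming typical
   32-bit output; the exact insns depend on target and flags):

     global:  movl foo@GOT(%ebx), %reg    -- load the address via the GOT
     local:   leal foo@GOTOFF(%ebx), %reg -- PIC reg + unspec offset  */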
10299 legitimize_pic_address (rtx orig, rtx reg)
10302 rtx new_rtx = orig;
10306 if (TARGET_MACHO && !TARGET_64BIT)
10309 reg = gen_reg_rtx (Pmode);
10310 /* Use the generic Mach-O PIC machinery. */
10311 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
10315 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
10317 else if (TARGET_64BIT
10318 && ix86_cmodel != CM_SMALL_PIC
10319 && gotoff_operand (addr, Pmode))
10322 /* This symbol may be referenced via a displacement from the PIC
10323 base address (@GOTOFF). */
10325 if (reload_in_progress)
10326 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10327 if (GET_CODE (addr) == CONST)
10328 addr = XEXP (addr, 0);
10329 if (GET_CODE (addr) == PLUS)
10331 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10333 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10336 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10337 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10339 tmpreg = gen_reg_rtx (Pmode);
10342 emit_move_insn (tmpreg, new_rtx);
10346 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
10347 tmpreg, 1, OPTAB_DIRECT);
10350 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
10352 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
10354 /* This symbol may be referenced via a displacement from the PIC
10355 base address (@GOTOFF). */
10357 if (reload_in_progress)
10358 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10359 if (GET_CODE (addr) == CONST)
10360 addr = XEXP (addr, 0);
10361 if (GET_CODE (addr) == PLUS)
10363 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10365 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10368 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10369 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10370 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10374 emit_move_insn (reg, new_rtx);
10378 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
10379 /* We can't use @GOTOFF for text labels on VxWorks;
10380 see gotoff_operand. */
10381 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
10383 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10385 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
10386 return legitimize_dllimport_symbol (addr, true);
10387 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
10388 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
10389 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
10391 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
10392 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
10396 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
10398 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
10399 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10400 new_rtx = gen_const_mem (Pmode, new_rtx);
10401 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10404 reg = gen_reg_rtx (Pmode);
10405 /* Use gen_movsi directly; otherwise the address is loaded
10406 into a register for CSE. We don't want to CSE these addresses;
10407 instead we CSE addresses loaded from the GOT table, so skip this. */
10408 emit_insn (gen_movsi (reg, new_rtx));
10413 /* This symbol must be referenced via a load from the
10414 Global Offset Table (@GOT). */
10416 if (reload_in_progress)
10417 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10418 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
10419 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10421 new_rtx = force_reg (Pmode, new_rtx);
10422 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10423 new_rtx = gen_const_mem (Pmode, new_rtx);
10424 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10427 reg = gen_reg_rtx (Pmode);
10428 emit_move_insn (reg, new_rtx);
10434 if (CONST_INT_P (addr)
10435 && !x86_64_immediate_operand (addr, VOIDmode))
10439 emit_move_insn (reg, addr);
10443 new_rtx = force_reg (Pmode, addr);
10445 else if (GET_CODE (addr) == CONST)
10447 addr = XEXP (addr, 0);
10449 /* We must match stuff we generated before. Assume the only
10450 unspecs that can get here are ours. Not that we could do
10451 anything with them anyway.... */
10452 if (GET_CODE (addr) == UNSPEC
10453 || (GET_CODE (addr) == PLUS
10454 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
10456 gcc_assert (GET_CODE (addr) == PLUS);
10458 if (GET_CODE (addr) == PLUS)
10460 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
10462 /* Check first to see if this is a constant offset from a @GOTOFF
10463 symbol reference. */
10464 if (gotoff_operand (op0, Pmode)
10465 && CONST_INT_P (op1))
10469 if (reload_in_progress)
10470 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10471 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
10473 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
10474 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10475 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10479 emit_move_insn (reg, new_rtx);
10485 if (INTVAL (op1) < -16*1024*1024
10486 || INTVAL (op1) >= 16*1024*1024)
10488 if (!x86_64_immediate_operand (op1, Pmode))
10489 op1 = force_reg (Pmode, op1);
10490 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
10496 base = legitimize_pic_address (XEXP (addr, 0), reg);
10497 new_rtx = legitimize_pic_address (XEXP (addr, 1),
10498 base == reg ? NULL_RTX : reg);
10500 if (CONST_INT_P (new_rtx))
10501 new_rtx = plus_constant (base, INTVAL (new_rtx));
10504 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
10506 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
10507 new_rtx = XEXP (new_rtx, 1);
10509 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
10517 /* Load the thread pointer. If TO_REG is true, force it into a register. */
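/* Note: on GNU/Linux targets the thread pointer is conventionally
   reached through a segment register (%gs in 32-bit mode, %fs in
   64-bit mode), so the UNSPEC_TP built below stands for an access
   like "movl %gs:0, %reg".  This is a general sketch, not an
   exhaustive description of every target.  */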
10520 get_thread_pointer (int to_reg)
10524 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
10528 reg = gen_reg_rtx (Pmode);
10529 insn = gen_rtx_SET (VOIDmode, reg, tp);
10530 insn = emit_insn (insn);
10535 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
10536 false if we expect this to be used for a memory address and true if
10537 we expect to load the address into a register. */
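/* As a rough sketch of the sequences built below (assuming 64-bit,
   traditional non-GNU2 TLS; relocations and registers vary by
   target and model):

     global dynamic:  leaq x@tlsgd(%rip), %rdi; call __tls_get_addr
     initial exec:    movq x@gottpoff(%rip), %reg  + thread pointer
     local exec:      thread pointer + x@tpoff constant offset  */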
10540 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
10542 rtx dest, base, off, pic, tp;
10547 case TLS_MODEL_GLOBAL_DYNAMIC:
10548 dest = gen_reg_rtx (Pmode);
10549 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10551 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10553 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
10556 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
10557 insns = get_insns ();
10560 RTL_CONST_CALL_P (insns) = 1;
10561 emit_libcall_block (insns, dest, rax, x);
10563 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10564 emit_insn (gen_tls_global_dynamic_64 (dest, x));
10566 emit_insn (gen_tls_global_dynamic_32 (dest, x));
10568 if (TARGET_GNU2_TLS)
10570 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
10572 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10576 case TLS_MODEL_LOCAL_DYNAMIC:
10577 base = gen_reg_rtx (Pmode);
10578 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10580 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10582 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
10585 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
10586 insns = get_insns ();
10589 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
10590 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
10591 RTL_CONST_CALL_P (insns) = 1;
10592 emit_libcall_block (insns, base, rax, note);
10594 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10595 emit_insn (gen_tls_local_dynamic_base_64 (base));
10597 emit_insn (gen_tls_local_dynamic_base_32 (base));
10599 if (TARGET_GNU2_TLS)
10601 rtx x = ix86_tls_module_base ();
10603 set_unique_reg_note (get_last_insn (), REG_EQUIV,
10604 gen_rtx_MINUS (Pmode, x, tp));
10607 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
10608 off = gen_rtx_CONST (Pmode, off);
10610 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
10612 if (TARGET_GNU2_TLS)
10614 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
10616 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10621 case TLS_MODEL_INITIAL_EXEC:
10625 type = UNSPEC_GOTNTPOFF;
10629 if (reload_in_progress)
10630 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10631 pic = pic_offset_table_rtx;
10632 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
10634 else if (!TARGET_ANY_GNU_TLS)
10636 pic = gen_reg_rtx (Pmode);
10637 emit_insn (gen_set_got (pic));
10638 type = UNSPEC_GOTTPOFF;
10643 type = UNSPEC_INDNTPOFF;
10646 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
10647 off = gen_rtx_CONST (Pmode, off);
10649 off = gen_rtx_PLUS (Pmode, pic, off);
10650 off = gen_const_mem (Pmode, off);
10651 set_mem_alias_set (off, ix86_GOT_alias_set ());
10653 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10655 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10656 off = force_reg (Pmode, off);
10657 return gen_rtx_PLUS (Pmode, base, off);
10661 base = get_thread_pointer (true);
10662 dest = gen_reg_rtx (Pmode);
10663 emit_insn (gen_subsi3 (dest, base, off));
10667 case TLS_MODEL_LOCAL_EXEC:
10668 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
10669 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10670 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
10671 off = gen_rtx_CONST (Pmode, off);
10673 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10675 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10676 return gen_rtx_PLUS (Pmode, base, off);
10680 base = get_thread_pointer (true);
10681 dest = gen_reg_rtx (Pmode);
10682 emit_insn (gen_subsi3 (dest, base, off));
10687 gcc_unreachable ();
10693 /* Create or return the unique __imp_DECL dllimport symbol corresponding
   to DECL. */
10696 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
10697 htab_t dllimport_map;
10700 get_dllimport_decl (tree decl)
10702 struct tree_map *h, in;
10705 const char *prefix;
10706 size_t namelen, prefixlen;
10711 if (!dllimport_map)
10712 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
10714 in.hash = htab_hash_pointer (decl);
10715 in.base.from = decl;
10716 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
10717 h = (struct tree_map *) *loc;
10721 *loc = h = ggc_alloc_tree_map ();
10723 h->base.from = decl;
10724 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
10725 VAR_DECL, NULL, ptr_type_node);
10726 DECL_ARTIFICIAL (to) = 1;
10727 DECL_IGNORED_P (to) = 1;
10728 DECL_EXTERNAL (to) = 1;
10729 TREE_READONLY (to) = 1;
10731 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10732 name = targetm.strip_name_encoding (name);
10733 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
10734 ? "*__imp_" : "*__imp__";
10735 namelen = strlen (name);
10736 prefixlen = strlen (prefix);
10737 imp_name = (char *) alloca (namelen + prefixlen + 1);
10738 memcpy (imp_name, prefix, prefixlen);
10739 memcpy (imp_name + prefixlen, name, namelen + 1);
10741 name = ggc_alloc_string (imp_name, namelen + prefixlen);
10742 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
10743 SET_SYMBOL_REF_DECL (rtl, to);
10744 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
10746 rtl = gen_const_mem (Pmode, rtl);
10747 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
10749 SET_DECL_RTL (to, rtl);
10750 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
10755 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
10756 true if we require the result be a register. */
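/* For example, a reference to a dllimported "foo" is conceptually
   rewritten as a load through its import-table slot, *(&__imp_foo)
   (or __imp__foo when a user label prefix is in use).  */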
10759 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
10764 gcc_assert (SYMBOL_REF_DECL (symbol));
10765 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
10767 x = DECL_RTL (imp_decl);
10769 x = force_reg (Pmode, x);
10773 /* Try machine-dependent ways of modifying an illegitimate address
10774 to be legitimate. If we find one, return the new, valid address.
10775 This macro is used in only one place: `memory_address' in explow.c.
10777 OLDX is the address as it was before break_out_memory_refs was called.
10778 In some cases it is useful to look at this to decide what needs to be done.
10780 It is always safe for this macro to do nothing. It exists to recognize
10781 opportunities to optimize the output.
10783 For the 80386, we handle X+REG by loading X into a register R and
10784 using R+REG. R will go in a general reg and indexing will be used.
10785 However, if REG is a broken-out memory address or multiplication,
10786 nothing needs to be done because REG can certainly go in a general reg.
10788 When -fpic is used, special handling is needed for symbolic references.
10789 See comments by legitimize_pic_address in i386.c for details. */
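/* For instance, the ASHIFT canonicalization below rewrites
   (plus (ashift (reg) (const_int 2)) (reg)) into
   (plus (mult (reg) (const_int 4)) (reg)), which matches the
   hardware's scaled-index addressing mode.  */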
10792 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
10793 enum machine_mode mode)
10798 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
10800 return legitimize_tls_address (x, (enum tls_model) log, false);
10801 if (GET_CODE (x) == CONST
10802 && GET_CODE (XEXP (x, 0)) == PLUS
10803 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10804 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
10806 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
10807 (enum tls_model) log, false);
10808 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10811 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10813 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
10814 return legitimize_dllimport_symbol (x, true);
10815 if (GET_CODE (x) == CONST
10816 && GET_CODE (XEXP (x, 0)) == PLUS
10817 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10818 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
10820 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
10821 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10825 if (flag_pic && SYMBOLIC_CONST (x))
10826 return legitimize_pic_address (x, 0);
10828 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
10829 if (GET_CODE (x) == ASHIFT
10830 && CONST_INT_P (XEXP (x, 1))
10831 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
10834 log = INTVAL (XEXP (x, 1));
10835 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
10836 GEN_INT (1 << log));
10839 if (GET_CODE (x) == PLUS)
10841 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
10843 if (GET_CODE (XEXP (x, 0)) == ASHIFT
10844 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10845 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
10848 log = INTVAL (XEXP (XEXP (x, 0), 1));
10849 XEXP (x, 0) = gen_rtx_MULT (Pmode,
10850 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
10851 GEN_INT (1 << log));
10854 if (GET_CODE (XEXP (x, 1)) == ASHIFT
10855 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10856 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
10859 log = INTVAL (XEXP (XEXP (x, 1), 1));
10860 XEXP (x, 1) = gen_rtx_MULT (Pmode,
10861 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
10862 GEN_INT (1 << log));
10865 /* Put multiply first if it isn't already. */
10866 if (GET_CODE (XEXP (x, 1)) == MULT)
10868 rtx tmp = XEXP (x, 0);
10869 XEXP (x, 0) = XEXP (x, 1);
10874 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
10875 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
10876 created by virtual register instantiation, register elimination, and
10877 similar optimizations. */
10878 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
10881 x = gen_rtx_PLUS (Pmode,
10882 gen_rtx_PLUS (Pmode, XEXP (x, 0),
10883 XEXP (XEXP (x, 1), 0)),
10884 XEXP (XEXP (x, 1), 1));
10888 /* Canonicalize (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
10889 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
10890 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
10891 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10892 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
10893 && CONSTANT_P (XEXP (x, 1)))
10896 rtx other = NULL_RTX;
10898 if (CONST_INT_P (XEXP (x, 1)))
10900 constant = XEXP (x, 1);
10901 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
10903 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
10905 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
10906 other = XEXP (x, 1);
10914 x = gen_rtx_PLUS (Pmode,
10915 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
10916 XEXP (XEXP (XEXP (x, 0), 1), 0)),
10917 plus_constant (other, INTVAL (constant)));
10921 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10924 if (GET_CODE (XEXP (x, 0)) == MULT)
10927 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
10930 if (GET_CODE (XEXP (x, 1)) == MULT)
10933 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
10937 if (changed && REG_P (XEXP (x, 1))
10938 && REG_P (XEXP (x, 0)))
    return x;
10941 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
10944 x = legitimize_pic_address (x, 0);
10947 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10950 if (REG_P (XEXP (x, 0)))
10952 rtx temp = gen_reg_rtx (Pmode);
10953 rtx val = force_operand (XEXP (x, 1), temp);
10955 emit_move_insn (temp, val);
10957 XEXP (x, 1) = temp;
10961 else if (REG_P (XEXP (x, 1)))
10963 rtx temp = gen_reg_rtx (Pmode);
10964 rtx val = force_operand (XEXP (x, 0), temp);
10966 emit_move_insn (temp, val);
10968 XEXP (x, 0) = temp;
10976 /* Print an integer constant expression in assembler syntax. Addition
10977 and subtraction are the only arithmetic that may appear in these
10978 expressions. FILE is the stdio stream to write to, X is the rtx, and
10979 CODE is the operand print code from the output string. */
10982 output_pic_addr_const (FILE *file, rtx x, int code)
10986 switch (GET_CODE (x))
10989 gcc_assert (flag_pic);
10994 if (! TARGET_MACHO || TARGET_64BIT)
10995 output_addr_const (file, x);
10998 const char *name = XSTR (x, 0);
11000 /* Mark the decl as referenced so that cgraph will
11001 output the function. */
11002 if (SYMBOL_REF_DECL (x))
11003 mark_decl_referenced (SYMBOL_REF_DECL (x));
11006 if (MACHOPIC_INDIRECT
11007 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
11008 name = machopic_indirection_name (x, /*stub_p=*/true);
11010 assemble_name (file, name);
11012 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
11013 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
11014 fputs ("@PLT", file);
11021 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
11022 assemble_name (asm_out_file, buf);
11026 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
11030 /* This used to output parentheses around the expression,
11031 but that does not work on the 386 (either ATT or BSD assembler). */
11032 output_pic_addr_const (file, XEXP (x, 0), code);
11036 if (GET_MODE (x) == VOIDmode)
11038 /* We can use %d if the number is <32 bits and positive. */
11039 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
11040 fprintf (file, "0x%lx%08lx",
11041 (unsigned long) CONST_DOUBLE_HIGH (x),
11042 (unsigned long) CONST_DOUBLE_LOW (x));
11044 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
11047 /* We can't handle floating point constants;
11048 TARGET_PRINT_OPERAND must handle them. */
11049 output_operand_lossage ("floating constant misused");
11053 /* Some assemblers need integer constants to appear first. */
11054 if (CONST_INT_P (XEXP (x, 0)))
11056 output_pic_addr_const (file, XEXP (x, 0), code);
11058 output_pic_addr_const (file, XEXP (x, 1), code);
11062 gcc_assert (CONST_INT_P (XEXP (x, 1)));
11063 output_pic_addr_const (file, XEXP (x, 1), code);
11065 output_pic_addr_const (file, XEXP (x, 0), code);
11071 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
11072 output_pic_addr_const (file, XEXP (x, 0), code);
11074 output_pic_addr_const (file, XEXP (x, 1), code);
11076 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
11080 gcc_assert (XVECLEN (x, 0) == 1);
11081 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
11082 switch (XINT (x, 1))
11085 fputs ("@GOT", file);
11087 case UNSPEC_GOTOFF:
11088 fputs ("@GOTOFF", file);
11090 case UNSPEC_PLTOFF:
11091 fputs ("@PLTOFF", file);
11093 case UNSPEC_GOTPCREL:
11094 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
11095 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
11097 case UNSPEC_GOTTPOFF:
11098 /* FIXME: This might be @TPOFF in Sun ld too. */
11099 fputs ("@gottpoff", file);
11102 fputs ("@tpoff", file);
11104 case UNSPEC_NTPOFF:
11106 fputs ("@tpoff", file);
11108 fputs ("@ntpoff", file);
11110 case UNSPEC_DTPOFF:
11111 fputs ("@dtpoff", file);
11113 case UNSPEC_GOTNTPOFF:
11115 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
11116 "@gottpoff(%rip)": "@gottpoff[rip]", file);
11118 fputs ("@gotntpoff", file);
11120 case UNSPEC_INDNTPOFF:
11121 fputs ("@indntpoff", file);
11124 case UNSPEC_MACHOPIC_OFFSET:
11126 machopic_output_function_base_name (file);
11130 output_operand_lossage ("invalid UNSPEC as operand");
11136 output_operand_lossage ("invalid expression as operand");
11140 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
11141 We need to emit DTP-relative relocations. */
11143 static void ATTRIBUTE_UNUSED
11144 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
11146 fputs (ASM_LONG, file);
11147 output_addr_const (file, x);
11148 fputs ("@dtpoff", file);
11154 fputs (", 0", file);
11157 gcc_unreachable ();
11161 /* Return true if X is a representation of the PIC register. This copes
11162 with calls from ix86_find_base_term, where the register might have
11163 been replaced by a cselib value. */
11166 ix86_pic_register_p (rtx x)
11168 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
11169 return (pic_offset_table_rtx
11170 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
11172 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
11175 /* In the name of slightly smaller debug output, and to cater to
11176 general assembler lossage, recognize PIC+GOTOFF and turn it back
11177 into a direct symbol reference.
11179 On Darwin, this is necessary to avoid a crash, because Darwin
11180 has a different PIC label for each routine but the DWARF debugging
11181 information is not associated with any particular routine, so it's
11182 necessary to remove references to the PIC label from RTL stored by
11183 the DWARF output code. */
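/* For example, (plus pic_reg (const (unspec [foo] UNSPEC_GOTOFF)))
   is turned back into plain "foo"; the code below also copes with
   additional register and constant addends.  (A sketch, not an
   exhaustive list of the accepted forms.)  */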
11186 ix86_delegitimize_address (rtx x)
11188 rtx orig_x = delegitimize_mem_from_attrs (x);
11189 /* addend is NULL or some rtx if x is something+GOTOFF where
11190 something doesn't include the PIC register. */
11191 rtx addend = NULL_RTX;
11192 /* reg_addend is NULL or a multiple of some register. */
11193 rtx reg_addend = NULL_RTX;
11194 /* const_addend is NULL or a const_int. */
11195 rtx const_addend = NULL_RTX;
11196 /* This is the result, or NULL. */
11197 rtx result = NULL_RTX;
11206 if (GET_CODE (x) != CONST
11207 || GET_CODE (XEXP (x, 0)) != UNSPEC
11208 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
11209 || !MEM_P (orig_x))
11211 x = XVECEXP (XEXP (x, 0), 0, 0);
11212 if (GET_MODE (orig_x) != Pmode)
11213 return simplify_gen_subreg (GET_MODE (orig_x), x, Pmode, 0);
11217 if (GET_CODE (x) != PLUS
11218 || GET_CODE (XEXP (x, 1)) != CONST)
11221 if (ix86_pic_register_p (XEXP (x, 0)))
11222 /* %ebx + GOT/GOTOFF */
11224 else if (GET_CODE (XEXP (x, 0)) == PLUS)
11226 /* %ebx + %reg * scale + GOT/GOTOFF */
11227 reg_addend = XEXP (x, 0);
11228 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
11229 reg_addend = XEXP (reg_addend, 1);
11230 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
11231 reg_addend = XEXP (reg_addend, 0);
11234 reg_addend = NULL_RTX;
11235 addend = XEXP (x, 0);
11239 addend = XEXP (x, 0);
11241 x = XEXP (XEXP (x, 1), 0);
11242 if (GET_CODE (x) == PLUS
11243 && CONST_INT_P (XEXP (x, 1)))
11245 const_addend = XEXP (x, 1);
11249 if (GET_CODE (x) == UNSPEC
11250 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
11251 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
11252 result = XVECEXP (x, 0, 0);
11254 if (TARGET_MACHO && darwin_local_data_pic (x)
11255 && !MEM_P (orig_x))
11256 result = XVECEXP (x, 0, 0);
11262 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
11264 result = gen_rtx_PLUS (Pmode, reg_addend, result);
11267 /* If the rest of original X doesn't involve the PIC register, add
11268 addend and subtract pic_offset_table_rtx. This can happen e.g.
11270 leal (%ebx, %ecx, 4), %ecx
11272 movl foo@GOTOFF(%ecx), %edx
11273 in which case we return (%ecx - %ebx) + foo. */
11274 if (pic_offset_table_rtx)
11275 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
11276 pic_offset_table_rtx),
11281 if (GET_MODE (orig_x) != Pmode && MEM_P (orig_x))
11282 return simplify_gen_subreg (GET_MODE (orig_x), result, Pmode, 0);
11286 /* If X is a machine specific address (i.e. a symbol or label being
11287 referenced as a displacement from the GOT implemented using an
11288 UNSPEC), then return the base term. Otherwise return X. */
11291 ix86_find_base_term (rtx x)
11297 if (GET_CODE (x) != CONST)
11299 term = XEXP (x, 0);
11300 if (GET_CODE (term) == PLUS
11301 && (CONST_INT_P (XEXP (term, 1))
11302 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
11303 term = XEXP (term, 0);
11304 if (GET_CODE (term) != UNSPEC
11305 || XINT (term, 1) != UNSPEC_GOTPCREL)
11308 return XVECEXP (term, 0, 0);
11311 return ix86_delegitimize_address (x);
11315 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
11316 int fp, FILE *file)
11318 const char *suffix;
11320 if (mode == CCFPmode || mode == CCFPUmode)
11322 code = ix86_fp_compare_code_to_integer (code);
11326 code = reverse_condition (code);
11377 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
11381 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
11382 Those same assemblers have the same but opposite lossage on cmov. */
11383 if (mode == CCmode)
11384 suffix = fp ? "nbe" : "a";
11385 else if (mode == CCCmode)
11388 gcc_unreachable ();
11404 gcc_unreachable ();
11408 gcc_assert (mode == CCmode || mode == CCCmode);
11425 gcc_unreachable ();
11429 /* ??? As above. */
11430 gcc_assert (mode == CCmode || mode == CCCmode);
11431 suffix = fp ? "nb" : "ae";
11434 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
11438 /* ??? As above. */
11439 if (mode == CCmode)
11441 else if (mode == CCCmode)
11442 suffix = fp ? "nb" : "ae";
11444 gcc_unreachable ();
11447 suffix = fp ? "u" : "p";
11450 suffix = fp ? "nu" : "np";
11453 gcc_unreachable ();
11455 fputs (suffix, file);
11458 /* Print the name of register X to FILE based on its machine mode and number.
11459 If CODE is 'w', pretend the mode is HImode.
11460 If CODE is 'b', pretend the mode is QImode.
11461 If CODE is 'k', pretend the mode is SImode.
11462 If CODE is 'q', pretend the mode is DImode.
11463 If CODE is 'x', pretend the mode is V4SFmode.
11464 If CODE is 't', pretend the mode is V8SFmode.
11465 If CODE is 'h', pretend the reg is the 'high' byte register.
11466 If CODE is 'y', print "st(0)" instead of "st" if the reg is a stack op.
11467 If CODE is 'd', duplicate the operand for AVX instruction.
11471 print_reg (rtx x, int code, FILE *file)
11474 bool duplicated = code == 'd' && TARGET_AVX;
11476 gcc_assert (x == pc_rtx
11477 || (REGNO (x) != ARG_POINTER_REGNUM
11478 && REGNO (x) != FRAME_POINTER_REGNUM
11479 && REGNO (x) != FLAGS_REG
11480 && REGNO (x) != FPSR_REG
11481 && REGNO (x) != FPCR_REG));
11483 if (ASSEMBLER_DIALECT == ASM_ATT)
11488 gcc_assert (TARGET_64BIT);
11489 fputs ("rip", file);
11493 if (code == 'w' || MMX_REG_P (x))
11495 else if (code == 'b')
11497 else if (code == 'k')
11499 else if (code == 'q')
11501 else if (code == 'y')
11503 else if (code == 'h')
11505 else if (code == 'x')
11507 else if (code == 't')
11510 code = GET_MODE_SIZE (GET_MODE (x));
11512 /* Irritatingly, AMD extended registers use a different naming convention
11513 from the normal registers. */
11514 if (REX_INT_REG_P (x))
11516 gcc_assert (TARGET_64BIT);
11520 error ("extended registers have no high halves");
11523 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
11526 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
11529 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
11532 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
11535 error ("unsupported operand size for extended register");
11545 if (STACK_TOP_P (x))
11554 if (! ANY_FP_REG_P (x))
11555 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
11560 reg = hi_reg_name[REGNO (x)];
11563 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
11565 reg = qi_reg_name[REGNO (x)];
11568 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
11570 reg = qi_high_reg_name[REGNO (x)];
11575 gcc_assert (!duplicated);
11577 fputs (hi_reg_name[REGNO (x)] + 1, file);
11582 gcc_unreachable ();
11588 if (ASSEMBLER_DIALECT == ASM_ATT)
11589 fprintf (file, ", %%%s", reg);
11591 fprintf (file, ", %s", reg);
11595 /* Locate some local-dynamic symbol still in use by this function
11596 so that we can print its name in some tls_local_dynamic_base
   pattern. */
11600 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11604 if (GET_CODE (x) == SYMBOL_REF
11605 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11607 cfun->machine->some_ld_name = XSTR (x, 0);
11614 static const char *
11615 get_some_local_dynamic_name (void)
11619 if (cfun->machine->some_ld_name)
11620 return cfun->machine->some_ld_name;
11622 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11623 if (NONDEBUG_INSN_P (insn)
11624 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
11625 return cfun->machine->some_ld_name;
11630 /* Meaning of CODE:
11631 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
11632 C -- print opcode suffix for set/cmov insn.
11633 c -- like C, but print reversed condition
11634 F,f -- likewise, but for floating-point.
11635 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
11637 R -- print the prefix for register names.
11638 z -- print the opcode suffix for the size of the current operand.
11639 Z -- likewise, with special suffixes for x87 instructions.
11640 * -- print a star (in certain assembler syntax)
11641 A -- print an absolute memory reference.
11642 w -- print the operand as if it's a "word" (HImode) even if it isn't.
11643 s -- print a shift double count, followed by the assembler's argument
11645 b -- print the QImode name of the register for the indicated operand.
11646 %b0 would print %al if operands[0] is reg 0.
11647 w -- likewise, print the HImode name of the register.
11648 k -- likewise, print the SImode name of the register.
11649 q -- likewise, print the DImode name of the register.
11650 x -- likewise, print the V4SFmode name of the register.
11651 t -- likewise, print the V8SFmode name of the register.
11652 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
11653 y -- print "st(0)" instead of "st" as a register.
11654 d -- print duplicated register operand for AVX instruction.
11655 D -- print condition for SSE cmp instruction.
11656 P -- if PIC, print an @PLT suffix.
11657 X -- don't print any sort of PIC '@' suffix for a symbol.
11658 & -- print some in-use local-dynamic symbol name.
11659 H -- print a memory address offset by 8; used for sse high-parts
11660 Y -- print condition for XOP pcom* instruction.
11661 + -- print a branch hint as 'cs' or 'ds' prefix
11662 ; -- print a semicolon (after prefixes, due to a bug in older gas).
11666 ix86_print_operand (FILE *file, rtx x, int code)
11673 if (ASSEMBLER_DIALECT == ASM_ATT)
11679 const char *name = get_some_local_dynamic_name ();
11681 output_operand_lossage ("'%%&' used without any "
11682 "local dynamic TLS references");
11684 assemble_name (file, name);
11689 switch (ASSEMBLER_DIALECT)
11696 /* Intel syntax. For absolute addresses, registers should not
11697 be surrounded by braces. */
11701 ix86_print_operand (file, x, 0);
11708 gcc_unreachable ();
11711 ix86_print_operand (file, x, 0);
11716 if (ASSEMBLER_DIALECT == ASM_ATT)
11721 if (ASSEMBLER_DIALECT == ASM_ATT)
11726 if (ASSEMBLER_DIALECT == ASM_ATT)
11731 if (ASSEMBLER_DIALECT == ASM_ATT)
11736 if (ASSEMBLER_DIALECT == ASM_ATT)
11741 if (ASSEMBLER_DIALECT == ASM_ATT)
11746 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11748 /* Opcodes don't get size suffixes if using Intel opcodes. */
11749 if (ASSEMBLER_DIALECT == ASM_INTEL)
11752 switch (GET_MODE_SIZE (GET_MODE (x)))
11771 output_operand_lossage
11772 ("invalid operand size for operand code '%c'", code);
11777 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11779 warning (0, "non-integer operand used with operand code '%c'", code);
11783 /* 387 opcodes don't get size suffixes if using Intel opcodes. */
11784 if (ASSEMBLER_DIALECT == ASM_INTEL)
11787 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11789 switch (GET_MODE_SIZE (GET_MODE (x)))
11792 #ifdef HAVE_AS_IX86_FILDS
11802 #ifdef HAVE_AS_IX86_FILDQ
11805 fputs ("ll", file);
11813 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11815 /* 387 opcodes don't get size suffixes
11816 if the operands are registers. */
11817 if (STACK_REG_P (x))
11820 switch (GET_MODE_SIZE (GET_MODE (x)))
11841 output_operand_lossage
11842 ("invalid operand type used with operand code '%c'", code);
11846 output_operand_lossage
11847 ("invalid operand size for operand code '%c'", code);
11864 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
11866 ix86_print_operand (file, x, 0);
11867 fputs (", ", file);
11872 /* A little bit of braindamage here. The SSE compare instructions
11873 use completely different names for the comparisons than the
11874 fp conditional moves do. */
11877 switch (GET_CODE (x))
11880 fputs ("eq", file);
11883 fputs ("eq_us", file);
11886 fputs ("lt", file);
11889 fputs ("nge", file);
11892 fputs ("le", file);
11895 fputs ("ngt", file);
11898 fputs ("unord", file);
11901 fputs ("neq", file);
11904 fputs ("neq_oq", file);
11907 fputs ("ge", file);
11910 fputs ("nlt", file);
11913 fputs ("gt", file);
11916 fputs ("nle", file);
11919 fputs ("ord", file);
11922 output_operand_lossage ("operand is not a condition code, "
11923 "invalid operand code 'D'");
11929 switch (GET_CODE (x))
11933 fputs ("eq", file);
11937 fputs ("lt", file);
11941 fputs ("le", file);
11944 fputs ("unord", file);
11948 fputs ("neq", file);
11952 fputs ("nlt", file);
11956 fputs ("nle", file);
11959 fputs ("ord", file);
11962 output_operand_lossage ("operand is not a condition code, "
11963 "invalid operand code 'D'");
11969 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11970 if (ASSEMBLER_DIALECT == ASM_ATT)
11972 switch (GET_MODE (x))
11974 case HImode: putc ('w', file); break;
11976 case SFmode: putc ('l', file); break;
11978 case DFmode: putc ('q', file); break;
11979 default: gcc_unreachable ();
11986 if (!COMPARISON_P (x))
11988 output_operand_lossage ("operand is neither a constant nor a "
11989 "condition code, invalid operand code "
11993 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
11996 if (!COMPARISON_P (x))
11998 output_operand_lossage ("operand is neither a constant nor a "
11999 "condition code, invalid operand code "
12003 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
12004 if (ASSEMBLER_DIALECT == ASM_ATT)
12007 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
12010 /* Like above, but reverse condition */
12012 /* Check to see if argument to %c is really a constant
12013 and not a condition code which needs to be reversed. */
12014 if (!COMPARISON_P (x))
12016 output_operand_lossage ("operand is neither a constant nor a "
12017 "condition code, invalid operand "
12021 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
12024 if (!COMPARISON_P (x))
12026 output_operand_lossage ("operand is neither a constant nor a "
12027 "condition code, invalid operand "
12031 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
12032 if (ASSEMBLER_DIALECT == ASM_ATT)
12035 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
12039 /* It doesn't actually matter what mode we use here, as we're
12040 only going to use this for printing. */
12041 x = adjust_address_nv (x, DImode, 8);
12049 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
12052 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
12055 int pred_val = INTVAL (XEXP (x, 0));
12057 if (pred_val < REG_BR_PROB_BASE * 45 / 100
12058 || pred_val > REG_BR_PROB_BASE * 55 / 100)
12060 int taken = pred_val > REG_BR_PROB_BASE / 2;
12061 int cputaken = final_forward_branch_p (current_output_insn) == 0;
12063 /* Emit hints only in the case where the default branch prediction
12064 heuristics would fail. */
12065 if (taken != cputaken)
12067 /* We use 3e (DS) prefix for taken branches and
12068 2e (CS) prefix for not taken branches. */
12070 fputs ("ds ; ", file);
12072 fputs ("cs ; ", file);
12080 switch (GET_CODE (x))
12083 fputs ("neq", file);
12086 fputs ("eq", file);
12090 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
12094 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
12098 fputs ("le", file);
12102 fputs ("lt", file);
12105 fputs ("unord", file);
12108 fputs ("ord", file);
12111 fputs ("ueq", file);
12114 fputs ("nlt", file);
12117 fputs ("nle", file);
12120 fputs ("ule", file);
12123 fputs ("ult", file);
12126 fputs ("une", file);
12129 output_operand_lossage ("operand is not a condition code, "
12130 "invalid operand code 'Y'");
12136 #if TARGET_MACHO || !HAVE_AS_IX86_REP_LOCK_PREFIX
12142 output_operand_lossage ("invalid operand code '%c'", code);
12147 print_reg (x, code, file);
12149 else if (MEM_P (x))
12151 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
12152 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
12153 && GET_MODE (x) != BLKmode)
12156 switch (GET_MODE_SIZE (GET_MODE (x)))
12158 case 1: size = "BYTE"; break;
12159 case 2: size = "WORD"; break;
12160 case 4: size = "DWORD"; break;
12161 case 8: size = "QWORD"; break;
12162 case 12: size = "TBYTE"; break;
12164 if (GET_MODE (x) == XFmode)
12169 case 32: size = "YMMWORD"; break;
12171 gcc_unreachable ();
12174 /* Check for explicit size override (codes 'b', 'w' and 'k') */
12177 else if (code == 'w')
12179 else if (code == 'k')
12182 fputs (size, file);
12183 fputs (" PTR ", file);
12187 /* Avoid (%rip) for call operands. */
12188 if (CONSTANT_ADDRESS_P (x) && code == 'P'
12189 && !CONST_INT_P (x))
12190 output_addr_const (file, x);
12191 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
12192 output_operand_lossage ("invalid constraints for operand");
12194 output_address (x);
12197 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
12202 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
12203 REAL_VALUE_TO_TARGET_SINGLE (r, l);
12205 if (ASSEMBLER_DIALECT == ASM_ATT)
12207 fprintf (file, "0x%08lx", (long unsigned int) l);
12210 /* These float cases don't actually occur as immediate operands. */
12211 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
12215 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12216 fputs (dstr, file);
12219 else if (GET_CODE (x) == CONST_DOUBLE
12220 && GET_MODE (x) == XFmode)
12224 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12225 fputs (dstr, file);
12230 /* We have patterns that allow zero sets of memory, for instance.
12231 In 64-bit mode, we should probably support all 8-byte vectors,
12232 since we can in fact encode that into an immediate. */
12233 if (GET_CODE (x) == CONST_VECTOR)
12235 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
12241 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
12243 if (ASSEMBLER_DIALECT == ASM_ATT)
12246 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
12247 || GET_CODE (x) == LABEL_REF)
12249 if (ASSEMBLER_DIALECT == ASM_ATT)
12252 fputs ("OFFSET FLAT:", file);
12255 if (CONST_INT_P (x))
12256 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
12258 output_pic_addr_const (file, x, code);
12260 output_addr_const (file, x);
12265 ix86_print_operand_punct_valid_p (unsigned char code)
12267 return (code == '*' || code == '+' || code == '&' || code == ';');
12270 /* Print a memory operand whose address is ADDR. */
12273 ix86_print_operand_address (FILE *file, rtx addr)
12275 struct ix86_address parts;
12276 rtx base, index, disp;
12278 int ok = ix86_decompose_address (addr, &parts);
12283 index = parts.index;
12285 scale = parts.scale;
12293 if (ASSEMBLER_DIALECT == ASM_ATT)
12295 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
12298 gcc_unreachable ();
12301 /* Use one byte shorter RIP relative addressing for 64bit mode. */
12302 if (TARGET_64BIT && !base && !index)
12306 if (GET_CODE (disp) == CONST
12307 && GET_CODE (XEXP (disp, 0)) == PLUS
12308 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12309 symbol = XEXP (XEXP (disp, 0), 0);
12311 if (GET_CODE (symbol) == LABEL_REF
12312 || (GET_CODE (symbol) == SYMBOL_REF
12313 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
12316 if (!base && !index)
12318 /* Displacement only requires special attention. */
12320 if (CONST_INT_P (disp))
12322 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
12323 fputs ("ds:", file);
12324 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
12327 output_pic_addr_const (file, disp, 0);
12329 output_addr_const (file, disp);
12333 if (ASSEMBLER_DIALECT == ASM_ATT)
12338 output_pic_addr_const (file, disp, 0);
12339 else if (GET_CODE (disp) == LABEL_REF)
12340 output_asm_label (disp);
12342 output_addr_const (file, disp);
12347 print_reg (base, 0, file);
12351 print_reg (index, 0, file);
12353 fprintf (file, ",%d", scale);
12359 rtx offset = NULL_RTX;
12363 /* Pull out the offset of a symbol; print any symbol itself. */
12364 if (GET_CODE (disp) == CONST
12365 && GET_CODE (XEXP (disp, 0)) == PLUS
12366 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12368 offset = XEXP (XEXP (disp, 0), 1);
12369 disp = gen_rtx_CONST (VOIDmode,
12370 XEXP (XEXP (disp, 0), 0));
12374 output_pic_addr_const (file, disp, 0);
12375 else if (GET_CODE (disp) == LABEL_REF)
12376 output_asm_label (disp);
12377 else if (CONST_INT_P (disp))
12380 output_addr_const (file, disp);
12386 print_reg (base, 0, file);
12389 if (INTVAL (offset) >= 0)
12391 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12395 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12402 print_reg (index, 0, file);
12404 fprintf (file, "*%d", scale);
12412 output_addr_const_extra (FILE *file, rtx x)
12416 if (GET_CODE (x) != UNSPEC)
12419 op = XVECEXP (x, 0, 0);
12420 switch (XINT (x, 1))
12422 case UNSPEC_GOTTPOFF:
12423 output_addr_const (file, op);
12424 /* FIXME: This might be @TPOFF in Sun ld. */
12425 fputs ("@gottpoff", file);
12428 output_addr_const (file, op);
12429 fputs ("@tpoff", file);
12431 case UNSPEC_NTPOFF:
12432 output_addr_const (file, op);
12434 fputs ("@tpoff", file);
12436 fputs ("@ntpoff", file);
12438 case UNSPEC_DTPOFF:
12439 output_addr_const (file, op);
12440 fputs ("@dtpoff", file);
12442 case UNSPEC_GOTNTPOFF:
12443 output_addr_const (file, op);
12445 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12446 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
12448 fputs ("@gotntpoff", file);
12450 case UNSPEC_INDNTPOFF:
12451 output_addr_const (file, op);
12452 fputs ("@indntpoff", file);
12455 case UNSPEC_MACHOPIC_OFFSET:
12456 output_addr_const (file, op);
12458 machopic_output_function_base_name (file);
12469 /* Split one or more DImode RTL references into pairs of SImode
12470 references. The RTL can be REG, offsettable MEM, integer constant, or
12471 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
12472 split and "num" is its length. lo_half and hi_half are output arrays
12473 that parallel "operands". */
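/* For example (little-endian halves), a DImode pseudo (reg:DI 60)
   is split into (subreg:SI (reg:DI 60) 0) for the low half and
   (subreg:SI (reg:DI 60) 4) for the high half.  */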
12476 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12480 rtx op = operands[num];
12482 /* simplify_subreg refuses to split volatile memory addresses,
12483 but we still have to handle them. */
12486 lo_half[num] = adjust_address (op, SImode, 0);
12487 hi_half[num] = adjust_address (op, SImode, 4);
12491 lo_half[num] = simplify_gen_subreg (SImode, op,
12492 GET_MODE (op) == VOIDmode
12493 ? DImode : GET_MODE (op), 0);
12494 hi_half[num] = simplify_gen_subreg (SImode, op,
12495 GET_MODE (op) == VOIDmode
12496 ? DImode : GET_MODE (op), 4);
12500 /* Split one or more TImode RTL references into pairs of DImode
12501 references. The RTL can be REG, offsettable MEM, integer constant, or
12502 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
12503 split and "num" is its length. lo_half and hi_half are output arrays
12504 that parallel "operands". */
12507 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12511 rtx op = operands[num];
12513 /* simplify_subreg refuses to split volatile memory addresses, but we
12514 still have to handle them. */
12517 lo_half[num] = adjust_address (op, DImode, 0);
12518 hi_half[num] = adjust_address (op, DImode, 8);
12522 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
12523 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
12528 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
12529 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
12530 is the expression of the binary operation. The output may either be
12531 emitted here, or returned to the caller, like all output_* functions.
12533 There is no guarantee that the operands are the same mode, as they
12534 might be within FLOAT or FLOAT_EXTEND expressions. */
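/* For example, an SSE addition in SFmode comes out below roughly as
   "addss\t{%2, %0|%0, %2}" (or the three-operand "vaddss" form when
   AVX is enabled).  This is a sketch of the easy case; the x87
   register-stack cases handled afterwards are considerably hairier.  */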
12536 #ifndef SYSV386_COMPAT
12537 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
12538 wants to fix the assemblers because that causes incompatibility
12539 with gcc. No-one wants to fix gcc because that causes
12540 incompatibility with assemblers... You can use the option of
12541 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
12542 #define SYSV386_COMPAT 1
12546 output_387_binary_op (rtx insn, rtx *operands)
12548 static char buf[40];
12551 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
12553 #ifdef ENABLE_CHECKING
12554 /* Even if we do not want to check the inputs, this documents input
12555 constraints. Which helps in understanding the following code. */
12556 if (STACK_REG_P (operands[0])
12557 && ((REG_P (operands[1])
12558 && REGNO (operands[0]) == REGNO (operands[1])
12559 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
12560 || (REG_P (operands[2])
12561 && REGNO (operands[0]) == REGNO (operands[2])
12562 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
12563 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
12566 gcc_assert (is_sse);
12569 switch (GET_CODE (operands[3]))
12572 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12573 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12581 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12582 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12590 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12591 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12599 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12600 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12608 gcc_unreachable ();
12615 strcpy (buf, ssep);
12616 if (GET_MODE (operands[0]) == SFmode)
12617 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
12619 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
12623 strcpy (buf, ssep + 1);
12624 if (GET_MODE (operands[0]) == SFmode)
12625 strcat (buf, "ss\t{%2, %0|%0, %2}");
12627 strcat (buf, "sd\t{%2, %0|%0, %2}");
12633 switch (GET_CODE (operands[3]))
12637 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
12639 rtx temp = operands[2];
12640 operands[2] = operands[1];
12641 operands[1] = temp;
12644 /* Now we know operands[0] == operands[1]. */
12646 if (MEM_P (operands[2]))
12652 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12654 if (STACK_TOP_P (operands[0]))
12655 /* How is it that we are storing to a dead operand[2]?
12656 Well, presumably operands[1] is dead too. We can't
12657 store the result to st(0) as st(0) gets popped on this
12658 instruction. Instead store to operands[2] (which I
12659 think has to be st(1)). st(1) will be popped later.
12660 gcc <= 2.8.1 didn't have this check and generated
12661 assembly code that the Unixware assembler rejected. */
12662 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12664 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12668 if (STACK_TOP_P (operands[0]))
12669 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12671 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12676 if (MEM_P (operands[1]))
12682 if (MEM_P (operands[2]))
12688 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12691 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
12692 derived assemblers, confusingly reverse the direction of
12693 the operation for fsub{r} and fdiv{r} when the
12694 destination register is not st(0). The Intel assembler
12695 doesn't have this brain damage. Read !SYSV386_COMPAT to
12696 figure out what the hardware really does. */
12697 if (STACK_TOP_P (operands[0]))
12698 p = "{p\t%0, %2|rp\t%2, %0}";
12700 p = "{rp\t%2, %0|p\t%0, %2}";
12702 if (STACK_TOP_P (operands[0]))
12703 /* As above for fmul/fadd, we can't store to st(0). */
12704 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12706 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12711 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
12714 if (STACK_TOP_P (operands[0]))
12715 p = "{rp\t%0, %1|p\t%1, %0}";
12717 p = "{p\t%1, %0|rp\t%0, %1}";
12719 if (STACK_TOP_P (operands[0]))
12720 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
12722 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
12727 if (STACK_TOP_P (operands[0]))
12729 if (STACK_TOP_P (operands[1]))
12730 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12732 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
12735 else if (STACK_TOP_P (operands[1]))
12738 p = "{\t%1, %0|r\t%0, %1}";
12740 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
12746 p = "{r\t%2, %0|\t%0, %2}";
12748 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12754 gcc_unreachable ();
12761 /* Return the mode needed for ENTITY in the optimize_mode_switching pass. */
12764 ix86_mode_needed (int entity, rtx insn)
12766 enum attr_i387_cw mode;
12768 /* The mode UNINITIALIZED is used to store the control word after a
12769 function call or ASM pattern. The mode ANY specifies that the function
12770 has no requirements on the control word and makes no changes to the
12771 bits we are interested in. */
12774 || (NONJUMP_INSN_P (insn)
12775 && (asm_noperands (PATTERN (insn)) >= 0
12776 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
12777 return I387_CW_UNINITIALIZED;
12779 if (recog_memoized (insn) < 0)
12780 return I387_CW_ANY;
12782 mode = get_attr_i387_cw (insn);
12787 if (mode == I387_CW_TRUNC)
12792 if (mode == I387_CW_FLOOR)
12797 if (mode == I387_CW_CEIL)
12802 if (mode == I387_CW_MASK_PM)
12807 gcc_unreachable ();
12810 return I387_CW_ANY;
12813 /* Output code to initialize control word copies used by trunc?f?i and
12814 rounding patterns. CURRENT_MODE is set to the current control word,
12815 while NEW_MODE is set to the new control word. */
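/* Bits 10-11 of the x87 control word form the rounding-control
   field: 00 = to nearest, 01 = down (floor), 10 = up (ceil) and
   11 = toward zero (truncate) -- hence the 0x0400, 0x0800 and
   0x0c00 masks used below.  */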
12818 emit_i387_cw_initialization (int mode)
12820 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
12823 enum ix86_stack_slot slot;
12825 rtx reg = gen_reg_rtx (HImode);
12827 emit_insn (gen_x86_fnstcw_1 (stored_mode));
12828 emit_move_insn (reg, copy_rtx (stored_mode));
12830 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
12831 || optimize_function_for_size_p (cfun))
12835 case I387_CW_TRUNC:
12836 /* round toward zero (truncate) */
12837 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
12838 slot = SLOT_CW_TRUNC;
12841 case I387_CW_FLOOR:
12842 /* round down toward -oo */
12843 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12844 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
12845 slot = SLOT_CW_FLOOR;
12849 /* round up toward +oo */
12850 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12851 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
12852 slot = SLOT_CW_CEIL;
12855 case I387_CW_MASK_PM:
12856 /* mask precision exception for nearbyint() */
12857 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12858 slot = SLOT_CW_MASK_PM;
12862 gcc_unreachable ();
12869 case I387_CW_TRUNC:
12870 /* round toward zero (truncate) */
12871 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
12872 slot = SLOT_CW_TRUNC;
12875 case I387_CW_FLOOR:
12876 /* round down toward -oo */
12877 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
12878 slot = SLOT_CW_FLOOR;
12882 /* round up toward +oo */
12883 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
12884 slot = SLOT_CW_CEIL;
12887 case I387_CW_MASK_PM:
12888 /* mask precision exception for nearbyint() */
12889 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12890 slot = SLOT_CW_MASK_PM;
12894 gcc_unreachable ();
12898 gcc_assert (slot < MAX_386_STACK_LOCALS);
12900 new_mode = assign_386_stack_local (HImode, slot);
12901 emit_move_insn (new_mode, reg);
12904 /* Output code for INSN to convert a float to a signed int. OPERANDS
12905 are the insn operands. The output may be [HSD]Imode and the input
12906 operand may be [SDX]Fmode. */
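/* When a control-word switch is needed, the emitted sequence looks
   roughly like:

     fldcw  %3      ; load the new (e.g. truncating) control word
     fistp  %0      ; store the converted integer
     fldcw  %2      ; restore the original control word  */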
12909 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
12911 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12912 int dimode_p = GET_MODE (operands[0]) == DImode;
12913 int round_mode = get_attr_i387_cw (insn);
12915 /* Jump through a hoop or two for DImode, since the hardware has no
12916 non-popping instruction. We used to do this a different way, but
12917 that was somewhat fragile and broke with post-reload splitters. */
12918 if ((dimode_p || fisttp) && !stack_top_dies)
12919 output_asm_insn ("fld\t%y1", operands);
12921 gcc_assert (STACK_TOP_P (operands[1]));
12922 gcc_assert (MEM_P (operands[0]));
12923 gcc_assert (GET_MODE (operands[1]) != TFmode);
12926 output_asm_insn ("fisttp%Z0\t%0", operands);
12929 if (round_mode != I387_CW_ANY)
12930 output_asm_insn ("fldcw\t%3", operands);
12931 if (stack_top_dies || dimode_p)
12932 output_asm_insn ("fistp%Z0\t%0", operands);
12934 output_asm_insn ("fist%Z0\t%0", operands);
12935 if (round_mode != I387_CW_ANY)
12936 output_asm_insn ("fldcw\t%2", operands);
12942 /* Output code for x87 ffreep insn. The OPNO argument, which may only
12943 have the values zero or one, indicates the ffreep insn's operand
12944 from the OPERANDS array. */
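/* When the assembler does not understand "ffreep", we emit its raw
   encoding instead: "ffreep %st(N)" is the byte pair 0xdf 0xc0+N,
   which written as a little-endian short is 0xc<N>df -- see the
   snprintf below.  */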
12946 static const char *
12947 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
12949 if (TARGET_USE_FFREEP)
12950 #ifdef HAVE_AS_IX86_FFREEP
12951 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
12954 static char retval[32];
12955 int regno = REGNO (operands[opno]);
12957 gcc_assert (FP_REGNO_P (regno));
12959 regno -= FIRST_STACK_REG;
12961 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
12966 return opno ? "fstp\t%y1" : "fstp\t%y0";
12970 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
12971 should be used. UNORDERED_P is true when fucom should be used. */
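/* Without an fcomi-style instruction the classic idiom is to do the
   compare, copy the FPU status word out with fnstsw (typically into
   %ax), and let the caller test the condition bits there; the
   strings returned below follow that pattern.  (A general note,
   not tied to one particular sequence.)  */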
12974 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
12976 int stack_top_dies;
12977 rtx cmp_op0, cmp_op1;
12978 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
12982 cmp_op0 = operands[0];
12983 cmp_op1 = operands[1];
12987 cmp_op0 = operands[1];
12988 cmp_op1 = operands[2];
12993 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
12994 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
12995 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
12996 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
12998 if (GET_MODE (operands[0]) == SFmode)
13000 return &ucomiss[TARGET_AVX ? 0 : 1];
13002 return &comiss[TARGET_AVX ? 0 : 1];
13005 return &ucomisd[TARGET_AVX ? 0 : 1];
13007 return &comisd[TARGET_AVX ? 0 : 1];
13010 gcc_assert (STACK_TOP_P (cmp_op0));
13012 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
13014 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
13016 if (stack_top_dies)
13018 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
13019 return output_387_ffreep (operands, 1);
13022 return "ftst\n\tfnstsw\t%0";
13025 if (STACK_REG_P (cmp_op1)
13027 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
13028 && REGNO (cmp_op1) != FIRST_STACK_REG)
13030 /* If the top of the 387 stack dies, and the other operand is
13031 also a stack register that dies, then this must be a
13032 `fcompp' float compare.  */
13036 /* There is no double popping fcomi variant. Fortunately,
13037 eflags is immune from the fstp's cc clobbering. */
13039 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
13041 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
13042 return output_387_ffreep (operands, 0);
13047 return "fucompp\n\tfnstsw\t%0";
13049 return "fcompp\n\tfnstsw\t%0";
13054 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
13056 static const char * const alt[16] =
13058 "fcom%Z2\t%y2\n\tfnstsw\t%0",
13059 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
13060 "fucom%Z2\t%y2\n\tfnstsw\t%0",
13061 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
13063 "ficom%Z2\t%y2\n\tfnstsw\t%0",
13064 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
13068 "fcomi\t{%y1, %0|%0, %y1}",
13069 "fcomip\t{%y1, %0|%0, %y1}",
13070 "fucomi\t{%y1, %0|%0, %y1}",
13071 "fucomip\t{%y1, %0|%0, %y1}",
13082 mask = eflags_p << 3;
13083 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
13084 mask |= unordered_p << 1;
13085 mask |= stack_top_dies;
13087 gcc_assert (mask < 16);
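/* The mask indexes the alt[] table above: bit 3 selects the fcomi
   forms, bit 2 the integer-operand (ficom) forms, bit 1 the
   unordered (fucom) forms, and bit 0 the popping forms.  */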
13096 ix86_output_addr_vec_elt (FILE *file, int value)
13098 const char *directive = ASM_LONG;
13102 directive = ASM_QUAD;
13104 gcc_assert (!TARGET_64BIT);
13107 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
13111 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
13113 const char *directive = ASM_LONG;
13116 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
13117 directive = ASM_QUAD;
13119 gcc_assert (!TARGET_64BIT);
13121 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
13122 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
13123 fprintf (file, "%s%s%d-%s%d\n",
13124 directive, LPREFIX, value, LPREFIX, rel);
13125 else if (HAVE_AS_GOTOFF_IN_DATA)
13126 fprintf (file, ASM_LONG "%s%d@GOTOFF\n", LPREFIX, value);
13128 else if (TARGET_MACHO)
13130 fprintf (file, ASM_LONG "%s%d-", LPREFIX, value);
13131 machopic_output_function_base_name (file);
13136 asm_fprintf (file, ASM_LONG "%U%s+[.-%s%d]\n",
13137 GOT_SYMBOL_NAME, LPREFIX, value);
13140 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
13141 for the target.  */
13144 ix86_expand_clear (rtx dest)
13148 /* We play register width games, which are only valid after reload. */
13149 gcc_assert (reload_completed);
13151 /* Avoid HImode and its attendant prefix byte. */
13152 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
13153 dest = gen_rtx_REG (SImode, REGNO (dest));
13154 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
13156 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
13157 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
13159 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13160 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
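/* Unlike "mov $0, reg", "xor reg, reg" sets the flags, hence the
   explicit FLAGS_REG clobber attached above.  */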
13166 /* X is an unchanging MEM. If it is a constant pool reference, return
13167 the constant pool rtx, else NULL. */
13170 maybe_get_pool_constant (rtx x)
13172 x = ix86_delegitimize_address (XEXP (x, 0));
13174 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
13175 return get_pool_constant (x);
13181 ix86_expand_move (enum machine_mode mode, rtx operands[])
13184 enum tls_model model;
13189 if (GET_CODE (op1) == SYMBOL_REF)
13191 model = SYMBOL_REF_TLS_MODEL (op1);
13194 op1 = legitimize_tls_address (op1, model, true);
13195 op1 = force_operand (op1, op0);
13199 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13200 && SYMBOL_REF_DLLIMPORT_P (op1))
13201 op1 = legitimize_dllimport_symbol (op1, false);
13203 else if (GET_CODE (op1) == CONST
13204 && GET_CODE (XEXP (op1, 0)) == PLUS
13205 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
13207 rtx addend = XEXP (XEXP (op1, 0), 1);
13208 rtx symbol = XEXP (XEXP (op1, 0), 0);
13211 model = SYMBOL_REF_TLS_MODEL (symbol);
13213 tmp = legitimize_tls_address (symbol, model, true);
13214 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13215 && SYMBOL_REF_DLLIMPORT_P (symbol))
13216 tmp = legitimize_dllimport_symbol (symbol, true);
13220 tmp = force_operand (tmp, NULL);
13221 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
13222 op0, 1, OPTAB_DIRECT);
13228 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
13230 if (TARGET_MACHO && !TARGET_64BIT)
13235 rtx temp = ((reload_in_progress
13236 || ((op0 && REG_P (op0))
13237 && mode == Pmode))
13238 ? op0 : gen_reg_rtx (Pmode));
13239 op1 = machopic_indirect_data_reference (op1, temp);
13240 op1 = machopic_legitimize_pic_address (op1, mode,
13241 temp == op1 ? 0 : temp);
13243 else if (MACHOPIC_INDIRECT)
13244 op1 = machopic_indirect_data_reference (op1, 0);
13252 op1 = force_reg (Pmode, op1);
13253 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
13255 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
13256 op1 = legitimize_pic_address (op1, reg);
13265 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
13266 || !push_operand (op0, mode))
13268 op1 = force_reg (mode, op1);
13270 if (push_operand (op0, mode)
13271 && ! general_no_elim_operand (op1, mode))
13272 op1 = copy_to_mode_reg (mode, op1);
13274 /* Force large constants in 64-bit compilation into a register
13275 to get them CSEed.  */
13276 if (can_create_pseudo_p ()
13277 && (mode == DImode) && TARGET_64BIT
13278 && immediate_operand (op1, mode)
13279 && !x86_64_zext_immediate_operand (op1, VOIDmode)
13280 && !register_operand (op0, mode)
13282 op1 = copy_to_mode_reg (mode, op1);
13284 if (can_create_pseudo_p ()
13285 && FLOAT_MODE_P (mode)
13286 && GET_CODE (op1) == CONST_DOUBLE)
13288 /* If we are loading a floating point constant to a register,
13289 force the value to memory now, since we'll get better code
13290 out of the back end.  */
13292 op1 = validize_mem (force_const_mem (mode, op1));
13293 if (!register_operand (op0, mode))
13295 rtx temp = gen_reg_rtx (mode);
13296 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
13297 emit_move_insn (op0, temp);
13303 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13307 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
13309 rtx op0 = operands[0], op1 = operands[1];
13310 unsigned int align = GET_MODE_ALIGNMENT (mode);
13312 /* Force constants other than zero into memory.  We do not know how
13313 the instructions used to build constants modify the upper 64 bits
13314 of the register; once we have that information we may be able
13315 to handle some of them more efficiently.  */
13316 if (can_create_pseudo_p ()
13317 && register_operand (op0, mode)
13318 && (CONSTANT_P (op1)
13319 || (GET_CODE (op1) == SUBREG
13320 && CONSTANT_P (SUBREG_REG (op1))))
13321 && !standard_sse_constant_p (op1))
13322 op1 = validize_mem (force_const_mem (mode, op1));
13324 /* We need to check memory alignment for SSE mode since the attribute
13325 can make operands unaligned.  */
13326 if (can_create_pseudo_p ()
13327 && SSE_REG_MODE_P (mode)
13328 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
13329 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
13333 /* ix86_expand_vector_move_misalign() does not like constants ... */
13334 if (CONSTANT_P (op1)
13335 || (GET_CODE (op1) == SUBREG
13336 && CONSTANT_P (SUBREG_REG (op1))))
13337 op1 = validize_mem (force_const_mem (mode, op1));
13339 /* ... nor both arguments in memory. */
13340 if (!register_operand (op0, mode)
13341 && !register_operand (op1, mode))
13342 op1 = force_reg (mode, op1);
13344 tmp[0] = op0; tmp[1] = op1;
13345 ix86_expand_vector_move_misalign (mode, tmp);
13349 /* Make operand1 a register if it isn't already. */
13350 if (can_create_pseudo_p ()
13351 && !register_operand (op0, mode)
13352 && !register_operand (op1, mode))
13354 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
13358 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13361 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
13362 straight to ix86_expand_vector_move. */
13363 /* Code generation for scalar reg-reg moves of single and double precision data:
13364 if (x86_sse_partial_reg_dependency == true || x86_sse_split_regs == true)
13368 if (x86_sse_partial_reg_dependency == true)
13373 Code generation for scalar loads of double precision data:
13374 if (x86_sse_split_regs == true)
13375 movlpd mem, reg (gas syntax)
13379 Code generation for unaligned packed loads of single precision data
13380 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
13381 if (x86_sse_unaligned_move_optimal)
13384 if (x86_sse_partial_reg_dependency == true)
13396 Code generation for unaligned packed loads of double precision data
13397 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
13398 if (x86_sse_unaligned_move_optimal)
13401 if (x86_sse_split_regs == true)
13414 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
13423 switch (GET_MODE_CLASS (mode))
13425 case MODE_VECTOR_INT:
13427 switch (GET_MODE_SIZE (mode))
13430 /* If we're optimizing for size, movups is the smallest. */
13431 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13433 op0 = gen_lowpart (V4SFmode, op0);
13434 op1 = gen_lowpart (V4SFmode, op1);
13435 emit_insn (gen_avx_movups (op0, op1));
13438 op0 = gen_lowpart (V16QImode, op0);
13439 op1 = gen_lowpart (V16QImode, op1);
13440 emit_insn (gen_avx_movdqu (op0, op1));
13443 op0 = gen_lowpart (V32QImode, op0);
13444 op1 = gen_lowpart (V32QImode, op1);
13445 emit_insn (gen_avx_movdqu256 (op0, op1));
13448 gcc_unreachable ();
13451 case MODE_VECTOR_FLOAT:
13452 op0 = gen_lowpart (mode, op0);
13453 op1 = gen_lowpart (mode, op1);
13458 emit_insn (gen_avx_movups (op0, op1));
13461 emit_insn (gen_avx_movups256 (op0, op1));
13464 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13466 op0 = gen_lowpart (V4SFmode, op0);
13467 op1 = gen_lowpart (V4SFmode, op1);
13468 emit_insn (gen_avx_movups (op0, op1));
13471 emit_insn (gen_avx_movupd (op0, op1));
13474 emit_insn (gen_avx_movupd256 (op0, op1));
13477 gcc_unreachable ();
13482 gcc_unreachable ();
13490 /* If we're optimizing for size, movups is the smallest. */
13491 if (optimize_insn_for_size_p ()
13492 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13494 op0 = gen_lowpart (V4SFmode, op0);
13495 op1 = gen_lowpart (V4SFmode, op1);
13496 emit_insn (gen_sse_movups (op0, op1));
13500 /* ??? If we have typed data, then it would appear that using
13501 movdqu is the only way to get unaligned data loaded with
13502 integer registers.  */
13503 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13505 op0 = gen_lowpart (V16QImode, op0);
13506 op1 = gen_lowpart (V16QImode, op1);
13507 emit_insn (gen_sse2_movdqu (op0, op1));
13511 if (TARGET_SSE2 && mode == V2DFmode)
13515 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
13517 op0 = gen_lowpart (V2DFmode, op0);
13518 op1 = gen_lowpart (V2DFmode, op1);
13519 emit_insn (gen_sse2_movupd (op0, op1));
13523 /* When SSE registers are split into halves, we can avoid
13524 writing to the top half twice. */
13525 if (TARGET_SSE_SPLIT_REGS)
13527 emit_clobber (op0);
13532 /* ??? Not sure about the best option for the Intel chips.
13533 The following would seem to satisfy; the register is
13534 entirely cleared, breaking the dependency chain. We
13535 then store to the upper half, with a dependency depth
13536 of one. A rumor has it that Intel recommends two movsd
13537 followed by an unpacklpd, but this is unconfirmed. And
13538 given that the dependency depth of the unpacklpd would
13539 still be one, I'm not sure why this would be better. */
13540 zero = CONST0_RTX (V2DFmode);
13543 m = adjust_address (op1, DFmode, 0);
13544 emit_insn (gen_sse2_loadlpd (op0, zero, m));
13545 m = adjust_address (op1, DFmode, 8);
13546 emit_insn (gen_sse2_loadhpd (op0, op0, m));
13550 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
13552 op0 = gen_lowpart (V4SFmode, op0);
13553 op1 = gen_lowpart (V4SFmode, op1);
13554 emit_insn (gen_sse_movups (op0, op1));
13558 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
13559 emit_move_insn (op0, CONST0_RTX (mode));
13561 emit_clobber (op0);
13563 if (mode != V4SFmode)
13564 op0 = gen_lowpart (V4SFmode, op0);
13565 m = adjust_address (op1, V2SFmode, 0);
13566 emit_insn (gen_sse_loadlps (op0, op0, m));
13567 m = adjust_address (op1, V2SFmode, 8);
13568 emit_insn (gen_sse_loadhps (op0, op0, m));
13571 else if (MEM_P (op0))
13573 /* If we're optimizing for size, movups is the smallest. */
13574 if (optimize_insn_for_size_p ()
13575 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13577 op0 = gen_lowpart (V4SFmode, op0);
13578 op1 = gen_lowpart (V4SFmode, op1);
13579 emit_insn (gen_sse_movups (op0, op1));
13583 /* ??? Similar to above, only less clear because of the
13584 "typeless stores" issue.  */
13585 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
13586 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13588 op0 = gen_lowpart (V16QImode, op0);
13589 op1 = gen_lowpart (V16QImode, op1);
13590 emit_insn (gen_sse2_movdqu (op0, op1));
13594 if (TARGET_SSE2 && mode == V2DFmode)
13596 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
13598 op0 = gen_lowpart (V2DFmode, op0);
13599 op1 = gen_lowpart (V2DFmode, op1);
13600 emit_insn (gen_sse2_movupd (op0, op1));
13604 m = adjust_address (op0, DFmode, 0);
13605 emit_insn (gen_sse2_storelpd (m, op1));
13606 m = adjust_address (op0, DFmode, 8);
13607 emit_insn (gen_sse2_storehpd (m, op1));
13612 if (mode != V4SFmode)
13613 op1 = gen_lowpart (V4SFmode, op1);
13615 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
13617 op0 = gen_lowpart (V4SFmode, op0);
13618 emit_insn (gen_sse_movups (op0, op1));
13622 m = adjust_address (op0, V2SFmode, 0);
13623 emit_insn (gen_sse_storelps (m, op1));
13624 m = adjust_address (op0, V2SFmode, 8);
13625 emit_insn (gen_sse_storehps (m, op1));
13630 gcc_unreachable ();
13633 /* Expand a push in MODE. This is some mode for which we do not support
13634 proper push instructions, at least from the registers that we expect
13635 the value to live in. */
13638 ix86_expand_push (enum machine_mode mode, rtx x)
13642 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
13643 GEN_INT (-GET_MODE_SIZE (mode)),
13644 stack_pointer_rtx, 1, OPTAB_DIRECT);
13645 if (tmp != stack_pointer_rtx)
13646 emit_move_insn (stack_pointer_rtx, tmp);
13648 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
13650 /* When we push an operand onto stack, it has to be aligned at least
13651 at the function argument boundary. However since we don't have
13652 the argument type, we can't determine the actual argument
13653 boundary.  */
13654 emit_move_insn (tmp, x);
13657 /* Helper function of ix86_fixup_binary_operands to canonicalize
13658 operand order. Returns true if the operands should be swapped. */
13661 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
13664 rtx dst = operands[0];
13665 rtx src1 = operands[1];
13666 rtx src2 = operands[2];
13668 /* If the operation is not commutative, we can't do anything. */
13669 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
13672 /* Highest priority is that src1 should match dst. */
13673 if (rtx_equal_p (dst, src1))
13675 if (rtx_equal_p (dst, src2))
13678 /* Next highest priority is that immediate constants come second. */
13679 if (immediate_operand (src2, mode))
13681 if (immediate_operand (src1, mode))
13684 /* Lowest priority is that memory references should come second. */
13694 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
13695 destination to use for the operation. If different from the true
13696 destination in operands[0], a copy operation will be required. */
13699 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
13702 rtx dst = operands[0];
13703 rtx src1 = operands[1];
13704 rtx src2 = operands[2];
13706 /* Canonicalize operand order. */
13707 if (ix86_swap_binary_operands_p (code, mode, operands))
13711 /* It is invalid to swap operands of different modes. */
13712 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
13719 /* Both source operands cannot be in memory. */
13720 if (MEM_P (src1) && MEM_P (src2))
13722 /* Optimization: Only read from memory once. */
13723 if (rtx_equal_p (src1, src2))
13725 src2 = force_reg (mode, src2);
13729 src2 = force_reg (mode, src2);
13732 /* If the destination is memory, and we do not have matching source
13733 operands, do things in registers. */
13734 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13735 dst = gen_reg_rtx (mode);
13737 /* Source 1 cannot be a constant. */
13738 if (CONSTANT_P (src1))
13739 src1 = force_reg (mode, src1);
13741 /* Source 1 cannot be a non-matching memory. */
13742 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13743 src1 = force_reg (mode, src1);
13745 operands[1] = src1;
13746 operands[2] = src2;
13750 /* Similarly, but assume that the destination has already been
13751 set up properly. */
13754 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
13755 enum machine_mode mode, rtx operands[])
13757 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
13758 gcc_assert (dst == operands[0]);
13761 /* Attempt to expand a binary operator.  Make the expansion closer to the
13762 actual machine than just general_operand, which will allow 3 separate
13763 memory references (one output, two input) in a single insn.  */
13766 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
13769 rtx src1, src2, dst, op, clob;
13771 dst = ix86_fixup_binary_operands (code, mode, operands);
13772 src1 = operands[1];
13773 src2 = operands[2];
13775 /* Emit the instruction. */
13777 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
13778 if (reload_in_progress)
13780 /* Reload doesn't know about the flags register, and doesn't know that
13781 it doesn't want to clobber it. We can only do this with PLUS. */
13782 gcc_assert (code == PLUS);
13787 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13788 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13791 /* Fix up the destination if needed. */
13792 if (dst != operands[0])
13793 emit_move_insn (operands[0], dst);
13796 /* Return TRUE or FALSE depending on whether the binary operator meets the
13797 appropriate constraints. */
13800 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
13803 rtx dst = operands[0];
13804 rtx src1 = operands[1];
13805 rtx src2 = operands[2];
13807 /* Both source operands cannot be in memory. */
13808 if (MEM_P (src1) && MEM_P (src2))
13811 /* Canonicalize operand order for commutative operators. */
13812 if (ix86_swap_binary_operands_p (code, mode, operands))
13819 /* If the destination is memory, we must have a matching source operand. */
13820 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13823 /* Source 1 cannot be a constant. */
13824 if (CONSTANT_P (src1))
13827 /* Source 1 cannot be a non-matching memory. */
13828 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13834 /* Attempt to expand a unary operator.  Make the expansion closer to the
13835 actual machine than just general_operand, which will allow 2 separate
13836 memory references (one output, one input) in a single insn.  */
13839 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
13842 int matching_memory;
13843 rtx src, dst, op, clob;
13848 /* If the destination is memory, and we do not have matching source
13849 operands, do things in registers. */
13850 matching_memory = 0;
13853 if (rtx_equal_p (dst, src))
13854 matching_memory = 1;
13856 dst = gen_reg_rtx (mode);
13859 /* When source operand is memory, destination must match. */
13860 if (MEM_P (src) && !matching_memory)
13861 src = force_reg (mode, src);
13863 /* Emit the instruction. */
13865 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
13866 if (reload_in_progress || code == NOT)
13868 /* Reload doesn't know about the flags register, and doesn't know that
13869 it doesn't want to clobber it. */
13870 gcc_assert (code == NOT);
13875 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13876 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13879 /* Fix up the destination if needed. */
13880 if (dst != operands[0])
13881 emit_move_insn (operands[0], dst);
13884 #define LEA_SEARCH_THRESHOLD 12
13886 /* Search backward for a non-AGU definition of register number REGNO1
13887 or register number REGNO2 in INSN's basic block until we
13888 1. pass LEA_SEARCH_THRESHOLD instructions, or
13889 2. reach the BB boundary, or
13890 3. reach an AGU definition.
13891 Returns the distance between the non-AGU definition point and INSN.
13892 If there is no definition point, returns -1.  */
13895 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
13898 basic_block bb = BLOCK_FOR_INSN (insn);
13901 enum attr_type insn_type;
13903 if (insn != BB_HEAD (bb))
13905 rtx prev = PREV_INSN (insn);
13906 while (prev && distance < LEA_SEARCH_THRESHOLD)
13908 if (NONDEBUG_INSN_P (prev))
13911 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13912 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13913 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13914 && (regno1 == DF_REF_REGNO (*def_rec)
13915 || regno2 == DF_REF_REGNO (*def_rec)))
13917 insn_type = get_attr_type (prev);
13918 if (insn_type != TYPE_LEA)
13922 if (prev == BB_HEAD (bb))
13924 prev = PREV_INSN (prev);
13928 if (distance < LEA_SEARCH_THRESHOLD)
13932 bool simple_loop = false;
13934 FOR_EACH_EDGE (e, ei, bb->preds)
13937 simple_loop = true;
13943 rtx prev = BB_END (bb);
13946 && distance < LEA_SEARCH_THRESHOLD)
13948 if (NONDEBUG_INSN_P (prev))
13951 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13952 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13953 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13954 && (regno1 == DF_REF_REGNO (*def_rec)
13955 || regno2 == DF_REF_REGNO (*def_rec)))
13957 insn_type = get_attr_type (prev);
13958 if (insn_type != TYPE_LEA)
13962 prev = PREV_INSN (prev);
13970 /* get_attr_type may modify recog data. We want to make sure
13971 that recog data is valid for instruction INSN, on which
13972 distance_non_agu_define is called. INSN is unchanged here. */
13973 extract_insn_cached (insn);
13977 /* Return the distance between INSN and the next insn that uses
13978 register number REGNO0 in a memory address.  Return -1 if no such
13979 use is found within LEA_SEARCH_THRESHOLD or if REGNO0 is set.  */
13982 distance_agu_use (unsigned int regno0, rtx insn)
13984 basic_block bb = BLOCK_FOR_INSN (insn);
13989 if (insn != BB_END (bb))
13991 rtx next = NEXT_INSN (insn);
13992 while (next && distance < LEA_SEARCH_THRESHOLD)
13994 if (NONDEBUG_INSN_P (next))
13998 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13999 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
14000 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
14001 && regno0 == DF_REF_REGNO (*use_rec))
14003 /* Return DISTANCE if OP0 is used in a memory
14004 address in NEXT.  */
14008 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
14009 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
14010 && !DF_REF_IS_ARTIFICIAL (*def_rec)
14011 && regno0 == DF_REF_REGNO (*def_rec))
14013 /* Return -1 if OP0 is set in NEXT. */
14017 if (next == BB_END (bb))
14019 next = NEXT_INSN (next);
14023 if (distance < LEA_SEARCH_THRESHOLD)
14027 bool simple_loop = false;
14029 FOR_EACH_EDGE (e, ei, bb->succs)
14032 simple_loop = true;
14038 rtx next = BB_HEAD (bb);
14041 && distance < LEA_SEARCH_THRESHOLD)
14043 if (NONDEBUG_INSN_P (next))
14047 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
14048 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
14049 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
14050 && regno0 == DF_REF_REGNO (*use_rec))
14052 /* Return DISTANCE if OP0 is used in a memory
14053 address in NEXT.  */
14057 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
14058 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
14059 && !DF_REF_IS_ARTIFICIAL (*def_rec)
14060 && regno0 == DF_REF_REGNO (*def_rec))
14062 /* Return -1 if OP0 is set in NEXT. */
14067 next = NEXT_INSN (next);
14075 /* Define this macro to tune LEA priority vs. ADD; it takes effect when
14076 there is a choice between LEA and ADD.
14077 Negative value: ADD is preferred over LEA
14079 Positive value: LEA is preferred over ADD  */
14080 #define IX86_LEA_PRIORITY 2
14082 /* Return true if it is ok to optimize an ADD operation to LEA
14083 operation to avoid flag register consumption.  For processors
14084 like ATOM, if the destination register of LEA holds an actual
14085 address which will be used soon, LEA is better; otherwise ADD
14086 is better.  */
14089 ix86_lea_for_add_ok (enum rtx_code code ATTRIBUTE_UNUSED,
14090 rtx insn, rtx operands[])
14092 unsigned int regno0 = true_regnum (operands[0]);
14093 unsigned int regno1 = true_regnum (operands[1]);
14094 unsigned int regno2;
14096 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
14097 return regno0 != regno1;
14099 regno2 = true_regnum (operands[2]);
14101 /* If a = b + c (with a != b and a != c), we must use the lea form.  */
14102 if (regno0 != regno1 && regno0 != regno2)
14106 int dist_define, dist_use;
14107 dist_define = distance_non_agu_define (regno1, regno2, insn);
14108 if (dist_define <= 0)
14111 /* If this insn has both a backward non-AGU dependence and a forward
14112 AGU dependence, the one with the shorter distance takes effect.  */
14113 dist_use = distance_agu_use (regno0, insn);
14115 || (dist_define + IX86_LEA_PRIORITY) < dist_use)
14122 /* Return true if destination reg of SET_BODY is shift count of
14123 USE_BODY.  */
14126 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
14132 /* Retrieve destination of SET_BODY. */
14133 switch (GET_CODE (set_body))
14136 set_dest = SET_DEST (set_body);
14137 if (!set_dest || !REG_P (set_dest))
14141 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
14142 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
14150 /* Retrieve shift count of USE_BODY. */
14151 switch (GET_CODE (use_body))
14154 shift_rtx = XEXP (use_body, 1);
14157 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
14158 if (ix86_dep_by_shift_count_body (set_body,
14159 XVECEXP (use_body, 0, i)))
14167 && (GET_CODE (shift_rtx) == ASHIFT
14168 || GET_CODE (shift_rtx) == LSHIFTRT
14169 || GET_CODE (shift_rtx) == ASHIFTRT
14170 || GET_CODE (shift_rtx) == ROTATE
14171 || GET_CODE (shift_rtx) == ROTATERT))
14173 rtx shift_count = XEXP (shift_rtx, 1);
14175 /* Return true if shift count is dest of SET_BODY. */
14176 if (REG_P (shift_count)
14177 && true_regnum (set_dest) == true_regnum (shift_count))
14184 /* Return true if destination reg of SET_INSN is shift count of
14185 USE_INSN.  */
14188 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
14190 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
14191 PATTERN (use_insn));
14194 /* Return TRUE or FALSE depending on whether the unary operator meets the
14195 appropriate constraints. */
14198 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
14199 enum machine_mode mode ATTRIBUTE_UNUSED,
14200 rtx operands[2] ATTRIBUTE_UNUSED)
14202 /* If one of operands is memory, source and destination must match. */
14203 if ((MEM_P (operands[0])
14204 || MEM_P (operands[1]))
14205 && ! rtx_equal_p (operands[0], operands[1]))
14210 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
14211 are ok, keeping in mind the possible movddup alternative. */
14214 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
14216 if (MEM_P (operands[0]))
14217 return rtx_equal_p (operands[0], operands[1 + high]);
14218 if (MEM_P (operands[1]) && MEM_P (operands[2]))
14219 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
14223 /* Post-reload splitter for converting an SF or DFmode value in an
14224 SSE register into an unsigned SImode. */
14227 ix86_split_convert_uns_si_sse (rtx operands[])
14229 enum machine_mode vecmode;
14230 rtx value, large, zero_or_two31, input, two31, x;
14232 large = operands[1];
14233 zero_or_two31 = operands[2];
14234 input = operands[3];
14235 two31 = operands[4];
14236 vecmode = GET_MODE (large);
14237 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
14239 /* Load up the value into the low element. We must ensure that the other
14240 elements are valid floats -- zero is the easiest such value. */
14243 if (vecmode == V4SFmode)
14244 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
14246 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
14250 input = gen_rtx_REG (vecmode, REGNO (input));
14251 emit_move_insn (value, CONST0_RTX (vecmode));
14252 if (vecmode == V4SFmode)
14253 emit_insn (gen_sse_movss (value, value, input));
14255 emit_insn (gen_sse2_movsd (value, value, input));
14258 emit_move_insn (large, two31);
14259 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
14261 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
14262 emit_insn (gen_rtx_SET (VOIDmode, large, x));
14264 x = gen_rtx_AND (vecmode, zero_or_two31, large);
14265 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
14267 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
14268 emit_insn (gen_rtx_SET (VOIDmode, value, x));
14270 large = gen_rtx_REG (V4SImode, REGNO (large));
14271 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
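/* Lanes whose input was >= 2**31 now hold 0x80000000.  XORing this
   into the converted result below re-adds the 2**31 subtracted from
   those lanes earlier.  */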
14273 x = gen_rtx_REG (V4SImode, REGNO (value));
14274 if (vecmode == V4SFmode)
14275 emit_insn (gen_sse2_cvttps2dq (x, value));
14277 emit_insn (gen_sse2_cvttpd2dq (x, value));
14280 emit_insn (gen_xorv4si3 (value, value, large));
14283 /* Convert an unsigned DImode value into a DFmode, using only SSE.
14284 Expects the 64-bit DImode to be supplied in a pair of integral
14285 registers. Requires SSE2; will use SSE3 if available. For x86_32,
14286 -mfpmath=sse, !optimize_size only. */
14289 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
14291 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
14292 rtx int_xmm, fp_xmm;
14293 rtx biases, exponents;
14296 int_xmm = gen_reg_rtx (V4SImode);
14297 if (TARGET_INTER_UNIT_MOVES)
14298 emit_insn (gen_movdi_to_sse (int_xmm, input));
14299 else if (TARGET_SSE_SPLIT_REGS)
14301 emit_clobber (int_xmm);
14302 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
14306 x = gen_reg_rtx (V2DImode);
14307 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
14308 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
14311 x = gen_rtx_CONST_VECTOR (V4SImode,
14312 gen_rtvec (4, GEN_INT (0x43300000UL),
14313 GEN_INT (0x45300000UL),
14314 const0_rtx, const0_rtx));
14315 exponents = validize_mem (force_const_mem (V4SImode, x));
14317 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
14318 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
14320 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
14321 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
14322 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
14323 (0x1.0p84 + double(fp_value_hi_xmm)).
14324 Note these exponents differ by 32. */
14326 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
14328 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
14329 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
14330 real_ldexp (&bias_lo_rvt, &dconst1, 52);
14331 real_ldexp (&bias_hi_rvt, &dconst1, 84);
14332 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
14333 x = const_double_from_real_value (bias_hi_rvt, DFmode);
14334 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
14335 biases = validize_mem (force_const_mem (V2DFmode, biases));
14336 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
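/* fp_xmm now holds { (double) lo, (double) hi * 0x1p32 }; summing
   its two elements reconstructs the original unsigned value.  */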
14338 /* Add the upper and lower DFmode values together. */
14340 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
14343 x = copy_to_mode_reg (V2DFmode, fp_xmm);
14344 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
14345 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
14348 ix86_expand_vector_extract (false, target, fp_xmm, 0);
14351 /* Not used, but eases macroization of patterns. */
14353 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
14354 rtx input ATTRIBUTE_UNUSED)
14356 gcc_unreachable ();
14359 /* Convert an unsigned SImode value into a DFmode. Only currently used
14360 for SSE, but applicable anywhere. */
14363 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
14365 REAL_VALUE_TYPE TWO31r;
14368 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
14369 NULL, 1, OPTAB_DIRECT);
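/* input - 2**31, reinterpreted as signed SImode, maps the whole
   unsigned range onto [-2**31, 2**31); the 2**31 is added back in
   DFmode below.  */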
14371 fp = gen_reg_rtx (DFmode);
14372 emit_insn (gen_floatsidf2 (fp, x));
14374 real_ldexp (&TWO31r, &dconst1, 31);
14375 x = const_double_from_real_value (TWO31r, DFmode);
14377 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
14379 emit_move_insn (target, x);
14382 /* Convert a signed DImode value into a DFmode. Only used for SSE in
14383 32-bit mode; otherwise we have a direct convert instruction. */
14386 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
14388 REAL_VALUE_TYPE TWO32r;
14389 rtx fp_lo, fp_hi, x;
14391 fp_lo = gen_reg_rtx (DFmode);
14392 fp_hi = gen_reg_rtx (DFmode);
14394 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
14396 real_ldexp (&TWO32r, &dconst1, 32);
14397 x = const_double_from_real_value (TWO32r, DFmode);
14398 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
14400 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
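/* target = (double) (signed) hi * 2**32 + (double) (unsigned) lo.  */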
14402 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
14405 emit_move_insn (target, x);
14408 /* Convert an unsigned SImode value into a SFmode, using only SSE.
14409 For x86_32, -mfpmath=sse, !optimize_size only. */
14411 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
14413 REAL_VALUE_TYPE ONE16r;
14414 rtx fp_hi, fp_lo, int_hi, int_lo, x;
14416 real_ldexp (&ONE16r, &dconst1, 16);
14417 x = const_double_from_real_value (ONE16r, SFmode);
14418 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
14419 NULL, 0, OPTAB_DIRECT);
14420 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
14421 NULL, 0, OPTAB_DIRECT);
14422 fp_hi = gen_reg_rtx (SFmode);
14423 fp_lo = gen_reg_rtx (SFmode);
14424 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
14425 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
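/* Each 16-bit half converts to SFmode exactly; recombine below as
   fp_hi * 2**16 + fp_lo.  */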
14426 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
14428 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
14430 if (!rtx_equal_p (target, fp_hi))
14431 emit_move_insn (target, fp_hi);
14434 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
14435 then replicate the value for all elements of the vector
14436 register.  */
14439 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
14446 v = gen_rtvec (4, value, value, value, value);
14447 return gen_rtx_CONST_VECTOR (V4SImode, v);
14451 v = gen_rtvec (2, value, value);
14452 return gen_rtx_CONST_VECTOR (V2DImode, v);
14456 v = gen_rtvec (4, value, value, value, value);
14458 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
14459 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
14460 return gen_rtx_CONST_VECTOR (V4SFmode, v);
14464 v = gen_rtvec (2, value, value);
14466 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
14467 return gen_rtx_CONST_VECTOR (V2DFmode, v);
14470 gcc_unreachable ();
14474 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
14475 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
14476 for an SSE register. If VECT is true, then replicate the mask for
14477 all elements of the vector register. If INVERT is true, then create
14478 a mask excluding the sign bit. */
14481 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
14483 enum machine_mode vec_mode, imode;
14484 HOST_WIDE_INT hi, lo;
14489 /* Find the sign bit, sign extended to 2*HWI. */
14495 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
14496 lo = 0x80000000, hi = lo < 0;
14502 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
14503 if (HOST_BITS_PER_WIDE_INT >= 64)
14504 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
14506 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14511 vec_mode = VOIDmode;
14512 if (HOST_BITS_PER_WIDE_INT >= 64)
14515 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
14522 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14526 lo = ~lo, hi = ~hi;
14532 mask = immed_double_const (lo, hi, imode);
14534 vec = gen_rtvec (2, v, mask);
14535 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
14536 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
14543 gcc_unreachable ();
14547 lo = ~lo, hi = ~hi;
14549 /* Force this value into the low part of a fp vector constant. */
14550 mask = immed_double_const (lo, hi, imode);
14551 mask = gen_lowpart (mode, mask);
14553 if (vec_mode == VOIDmode)
14554 return force_reg (mode, mask);
14556 v = ix86_build_const_vector (mode, vect, mask);
14557 return force_reg (vec_mode, v);
14560 /* Generate code for floating point ABS or NEG. */
14563 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
14566 rtx mask, set, use, clob, dst, src;
14567 bool use_sse = false;
14568 bool vector_mode = VECTOR_MODE_P (mode);
14569 enum machine_mode elt_mode = mode;
14573 elt_mode = GET_MODE_INNER (mode);
14576 else if (mode == TFmode)
14578 else if (TARGET_SSE_MATH)
14579 use_sse = SSE_FLOAT_MODE_P (mode);
14581 /* NEG and ABS performed with SSE use bitwise mask operations.
14582 Create the appropriate mask now. */
14584 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
14593 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
14594 set = gen_rtx_SET (VOIDmode, dst, set);
14599 set = gen_rtx_fmt_e (code, mode, src);
14600 set = gen_rtx_SET (VOIDmode, dst, set);
14603 use = gen_rtx_USE (VOIDmode, mask);
14604 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
14605 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14606 gen_rtvec (3, set, use, clob)));
14613 /* Expand a copysign operation. Special case operand 0 being a constant. */
14616 ix86_expand_copysign (rtx operands[])
14618 enum machine_mode mode;
14619 rtx dest, op0, op1, mask, nmask;
14621 dest = operands[0];
14625 mode = GET_MODE (dest);
14627 if (GET_CODE (op0) == CONST_DOUBLE)
14629 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
14631 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
14632 op0 = simplify_unary_operation (ABS, mode, op0, mode);
14634 if (mode == SFmode || mode == DFmode)
14636 enum machine_mode vmode;
14638 vmode = mode == SFmode ? V4SFmode : V2DFmode;
14640 if (op0 == CONST0_RTX (mode))
14641 op0 = CONST0_RTX (vmode);
14644 rtx v = ix86_build_const_vector (mode, false, op0);
14646 op0 = force_reg (vmode, v);
14649 else if (op0 != CONST0_RTX (mode))
14650 op0 = force_reg (mode, op0);
14652 mask = ix86_build_signbit_mask (mode, 0, 0);
14654 if (mode == SFmode)
14655 copysign_insn = gen_copysignsf3_const;
14656 else if (mode == DFmode)
14657 copysign_insn = gen_copysigndf3_const;
14659 copysign_insn = gen_copysigntf3_const;
14661 emit_insn (copysign_insn (dest, op0, op1, mask));
14665 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
14667 nmask = ix86_build_signbit_mask (mode, 0, 1);
14668 mask = ix86_build_signbit_mask (mode, 0, 0);
14670 if (mode == SFmode)
14671 copysign_insn = gen_copysignsf3_var;
14672 else if (mode == DFmode)
14673 copysign_insn = gen_copysigndf3_var;
14675 copysign_insn = gen_copysigntf3_var;
14677 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
14681 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
14682 be a constant, and so has already been expanded into a vector constant. */
14685 ix86_split_copysign_const (rtx operands[])
14687 enum machine_mode mode, vmode;
14688 rtx dest, op0, mask, x;
14690 dest = operands[0];
14692 mask = operands[3];
14694 mode = GET_MODE (dest);
14695 vmode = GET_MODE (mask);
14697 dest = simplify_gen_subreg (vmode, dest, mode, 0);
14698 x = gen_rtx_AND (vmode, dest, mask);
14699 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14701 if (op0 != CONST0_RTX (vmode))
14703 x = gen_rtx_IOR (vmode, dest, op0);
14704 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14708 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
14709 so we have to do two masks. */
14712 ix86_split_copysign_var (rtx operands[])
14714 enum machine_mode mode, vmode;
14715 rtx dest, scratch, op0, op1, mask, nmask, x;
14717 dest = operands[0];
14718 scratch = operands[1];
14721 nmask = operands[4];
14722 mask = operands[5];
14724 mode = GET_MODE (dest);
14725 vmode = GET_MODE (mask);
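/* The goal is dest = (op0 & nmask) | (op1 & mask), where mask covers
   only the sign bit and nmask everything else; the alternatives below
   differ only in which operands share registers.  */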
14727 if (rtx_equal_p (op0, op1))
14729 /* Shouldn't happen often (it's useless, obviously), but when it does
14730 we'd generate incorrect code if we continue below. */
14731 emit_move_insn (dest, op0);
14735 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
14737 gcc_assert (REGNO (op1) == REGNO (scratch));
14739 x = gen_rtx_AND (vmode, scratch, mask);
14740 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14743 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14744 x = gen_rtx_NOT (vmode, dest);
14745 x = gen_rtx_AND (vmode, x, op0);
14746 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14750 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
14752 x = gen_rtx_AND (vmode, scratch, mask);
14754 else /* alternative 2,4 */
14756 gcc_assert (REGNO (mask) == REGNO (scratch));
14757 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
14758 x = gen_rtx_AND (vmode, scratch, op1);
14760 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14762 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
14764 dest = simplify_gen_subreg (vmode, op0, mode, 0);
14765 x = gen_rtx_AND (vmode, dest, nmask);
14767 else /* alternative 3,4 */
14769 gcc_assert (REGNO (nmask) == REGNO (dest));
14771 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14772 x = gen_rtx_AND (vmode, dest, op0);
14774 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14777 x = gen_rtx_IOR (vmode, dest, scratch);
14778 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14781 /* Return TRUE or FALSE depending on whether the first SET in INSN
14782 has source and destination with matching CC modes, and whether the
14783 CC mode is at least as constrained as REQ_MODE.  */
14786 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
14789 enum machine_mode set_mode;
14791 set = PATTERN (insn);
14792 if (GET_CODE (set) == PARALLEL)
14793 set = XVECEXP (set, 0, 0);
14794 gcc_assert (GET_CODE (set) == SET);
14795 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
14797 set_mode = GET_MODE (SET_DEST (set));
14801 if (req_mode != CCNOmode
14802 && (req_mode != CCmode
14803 || XEXP (SET_SRC (set), 1) != const0_rtx))
14807 if (req_mode == CCGCmode)
14811 if (req_mode == CCGOCmode || req_mode == CCNOmode)
14815 if (req_mode == CCZmode)
14826 gcc_unreachable ();
14829 return (GET_MODE (SET_SRC (set)) == set_mode);
14832 /* Generate insn patterns to do an integer compare of OPERANDS. */
14835 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
14837 enum machine_mode cmpmode;
14840 cmpmode = SELECT_CC_MODE (code, op0, op1);
14841 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
14843 /* This is very simple, but making the interface the same as in the
14844 FP case makes the rest of the code easier. */
14845 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
14846 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
14848 /* Return the test that should be put into the flags user, i.e.
14849 the bcc, scc, or cmov instruction. */
14850 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
14853 /* Figure out whether to use ordered or unordered fp comparisons.
14854 Return the appropriate mode to use. */
14857 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
14859 /* ??? In order to make all comparisons reversible, we do all comparisons
14860 non-trapping when compiling for IEEE.  Once gcc is able to distinguish
14861 between all forms of trapping and nontrapping comparisons, we can make
14862 inequality comparisons trapping again, since it results in better code
14863 when using FCOM based compares.  */
14864 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
14868 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
14870 enum machine_mode mode = GET_MODE (op0);
14872 if (SCALAR_FLOAT_MODE_P (mode))
14874 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14875 return ix86_fp_compare_mode (code);
14880 /* Only zero flag is needed. */
14881 case EQ: /* ZF=0 */
14882 case NE: /* ZF!=0 */
14884 /* Codes needing carry flag. */
14885 case GEU: /* CF=0 */
14886 case LTU: /* CF=1 */
14887 /* Detect overflow checks. They need just the carry flag. */
14888 if (GET_CODE (op0) == PLUS
14889 && rtx_equal_p (op1, XEXP (op0, 0)))
14893 case GTU: /* CF=0 & ZF=0 */
14894 case LEU: /* CF=1 | ZF=1 */
14895 /* Detect overflow checks. They need just the carry flag. */
14896 if (GET_CODE (op0) == MINUS
14897 && rtx_equal_p (op1, XEXP (op0, 0)))
14901 /* Codes possibly doable only with sign flag when
14902 comparing against zero. */
14903 case GE: /* SF=OF or SF=0 */
14904 case LT: /* SF<>OF or SF=1 */
14905 if (op1 == const0_rtx)
14908 /* For other cases Carry flag is not required. */
14910 /* Codes doable only with sign flag when comparing
14911 against zero, but we miss jump instruction for it
14912 so we need to use relational tests against overflow
14913 that thus needs to be zero. */
14914 case GT: /* ZF=0 & SF=OF */
14915 case LE: /* ZF=1 | SF<>OF */
14916 if (op1 == const0_rtx)
14920 /* The strcmp patterns do (use flags), and combine may ask us for the
14921 proper mode.  */
14925 gcc_unreachable ();
14929 /* Return the fixed registers used for condition codes. */
14932 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
14939 /* If two condition code modes are compatible, return a condition code
14940 mode which is compatible with both. Otherwise, return
14943 static enum machine_mode
14944 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
14949 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
14952 if ((m1 == CCGCmode && m2 == CCGOCmode)
14953 || (m1 == CCGOCmode && m2 == CCGCmode))
14959 gcc_unreachable ();
14989 /* These are only compatible with themselves, which we already
14996 /* Return a comparison we can do that is equivalent to
14997 swap_condition (code), apart possibly from orderedness.
14998 But never change orderedness if TARGET_IEEE_FP, returning
14999 UNKNOWN in that case if necessary.  */
15001 static enum rtx_code
15002 ix86_fp_swap_condition (enum rtx_code code)
15006 case GT: /* GTU - CF=0 & ZF=0 */
15007 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
15008 case GE: /* GEU - CF=0 */
15009 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
15010 case UNLT: /* LTU - CF=1 */
15011 return TARGET_IEEE_FP ? UNKNOWN : GT;
15012 case UNLE: /* LEU - CF=1 | ZF=1 */
15013 return TARGET_IEEE_FP ? UNKNOWN : GE;
15015 return swap_condition (code);
15019 /* Return the cost of comparison CODE using the best strategy for performance.
15020 All the following functions use the number of instructions as the cost metric.
15021 In the future this should be tweaked to compute bytes for optimize_size and
15022 take into account the performance of various instructions on various CPUs.  */
15025 ix86_fp_comparison_cost (enum rtx_code code)
15029 /* The cost of code using bit-twiddling on %ah. */
15046 arith_cost = TARGET_IEEE_FP ? 5 : 4;
15050 arith_cost = TARGET_IEEE_FP ? 6 : 4;
15053 gcc_unreachable ();
15056 switch (ix86_fp_comparison_strategy (code))
15058 case IX86_FPCMP_COMI:
15059 return arith_cost > 4 ? 3 : 2;
15060 case IX86_FPCMP_SAHF:
15061 return arith_cost > 4 ? 4 : 3;
15067 /* Return the strategy to use for floating-point.  We assume that fcomi is
15068 always preferable where available, since that is also true when looking at
15069 size (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test).  */
15071 enum ix86_fpcmp_strategy
15072 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
15074 /* Do fcomi/sahf based test when profitable. */
15077 return IX86_FPCMP_COMI;
15079 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
15080 return IX86_FPCMP_SAHF;
15082 return IX86_FPCMP_ARITH;
15085 /* Swap, force into registers, or otherwise massage the two operands
15086 to a fp comparison. The operands are updated in place; the new
15087 comparison code is returned. */
15089 static enum rtx_code
15090 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
15092 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
15093 rtx op0 = *pop0, op1 = *pop1;
15094 enum machine_mode op_mode = GET_MODE (op0);
15095 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
15097 /* All of the unordered compare instructions only work on registers.
15098 The same is true of the fcomi compare instructions. The XFmode
15099 compare instructions require registers except when comparing
15100 against zero or when converting operand 1 from fixed point to
15101 floating point.  */
15104 && (fpcmp_mode == CCFPUmode
15105 || (op_mode == XFmode
15106 && ! (standard_80387_constant_p (op0) == 1
15107 || standard_80387_constant_p (op1) == 1)
15108 && GET_CODE (op1) != FLOAT)
15109 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
15111 op0 = force_reg (op_mode, op0);
15112 op1 = force_reg (op_mode, op1);
15116 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
15117 things around if they appear profitable, otherwise force op0
15118 into a register. */
15120 if (standard_80387_constant_p (op0) == 0
15122 && ! (standard_80387_constant_p (op1) == 0
15125 enum rtx_code new_code = ix86_fp_swap_condition (code);
15126 if (new_code != UNKNOWN)
15129 tmp = op0, op0 = op1, op1 = tmp;
15135 op0 = force_reg (op_mode, op0);
15137 if (CONSTANT_P (op1))
15139 int tmp = standard_80387_constant_p (op1);
15141 op1 = validize_mem (force_const_mem (op_mode, op1));
15145 op1 = force_reg (op_mode, op1);
15148 op1 = force_reg (op_mode, op1);
15152 /* Try to rearrange the comparison to make it cheaper. */
15153 if (ix86_fp_comparison_cost (code)
15154 > ix86_fp_comparison_cost (swap_condition (code))
15155 && (REG_P (op1) || can_create_pseudo_p ()))
15158 tmp = op0, op0 = op1, op1 = tmp;
15159 code = swap_condition (code);
15161 op0 = force_reg (op_mode, op0);
15169 /* Convert comparison codes we use to represent FP comparison to integer
15170 code that will result in a proper branch.  Return UNKNOWN if no such code
15171 is available.  */
15174 ix86_fp_compare_code_to_integer (enum rtx_code code)
15203 /* Generate insn patterns to do a floating point compare of OPERANDS. */
15206 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
15208 enum machine_mode fpcmp_mode, intcmp_mode;
15211 fpcmp_mode = ix86_fp_compare_mode (code);
15212 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
15214 /* Do fcomi/sahf based test when profitable. */
15215 switch (ix86_fp_comparison_strategy (code))
15217 case IX86_FPCMP_COMI:
15218 intcmp_mode = fpcmp_mode;
15219 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15220 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
15225 case IX86_FPCMP_SAHF:
15226 intcmp_mode = fpcmp_mode;
15227 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15228 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
15232 scratch = gen_reg_rtx (HImode);
15233 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
15234 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
15237 case IX86_FPCMP_ARITH:
15238 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
15239 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15240 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
15242 scratch = gen_reg_rtx (HImode);
15243 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
15245 /* In the unordered case, we have to check C2 for NaN's, which
15246 doesn't happen to work out to anything nice combination-wise.
15247 So do some bit twiddling on the value we've got in AH to come
15248 up with an appropriate set of condition codes. */
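/* After fnstsw, AH holds the status-word condition flags: C0 at
   0x01, C2 at 0x04, C3 at 0x40.  Masks like 0x45 below therefore
   test C3, C2 and C0 at once.  */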
15250 intcmp_mode = CCNOmode;
15255 if (code == GT || !TARGET_IEEE_FP)
15257 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15262 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15263 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15264 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
15265 intcmp_mode = CCmode;
15271 if (code == LT && TARGET_IEEE_FP)
15273 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15274 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
15275 intcmp_mode = CCmode;
15280 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
15286 if (code == GE || !TARGET_IEEE_FP)
15288 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
15293 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15294 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
15300 if (code == LE && TARGET_IEEE_FP)
15302 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15303 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15304 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15305 intcmp_mode = CCmode;
15310 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15316 if (code == EQ && TARGET_IEEE_FP)
15318 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15319 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15320 intcmp_mode = CCmode;
15325 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15331 if (code == NE && TARGET_IEEE_FP)
15333 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15334 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
15340 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15346 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15350 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15355 gcc_unreachable ();
15363 /* Return the test that should be put into the flags user, i.e.
15364 the bcc, scc, or cmov instruction. */
15365 return gen_rtx_fmt_ee (code, VOIDmode,
15366 gen_rtx_REG (intcmp_mode, FLAGS_REG),
15371 ix86_expand_compare (enum rtx_code code)
15374 op0 = ix86_compare_op0;
15375 op1 = ix86_compare_op1;
15377 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC)
15378 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_op0, ix86_compare_op1);
15380 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
15382 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
15383 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15386 ret = ix86_expand_int_compare (code, op0, op1);
15392 ix86_expand_branch (enum rtx_code code, rtx label)
15396 switch (GET_MODE (ix86_compare_op0))
15405 tmp = ix86_expand_compare (code);
15406 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
15407 gen_rtx_LABEL_REF (VOIDmode, label),
15409 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
15416 /* Expand DImode branch into multiple compare+branch. */
15418 rtx lo[2], hi[2], label2;
15419 enum rtx_code code1, code2, code3;
15420 enum machine_mode submode;
15422 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
15424 tmp = ix86_compare_op0;
15425 ix86_compare_op0 = ix86_compare_op1;
15426 ix86_compare_op1 = tmp;
15427 code = swap_condition (code);
15429 if (GET_MODE (ix86_compare_op0) == DImode)
15431 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
15432 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
15437 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
15438 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
15442 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
15443 avoid two branches. This costs one extra insn, so disable when
15444 optimizing for size. */
15446 if ((code == EQ || code == NE)
15447 && (!optimize_insn_for_size_p ()
15448 || hi[1] == const0_rtx || lo[1] == const0_rtx))
15453 if (hi[1] != const0_rtx)
15454 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
15455 NULL_RTX, 0, OPTAB_WIDEN);
15458 if (lo[1] != const0_rtx)
15459 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
15460 NULL_RTX, 0, OPTAB_WIDEN);
15462 tmp = expand_binop (submode, ior_optab, xor1, xor0,
15463 NULL_RTX, 0, OPTAB_WIDEN);
15465 ix86_compare_op0 = tmp;
15466 ix86_compare_op1 = const0_rtx;
15467 ix86_expand_branch (code, label);
15471 /* Otherwise, if we are doing a less-than or greater-or-equal comparison,
15472 op1 is a constant, and the low word is zero, then we can just
15473 examine the high word.  Similarly for a low word of -1 with
15474 less-or-equal or greater-than.  */
15476 if (CONST_INT_P (hi[1]))
15479 case LT: case LTU: case GE: case GEU:
15480 if (lo[1] == const0_rtx)
15482 ix86_compare_op0 = hi[0];
15483 ix86_compare_op1 = hi[1];
15484 ix86_expand_branch (code, label);
15488 case LE: case LEU: case GT: case GTU:
15489 if (lo[1] == constm1_rtx)
15491 ix86_compare_op0 = hi[0];
15492 ix86_compare_op1 = hi[1];
15493 ix86_expand_branch (code, label);
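/* Illustrative note (not part of GCC): with the constant's low word
   zero, the low words cannot affect the result, e.g. for a 64-bit A
   and C with lo (C) == 0,

     A < C   <==>   hi (A) < hi (C)

   and with lo (C) all-ones, A <= C likewise reduces to
   hi (A) <= hi (C), so only the high words need to be compared.  */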
15501 /* Otherwise, we need two or three jumps. */
15503 label2 = gen_label_rtx ();
15506 code2 = swap_condition (code);
15507 code3 = unsigned_condition (code);
15511 case LT: case GT: case LTU: case GTU:
15514 case LE: code1 = LT; code2 = GT; break;
15515 case GE: code1 = GT; code2 = LT; break;
15516 case LEU: code1 = LTU; code2 = GTU; break;
15517 case GEU: code1 = GTU; code2 = LTU; break;
15519 case EQ: code1 = UNKNOWN; code2 = NE; break;
15520 case NE: code2 = UNKNOWN; break;
15523 gcc_unreachable ();
15528 * if (hi(a) < hi(b)) goto true;
15529 * if (hi(a) > hi(b)) goto false;
15530 * if (lo(a) < lo(b)) goto true;
15534 ix86_compare_op0 = hi[0];
15535 ix86_compare_op1 = hi[1];
15537 if (code1 != UNKNOWN)
15538 ix86_expand_branch (code1, label);
15539 if (code2 != UNKNOWN)
15540 ix86_expand_branch (code2, label2);
15542 ix86_compare_op0 = lo[0];
15543 ix86_compare_op1 = lo[1];
15544 ix86_expand_branch (code3, label);
15546 if (code2 != UNKNOWN)
15547 emit_label (label2);
15552 /* If we have already emitted a compare insn, go straight to simple;
15553 ix86_expand_compare won't emit anything if ix86_compare_emitted is non-NULL. */
15555 gcc_assert (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC);
15560 /* Split branch based on floating point condition. */
15562 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
15563 rtx target1, rtx target2, rtx tmp, rtx pushed)
15568 if (target2 != pc_rtx)
15571 code = reverse_condition_maybe_unordered (code);
15576 condition = ix86_expand_fp_compare (code, op1, op2,
15579 /* Remove pushed operand from stack. */
15581 ix86_free_from_memory (GET_MODE (pushed));
15583 i = emit_jump_insn (gen_rtx_SET
15585 gen_rtx_IF_THEN_ELSE (VOIDmode,
15586 condition, target1, target2)));
15587 if (split_branch_probability >= 0)
15588 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
15592 ix86_expand_setcc (enum rtx_code code, rtx dest)
15596 gcc_assert (GET_MODE (dest) == QImode);
15598 ret = ix86_expand_compare (code);
15599 PUT_MODE (ret, QImode);
15600 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
15603 /* Expand a comparison setting or clearing the carry flag. Return true when
15604 successful and set *POP to the comparison operation. */
15606 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
15608 enum machine_mode mode =
15609 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
15611 /* Do not handle double-word compares; those go through a special path. */
15612 if (mode == (TARGET_64BIT ? TImode : DImode))
15615 if (SCALAR_FLOAT_MODE_P (mode))
15617 rtx compare_op, compare_seq;
15619 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
15621 /* Shortcut: the following common codes never translate
15622 into carry-flag compares. */
15623 if (code == EQ || code == NE || code == UNEQ || code == LTGT
15624 || code == ORDERED || code == UNORDERED)
15627 /* These comparisons require the zero flag; swap the operands so they no longer do. */
15628 if ((code == GT || code == UNLE || code == LE || code == UNGT)
15629 && !TARGET_IEEE_FP)
15634 code = swap_condition (code);
15637 /* Try to expand the comparison and verify that we end up with
15638 a carry-flag-based comparison. This fails to be true only when
15639 we decide to expand the comparison using arithmetic, which is
15640 not a common scenario. */
15642 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15643 compare_seq = get_insns ();
15646 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15647 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15648 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
15650 code = GET_CODE (compare_op);
15652 if (code != LTU && code != GEU)
15655 emit_insn (compare_seq);
15660 if (!INTEGRAL_MODE_P (mode))
15669 /* Convert a==0 into (unsigned)a<1. */
15672 if (op1 != const0_rtx)
15675 code = (code == EQ ? LTU : GEU);
15678 /* Convert a>b into b<a or a>=b+1. */
15681 if (CONST_INT_P (op1))
15683 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
15684 /* Bail out on overflow. We could still swap the operands, but that
15685 would force loading the constant into a register. */
15686 if (op1 == const0_rtx
15687 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
15689 code = (code == GTU ? GEU : LTU);
15696 code = (code == GTU ? LTU : GEU);
15700 /* Convert a>=0 into (unsigned)a<0x80000000. */
15703 if (mode == DImode || op1 != const0_rtx)
15705 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15706 code = (code == LT ? GEU : LTU);
15710 if (mode == DImode || op1 != constm1_rtx)
15712 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15713 code = (code == LE ? GEU : LTU);
15719 /* Swapping the operands may cause a constant to appear as the first operand. */
15720 if (!nonimmediate_operand (op0, VOIDmode))
15722 if (!can_create_pseudo_p ())
15724 op0 = force_reg (mode, op0);
15726 ix86_compare_op0 = op0;
15727 ix86_compare_op1 = op1;
15728 *pop = ix86_expand_compare (code);
15729 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
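/* Illustrative sketch (not part of GCC): the rewrites above reduce the
   supported conditions to LTU/GEU, which map directly onto the carry
   flag:

     a == 0   <==>   a < 1                        (EQ  -> LTU)
     a >  b   <==>   a >= b + 1                   (GTU -> GEU, no wraparound)
     a >= 0   <==>   (unsigned) a < 0x80000000    (signed GE -> LTU)  */
#if 0
static int
ge0 (int a)
{
  /* Same truth value as a >= 0 for 32-bit int.  */
  return (unsigned int) a < 0x80000000u;
}
#endif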
15734 ix86_expand_int_movcc (rtx operands[])
15736 enum rtx_code code = GET_CODE (operands[1]), compare_code;
15737 rtx compare_seq, compare_op;
15738 enum machine_mode mode = GET_MODE (operands[0]);
15739 bool sign_bit_compare_p = false;
15742 ix86_compare_op0 = XEXP (operands[1], 0);
15743 ix86_compare_op1 = XEXP (operands[1], 1);
15744 compare_op = ix86_expand_compare (code);
15745 compare_seq = get_insns ();
15748 compare_code = GET_CODE (compare_op);
15750 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
15751 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
15752 sign_bit_compare_p = true;
15754 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
15755 HImode insns, we'd be swallowed in word prefix ops. */
15757 if ((mode != HImode || TARGET_FAST_PREFIX)
15758 && (mode != (TARGET_64BIT ? TImode : DImode))
15759 && CONST_INT_P (operands[2])
15760 && CONST_INT_P (operands[3]))
15762 rtx out = operands[0];
15763 HOST_WIDE_INT ct = INTVAL (operands[2]);
15764 HOST_WIDE_INT cf = INTVAL (operands[3]);
15765 HOST_WIDE_INT diff;
15768 /* Sign bit compares are better done using shifts than by using sbb. */
15770 if (sign_bit_compare_p
15771 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15772 ix86_compare_op1, &compare_op))
15774 /* Detect overlap between destination and compare sources. */
15777 if (!sign_bit_compare_p)
15780 bool fpcmp = false;
15782 compare_code = GET_CODE (compare_op);
15784 flags = XEXP (compare_op, 0);
15786 if (GET_MODE (flags) == CCFPmode
15787 || GET_MODE (flags) == CCFPUmode)
15791 = ix86_fp_compare_code_to_integer (compare_code);
15794 /* To simplify the rest of the code, restrict to the GEU case. */
15795 if (compare_code == LTU)
15797 HOST_WIDE_INT tmp = ct;
15800 compare_code = reverse_condition (compare_code);
15801 code = reverse_condition (code);
15806 PUT_CODE (compare_op,
15807 reverse_condition_maybe_unordered
15808 (GET_CODE (compare_op)));
15810 PUT_CODE (compare_op,
15811 reverse_condition (GET_CODE (compare_op)));
15815 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
15816 || reg_overlap_mentioned_p (out, ix86_compare_op1))
15817 tmp = gen_reg_rtx (mode);
15819 if (mode == DImode)
15820 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
15822 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
15823 flags, compare_op));
15827 if (code == GT || code == GE)
15828 code = reverse_condition (code);
15831 HOST_WIDE_INT tmp = ct;
15836 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
15837 ix86_compare_op1, VOIDmode, 0, -1);
15850 tmp = expand_simple_binop (mode, PLUS,
15852 copy_rtx (tmp), 1, OPTAB_DIRECT);
15863 tmp = expand_simple_binop (mode, IOR,
15865 copy_rtx (tmp), 1, OPTAB_DIRECT);
15867 else if (diff == -1 && ct)
15877 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15879 tmp = expand_simple_binop (mode, PLUS,
15880 copy_rtx (tmp), GEN_INT (cf),
15881 copy_rtx (tmp), 1, OPTAB_DIRECT);
15889 * andl cf - ct, dest
15899 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15902 tmp = expand_simple_binop (mode, AND,
15904 gen_int_mode (cf - ct, mode),
15905 copy_rtx (tmp), 1, OPTAB_DIRECT);
15907 tmp = expand_simple_binop (mode, PLUS,
15908 copy_rtx (tmp), GEN_INT (ct),
15909 copy_rtx (tmp), 1, OPTAB_DIRECT);
15912 if (!rtx_equal_p (tmp, out))
15913 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
15915 return 1; /* DONE */
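/* Illustrative sketch (not part of GCC): "sbb reg,reg" after a compare
   materializes the carry flag as a 0 / -1 mask, from which either
   constant is then formed branchlessly.  */
#if 0
static int
sbb_select (unsigned int a, unsigned int b, int ct, int cf)
{
  int mask = a < b ? -1 : 0;       /* what sbb computes from CF */
  return ((cf - ct) & mask) + ct;  /* mask ? cf : ct */
}
#endif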
15920 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15923 tmp = ct, ct = cf, cf = tmp;
15926 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15928 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15930 /* We may be reversing an unordered compare to a normal compare, which
15931 is not valid in general (we may convert a non-trapping condition
15932 to a trapping one); however, on i386 we currently emit all
15933 comparisons unordered. */
15934 compare_code = reverse_condition_maybe_unordered (compare_code);
15935 code = reverse_condition_maybe_unordered (code);
15939 compare_code = reverse_condition (compare_code);
15940 code = reverse_condition (code);
15944 compare_code = UNKNOWN;
15945 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
15946 && CONST_INT_P (ix86_compare_op1))
15948 if (ix86_compare_op1 == const0_rtx
15949 && (code == LT || code == GE))
15950 compare_code = code;
15951 else if (ix86_compare_op1 == constm1_rtx)
15955 else if (code == GT)
15960 /* Optimize dest = (op0 < 0) ? -1 : cf. */
15961 if (compare_code != UNKNOWN
15962 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
15963 && (cf == -1 || ct == -1))
15965 /* If the lea code below could be used, only optimize
15966 if it results in a 2-insn sequence. */
15968 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
15969 || diff == 3 || diff == 5 || diff == 9)
15970 || (compare_code == LT && ct == -1)
15971 || (compare_code == GE && cf == -1))
15974 * notl op1 (if necessary)
15982 code = reverse_condition (code);
15985 out = emit_store_flag (out, code, ix86_compare_op0,
15986 ix86_compare_op1, VOIDmode, 0, -1);
15988 out = expand_simple_binop (mode, IOR,
15990 out, 1, OPTAB_DIRECT);
15991 if (out != operands[0])
15992 emit_move_insn (operands[0], out);
15994 return 1; /* DONE */
15999 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
16000 || diff == 3 || diff == 5 || diff == 9)
16001 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
16003 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
16009 * lea cf(dest*(ct-cf)),dest
16013 * This also catches the degenerate setcc-only case.
16019 out = emit_store_flag (out, code, ix86_compare_op0,
16020 ix86_compare_op1, VOIDmode, 0, 1);
16023 /* On x86_64 the lea instruction operates on Pmode, so we need
16024 to get the arithmetic done in the proper mode to match. */
16026 tmp = copy_rtx (out);
16030 out1 = copy_rtx (out);
16031 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
16035 tmp = gen_rtx_PLUS (mode, tmp, out1);
16041 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
16044 if (!rtx_equal_p (tmp, out))
16047 out = force_operand (tmp, copy_rtx (out));
16049 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
16051 if (!rtx_equal_p (out, operands[0]))
16052 emit_move_insn (operands[0], copy_rtx (out));
16054 return 1; /* DONE */
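/* Illustrative sketch (not part of GCC): with FLAG set to 0 or 1 by
   setcc, the selection becomes a single address computation, which
   lea can encode when the difference is 1, 2, 3, 4, 5, 8 or 9.  */
#if 0
static int
lea_select (int flag, int ct, int cf)
{
  return cf + flag * (ct - cf);   /* flag ? ct : cf */
}
#endif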
16058 * General case: Jumpful:
16059 * xorl dest,dest cmpl op1, op2
16060 * cmpl op1, op2 movl ct, dest
16061 * setcc dest jcc 1f
16062 * decl dest movl cf, dest
16063 * andl (cf-ct),dest 1:
16066 * Size 20. Size 14.
16068 * This is reasonably steep, but branch mispredict costs are
16069 * high on modern cpus, so consider failing only if optimizing for space. */
16073 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
16074 && BRANCH_COST (optimize_insn_for_speed_p (),
16079 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
16084 if (SCALAR_FLOAT_MODE_P (cmp_mode))
16086 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
16088 /* We may be reversing an unordered compare to a normal compare,
16089 which is not valid in general (we may convert a non-trapping
16090 condition to a trapping one); however, on i386 we currently
16091 emit all comparisons unordered. */
16092 code = reverse_condition_maybe_unordered (code);
16096 code = reverse_condition (code);
16097 if (compare_code != UNKNOWN)
16098 compare_code = reverse_condition (compare_code);
16102 if (compare_code != UNKNOWN)
16104 /* notl op1 (if needed)
16109 For x < 0 (resp. x <= -1) there will be no notl,
16110 so if possible swap the constants to get rid of the complement.
16112 True/false will be -1/0 while code below (store flag
16113 followed by decrement) is 0/-1, so the constants need
16114 to be exchanged once more. */
16116 if (compare_code == GE || !cf)
16118 code = reverse_condition (code);
16123 HOST_WIDE_INT tmp = cf;
16128 out = emit_store_flag (out, code, ix86_compare_op0,
16129 ix86_compare_op1, VOIDmode, 0, -1);
16133 out = emit_store_flag (out, code, ix86_compare_op0,
16134 ix86_compare_op1, VOIDmode, 0, 1);
16136 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
16137 copy_rtx (out), 1, OPTAB_DIRECT);
16140 out = expand_simple_binop (mode, AND, copy_rtx (out),
16141 gen_int_mode (cf - ct, mode),
16142 copy_rtx (out), 1, OPTAB_DIRECT);
16144 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
16145 copy_rtx (out), 1, OPTAB_DIRECT);
16146 if (!rtx_equal_p (out, operands[0]))
16147 emit_move_insn (operands[0], copy_rtx (out));
16149 return 1; /* DONE */
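/* Illustrative sketch (not part of GCC) of the branchless sequence
   emitted above.  */
#if 0
static int
setcc_select (int cond, int ct, int cf)
{
  int t = (cond != 0);   /* setcc: 0 or 1 */
  t -= 1;                /* decl: 0 if cond holds, else -1 */
  t &= cf - ct;          /* andl: 0 or cf - ct */
  return t + ct;         /* addl: cond ? ct : cf */
}
#endif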
16153 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
16155 /* Try a few things more with specific constants and a variable. */
16158 rtx var, orig_out, out, tmp;
16160 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
16161 return 0; /* FAIL */
16163 /* If one of the two operands is an interesting constant, load a
16164 constant with the above and mask it in with a logical operation. */
16166 if (CONST_INT_P (operands[2]))
16169 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
16170 operands[3] = constm1_rtx, op = and_optab;
16171 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
16172 operands[3] = const0_rtx, op = ior_optab;
16174 return 0; /* FAIL */
16176 else if (CONST_INT_P (operands[3]))
16179 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
16180 operands[2] = constm1_rtx, op = and_optab;
16181 else if (INTVAL (operands[3]) == -1 && operands[3] != const0_rtx)
16182 operands[2] = const0_rtx, op = ior_optab;
16184 return 0; /* FAIL */
16187 return 0; /* FAIL */
16189 orig_out = operands[0];
16190 tmp = gen_reg_rtx (mode);
16193 /* Recurse to get the constant loaded. */
16194 if (ix86_expand_int_movcc (operands) == 0)
16195 return 0; /* FAIL */
16197 /* Mask in the interesting variable. */
16198 out = expand_binop (mode, op, var, tmp, orig_out, 0,
16200 if (!rtx_equal_p (out, orig_out))
16201 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
16203 return 1; /* DONE */
16207 * For comparison with above,
16217 if (! nonimmediate_operand (operands[2], mode))
16218 operands[2] = force_reg (mode, operands[2]);
16219 if (! nonimmediate_operand (operands[3], mode))
16220 operands[3] = force_reg (mode, operands[3]);
16222 if (! register_operand (operands[2], VOIDmode)
16224 || ! register_operand (operands[3], VOIDmode)))
16225 operands[2] = force_reg (mode, operands[2]);
16228 && ! register_operand (operands[3], VOIDmode))
16229 operands[3] = force_reg (mode, operands[3]);
16231 emit_insn (compare_seq);
16232 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16233 gen_rtx_IF_THEN_ELSE (mode,
16234 compare_op, operands[2],
16237 return 1; /* DONE */
16240 /* Swap, force into registers, or otherwise massage the two operands
16241 to an sse comparison with a mask result. Thus we differ a bit from
16242 ix86_prepare_fp_compare_args which expects to produce a flags result.
16244 The DEST operand exists to help determine whether to commute commutative
16245 operators. The POP0/POP1 operands are updated in place. The new
16246 comparison code is returned, or UNKNOWN if not implementable. */
16248 static enum rtx_code
16249 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
16250 rtx *pop0, rtx *pop1)
16258 /* We have no LTGT as an operator. We could implement it with
16259 NE & ORDERED, but this requires an extra temporary. It's
16260 not clear that it's worth it. */
16267 /* These are supported directly. */
16274 /* For commutative operators, try to canonicalize the destination
16275 operand to be first in the comparison - this helps reload to
16276 avoid extra moves. */
16277 if (!dest || !rtx_equal_p (dest, *pop1))
16285 /* These are not supported directly. Swap the comparison operands
16286 to transform into something that is supported. */
16290 code = swap_condition (code);
16294 gcc_unreachable ();
16300 /* Detect conditional moves that exactly match min/max operational
16301 semantics. Note that this is IEEE safe, as long as we don't
16302 interchange the operands.
16304 Returns FALSE if this conditional move doesn't match a MIN/MAX,
16305 and TRUE if the operation is successful and instructions are emitted. */
16308 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
16309 rtx cmp_op1, rtx if_true, rtx if_false)
16311 enum machine_mode mode;
16317 else if (code == UNGE)
16320 if_true = if_false;
16326 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
16328 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
16333 mode = GET_MODE (dest);
16335 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
16336 but MODE may be a vector mode and thus not appropriate. */
16337 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
16339 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
16342 if_true = force_reg (mode, if_true);
16343 v = gen_rtvec (2, if_true, if_false);
16344 tmp = gen_rtx_UNSPEC (mode, v, u);
16348 code = is_min ? SMIN : SMAX;
16349 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
16352 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
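/* Illustrative note (not part of GCC): scalar minss/maxss compute

     min (a, b) = a < b ? a : b

   which is not symmetric in its operands when one of them is a NaN
   or when +0.0 is compared with -0.0 -- the second operand wins in
   the unordered/equal case.  That is why the operands are matched
   exactly above instead of being interchanged.  */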
16356 /* Expand an sse vector comparison. Return the register with the result. */
16359 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
16360 rtx op_true, rtx op_false)
16362 enum machine_mode mode = GET_MODE (dest);
16365 cmp_op0 = force_reg (mode, cmp_op0);
16366 if (!nonimmediate_operand (cmp_op1, mode))
16367 cmp_op1 = force_reg (mode, cmp_op1);
16370 || reg_overlap_mentioned_p (dest, op_true)
16371 || reg_overlap_mentioned_p (dest, op_false))
16372 dest = gen_reg_rtx (mode);
16374 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
16375 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16380 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
16381 operations. This is used for both scalar and vector conditional moves. */
16384 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
16386 enum machine_mode mode = GET_MODE (dest);
16389 if (op_false == CONST0_RTX (mode))
16391 op_true = force_reg (mode, op_true);
16392 x = gen_rtx_AND (mode, cmp, op_true);
16393 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16395 else if (op_true == CONST0_RTX (mode))
16397 op_false = force_reg (mode, op_false);
16398 x = gen_rtx_NOT (mode, cmp);
16399 x = gen_rtx_AND (mode, x, op_false);
16400 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16402 else if (TARGET_XOP)
16404 rtx pcmov = gen_rtx_SET (mode, dest,
16405 gen_rtx_IF_THEN_ELSE (mode, cmp,
16412 op_true = force_reg (mode, op_true);
16413 op_false = force_reg (mode, op_false);
16415 t2 = gen_reg_rtx (mode);
16417 t3 = gen_reg_rtx (mode);
16421 x = gen_rtx_AND (mode, op_true, cmp);
16422 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
16424 x = gen_rtx_NOT (mode, cmp);
16425 x = gen_rtx_AND (mode, x, op_false);
16426 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
16428 x = gen_rtx_IOR (mode, t3, t2);
16429 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
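/* Illustrative note (not part of GCC): with an all-ones / all-zeros
   comparison mask CMP, the generic three-instruction blend above is

     t2   = op_true  &  cmp;
     t3   = op_false & ~cmp;
     dest = t2 | t3;

   i.e. dest = cmp ? op_true : op_false, element-wise.  */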
16433 /* Expand a floating-point conditional move. Return true if successful. */
16436 ix86_expand_fp_movcc (rtx operands[])
16438 enum machine_mode mode = GET_MODE (operands[0]);
16439 enum rtx_code code = GET_CODE (operands[1]);
16440 rtx tmp, compare_op;
16442 ix86_compare_op0 = XEXP (operands[1], 0);
16443 ix86_compare_op1 = XEXP (operands[1], 1);
16444 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
16446 enum machine_mode cmode;
16448 /* Since we have no cmove for SSE registers, don't force bad register
16449 allocation just to gain access to it. Deny movcc when the
16450 comparison mode doesn't match the move mode. */
16451 cmode = GET_MODE (ix86_compare_op0);
16452 if (cmode == VOIDmode)
16453 cmode = GET_MODE (ix86_compare_op1);
16457 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16459 &ix86_compare_op1);
16460 if (code == UNKNOWN)
16463 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
16464 ix86_compare_op1, operands[2],
16468 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
16469 ix86_compare_op1, operands[2], operands[3]);
16470 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
16474 /* The floating point conditional move instructions don't directly
16475 support conditions resulting from a signed integer comparison. */
16477 compare_op = ix86_expand_compare (code);
16478 if (!fcmov_comparison_operator (compare_op, VOIDmode))
16480 tmp = gen_reg_rtx (QImode);
16481 ix86_expand_setcc (code, tmp);
16483 ix86_compare_op0 = tmp;
16484 ix86_compare_op1 = const0_rtx;
16485 compare_op = ix86_expand_compare (code);
16488 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16489 gen_rtx_IF_THEN_ELSE (mode, compare_op,
16490 operands[2], operands[3])));
16495 /* Expand a floating-point vector conditional move; a vcond operation
16496 rather than a movcc operation. */
16499 ix86_expand_fp_vcond (rtx operands[])
16501 enum rtx_code code = GET_CODE (operands[3]);
16504 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16505 &operands[4], &operands[5]);
16506 if (code == UNKNOWN)
16509 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
16510 operands[5], operands[1], operands[2]))
16513 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
16514 operands[1], operands[2]);
16515 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
16519 /* Expand a signed/unsigned integral vector conditional move. */
16522 ix86_expand_int_vcond (rtx operands[])
16524 enum machine_mode mode = GET_MODE (operands[0]);
16525 enum rtx_code code = GET_CODE (operands[3]);
16526 bool negate = false;
16529 cop0 = operands[4];
16530 cop1 = operands[5];
16532 /* XOP supports all of the comparisons on all vector int types. */
16535 /* Canonicalize the comparison to EQ, GT, GTU. */
16546 code = reverse_condition (code);
16552 code = reverse_condition (code);
16558 code = swap_condition (code);
16559 x = cop0, cop0 = cop1, cop1 = x;
16563 gcc_unreachable ();
16566 /* Only SSE4.1/SSE4.2 supports V2DImode. */
16567 if (mode == V2DImode)
16572 /* SSE4.1 supports EQ. */
16573 if (!TARGET_SSE4_1)
16579 /* SSE4.2 supports GT/GTU. */
16580 if (!TARGET_SSE4_2)
16585 gcc_unreachable ();
16589 /* Unsigned parallel compare is not supported by the hardware.
16590 Play some tricks to turn this into a signed comparison against zero. */
16594 cop0 = force_reg (mode, cop0);
16602 rtx (*gen_sub3) (rtx, rtx, rtx);
16604 /* Subtract (-(INT MAX) - 1) from both operands to make them signed. */
16606 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
16608 gen_sub3 = (mode == V4SImode
16609 ? gen_subv4si3 : gen_subv2di3);
16610 t1 = gen_reg_rtx (mode);
16611 emit_insn (gen_sub3 (t1, cop0, mask));
16613 t2 = gen_reg_rtx (mode);
16614 emit_insn (gen_sub3 (t2, cop1, mask));
16624 /* Perform a parallel unsigned saturating subtraction. */
16625 x = gen_reg_rtx (mode);
16626 emit_insn (gen_rtx_SET (VOIDmode, x,
16627 gen_rtx_US_MINUS (mode, cop0, cop1)));
16630 cop1 = CONST0_RTX (mode);
16636 gcc_unreachable ();
16641 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
16642 operands[1+negate], operands[2-negate]);
16644 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
16645 operands[2-negate]);
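/* Illustrative sketch (not part of GCC): the hardware only has signed
   element compares, so unsigned GT flips the sign bit of both operands
   (subtracting INT_MIN is the same bitwise operation).  GEU/LEU can
   instead use unsigned saturating subtraction, since (a -sat b) == 0
   exactly when a <= b.  */
#if 0
static int
gtu (unsigned int a, unsigned int b)
{
  return (int) (a ^ 0x80000000u) > (int) (b ^ 0x80000000u);
}
#endif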
16649 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
16650 true if we should do zero extension, else sign extension. HIGH_P is
16651 true if we want the N/2 high elements, else the low elements. */
16654 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16656 enum machine_mode imode = GET_MODE (operands[1]);
16657 rtx (*unpack)(rtx, rtx, rtx);
16664 unpack = gen_vec_interleave_highv16qi;
16666 unpack = gen_vec_interleave_lowv16qi;
16670 unpack = gen_vec_interleave_highv8hi;
16672 unpack = gen_vec_interleave_lowv8hi;
16676 unpack = gen_vec_interleave_highv4si;
16678 unpack = gen_vec_interleave_lowv4si;
16681 gcc_unreachable ();
16684 dest = gen_lowpart (imode, operands[0]);
16687 se = force_reg (imode, CONST0_RTX (imode));
16689 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
16690 operands[1], pc_rtx, pc_rtx);
16692 emit_insn (unpack (dest, operands[1], se));
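/* Illustrative sketch (not part of GCC): interleaving each narrow
   element X with a companion word SE doubles its width; SE = 0 gives
   zero extension and SE = (X < 0 ? all-ones : 0) gives sign
   extension.  */
#if 0
static int
widen_hi (short x, int unsigned_p)
{
  unsigned short se = (!unsigned_p && x < 0) ? 0xffff : 0;
  return (int) (((unsigned int) se << 16) | (unsigned short) x);
}
#endif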
16695 /* This function performs the same task as ix86_expand_sse_unpack,
16696 but with SSE4.1 instructions. */
16699 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16701 enum machine_mode imode = GET_MODE (operands[1]);
16702 rtx (*unpack)(rtx, rtx);
16709 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
16711 unpack = gen_sse4_1_extendv8qiv8hi2;
16715 unpack = gen_sse4_1_zero_extendv4hiv4si2;
16717 unpack = gen_sse4_1_extendv4hiv4si2;
16721 unpack = gen_sse4_1_zero_extendv2siv2di2;
16723 unpack = gen_sse4_1_extendv2siv2di2;
16726 gcc_unreachable ();
16729 dest = operands[0];
16732 /* Shift higher 8 bytes to lower 8 bytes. */
16733 src = gen_reg_rtx (imode);
16734 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, src),
16735 gen_lowpart (V1TImode, operands[1]),
16741 emit_insn (unpack (dest, src));
16744 /* Expand conditional increment or decrement using adc/sbb instructions.
16745 The default case using setcc followed by the conditional move can be
16746 done by generic code. */
16748 ix86_expand_int_addcc (rtx operands[])
16750 enum rtx_code code = GET_CODE (operands[1]);
16752 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
16754 rtx val = const0_rtx;
16755 bool fpcmp = false;
16756 enum machine_mode mode;
16758 ix86_compare_op0 = XEXP (operands[1], 0);
16759 ix86_compare_op1 = XEXP (operands[1], 1);
16760 if (operands[3] != const1_rtx
16761 && operands[3] != constm1_rtx)
16763 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
16764 ix86_compare_op1, &compare_op))
16766 code = GET_CODE (compare_op);
16768 flags = XEXP (compare_op, 0);
16770 if (GET_MODE (flags) == CCFPmode
16771 || GET_MODE (flags) == CCFPUmode)
16774 code = ix86_fp_compare_code_to_integer (code);
16781 PUT_CODE (compare_op,
16782 reverse_condition_maybe_unordered
16783 (GET_CODE (compare_op)));
16785 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
16788 mode = GET_MODE (operands[0]);
16790 /* Construct either adc or sbb insn. */
16791 if ((code == LTU) == (operands[3] == constm1_rtx))
16796 insn = gen_subqi3_carry;
16799 insn = gen_subhi3_carry;
16802 insn = gen_subsi3_carry;
16805 insn = gen_subdi3_carry;
16808 gcc_unreachable ();
16816 insn = gen_addqi3_carry;
16819 insn = gen_addhi3_carry;
16822 insn = gen_addsi3_carry;
16825 insn = gen_adddi3_carry;
16828 gcc_unreachable ();
16831 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
16833 return 1; /* DONE */
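/* Illustrative sketch (not part of GCC): once the condition sits in
   the carry flag, the conditional +/-1 is a single adc or sbb with a
   zero immediate.  */
#if 0
static unsigned int
addcc (unsigned int a, unsigned int b, unsigned int x)
{
  return x + (a < b);   /* compare, then adc with immediate 0 */
}
#endif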
16837 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
16838 works for floating point parameters and non-offsettable memories.
16839 For pushes, it returns just stack offsets; the values will be saved
16840 in the right order. At most four parts are generated. */
16843 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
16848 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
16850 size = (GET_MODE_SIZE (mode) + 4) / 8;
16852 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
16853 gcc_assert (size >= 2 && size <= 4);
16855 /* Optimize constant pool references to immediates. This is used by fp
16856 moves, which force all constants to memory to allow combining. */
16857 if (MEM_P (operand) && MEM_READONLY_P (operand))
16859 rtx tmp = maybe_get_pool_constant (operand);
16864 if (MEM_P (operand) && !offsettable_memref_p (operand))
16866 /* The only non-offsettable memories we handle are pushes. */
16867 int ok = push_operand (operand, VOIDmode);
16871 operand = copy_rtx (operand);
16872 PUT_MODE (operand, Pmode);
16873 parts[0] = parts[1] = parts[2] = parts[3] = operand;
16877 if (GET_CODE (operand) == CONST_VECTOR)
16879 enum machine_mode imode = int_mode_for_mode (mode);
16880 /* Caution: if we looked through a constant pool memory above,
16881 the operand may actually have a different mode now. That's
16882 ok, since we want to pun this all the way back to an integer. */
16883 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
16884 gcc_assert (operand != NULL);
16890 if (mode == DImode)
16891 split_di (&operand, 1, &parts[0], &parts[1]);
16896 if (REG_P (operand))
16898 gcc_assert (reload_completed);
16899 for (i = 0; i < size; i++)
16900 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
16902 else if (offsettable_memref_p (operand))
16904 operand = adjust_address (operand, SImode, 0);
16905 parts[0] = operand;
16906 for (i = 1; i < size; i++)
16907 parts[i] = adjust_address (operand, SImode, 4 * i);
16909 else if (GET_CODE (operand) == CONST_DOUBLE)
16914 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16918 real_to_target (l, &r, mode);
16919 parts[3] = gen_int_mode (l[3], SImode);
16920 parts[2] = gen_int_mode (l[2], SImode);
16923 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
16924 parts[2] = gen_int_mode (l[2], SImode);
16927 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
16930 gcc_unreachable ();
16932 parts[1] = gen_int_mode (l[1], SImode);
16933 parts[0] = gen_int_mode (l[0], SImode);
16936 gcc_unreachable ();
16941 if (mode == TImode)
16942 split_ti (&operand, 1, &parts[0], &parts[1]);
16943 if (mode == XFmode || mode == TFmode)
16945 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
16946 if (REG_P (operand))
16948 gcc_assert (reload_completed);
16949 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
16950 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
16952 else if (offsettable_memref_p (operand))
16954 operand = adjust_address (operand, DImode, 0);
16955 parts[0] = operand;
16956 parts[1] = adjust_address (operand, upper_mode, 8);
16958 else if (GET_CODE (operand) == CONST_DOUBLE)
16963 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16964 real_to_target (l, &r, mode);
16966 /* Do not use shift by 32 to avoid warning on 32bit systems. */
16967 if (HOST_BITS_PER_WIDE_INT >= 64)
16970 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
16971 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
16974 parts[0] = immed_double_const (l[0], l[1], DImode);
16976 if (upper_mode == SImode)
16977 parts[1] = gen_int_mode (l[2], SImode);
16978 else if (HOST_BITS_PER_WIDE_INT >= 64)
16981 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
16982 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
16985 parts[1] = immed_double_const (l[2], l[3], DImode);
16988 gcc_unreachable ();
16995 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
16996 Return false when normal moves are needed; true when all required
16997 insns have been emitted. Operands 2-4 contain the input values
16998 in the correct order; operands 5-7 contain the output values. */
17001 ix86_split_long_move (rtx operands[])
17006 int collisions = 0;
17007 enum machine_mode mode = GET_MODE (operands[0]);
17008 bool collisionparts[4];
17010 /* The DFmode expanders may ask us to move a double.
17011 For a 64-bit target this is a single move. By hiding that fact
17012 here we simplify the i386.md splitters. */
17013 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
17015 /* Optimize constant pool references to immediates. This is used by
17016 fp moves, which force all constants to memory to allow combining. */
17018 if (MEM_P (operands[1])
17019 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
17020 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
17021 operands[1] = get_pool_constant (XEXP (operands[1], 0));
17022 if (push_operand (operands[0], VOIDmode))
17024 operands[0] = copy_rtx (operands[0]);
17025 PUT_MODE (operands[0], Pmode);
17028 operands[0] = gen_lowpart (DImode, operands[0]);
17029 operands[1] = gen_lowpart (DImode, operands[1]);
17030 emit_move_insn (operands[0], operands[1]);
17034 /* The only non-offsettable memory we handle is push. */
17035 if (push_operand (operands[0], VOIDmode))
17038 gcc_assert (!MEM_P (operands[0])
17039 || offsettable_memref_p (operands[0]));
17041 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
17042 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
17044 /* When emitting a push, watch out for source operands on the stack. */
17045 if (push && MEM_P (operands[1])
17046 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
17048 rtx src_base = XEXP (part[1][nparts - 1], 0);
17050 /* Compensate for the stack decrement by 4. */
17051 if (!TARGET_64BIT && nparts == 3
17052 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
17053 src_base = plus_constant (src_base, 4);
17055 /* src_base refers to the stack pointer and is
17056 automatically decreased by the emitted pushes. */
17057 for (i = 0; i < nparts; i++)
17058 part[1][i] = change_address (part[1][i],
17059 GET_MODE (part[1][i]), src_base);
17062 /* We need to do the copy in the right order in case an address
17063 register of the source overlaps the destination. */
17064 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
17068 for (i = 0; i < nparts; i++)
17071 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
17072 if (collisionparts[i])
17076 /* Collision in the middle part can be handled by reordering. */
17077 if (collisions == 1 && nparts == 3 && collisionparts [1])
17079 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
17080 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
17082 else if (collisions == 1
17084 && (collisionparts [1] || collisionparts [2]))
17086 if (collisionparts [1])
17088 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
17089 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
17093 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
17094 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
17098 /* If there are more collisions, we can't handle it by reordering.
17099 Do an lea to the last part and use only one colliding move. */
17100 else if (collisions > 1)
17106 base = part[0][nparts - 1];
17108 /* Handle the case when the last part isn't valid for lea.
17109 Happens in 64-bit mode storing the 12-byte XFmode. */
17110 if (GET_MODE (base) != Pmode)
17111 base = gen_rtx_REG (Pmode, REGNO (base));
17113 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
17114 part[1][0] = replace_equiv_address (part[1][0], base);
17115 for (i = 1; i < nparts; i++)
17117 tmp = plus_constant (base, UNITS_PER_WORD * i);
17118 part[1][i] = replace_equiv_address (part[1][i], tmp);
17129 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
17130 emit_insn (gen_addsi3 (stack_pointer_rtx,
17131 stack_pointer_rtx, GEN_INT (-4)));
17132 emit_move_insn (part[0][2], part[1][2]);
17134 else if (nparts == 4)
17136 emit_move_insn (part[0][3], part[1][3]);
17137 emit_move_insn (part[0][2], part[1][2]);
17142 /* In 64-bit mode we don't have a 32-bit push available. In case this is
17143 a register, it is OK - we will just use the larger counterpart. We also
17144 retype memory - this comes from an attempt to avoid a REX prefix on
17145 moving of the second half of a TFmode value. */
17146 if (GET_MODE (part[1][1]) == SImode)
17148 switch (GET_CODE (part[1][1]))
17151 part[1][1] = adjust_address (part[1][1], DImode, 0);
17155 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
17159 gcc_unreachable ();
17162 if (GET_MODE (part[1][0]) == SImode)
17163 part[1][0] = part[1][1];
17166 emit_move_insn (part[0][1], part[1][1]);
17167 emit_move_insn (part[0][0], part[1][0]);
17171 /* Choose the correct order so as not to overwrite the source before it is copied. */
17172 if ((REG_P (part[0][0])
17173 && REG_P (part[1][1])
17174 && (REGNO (part[0][0]) == REGNO (part[1][1])
17176 && REGNO (part[0][0]) == REGNO (part[1][2]))
17178 && REGNO (part[0][0]) == REGNO (part[1][3]))))
17180 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
17182 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
17184 operands[2 + i] = part[0][j];
17185 operands[6 + i] = part[1][j];
17190 for (i = 0; i < nparts; i++)
17192 operands[2 + i] = part[0][i];
17193 operands[6 + i] = part[1][i];
17197 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
17198 if (optimize_insn_for_size_p ())
17200 for (j = 0; j < nparts - 1; j++)
17201 if (CONST_INT_P (operands[6 + j])
17202 && operands[6 + j] != const0_rtx
17203 && REG_P (operands[2 + j]))
17204 for (i = j; i < nparts - 1; i++)
17205 if (CONST_INT_P (operands[7 + i])
17206 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
17207 operands[7 + i] = operands[2 + j];
17210 for (i = 0; i < nparts; i++)
17211 emit_move_insn (operands[2 + i], operands[6 + i]);
17216 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
17217 left shift by a constant, either using a single shift or
17218 a sequence of add instructions. */
17221 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
17225 emit_insn ((mode == DImode
17227 : gen_adddi3) (operand, operand, operand));
17229 else if (!optimize_insn_for_size_p ()
17230 && count * ix86_cost->add <= ix86_cost->shift_const)
17233 for (i=0; i<count; i++)
17235 emit_insn ((mode == DImode
17237 : gen_adddi3) (operand, operand, operand));
17241 emit_insn ((mode == DImode
17243 : gen_ashldi3) (operand, operand, GEN_INT (count)));
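/* Illustrative sketch (not part of GCC): for small constant counts the
   left shift becomes a chain of self-additions, each doubling the
   value; this is chosen only when COUNT adds cost no more than a
   single shift.  */
#if 0
static unsigned int
shl2 (unsigned int x)
{
  x += x;   /* x << 1 */
  x += x;   /* x << 2 */
  return x;
}
#endif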
17247 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
17249 rtx low[2], high[2];
17251 const int single_width = mode == DImode ? 32 : 64;
17253 if (CONST_INT_P (operands[2]))
17255 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17256 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17258 if (count >= single_width)
17260 emit_move_insn (high[0], low[1]);
17261 emit_move_insn (low[0], const0_rtx);
17263 if (count > single_width)
17264 ix86_expand_ashl_const (high[0], count - single_width, mode);
17268 if (!rtx_equal_p (operands[0], operands[1]))
17269 emit_move_insn (operands[0], operands[1]);
17270 emit_insn ((mode == DImode
17272 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
17273 ix86_expand_ashl_const (low[0], count, mode);
17278 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17280 if (operands[1] == const1_rtx)
17282 /* Assuming we've chosen QImode-capable registers, 1 << N can be
17283 done with two 32/64-bit shifts, no branches, no cmoves. */
17284 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
17286 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
17288 ix86_expand_clear (low[0]);
17289 ix86_expand_clear (high[0]);
17290 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
17292 d = gen_lowpart (QImode, low[0]);
17293 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17294 s = gen_rtx_EQ (QImode, flags, const0_rtx);
17295 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17297 d = gen_lowpart (QImode, high[0]);
17298 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17299 s = gen_rtx_NE (QImode, flags, const0_rtx);
17300 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17303 /* Otherwise, we can get the same results by manually performing
17304 a bit extract operation on bit 5/6, and then performing the two
17305 shifts. The two methods of getting 0/1 into low/high are exactly
17306 the same size. Avoiding the shift in the bit extract case helps
17307 pentium4 a bit; no one else seems to care much either way. */
17312 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
17313 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
17315 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
17316 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
17318 emit_insn ((mode == DImode
17320 : gen_lshrdi3) (high[0], high[0],
17321 GEN_INT (mode == DImode ? 5 : 6)));
17322 emit_insn ((mode == DImode
17324 : gen_anddi3) (high[0], high[0], const1_rtx));
17325 emit_move_insn (low[0], high[0]);
17326 emit_insn ((mode == DImode
17328 : gen_xordi3) (low[0], low[0], const1_rtx));
17331 emit_insn ((mode == DImode
17333 : gen_ashldi3) (low[0], low[0], operands[2]));
17334 emit_insn ((mode == DImode
17336 : gen_ashldi3) (high[0], high[0], operands[2]));
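/* Illustrative sketch (not part of GCC) of the branchless 1 << N with
   a 64-bit result on a 32-bit target: bit 5 of the count picks the
   half that receives the 1, and the 32-bit shifter ignores that bit.  */
#if 0
static void
shl1 (unsigned int n, unsigned int *lo, unsigned int *hi)
{
  *lo = (n & 32) == 0;    /* 1 if the bit lands in the low half */
  *hi = (n & 32) != 0;
  *lo <<= (n & 31);       /* hardware shifts use the count mod 32 */
  *hi <<= (n & 31);
}
#endif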
17340 if (operands[1] == constm1_rtx)
17342 /* For -1 << N, we can avoid the shld instruction, because we
17343 know that we're shifting 0...31/63 ones into a -1. */
17344 emit_move_insn (low[0], constm1_rtx);
17345 if (optimize_insn_for_size_p ())
17346 emit_move_insn (high[0], low[0]);
17348 emit_move_insn (high[0], constm1_rtx);
17352 if (!rtx_equal_p (operands[0], operands[1]))
17353 emit_move_insn (operands[0], operands[1]);
17355 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17356 emit_insn ((mode == DImode
17358 : gen_x86_64_shld) (high[0], low[0], operands[2]));
17361 emit_insn ((mode == DImode
17363 : gen_ashldi3) (low[0], low[0], operands[2]));
17365 if (TARGET_CMOVE && scratch)
17367 ix86_expand_clear (scratch);
17368 emit_insn ((mode == DImode
17369 ? gen_x86_shiftsi_adj_1
17370 : gen_x86_shiftdi_adj_1) (high[0], low[0], operands[2],
17374 emit_insn ((mode == DImode
17375 ? gen_x86_shiftsi_adj_2
17376 : gen_x86_shiftdi_adj_2) (high[0], low[0], operands[2]));
17380 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
17382 rtx low[2], high[2];
17384 const int single_width = mode == DImode ? 32 : 64;
17386 if (CONST_INT_P (operands[2]))
17388 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17389 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17391 if (count == single_width * 2 - 1)
17393 emit_move_insn (high[0], high[1]);
17394 emit_insn ((mode == DImode
17396 : gen_ashrdi3) (high[0], high[0],
17397 GEN_INT (single_width - 1)));
17398 emit_move_insn (low[0], high[0]);
17401 else if (count >= single_width)
17403 emit_move_insn (low[0], high[1]);
17404 emit_move_insn (high[0], low[0]);
17405 emit_insn ((mode == DImode
17407 : gen_ashrdi3) (high[0], high[0],
17408 GEN_INT (single_width - 1)));
17409 if (count > single_width)
17410 emit_insn ((mode == DImode
17412 : gen_ashrdi3) (low[0], low[0],
17413 GEN_INT (count - single_width)));
17417 if (!rtx_equal_p (operands[0], operands[1]))
17418 emit_move_insn (operands[0], operands[1]);
17419 emit_insn ((mode == DImode
17421 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17422 emit_insn ((mode == DImode
17424 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
17429 if (!rtx_equal_p (operands[0], operands[1]))
17430 emit_move_insn (operands[0], operands[1]);
17432 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17434 emit_insn ((mode == DImode
17436 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17437 emit_insn ((mode == DImode
17439 : gen_ashrdi3) (high[0], high[0], operands[2]));
17441 if (TARGET_CMOVE && scratch)
17443 emit_move_insn (scratch, high[0]);
17444 emit_insn ((mode == DImode
17446 : gen_ashrdi3) (scratch, scratch,
17447 GEN_INT (single_width - 1)));
17448 emit_insn ((mode == DImode
17449 ? gen_x86_shiftsi_adj_1
17450 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17454 emit_insn ((mode == DImode
17455 ? gen_x86_shiftsi_adj_3
17456 : gen_x86_shiftdi_adj_3) (low[0], high[0], operands[2]));
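/* Illustrative sketch (not part of GCC) of the variable-count split:
   shrd/sar shift as if the count were reduced mod 32, then a fixup
   (cmov or a short branch) handles counts of 32 and above.  */
#if 0
static void
ashr64 (unsigned int n, unsigned int *lo, int *hi)
{
  unsigned int c = n & 31;
  unsigned int l = c ? (*lo >> c) | ((unsigned int) *hi << (32 - c))
                     : *lo;        /* shrd low, high */
  int h = *hi >> c;                /* sar high */
  if (n & 32)                      /* fixup for counts >= 32 */
    {
      *lo = (unsigned int) h;
      *hi = *hi >> 31;             /* sign word */
    }
  else
    {
      *lo = l;
      *hi = h;
    }
}
#endif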
17461 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
17463 rtx low[2], high[2];
17465 const int single_width = mode == DImode ? 32 : 64;
17467 if (CONST_INT_P (operands[2]))
17469 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17470 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17472 if (count >= single_width)
17474 emit_move_insn (low[0], high[1]);
17475 ix86_expand_clear (high[0]);
17477 if (count > single_width)
17478 emit_insn ((mode == DImode
17480 : gen_lshrdi3) (low[0], low[0],
17481 GEN_INT (count - single_width)));
17485 if (!rtx_equal_p (operands[0], operands[1]))
17486 emit_move_insn (operands[0], operands[1]);
17487 emit_insn ((mode == DImode
17489 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17490 emit_insn ((mode == DImode
17492 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
17497 if (!rtx_equal_p (operands[0], operands[1]))
17498 emit_move_insn (operands[0], operands[1]);
17500 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17502 emit_insn ((mode == DImode
17504 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17505 emit_insn ((mode == DImode
17507 : gen_lshrdi3) (high[0], high[0], operands[2]));
17509 /* Heh. By reversing the arguments, we can reuse this pattern. */
17510 if (TARGET_CMOVE && scratch)
17512 ix86_expand_clear (scratch);
17513 emit_insn ((mode == DImode
17514 ? gen_x86_shiftsi_adj_1
17515 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17519 emit_insn ((mode == DImode
17520 ? gen_x86_shiftsi_adj_2
17521 : gen_x86_shiftdi_adj_2) (low[0], high[0], operands[2]));
17525 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
17527 predict_jump (int prob)
17529 rtx insn = get_last_insn ();
17530 gcc_assert (JUMP_P (insn));
17531 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
17534 /* Helper function for the string operations below: test whether the VALUE
17535 bit of VARIABLE is clear, and jump to the returned label if so. */
17537 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
17539 rtx label = gen_label_rtx ();
17540 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
17541 if (GET_MODE (variable) == DImode)
17542 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
17544 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
17545 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
17548 predict_jump (REG_BR_PROB_BASE * 50 / 100);
17550 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17554 /* Decrease COUNTREG by VALUE. */
17556 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
17558 if (GET_MODE (countreg) == DImode)
17559 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
17561 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
17564 /* Zero-extend the possibly-SImode EXP to a Pmode register. */
17566 ix86_zero_extend_to_Pmode (rtx exp)
17569 if (GET_MODE (exp) == VOIDmode)
17570 return force_reg (Pmode, exp);
17571 if (GET_MODE (exp) == Pmode)
17572 return copy_to_mode_reg (Pmode, exp);
17573 r = gen_reg_rtx (Pmode);
17574 emit_insn (gen_zero_extendsidi2 (r, exp));
17578 /* Divide COUNTREG by SCALE. */
17580 scale_counter (rtx countreg, int scale)
17586 if (CONST_INT_P (countreg))
17587 return GEN_INT (INTVAL (countreg) / scale);
17588 gcc_assert (REG_P (countreg));
17590 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
17591 GEN_INT (exact_log2 (scale)),
17592 NULL, 1, OPTAB_DIRECT);
17596 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
17597 DImode for constant loop counts. */
17599 static enum machine_mode
17600 counter_mode (rtx count_exp)
17602 if (GET_MODE (count_exp) != VOIDmode)
17603 return GET_MODE (count_exp);
17604 if (!CONST_INT_P (count_exp))
17606 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
17611 /* When SRCPTR is non-NULL, output a simple loop to move memory
17612 pointed to by SRCPTR to DESTPTR via chunks of MODE, unrolled UNROLL times;
17613 the overall size is COUNT, specified in bytes. When SRCPTR is NULL, output the
17614 equivalent loop to set memory to VALUE (supposed to be in MODE).
17616 The size is rounded down to a whole number of chunks moved at once.
17617 SRCMEM and DESTMEM provide MEM rtx to feed proper aliasing info. */
17621 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
17622 rtx destptr, rtx srcptr, rtx value,
17623 rtx count, enum machine_mode mode, int unroll,
17626 rtx out_label, top_label, iter, tmp;
17627 enum machine_mode iter_mode = counter_mode (count);
17628 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
17629 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
17635 top_label = gen_label_rtx ();
17636 out_label = gen_label_rtx ();
17637 iter = gen_reg_rtx (iter_mode);
17639 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
17640 NULL, 1, OPTAB_DIRECT);
17641 /* Those two should combine. */
17642 if (piece_size == const1_rtx)
17644 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
17646 predict_jump (REG_BR_PROB_BASE * 10 / 100);
17648 emit_move_insn (iter, const0_rtx);
17650 emit_label (top_label);
17652 tmp = convert_modes (Pmode, iter_mode, iter, true);
17653 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
17654 destmem = change_address (destmem, mode, x_addr);
17658 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
17659 srcmem = change_address (srcmem, mode, y_addr);
17661 /* When unrolling for chips that reorder memory reads and writes,
17662 we can save registers by using a single temporary.
17663 Also, using 4 temporaries is overkill in 32-bit mode. */
17664 if (!TARGET_64BIT && 0)
17666 for (i = 0; i < unroll; i++)
17671 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17673 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17675 emit_move_insn (destmem, srcmem);
17681 gcc_assert (unroll <= 4);
17682 for (i = 0; i < unroll; i++)
17684 tmpreg[i] = gen_reg_rtx (mode);
17688 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17690 emit_move_insn (tmpreg[i], srcmem);
17692 for (i = 0; i < unroll; i++)
17697 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17699 emit_move_insn (destmem, tmpreg[i]);
17704 for (i = 0; i < unroll; i++)
17708 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17709 emit_move_insn (destmem, value);
17712 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
17713 true, OPTAB_LIB_WIDEN);
17715 emit_move_insn (iter, tmp);
17717 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
17719 if (expected_size != -1)
17721 expected_size /= GET_MODE_SIZE (mode) * unroll;
17722 if (expected_size == 0)
17724 else if (expected_size > REG_BR_PROB_BASE)
17725 predict_jump (REG_BR_PROB_BASE - 1);
17727 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
17730 predict_jump (REG_BR_PROB_BASE * 80 / 100);
17731 iter = ix86_zero_extend_to_Pmode (iter);
17732 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
17733 true, OPTAB_LIB_WIDEN);
17734 if (tmp != destptr)
17735 emit_move_insn (destptr, tmp);
17738 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
17739 true, OPTAB_LIB_WIDEN);
17741 emit_move_insn (srcptr, tmp);
17743 emit_label (out_label);
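/* Illustrative sketch (not part of GCC) of the loop shape emitted
   above for the copy case, with CHUNK being the mode size times the
   unroll factor (a power of two).  */
#if 0
  size = count & ~(CHUNK - 1);  /* round down to whole chunks */
  if (size != 0)
    for (iter = 0; iter < size; iter += CHUNK)
      memcpy (dest + iter, src + iter, CHUNK);  /* the unrolled moves */
  dest += size;
  src += size;                  /* the epilogue handles the tail */
#endif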
17746 /* Output a "rep; mov" instruction.
17747 Arguments have the same meaning as for the previous function. */
17749 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
17750 rtx destptr, rtx srcptr,
17752 enum machine_mode mode)
17758 /* If the size is known, it is shorter to use rep movs. */
17759 if (mode == QImode && CONST_INT_P (count)
17760 && !(INTVAL (count) & 3))
17763 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17764 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17765 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
17766 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
17767 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17768 if (mode != QImode)
17770 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17771 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17772 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17773 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
17774 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17775 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
17779 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17780 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
17782 if (CONST_INT_P (count))
17784 count = GEN_INT (INTVAL (count)
17785 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17786 destmem = shallow_copy_rtx (destmem);
17787 srcmem = shallow_copy_rtx (srcmem);
17788 set_mem_size (destmem, count);
17789 set_mem_size (srcmem, count);
17793 if (MEM_SIZE (destmem))
17794 set_mem_size (destmem, NULL_RTX);
17795 if (MEM_SIZE (srcmem))
17796 set_mem_size (srcmem, NULL_RTX);
17798 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
17802 /* Output a "rep; stos" instruction.
17803 Arguments have the same meaning as for the previous function. */
17805 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
17806 rtx count, enum machine_mode mode,
17812 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17813 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17814 value = force_reg (mode, gen_lowpart (mode, value));
17815 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17816 if (mode != QImode)
17818 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17819 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17820 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17823 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17824 if (orig_value == const0_rtx && CONST_INT_P (count))
17826 count = GEN_INT (INTVAL (count)
17827 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17828 destmem = shallow_copy_rtx (destmem);
17829 set_mem_size (destmem, count);
17831 else if (MEM_SIZE (destmem))
17832 set_mem_size (destmem, NULL_RTX);
17833 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
17837 emit_strmov (rtx destmem, rtx srcmem,
17838 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
17840 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
17841 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
17842 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17845 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
17847 expand_movmem_epilogue (rtx destmem, rtx srcmem,
17848 rtx destptr, rtx srcptr, rtx count, int max_size)
17851 if (CONST_INT_P (count))
17853 HOST_WIDE_INT countval = INTVAL (count);
17856 if ((countval & 0x10) && max_size > 16)
17860 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17861 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
17864 gcc_unreachable ();
17867 if ((countval & 0x08) && max_size > 8)
17870 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17873 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17874 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
17878 if ((countval & 0x04) && max_size > 4)
17880 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17883 if ((countval & 0x02) && max_size > 2)
17885 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
17888 if ((countval & 0x01) && max_size > 1)
17890 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
17897 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
17898 count, 1, OPTAB_DIRECT);
17899 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
17900 count, QImode, 1, 4);
17904 /* When single string operations are available, we can cheaply advance the
17905 dest and src pointers. Otherwise we save code size by maintaining an offset
17906 (zero is readily available from the preceding rep operation) and using x86 addressing modes. */
17908 if (TARGET_SINGLE_STRINGOP)
17912 rtx label = ix86_expand_aligntest (count, 4, true);
17913 src = change_address (srcmem, SImode, srcptr);
17914 dest = change_address (destmem, SImode, destptr);
17915 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17916 emit_label (label);
17917 LABEL_NUSES (label) = 1;
17921 rtx label = ix86_expand_aligntest (count, 2, true);
17922 src = change_address (srcmem, HImode, srcptr);
17923 dest = change_address (destmem, HImode, destptr);
17924 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17925 emit_label (label);
17926 LABEL_NUSES (label) = 1;
17930 rtx label = ix86_expand_aligntest (count, 1, true);
17931 src = change_address (srcmem, QImode, srcptr);
17932 dest = change_address (destmem, QImode, destptr);
17933 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17934 emit_label (label);
17935 LABEL_NUSES (label) = 1;
17940 rtx offset = force_reg (Pmode, const0_rtx);
17945 rtx label = ix86_expand_aligntest (count, 4, true);
17946 src = change_address (srcmem, SImode, srcptr);
17947 dest = change_address (destmem, SImode, destptr);
17948 emit_move_insn (dest, src);
17949 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
17950 true, OPTAB_LIB_WIDEN);
17952 emit_move_insn (offset, tmp);
17953 emit_label (label);
17954 LABEL_NUSES (label) = 1;
17958 rtx label = ix86_expand_aligntest (count, 2, true);
17959 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17960 src = change_address (srcmem, HImode, tmp);
17961 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17962 dest = change_address (destmem, HImode, tmp);
17963 emit_move_insn (dest, src);
17964 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
17965 true, OPTAB_LIB_WIDEN);
17967 emit_move_insn (offset, tmp);
17968 emit_label (label);
17969 LABEL_NUSES (label) = 1;
17973 rtx label = ix86_expand_aligntest (count, 1, true);
17974 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17975 src = change_address (srcmem, QImode, tmp);
17976 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17977 dest = change_address (destmem, QImode, tmp);
17978 emit_move_insn (dest, src);
17979 emit_label (label);
17980 LABEL_NUSES (label) = 1;
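/* Illustrative sketch, not GCC code: the constant-count branch of
   expand_movmem_epilogue above in plain C.  Because the residual count is
   count & (max_size - 1), each power-of-two chunk occurs at most once, so
   testing its bit copies the whole tail with straight-line code (the
   16-byte case is analogous, done as two 8-byte moves).  Names here are
   hypothetical. */
static void
tail_copy_model (unsigned char *dest, const unsigned char *src,
                 unsigned long countval)
{
  unsigned long off = 0, bit, i;
  for (bit = 8; bit != 0; bit >>= 1)
    if (countval & bit)
      {
        for (i = 0; i < bit; i++)     /* one strmov of width `bit` */
          dest[off + i] = src[off + i];
        off += bit;
      }
}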
17985 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17987 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
17988 rtx count, int max_size)
17990 count =
17991 expand_simple_binop (counter_mode (count), AND, count,
17992 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
17993 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
17994 gen_lowpart (QImode, value), count, QImode,
17995 1, max_size / 2);
17998 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
18000 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
18004 if (CONST_INT_P (count))
18006 HOST_WIDE_INT countval = INTVAL (count);
18009 if ((countval & 0x10) && max_size > 16)
18013 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
18014 emit_insn (gen_strset (destptr, dest, value));
18015 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
18016 emit_insn (gen_strset (destptr, dest, value));
18019 gcc_unreachable ();
18022 if ((countval & 0x08) && max_size > 8)
18026 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
18027 emit_insn (gen_strset (destptr, dest, value));
18031 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
18032 emit_insn (gen_strset (destptr, dest, value));
18033 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
18034 emit_insn (gen_strset (destptr, dest, value));
18038 if ((countval & 0x04) && max_size > 4)
18040 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
18041 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
18044 if ((countval & 0x02) && max_size > 2)
18046 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
18047 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
18050 if ((countval & 0x01) && max_size > 1)
18052 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
18053 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
18060 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
18065 rtx label = ix86_expand_aligntest (count, 16, true);
18068 dest = change_address (destmem, DImode, destptr);
18069 emit_insn (gen_strset (destptr, dest, value));
18070 emit_insn (gen_strset (destptr, dest, value));
18074 dest = change_address (destmem, SImode, destptr);
18075 emit_insn (gen_strset (destptr, dest, value));
18076 emit_insn (gen_strset (destptr, dest, value));
18077 emit_insn (gen_strset (destptr, dest, value));
18078 emit_insn (gen_strset (destptr, dest, value));
18080 emit_label (label);
18081 LABEL_NUSES (label) = 1;
18085 rtx label = ix86_expand_aligntest (count, 8, true);
18088 dest = change_address (destmem, DImode, destptr);
18089 emit_insn (gen_strset (destptr, dest, value));
18093 dest = change_address (destmem, SImode, destptr);
18094 emit_insn (gen_strset (destptr, dest, value));
18095 emit_insn (gen_strset (destptr, dest, value));
18097 emit_label (label);
18098 LABEL_NUSES (label) = 1;
18102 rtx label = ix86_expand_aligntest (count, 4, true);
18103 dest = change_address (destmem, SImode, destptr);
18104 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
18105 emit_label (label);
18106 LABEL_NUSES (label) = 1;
18110 rtx label = ix86_expand_aligntest (count, 2, true);
18111 dest = change_address (destmem, HImode, destptr);
18112 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
18113 emit_label (label);
18114 LABEL_NUSES (label) = 1;
18118 rtx label = ix86_expand_aligntest (count, 1, true);
18119 dest = change_address (destmem, QImode, destptr);
18120 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
18121 emit_label (label);
18122 LABEL_NUSES (label) = 1;
18126 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN, to
18127 DESIRED_ALIGNMENT. */
18129 expand_movmem_prologue (rtx destmem, rtx srcmem,
18130 rtx destptr, rtx srcptr, rtx count,
18131 int align, int desired_alignment)
18133 if (align <= 1 && desired_alignment > 1)
18135 rtx label = ix86_expand_aligntest (destptr, 1, false);
18136 srcmem = change_address (srcmem, QImode, srcptr);
18137 destmem = change_address (destmem, QImode, destptr);
18138 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
18139 ix86_adjust_counter (count, 1);
18140 emit_label (label);
18141 LABEL_NUSES (label) = 1;
18143 if (align <= 2 && desired_alignment > 2)
18145 rtx label = ix86_expand_aligntest (destptr, 2, false);
18146 srcmem = change_address (srcmem, HImode, srcptr);
18147 destmem = change_address (destmem, HImode, destptr);
18148 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
18149 ix86_adjust_counter (count, 2);
18150 emit_label (label);
18151 LABEL_NUSES (label) = 1;
18153 if (align <= 4 && desired_alignment > 4)
18155 rtx label = ix86_expand_aligntest (destptr, 4, false);
18156 srcmem = change_address (srcmem, SImode, srcptr);
18157 destmem = change_address (destmem, SImode, destptr);
18158 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
18159 ix86_adjust_counter (count, 4);
18160 emit_label (label);
18161 LABEL_NUSES (label) = 1;
18163 gcc_assert (desired_alignment <= 8);
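/* Illustrative C analogue (hypothetical names, not GCC code) of the
   alignment prologue above: peel 1, 2, then 4 bytes off the front, guarded
   by the same address-bit tests that ix86_expand_aligntest emits, until
   the destination reaches `desired` alignment (a power of 2 <= 8). */
static void
align_dest_model (unsigned char **destp, const unsigned char **srcp,
                  unsigned long *countp, unsigned long desired)
{
  unsigned long step, i;
  for (step = 1; step < desired; step <<= 1)
    if (((unsigned long) *destp) & step)
      {
        for (i = 0; i < step; i++)
          (*destp)[i] = (*srcp)[i];
        *destp += step;
        *srcp += step;
        *countp -= step;              /* ix86_adjust_counter */
      }
}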
18166 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
18167 ALIGN_BYTES is how many bytes need to be copied. */
18169 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
18170 int desired_align, int align_bytes)
18173 rtx src_size, dst_size;
18175 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
18176 if (src_align_bytes >= 0)
18177 src_align_bytes = desired_align - src_align_bytes;
18178 src_size = MEM_SIZE (src);
18179 dst_size = MEM_SIZE (dst);
18180 if (align_bytes & 1)
18182 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
18183 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
18185 emit_insn (gen_strmov (destreg, dst, srcreg, src));
18187 if (align_bytes & 2)
18189 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
18190 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
18191 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
18192 set_mem_align (dst, 2 * BITS_PER_UNIT);
18193 if (src_align_bytes >= 0
18194 && (src_align_bytes & 1) == (align_bytes & 1)
18195 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
18196 set_mem_align (src, 2 * BITS_PER_UNIT);
18198 emit_insn (gen_strmov (destreg, dst, srcreg, src));
18200 if (align_bytes & 4)
18202 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
18203 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
18204 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
18205 set_mem_align (dst, 4 * BITS_PER_UNIT);
18206 if (src_align_bytes >= 0)
18208 unsigned int src_align = 0;
18209 if ((src_align_bytes & 3) == (align_bytes & 3))
18211 else if ((src_align_bytes & 1) == (align_bytes & 1))
18213 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
18214 set_mem_align (src, src_align * BITS_PER_UNIT);
18217 emit_insn (gen_strmov (destreg, dst, srcreg, src));
18219 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
18220 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
18221 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
18222 set_mem_align (dst, desired_align * BITS_PER_UNIT);
18223 if (src_align_bytes >= 0)
18225 unsigned int src_align = 0;
18226 if ((src_align_bytes & 7) == (align_bytes & 7))
18228 else if ((src_align_bytes & 3) == (align_bytes & 3))
18230 else if ((src_align_bytes & 1) == (align_bytes & 1))
18232 if (src_align > (unsigned int) desired_align)
18233 src_align = desired_align;
18234 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
18235 set_mem_align (src, src_align * BITS_PER_UNIT);
18238 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18240 set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
18245 /* Store enough into DEST to align DEST, known to be aligned by ALIGN, to
18246 DESIRED_ALIGNMENT. */
18248 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
18249 int align, int desired_alignment)
18251 if (align <= 1 && desired_alignment > 1)
18253 rtx label = ix86_expand_aligntest (destptr, 1, false);
18254 destmem = change_address (destmem, QImode, destptr);
18255 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
18256 ix86_adjust_counter (count, 1);
18257 emit_label (label);
18258 LABEL_NUSES (label) = 1;
18260 if (align <= 2 && desired_alignment > 2)
18262 rtx label = ix86_expand_aligntest (destptr, 2, false);
18263 destmem = change_address (destmem, HImode, destptr);
18264 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
18265 ix86_adjust_counter (count, 2);
18266 emit_label (label);
18267 LABEL_NUSES (label) = 1;
18269 if (align <= 4 && desired_alignment > 4)
18271 rtx label = ix86_expand_aligntest (destptr, 4, false);
18272 destmem = change_address (destmem, SImode, destptr);
18273 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
18274 ix86_adjust_counter (count, 4);
18275 emit_label (label);
18276 LABEL_NUSES (label) = 1;
18278 gcc_assert (desired_alignment <= 8);
18281 /* Store enough into DST to align DST, known to be aligned by ALIGN, to
18282 DESIRED_ALIGN. ALIGN_BYTES is how many bytes need to be stored. */
18284 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
18285 int desired_align, int align_bytes)
18288 rtx dst_size = MEM_SIZE (dst);
18289 if (align_bytes & 1)
18291 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
18293 emit_insn (gen_strset (destreg, dst,
18294 gen_lowpart (QImode, value)));
18296 if (align_bytes & 2)
18298 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
18299 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
18300 set_mem_align (dst, 2 * BITS_PER_UNIT);
18302 emit_insn (gen_strset (destreg, dst,
18303 gen_lowpart (HImode, value)));
18305 if (align_bytes & 4)
18307 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
18308 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
18309 set_mem_align (dst, 4 * BITS_PER_UNIT);
18311 emit_insn (gen_strset (destreg, dst,
18312 gen_lowpart (SImode, value)));
18314 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
18315 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
18316 set_mem_align (dst, desired_align * BITS_PER_UNIT);
18318 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18322 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
18323 static enum stringop_alg
18324 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
18325 int *dynamic_check)
18327 const struct stringop_algs * algs;
18328 bool optimize_for_speed;
18329 /* Algorithms using the rep prefix want at least edi and ecx;
18330 additionally, memset wants eax and memcpy wants esi. Don't
18331 consider such algorithms if the user has appropriated those
18332 registers for their own purposes. */
18333 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
18335 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
18337 #define ALG_USABLE_P(alg) (rep_prefix_usable \
18338 || (alg != rep_prefix_1_byte \
18339 && alg != rep_prefix_4_byte \
18340 && alg != rep_prefix_8_byte))
18341 const struct processor_costs *cost;
18343 /* Even if the string operation call is cold, we still might spend a lot
18344 of time processing large blocks. */
18345 if (optimize_function_for_size_p (cfun)
18346 || (optimize_insn_for_size_p ()
18347 && expected_size != -1 && expected_size < 256))
18348 optimize_for_speed = false;
18350 optimize_for_speed = true;
18352 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
18354 *dynamic_check = -1;
18356 algs = &cost->memset[TARGET_64BIT != 0];
18358 algs = &cost->memcpy[TARGET_64BIT != 0];
18359 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
18360 return stringop_alg;
18361 /* rep; movq or rep; movl is the smallest variant. */
18362 else if (!optimize_for_speed)
18364 if (!count || (count & 3))
18365 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
18367 return rep_prefix_usable ? rep_prefix_4_byte : loop;
18369 /* Very tiny blocks are best handled via the loop; REP is expensive to set up.
18371 else if (expected_size != -1 && expected_size < 4)
18372 return loop_1_byte;
18373 else if (expected_size != -1)
18376 enum stringop_alg alg = libcall;
18377 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18379 /* We get here if the algorithms that were not libcall-based
18380 were rep-prefix based and we are unable to use rep prefixes
18381 based on global register usage. Break out of the loop and
18382 use the heuristic below. */
18383 if (algs->size[i].max == 0)
18385 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
18387 enum stringop_alg candidate = algs->size[i].alg;
18389 if (candidate != libcall && ALG_USABLE_P (candidate))
18391 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
18392 last non-libcall inline algorithm. */
18393 if (TARGET_INLINE_ALL_STRINGOPS)
18395 /* When the current size is best to be copied by a libcall,
18396 but we are still forced to inline, run the heuristic below
18397 that will pick code for medium sized blocks. */
18398 if (alg != libcall)
18402 else if (ALG_USABLE_P (candidate))
18406 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
18408 /* When asked to inline the call anyway, try to pick a meaningful choice.
18409 We look for the maximal size of block that is faster to copy by hand and
18410 take blocks of at most that size, guessing that the average size will
18411 be roughly half of the block.
18413 If this turns out to be bad, we might simply specify the preferred
18414 choice in ix86_costs. */
18415 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18416 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
18419 enum stringop_alg alg;
18421 bool any_alg_usable_p = true;
18423 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18425 enum stringop_alg candidate = algs->size[i].alg;
18426 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
18428 if (candidate != libcall && candidate
18429 && ALG_USABLE_P (candidate))
18430 max = algs->size[i].max;
18432 /* If there aren't any usable algorithms, then recursing on
18433 smaller sizes isn't going to find anything. Just return the
18434 simple byte-at-a-time copy loop. */
18435 if (!any_alg_usable_p)
18437 /* Pick something reasonable. */
18438 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18439 *dynamic_check = 128;
18440 return loop_1_byte;
18444 alg = decide_alg (count, max / 2, memset, dynamic_check);
18445 gcc_assert (*dynamic_check == -1);
18446 gcc_assert (alg != libcall);
18447 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18448 *dynamic_check = max;
18451 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
18452 #undef ALG_USABLE_P
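/* Simplified model of decide_alg's size-table walk, for illustration only
   (the struct and names here are hypothetical, not the GCC types): each
   entry pairs a maximum block size with an algorithm, max == -1 means
   "any larger size", and max == 0 terminates the usable part of the table. */
struct alg_entry { long max; int alg; };

static int
pick_alg_model (const struct alg_entry *table, int n,
                long expected_size, int libcall_alg)
{
  int i;
  for (i = 0; i < n; i++)
    {
      if (table[i].max == 0)          /* only unusable entries remain */
        break;
      if (table[i].max == -1 || table[i].max >= expected_size)
        return table[i].alg;          /* first entry covering the size */
    }
  return libcall_alg;
}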
18455 /* Decide on alignment. We know that the operand is already aligned to ALIGN
18456 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
18458 decide_alignment (int align,
18459 enum stringop_alg alg,
18462 int desired_align = 0;
18466 gcc_unreachable ();
18468 case unrolled_loop:
18469 desired_align = GET_MODE_SIZE (Pmode);
18471 case rep_prefix_8_byte:
18474 case rep_prefix_4_byte:
18475 /* PentiumPro has special logic triggering for 8 byte aligned blocks,
18476 copying a whole cache line at once. */
18477 if (TARGET_PENTIUMPRO)
18482 case rep_prefix_1_byte:
18483 /* PentiumPro has special logic triggering for 8 byte aligned blocks,
18484 copying a whole cache line at once. */
18485 if (TARGET_PENTIUMPRO)
18499 if (desired_align < align)
18500 desired_align = align;
18501 if (expected_size != -1 && expected_size < 4)
18502 desired_align = align;
18503 return desired_align;
18506 /* Return the smallest power of 2 greater than VAL. */
18507 static int
18508 smallest_pow2_greater_than (int val)
18509 {
18510 int ret = 1;
18511 while (ret <= val)
18512 ret <<= 1;
18513 return ret;
18514 }
18516 /* Expand string move (memcpy) operation. Use i386 string operations when
18517 profitable. expand_setmem contains similar code. The code depends upon
18518 architecture, block size and alignment, but always has the same
18519 overall structure:
18521 1) Prologue guard: Conditional that jumps up to epilogues for small
18522 blocks that can be handled by epilogue alone. This is faster but
18523 also needed for correctness, since the prologue assumes the block is larger
18524 than the desired alignment.
18526 Optional dynamic check for size and libcall for large
18527 blocks is emitted here too, with -minline-stringops-dynamically.
18529 2) Prologue: copy first few bytes in order to get destination aligned
18530 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
18531 DESIRED_ALIGN, and up to DESIRED_ALIGN - ALIGN bytes can be copied.
18532 We emit either a jump tree on power of two sized blocks, or a byte loop.
18534 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
18535 with specified algorithm.
18537 4) Epilogue: code copying tail of the block that is too small to be
18538 handled by main body (or up to size guarded by prologue guard). */
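/* The same four-step structure as C-like pseudocode (editorial
   illustration only):

     if (count < epilogue_size_needed)        (1) prologue guard
       goto epilogue;
     while (dest & (desired_align - 1))       (2) alignment prologue
       { copy one byte; count--; }
     while (count >= size_needed)             (3) main body
       copy a size_needed-byte chunk;
   epilogue:                                  (4) tail
     copy count & (epilogue_size_needed - 1) bytes;  */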
18541 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
18542 rtx expected_align_exp, rtx expected_size_exp)
18548 rtx jump_around_label = NULL;
18549 HOST_WIDE_INT align = 1;
18550 unsigned HOST_WIDE_INT count = 0;
18551 HOST_WIDE_INT expected_size = -1;
18552 int size_needed = 0, epilogue_size_needed;
18553 int desired_align = 0, align_bytes = 0;
18554 enum stringop_alg alg;
18556 bool need_zero_guard = false;
18558 if (CONST_INT_P (align_exp))
18559 align = INTVAL (align_exp);
18560 /* i386 can do misaligned access at a reasonably increased cost. */
18561 if (CONST_INT_P (expected_align_exp)
18562 && INTVAL (expected_align_exp) > align)
18563 align = INTVAL (expected_align_exp);
18564 /* ALIGN is the minimum of destination and source alignment, but we care here
18565 just about destination alignment. */
18566 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
18567 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
18569 if (CONST_INT_P (count_exp))
18570 count = expected_size = INTVAL (count_exp);
18571 if (CONST_INT_P (expected_size_exp) && count == 0)
18572 expected_size = INTVAL (expected_size_exp);
18574 /* Make sure we don't need to care about overflow later on. */
18575 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18578 /* Step 0: Decide on preferred algorithm, desired alignment and
18579 size of chunks to be copied by main loop. */
18581 alg = decide_alg (count, expected_size, false, &dynamic_check);
18582 desired_align = decide_alignment (align, alg, expected_size);
18584 if (!TARGET_ALIGN_STRINGOPS)
18585 align = desired_align;
18587 if (alg == libcall)
18589 gcc_assert (alg != no_stringop);
18591 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
18592 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18593 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
18598 gcc_unreachable ();
18600 need_zero_guard = true;
18601 size_needed = GET_MODE_SIZE (Pmode);
18603 case unrolled_loop:
18604 need_zero_guard = true;
18605 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
18607 case rep_prefix_8_byte:
18610 case rep_prefix_4_byte:
18613 case rep_prefix_1_byte:
18617 need_zero_guard = true;
18622 epilogue_size_needed = size_needed;
18624 /* Step 1: Prologue guard. */
18626 /* Alignment code needs count to be in register. */
18627 if (CONST_INT_P (count_exp) && desired_align > align)
18629 if (INTVAL (count_exp) > desired_align
18630 && INTVAL (count_exp) > size_needed)
18633 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18634 if (align_bytes <= 0)
18637 align_bytes = desired_align - align_bytes;
18639 if (align_bytes == 0)
18640 count_exp = force_reg (counter_mode (count_exp), count_exp);
18642 gcc_assert (desired_align >= 1 && align >= 1);
18644 /* Ensure that alignment prologue won't copy past end of block. */
18645 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18647 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18648 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18649 Make sure it is a power of 2. */
18650 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18654 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18656 /* If the main algorithm works on QImode, no epilogue is needed.
18657 For small sizes just don't align anything. */
18658 if (size_needed == 1)
18659 desired_align = align;
18666 label = gen_label_rtx ();
18667 emit_cmp_and_jump_insns (count_exp,
18668 GEN_INT (epilogue_size_needed),
18669 LTU, 0, counter_mode (count_exp), 1, label);
18670 if (expected_size == -1 || expected_size < epilogue_size_needed)
18671 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18673 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18677 /* Emit code to decide on runtime whether library call or inline should be
18678 used. */
18679 if (dynamic_check != -1)
18681 if (CONST_INT_P (count_exp))
18683 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
18685 emit_block_move_via_libcall (dst, src, count_exp, false);
18686 count_exp = const0_rtx;
18692 rtx hot_label = gen_label_rtx ();
18693 jump_around_label = gen_label_rtx ();
18694 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18695 LEU, 0, GET_MODE (count_exp), 1, hot_label);
18696 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18697 emit_block_move_via_libcall (dst, src, count_exp, false);
18698 emit_jump (jump_around_label);
18699 emit_label (hot_label);
18703 /* Step 2: Alignment prologue. */
18705 if (desired_align > align)
18707 if (align_bytes == 0)
18709 /* Except for the first move in the epilogue, we no longer know
18710 constant offset in aliasing info. It doesn't seem worth
18711 the pain to maintain it for the first move, so throw away
18712 the info early. */
18713 src = change_address (src, BLKmode, srcreg);
18714 dst = change_address (dst, BLKmode, destreg);
18715 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
18720 /* If we know how many bytes need to be stored before dst is
18721 sufficiently aligned, maintain aliasing info accurately. */
18722 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
18723 desired_align, align_bytes);
18724 count_exp = plus_constant (count_exp, -align_bytes);
18725 count -= align_bytes;
18727 if (need_zero_guard
18728 && (count < (unsigned HOST_WIDE_INT) size_needed
18729 || (align_bytes == 0
18730 && count < ((unsigned HOST_WIDE_INT) size_needed
18731 + desired_align - align))))
18733 /* It is possible that we copied enough so the main loop will not
18734 execute. */
18735 gcc_assert (size_needed > 1);
18736 if (label == NULL_RTX)
18737 label = gen_label_rtx ();
18738 emit_cmp_and_jump_insns (count_exp,
18739 GEN_INT (size_needed),
18740 LTU, 0, counter_mode (count_exp), 1, label);
18741 if (expected_size == -1
18742 || expected_size < (desired_align - align) / 2 + size_needed)
18743 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18745 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18748 if (label && size_needed == 1)
18750 emit_label (label);
18751 LABEL_NUSES (label) = 1;
18753 epilogue_size_needed = 1;
18755 else if (label == NULL_RTX)
18756 epilogue_size_needed = size_needed;
18758 /* Step 3: Main loop. */
18764 gcc_unreachable ();
18766 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18767 count_exp, QImode, 1, expected_size);
18770 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18771 count_exp, Pmode, 1, expected_size);
18773 case unrolled_loop:
18774 /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
18775 registers for 4 temporaries anyway. */
18776 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18777 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
18780 case rep_prefix_8_byte:
18781 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18784 case rep_prefix_4_byte:
18785 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18788 case rep_prefix_1_byte:
18789 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18793 /* Properly adjust the offsets of src and dest memory for aliasing. */
18794 if (CONST_INT_P (count_exp))
18796 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
18797 (count / size_needed) * size_needed);
18798 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18799 (count / size_needed) * size_needed);
18803 src = change_address (src, BLKmode, srcreg);
18804 dst = change_address (dst, BLKmode, destreg);
18807 /* Step 4: Epilogue to copy the remaining bytes. */
18811 /* When the main loop is done, COUNT_EXP might hold original count,
18812 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18813 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18814 bytes. Compensate if needed. */
18816 if (size_needed < epilogue_size_needed)
18819 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18820 GEN_INT (size_needed - 1), count_exp, 1,
18822 if (tmp != count_exp)
18823 emit_move_insn (count_exp, tmp);
18825 emit_label (label);
18826 LABEL_NUSES (label) = 1;
18829 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18830 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
18831 epilogue_size_needed);
18832 if (jump_around_label)
18833 emit_label (jump_around_label);
18837 /* Helper function for memset. For a QImode value 0xXY, produce
18838 0xXYXYXYXY of the width specified by MODE. This is essentially
18839 VAL * 0x01010101, but we can do slightly better than
18840 synth_mult by unwinding the sequence by hand on CPUs with
18843 promote_duplicated_reg (enum machine_mode mode, rtx val)
18845 enum machine_mode valmode = GET_MODE (val);
18847 int nops = mode == DImode ? 3 : 2;
18849 gcc_assert (mode == SImode || mode == DImode);
18850 if (val == const0_rtx)
18851 return copy_to_mode_reg (mode, const0_rtx);
18852 if (CONST_INT_P (val))
18854 HOST_WIDE_INT v = INTVAL (val) & 255;
18856 v |= v << 8;
18857 v |= v << 16;
18858 if (mode == DImode)
18859 v |= (v << 16) << 16;
18860 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
18863 if (valmode == VOIDmode)
18865 if (valmode != QImode)
18866 val = gen_lowpart (QImode, val);
18867 if (mode == QImode)
18869 if (!TARGET_PARTIAL_REG_STALL)
18870 nops--;
18871 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
18872 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
18873 <= (ix86_cost->shift_const + ix86_cost->add) * nops
18874 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
18876 rtx reg = convert_modes (mode, QImode, val, true);
18877 tmp = promote_duplicated_reg (mode, const1_rtx);
18878 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
18883 rtx reg = convert_modes (mode, QImode, val, true);
18885 if (!TARGET_PARTIAL_REG_STALL)
18886 if (mode == SImode)
18887 emit_insn (gen_movsi_insv_1 (reg, reg));
18889 emit_insn (gen_movdi_insv_1 (reg, reg));
18892 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
18893 NULL, 1, OPTAB_DIRECT);
18895 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18897 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
18898 NULL, 1, OPTAB_DIRECT);
18899 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18900 if (mode == SImode)
18902 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
18903 NULL, 1, OPTAB_DIRECT);
18904 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
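/* Worked model of the unwound replication sequence above (editorial
   illustration, not GCC code): each shift/or step doubles the number of
   byte copies. */
static unsigned long long
replicate_byte_model (unsigned char val, int di_mode)
{
  unsigned long long reg = val;
  reg |= reg << 8;                /* 0x00XY -> 0xXYXY         */
  reg |= reg << 16;               /* 0xXYXY -> 0xXYXYXYXY     */
  if (di_mode)
    reg |= reg << 32;             /* eight copies for DImode  */
  return reg;                     /* e.g. (0xAB, 0) -> 0xABABABAB */
}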
18909 /* Duplicate value VAL using promote_duplicated_reg into the maximal size that
18910 will be needed by the main loop copying SIZE_NEEDED chunks and by the prologue
18911 raising alignment from ALIGN to DESIRED_ALIGN. */
18913 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
18918 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
18919 promoted_val = promote_duplicated_reg (DImode, val);
18920 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
18921 promoted_val = promote_duplicated_reg (SImode, val);
18922 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
18923 promoted_val = promote_duplicated_reg (HImode, val);
18925 promoted_val = val;
18927 return promoted_val;
18930 /* Expand string set (memset) operation. Use i386 string operations when
18931 profitable. See the expand_movmem comment for an explanation of the individual
18932 steps performed. */
18934 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
18935 rtx expected_align_exp, rtx expected_size_exp)
18940 rtx jump_around_label = NULL;
18941 HOST_WIDE_INT align = 1;
18942 unsigned HOST_WIDE_INT count = 0;
18943 HOST_WIDE_INT expected_size = -1;
18944 int size_needed = 0, epilogue_size_needed;
18945 int desired_align = 0, align_bytes = 0;
18946 enum stringop_alg alg;
18947 rtx promoted_val = NULL;
18948 bool force_loopy_epilogue = false;
18950 bool need_zero_guard = false;
18952 if (CONST_INT_P (align_exp))
18953 align = INTVAL (align_exp);
18954 /* i386 can do misaligned access at a reasonably increased cost. */
18955 if (CONST_INT_P (expected_align_exp)
18956 && INTVAL (expected_align_exp) > align)
18957 align = INTVAL (expected_align_exp);
18958 if (CONST_INT_P (count_exp))
18959 count = expected_size = INTVAL (count_exp);
18960 if (CONST_INT_P (expected_size_exp) && count == 0)
18961 expected_size = INTVAL (expected_size_exp);
18963 /* Make sure we don't need to care about overflow later on. */
18964 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18967 /* Step 0: Decide on preferred algorithm, desired alignment and
18968 size of chunks to be copied by main loop. */
18970 alg = decide_alg (count, expected_size, true, &dynamic_check);
18971 desired_align = decide_alignment (align, alg, expected_size);
18973 if (!TARGET_ALIGN_STRINGOPS)
18974 align = desired_align;
18976 if (alg == libcall)
18978 gcc_assert (alg != no_stringop);
18980 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
18981 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18986 gcc_unreachable ();
18988 need_zero_guard = true;
18989 size_needed = GET_MODE_SIZE (Pmode);
18991 case unrolled_loop:
18992 need_zero_guard = true;
18993 size_needed = GET_MODE_SIZE (Pmode) * 4;
18995 case rep_prefix_8_byte:
18998 case rep_prefix_4_byte:
19001 case rep_prefix_1_byte:
19005 need_zero_guard = true;
19009 epilogue_size_needed = size_needed;
19011 /* Step 1: Prologue guard. */
19013 /* Alignment code needs count to be in register. */
19014 if (CONST_INT_P (count_exp) && desired_align > align)
19016 if (INTVAL (count_exp) > desired_align
19017 && INTVAL (count_exp) > size_needed)
19020 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
19021 if (align_bytes <= 0)
19024 align_bytes = desired_align - align_bytes;
19026 if (align_bytes == 0)
19028 enum machine_mode mode = SImode;
19029 if (TARGET_64BIT && (count & ~0xffffffff))
19030 mode = DImode;
19031 count_exp = force_reg (mode, count_exp);
19034 /* Do the cheap promotion to allow better CSE across the
19035 main loop and epilogue (i.e. one load of the big constant in
19036 front of all code). */
19037 if (CONST_INT_P (val_exp))
19038 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
19039 desired_align, align);
19040 /* Ensure that alignment prologue won't copy past end of block. */
19041 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
19043 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
19044 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
19045 Make sure it is power of 2. */
19046 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
19048 /* To improve performance of small blocks, we jump around the VAL
19049 promoting mode. This means that if the promoted VAL is not constant,
19050 we might not use it in the epilogue and have to use the byte
19051 loop variant. */
19052 if (epilogue_size_needed > 2 && !promoted_val)
19053 force_loopy_epilogue = true;
19056 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
19058 /* If the main algorithm works on QImode, no epilogue is needed.
19059 For small sizes just don't align anything. */
19060 if (size_needed == 1)
19061 desired_align = align;
19068 label = gen_label_rtx ();
19069 emit_cmp_and_jump_insns (count_exp,
19070 GEN_INT (epilogue_size_needed),
19071 LTU, 0, counter_mode (count_exp), 1, label);
19072 if (expected_size == -1 || expected_size <= epilogue_size_needed)
19073 predict_jump (REG_BR_PROB_BASE * 60 / 100);
19075 predict_jump (REG_BR_PROB_BASE * 20 / 100);
19078 if (dynamic_check != -1)
19080 rtx hot_label = gen_label_rtx ();
19081 jump_around_label = gen_label_rtx ();
19082 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
19083 LEU, 0, counter_mode (count_exp), 1, hot_label);
19084 predict_jump (REG_BR_PROB_BASE * 90 / 100);
19085 set_storage_via_libcall (dst, count_exp, val_exp, false);
19086 emit_jump (jump_around_label);
19087 emit_label (hot_label);
19090 /* Step 2: Alignment prologue. */
19092 /* Do the expensive promotion once we have branched off the small blocks. */
19094 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
19095 desired_align, align);
19096 gcc_assert (desired_align >= 1 && align >= 1);
19098 if (desired_align > align)
19100 if (align_bytes == 0)
19102 /* Except for the first move in the epilogue, we no longer know
19103 constant offset in aliasing info. It doesn't seem worth
19104 the pain to maintain it for the first move, so throw away
19105 the info early. */
19106 dst = change_address (dst, BLKmode, destreg);
19107 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
19112 /* If we know how many bytes need to be stored before dst is
19113 sufficiently aligned, maintain aliasing info accurately. */
19114 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
19115 desired_align, align_bytes);
19116 count_exp = plus_constant (count_exp, -align_bytes);
19117 count -= align_bytes;
19119 if (need_zero_guard
19120 && (count < (unsigned HOST_WIDE_INT) size_needed
19121 || (align_bytes == 0
19122 && count < ((unsigned HOST_WIDE_INT) size_needed
19123 + desired_align - align))))
19125 /* It is possible that we copied enough so the main loop will not
19126 execute. */
19127 gcc_assert (size_needed > 1);
19128 if (label == NULL_RTX)
19129 label = gen_label_rtx ();
19130 emit_cmp_and_jump_insns (count_exp,
19131 GEN_INT (size_needed),
19132 LTU, 0, counter_mode (count_exp), 1, label);
19133 if (expected_size == -1
19134 || expected_size < (desired_align - align) / 2 + size_needed)
19135 predict_jump (REG_BR_PROB_BASE * 20 / 100);
19137 predict_jump (REG_BR_PROB_BASE * 60 / 100);
19140 if (label && size_needed == 1)
19142 emit_label (label);
19143 LABEL_NUSES (label) = 1;
19145 promoted_val = val_exp;
19146 epilogue_size_needed = 1;
19148 else if (label == NULL_RTX)
19149 epilogue_size_needed = size_needed;
19151 /* Step 3: Main loop. */
19157 gcc_unreachable ();
19159 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
19160 count_exp, QImode, 1, expected_size);
19163 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
19164 count_exp, Pmode, 1, expected_size);
19166 case unrolled_loop:
19167 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
19168 count_exp, Pmode, 4, expected_size);
19170 case rep_prefix_8_byte:
19171 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
19174 case rep_prefix_4_byte:
19175 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
19178 case rep_prefix_1_byte:
19179 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
19183 /* Properly adjust the offset of dest memory for aliasing. */
19184 if (CONST_INT_P (count_exp))
19185 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
19186 (count / size_needed) * size_needed);
19188 dst = change_address (dst, BLKmode, destreg);
19190 /* Step 4: Epilogue to copy the remaining bytes. */
19194 /* When the main loop is done, COUNT_EXP might hold original count,
19195 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
19196 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
19197 bytes. Compensate if needed. */
19199 if (size_needed < epilogue_size_needed)
19202 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
19203 GEN_INT (size_needed - 1), count_exp, 1,
19205 if (tmp != count_exp)
19206 emit_move_insn (count_exp, tmp);
19208 emit_label (label);
19209 LABEL_NUSES (label) = 1;
19212 if (count_exp != const0_rtx && epilogue_size_needed > 1)
19214 if (force_loopy_epilogue)
19215 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
19216 epilogue_size_needed);
19218 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
19219 epilogue_size_needed);
19221 if (jump_around_label)
19222 emit_label (jump_around_label);
19226 /* Expand the appropriate insns for doing strlen if not just doing
19227 repnz; scasb
19229 out = result, initialized with the start address
19230 align_rtx = alignment of the address.
19231 scratch = scratch register, initialized with the start address when
19232 not aligned, otherwise undefined
19234 This is just the body. It needs the initializations mentioned above and
19235 some address computing at the end. These things are done in i386.md. */
19238 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
19242 rtx align_2_label = NULL_RTX;
19243 rtx align_3_label = NULL_RTX;
19244 rtx align_4_label = gen_label_rtx ();
19245 rtx end_0_label = gen_label_rtx ();
19247 rtx tmpreg = gen_reg_rtx (SImode);
19248 rtx scratch = gen_reg_rtx (SImode);
19252 if (CONST_INT_P (align_rtx))
19253 align = INTVAL (align_rtx);
19255 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
19257 /* Is there a known alignment and is it less than 4? */
19260 rtx scratch1 = gen_reg_rtx (Pmode);
19261 emit_move_insn (scratch1, out);
19262 /* Is there a known alignment and is it not 2? */
19265 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
19266 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
19268 /* Leave just the 3 lower bits. */
19269 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
19270 NULL_RTX, 0, OPTAB_WIDEN);
19272 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19273 Pmode, 1, align_4_label);
19274 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
19275 Pmode, 1, align_2_label);
19276 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
19277 Pmode, 1, align_3_label);
19281 /* Since the alignment is 2, we have to check 2 or 0 bytes;
19282 check whether it is aligned to a 4-byte boundary. */
19284 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
19285 NULL_RTX, 0, OPTAB_WIDEN);
19287 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19288 Pmode, 1, align_4_label);
19291 mem = change_address (src, QImode, out);
19293 /* Now compare the bytes. */
19295 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
19296 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
19297 QImode, 1, end_0_label);
19299 /* Increment the address. */
19300 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19302 /* Not needed with an alignment of 2 */
19305 emit_label (align_2_label);
19307 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19310 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19312 emit_label (align_3_label);
19315 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19318 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19321 /* Generate loop to check 4 bytes at a time. It is not a good idea to
19322 align this loop. It gives only huge programs, but does not help to
19323 converge. */
19324 emit_label (align_4_label);
19326 mem = change_address (src, SImode, out);
19327 emit_move_insn (scratch, mem);
19328 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
19330 /* This formula yields a nonzero result iff one of the bytes is zero.
19331 This saves three branches inside the loop and many cycles. */
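/* Worked example (editorial illustration): for v = 0x61006263, whose
   third byte (bits 16..23) is zero,
       (v - 0x01010101) & ~v & 0x80808080
     = 0x5FFF6162 & 0x9EFF9D9C & 0x80808080
     = 0x00800000,
   which is nonzero, and the surviving 0x80 marks the zero byte; a word
   with no zero byte yields 0. */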
19333 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
19334 emit_insn (gen_one_cmplsi2 (scratch, scratch));
19335 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
19336 emit_insn (gen_andsi3 (tmpreg, tmpreg,
19337 gen_int_mode (0x80808080, SImode)));
19338 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
19343 rtx reg = gen_reg_rtx (SImode);
19344 rtx reg2 = gen_reg_rtx (Pmode);
19345 emit_move_insn (reg, tmpreg);
19346 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
19348 /* If zero is not in the first two bytes, move two bytes forward. */
19349 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19350 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19351 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19352 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
19353 gen_rtx_IF_THEN_ELSE (SImode, tmp,
19356 /* Emit lea manually to avoid clobbering of flags. */
19357 emit_insn (gen_rtx_SET (SImode, reg2,
19358 gen_rtx_PLUS (Pmode, out, const2_rtx)));
19360 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19361 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19362 emit_insn (gen_rtx_SET (VOIDmode, out,
19363 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
19369 rtx end_2_label = gen_label_rtx ();
19370 /* Is zero in the first two bytes? */
19372 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19373 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19374 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
19375 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
19376 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
19378 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
19379 JUMP_LABEL (tmp) = end_2_label;
19381 /* Not in the first two. Move two bytes forward. */
19382 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
19383 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
19385 emit_label (end_2_label);
19389 /* Avoid branch in fixing the byte. */
19390 tmpreg = gen_lowpart (QImode, tmpreg);
19391 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
19392 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
19393 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
19394 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), tmp, cmp));
19396 emit_label (end_0_label);
19399 /* Expand strlen. */
19402 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
19404 rtx addr, scratch1, scratch2, scratch3, scratch4;
19406 /* The generic case of the strlen expander is long. Avoid expanding it
19407 unless TARGET_INLINE_ALL_STRINGOPS. */
19409 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19410 && !TARGET_INLINE_ALL_STRINGOPS
19411 && !optimize_insn_for_size_p ()
19412 && (!CONST_INT_P (align) || INTVAL (align) < 4))
19415 addr = force_reg (Pmode, XEXP (src, 0));
19416 scratch1 = gen_reg_rtx (Pmode);
19418 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19419 && !optimize_insn_for_size_p ())
19421 /* Well it seems that some optimizer does not combine a call like
19422 foo(strlen(bar), strlen(bar));
19423 when the move and the subtraction are done here. It does calculate
19424 the length just once when these instructions are done inside of
19425 output_strlen_unroll(). But I think since &bar[strlen(bar)] is
19426 often used and I use one fewer register for the lifetime of
19427 output_strlen_unroll() this is better. */
19429 emit_move_insn (out, addr);
19431 ix86_expand_strlensi_unroll_1 (out, src, align);
19433 /* strlensi_unroll_1 returns the address of the zero at the end of
19434 the string, like memchr(), so compute the length by subtracting
19435 the start address. */
19436 emit_insn ((*ix86_gen_sub3) (out, out, addr));
19442 /* Can't use this if the user has appropriated eax, ecx, or edi. */
19443 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
19446 scratch2 = gen_reg_rtx (Pmode);
19447 scratch3 = gen_reg_rtx (Pmode);
19448 scratch4 = force_reg (Pmode, constm1_rtx);
19450 emit_move_insn (scratch3, addr);
19451 eoschar = force_reg (QImode, eoschar);
19453 src = replace_equiv_address_nv (src, scratch3);
19455 /* If .md starts supporting :P, this can be done in .md. */
19456 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
19457 scratch4), UNSPEC_SCAS);
19458 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
19459 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
19460 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
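/* Editorial illustration of the arithmetic above: repnz scasb starts with
   scratch4 = -1 and scans len + 1 bytes (including the NUL), leaving the
   count register at -(len + 2).  Then scratch2 = ~(-(len + 2)) = len + 1,
   and out = (len + 1) + (-1) = len.  For "abc": -5 -> ~(-5) = 4 -> 3. */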
19465 /* For a given symbol (function) construct code to compute the address of its
19466 PLT entry in the large x86-64 PIC model. */
19468 construct_plt_address (rtx symbol)
19470 rtx tmp = gen_reg_rtx (Pmode);
19471 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
19473 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
19474 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
19476 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
19477 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
19482 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
19484 rtx pop, int sibcall)
19486 rtx use = NULL, call;
19488 if (pop == const0_rtx)
19490 gcc_assert (!TARGET_64BIT || !pop);
19492 if (TARGET_MACHO && !TARGET_64BIT)
19495 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
19496 fnaddr = machopic_indirect_call_target (fnaddr);
19501 /* Static functions and indirect calls don't need the pic register. */
19502 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
19503 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19504 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
19505 use_reg (&use, pic_offset_table_rtx);
19508 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
19510 rtx al = gen_rtx_REG (QImode, AX_REG);
19511 emit_move_insn (al, callarg2);
19512 use_reg (&use, al);
19515 if (ix86_cmodel == CM_LARGE_PIC
19517 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19518 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
19519 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
19521 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
19522 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
19524 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
19525 fnaddr = gen_rtx_MEM (QImode, fnaddr);
19528 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
19530 call = gen_rtx_SET (VOIDmode, retval, call);
19533 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
19534 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
19535 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
19538 && ix86_cfun_abi () == MS_ABI
19539 && (!callarg2 || INTVAL (callarg2) != -2))
19541 /* We need to represent that SI and DI registers are clobbered
19542 by the call. */
19543 static int clobbered_registers[] = {
19544 XMM6_REG, XMM7_REG, XMM8_REG,
19545 XMM9_REG, XMM10_REG, XMM11_REG,
19546 XMM12_REG, XMM13_REG, XMM14_REG,
19547 XMM15_REG, SI_REG, DI_REG
19550 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
19551 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
19552 UNSPEC_MS_TO_SYSV_CALL);
19556 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
19557 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
19560 (SSE_REGNO_P (clobbered_registers[i])
19562 clobbered_registers[i]));
19564 call = gen_rtx_PARALLEL (VOIDmode,
19565 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
19569 call = emit_call_insn (call);
19571 CALL_INSN_FUNCTION_USAGE (call) = use;
19575 /* Clear stack slot assignments remembered from previous functions.
19576 This is called from INIT_EXPANDERS once before RTL is emitted for each
19577 function. */
19579 static struct machine_function *
19580 ix86_init_machine_status (void)
19582 struct machine_function *f;
19584 f = ggc_alloc_cleared_machine_function ();
19585 f->use_fast_prologue_epilogue_nregs = -1;
19586 f->tls_descriptor_call_expanded_p = 0;
19587 f->call_abi = ix86_abi;
19592 /* Return a MEM corresponding to a stack slot with mode MODE.
19593 Allocate a new slot if necessary.
19595 The RTL for a function can have several slots available: N is
19596 which slot to use. */
19599 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
19601 struct stack_local_entry *s;
19603 gcc_assert (n < MAX_386_STACK_LOCALS);
19605 /* Virtual slot is valid only before vregs are instantiated. */
19606 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
19608 for (s = ix86_stack_locals; s; s = s->next)
19609 if (s->mode == mode && s->n == n)
19610 return copy_rtx (s->rtl);
19612 s = ggc_alloc_stack_local_entry ();
19615 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
19617 s->next = ix86_stack_locals;
19618 ix86_stack_locals = s;
19622 /* Construct the SYMBOL_REF for the tls_get_addr function. */
19624 static GTY(()) rtx ix86_tls_symbol;
19626 ix86_tls_get_addr (void)
19629 if (!ix86_tls_symbol)
19631 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
19632 (TARGET_ANY_GNU_TLS
19634 ? "___tls_get_addr"
19635 : "__tls_get_addr");
19638 return ix86_tls_symbol;
19641 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
19643 static GTY(()) rtx ix86_tls_module_base_symbol;
19645 ix86_tls_module_base (void)
19648 if (!ix86_tls_module_base_symbol)
19650 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
19651 "_TLS_MODULE_BASE_");
19652 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
19653 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
19656 return ix86_tls_module_base_symbol;
19659 /* Calculate the length of the memory address in the instruction
19660 encoding. Does not include the one-byte modrm, opcode, or prefix. */
19663 memory_address_length (rtx addr)
19665 struct ix86_address parts;
19666 rtx base, index, disp;
19670 if (GET_CODE (addr) == PRE_DEC
19671 || GET_CODE (addr) == POST_INC
19672 || GET_CODE (addr) == PRE_MODIFY
19673 || GET_CODE (addr) == POST_MODIFY)
19676 ok = ix86_decompose_address (addr, &parts);
19679 if (parts.base && GET_CODE (parts.base) == SUBREG)
19680 parts.base = SUBREG_REG (parts.base);
19681 if (parts.index && GET_CODE (parts.index) == SUBREG)
19682 parts.index = SUBREG_REG (parts.index);
19685 index = parts.index;
19690 - esp as the base always wants an index,
19691 - ebp as the base always wants a displacement,
19692 - r12 as the base always wants an index,
19693 - r13 as the base always wants a displacement. */
19695 /* Register Indirect. */
19696 if (base && !index && !disp)
19698 /* esp (for its index) and ebp (for its displacement) need
19699 the two-byte modrm form. Similarly for r12 and r13 in 64-bit
19700 mode. */
19702 && (addr == arg_pointer_rtx
19703 || addr == frame_pointer_rtx
19704 || REGNO (addr) == SP_REG
19705 || REGNO (addr) == BP_REG
19706 || REGNO (addr) == R12_REG
19707 || REGNO (addr) == R13_REG))
19711 /* Direct Addressing. In 64-bit mode mod 00 r/m 5
19712 is not disp32, but disp32(%rip), so for disp32
19713 SIB byte is needed, unless print_operand_address
19714 optimizes it into disp32(%rip) or (%rip) is implied
19715 by UNSPEC. */
19716 else if (disp && !base && !index)
19723 if (GET_CODE (disp) == CONST)
19724 symbol = XEXP (disp, 0);
19725 if (GET_CODE (symbol) == PLUS
19726 && CONST_INT_P (XEXP (symbol, 1)))
19727 symbol = XEXP (symbol, 0);
19729 if (GET_CODE (symbol) != LABEL_REF
19730 && (GET_CODE (symbol) != SYMBOL_REF
19731 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
19732 && (GET_CODE (symbol) != UNSPEC
19733 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
19734 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
19741 /* Find the length of the displacement constant. */
19744 if (base && satisfies_constraint_K (disp))
19749 /* ebp always wants a displacement. Similarly r13. */
19750 else if (base && REG_P (base)
19751 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
19754 /* An index requires the two-byte modrm form.... */
19756 /* ...like esp (or r12), which always wants an index. */
19757 || base == arg_pointer_rtx
19758 || base == frame_pointer_rtx
19759 || (base && REG_P (base)
19760 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
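/* Rough C sketch, for illustration only, of the extra-byte rules this
   function implements (hypothetical names; the real code works on the
   decomposed RTL address).  The returned count excludes modrm and opcode. */
static int
addr_extra_bytes_model (int has_base, int has_index, long disp,
                        int base_needs_sib, int base_needs_disp)
{
  int len = 0;
  if (!has_base && !has_index)
    return 4;                                      /* bare disp32 */
  if (has_index || base_needs_sib)
    len += 1;                                      /* SIB byte (esp/r12) */
  if (disp != 0 || base_needs_disp)
    len += (-128 <= disp && disp <= 127) ? 1 : 4;  /* disp8 vs disp32 */
  return len;
}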
19777 /* Compute default value for "length_immediate" attribute. When SHORTFORM
19778 is set, expect that the insn has an 8-bit immediate alternative. */
19780 ix86_attr_length_immediate_default (rtx insn, int shortform)
19784 extract_insn_cached (insn);
19785 for (i = recog_data.n_operands - 1; i >= 0; --i)
19786 if (CONSTANT_P (recog_data.operand[i]))
19788 enum attr_mode mode = get_attr_mode (insn);
19791 if (shortform && CONST_INT_P (recog_data.operand[i]))
19793 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
19800 ival = trunc_int_for_mode (ival, HImode);
19803 ival = trunc_int_for_mode (ival, SImode);
19808 if (IN_RANGE (ival, -128, 127))
19825 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
19830 fatal_insn ("unknown insn mode", insn);
19835 /* Compute default value for "length_address" attribute. */
19837 ix86_attr_length_address_default (rtx insn)
19841 if (get_attr_type (insn) == TYPE_LEA)
19843 rtx set = PATTERN (insn), addr;
19845 if (GET_CODE (set) == PARALLEL)
19846 set = XVECEXP (set, 0, 0);
19848 gcc_assert (GET_CODE (set) == SET);
19850 addr = SET_SRC (set);
19851 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
19853 if (GET_CODE (addr) == ZERO_EXTEND)
19854 addr = XEXP (addr, 0);
19855 if (GET_CODE (addr) == SUBREG)
19856 addr = SUBREG_REG (addr);
19859 return memory_address_length (addr);
19862 extract_insn_cached (insn);
19863 for (i = recog_data.n_operands - 1; i >= 0; --i)
19864 if (MEM_P (recog_data.operand[i]))
19866 constrain_operands_cached (reload_completed);
19867 if (which_alternative != -1)
19869 const char *constraints = recog_data.constraints[i];
19870 int alt = which_alternative;
19872 while (*constraints == '=' || *constraints == '+')
19875 while (*constraints++ != ',')
19877 /* Skip ignored operands. */
19878 if (*constraints == 'X')
19881 return memory_address_length (XEXP (recog_data.operand[i], 0));
19886 /* Compute default value for "length_vex" attribute. It includes
19887 2 or 3 byte VEX prefix and 1 opcode byte. */
19890 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
19895 /* Only the 0f opcode can use the 2-byte VEX prefix, and the VEX W bit requires
19896 the 3-byte VEX prefix. */
19897 if (!has_0f_opcode || has_vex_w)
19900 /* We can always use the 2-byte VEX prefix in 32-bit mode. */
19904 extract_insn_cached (insn);
19906 for (i = recog_data.n_operands - 1; i >= 0; --i)
19907 if (REG_P (recog_data.operand[i]))
19909 /* REX.W bit uses 3 byte VEX prefix. */
19910 if (GET_MODE (recog_data.operand[i]) == DImode
19911 && GENERAL_REG_P (recog_data.operand[i]))
19916 /* REX.X or REX.B bits use 3 byte VEX prefix. */
19917 if (MEM_P (recog_data.operand[i])
19918 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
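/* Example (editorial note): with only xmm0-xmm7 register operands and no
   REX.W, the 2-byte 0xC5 prefix applies, so prefix + opcode = 3 bytes; a
   DImode general register (REX.W) or an extended register mentioned in a
   memory operand (REX.X/REX.B) forces the 3-byte 0xC4 form, giving 4. */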
19925 /* Return the maximum number of instructions a cpu can issue. */
19928 ix86_issue_rate (void)
19932 case PROCESSOR_PENTIUM:
19933 case PROCESSOR_ATOM:
19937 case PROCESSOR_PENTIUMPRO:
19938 case PROCESSOR_PENTIUM4:
19939 case PROCESSOR_ATHLON:
19941 case PROCESSOR_AMDFAM10:
19942 case PROCESSOR_NOCONA:
19943 case PROCESSOR_GENERIC32:
19944 case PROCESSOR_GENERIC64:
19945 case PROCESSOR_BDVER1:
19948 case PROCESSOR_CORE2:
19956 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
19957 by DEP_INSN and nothing else set by DEP_INSN. */
19960 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
19964 /* Simplify the test for uninteresting insns. */
19965 if (insn_type != TYPE_SETCC
19966 && insn_type != TYPE_ICMOV
19967 && insn_type != TYPE_FCMOV
19968 && insn_type != TYPE_IBR)
19971 if ((set = single_set (dep_insn)) != 0)
19973 set = SET_DEST (set);
19976 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
19977 && XVECLEN (PATTERN (dep_insn), 0) == 2
19978 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
19979 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
19981 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
      set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
19987 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
19990 /* This test is true if the dependent insn reads the flags but
19991 not any other potentially set register. */
19992 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
19995 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
/* Return true iff USE_INSN has a memory address with operands set by
   SET_INSN.  */
20005 ix86_agi_dependent (rtx set_insn, rtx use_insn)
20008 extract_insn_cached (use_insn);
20009 for (i = recog_data.n_operands - 1; i >= 0; --i)
20010 if (MEM_P (recog_data.operand[i]))
20012 rtx addr = XEXP (recog_data.operand[i], 0);
20013 return modified_in_p (addr, set_insn) != 0;
20019 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
20021 enum attr_type insn_type, dep_insn_type;
20022 enum attr_memory memory;
20024 int dep_insn_code_number;
20026 /* Anti and output dependencies have zero cost on all CPUs. */
20027 if (REG_NOTE_KIND (link) != 0)
20030 dep_insn_code_number = recog_memoized (dep_insn);
20032 /* If we can't recognize the insns, we can't really do anything. */
20033 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
20036 insn_type = get_attr_type (insn);
20037 dep_insn_type = get_attr_type (dep_insn);
20041 case PROCESSOR_PENTIUM:
20042 /* Address Generation Interlock adds a cycle of latency. */
20043 if (insn_type == TYPE_LEA)
20045 rtx addr = PATTERN (insn);
20047 if (GET_CODE (addr) == PARALLEL)
20048 addr = XVECEXP (addr, 0, 0);
20050 gcc_assert (GET_CODE (addr) == SET);
20052 addr = SET_SRC (addr);
20053 if (modified_in_p (addr, dep_insn))
20056 else if (ix86_agi_dependent (dep_insn, insn))
20059 /* ??? Compares pair with jump/setcc. */
20060 if (ix86_flags_dependent (insn, dep_insn, insn_type))
      /* Floating point stores require the value to be ready one cycle
         earlier.  */
20064 if (insn_type == TYPE_FMOV
20065 && get_attr_memory (insn) == MEMORY_STORE
20066 && !ix86_agi_dependent (dep_insn, insn))
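
/* Illustrative sketch (not part of the port): the AGI pattern penalized
   above.  On Pentium, a load whose address register was written by the
   immediately preceding insn stalls one extra cycle, e.g. (operands are
   hypothetical):

        movl    $buf, %eax      ; dep_insn writes %eax
        movl    (%eax), %ebx    ; insn needs %eax for address generation

   The second insn pays the Address Generation Interlock penalty.  */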
20070 case PROCESSOR_PENTIUMPRO:
20071 memory = get_attr_memory (insn);
20073 /* INT->FP conversion is expensive. */
20074 if (get_attr_fp_int_src (dep_insn))
      /* There is one extra cycle of latency between an FP op and a
         store.  */
20078 if (insn_type == TYPE_FMOV
20079 && (set = single_set (dep_insn)) != NULL_RTX
20080 && (set2 = single_set (insn)) != NULL_RTX
20081 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
20082 && MEM_P (SET_DEST (set2)))
      /* Show the ability of the reorder buffer to hide the latency of a
         load by executing it in parallel with the previous instruction,
         when the previous instruction is not needed to compute the
         address.  */
20088 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
20089 && !ix86_agi_dependent (dep_insn, insn))
          /* Claim moves to take one cycle, as the core can issue one
             load at a time and the next load can start a cycle later.  */
20093 if (dep_insn_type == TYPE_IMOV
20094 || dep_insn_type == TYPE_FMOV)
20102 memory = get_attr_memory (insn);
      /* The esp dependency is resolved before the instruction is really
         finished.  */
20106 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
20107 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
20110 /* INT->FP conversion is expensive. */
20111 if (get_attr_fp_int_src (dep_insn))
      /* Show the ability of the reorder buffer to hide the latency of a
         load by executing it in parallel with the previous instruction,
         when the previous instruction is not needed to compute the
         address.  */
20117 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
20118 && !ix86_agi_dependent (dep_insn, insn))
          /* Claim moves to take one cycle, as the core can issue one
             load at a time and the next load can start a cycle later.  */
20122 if (dep_insn_type == TYPE_IMOV
20123 || dep_insn_type == TYPE_FMOV)
20132 case PROCESSOR_ATHLON:
20134 case PROCESSOR_AMDFAM10:
20135 case PROCESSOR_BDVER1:
20136 case PROCESSOR_ATOM:
20137 case PROCESSOR_GENERIC32:
20138 case PROCESSOR_GENERIC64:
20139 memory = get_attr_memory (insn);
      /* Show the ability of the reorder buffer to hide the latency of a
         load by executing it in parallel with the previous instruction,
         when the previous instruction is not needed to compute the
         address.  */
20144 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
20145 && !ix86_agi_dependent (dep_insn, insn))
20147 enum attr_unit unit = get_attr_unit (insn);
          /* Because of the difference between the lengths of the integer
             and floating-point unit pipeline preparation stages, the
             memory operands for floating point are cheaper.

             ??? For Athlon the difference is most probably 2.  */
20155 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
20158 loadcost = TARGET_ATHLON ? 2 : 0;
20160 if (cost >= loadcost)
/* How many alternative schedules to try.  This should be as wide as the
   scheduling freedom in the DFA, but no wider.  Making this value too
   large results in extra work for the scheduler.  */
20178 ia32_multipass_dfa_lookahead (void)
20182 case PROCESSOR_PENTIUM:
20185 case PROCESSOR_PENTIUMPRO:
/* Compute the alignment given to a constant that is being placed in
   memory.  EXP is the constant and ALIGN is the alignment that the
   object would ordinarily have.  The value of this function is used
   instead of that alignment to align the object.  */
20202 ix86_constant_alignment (tree exp, int align)
20204 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
20205 || TREE_CODE (exp) == INTEGER_CST)
20207 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
20209 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
20212 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
20213 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
20214 return BITS_PER_WORD;
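
/* Illustrative example (not part of the port) of the rules above:
   a DFmode constant such as 1.0 placed in memory is aligned to 64 bits,
   and, when not optimizing for size, a string constant of length at
   least 31 is aligned to BITS_PER_WORD.  */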
20219 /* Compute the alignment for a static variable.
20220 TYPE is the data type, and ALIGN is the alignment that
20221 the object would ordinarily have. The value of this function is used
20222 instead of that alignment to align the object. */
20225 ix86_data_alignment (tree type, int align)
20227 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
20229 if (AGGREGATE_TYPE_P (type)
20230 && TYPE_SIZE (type)
20231 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20232 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
20233 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
20234 && align < max_align)
  /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to a 16-byte boundary.  */
20241 if (AGGREGATE_TYPE_P (type)
20242 && TYPE_SIZE (type)
20243 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20244 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
20245 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20249 if (TREE_CODE (type) == ARRAY_TYPE)
20251 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20253 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20256 else if (TREE_CODE (type) == COMPLEX_TYPE)
20259 if (TYPE_MODE (type) == DCmode && align < 64)
20261 if ((TYPE_MODE (type) == XCmode
20262 || TYPE_MODE (type) == TCmode) && align < 128)
20265 else if ((TREE_CODE (type) == RECORD_TYPE
20266 || TREE_CODE (type) == UNION_TYPE
20267 || TREE_CODE (type) == QUAL_UNION_TYPE)
20268 && TYPE_FIELDS (type))
20270 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20272 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20275 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20276 || TREE_CODE (type) == INTEGER_TYPE)
20278 if (TYPE_MODE (type) == DFmode && align < 64)
20280 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
/* Compute the alignment for a local variable or a stack slot.  EXP is
   the data type or decl itself, MODE is the widest mode available and
   ALIGN is the alignment that the object would ordinarily have.  The
   value of this macro is used instead of that alignment to align the
   object.  */
20294 ix86_local_alignment (tree exp, enum machine_mode mode,
20295 unsigned int align)
20299 if (exp && DECL_P (exp))
20301 type = TREE_TYPE (exp);
20310 /* Don't do dynamic stack realignment for long long objects with
20311 -mpreferred-stack-boundary=2. */
20314 && ix86_preferred_stack_boundary < 64
20315 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
20316 && (!type || !TYPE_USER_ALIGN (type))
20317 && (!decl || !DECL_USER_ALIGN (decl)))
  /* If TYPE is NULL, we are allocating a stack slot for a caller-save
     register in MODE.  We will return the largest alignment of XF
     and DF.  */
20325 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
20326 align = GET_MODE_ALIGNMENT (DFmode);
  /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to a 16-byte boundary.  The exact wording is:

       An array uses the same alignment as its elements, except that a
       local or global array variable of length at least 16 bytes or a
       C99 variable-length array variable always has alignment of at
       least 16 bytes.

     This was added to allow use of aligned SSE instructions on arrays.
     The rule is meant for static storage (where the compiler cannot do
     the analysis by itself).  We follow it for automatic variables only
     when convenient.  We fully control everything in the function being
     compiled, and functions from other units cannot rely on the
     alignment.

     Exclude the va_list type.  It is the common case of a local array
     where we cannot benefit from the alignment.  */
20345 if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
20348 if (AGGREGATE_TYPE_P (type)
20349 && (TYPE_MAIN_VARIANT (type)
20350 != TYPE_MAIN_VARIANT (va_list_type_node))
20351 && TYPE_SIZE (type)
20352 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20353 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
20354 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20357 if (TREE_CODE (type) == ARRAY_TYPE)
20359 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20361 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20364 else if (TREE_CODE (type) == COMPLEX_TYPE)
20366 if (TYPE_MODE (type) == DCmode && align < 64)
20368 if ((TYPE_MODE (type) == XCmode
20369 || TYPE_MODE (type) == TCmode) && align < 128)
20372 else if ((TREE_CODE (type) == RECORD_TYPE
20373 || TREE_CODE (type) == UNION_TYPE
20374 || TREE_CODE (type) == QUAL_UNION_TYPE)
20375 && TYPE_FIELDS (type))
20377 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20379 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20382 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20383 || TREE_CODE (type) == INTEGER_TYPE)
20386 if (TYPE_MODE (type) == DFmode && align < 64)
20388 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
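
/* Illustrative example (not part of the port) of the automatic-array
   rule above: in a function compiled for speed on x86-64, a local
   "double a[4]" occupies 32 bytes, which is at least 16, so it is given
   128-bit alignment and aligned SSE accesses to it become safe; a local
   va_list is excluded because nothing benefits from aligning it.  */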
20394 /* Compute the minimum required alignment for dynamic stack realignment
20395 purposes for a local variable, parameter or a stack slot. EXP is
20396 the data type or decl itself, MODE is its mode and ALIGN is the
20397 alignment that the object would ordinarily have. */
20400 ix86_minimum_alignment (tree exp, enum machine_mode mode,
20401 unsigned int align)
20405 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
20408 if (exp && DECL_P (exp))
20410 type = TREE_TYPE (exp);
20419 /* Don't do dynamic stack realignment for long long objects with
20420 -mpreferred-stack-boundary=2. */
20421 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
20422 && (!type || !TYPE_USER_ALIGN (type))
20423 && (!decl || !DECL_USER_ALIGN (decl)))
20429 /* Find a location for the static chain incoming to a nested function.
20430 This is a register, unless all free registers are used by arguments. */
20433 ix86_static_chain (const_tree fndecl, bool incoming_p)
20437 if (!DECL_STATIC_CHAIN (fndecl))
20442 /* We always use R10 in 64-bit mode. */
20448 /* By default in 32-bit mode we use ECX to pass the static chain. */
20451 fntype = TREE_TYPE (fndecl);
20452 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
20454 /* Fastcall functions use ecx/edx for arguments, which leaves
20455 us with EAX for the static chain. */
20458 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
20460 /* Thiscall functions use ecx for arguments, which leaves
20461 us with EAX for the static chain. */
20464 else if (ix86_function_regparm (fntype, fndecl) == 3)
20466 /* For regparm 3, we have no free call-clobbered registers in
20467 which to store the static chain. In order to implement this,
20468 we have the trampoline push the static chain to the stack.
20469 However, we can't push a value below the return address when
20470 we call the nested function directly, so we have to use an
20471 alternate entry point. For this we use ESI, and have the
20472 alternate entry point push ESI, so that things appear the
20473 same once we're executing the nested function. */
20476 if (fndecl == current_function_decl)
20477 ix86_static_chain_on_stack = true;
20478 return gen_frame_mem (SImode,
20479 plus_constant (arg_pointer_rtx, -8));
20485 return gen_rtx_REG (Pmode, regno);
20488 /* Emit RTL insns to initialize the variable parts of a trampoline.
20489 FNDECL is the decl of the target address; M_TRAMP is a MEM for
20490 the trampoline, and CHAIN_VALUE is an RTX for the static chain
20491 to be passed to the target function. */
20494 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
20498 fnaddr = XEXP (DECL_RTL (fndecl), 0);
20505 /* Depending on the static chain location, either load a register
20506 with a constant, or push the constant to the stack. All of the
20507 instructions are the same size. */
20508 chain = ix86_static_chain (fndecl, true);
20511 if (REGNO (chain) == CX_REG)
20513 else if (REGNO (chain) == AX_REG)
20516 gcc_unreachable ();
20521 mem = adjust_address (m_tramp, QImode, 0);
20522 emit_move_insn (mem, gen_int_mode (opcode, QImode));
20524 mem = adjust_address (m_tramp, SImode, 1);
20525 emit_move_insn (mem, chain_value);
20527 /* Compute offset from the end of the jmp to the target function.
20528 In the case in which the trampoline stores the static chain on
20529 the stack, we need to skip the first insn which pushes the
20530 (call-saved) register static chain; this push is 1 byte. */
20531 disp = expand_binop (SImode, sub_optab, fnaddr,
20532 plus_constant (XEXP (m_tramp, 0),
20533 MEM_P (chain) ? 9 : 10),
20534 NULL_RTX, 1, OPTAB_DIRECT);
20536 mem = adjust_address (m_tramp, QImode, 5);
20537 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
20539 mem = adjust_address (m_tramp, SImode, 6);
20540 emit_move_insn (mem, disp);
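
/* Illustrative layout (not part of the port) of the 10-byte 32-bit
   trampoline emitted above:

     offset 0:  b9 / b8 / 68    mov %ecx / mov %eax / push, imm32 form
     offset 1:  <chain_value>   32-bit immediate
     offset 5:  e9              jmp rel32
     offset 6:  <disp>          target minus the end of the jmp  */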
      /* Load the function address into r11.  Try to load the address
         using the shorter movl instead of movabs.  We may want to
         support movq for kernel mode, but the kernel does not use
         trampolines at the moment.  */
20550 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
20552 fnaddr = copy_to_mode_reg (DImode, fnaddr);
20554 mem = adjust_address (m_tramp, HImode, offset);
20555 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
20557 mem = adjust_address (m_tramp, SImode, offset + 2);
20558 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
20563 mem = adjust_address (m_tramp, HImode, offset);
20564 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
20566 mem = adjust_address (m_tramp, DImode, offset + 2);
20567 emit_move_insn (mem, fnaddr);
20571 /* Load static chain using movabs to r10. */
20572 mem = adjust_address (m_tramp, HImode, offset);
20573 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
20575 mem = adjust_address (m_tramp, DImode, offset + 2);
20576 emit_move_insn (mem, chain_value);
20579 /* Jump to r11; the last (unused) byte is a nop, only there to
20580 pad the write out to a single 32-bit store. */
20581 mem = adjust_address (m_tramp, SImode, offset);
20582 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
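
/* Illustrative layout (not part of the port) of the 64-bit trampoline
   emitted above, when the movabs form is used for the function address:

     49 bb <imm64>    mov $fnaddr, %r11       (10 bytes)
     49 ba <imm64>    mov $chain_value, %r10  (10 bytes)
     49 ff e3         jmp *%r11
     90               nop, pads the final write to 4 bytes

   When the address fits in 32 bits, the first insn is the shorter
   41 bb <imm32> (movl) form instead.  */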
20585 gcc_assert (offset <= TRAMPOLINE_SIZE);
20588 #ifdef ENABLE_EXECUTE_STACK
20589 #ifdef CHECK_EXECUTE_STACK_ENABLED
20590 if (CHECK_EXECUTE_STACK_ENABLED)
20592 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
20593 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
20597 /* The following file contains several enumerations and data structures
20598 built from the definitions in i386-builtin-types.def. */
20600 #include "i386-builtin-types.inc"
20602 /* Table for the ix86 builtin non-function types. */
20603 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
20605 /* Retrieve an element from the above table, building some of
20606 the types lazily. */
20609 ix86_get_builtin_type (enum ix86_builtin_type tcode)
20611 unsigned int index;
20614 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
20616 type = ix86_builtin_type_tab[(int) tcode];
20620 gcc_assert (tcode > IX86_BT_LAST_PRIM);
20621 if (tcode <= IX86_BT_LAST_VECT)
20623 enum machine_mode mode;
20625 index = tcode - IX86_BT_LAST_PRIM - 1;
20626 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
20627 mode = ix86_builtin_type_vect_mode[index];
20629 type = build_vector_type_for_mode (itype, mode);
20635 index = tcode - IX86_BT_LAST_VECT - 1;
20636 if (tcode <= IX86_BT_LAST_PTR)
20637 quals = TYPE_UNQUALIFIED;
20639 quals = TYPE_QUAL_CONST;
20641 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
20642 if (quals != TYPE_UNQUALIFIED)
20643 itype = build_qualified_type (itype, quals);
20645 type = build_pointer_type (itype);
20648 ix86_builtin_type_tab[(int) tcode] = type;
20652 /* Table for the ix86 builtin function types. */
20653 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
20655 /* Retrieve an element from the above table, building some of
20656 the types lazily. */
20659 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
20663 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
20665 type = ix86_builtin_func_type_tab[(int) tcode];
20669 if (tcode <= IX86_BT_LAST_FUNC)
20671 unsigned start = ix86_builtin_func_start[(int) tcode];
20672 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
20673 tree rtype, atype, args = void_list_node;
20676 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
20677 for (i = after - 1; i > start; --i)
20679 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
20680 args = tree_cons (NULL, atype, args);
20683 type = build_function_type (rtype, args);
20687 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
20688 enum ix86_builtin_func_type icode;
20690 icode = ix86_builtin_func_alias_base[index];
20691 type = ix86_get_builtin_func_type (icode);
20694 ix86_builtin_func_type_tab[(int) tcode] = type;
20699 /* Codes for all the SSE/MMX builtins. */
20702 IX86_BUILTIN_ADDPS,
20703 IX86_BUILTIN_ADDSS,
20704 IX86_BUILTIN_DIVPS,
20705 IX86_BUILTIN_DIVSS,
20706 IX86_BUILTIN_MULPS,
20707 IX86_BUILTIN_MULSS,
20708 IX86_BUILTIN_SUBPS,
20709 IX86_BUILTIN_SUBSS,
20711 IX86_BUILTIN_CMPEQPS,
20712 IX86_BUILTIN_CMPLTPS,
20713 IX86_BUILTIN_CMPLEPS,
20714 IX86_BUILTIN_CMPGTPS,
20715 IX86_BUILTIN_CMPGEPS,
20716 IX86_BUILTIN_CMPNEQPS,
20717 IX86_BUILTIN_CMPNLTPS,
20718 IX86_BUILTIN_CMPNLEPS,
20719 IX86_BUILTIN_CMPNGTPS,
20720 IX86_BUILTIN_CMPNGEPS,
20721 IX86_BUILTIN_CMPORDPS,
20722 IX86_BUILTIN_CMPUNORDPS,
20723 IX86_BUILTIN_CMPEQSS,
20724 IX86_BUILTIN_CMPLTSS,
20725 IX86_BUILTIN_CMPLESS,
20726 IX86_BUILTIN_CMPNEQSS,
20727 IX86_BUILTIN_CMPNLTSS,
20728 IX86_BUILTIN_CMPNLESS,
20729 IX86_BUILTIN_CMPNGTSS,
20730 IX86_BUILTIN_CMPNGESS,
20731 IX86_BUILTIN_CMPORDSS,
20732 IX86_BUILTIN_CMPUNORDSS,
20734 IX86_BUILTIN_COMIEQSS,
20735 IX86_BUILTIN_COMILTSS,
20736 IX86_BUILTIN_COMILESS,
20737 IX86_BUILTIN_COMIGTSS,
20738 IX86_BUILTIN_COMIGESS,
20739 IX86_BUILTIN_COMINEQSS,
20740 IX86_BUILTIN_UCOMIEQSS,
20741 IX86_BUILTIN_UCOMILTSS,
20742 IX86_BUILTIN_UCOMILESS,
20743 IX86_BUILTIN_UCOMIGTSS,
20744 IX86_BUILTIN_UCOMIGESS,
20745 IX86_BUILTIN_UCOMINEQSS,
20747 IX86_BUILTIN_CVTPI2PS,
20748 IX86_BUILTIN_CVTPS2PI,
20749 IX86_BUILTIN_CVTSI2SS,
20750 IX86_BUILTIN_CVTSI642SS,
20751 IX86_BUILTIN_CVTSS2SI,
20752 IX86_BUILTIN_CVTSS2SI64,
20753 IX86_BUILTIN_CVTTPS2PI,
20754 IX86_BUILTIN_CVTTSS2SI,
20755 IX86_BUILTIN_CVTTSS2SI64,
20757 IX86_BUILTIN_MAXPS,
20758 IX86_BUILTIN_MAXSS,
20759 IX86_BUILTIN_MINPS,
20760 IX86_BUILTIN_MINSS,
20762 IX86_BUILTIN_LOADUPS,
20763 IX86_BUILTIN_STOREUPS,
20764 IX86_BUILTIN_MOVSS,
20766 IX86_BUILTIN_MOVHLPS,
20767 IX86_BUILTIN_MOVLHPS,
20768 IX86_BUILTIN_LOADHPS,
20769 IX86_BUILTIN_LOADLPS,
20770 IX86_BUILTIN_STOREHPS,
20771 IX86_BUILTIN_STORELPS,
20773 IX86_BUILTIN_MASKMOVQ,
20774 IX86_BUILTIN_MOVMSKPS,
20775 IX86_BUILTIN_PMOVMSKB,
20777 IX86_BUILTIN_MOVNTPS,
20778 IX86_BUILTIN_MOVNTQ,
20780 IX86_BUILTIN_LOADDQU,
20781 IX86_BUILTIN_STOREDQU,
20783 IX86_BUILTIN_PACKSSWB,
20784 IX86_BUILTIN_PACKSSDW,
20785 IX86_BUILTIN_PACKUSWB,
20787 IX86_BUILTIN_PADDB,
20788 IX86_BUILTIN_PADDW,
20789 IX86_BUILTIN_PADDD,
20790 IX86_BUILTIN_PADDQ,
20791 IX86_BUILTIN_PADDSB,
20792 IX86_BUILTIN_PADDSW,
20793 IX86_BUILTIN_PADDUSB,
20794 IX86_BUILTIN_PADDUSW,
20795 IX86_BUILTIN_PSUBB,
20796 IX86_BUILTIN_PSUBW,
20797 IX86_BUILTIN_PSUBD,
20798 IX86_BUILTIN_PSUBQ,
20799 IX86_BUILTIN_PSUBSB,
20800 IX86_BUILTIN_PSUBSW,
20801 IX86_BUILTIN_PSUBUSB,
20802 IX86_BUILTIN_PSUBUSW,
20805 IX86_BUILTIN_PANDN,
20809 IX86_BUILTIN_PAVGB,
20810 IX86_BUILTIN_PAVGW,
20812 IX86_BUILTIN_PCMPEQB,
20813 IX86_BUILTIN_PCMPEQW,
20814 IX86_BUILTIN_PCMPEQD,
20815 IX86_BUILTIN_PCMPGTB,
20816 IX86_BUILTIN_PCMPGTW,
20817 IX86_BUILTIN_PCMPGTD,
20819 IX86_BUILTIN_PMADDWD,
20821 IX86_BUILTIN_PMAXSW,
20822 IX86_BUILTIN_PMAXUB,
20823 IX86_BUILTIN_PMINSW,
20824 IX86_BUILTIN_PMINUB,
20826 IX86_BUILTIN_PMULHUW,
20827 IX86_BUILTIN_PMULHW,
20828 IX86_BUILTIN_PMULLW,
20830 IX86_BUILTIN_PSADBW,
20831 IX86_BUILTIN_PSHUFW,
20833 IX86_BUILTIN_PSLLW,
20834 IX86_BUILTIN_PSLLD,
20835 IX86_BUILTIN_PSLLQ,
20836 IX86_BUILTIN_PSRAW,
20837 IX86_BUILTIN_PSRAD,
20838 IX86_BUILTIN_PSRLW,
20839 IX86_BUILTIN_PSRLD,
20840 IX86_BUILTIN_PSRLQ,
20841 IX86_BUILTIN_PSLLWI,
20842 IX86_BUILTIN_PSLLDI,
20843 IX86_BUILTIN_PSLLQI,
20844 IX86_BUILTIN_PSRAWI,
20845 IX86_BUILTIN_PSRADI,
20846 IX86_BUILTIN_PSRLWI,
20847 IX86_BUILTIN_PSRLDI,
20848 IX86_BUILTIN_PSRLQI,
20850 IX86_BUILTIN_PUNPCKHBW,
20851 IX86_BUILTIN_PUNPCKHWD,
20852 IX86_BUILTIN_PUNPCKHDQ,
20853 IX86_BUILTIN_PUNPCKLBW,
20854 IX86_BUILTIN_PUNPCKLWD,
20855 IX86_BUILTIN_PUNPCKLDQ,
20857 IX86_BUILTIN_SHUFPS,
20859 IX86_BUILTIN_RCPPS,
20860 IX86_BUILTIN_RCPSS,
20861 IX86_BUILTIN_RSQRTPS,
20862 IX86_BUILTIN_RSQRTPS_NR,
20863 IX86_BUILTIN_RSQRTSS,
20864 IX86_BUILTIN_RSQRTF,
20865 IX86_BUILTIN_SQRTPS,
20866 IX86_BUILTIN_SQRTPS_NR,
20867 IX86_BUILTIN_SQRTSS,
20869 IX86_BUILTIN_UNPCKHPS,
20870 IX86_BUILTIN_UNPCKLPS,
20872 IX86_BUILTIN_ANDPS,
20873 IX86_BUILTIN_ANDNPS,
20875 IX86_BUILTIN_XORPS,
20878 IX86_BUILTIN_LDMXCSR,
20879 IX86_BUILTIN_STMXCSR,
20880 IX86_BUILTIN_SFENCE,
20882 /* 3DNow! Original */
20883 IX86_BUILTIN_FEMMS,
20884 IX86_BUILTIN_PAVGUSB,
20885 IX86_BUILTIN_PF2ID,
20886 IX86_BUILTIN_PFACC,
20887 IX86_BUILTIN_PFADD,
20888 IX86_BUILTIN_PFCMPEQ,
20889 IX86_BUILTIN_PFCMPGE,
20890 IX86_BUILTIN_PFCMPGT,
20891 IX86_BUILTIN_PFMAX,
20892 IX86_BUILTIN_PFMIN,
20893 IX86_BUILTIN_PFMUL,
20894 IX86_BUILTIN_PFRCP,
20895 IX86_BUILTIN_PFRCPIT1,
20896 IX86_BUILTIN_PFRCPIT2,
20897 IX86_BUILTIN_PFRSQIT1,
20898 IX86_BUILTIN_PFRSQRT,
20899 IX86_BUILTIN_PFSUB,
20900 IX86_BUILTIN_PFSUBR,
20901 IX86_BUILTIN_PI2FD,
20902 IX86_BUILTIN_PMULHRW,
20904 /* 3DNow! Athlon Extensions */
20905 IX86_BUILTIN_PF2IW,
20906 IX86_BUILTIN_PFNACC,
20907 IX86_BUILTIN_PFPNACC,
20908 IX86_BUILTIN_PI2FW,
20909 IX86_BUILTIN_PSWAPDSI,
20910 IX86_BUILTIN_PSWAPDSF,
20913 IX86_BUILTIN_ADDPD,
20914 IX86_BUILTIN_ADDSD,
20915 IX86_BUILTIN_DIVPD,
20916 IX86_BUILTIN_DIVSD,
20917 IX86_BUILTIN_MULPD,
20918 IX86_BUILTIN_MULSD,
20919 IX86_BUILTIN_SUBPD,
20920 IX86_BUILTIN_SUBSD,
20922 IX86_BUILTIN_CMPEQPD,
20923 IX86_BUILTIN_CMPLTPD,
20924 IX86_BUILTIN_CMPLEPD,
20925 IX86_BUILTIN_CMPGTPD,
20926 IX86_BUILTIN_CMPGEPD,
20927 IX86_BUILTIN_CMPNEQPD,
20928 IX86_BUILTIN_CMPNLTPD,
20929 IX86_BUILTIN_CMPNLEPD,
20930 IX86_BUILTIN_CMPNGTPD,
20931 IX86_BUILTIN_CMPNGEPD,
20932 IX86_BUILTIN_CMPORDPD,
20933 IX86_BUILTIN_CMPUNORDPD,
20934 IX86_BUILTIN_CMPEQSD,
20935 IX86_BUILTIN_CMPLTSD,
20936 IX86_BUILTIN_CMPLESD,
20937 IX86_BUILTIN_CMPNEQSD,
20938 IX86_BUILTIN_CMPNLTSD,
20939 IX86_BUILTIN_CMPNLESD,
20940 IX86_BUILTIN_CMPORDSD,
20941 IX86_BUILTIN_CMPUNORDSD,
20943 IX86_BUILTIN_COMIEQSD,
20944 IX86_BUILTIN_COMILTSD,
20945 IX86_BUILTIN_COMILESD,
20946 IX86_BUILTIN_COMIGTSD,
20947 IX86_BUILTIN_COMIGESD,
20948 IX86_BUILTIN_COMINEQSD,
20949 IX86_BUILTIN_UCOMIEQSD,
20950 IX86_BUILTIN_UCOMILTSD,
20951 IX86_BUILTIN_UCOMILESD,
20952 IX86_BUILTIN_UCOMIGTSD,
20953 IX86_BUILTIN_UCOMIGESD,
20954 IX86_BUILTIN_UCOMINEQSD,
20956 IX86_BUILTIN_MAXPD,
20957 IX86_BUILTIN_MAXSD,
20958 IX86_BUILTIN_MINPD,
20959 IX86_BUILTIN_MINSD,
20961 IX86_BUILTIN_ANDPD,
20962 IX86_BUILTIN_ANDNPD,
20964 IX86_BUILTIN_XORPD,
20966 IX86_BUILTIN_SQRTPD,
20967 IX86_BUILTIN_SQRTSD,
20969 IX86_BUILTIN_UNPCKHPD,
20970 IX86_BUILTIN_UNPCKLPD,
20972 IX86_BUILTIN_SHUFPD,
20974 IX86_BUILTIN_LOADUPD,
20975 IX86_BUILTIN_STOREUPD,
20976 IX86_BUILTIN_MOVSD,
20978 IX86_BUILTIN_LOADHPD,
20979 IX86_BUILTIN_LOADLPD,
20981 IX86_BUILTIN_CVTDQ2PD,
20982 IX86_BUILTIN_CVTDQ2PS,
20984 IX86_BUILTIN_CVTPD2DQ,
20985 IX86_BUILTIN_CVTPD2PI,
20986 IX86_BUILTIN_CVTPD2PS,
20987 IX86_BUILTIN_CVTTPD2DQ,
20988 IX86_BUILTIN_CVTTPD2PI,
20990 IX86_BUILTIN_CVTPI2PD,
20991 IX86_BUILTIN_CVTSI2SD,
20992 IX86_BUILTIN_CVTSI642SD,
20994 IX86_BUILTIN_CVTSD2SI,
20995 IX86_BUILTIN_CVTSD2SI64,
20996 IX86_BUILTIN_CVTSD2SS,
20997 IX86_BUILTIN_CVTSS2SD,
20998 IX86_BUILTIN_CVTTSD2SI,
20999 IX86_BUILTIN_CVTTSD2SI64,
21001 IX86_BUILTIN_CVTPS2DQ,
21002 IX86_BUILTIN_CVTPS2PD,
21003 IX86_BUILTIN_CVTTPS2DQ,
21005 IX86_BUILTIN_MOVNTI,
21006 IX86_BUILTIN_MOVNTPD,
21007 IX86_BUILTIN_MOVNTDQ,
21009 IX86_BUILTIN_MOVQ128,
21012 IX86_BUILTIN_MASKMOVDQU,
21013 IX86_BUILTIN_MOVMSKPD,
21014 IX86_BUILTIN_PMOVMSKB128,
21016 IX86_BUILTIN_PACKSSWB128,
21017 IX86_BUILTIN_PACKSSDW128,
21018 IX86_BUILTIN_PACKUSWB128,
21020 IX86_BUILTIN_PADDB128,
21021 IX86_BUILTIN_PADDW128,
21022 IX86_BUILTIN_PADDD128,
21023 IX86_BUILTIN_PADDQ128,
21024 IX86_BUILTIN_PADDSB128,
21025 IX86_BUILTIN_PADDSW128,
21026 IX86_BUILTIN_PADDUSB128,
21027 IX86_BUILTIN_PADDUSW128,
21028 IX86_BUILTIN_PSUBB128,
21029 IX86_BUILTIN_PSUBW128,
21030 IX86_BUILTIN_PSUBD128,
21031 IX86_BUILTIN_PSUBQ128,
21032 IX86_BUILTIN_PSUBSB128,
21033 IX86_BUILTIN_PSUBSW128,
21034 IX86_BUILTIN_PSUBUSB128,
21035 IX86_BUILTIN_PSUBUSW128,
21037 IX86_BUILTIN_PAND128,
21038 IX86_BUILTIN_PANDN128,
21039 IX86_BUILTIN_POR128,
21040 IX86_BUILTIN_PXOR128,
21042 IX86_BUILTIN_PAVGB128,
21043 IX86_BUILTIN_PAVGW128,
21045 IX86_BUILTIN_PCMPEQB128,
21046 IX86_BUILTIN_PCMPEQW128,
21047 IX86_BUILTIN_PCMPEQD128,
21048 IX86_BUILTIN_PCMPGTB128,
21049 IX86_BUILTIN_PCMPGTW128,
21050 IX86_BUILTIN_PCMPGTD128,
21052 IX86_BUILTIN_PMADDWD128,
21054 IX86_BUILTIN_PMAXSW128,
21055 IX86_BUILTIN_PMAXUB128,
21056 IX86_BUILTIN_PMINSW128,
21057 IX86_BUILTIN_PMINUB128,
21059 IX86_BUILTIN_PMULUDQ,
21060 IX86_BUILTIN_PMULUDQ128,
21061 IX86_BUILTIN_PMULHUW128,
21062 IX86_BUILTIN_PMULHW128,
21063 IX86_BUILTIN_PMULLW128,
21065 IX86_BUILTIN_PSADBW128,
21066 IX86_BUILTIN_PSHUFHW,
21067 IX86_BUILTIN_PSHUFLW,
21068 IX86_BUILTIN_PSHUFD,
21070 IX86_BUILTIN_PSLLDQI128,
21071 IX86_BUILTIN_PSLLWI128,
21072 IX86_BUILTIN_PSLLDI128,
21073 IX86_BUILTIN_PSLLQI128,
21074 IX86_BUILTIN_PSRAWI128,
21075 IX86_BUILTIN_PSRADI128,
21076 IX86_BUILTIN_PSRLDQI128,
21077 IX86_BUILTIN_PSRLWI128,
21078 IX86_BUILTIN_PSRLDI128,
21079 IX86_BUILTIN_PSRLQI128,
21081 IX86_BUILTIN_PSLLDQ128,
21082 IX86_BUILTIN_PSLLW128,
21083 IX86_BUILTIN_PSLLD128,
21084 IX86_BUILTIN_PSLLQ128,
21085 IX86_BUILTIN_PSRAW128,
21086 IX86_BUILTIN_PSRAD128,
21087 IX86_BUILTIN_PSRLW128,
21088 IX86_BUILTIN_PSRLD128,
21089 IX86_BUILTIN_PSRLQ128,
21091 IX86_BUILTIN_PUNPCKHBW128,
21092 IX86_BUILTIN_PUNPCKHWD128,
21093 IX86_BUILTIN_PUNPCKHDQ128,
21094 IX86_BUILTIN_PUNPCKHQDQ128,
21095 IX86_BUILTIN_PUNPCKLBW128,
21096 IX86_BUILTIN_PUNPCKLWD128,
21097 IX86_BUILTIN_PUNPCKLDQ128,
21098 IX86_BUILTIN_PUNPCKLQDQ128,
21100 IX86_BUILTIN_CLFLUSH,
21101 IX86_BUILTIN_MFENCE,
21102 IX86_BUILTIN_LFENCE,
21104 IX86_BUILTIN_BSRSI,
21105 IX86_BUILTIN_BSRDI,
21106 IX86_BUILTIN_RDPMC,
21107 IX86_BUILTIN_RDTSC,
21108 IX86_BUILTIN_RDTSCP,
21109 IX86_BUILTIN_ROLQI,
21110 IX86_BUILTIN_ROLHI,
21111 IX86_BUILTIN_RORQI,
21112 IX86_BUILTIN_RORHI,
21115 IX86_BUILTIN_ADDSUBPS,
21116 IX86_BUILTIN_HADDPS,
21117 IX86_BUILTIN_HSUBPS,
21118 IX86_BUILTIN_MOVSHDUP,
21119 IX86_BUILTIN_MOVSLDUP,
21120 IX86_BUILTIN_ADDSUBPD,
21121 IX86_BUILTIN_HADDPD,
21122 IX86_BUILTIN_HSUBPD,
21123 IX86_BUILTIN_LDDQU,
21125 IX86_BUILTIN_MONITOR,
21126 IX86_BUILTIN_MWAIT,
21129 IX86_BUILTIN_PHADDW,
21130 IX86_BUILTIN_PHADDD,
21131 IX86_BUILTIN_PHADDSW,
21132 IX86_BUILTIN_PHSUBW,
21133 IX86_BUILTIN_PHSUBD,
21134 IX86_BUILTIN_PHSUBSW,
21135 IX86_BUILTIN_PMADDUBSW,
21136 IX86_BUILTIN_PMULHRSW,
21137 IX86_BUILTIN_PSHUFB,
21138 IX86_BUILTIN_PSIGNB,
21139 IX86_BUILTIN_PSIGNW,
21140 IX86_BUILTIN_PSIGND,
21141 IX86_BUILTIN_PALIGNR,
21142 IX86_BUILTIN_PABSB,
21143 IX86_BUILTIN_PABSW,
21144 IX86_BUILTIN_PABSD,
21146 IX86_BUILTIN_PHADDW128,
21147 IX86_BUILTIN_PHADDD128,
21148 IX86_BUILTIN_PHADDSW128,
21149 IX86_BUILTIN_PHSUBW128,
21150 IX86_BUILTIN_PHSUBD128,
21151 IX86_BUILTIN_PHSUBSW128,
21152 IX86_BUILTIN_PMADDUBSW128,
21153 IX86_BUILTIN_PMULHRSW128,
21154 IX86_BUILTIN_PSHUFB128,
21155 IX86_BUILTIN_PSIGNB128,
21156 IX86_BUILTIN_PSIGNW128,
21157 IX86_BUILTIN_PSIGND128,
21158 IX86_BUILTIN_PALIGNR128,
21159 IX86_BUILTIN_PABSB128,
21160 IX86_BUILTIN_PABSW128,
21161 IX86_BUILTIN_PABSD128,
21163 /* AMDFAM10 - SSE4A New Instructions. */
21164 IX86_BUILTIN_MOVNTSD,
21165 IX86_BUILTIN_MOVNTSS,
21166 IX86_BUILTIN_EXTRQI,
21167 IX86_BUILTIN_EXTRQ,
21168 IX86_BUILTIN_INSERTQI,
21169 IX86_BUILTIN_INSERTQ,
21172 IX86_BUILTIN_BLENDPD,
21173 IX86_BUILTIN_BLENDPS,
21174 IX86_BUILTIN_BLENDVPD,
21175 IX86_BUILTIN_BLENDVPS,
21176 IX86_BUILTIN_PBLENDVB128,
21177 IX86_BUILTIN_PBLENDW128,
21182 IX86_BUILTIN_INSERTPS128,
21184 IX86_BUILTIN_MOVNTDQA,
21185 IX86_BUILTIN_MPSADBW128,
21186 IX86_BUILTIN_PACKUSDW128,
21187 IX86_BUILTIN_PCMPEQQ,
21188 IX86_BUILTIN_PHMINPOSUW128,
21190 IX86_BUILTIN_PMAXSB128,
21191 IX86_BUILTIN_PMAXSD128,
21192 IX86_BUILTIN_PMAXUD128,
21193 IX86_BUILTIN_PMAXUW128,
21195 IX86_BUILTIN_PMINSB128,
21196 IX86_BUILTIN_PMINSD128,
21197 IX86_BUILTIN_PMINUD128,
21198 IX86_BUILTIN_PMINUW128,
21200 IX86_BUILTIN_PMOVSXBW128,
21201 IX86_BUILTIN_PMOVSXBD128,
21202 IX86_BUILTIN_PMOVSXBQ128,
21203 IX86_BUILTIN_PMOVSXWD128,
21204 IX86_BUILTIN_PMOVSXWQ128,
21205 IX86_BUILTIN_PMOVSXDQ128,
21207 IX86_BUILTIN_PMOVZXBW128,
21208 IX86_BUILTIN_PMOVZXBD128,
21209 IX86_BUILTIN_PMOVZXBQ128,
21210 IX86_BUILTIN_PMOVZXWD128,
21211 IX86_BUILTIN_PMOVZXWQ128,
21212 IX86_BUILTIN_PMOVZXDQ128,
21214 IX86_BUILTIN_PMULDQ128,
21215 IX86_BUILTIN_PMULLD128,
21217 IX86_BUILTIN_ROUNDPD,
21218 IX86_BUILTIN_ROUNDPS,
21219 IX86_BUILTIN_ROUNDSD,
21220 IX86_BUILTIN_ROUNDSS,
21222 IX86_BUILTIN_PTESTZ,
21223 IX86_BUILTIN_PTESTC,
21224 IX86_BUILTIN_PTESTNZC,
21226 IX86_BUILTIN_VEC_INIT_V2SI,
21227 IX86_BUILTIN_VEC_INIT_V4HI,
21228 IX86_BUILTIN_VEC_INIT_V8QI,
21229 IX86_BUILTIN_VEC_EXT_V2DF,
21230 IX86_BUILTIN_VEC_EXT_V2DI,
21231 IX86_BUILTIN_VEC_EXT_V4SF,
21232 IX86_BUILTIN_VEC_EXT_V4SI,
21233 IX86_BUILTIN_VEC_EXT_V8HI,
21234 IX86_BUILTIN_VEC_EXT_V2SI,
21235 IX86_BUILTIN_VEC_EXT_V4HI,
21236 IX86_BUILTIN_VEC_EXT_V16QI,
21237 IX86_BUILTIN_VEC_SET_V2DI,
21238 IX86_BUILTIN_VEC_SET_V4SF,
21239 IX86_BUILTIN_VEC_SET_V4SI,
21240 IX86_BUILTIN_VEC_SET_V8HI,
21241 IX86_BUILTIN_VEC_SET_V4HI,
21242 IX86_BUILTIN_VEC_SET_V16QI,
21244 IX86_BUILTIN_VEC_PACK_SFIX,
21247 IX86_BUILTIN_CRC32QI,
21248 IX86_BUILTIN_CRC32HI,
21249 IX86_BUILTIN_CRC32SI,
21250 IX86_BUILTIN_CRC32DI,
21252 IX86_BUILTIN_PCMPESTRI128,
21253 IX86_BUILTIN_PCMPESTRM128,
21254 IX86_BUILTIN_PCMPESTRA128,
21255 IX86_BUILTIN_PCMPESTRC128,
21256 IX86_BUILTIN_PCMPESTRO128,
21257 IX86_BUILTIN_PCMPESTRS128,
21258 IX86_BUILTIN_PCMPESTRZ128,
21259 IX86_BUILTIN_PCMPISTRI128,
21260 IX86_BUILTIN_PCMPISTRM128,
21261 IX86_BUILTIN_PCMPISTRA128,
21262 IX86_BUILTIN_PCMPISTRC128,
21263 IX86_BUILTIN_PCMPISTRO128,
21264 IX86_BUILTIN_PCMPISTRS128,
21265 IX86_BUILTIN_PCMPISTRZ128,
21267 IX86_BUILTIN_PCMPGTQ,
21269 /* AES instructions */
21270 IX86_BUILTIN_AESENC128,
21271 IX86_BUILTIN_AESENCLAST128,
21272 IX86_BUILTIN_AESDEC128,
21273 IX86_BUILTIN_AESDECLAST128,
21274 IX86_BUILTIN_AESIMC128,
21275 IX86_BUILTIN_AESKEYGENASSIST128,
21277 /* PCLMUL instruction */
21278 IX86_BUILTIN_PCLMULQDQ128,
21281 IX86_BUILTIN_ADDPD256,
21282 IX86_BUILTIN_ADDPS256,
21283 IX86_BUILTIN_ADDSUBPD256,
21284 IX86_BUILTIN_ADDSUBPS256,
21285 IX86_BUILTIN_ANDPD256,
21286 IX86_BUILTIN_ANDPS256,
21287 IX86_BUILTIN_ANDNPD256,
21288 IX86_BUILTIN_ANDNPS256,
21289 IX86_BUILTIN_BLENDPD256,
21290 IX86_BUILTIN_BLENDPS256,
21291 IX86_BUILTIN_BLENDVPD256,
21292 IX86_BUILTIN_BLENDVPS256,
21293 IX86_BUILTIN_DIVPD256,
21294 IX86_BUILTIN_DIVPS256,
21295 IX86_BUILTIN_DPPS256,
21296 IX86_BUILTIN_HADDPD256,
21297 IX86_BUILTIN_HADDPS256,
21298 IX86_BUILTIN_HSUBPD256,
21299 IX86_BUILTIN_HSUBPS256,
21300 IX86_BUILTIN_MAXPD256,
21301 IX86_BUILTIN_MAXPS256,
21302 IX86_BUILTIN_MINPD256,
21303 IX86_BUILTIN_MINPS256,
21304 IX86_BUILTIN_MULPD256,
21305 IX86_BUILTIN_MULPS256,
21306 IX86_BUILTIN_ORPD256,
21307 IX86_BUILTIN_ORPS256,
21308 IX86_BUILTIN_SHUFPD256,
21309 IX86_BUILTIN_SHUFPS256,
21310 IX86_BUILTIN_SUBPD256,
21311 IX86_BUILTIN_SUBPS256,
21312 IX86_BUILTIN_XORPD256,
21313 IX86_BUILTIN_XORPS256,
21314 IX86_BUILTIN_CMPSD,
21315 IX86_BUILTIN_CMPSS,
21316 IX86_BUILTIN_CMPPD,
21317 IX86_BUILTIN_CMPPS,
21318 IX86_BUILTIN_CMPPD256,
21319 IX86_BUILTIN_CMPPS256,
21320 IX86_BUILTIN_CVTDQ2PD256,
21321 IX86_BUILTIN_CVTDQ2PS256,
21322 IX86_BUILTIN_CVTPD2PS256,
21323 IX86_BUILTIN_CVTPS2DQ256,
21324 IX86_BUILTIN_CVTPS2PD256,
21325 IX86_BUILTIN_CVTTPD2DQ256,
21326 IX86_BUILTIN_CVTPD2DQ256,
21327 IX86_BUILTIN_CVTTPS2DQ256,
21328 IX86_BUILTIN_EXTRACTF128PD256,
21329 IX86_BUILTIN_EXTRACTF128PS256,
21330 IX86_BUILTIN_EXTRACTF128SI256,
21331 IX86_BUILTIN_VZEROALL,
21332 IX86_BUILTIN_VZEROUPPER,
21333 IX86_BUILTIN_VPERMILVARPD,
21334 IX86_BUILTIN_VPERMILVARPS,
21335 IX86_BUILTIN_VPERMILVARPD256,
21336 IX86_BUILTIN_VPERMILVARPS256,
21337 IX86_BUILTIN_VPERMILPD,
21338 IX86_BUILTIN_VPERMILPS,
21339 IX86_BUILTIN_VPERMILPD256,
21340 IX86_BUILTIN_VPERMILPS256,
21341 IX86_BUILTIN_VPERMIL2PD,
21342 IX86_BUILTIN_VPERMIL2PS,
21343 IX86_BUILTIN_VPERMIL2PD256,
21344 IX86_BUILTIN_VPERMIL2PS256,
21345 IX86_BUILTIN_VPERM2F128PD256,
21346 IX86_BUILTIN_VPERM2F128PS256,
21347 IX86_BUILTIN_VPERM2F128SI256,
21348 IX86_BUILTIN_VBROADCASTSS,
21349 IX86_BUILTIN_VBROADCASTSD256,
21350 IX86_BUILTIN_VBROADCASTSS256,
21351 IX86_BUILTIN_VBROADCASTPD256,
21352 IX86_BUILTIN_VBROADCASTPS256,
21353 IX86_BUILTIN_VINSERTF128PD256,
21354 IX86_BUILTIN_VINSERTF128PS256,
21355 IX86_BUILTIN_VINSERTF128SI256,
21356 IX86_BUILTIN_LOADUPD256,
21357 IX86_BUILTIN_LOADUPS256,
21358 IX86_BUILTIN_STOREUPD256,
21359 IX86_BUILTIN_STOREUPS256,
21360 IX86_BUILTIN_LDDQU256,
21361 IX86_BUILTIN_MOVNTDQ256,
21362 IX86_BUILTIN_MOVNTPD256,
21363 IX86_BUILTIN_MOVNTPS256,
21364 IX86_BUILTIN_LOADDQU256,
21365 IX86_BUILTIN_STOREDQU256,
21366 IX86_BUILTIN_MASKLOADPD,
21367 IX86_BUILTIN_MASKLOADPS,
21368 IX86_BUILTIN_MASKSTOREPD,
21369 IX86_BUILTIN_MASKSTOREPS,
21370 IX86_BUILTIN_MASKLOADPD256,
21371 IX86_BUILTIN_MASKLOADPS256,
21372 IX86_BUILTIN_MASKSTOREPD256,
21373 IX86_BUILTIN_MASKSTOREPS256,
21374 IX86_BUILTIN_MOVSHDUP256,
21375 IX86_BUILTIN_MOVSLDUP256,
21376 IX86_BUILTIN_MOVDDUP256,
21378 IX86_BUILTIN_SQRTPD256,
21379 IX86_BUILTIN_SQRTPS256,
21380 IX86_BUILTIN_SQRTPS_NR256,
21381 IX86_BUILTIN_RSQRTPS256,
21382 IX86_BUILTIN_RSQRTPS_NR256,
21384 IX86_BUILTIN_RCPPS256,
21386 IX86_BUILTIN_ROUNDPD256,
21387 IX86_BUILTIN_ROUNDPS256,
21389 IX86_BUILTIN_UNPCKHPD256,
21390 IX86_BUILTIN_UNPCKLPD256,
21391 IX86_BUILTIN_UNPCKHPS256,
21392 IX86_BUILTIN_UNPCKLPS256,
21394 IX86_BUILTIN_SI256_SI,
21395 IX86_BUILTIN_PS256_PS,
21396 IX86_BUILTIN_PD256_PD,
21397 IX86_BUILTIN_SI_SI256,
21398 IX86_BUILTIN_PS_PS256,
21399 IX86_BUILTIN_PD_PD256,
21401 IX86_BUILTIN_VTESTZPD,
21402 IX86_BUILTIN_VTESTCPD,
21403 IX86_BUILTIN_VTESTNZCPD,
21404 IX86_BUILTIN_VTESTZPS,
21405 IX86_BUILTIN_VTESTCPS,
21406 IX86_BUILTIN_VTESTNZCPS,
21407 IX86_BUILTIN_VTESTZPD256,
21408 IX86_BUILTIN_VTESTCPD256,
21409 IX86_BUILTIN_VTESTNZCPD256,
21410 IX86_BUILTIN_VTESTZPS256,
21411 IX86_BUILTIN_VTESTCPS256,
21412 IX86_BUILTIN_VTESTNZCPS256,
21413 IX86_BUILTIN_PTESTZ256,
21414 IX86_BUILTIN_PTESTC256,
21415 IX86_BUILTIN_PTESTNZC256,
21417 IX86_BUILTIN_MOVMSKPD256,
21418 IX86_BUILTIN_MOVMSKPS256,
21420 /* TFmode support builtins. */
21422 IX86_BUILTIN_HUGE_VALQ,
21423 IX86_BUILTIN_FABSQ,
21424 IX86_BUILTIN_COPYSIGNQ,
21426 /* Vectorizer support builtins. */
21427 IX86_BUILTIN_CPYSGNPS,
21428 IX86_BUILTIN_CPYSGNPD,
21430 IX86_BUILTIN_CVTUDQ2PS,
21432 IX86_BUILTIN_VEC_PERM_V2DF,
21433 IX86_BUILTIN_VEC_PERM_V4SF,
21434 IX86_BUILTIN_VEC_PERM_V2DI,
21435 IX86_BUILTIN_VEC_PERM_V4SI,
21436 IX86_BUILTIN_VEC_PERM_V8HI,
21437 IX86_BUILTIN_VEC_PERM_V16QI,
21438 IX86_BUILTIN_VEC_PERM_V2DI_U,
21439 IX86_BUILTIN_VEC_PERM_V4SI_U,
21440 IX86_BUILTIN_VEC_PERM_V8HI_U,
21441 IX86_BUILTIN_VEC_PERM_V16QI_U,
21442 IX86_BUILTIN_VEC_PERM_V4DF,
21443 IX86_BUILTIN_VEC_PERM_V8SF,
21445 /* FMA4 and XOP instructions. */
21446 IX86_BUILTIN_VFMADDSS,
21447 IX86_BUILTIN_VFMADDSD,
21448 IX86_BUILTIN_VFMADDPS,
21449 IX86_BUILTIN_VFMADDPD,
21450 IX86_BUILTIN_VFMSUBSS,
21451 IX86_BUILTIN_VFMSUBSD,
21452 IX86_BUILTIN_VFMSUBPS,
21453 IX86_BUILTIN_VFMSUBPD,
21454 IX86_BUILTIN_VFMADDSUBPS,
21455 IX86_BUILTIN_VFMADDSUBPD,
21456 IX86_BUILTIN_VFMSUBADDPS,
21457 IX86_BUILTIN_VFMSUBADDPD,
21458 IX86_BUILTIN_VFNMADDSS,
21459 IX86_BUILTIN_VFNMADDSD,
21460 IX86_BUILTIN_VFNMADDPS,
21461 IX86_BUILTIN_VFNMADDPD,
21462 IX86_BUILTIN_VFNMSUBSS,
21463 IX86_BUILTIN_VFNMSUBSD,
21464 IX86_BUILTIN_VFNMSUBPS,
21465 IX86_BUILTIN_VFNMSUBPD,
21466 IX86_BUILTIN_VFMADDPS256,
21467 IX86_BUILTIN_VFMADDPD256,
21468 IX86_BUILTIN_VFMSUBPS256,
21469 IX86_BUILTIN_VFMSUBPD256,
21470 IX86_BUILTIN_VFMADDSUBPS256,
21471 IX86_BUILTIN_VFMADDSUBPD256,
21472 IX86_BUILTIN_VFMSUBADDPS256,
21473 IX86_BUILTIN_VFMSUBADDPD256,
21474 IX86_BUILTIN_VFNMADDPS256,
21475 IX86_BUILTIN_VFNMADDPD256,
21476 IX86_BUILTIN_VFNMSUBPS256,
21477 IX86_BUILTIN_VFNMSUBPD256,
21479 IX86_BUILTIN_VPCMOV,
21480 IX86_BUILTIN_VPCMOV_V2DI,
21481 IX86_BUILTIN_VPCMOV_V4SI,
21482 IX86_BUILTIN_VPCMOV_V8HI,
21483 IX86_BUILTIN_VPCMOV_V16QI,
21484 IX86_BUILTIN_VPCMOV_V4SF,
21485 IX86_BUILTIN_VPCMOV_V2DF,
21486 IX86_BUILTIN_VPCMOV256,
21487 IX86_BUILTIN_VPCMOV_V4DI256,
21488 IX86_BUILTIN_VPCMOV_V8SI256,
21489 IX86_BUILTIN_VPCMOV_V16HI256,
21490 IX86_BUILTIN_VPCMOV_V32QI256,
21491 IX86_BUILTIN_VPCMOV_V8SF256,
21492 IX86_BUILTIN_VPCMOV_V4DF256,
21494 IX86_BUILTIN_VPPERM,
21496 IX86_BUILTIN_VPMACSSWW,
21497 IX86_BUILTIN_VPMACSWW,
21498 IX86_BUILTIN_VPMACSSWD,
21499 IX86_BUILTIN_VPMACSWD,
21500 IX86_BUILTIN_VPMACSSDD,
21501 IX86_BUILTIN_VPMACSDD,
21502 IX86_BUILTIN_VPMACSSDQL,
21503 IX86_BUILTIN_VPMACSSDQH,
21504 IX86_BUILTIN_VPMACSDQL,
21505 IX86_BUILTIN_VPMACSDQH,
21506 IX86_BUILTIN_VPMADCSSWD,
21507 IX86_BUILTIN_VPMADCSWD,
21509 IX86_BUILTIN_VPHADDBW,
21510 IX86_BUILTIN_VPHADDBD,
21511 IX86_BUILTIN_VPHADDBQ,
21512 IX86_BUILTIN_VPHADDWD,
21513 IX86_BUILTIN_VPHADDWQ,
21514 IX86_BUILTIN_VPHADDDQ,
21515 IX86_BUILTIN_VPHADDUBW,
21516 IX86_BUILTIN_VPHADDUBD,
21517 IX86_BUILTIN_VPHADDUBQ,
21518 IX86_BUILTIN_VPHADDUWD,
21519 IX86_BUILTIN_VPHADDUWQ,
21520 IX86_BUILTIN_VPHADDUDQ,
21521 IX86_BUILTIN_VPHSUBBW,
21522 IX86_BUILTIN_VPHSUBWD,
21523 IX86_BUILTIN_VPHSUBDQ,
21525 IX86_BUILTIN_VPROTB,
21526 IX86_BUILTIN_VPROTW,
21527 IX86_BUILTIN_VPROTD,
21528 IX86_BUILTIN_VPROTQ,
21529 IX86_BUILTIN_VPROTB_IMM,
21530 IX86_BUILTIN_VPROTW_IMM,
21531 IX86_BUILTIN_VPROTD_IMM,
21532 IX86_BUILTIN_VPROTQ_IMM,
21534 IX86_BUILTIN_VPSHLB,
21535 IX86_BUILTIN_VPSHLW,
21536 IX86_BUILTIN_VPSHLD,
21537 IX86_BUILTIN_VPSHLQ,
21538 IX86_BUILTIN_VPSHAB,
21539 IX86_BUILTIN_VPSHAW,
21540 IX86_BUILTIN_VPSHAD,
21541 IX86_BUILTIN_VPSHAQ,
21543 IX86_BUILTIN_VFRCZSS,
21544 IX86_BUILTIN_VFRCZSD,
21545 IX86_BUILTIN_VFRCZPS,
21546 IX86_BUILTIN_VFRCZPD,
21547 IX86_BUILTIN_VFRCZPS256,
21548 IX86_BUILTIN_VFRCZPD256,
21550 IX86_BUILTIN_VPCOMEQUB,
21551 IX86_BUILTIN_VPCOMNEUB,
21552 IX86_BUILTIN_VPCOMLTUB,
21553 IX86_BUILTIN_VPCOMLEUB,
21554 IX86_BUILTIN_VPCOMGTUB,
21555 IX86_BUILTIN_VPCOMGEUB,
21556 IX86_BUILTIN_VPCOMFALSEUB,
21557 IX86_BUILTIN_VPCOMTRUEUB,
21559 IX86_BUILTIN_VPCOMEQUW,
21560 IX86_BUILTIN_VPCOMNEUW,
21561 IX86_BUILTIN_VPCOMLTUW,
21562 IX86_BUILTIN_VPCOMLEUW,
21563 IX86_BUILTIN_VPCOMGTUW,
21564 IX86_BUILTIN_VPCOMGEUW,
21565 IX86_BUILTIN_VPCOMFALSEUW,
21566 IX86_BUILTIN_VPCOMTRUEUW,
21568 IX86_BUILTIN_VPCOMEQUD,
21569 IX86_BUILTIN_VPCOMNEUD,
21570 IX86_BUILTIN_VPCOMLTUD,
21571 IX86_BUILTIN_VPCOMLEUD,
21572 IX86_BUILTIN_VPCOMGTUD,
21573 IX86_BUILTIN_VPCOMGEUD,
21574 IX86_BUILTIN_VPCOMFALSEUD,
21575 IX86_BUILTIN_VPCOMTRUEUD,
21577 IX86_BUILTIN_VPCOMEQUQ,
21578 IX86_BUILTIN_VPCOMNEUQ,
21579 IX86_BUILTIN_VPCOMLTUQ,
21580 IX86_BUILTIN_VPCOMLEUQ,
21581 IX86_BUILTIN_VPCOMGTUQ,
21582 IX86_BUILTIN_VPCOMGEUQ,
21583 IX86_BUILTIN_VPCOMFALSEUQ,
21584 IX86_BUILTIN_VPCOMTRUEUQ,
21586 IX86_BUILTIN_VPCOMEQB,
21587 IX86_BUILTIN_VPCOMNEB,
21588 IX86_BUILTIN_VPCOMLTB,
21589 IX86_BUILTIN_VPCOMLEB,
21590 IX86_BUILTIN_VPCOMGTB,
21591 IX86_BUILTIN_VPCOMGEB,
21592 IX86_BUILTIN_VPCOMFALSEB,
21593 IX86_BUILTIN_VPCOMTRUEB,
21595 IX86_BUILTIN_VPCOMEQW,
21596 IX86_BUILTIN_VPCOMNEW,
21597 IX86_BUILTIN_VPCOMLTW,
21598 IX86_BUILTIN_VPCOMLEW,
21599 IX86_BUILTIN_VPCOMGTW,
21600 IX86_BUILTIN_VPCOMGEW,
21601 IX86_BUILTIN_VPCOMFALSEW,
21602 IX86_BUILTIN_VPCOMTRUEW,
21604 IX86_BUILTIN_VPCOMEQD,
21605 IX86_BUILTIN_VPCOMNED,
21606 IX86_BUILTIN_VPCOMLTD,
21607 IX86_BUILTIN_VPCOMLED,
21608 IX86_BUILTIN_VPCOMGTD,
21609 IX86_BUILTIN_VPCOMGED,
21610 IX86_BUILTIN_VPCOMFALSED,
21611 IX86_BUILTIN_VPCOMTRUED,
21613 IX86_BUILTIN_VPCOMEQQ,
21614 IX86_BUILTIN_VPCOMNEQ,
21615 IX86_BUILTIN_VPCOMLTQ,
21616 IX86_BUILTIN_VPCOMLEQ,
21617 IX86_BUILTIN_VPCOMGTQ,
21618 IX86_BUILTIN_VPCOMGEQ,
21619 IX86_BUILTIN_VPCOMFALSEQ,
21620 IX86_BUILTIN_VPCOMTRUEQ,
21622 /* LWP instructions. */
21623 IX86_BUILTIN_LLWPCB,
21624 IX86_BUILTIN_SLWPCB,
21625 IX86_BUILTIN_LWPVAL32,
21626 IX86_BUILTIN_LWPVAL64,
21627 IX86_BUILTIN_LWPINS32,
21628 IX86_BUILTIN_LWPINS64,
21635 /* Table for the ix86 builtin decls. */
21636 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
/* Table of all of the builtin functions that are possible with different
   ISAs but are waiting to be built until a function is declared to use
   that ISA.  */
21641 struct builtin_isa {
21642 const char *name; /* function name */
21643 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
21644 int isa; /* isa_flags this builtin is defined for */
21645 bool const_p; /* true if the declaration is constant */
21646 bool set_and_not_built_p;
21649 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
/* Add an ix86 target builtin function with CODE, NAME and TYPE.  Save
   the MASK of which isa_flags to use in the ix86_builtins_isa array.
   Stores the function decl in the ix86_builtins array.  Returns the
   function decl or NULL_TREE if the builtin was not added.
   If the front end has a special hook for builtin functions, delay
   adding builtin functions that aren't in the current ISA until the ISA
   is changed with function-specific optimization.  Doing so can save
   about 300K for the default compiler.  When the builtin is expanded,
   check at that time whether it is valid.

   If the front end doesn't have a special hook, record all builtins,
   even if they aren't in the current ISA, in case the user uses
   function-specific options for a different ISA, so that we don't get
   scope errors if a builtin is added in the middle of a function
   scope.  */
21669 def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
21670 enum ix86_builtins code)
21672 tree decl = NULL_TREE;
21674 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
21676 ix86_builtins_isa[(int) code].isa = mask;
21678 mask &= ~OPTION_MASK_ISA_64BIT;
21680 || (mask & ix86_isa_flags) != 0
21681 || (lang_hooks.builtin_function
21682 == lang_hooks.builtin_function_ext_scope))
21685 tree type = ix86_get_builtin_func_type (tcode);
21686 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
21688 ix86_builtins[(int) code] = decl;
21689 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
21693 ix86_builtins[(int) code] = NULL_TREE;
21694 ix86_builtins_isa[(int) code].tcode = tcode;
21695 ix86_builtins_isa[(int) code].name = name;
21696 ix86_builtins_isa[(int) code].const_p = false;
21697 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
21704 /* Like def_builtin, but also marks the function decl "const". */
21707 def_builtin_const (int mask, const char *name,
21708 enum ix86_builtin_func_type tcode, enum ix86_builtins code)
21710 tree decl = def_builtin (mask, name, tcode, code);
21712 TREE_READONLY (decl) = 1;
21714 ix86_builtins_isa[(int) code].const_p = true;
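
/* Illustrative sketch (not part of the port): how a description table
   entry such as the bdesc_comi rows below is registered, mirroring the
   loop that walks those tables.  The type code is the one assumed for a
   v4sf comparison returning int.  */
#if 0
  def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_comieq",
                     INT_FTYPE_V4SF_V4SF, IX86_BUILTIN_COMIEQSS);
#endif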
21719 /* Add any new builtin functions for a given ISA that may not have been
21720 declared. This saves a bit of space compared to adding all of the
21721 declarations to the tree, even if we didn't use them. */
21724 ix86_add_new_builtins (int isa)
21728 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
21730 if ((ix86_builtins_isa[i].isa & isa) != 0
21731 && ix86_builtins_isa[i].set_and_not_built_p)
21735 /* Don't define the builtin again. */
21736 ix86_builtins_isa[i].set_and_not_built_p = false;
21738 type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
21739 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
21740 type, i, BUILT_IN_MD, NULL,
21743 ix86_builtins[i] = decl;
21744 if (ix86_builtins_isa[i].const_p)
21745 TREE_READONLY (decl) = 1;
21750 /* Bits for builtin_description.flag. */
21752 /* Set when we don't support the comparison natively, and should
21753 swap_comparison in order to support it. */
21754 #define BUILTIN_DESC_SWAP_OPERANDS 1
21756 struct builtin_description
21758 const unsigned int mask;
21759 const enum insn_code icode;
21760 const char *const name;
21761 const enum ix86_builtins code;
21762 const enum rtx_code comparison;
21766 static const struct builtin_description bdesc_comi[] =
21768 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
21769 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
21770 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
21771 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
21772 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
21773 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
21774 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
21775 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
21776 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
21777 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
21778 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
21779 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
21780 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
21781 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
21782 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
21783 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
21784 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
21785 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
21786 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
21787 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
21788 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
21789 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
21790 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
21791 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
21794 static const struct builtin_description bdesc_pcmpestr[] =
21797 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
21798 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
21799 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
21800 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
21801 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
21802 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
21803 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
21806 static const struct builtin_description bdesc_pcmpistr[] =
21809 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
21810 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
21811 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
21812 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
21813 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
21814 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
21815 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
/* Special builtins with a variable number of arguments.  */
21819 static const struct builtin_description bdesc_special_args[] =
21821 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
21822 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
21825 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21828 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21831 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21832 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21833 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21835 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21836 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21837 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21838 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21840 /* SSE or 3DNow!A */
21841 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21842 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
21845 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21846 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21847 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21848 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
21849 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21850 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
21851 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
21852 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
21853 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21855 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21856 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21859 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21862 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
21865 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21866 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },

  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
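
  /* LWP */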
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
};
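
/* Illustrative sketch only -- this is not code from GCC, and it is kept
   under "#if 0" so it is never compiled.  It shows how a
   builtin_description table like the ones above and below is typically
   walked when builtins are registered.  The field names (mask, name,
   code, flag) mirror the initializer order used in these tables;
   register_one_builtin is a hypothetical stand-in for the file's real
   def_builtin helpers.  */
#if 0
static void
walk_builtin_table (const struct builtin_description *d, size_t n)
{
  size_t i;

  for (i = 0; i < n; i++, d++)
    {
      /* Some entries (e.g. IX86_BUILTIN_MFENCE above) have a null name;
	 they only reserve a builtin code and are named elsewhere.  */
      if (d->name == 0)
	continue;
      /* The first column is an ISA mask: the builtin is registered only
	 when one of the masked ISAs is enabled.  */
      register_one_builtin (d->mask, d->name, d->code, d->flag);
    }
}
#endif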
/* Builtins with variable number of arguments.  */
static const struct builtin_description bdesc_args[] =
{
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
  { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
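
  /* MMX */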
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
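
  /* 3DNow! */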
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },

  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
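
  /* 3DNow!A */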
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
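
  /* SSE */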
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },

  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },

  /* SSE MMX or 3DNow!A */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
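
  /* SSE2 */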
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
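
  /* SSE2 MMX */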
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
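
  /* SSE3 */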
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
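
  /* SSSE3 */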
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
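
  /* SSE4.1 */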
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },

  /* SSE4.2 */
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },

  /* SSE4A */
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  /* AES */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  /* PCLMUL */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },

  /* AVX */
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8si, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8sf, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v4df, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },

  { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
};
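
/* Illustrative note (added commentary, not from the original file): each row
   above ties one builtin to one insn pattern plus a function type.  E.g. the
   "__builtin_ia32_pmaxsd128" row uses CODE_FOR_smaxv4si3 together with
   V4SI_FTYPE_V4SI_V4SI, so a call such as

     __v4si m = __builtin_ia32_pmaxsd128 (a, b);

   is type-checked against that signature and expanded through the generic
   smaxv4si3 pattern.  The comparison slot (UNKNOWN here; EQ/LTU/GTU in the
   ptest/vtest rows) carries an extra rtx_code for expanders that need one.  */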

/* FMA4 and XOP.  */
#define MULTI_ARG_4_DF2_DI_I	V2DF_FTYPE_V2DF_V2DF_V2DI_INT
#define MULTI_ARG_4_DF2_DI_I1	V4DF_FTYPE_V4DF_V4DF_V4DI_INT
#define MULTI_ARG_4_SF2_SI_I	V4SF_FTYPE_V4SF_V4SF_V4SI_INT
#define MULTI_ARG_4_SF2_SI_I1	V8SF_FTYPE_V8SF_V8SF_V8SI_INT
#define MULTI_ARG_3_SF		V4SF_FTYPE_V4SF_V4SF_V4SF
#define MULTI_ARG_3_DF		V2DF_FTYPE_V2DF_V2DF_V2DF
#define MULTI_ARG_3_SF2		V8SF_FTYPE_V8SF_V8SF_V8SF
#define MULTI_ARG_3_DF2		V4DF_FTYPE_V4DF_V4DF_V4DF
#define MULTI_ARG_3_DI		V2DI_FTYPE_V2DI_V2DI_V2DI
#define MULTI_ARG_3_SI		V4SI_FTYPE_V4SI_V4SI_V4SI
#define MULTI_ARG_3_SI_DI	V4SI_FTYPE_V4SI_V4SI_V2DI
#define MULTI_ARG_3_HI		V8HI_FTYPE_V8HI_V8HI_V8HI
#define MULTI_ARG_3_HI_SI	V8HI_FTYPE_V8HI_V8HI_V4SI
#define MULTI_ARG_3_QI		V16QI_FTYPE_V16QI_V16QI_V16QI
#define MULTI_ARG_3_DI2		V4DI_FTYPE_V4DI_V4DI_V4DI
#define MULTI_ARG_3_SI2		V8SI_FTYPE_V8SI_V8SI_V8SI
#define MULTI_ARG_3_HI2		V16HI_FTYPE_V16HI_V16HI_V16HI
#define MULTI_ARG_3_QI2		V32QI_FTYPE_V32QI_V32QI_V32QI
#define MULTI_ARG_2_SF		V4SF_FTYPE_V4SF_V4SF
#define MULTI_ARG_2_DF		V2DF_FTYPE_V2DF_V2DF
#define MULTI_ARG_2_DI		V2DI_FTYPE_V2DI_V2DI
#define MULTI_ARG_2_SI		V4SI_FTYPE_V4SI_V4SI
#define MULTI_ARG_2_HI		V8HI_FTYPE_V8HI_V8HI
#define MULTI_ARG_2_QI		V16QI_FTYPE_V16QI_V16QI
#define MULTI_ARG_2_DI_IMM	V2DI_FTYPE_V2DI_SI
#define MULTI_ARG_2_SI_IMM	V4SI_FTYPE_V4SI_SI
#define MULTI_ARG_2_HI_IMM	V8HI_FTYPE_V8HI_SI
#define MULTI_ARG_2_QI_IMM	V16QI_FTYPE_V16QI_SI
#define MULTI_ARG_2_DI_CMP	V2DI_FTYPE_V2DI_V2DI_CMP
#define MULTI_ARG_2_SI_CMP	V4SI_FTYPE_V4SI_V4SI_CMP
#define MULTI_ARG_2_HI_CMP	V8HI_FTYPE_V8HI_V8HI_CMP
#define MULTI_ARG_2_QI_CMP	V16QI_FTYPE_V16QI_V16QI_CMP
#define MULTI_ARG_2_SF_TF	V4SF_FTYPE_V4SF_V4SF_TF
#define MULTI_ARG_2_DF_TF	V2DF_FTYPE_V2DF_V2DF_TF
#define MULTI_ARG_2_DI_TF	V2DI_FTYPE_V2DI_V2DI_TF
#define MULTI_ARG_2_SI_TF	V4SI_FTYPE_V4SI_V4SI_TF
#define MULTI_ARG_2_HI_TF	V8HI_FTYPE_V8HI_V8HI_TF
#define MULTI_ARG_2_QI_TF	V16QI_FTYPE_V16QI_V16QI_TF
#define MULTI_ARG_1_SF		V4SF_FTYPE_V4SF
#define MULTI_ARG_1_DF		V2DF_FTYPE_V2DF
#define MULTI_ARG_1_SF2		V8SF_FTYPE_V8SF
#define MULTI_ARG_1_DF2		V4DF_FTYPE_V4DF
#define MULTI_ARG_1_DI		V2DI_FTYPE_V2DI
#define MULTI_ARG_1_SI		V4SI_FTYPE_V4SI
#define MULTI_ARG_1_HI		V8HI_FTYPE_V8HI
#define MULTI_ARG_1_QI		V16QI_FTYPE_V16QI
#define MULTI_ARG_1_SI_DI	V2DI_FTYPE_V4SI
#define MULTI_ARG_1_HI_DI	V2DI_FTYPE_V8HI
#define MULTI_ARG_1_HI_SI	V4SI_FTYPE_V8HI
#define MULTI_ARG_1_QI_DI	V2DI_FTYPE_V16QI
#define MULTI_ARG_1_QI_SI	V4SI_FTYPE_V16QI
#define MULTI_ARG_1_QI_HI	V8HI_FTYPE_V16QI
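
/* Reader's note (inferred from the definitions above): MULTI_ARG_<N>_<M>
   names the ix86_builtin_func_type of an N-operand builtin whose element
   mode is <M>; a trailing "2" selects the 256-bit forms (MULTI_ARG_3_SF is
   V4SF_FTYPE_V4SF_V4SF_V4SF, MULTI_ARG_3_SF2 is V8SF_FTYPE_V8SF_V8SF_V8SF),
   while the _IMM, _CMP and _TF suffixes mark an immediate operand, a
   comparison form, and the pcom true/false form respectively.  */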

static const struct builtin_description bdesc_multi_arg[] =
{
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv4sf4, "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv2df4, "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4sf4, "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv2df4, "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv4sf4, "__builtin_ia32_vfmsubss", IX86_BUILTIN_VFMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv2df4, "__builtin_ia32_vfmsubsd", IX86_BUILTIN_VFMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4sf4, "__builtin_ia32_vfmsubps", IX86_BUILTIN_VFMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv2df4, "__builtin_ia32_vfmsubpd", IX86_BUILTIN_VFMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv4sf4, "__builtin_ia32_vfnmaddss", IX86_BUILTIN_VFNMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv2df4, "__builtin_ia32_vfnmaddsd", IX86_BUILTIN_VFNMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4sf4, "__builtin_ia32_vfnmaddps", IX86_BUILTIN_VFNMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv2df4, "__builtin_ia32_vfnmaddpd", IX86_BUILTIN_VFNMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv4sf4, "__builtin_ia32_vfnmsubss", IX86_BUILTIN_VFNMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv2df4, "__builtin_ia32_vfnmsubsd", IX86_BUILTIN_VFNMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4sf4, "__builtin_ia32_vfnmsubps", IX86_BUILTIN_VFNMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv2df4, "__builtin_ia32_vfnmsubpd", IX86_BUILTIN_VFNMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4sf4, "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv2df4, "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4sf4, "__builtin_ia32_vfmsubaddps", IX86_BUILTIN_VFMSUBADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv2df4, "__builtin_ia32_vfmsubaddpd", IX86_BUILTIN_VFMSUBADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv8sf4256, "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4df4256, "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv8sf4256, "__builtin_ia32_vfmsubps256", IX86_BUILTIN_VFMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4df4256, "__builtin_ia32_vfmsubpd256", IX86_BUILTIN_VFMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv8sf4256, "__builtin_ia32_vfnmaddps256", IX86_BUILTIN_VFNMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4df4256, "__builtin_ia32_vfnmaddpd256", IX86_BUILTIN_VFNMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv8sf4256, "__builtin_ia32_vfnmsubps256", IX86_BUILTIN_VFNMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4df4256, "__builtin_ia32_vfnmsubpd256", IX86_BUILTIN_VFNMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv8sf4, "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4df4, "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv8sf4, "__builtin_ia32_vfmsubaddps256", IX86_BUILTIN_VFMSUBADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4df4, "__builtin_ia32_vfmsubaddpd256", IX86_BUILTIN_VFMSUBADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi", IX86_BUILTIN_VPCMOV_V16QI, UNKNOWN, (int)MULTI_ARG_3_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2256, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2256, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub", IX86_BUILTIN_VPCOMFALSEUB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw", IX86_BUILTIN_VPCOMFALSEUW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud", IX86_BUILTIN_VPCOMFALSEUD, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq", IX86_BUILTIN_VPCOMFALSEUQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
};
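
/* Illustrative example (added commentary): with -mfma4, the first row of
   bdesc_multi_arg registers "__builtin_ia32_vfmaddss", so

     __m128 r = __builtin_ia32_vfmaddss (a, b, c);

   is typed by MULTI_ARG_3_SF and expanded through
   CODE_FOR_fma4i_vmfmaddv4sf4 by ix86_expand_multi_arg_builtin below.  */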

/* Set up all the MMX/SSE builtins, even builtins for instructions that are not
   in the current target ISA to allow the user to compile particular modules
   with different target specific options that differ from the command line
   options.  */
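/* For instance (hypothetical user code, not from this file), a module built
   with plain -msse2 may still contain

     __attribute__((target("sse4.1")))
     __m128i f (__m128i a, __m128i b) { return _mm_max_epi32 (a, b); }

   so the builtins behind such intrinsics must already be registered; whether
   the ISA is actually enabled is checked when the builtin is expanded.  */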
static void
ix86_init_mmx_sse_builtins (void)
{
  const struct builtin_description * d;
  enum ix86_builtin_func_type ftype;
  size_t i;

  /* Add all special builtins with variable number of operands.  */
  for (i = 0, d = bdesc_special_args;
       i < ARRAY_SIZE (bdesc_special_args);
       i++, d++)
    {
      if (d->name == 0)
        continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin (d->mask, d->name, ftype, d->code);
    }

  /* Add all builtins with variable number of operands.  */
  for (i = 0, d = bdesc_args;
       i < ARRAY_SIZE (bdesc_args);
       i++, d++)
    {
      if (d->name == 0)
        continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpestr[im] insns.  */
  for (i = 0, d = bdesc_pcmpestr;
       i < ARRAY_SIZE (bdesc_pcmpestr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPESTRM128)
        ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
      else
        ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpistr[im] insns.  */
  for (i = 0, d = bdesc_pcmpistr;
       i < ARRAY_SIZE (bdesc_pcmpistr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPISTRM128)
        ftype = V16QI_FTYPE_V16QI_V16QI_INT;
      else
        ftype = INT_FTYPE_V16QI_V16QI_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* comi/ucomi insns.  */
  for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
    {
      if (d->mask == OPTION_MASK_ISA_SSE2)
        ftype = INT_FTYPE_V2DF_V2DF;
      else
        ftype = INT_FTYPE_V4SF_V4SF;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* SSE */
  def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
               VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
  def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
               UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);

  /* SSE or 3DNow!A */
  def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
               "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
               IX86_BUILTIN_MASKMOVQ);

  /* SSE2 */
  def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
               VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);

  def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
               VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
  x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
                            VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);

  /* SSE3.  */
  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
               VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
               VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);

  /* AES */
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
                     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
                     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
                     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
                     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
                     V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
                     V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);

  /* PCLMUL */
  def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
                     V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);

  /* MMX access to the vec_init patterns.  */
  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
                     V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
                     V4HI_FTYPE_HI_HI_HI_HI,
                     IX86_BUILTIN_VEC_INIT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
                     V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
                     IX86_BUILTIN_VEC_INIT_V8QI);
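
  /* Usage sketch (added commentary): per V2SI_FTYPE_INT_INT above,
     __builtin_ia32_vec_init_v2si (1, 2) builds a V2SImode value from two
     ints through the corresponding vec_init pattern.  */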

  /* Access to the vec_extract patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
                     DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
                     DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
  def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
                     FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
                     SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
                     HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
                     "__builtin_ia32_vec_ext_v4hi",
                     HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
                     SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
                     QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);

  /* Access to the vec_set patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
                     "__builtin_ia32_vec_set_v2di",
                     V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
                     V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
                     V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
                     V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
                     "__builtin_ia32_vec_set_v4hi",
                     V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
                     V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
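
  /* Usage sketch (added commentary): per V8HI_FTYPE_V8HI_HI_INT above,
     __builtin_ia32_vec_set_v8hi (v, x, 3) yields a copy of v with element 3
     replaced by x; the intrinsic headers build operations such as
     _mm_insert_epi16 on top of these.  */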

  /* Add FMA4 multi-arg argument instructions */
  for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
    {
      if (d->name == 0)
        continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }
}

/* Internal method for ix86_init_builtins.  */

static void
ix86_init_builtins_va_builtins_abi (void)
{
  tree ms_va_ref, sysv_va_ref;
  tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
  tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
  tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
  tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;

  if (!TARGET_64BIT)
    return;
  fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
  fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
  ms_va_ref = build_reference_type (ms_va_list_type_node);
  sysv_va_ref =
    build_pointer_type (TREE_TYPE (sysv_va_list_type_node));

  fnvoid_va_end_ms =
    build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_start_ms =
    build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_end_sysv =
    build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
  fnvoid_va_start_sysv =
    build_varargs_function_type_list (void_type_node, sysv_va_ref,
                                      NULL_TREE);
  fnvoid_va_copy_ms =
    build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
                              NULL_TREE);
  fnvoid_va_copy_sysv =
    build_function_type_list (void_type_node, sysv_va_ref,
                              sysv_va_ref, NULL_TREE);

  add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
                        BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
                        BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
                        BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
                        BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
                        BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
                        BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
}
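
/* Note (added commentary): the builtins above let 64-bit code that mixes
   calling conventions, e.g. an __attribute__((ms_abi)) varargs function in
   a SysV program, use __builtin_ms_va_start/__builtin_ms_va_end with the
   matching ms_va_list, just as va_start/va_end work for the native ABI.  */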

static void
ix86_init_builtin_types (void)
{
  tree float128_type_node, float80_type_node;

  /* The __float80 type.  */
  float80_type_node = long_double_type_node;
  if (TYPE_MODE (float80_type_node) != XFmode)
    {
      /* The __float80 type.  */
      float80_type_node = make_node (REAL_TYPE);

      TYPE_PRECISION (float80_type_node) = 80;
      layout_type (float80_type_node);
    }
  (*lang_hooks.types.register_builtin_type) (float80_type_node, "__float80");
23033 /* The __float128 type. */
23034 float128_type_node = make_node (REAL_TYPE);
23035 TYPE_PRECISION (float128_type_node) = 128;
23036 layout_type (float128_type_node);
23037 (*lang_hooks.types.register_builtin_type) (float128_type_node, "__float128");
23039 /* This macro is built by i386-builtin-types.awk. */
23040 DEFINE_BUILTIN_PRIMITIVE_TYPES;
static void
ix86_init_builtins (void)
{
  tree t;

  ix86_init_builtin_types ();

  /* TFmode support builtins.  */
  def_builtin_const (0, "__builtin_infq",
                     FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
  def_builtin_const (0, "__builtin_huge_valq",
                     FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);
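  /* For example, user code can write, with no header required:

         __float128 inf = __builtin_infq ();

     Both builtins simply expand to a TFmode constant; see the
     IX86_BUILTIN_INFQ / IX86_BUILTIN_HUGE_VALQ case in
     ix86_expand_builtin below.  */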
  /* We will expand them to a normal call if SSE2 isn't available, since
     they are used by libgcc.  */
  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
  t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
                            BUILT_IN_MD, "__fabstf2", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;

  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
  t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
                            BUILT_IN_MD, "__copysigntf3", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
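  /* Sketch of the fallback path: with SSE2, __builtin_fabsq (x) is
     expanded inline via ix86_expand_args_builtin; without SSE2, the
     IX86_BUILTIN_FABSQ case in ix86_expand_builtin emits a normal call
     to the asm name registered here, i.e. __fabstf2 from libgcc.  */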
  ix86_init_mmx_sse_builtins ();

  if (TARGET_64BIT)
    ix86_init_builtins_va_builtins_abi ();
}
/* Return the ix86 builtin for CODE.  */

static tree
ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= IX86_BUILTIN_MAX)
    return error_mark_node;

  return ix86_builtins[code];
}
/* Errors in the source file can cause expand_expr to return const0_rtx
   where we expect a vector.  To avoid crashing, use one of the vector
   clear instructions.  */

static rtx
safe_vector_operand (rtx x, enum machine_mode mode)
{
  if (x == const0_rtx)
    x = CONST0_RTX (mode);
  return x;
}
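/* E.g. if a broken source file makes expand_expr return const0_rtx for
   a V4SI operand, CONST0_RTX (V4SImode) substitutes a legitimate
   all-zero vector constant that the insn predicates accept.  */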
/* Subroutine of ix86_expand_builtin to take care of binop insns.  */

static rtx
ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (GET_MODE (op1) == SImode && mode1 == TImode)
    {
      rtx x = gen_reg_rtx (V4SImode);
      emit_insn (gen_sse2_loadd (x, op1));
      op1 = gen_lowpart (TImode, x);
    }

  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns.  */

static rtx
ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
                               enum ix86_builtin_func_type m_type,
                               enum rtx_code sub_code)
{
  rtx pat;
  unsigned int i, nargs;
  bool comparison_p = false;
  bool tf_p = false;
  bool last_arg_constant = false;
  int num_memory = 0;
  struct
    {
      rtx op;
      enum machine_mode mode;
    } args[4];

  enum machine_mode tmode = insn_data[icode].operand[0].mode;

  switch (m_type)
    {
    case MULTI_ARG_4_DF2_DI_I:
    case MULTI_ARG_4_DF2_DI_I1:
    case MULTI_ARG_4_SF2_SI_I:
    case MULTI_ARG_4_SF2_SI_I1:
      nargs = 4;
      last_arg_constant = true;
      break;

    case MULTI_ARG_3_SF:
    case MULTI_ARG_3_DF:
    case MULTI_ARG_3_SF2:
    case MULTI_ARG_3_DF2:
    case MULTI_ARG_3_DI:
    case MULTI_ARG_3_SI:
    case MULTI_ARG_3_SI_DI:
    case MULTI_ARG_3_HI:
    case MULTI_ARG_3_HI_SI:
    case MULTI_ARG_3_QI:
    case MULTI_ARG_3_DI2:
    case MULTI_ARG_3_SI2:
    case MULTI_ARG_3_HI2:
    case MULTI_ARG_3_QI2:
      nargs = 3;
      break;

    case MULTI_ARG_2_SF:
    case MULTI_ARG_2_DF:
    case MULTI_ARG_2_DI:
    case MULTI_ARG_2_SI:
    case MULTI_ARG_2_HI:
    case MULTI_ARG_2_QI:
      nargs = 2;
      break;

    case MULTI_ARG_2_DI_IMM:
    case MULTI_ARG_2_SI_IMM:
    case MULTI_ARG_2_HI_IMM:
    case MULTI_ARG_2_QI_IMM:
      nargs = 2;
      last_arg_constant = true;
      break;

    case MULTI_ARG_1_SF:
    case MULTI_ARG_1_DF:
    case MULTI_ARG_1_SF2:
    case MULTI_ARG_1_DF2:
    case MULTI_ARG_1_DI:
    case MULTI_ARG_1_SI:
    case MULTI_ARG_1_HI:
    case MULTI_ARG_1_QI:
    case MULTI_ARG_1_SI_DI:
    case MULTI_ARG_1_HI_DI:
    case MULTI_ARG_1_HI_SI:
    case MULTI_ARG_1_QI_DI:
    case MULTI_ARG_1_QI_SI:
    case MULTI_ARG_1_QI_HI:
      nargs = 1;
      break;

    case MULTI_ARG_2_DI_CMP:
    case MULTI_ARG_2_SI_CMP:
    case MULTI_ARG_2_HI_CMP:
    case MULTI_ARG_2_QI_CMP:
      nargs = 2;
      comparison_p = true;
      break;

    case MULTI_ARG_2_SF_TF:
    case MULTI_ARG_2_DF_TF:
    case MULTI_ARG_2_DI_TF:
    case MULTI_ARG_2_SI_TF:
    case MULTI_ARG_2_HI_TF:
    case MULTI_ARG_2_QI_TF:
      nargs = 2;
      tf_p = true;
      break;

    default:
      gcc_unreachable ();
    }

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  gcc_assert (nargs <= 4);

  for (i = 0; i < nargs; i++)
    {
      tree arg = CALL_EXPR_ARG (exp, i);
      rtx op = expand_normal (arg);
      int adjust = (comparison_p) ? 1 : 0;
      enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;

      if (last_arg_constant && i == nargs-1)
        {
          if (!CONST_INT_P (op))
            {
              error ("last argument must be an immediate");
              return gen_reg_rtx (tmode);
            }
        }
      else
        {
          if (VECTOR_MODE_P (mode))
            op = safe_vector_operand (op, mode);

          /* If we aren't optimizing, only allow one memory operand to be
             generated.  */
          if (memory_operand (op, mode))
            num_memory++;

          gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);

          if (optimize
              || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
              || num_memory > 1)
            op = force_reg (mode, op);
        }

      args[i].op = op;
      args[i].mode = mode;
    }

  switch (nargs)
    {
    case 1:
      pat = GEN_FCN (icode) (target, args[0].op);
      break;

    case 2:
      if (tf_p)
        pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
                               GEN_INT ((int)sub_code));
      else if (! comparison_p)
        pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
      else
        {
          rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
                                       args[0].op,
                                       args[1].op);

          pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
        }
      break;

    case 3:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
      break;

    case 4:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op,
                             args[3].op);
      break;

    default:
      gcc_unreachable ();
    }

  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
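/* Example (hedged): the FMA4 builtin __builtin_ia32_vfmaddps is
   described as MULTI_ARG_3_SF, so nargs == 3 and the three operands go
   straight to the named pattern; an XOP comparison builtin such as
   __builtin_ia32_vpcomeqd uses MULTI_ARG_2_SI_CMP with sub_code EQ,
   which takes the comparison_p path above and wraps the operands in an
   (eq ...) rtx first.  */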
/* Subroutine of ix86_expand_args_builtin to take care of scalar unop
   insns with vec_merge.  */

static rtx
ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
                                    rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op1, op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);

  if ((optimize && !register_operand (op0, mode0))
      || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  op1 = op0;
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
    op1 = copy_to_mode_reg (mode0, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of comparison insns.  */

static rtx
ix86_expand_sse_compare (const struct builtin_description *d,
                         tree exp, rtx target, bool swap)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2;
  enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
  enum rtx_code comparison = d->comparison;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  /* Swap operands if we have a comparison that isn't available in
     hardware.  */
  if (swap)
    {
      rtx tmp = gen_reg_rtx (mode1);
      emit_move_insn (tmp, op1);
      op1 = op0;
      op0 = tmp;
    }

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if ((optimize && !register_operand (op0, mode0))
      || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
  pat = GEN_FCN (d->icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of comi insns.  */

static rtx
ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
                      rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
  enum rtx_code comparison = d->comparison;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  /* Swap operands if we have a comparison that isn't available in
     hardware.  */
  if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
    {
      rtx tmp = op1;
      op1 = op0;
      op0 = tmp;
    }

  target = gen_reg_rtx (SImode);
  emit_move_insn (target, const0_rtx);
  target = gen_rtx_SUBREG (QImode, target, 0);

  if ((optimize && !register_operand (op0, mode0))
      || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (d->icode) (op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  emit_insn (gen_rtx_SET (VOIDmode,
                          gen_rtx_STRICT_LOW_PART (VOIDmode, target),
                          gen_rtx_fmt_ee (comparison, QImode,
                                          SET_DEST (pat),
                                          const0_rtx)));

  return SUBREG_REG (target);
}
/* Subroutine of ix86_expand_builtin to take care of ptest insns.  */

static rtx
ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
                       rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
  enum rtx_code comparison = d->comparison;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  target = gen_reg_rtx (SImode);
  emit_move_insn (target, const0_rtx);
  target = gen_rtx_SUBREG (QImode, target, 0);

  if ((optimize && !register_operand (op0, mode0))
      || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (d->icode) (op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  emit_insn (gen_rtx_SET (VOIDmode,
                          gen_rtx_STRICT_LOW_PART (VOIDmode, target),
                          gen_rtx_fmt_ee (comparison, QImode,
                                          gen_rtx_REG (CCmode, FLAGS_REG),
                                          const0_rtx)));

  return SUBREG_REG (target);
}
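/* For instance, the SSE4.1 PTEST builtins are routed here with a
   comparison of EQ, so __builtin_ia32_ptestz128 (a, b) (the
   _mm_testz_si128 intrinsic) materializes "ZF set", i.e.
   (eq (reg:CC flags) 0), into the QImode subreg built above.  */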
/* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns.  */

static rtx
ix86_expand_sse_pcmpestr (const struct builtin_description *d,
                          tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  tree arg3 = CALL_EXPR_ARG (exp, 3);
  tree arg4 = CALL_EXPR_ARG (exp, 4);
  rtx scratch0, scratch1;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx op3 = expand_normal (arg3);
  rtx op4 = expand_normal (arg4);
  enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;

  tmode0 = insn_data[d->icode].operand[0].mode;
  tmode1 = insn_data[d->icode].operand[1].mode;
  modev2 = insn_data[d->icode].operand[2].mode;
  modei3 = insn_data[d->icode].operand[3].mode;
  modev4 = insn_data[d->icode].operand[4].mode;
  modei5 = insn_data[d->icode].operand[5].mode;
  modeimm = insn_data[d->icode].operand[6].mode;

  if (VECTOR_MODE_P (modev2))
    op0 = safe_vector_operand (op0, modev2);
  if (VECTOR_MODE_P (modev4))
    op2 = safe_vector_operand (op2, modev4);

  if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
    op0 = copy_to_mode_reg (modev2, op0);
  if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
    op1 = copy_to_mode_reg (modei3, op1);
  if ((optimize && !register_operand (op2, modev4))
      || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
    op2 = copy_to_mode_reg (modev4, op2);
  if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
    op3 = copy_to_mode_reg (modei5, op3);

  if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
    {
      error ("the fifth argument must be an 8-bit immediate");
      return const0_rtx;
    }

  if (d->code == IX86_BUILTIN_PCMPESTRI128)
    {
      if (optimize || !target
          || GET_MODE (target) != tmode0
          || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
        target = gen_reg_rtx (tmode0);

      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
    }
  else if (d->code == IX86_BUILTIN_PCMPESTRM128)
    {
      if (optimize || !target
          || GET_MODE (target) != tmode1
          || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
        target = gen_reg_rtx (tmode1);

      scratch0 = gen_reg_rtx (tmode0);

      pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
    }
  else
    {
      gcc_assert (d->flag);

      scratch0 = gen_reg_rtx (tmode0);
      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
    }

  if (! pat)
    return 0;

  emit_insn (pat);

  if (d->flag)
    {
      target = gen_reg_rtx (SImode);
      emit_move_insn (target, const0_rtx);
      target = gen_rtx_SUBREG (QImode, target, 0);

      emit_insn
        (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
                      gen_rtx_fmt_ee (EQ, QImode,
                                      gen_rtx_REG ((enum machine_mode) d->flag,
                                                   FLAGS_REG),
                                      const0_rtx)));
      return SUBREG_REG (target);
    }
  else
    return target;
}
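/* E.g. __builtin_ia32_pcmpestri128 (the _mm_cmpestri intrinsic) takes
   two vectors, their explicit lengths and an 8-bit mode immediate, and
   returns the index result; the *m variant returns the match mask in an
   XMM register, and the flag-extracting variants read the condition out
   of FLAGS_REG via the sequence built just above (hedged: the intrinsic
   wrappers themselves live in smmintrin.h).  */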
/* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns.  */

static rtx
ix86_expand_sse_pcmpistr (const struct builtin_description *d,
                          tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx scratch0, scratch1;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;

  tmode0 = insn_data[d->icode].operand[0].mode;
  tmode1 = insn_data[d->icode].operand[1].mode;
  modev2 = insn_data[d->icode].operand[2].mode;
  modev3 = insn_data[d->icode].operand[3].mode;
  modeimm = insn_data[d->icode].operand[4].mode;

  if (VECTOR_MODE_P (modev2))
    op0 = safe_vector_operand (op0, modev2);
  if (VECTOR_MODE_P (modev3))
    op1 = safe_vector_operand (op1, modev3);

  if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
    op0 = copy_to_mode_reg (modev2, op0);
  if ((optimize && !register_operand (op1, modev3))
      || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
    op1 = copy_to_mode_reg (modev3, op1);

  if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
    {
      error ("the third argument must be an 8-bit immediate");
      return const0_rtx;
    }

  if (d->code == IX86_BUILTIN_PCMPISTRI128)
    {
      if (optimize || !target
          || GET_MODE (target) != tmode0
          || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
        target = gen_reg_rtx (tmode0);

      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
    }
  else if (d->code == IX86_BUILTIN_PCMPISTRM128)
    {
      if (optimize || !target
          || GET_MODE (target) != tmode1
          || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
        target = gen_reg_rtx (tmode1);

      scratch0 = gen_reg_rtx (tmode0);

      pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
    }
  else
    {
      gcc_assert (d->flag);

      scratch0 = gen_reg_rtx (tmode0);
      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
    }

  if (! pat)
    return 0;

  emit_insn (pat);

  if (d->flag)
    {
      target = gen_reg_rtx (SImode);
      emit_move_insn (target, const0_rtx);
      target = gen_rtx_SUBREG (QImode, target, 0);

      emit_insn
        (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
                      gen_rtx_fmt_ee (EQ, QImode,
                                      gen_rtx_REG ((enum machine_mode) d->flag,
                                                   FLAGS_REG),
                                      const0_rtx)));
      return SUBREG_REG (target);
    }
  else
    return target;
}
/* Subroutine of ix86_expand_builtin to take care of insns with
   variable number of operands.  */

static rtx
ix86_expand_args_builtin (const struct builtin_description *d,
                          tree exp, rtx target)
{
  rtx pat, real_target;
  unsigned int i, nargs;
  unsigned int nargs_constant = 0;
  int num_memory = 0;
  struct
    {
      rtx op;
      enum machine_mode mode;
    } args[4];
  bool last_arg_count = false;
  enum insn_code icode = d->icode;
  const struct insn_data_d *insn_p = &insn_data[icode];
  enum machine_mode tmode = insn_p->operand[0].mode;
  enum machine_mode rmode = VOIDmode;
  bool swap = false;
  enum rtx_code comparison = d->comparison;

  switch ((enum ix86_builtin_func_type) d->flag)
    {
    case INT_FTYPE_V8SF_V8SF_PTEST:
    case INT_FTYPE_V4DI_V4DI_PTEST:
    case INT_FTYPE_V4DF_V4DF_PTEST:
    case INT_FTYPE_V4SF_V4SF_PTEST:
    case INT_FTYPE_V2DI_V2DI_PTEST:
    case INT_FTYPE_V2DF_V2DF_PTEST:
      return ix86_expand_sse_ptest (d, exp, target);
    case FLOAT128_FTYPE_FLOAT128:
    case FLOAT_FTYPE_FLOAT:
    case INT_FTYPE_INT:
    case UINT64_FTYPE_INT:
    case UINT16_FTYPE_UINT16:
    case INT64_FTYPE_INT64:
    case INT64_FTYPE_V4SF:
    case INT64_FTYPE_V2DF:
    case INT_FTYPE_V16QI:
    case INT_FTYPE_V8QI:
    case INT_FTYPE_V8SF:
    case INT_FTYPE_V4DF:
    case INT_FTYPE_V4SF:
    case INT_FTYPE_V2DF:
    case V16QI_FTYPE_V16QI:
    case V8SI_FTYPE_V8SF:
    case V8SI_FTYPE_V4SI:
    case V8HI_FTYPE_V8HI:
    case V8HI_FTYPE_V16QI:
    case V8QI_FTYPE_V8QI:
    case V8SF_FTYPE_V8SF:
    case V8SF_FTYPE_V8SI:
    case V8SF_FTYPE_V4SF:
    case V4SI_FTYPE_V4SI:
    case V4SI_FTYPE_V16QI:
    case V4SI_FTYPE_V4SF:
    case V4SI_FTYPE_V8SI:
    case V4SI_FTYPE_V8HI:
    case V4SI_FTYPE_V4DF:
    case V4SI_FTYPE_V2DF:
    case V4HI_FTYPE_V4HI:
    case V4DF_FTYPE_V4DF:
    case V4DF_FTYPE_V4SI:
    case V4DF_FTYPE_V4SF:
    case V4DF_FTYPE_V2DF:
    case V4SF_FTYPE_V4SF:
    case V4SF_FTYPE_V4SI:
    case V4SF_FTYPE_V8SF:
    case V4SF_FTYPE_V4DF:
    case V4SF_FTYPE_V2DF:
    case V2DI_FTYPE_V2DI:
    case V2DI_FTYPE_V16QI:
    case V2DI_FTYPE_V8HI:
    case V2DI_FTYPE_V4SI:
    case V2DF_FTYPE_V2DF:
    case V2DF_FTYPE_V4SI:
    case V2DF_FTYPE_V4DF:
    case V2DF_FTYPE_V4SF:
    case V2DF_FTYPE_V2SI:
    case V2SI_FTYPE_V2SI:
    case V2SI_FTYPE_V4SF:
    case V2SI_FTYPE_V2SF:
    case V2SI_FTYPE_V2DF:
    case V2SF_FTYPE_V2SF:
    case V2SF_FTYPE_V2SI:
      nargs = 1;
      break;
    case V4SF_FTYPE_V4SF_VEC_MERGE:
    case V2DF_FTYPE_V2DF_VEC_MERGE:
      return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
    case FLOAT128_FTYPE_FLOAT128_FLOAT128:
    case V16QI_FTYPE_V16QI_V16QI:
    case V16QI_FTYPE_V8HI_V8HI:
    case V8QI_FTYPE_V8QI_V8QI:
    case V8QI_FTYPE_V4HI_V4HI:
    case V8HI_FTYPE_V8HI_V8HI:
    case V8HI_FTYPE_V16QI_V16QI:
    case V8HI_FTYPE_V4SI_V4SI:
    case V8SF_FTYPE_V8SF_V8SF:
    case V8SF_FTYPE_V8SF_V8SI:
    case V4SI_FTYPE_V4SI_V4SI:
    case V4SI_FTYPE_V8HI_V8HI:
    case V4SI_FTYPE_V4SF_V4SF:
    case V4SI_FTYPE_V2DF_V2DF:
    case V4HI_FTYPE_V4HI_V4HI:
    case V4HI_FTYPE_V8QI_V8QI:
    case V4HI_FTYPE_V2SI_V2SI:
    case V4DF_FTYPE_V4DF_V4DF:
    case V4DF_FTYPE_V4DF_V4DI:
    case V4SF_FTYPE_V4SF_V4SF:
    case V4SF_FTYPE_V4SF_V4SI:
    case V4SF_FTYPE_V4SF_V2SI:
    case V4SF_FTYPE_V4SF_V2DF:
    case V4SF_FTYPE_V4SF_DI:
    case V4SF_FTYPE_V4SF_SI:
    case V2DI_FTYPE_V2DI_V2DI:
    case V2DI_FTYPE_V16QI_V16QI:
    case V2DI_FTYPE_V4SI_V4SI:
    case V2DI_FTYPE_V2DI_V16QI:
    case V2DI_FTYPE_V2DF_V2DF:
    case V2SI_FTYPE_V2SI_V2SI:
    case V2SI_FTYPE_V4HI_V4HI:
    case V2SI_FTYPE_V2SF_V2SF:
    case V2DF_FTYPE_V2DF_V2DF:
    case V2DF_FTYPE_V2DF_V4SF:
    case V2DF_FTYPE_V2DF_V2DI:
    case V2DF_FTYPE_V2DF_DI:
    case V2DF_FTYPE_V2DF_SI:
    case V2SF_FTYPE_V2SF_V2SF:
    case V1DI_FTYPE_V1DI_V1DI:
    case V1DI_FTYPE_V8QI_V8QI:
    case V1DI_FTYPE_V2SI_V2SI:
      if (comparison == UNKNOWN)
        return ix86_expand_binop_builtin (icode, exp, target);
      nargs = 2;
      break;
    case V4SF_FTYPE_V4SF_V4SF_SWAP:
    case V2DF_FTYPE_V2DF_V2DF_SWAP:
      gcc_assert (comparison != UNKNOWN);
      nargs = 2;
      swap = true;
      break;
    case V8HI_FTYPE_V8HI_V8HI_COUNT:
    case V8HI_FTYPE_V8HI_SI_COUNT:
    case V4SI_FTYPE_V4SI_V4SI_COUNT:
    case V4SI_FTYPE_V4SI_SI_COUNT:
    case V4HI_FTYPE_V4HI_V4HI_COUNT:
    case V4HI_FTYPE_V4HI_SI_COUNT:
    case V2DI_FTYPE_V2DI_V2DI_COUNT:
    case V2DI_FTYPE_V2DI_SI_COUNT:
    case V2SI_FTYPE_V2SI_V2SI_COUNT:
    case V2SI_FTYPE_V2SI_SI_COUNT:
    case V1DI_FTYPE_V1DI_V1DI_COUNT:
    case V1DI_FTYPE_V1DI_SI_COUNT:
      nargs = 2;
      last_arg_count = true;
      break;
    case UINT64_FTYPE_UINT64_UINT64:
    case UINT_FTYPE_UINT_UINT:
    case UINT_FTYPE_UINT_USHORT:
    case UINT_FTYPE_UINT_UCHAR:
    case UINT16_FTYPE_UINT16_INT:
    case UINT8_FTYPE_UINT8_INT:
      nargs = 2;
      break;
    case V2DI_FTYPE_V2DI_INT_CONVERT:
      nargs = 2;
      rmode = V1TImode;
      nargs_constant = 1;
      break;
    case V8HI_FTYPE_V8HI_INT:
    case V8SF_FTYPE_V8SF_INT:
    case V4SI_FTYPE_V4SI_INT:
    case V4SI_FTYPE_V8SI_INT:
    case V4HI_FTYPE_V4HI_INT:
    case V4DF_FTYPE_V4DF_INT:
    case V4SF_FTYPE_V4SF_INT:
    case V4SF_FTYPE_V8SF_INT:
    case V2DI_FTYPE_V2DI_INT:
    case V2DF_FTYPE_V2DF_INT:
    case V2DF_FTYPE_V4DF_INT:
      nargs = 2;
      nargs_constant = 1;
      break;
    case V16QI_FTYPE_V16QI_V16QI_V16QI:
    case V8SF_FTYPE_V8SF_V8SF_V8SF:
    case V4DF_FTYPE_V4DF_V4DF_V4DF:
    case V4SF_FTYPE_V4SF_V4SF_V4SF:
    case V2DF_FTYPE_V2DF_V2DF_V2DF:
      nargs = 3;
      break;
    case V16QI_FTYPE_V16QI_V16QI_INT:
    case V8HI_FTYPE_V8HI_V8HI_INT:
    case V8SI_FTYPE_V8SI_V8SI_INT:
    case V8SI_FTYPE_V8SI_V4SI_INT:
    case V8SF_FTYPE_V8SF_V8SF_INT:
    case V8SF_FTYPE_V8SF_V4SF_INT:
    case V4SI_FTYPE_V4SI_V4SI_INT:
    case V4DF_FTYPE_V4DF_V4DF_INT:
    case V4DF_FTYPE_V4DF_V2DF_INT:
    case V4SF_FTYPE_V4SF_V4SF_INT:
    case V2DI_FTYPE_V2DI_V2DI_INT:
    case V2DF_FTYPE_V2DF_V2DF_INT:
      nargs = 3;
      nargs_constant = 1;
      break;
    case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
      nargs = 3;
      rmode = V1TImode;
      nargs_constant = 1;
      break;
    case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
      nargs = 3;
      rmode = DImode;
      nargs_constant = 1;
      break;
    case V2DI_FTYPE_V2DI_UINT_UINT:
      nargs = 3;
      nargs_constant = 2;
      break;
    case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
    case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
    case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
    case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
      nargs = 4;
      nargs_constant = 1;
      break;
    case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
      nargs = 4;
      nargs_constant = 2;
      break;
    default:
      gcc_unreachable ();
    }

  gcc_assert (nargs <= ARRAY_SIZE (args));

  if (comparison != UNKNOWN)
    {
      gcc_assert (nargs == 2);
      return ix86_expand_sse_compare (d, exp, target, swap);
    }

  if (rmode == VOIDmode || rmode == tmode)
    {
      if (optimize
          || target == 0
          || GET_MODE (target) != tmode
          || ! (*insn_p->operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);
      real_target = target;
    }
  else
    {
      target = gen_reg_rtx (rmode);
      real_target = simplify_gen_subreg (tmode, target, rmode, 0);
    }

  for (i = 0; i < nargs; i++)
    {
      tree arg = CALL_EXPR_ARG (exp, i);
      rtx op = expand_normal (arg);
      enum machine_mode mode = insn_p->operand[i + 1].mode;
      bool match = (*insn_p->operand[i + 1].predicate) (op, mode);

      if (last_arg_count && (i + 1) == nargs)
        {
          /* SIMD shift insns take either an 8-bit immediate or
             a register as count.  But builtin functions take int as
             count.  If count doesn't match, we put it in a register.  */
          if (!match)
            {
              op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
              if (!(*insn_p->operand[i + 1].predicate) (op, mode))
                op = copy_to_reg (op);
            }
        }
      else if ((nargs - i) <= nargs_constant)
        {
          if (!match)
            switch (icode)
              {
              case CODE_FOR_sse4_1_roundpd:
              case CODE_FOR_sse4_1_roundps:
              case CODE_FOR_sse4_1_roundsd:
              case CODE_FOR_sse4_1_roundss:
              case CODE_FOR_sse4_1_blendps:
              case CODE_FOR_avx_blendpd256:
              case CODE_FOR_avx_vpermilv4df:
              case CODE_FOR_avx_roundpd256:
              case CODE_FOR_avx_roundps256:
                error ("the last argument must be a 4-bit immediate");
                return const0_rtx;

              case CODE_FOR_sse4_1_blendpd:
              case CODE_FOR_avx_vpermilv2df:
              case CODE_FOR_xop_vpermil2v2df3:
              case CODE_FOR_xop_vpermil2v4sf3:
              case CODE_FOR_xop_vpermil2v4df3:
              case CODE_FOR_xop_vpermil2v8sf3:
                error ("the last argument must be a 2-bit immediate");
                return const0_rtx;

              case CODE_FOR_avx_vextractf128v4df:
              case CODE_FOR_avx_vextractf128v8sf:
              case CODE_FOR_avx_vextractf128v8si:
              case CODE_FOR_avx_vinsertf128v4df:
              case CODE_FOR_avx_vinsertf128v8sf:
              case CODE_FOR_avx_vinsertf128v8si:
                error ("the last argument must be a 1-bit immediate");
                return const0_rtx;

              case CODE_FOR_avx_cmpsdv2df3:
              case CODE_FOR_avx_cmpssv4sf3:
              case CODE_FOR_avx_cmppdv2df3:
              case CODE_FOR_avx_cmppsv4sf3:
              case CODE_FOR_avx_cmppdv4df3:
              case CODE_FOR_avx_cmppsv8sf3:
                error ("the last argument must be a 5-bit immediate");
                return const0_rtx;

              default:
                switch (nargs_constant)
                  {
                  case 2:
                    if ((nargs - i) == nargs_constant)
                      {
                        error ("the next to last argument must be an 8-bit immediate");
                        break;
                      }
                  case 1:
                    error ("the last argument must be an 8-bit immediate");
                    break;
                  default:
                    gcc_unreachable ();
                  }
                return const0_rtx;
              }
        }
      else
        {
          if (VECTOR_MODE_P (mode))
            op = safe_vector_operand (op, mode);

          /* If we aren't optimizing, only allow one memory operand to
             be generated.  */
          if (memory_operand (op, mode))
            num_memory++;

          if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
            {
              if (optimize || !match || num_memory > 1)
                op = copy_to_mode_reg (mode, op);
            }
          else
            {
              op = copy_to_reg (op);
              op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
            }
        }

      args[i].op = op;
      args[i].mode = mode;
    }

  switch (nargs)
    {
    case 1:
      pat = GEN_FCN (icode) (real_target, args[0].op);
      break;
    case 2:
      pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
      break;
    case 3:
      pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
                             args[2].op);
      break;
    case 4:
      pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
                             args[2].op, args[3].op);
      break;
    default:
      gcc_unreachable ();
    }

  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of special insns
   with variable number of operands.  */

static rtx
ix86_expand_special_args_builtin (const struct builtin_description *d,
                                  tree exp, rtx target)
{
  tree arg;
  rtx pat, op;
  unsigned int i, nargs, arg_adjust, memory;
  struct
    {
      rtx op;
      enum machine_mode mode;
    } args[3];
  enum insn_code icode = d->icode;
  bool last_arg_constant = false;
  const struct insn_data_d *insn_p = &insn_data[icode];
  enum machine_mode tmode = insn_p->operand[0].mode;
  enum { load, store } klass;

  switch ((enum ix86_builtin_func_type) d->flag)
    {
    case VOID_FTYPE_VOID:
      emit_insn (GEN_FCN (icode) (target));
      return 0;
    case UINT64_FTYPE_VOID:
      nargs = 0;
      klass = load;
      memory = 0;
      break;
    case UINT64_FTYPE_PUNSIGNED:
    case V2DI_FTYPE_PV2DI:
    case V32QI_FTYPE_PCCHAR:
    case V16QI_FTYPE_PCCHAR:
    case V8SF_FTYPE_PCV4SF:
    case V8SF_FTYPE_PCFLOAT:
    case V4SF_FTYPE_PCFLOAT:
    case V4DF_FTYPE_PCV2DF:
    case V4DF_FTYPE_PCDOUBLE:
    case V2DF_FTYPE_PCDOUBLE:
    case VOID_FTYPE_PVOID:
      nargs = 1;
      klass = load;
      memory = 0;
      break;
    case VOID_FTYPE_PV2SF_V4SF:
    case VOID_FTYPE_PV4DI_V4DI:
    case VOID_FTYPE_PV2DI_V2DI:
    case VOID_FTYPE_PCHAR_V32QI:
    case VOID_FTYPE_PCHAR_V16QI:
    case VOID_FTYPE_PFLOAT_V8SF:
    case VOID_FTYPE_PFLOAT_V4SF:
    case VOID_FTYPE_PDOUBLE_V4DF:
    case VOID_FTYPE_PDOUBLE_V2DF:
    case VOID_FTYPE_PULONGLONG_ULONGLONG:
    case VOID_FTYPE_PINT_INT:
      nargs = 1;
      klass = store;
      /* Reserve memory operand for target.  */
      memory = ARRAY_SIZE (args);
      break;
    case V4SF_FTYPE_V4SF_PCV2SF:
    case V2DF_FTYPE_V2DF_PCDOUBLE:
      nargs = 2;
      klass = load;
      memory = 1;
      break;
    case V8SF_FTYPE_PCV8SF_V8SF:
    case V4DF_FTYPE_PCV4DF_V4DF:
    case V4SF_FTYPE_PCV4SF_V4SF:
    case V2DF_FTYPE_PCV2DF_V2DF:
      nargs = 2;
      klass = load;
      memory = 0;
      break;
    case VOID_FTYPE_PV8SF_V8SF_V8SF:
    case VOID_FTYPE_PV4DF_V4DF_V4DF:
    case VOID_FTYPE_PV4SF_V4SF_V4SF:
    case VOID_FTYPE_PV2DF_V2DF_V2DF:
      nargs = 2;
      klass = store;
      /* Reserve memory operand for target.  */
      memory = ARRAY_SIZE (args);
      break;
    case VOID_FTYPE_UINT_UINT_UINT:
    case VOID_FTYPE_UINT64_UINT_UINT:
    case UCHAR_FTYPE_UINT_UINT_UINT:
    case UCHAR_FTYPE_UINT64_UINT_UINT:
      nargs = 3;
      klass = load;
      memory = ARRAY_SIZE (args);
      last_arg_constant = true;
      break;
    default:
      gcc_unreachable ();
    }

  gcc_assert (nargs <= ARRAY_SIZE (args));

  if (klass == store)
    {
      arg = CALL_EXPR_ARG (exp, 0);
      op = expand_normal (arg);
      gcc_assert (target == 0);
      target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
      arg_adjust = 1;
    }
  else
    {
      arg_adjust = 0;
      if (optimize
          || target == 0
          || GET_MODE (target) != tmode
          || ! (*insn_p->operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);
    }

  for (i = 0; i < nargs; i++)
    {
      enum machine_mode mode = insn_p->operand[i + 1].mode;
      bool match;

      arg = CALL_EXPR_ARG (exp, i + arg_adjust);
      op = expand_normal (arg);
      match = (*insn_p->operand[i + 1].predicate) (op, mode);

      if (last_arg_constant && (i + 1) == nargs)
        {
          if (!match)
            {
              if (icode == CODE_FOR_lwp_lwpvalsi3
                  || icode == CODE_FOR_lwp_lwpinssi3
                  || icode == CODE_FOR_lwp_lwpvaldi3
                  || icode == CODE_FOR_lwp_lwpinsdi3)
                error ("the last argument must be a 32-bit immediate");
              else
                error ("the last argument must be an 8-bit immediate");
              return const0_rtx;
            }
        }
      else
        {
          if (i == memory)
            {
              /* This must be the memory operand.  */
              op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
              gcc_assert (GET_MODE (op) == mode
                          || GET_MODE (op) == VOIDmode);
            }
          else
            {
              /* This must be a register.  */
              if (VECTOR_MODE_P (mode))
                op = safe_vector_operand (op, mode);

              gcc_assert (GET_MODE (op) == mode
                          || GET_MODE (op) == VOIDmode);
              op = copy_to_mode_reg (mode, op);
            }
        }

      args[i].op = op;
      args[i].mode = mode;
    }

  switch (nargs)
    {
    case 0:
      pat = GEN_FCN (icode) (target);
      break;
    case 1:
      pat = GEN_FCN (icode) (target, args[0].op);
      break;
    case 2:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
      break;
    case 3:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
      break;
    default:
      gcc_unreachable ();
    }

  if (! pat)
    return 0;
  emit_insn (pat);
  return klass == store ? 0 : target;
}
/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */

static unsigned HOST_WIDE_INT
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!host_integerp (arg, 1)
      || (elt = tree_low_cst (arg, 1), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
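/* E.g. __builtin_ia32_vec_ext_v4sf (v, 5) is diagnosed here, since a
   V4SF selector must lie in 0..3; returning 0 lets expansion continue
   after the error instead of crashing.  */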
/* A subroutine of ix86_expand_builtin.  These builtins are a wrapper around
   ix86_expand_vector_init.  We DO have language-level syntax for this, in
   the form of (type){ init-list }.  Except that since we can't place emms
   instructions from inside the compiler, we can't allow the use of MMX
   registers unless the user explicitly asks for it.  So we do *not* define
   vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md.  Instead
   we have builtins invoked by mmintrin.h that give us license to emit
   these sorts of instructions.  */

static rtx
ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  enum machine_mode tmode = TYPE_MODE (type);
  enum machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);
  rtvec v = rtvec_alloc (n_elt);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (call_expr_nargs (exp) == n_elt);

  for (i = 0; i < n_elt; ++i)
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
      RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
    }

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
  return target;
}
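/* Example (hedged: the exact wrapper lives in mmintrin.h): the header
   implements _mm_set_pi16 on top of __builtin_ia32_vec_init_v4hi, which
   funnels through this expander, so MMX register usage stays explicitly
   requested by the user rather than being implied by (type){ init-list }
   syntax.  */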
/* A subroutine of ix86_expand_builtin.  These builtins are a wrapper around
   ix86_expand_vector_extract.  They would be redundant (for non-MMX) if we
   had a language-level syntax for referencing vector elements.  */

static rtx
ix86_expand_vec_ext_builtin (tree exp, rtx target)
{
  enum machine_mode tmode, mode0;
  tree arg0, arg1;
  int elt;
  rtx op0;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  elt = get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  ix86_expand_vector_extract (true, target, op0, elt);

  return target;
}
/* A subroutine of ix86_expand_builtin.  These builtins are a wrapper around
   ix86_expand_vector_set.  They would be redundant (for non-MMX) if we had
   a language-level syntax for referencing vector elements.  */

static rtx
ix86_expand_vec_set_builtin (tree exp)
{
  enum machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1, target;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  /* OP0 is the source of these builtin functions and shouldn't be
     modified.  Create a copy, use it, and return it as the target.  */
  target = gen_reg_rtx (tmode);
  emit_move_insn (target, op0);
  ix86_expand_vector_set (true, target, op1, elt);

  return target;
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                     enum machine_mode mode ATTRIBUTE_UNUSED,
                     int ignore ATTRIBUTE_UNUSED)
{
  const struct builtin_description *d;
  size_t i;
  enum insn_code icode;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg0, arg1, arg2;
  rtx op0, op1, op2, pat;
  enum machine_mode mode0, mode1, mode2;
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  /* Determine whether the builtin function is available under the current ISA.
     Originally the builtin was not created if it wasn't applicable to the
     current ISA based on the command line switches.  With function specific
     options, we need to check in the context of the function making the call
     whether it is supported.  */
  if (ix86_builtins_isa[fcode].isa
      && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
    {
      char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
                                       NULL, NULL, false);

      if (!opts)
        error ("%qE needs unknown isa option", fndecl);
      else
        {
          gcc_assert (opts != NULL);
          error ("%qE needs isa option %s", fndecl, opts);
          free (opts);
        }
      return const0_rtx;
    }
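  /* For example, calling the SSE4.1 builtin __builtin_ia32_pmuldq128
     from a function compiled without -msse4.1 is diagnosed here as
     "needs isa option -msse4.1" (the exact string is whatever
     ix86_target_string reconstructs) instead of producing a miscompiled
     expansion or an ICE.  */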
  switch (fcode)
    {
    case IX86_BUILTIN_MASKMOVQ:
    case IX86_BUILTIN_MASKMOVDQU:
      icode = (fcode == IX86_BUILTIN_MASKMOVQ
               ? CODE_FOR_mmx_maskmovq
               : CODE_FOR_sse2_maskmovdqu);
      /* Note the arg order is different from the operand order.  */
      arg1 = CALL_EXPR_ARG (exp, 0);
      arg2 = CALL_EXPR_ARG (exp, 1);
      arg0 = CALL_EXPR_ARG (exp, 2);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      op2 = expand_normal (arg2);
      mode0 = insn_data[icode].operand[0].mode;
      mode1 = insn_data[icode].operand[1].mode;
      mode2 = insn_data[icode].operand[2].mode;

      op0 = force_reg (Pmode, op0);
      op0 = gen_rtx_MEM (mode1, op0);

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
        op0 = copy_to_mode_reg (mode0, op0);
      if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
        op1 = copy_to_mode_reg (mode1, op1);
      if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
        op2 = copy_to_mode_reg (mode2, op2);
      pat = GEN_FCN (icode) (op0, op1, op2);
      if (! pat)
        return 0;
      emit_insn (pat);
      return 0;

    case IX86_BUILTIN_LDMXCSR:
      op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
      target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
      emit_move_insn (target, op0);
      emit_insn (gen_sse_ldmxcsr (target));
      return 0;

    case IX86_BUILTIN_STMXCSR:
      target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
      emit_insn (gen_sse_stmxcsr (target));
      return copy_to_mode_reg (SImode, target);

    case IX86_BUILTIN_CLFLUSH:
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      icode = CODE_FOR_sse2_clflush;
      if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
        op0 = copy_to_mode_reg (Pmode, op0);

      emit_insn (gen_sse2_clflush (op0));
      return 0;

    case IX86_BUILTIN_MONITOR:
      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      arg2 = CALL_EXPR_ARG (exp, 2);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      op2 = expand_normal (arg2);
      if (!REG_P (op0))
        op0 = copy_to_mode_reg (Pmode, op0);
      if (!REG_P (op1))
        op1 = copy_to_mode_reg (SImode, op1);
      if (!REG_P (op2))
        op2 = copy_to_mode_reg (SImode, op2);
      emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
      return 0;

    case IX86_BUILTIN_MWAIT:
      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      if (!REG_P (op0))
        op0 = copy_to_mode_reg (SImode, op0);
      if (!REG_P (op1))
        op1 = copy_to_mode_reg (SImode, op1);
      emit_insn (gen_sse3_mwait (op0, op1));
      return 0;

    case IX86_BUILTIN_VEC_INIT_V2SI:
    case IX86_BUILTIN_VEC_INIT_V4HI:
    case IX86_BUILTIN_VEC_INIT_V8QI:
      return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);

    case IX86_BUILTIN_VEC_EXT_V2DF:
    case IX86_BUILTIN_VEC_EXT_V2DI:
    case IX86_BUILTIN_VEC_EXT_V4SF:
    case IX86_BUILTIN_VEC_EXT_V4SI:
    case IX86_BUILTIN_VEC_EXT_V8HI:
    case IX86_BUILTIN_VEC_EXT_V2SI:
    case IX86_BUILTIN_VEC_EXT_V4HI:
    case IX86_BUILTIN_VEC_EXT_V16QI:
      return ix86_expand_vec_ext_builtin (exp, target);

    case IX86_BUILTIN_VEC_SET_V2DI:
    case IX86_BUILTIN_VEC_SET_V4SF:
    case IX86_BUILTIN_VEC_SET_V4SI:
    case IX86_BUILTIN_VEC_SET_V8HI:
    case IX86_BUILTIN_VEC_SET_V4HI:
    case IX86_BUILTIN_VEC_SET_V16QI:
      return ix86_expand_vec_set_builtin (exp);

    case IX86_BUILTIN_VEC_PERM_V2DF:
    case IX86_BUILTIN_VEC_PERM_V4SF:
    case IX86_BUILTIN_VEC_PERM_V2DI:
    case IX86_BUILTIN_VEC_PERM_V4SI:
    case IX86_BUILTIN_VEC_PERM_V8HI:
    case IX86_BUILTIN_VEC_PERM_V16QI:
    case IX86_BUILTIN_VEC_PERM_V2DI_U:
    case IX86_BUILTIN_VEC_PERM_V4SI_U:
    case IX86_BUILTIN_VEC_PERM_V8HI_U:
    case IX86_BUILTIN_VEC_PERM_V16QI_U:
    case IX86_BUILTIN_VEC_PERM_V4DF:
    case IX86_BUILTIN_VEC_PERM_V8SF:
      return ix86_expand_vec_perm_builtin (exp);

    case IX86_BUILTIN_INFQ:
    case IX86_BUILTIN_HUGE_VALQ:
      {
        REAL_VALUE_TYPE inf;
        rtx tmp;

        real_inf (&inf);
        tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);

        tmp = validize_mem (force_const_mem (mode, tmp));

        if (target == 0)
          target = gen_reg_rtx (mode);

        emit_move_insn (target, tmp);
        return target;
      }

    case IX86_BUILTIN_LLWPCB:
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      icode = CODE_FOR_lwp_llwpcb;
      if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
        op0 = copy_to_mode_reg (Pmode, op0);
      emit_insn (gen_lwp_llwpcb (op0));
      return 0;

    case IX86_BUILTIN_SLWPCB:
      icode = CODE_FOR_lwp_slwpcb;
      if (!target
          || ! (*insn_data[icode].operand[0].predicate) (target, Pmode))
        target = gen_reg_rtx (Pmode);
      emit_insn (gen_lwp_slwpcb (target));
      return target;

    default:
      break;
    }

  for (i = 0, d = bdesc_special_args;
       i < ARRAY_SIZE (bdesc_special_args);
       i++, d++)
    if (d->code == fcode)
      return ix86_expand_special_args_builtin (d, exp, target);

  for (i = 0, d = bdesc_args;
       i < ARRAY_SIZE (bdesc_args);
       i++, d++)
    if (d->code == fcode)
      switch (fcode)
        {
        case IX86_BUILTIN_FABSQ:
        case IX86_BUILTIN_COPYSIGNQ:
          if (!TARGET_SSE2)
            /* Emit a normal call if SSE2 isn't available.  */
            return expand_call (exp, target, ignore);
        default:
          return ix86_expand_args_builtin (d, exp, target);
        }

  for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
    if (d->code == fcode)
      return ix86_expand_sse_comi (d, exp, target);

  for (i = 0, d = bdesc_pcmpestr;
       i < ARRAY_SIZE (bdesc_pcmpestr);
       i++, d++)
    if (d->code == fcode)
      return ix86_expand_sse_pcmpestr (d, exp, target);

  for (i = 0, d = bdesc_pcmpistr;
       i < ARRAY_SIZE (bdesc_pcmpistr);
       i++, d++)
    if (d->code == fcode)
      return ix86_expand_sse_pcmpistr (d, exp, target);

  for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
    if (d->code == fcode)
      return ix86_expand_multi_arg_builtin (d->icode, exp, target,
                                            (enum ix86_builtin_func_type)
                                            d->flag, d->comparison);

  gcc_unreachable ();
}
/* Returns a function decl for a vectorized version of the builtin function
   with builtin function code FN and the result vector type TYPE, or NULL_TREE
   if it is not available.  */

static tree
ix86_builtin_vectorized_function (tree fndecl, tree type_out,
                                  tree type_in)
{
  enum machine_mode in_mode, out_mode;
  int in_n, out_n;
  enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);

  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE
      || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);

  switch (fn)
    {
    case BUILT_IN_SQRT:
      if (out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return ix86_builtins[IX86_BUILTIN_SQRTPD];
      break;

    case BUILT_IN_SQRTF:
      if (out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
      break;

    case BUILT_IN_LRINT:
      if (out_mode == SImode && out_n == 4
          && in_mode == DFmode && in_n == 2)
        return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
      break;

    case BUILT_IN_LRINTF:
      if (out_mode == SImode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
      break;

    case BUILT_IN_COPYSIGN:
      if (out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
      break;

    case BUILT_IN_COPYSIGNF:
      if (out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
      break;

    default:
      break;
    }
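  /* E.g. when vectorizing a double loop at -O3 -ffast-math, a sqrt call
     maps to IX86_BUILTIN_SQRTPD here, so the vectorizer emits a single
     sqrtpd on V2DF instead of two scalar libm calls.  */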
  /* Dispatch to a handler for a vectorization library.  */
  if (ix86_veclib_handler)
    return (*ix86_veclib_handler) ((enum built_in_function) fn, type_out,
                                   type_in);

  return NULL_TREE;
}
/* Handler for an SVML-style interface to
   a library with vectorized intrinsics.  */

static tree
ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
{
  char name[20];
  tree fntype, new_fndecl, args;
  unsigned arity;
  const char *bname;
  enum machine_mode el_mode, in_mode;
  int n, in_n;

  /* SVML is suitable for unsafe math only.  */
  if (!flag_unsafe_math_optimizations)
    return NULL_TREE;

  el_mode = TYPE_MODE (TREE_TYPE (type_out));
  n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (el_mode != in_mode
      || n != in_n)
    return NULL_TREE;

  switch (fn)
    {
    case BUILT_IN_EXP:
    case BUILT_IN_LOG:
    case BUILT_IN_LOG10:
    case BUILT_IN_POW:
    case BUILT_IN_TANH:
    case BUILT_IN_TAN:
    case BUILT_IN_ATAN:
    case BUILT_IN_ATAN2:
    case BUILT_IN_ATANH:
    case BUILT_IN_CBRT:
    case BUILT_IN_SINH:
    case BUILT_IN_SIN:
    case BUILT_IN_ASINH:
    case BUILT_IN_ASIN:
    case BUILT_IN_COSH:
    case BUILT_IN_COS:
    case BUILT_IN_ACOSH:
    case BUILT_IN_ACOS:
      if (el_mode != DFmode || n != 2)
        return NULL_TREE;
      break;

    case BUILT_IN_EXPF:
    case BUILT_IN_LOGF:
    case BUILT_IN_LOG10F:
    case BUILT_IN_POWF:
    case BUILT_IN_TANHF:
    case BUILT_IN_TANF:
    case BUILT_IN_ATANF:
    case BUILT_IN_ATAN2F:
    case BUILT_IN_ATANHF:
    case BUILT_IN_CBRTF:
    case BUILT_IN_SINHF:
    case BUILT_IN_SINF:
    case BUILT_IN_ASINHF:
    case BUILT_IN_ASINF:
    case BUILT_IN_COSHF:
    case BUILT_IN_COSF:
    case BUILT_IN_ACOSHF:
    case BUILT_IN_ACOSF:
      if (el_mode != SFmode || n != 4)
        return NULL_TREE;
      break;

    default:
      return NULL_TREE;
    }

  bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));

  if (fn == BUILT_IN_LOGF)
    strcpy (name, "vmlsLn4");
  else if (fn == BUILT_IN_LOG)
    strcpy (name, "vmldLn2");
  else if (n == 4)
    {
      sprintf (name, "vmls%s", bname+10);
      name[strlen (name)-1] = '4';
    }
  else
    sprintf (name, "vmld%s2", bname+10);

  /* Convert to uppercase.  */
  name[4] &= ~0x20;

  arity = 0;
  for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
       args = TREE_CHAIN (args))
    arity++;

  if (arity == 1)
    fntype = build_function_type_list (type_out, type_in, NULL);
  else
    fntype = build_function_type_list (type_out, type_in, type_in, NULL);

  /* Build a function declaration for the vectorized function.  */
  new_fndecl = build_decl (BUILTINS_LOCATION,
                           FUNCTION_DECL, get_identifier (name), fntype);
  TREE_PUBLIC (new_fndecl) = 1;
  DECL_EXTERNAL (new_fndecl) = 1;
  DECL_IS_NOVOPS (new_fndecl) = 1;
  TREE_READONLY (new_fndecl) = 1;

  return new_fndecl;
}
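/* Name-mangling sketch: BUILT_IN_SINF has DECL_NAME "__builtin_sinf",
   so bname+10 is "sinf"; the n == 4 branch produces "vmlssinf",
   overwrites the trailing 'f' with the width to get "vmlssin4", and
   the uppercasing of name[4] yields the SVML entry point vmlsSin4.
   The DFmode analogue comes out as vmldSin2.  */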
/* Handler for an ACML-style interface to
   a library with vectorized intrinsics.  */

static tree
ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
{
  char name[20] = "__vr.._";
  tree fntype, new_fndecl, args;
  unsigned arity;
  const char *bname;
  enum machine_mode el_mode, in_mode;
  int n, in_n;

  /* The ACML is 64-bit only, and suitable for unsafe math only, as
     it does not correctly support parts of IEEE arithmetic with the
     required precision, such as denormals.  */
  if (!TARGET_64BIT
      || !flag_unsafe_math_optimizations)
    return NULL_TREE;

  el_mode = TYPE_MODE (TREE_TYPE (type_out));
  n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (el_mode != in_mode
      || n != in_n)
    return NULL_TREE;

  switch (fn)
    {
    case BUILT_IN_SIN:
    case BUILT_IN_COS:
    case BUILT_IN_EXP:
    case BUILT_IN_LOG:
    case BUILT_IN_LOG2:
    case BUILT_IN_LOG10:
      name[4] = 'd';
      name[5] = '2';
      if (el_mode != DFmode
          || n != 2)
        return NULL_TREE;
      break;

    case BUILT_IN_SINF:
    case BUILT_IN_COSF:
    case BUILT_IN_EXPF:
    case BUILT_IN_POWF:
    case BUILT_IN_LOGF:
    case BUILT_IN_LOG2F:
    case BUILT_IN_LOG10F:
      name[4] = 's';
      name[5] = '4';
      if (el_mode != SFmode
          || n != 4)
        return NULL_TREE;
      break;

    default:
      return NULL_TREE;
    }

  bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
  sprintf (name + 7, "%s", bname+10);

  arity = 0;
  for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
       args = TREE_CHAIN (args))
    arity++;

  if (arity == 1)
    fntype = build_function_type_list (type_out, type_in, NULL);
  else
    fntype = build_function_type_list (type_out, type_in, type_in, NULL);

  /* Build a function declaration for the vectorized function.  */
  new_fndecl = build_decl (BUILTINS_LOCATION,
                           FUNCTION_DECL, get_identifier (name), fntype);
  TREE_PUBLIC (new_fndecl) = 1;
  DECL_EXTERNAL (new_fndecl) = 1;
  DECL_IS_NOVOPS (new_fndecl) = 1;
  TREE_READONLY (new_fndecl) = 1;

  return new_fndecl;
}
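/* Name-mangling sketch: the "__vr.._" template gets its two dots
   patched per mode, so BUILT_IN_SINF becomes "__vrs4_" and, with
   bname+10 == "sinf" appended at name + 7, names the ACML entry point
   __vrs4_sinf; BUILT_IN_SIN correspondingly yields __vrd2_sin.  */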
/* Returns a decl of a function that implements conversion of an integer vector
   into a floating-point vector, or vice-versa.  DEST_TYPE and SRC_TYPE
   are the types involved when converting according to CODE.
   Return NULL_TREE if it is not available.  */

static tree
ix86_vectorize_builtin_conversion (unsigned int code,
                                   tree dest_type, tree src_type)
{
  if (! TARGET_SSE2)
    return NULL_TREE;

  switch (code)
    {
    case FLOAT_EXPR:
      switch (TYPE_MODE (src_type))
        {
        case V4SImode:
          switch (TYPE_MODE (dest_type))
            {
            case V4SFmode:
              return (TYPE_UNSIGNED (src_type)
                      ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
                      : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
            case V4DFmode:
              return (TYPE_UNSIGNED (src_type)
                      ? NULL_TREE
                      : ix86_builtins[IX86_BUILTIN_CVTDQ2PD256]);
            default:
              return NULL_TREE;
            }

        case V8SImode:
          switch (TYPE_MODE (dest_type))
            {
            case V8SFmode:
              return (TYPE_UNSIGNED (src_type)
                      ? NULL_TREE
                      : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
            default:
              return NULL_TREE;
            }

        default:
          return NULL_TREE;
        }

    case FIX_TRUNC_EXPR:
      switch (TYPE_MODE (dest_type))
        {
        case V4SImode:
          switch (TYPE_MODE (src_type))
            {
            case V4SFmode:
              return (TYPE_UNSIGNED (dest_type)
                      ? NULL_TREE
                      : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ]);
            case V4DFmode:
              return (TYPE_UNSIGNED (dest_type)
                      ? NULL_TREE
                      : ix86_builtins[IX86_BUILTIN_CVTTPD2DQ256]);
            default:
              return NULL_TREE;
            }

        case V8SImode:
          switch (TYPE_MODE (src_type))
            {
            case V8SFmode:
              return (TYPE_UNSIGNED (dest_type)
                      ? NULL_TREE
                      : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ256]);
            default:
              return NULL_TREE;
            }

        default:
          return NULL_TREE;
        }

    default:
      return NULL_TREE;
    }
}
/* Returns a code for a target-specific builtin that implements
   reciprocal of the function, or NULL_TREE if not available.  */

static tree
ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
                         bool sqrt ATTRIBUTE_UNUSED)
{
  if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
         && flag_finite_math_only && !flag_trapping_math
         && flag_unsafe_math_optimizations))
    return NULL_TREE;

  if (md_fn)
    /* Machine dependent builtins.  */
    switch (fn)
      {
        /* Vectorized version of sqrt to rsqrt conversion.  */
      case IX86_BUILTIN_SQRTPS_NR:
        return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];

      default:
        return NULL_TREE;
      }
  else
    /* Normal builtins.  */
    switch (fn)
      {
        /* Sqrt to rsqrt conversion.  */
      case BUILT_IN_SQRTF:
        return ix86_builtins[IX86_BUILTIN_RSQRTF];

      default:
        return NULL_TREE;
      }
}
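/* Sketch: when the conditions above hold, the middle end can rewrite
   1.0f / sqrtf (x) (or sqrtf itself) in terms of IX86_BUILTIN_RSQRTF,
   i.e. an rsqrtss estimate refined by a Newton-Raphson step, instead of
   issuing a full-latency sqrtss/divss sequence.  */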
/* Helper for avx_vpermilps256_operand et al.  This is also used by
   the expansion functions to turn the parallel back into a mask.
   The return value is 0 for no match and the imm8+1 for a match.  */

int
avx_vpermilp_parallel (rtx par, enum machine_mode mode)
{
  unsigned i, nelt = GET_MODE_NUNITS (mode);
  unsigned mask = 0;
  unsigned char ipar[8];

  if (XVECLEN (par, 0) != (int) nelt)
    return 0;

  /* Validate that all of the elements are constants, and not totally
     out of range.  Copy the data into an integral array to make the
     subsequent checks easier.  */
  for (i = 0; i < nelt; ++i)
    {
      rtx er = XVECEXP (par, 0, i);
      unsigned HOST_WIDE_INT ei;

      if (!CONST_INT_P (er))
        return 0;
      ei = INTVAL (er);
      if (ei >= nelt)
        return 0;
      ipar[i] = ei;
    }

  switch (mode)
    {
    case V4DFmode:
      /* In the 256-bit DFmode case, we can only move elements within
         a 128-bit lane.  */
      for (i = 0; i < 2; ++i)
        {
          if (ipar[i] >= 2)
            return 0;
          mask |= ipar[i] << i;
        }
      for (i = 2; i < 4; ++i)
        {
          if (ipar[i] < 2)
            return 0;
          mask |= (ipar[i] - 2) << i;
        }
      break;

    case V8SFmode:
      /* In the 256-bit SFmode case, we have full freedom of movement
         within the low 128-bit lane, but the high 128-bit lane must
         mirror the exact same pattern.  */
      for (i = 0; i < 4; ++i)
        if (ipar[i] + 4 != ipar[i + 4])
          return 0;
      nelt = 4;
      /* FALLTHRU */

    case V2DFmode:
    case V4SFmode:
      /* In the 128-bit case, we have full freedom in the placement of
         the elements from the source operand.  */
      for (i = 0; i < nelt; ++i)
        mask |= ipar[i] << (i * (nelt / 2));
      break;

    default:
      gcc_unreachable ();
    }

  /* Make sure success has a non-zero value by adding one.  */
  return mask + 1;
}
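/* Worked example: for V4SFmode the parallel (1 0 3 2) gives
   mask = 1<<0 | 0<<2 | 3<<4 | 2<<6 = 0xb1, so the function returns
   0xb2 and callers recover the vpermilps immediate as 0xb2 - 1.  */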
/* Helper for avx_vperm2f128_v4df_operand et al.  This is also used by
   the expansion functions to turn the parallel back into a mask.
   The return value is 0 for no match and the imm8+1 for a match.  */

int
avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
{
  unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
  unsigned mask = 0;
  unsigned char ipar[8];

  if (XVECLEN (par, 0) != (int) nelt)
    return 0;

  /* Validate that all of the elements are constants, and not totally
     out of range.  Copy the data into an integral array to make the
     subsequent checks easier.  */
  for (i = 0; i < nelt; ++i)
    {
      rtx er = XVECEXP (par, 0, i);
      unsigned HOST_WIDE_INT ei;

      if (!CONST_INT_P (er))
        return 0;
      ei = INTVAL (er);
      if (ei >= 2 * nelt)
        return 0;
      ipar[i] = ei;
    }

  /* Validate that the halves of the permute are halves.  */
  for (i = 0; i < nelt2 - 1; ++i)
    if (ipar[i] + 1 != ipar[i + 1])
      return 0;
  for (i = nelt2; i < nelt - 1; ++i)
    if (ipar[i] + 1 != ipar[i + 1])
      return 0;

  /* Reconstruct the mask.  */
  for (i = 0; i < 2; ++i)
    {
      unsigned e = ipar[i * nelt2];
      if (e % nelt2)
        return 0;
      e /= nelt2;
      mask |= e << (i * 4);
    }

  /* Make sure success has a non-zero value by adding one.  */
  return mask + 1;
}
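/* Worked example: for V4DFmode (nelt == 4, nelt2 == 2) the parallel
   (0 1 6 7) takes the low half of operand 1 (e = 0/2 = 0) and the
   high half of operand 2 (e = 6/2 = 3), so mask = 3 << 4 = 0x30 and
   the function returns 0x31, one more than the vperm2f128 imm8.  */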
/* Store OPERAND to the memory after reload is completed.  This means
   that we can't easily use assign_stack_local.  */
rtx
ix86_force_to_memory (enum machine_mode mode, rtx operand)
{
  rtx result;

  gcc_assert (reload_completed);
  if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
    {
      result = gen_rtx_MEM (mode,
                            gen_rtx_PLUS (Pmode,
                                          stack_pointer_rtx,
                                          GEN_INT (-RED_ZONE_SIZE)));
      emit_move_insn (result, operand);
    }
  else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
    {
      switch (mode)
        {
        case HImode:
        case SImode:
          operand = gen_lowpart (DImode, operand);
          /* FALLTHRU */
        case DImode:
          emit_insn (
                     gen_rtx_SET (VOIDmode,
                                  gen_rtx_MEM (DImode,
                                               gen_rtx_PRE_DEC (DImode,
                                                        stack_pointer_rtx)),
                                  operand));
          break;
        default:
          gcc_unreachable ();
        }
      result = gen_rtx_MEM (mode, stack_pointer_rtx);
    }
  else
    {
      switch (mode)
        {
        case DImode:
          {
            rtx operands[2];
            split_di (&operand, 1, operands, operands + 1);
            emit_insn (
                       gen_rtx_SET (VOIDmode,
                                    gen_rtx_MEM (SImode,
                                                 gen_rtx_PRE_DEC (Pmode,
                                                        stack_pointer_rtx)),
                                    operands[1]));
            emit_insn (
                       gen_rtx_SET (VOIDmode,
                                    gen_rtx_MEM (SImode,
                                                 gen_rtx_PRE_DEC (Pmode,
                                                        stack_pointer_rtx)),
                                    operands[0]));
          }
          break;
        case HImode:
          /* Store HImodes as SImodes.  */
          operand = gen_lowpart (SImode, operand);
          /* FALLTHRU */
        case SImode:
          emit_insn (
                     gen_rtx_SET (VOIDmode,
                                  gen_rtx_MEM (GET_MODE (operand),
                                               gen_rtx_PRE_DEC (SImode,
                                                        stack_pointer_rtx)),
                                  operand));
          break;
        default:
          gcc_unreachable ();
        }
      result = gen_rtx_MEM (mode, stack_pointer_rtx);
    }
  return result;
}
/* Free operand from the memory.  */
void
ix86_free_from_memory (enum machine_mode mode)
{
  if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
    {
      int size;

      if (mode == DImode || TARGET_64BIT)
        size = 8;
      else
        size = 4;
      /* Use LEA to deallocate stack space.  In peephole2 it will be converted
         to a pop or add instruction if registers are available.  */
      emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                              gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                            GEN_INT (size))));
    }
}
/* Implement TARGET_IRA_COVER_CLASSES.  If -mfpmath=sse, we prefer
   SSE_REGS to FLOAT_REGS if their costs for a pseudo are the
   same.  */
static const reg_class_t *
i386_ira_cover_classes (void)
{
  static const reg_class_t sse_fpmath_classes[] = {
    GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
  };
  static const reg_class_t no_sse_fpmath_classes[] = {
    GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
  };

  return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
}
25309 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
25310 QImode must go into class Q_REGS.
25311 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
25312 movdf to do mem-to-mem moves through integer regs. */
25314 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
25316 enum machine_mode mode = GET_MODE (x);
25318 /* We're only allowed to return a subclass of CLASS. Many of the
25319 following checks fail for NO_REGS, so eliminate that early. */
25320 if (regclass == NO_REGS)
25323 /* All classes can load zeros. */
25324 if (x == CONST0_RTX (mode))
25327 /* Force constants into memory if we are loading a (nonzero) constant into
25328 an MMX or SSE register. This is because there are no MMX/SSE instructions
25329 to load from a constant. */
25331 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
25334 /* Prefer SSE regs only, if we can use them for math. */
25335 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
25336 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
25338 /* Floating-point constants need more complex checks. */
25339 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
25341 /* General regs can load everything. */
25342 if (reg_class_subset_p (regclass, GENERAL_REGS))
25345 /* Floats can load 0 and 1 plus some others. Note that we eliminated
25346 zero above. We only want to wind up preferring 80387 registers if
25347 we plan on doing computation with them. */
25349 && standard_80387_constant_p (x))
25351 /* Limit class to non-sse. */
25352 if (regclass == FLOAT_SSE_REGS)
25354 if (regclass == FP_TOP_SSE_REGS)
25356 if (regclass == FP_SECOND_SSE_REGS)
25357 return FP_SECOND_REG;
25358 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
25365 /* Generally when we see PLUS here, it's the function invariant
25366 (plus soft-fp const_int), which can only be computed into general regs. */
25368 if (GET_CODE (x) == PLUS)
25369 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
25371 /* QImode constants are easy to load, but non-constant QImode data
25372 must go into Q_REGS. */
25373 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
25375 if (reg_class_subset_p (regclass, Q_REGS))
25377 if (reg_class_subset_p (Q_REGS, regclass))
25385 /* Discourage putting floating-point values in SSE registers unless
25386 SSE math is being used, and likewise for the 387 registers. */
25388 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
25390 enum machine_mode mode = GET_MODE (x);
25392 /* Restrict the output reload class to the register bank that we are doing
25393 math on. If we would like not to return a subset of CLASS, reject this
25394 alternative: if reload cannot do this, it will still use its choice. */
25396 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
25397 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
25399 if (X87_FLOAT_MODE_P (mode))
25401 if (regclass == FP_TOP_SSE_REGS)
25403 else if (regclass == FP_SECOND_SSE_REGS)
25404 return FP_SECOND_REG;
25406 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
25413 ix86_secondary_reload (bool in_p, rtx x, reg_class_t rclass,
25414 enum machine_mode mode,
25415 secondary_reload_info *sri ATTRIBUTE_UNUSED)
25417 /* QImode spills from non-QI registers require an
25418 intermediate register on 32-bit targets. */
25419 if (!in_p && mode == QImode && !TARGET_64BIT
25420 && (rclass == GENERAL_REGS
25421 || rclass == LEGACY_REGS
25422 || rclass == INDEX_REGS))
25431 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
25432 regno = true_regnum (x);
25434 /* Return Q_REGS if the operand is in memory. */
25442 /* If we are copying between general and FP registers, we need a memory
25443 location. The same is true for SSE and MMX registers.
25445 To optimize register_move_cost performance, allow inline variant.
25447 The macro can't work reliably when one of the CLASSES is a class containing
25448 registers from multiple units (SSE, MMX, integer). We avoid this by never
25449 combining those units in a single alternative in the machine description.
25450 Ensure that this constraint holds to avoid unexpected surprises.
25452 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
25453 enforce these sanity checks. */
25456 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25457 enum machine_mode mode, int strict)
25459 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
25460 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
25461 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
25462 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
25463 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
25464 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
25466 gcc_assert (!strict);
25470 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
25473 /* ??? This is a lie. We do have moves between mmx/general and between
25474 mmx/sse2. But by saying we need secondary memory we discourage the
25475 register allocator from using the mmx registers unless needed. */
25476 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
25479 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25481 /* SSE1 doesn't have any direct moves from other classes. */
25485 /* If the target says that inter-unit moves are more expensive
25486 than moving through memory, then don't generate them. */
25487 if (!TARGET_INTER_UNIT_MOVES)
25490 /* Between SSE and general, we have moves no larger than word size. */
25491 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
25499 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25500 enum machine_mode mode, int strict)
25502 return inline_secondary_memory_needed (class1, class2, mode, strict);
25505 /* Return true if the registers in CLASS cannot represent the change from
25506 modes FROM to TO. */
25509 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
25510 enum reg_class regclass)
25515 /* x87 registers can't do subreg at all, as all values are reformatted
25516 to extended precision. */
25517 if (MAYBE_FLOAT_CLASS_P (regclass))
25520 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25522 /* Vector registers do not support QI or HImode loads. If we don't
25523 disallow a change to these modes, reload will assume it's ok to
25524 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25525 the vec_dupv4hi pattern. */
25526 if (GET_MODE_SIZE (from) < 4)
25529 /* Vector registers do not support subreg with nonzero offsets, which
25530 are otherwise valid for integer registers. Since we can't see
25531 whether we have a nonzero offset from here, prohibit all
25532 nonparadoxical subregs changing size. */
25533 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
25540 /* Return the cost of moving data of mode M between a
25541 register and memory. A value of 2 is the default; this cost is
25542 relative to those in `REGISTER_MOVE_COST'.
25544 This function is used extensively by register_move_cost, which is used to
25545 build tables at startup. Make it inline in this case.
25546 When IN is 2, return the maximum of the in and out move costs.
25548 If moving between registers and memory is more expensive than
25549 between two registers, you should define this macro to express the
25550 relative cost.
25552 Also model the increased cost of moving QImode registers in the non-Q_REGS classes. */
25556 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25560 if (FLOAT_CLASS_P (regclass))
25578 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25579 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25581 if (SSE_CLASS_P (regclass))
25584 switch (GET_MODE_SIZE (mode))
25599 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25600 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25602 if (MMX_CLASS_P (regclass))
25605 switch (GET_MODE_SIZE (mode))
25617 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25618 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25620 switch (GET_MODE_SIZE (mode))
25623 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25626 return ix86_cost->int_store[0];
25627 if (TARGET_PARTIAL_REG_DEPENDENCY
25628 && optimize_function_for_speed_p (cfun))
25629 cost = ix86_cost->movzbl_load;
25631 cost = ix86_cost->int_load[0];
25633 return MAX (cost, ix86_cost->int_store[0]);
25639 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25641 return ix86_cost->movzbl_load;
25643 return ix86_cost->int_store[0] + 4;
25648 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25649 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25651 /* Compute the number of 32-bit moves needed. TFmode is moved as XFmode. */
25652 if (mode == TFmode)
25655 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25657 cost = ix86_cost->int_load[2];
25659 cost = ix86_cost->int_store[2];
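/* The return below uses a ceiling division: e.g. moving a DFmode value
   through integer registers on a 32-bit target costs
   (8 + 4 - 1) / 4 = 2 word-sized moves. */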
25660 return (cost * (((int) GET_MODE_SIZE (mode)
25661 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
25666 ix86_memory_move_cost (enum machine_mode mode, reg_class_t regclass,
25669 return inline_memory_move_cost (mode, (enum reg_class) regclass, in ? 1 : 0);
25673 /* Return the cost of moving data from a register in class CLASS1 to
25674 one in class CLASS2.
25676 It is not required that the cost always equal 2 when CLASS1 is the same as
25677 CLASS2; on some machines it is expensive to move between registers if they
25678 are not general registers. */
25681 ix86_register_move_cost (enum machine_mode mode, reg_class_t class1_i,
25682 reg_class_t class2_i)
25684 enum reg_class class1 = (enum reg_class) class1_i;
25685 enum reg_class class2 = (enum reg_class) class2_i;
25687 /* In case we require secondary memory, compute cost of the store followed
25688 by load. In order to avoid bad register allocation choices, we need
25689 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
25691 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25695 cost += inline_memory_move_cost (mode, class1, 2);
25696 cost += inline_memory_move_cost (mode, class2, 2);
25698 /* In the case of copying from a general purpose register we may emit
25699 multiple stores followed by a single load, causing a memory size
25700 mismatch stall. Count this as an arbitrarily high cost of 20. */
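/* Example: copying DImode from GENERAL_REGS to MMX_REGS on a 32-bit
   target goes through memory, where two 32-bit stores feed a single
   64-bit load; the extra cost of 20 models that store-forwarding
   stall. */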
25701 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25704 /* In the case of FP/MMX moves, the registers actually overlap, and we
25705 have to switch modes in order to treat them differently. */
25706 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25707 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25713 /* Moves between SSE/MMX and integer unit are expensive. */
25714 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25715 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25717 /* ??? By keeping the returned value relatively high, we limit the number
25718 of moves between integer and MMX/SSE registers for all targets.
25719 Additionally, the high value prevents a problem with x86_modes_tieable_p(),
25720 where integer modes in MMX/SSE registers are not tieable
25721 because of missing QImode and HImode moves to, from or between
25722 MMX/SSE registers. */
25723 return MAX (8, ix86_cost->mmxsse_to_integer);
25725 if (MAYBE_FLOAT_CLASS_P (class1))
25726 return ix86_cost->fp_move;
25727 if (MAYBE_SSE_CLASS_P (class1))
25728 return ix86_cost->sse_move;
25729 if (MAYBE_MMX_CLASS_P (class1))
25730 return ix86_cost->mmx_move;
25734 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25737 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25739 /* Flags and only flags can only hold CCmode values. */
25740 if (CC_REGNO_P (regno))
25741 return GET_MODE_CLASS (mode) == MODE_CC;
25742 if (GET_MODE_CLASS (mode) == MODE_CC
25743 || GET_MODE_CLASS (mode) == MODE_RANDOM
25744 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25746 if (FP_REGNO_P (regno))
25747 return VALID_FP_MODE_P (mode);
25748 if (SSE_REGNO_P (regno))
25750 /* We implement the move patterns for all vector modes into and
25751 out of SSE registers, even when no operation instructions
25752 are available. OImode move is available only when AVX is enabled. */
25754 return ((TARGET_AVX && mode == OImode)
25755 || VALID_AVX256_REG_MODE (mode)
25756 || VALID_SSE_REG_MODE (mode)
25757 || VALID_SSE2_REG_MODE (mode)
25758 || VALID_MMX_REG_MODE (mode)
25759 || VALID_MMX_REG_MODE_3DNOW (mode));
25761 if (MMX_REGNO_P (regno))
25763 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25764 so if the register is available at all, then we can move data of
25765 the given mode into or out of it. */
25766 return (VALID_MMX_REG_MODE (mode)
25767 || VALID_MMX_REG_MODE_3DNOW (mode));
25770 if (mode == QImode)
25772 /* Take care with QImode values - they can live in non-QI regs,
25773 but may then cause partial register stalls. */
25774 if (regno <= BX_REG || TARGET_64BIT)
25776 if (!TARGET_PARTIAL_REG_STALL)
25778 return reload_in_progress || reload_completed;
25780 /* We handle both integers and floats in the general purpose registers. */
25781 else if (VALID_INT_MODE_P (mode))
25783 else if (VALID_FP_MODE_P (mode))
25785 else if (VALID_DFP_MODE_P (mode))
25787 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25788 on to use that value in smaller contexts, this can easily force a
25789 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25790 supporting DImode, allow it. */
25791 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25797 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25798 tieable integer mode. */
25801 ix86_tieable_integer_mode_p (enum machine_mode mode)
25810 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25813 return TARGET_64BIT;
25820 /* Return true if MODE1 is accessible in a register that can hold MODE2
25821 without copying. That is, all register classes that can hold MODE2
25822 can also hold MODE1. */
25825 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25827 if (mode1 == mode2)
25830 if (ix86_tieable_integer_mode_p (mode1)
25831 && ix86_tieable_integer_mode_p (mode2))
25834 /* MODE2 being XFmode implies fp stack or general regs, which means we
25835 can tie any smaller floating point modes to it. Note that we do not
25836 tie this with TFmode. */
25837 if (mode2 == XFmode)
25838 return mode1 == SFmode || mode1 == DFmode;
25840 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
25841 that we can tie it with SFmode. */
25842 if (mode2 == DFmode)
25843 return mode1 == SFmode;
25845 /* If MODE2 is only appropriate for an SSE register, then tie with
25846 any other mode acceptable to SSE registers. */
25847 if (GET_MODE_SIZE (mode2) == 16
25848 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
25849 return (GET_MODE_SIZE (mode1) == 16
25850 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
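/* E.g. V4SFmode and V2DImode both have size 16 and are both valid in
   SSE registers, so they are tieable with each other. */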
25852 /* If MODE2 is appropriate for an MMX register, then tie
25853 with any other mode acceptable to MMX registers. */
25854 if (GET_MODE_SIZE (mode2) == 8
25855 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
25856 return (GET_MODE_SIZE (mode1) == 8
25857 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
25862 /* Compute a (partial) cost for rtx X. Return true if the complete
25863 cost has been computed, and false if subexpressions should be
25864 scanned. In either case, *TOTAL contains the cost result. */
25867 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
25869 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
25870 enum machine_mode mode = GET_MODE (x);
25871 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
25879 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
25881 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
25883 else if (flag_pic && SYMBOLIC_CONST (x)
25884 && (!TARGET_64BIT
25885 || (GET_CODE (x) != LABEL_REF
25886 && (GET_CODE (x) != SYMBOL_REF
25887 || !SYMBOL_REF_LOCAL_P (x)))))
25894 if (mode == VOIDmode)
25897 switch (standard_80387_constant_p (x))
25902 default: /* Other constants */
25907 /* Start with (MEM (SYMBOL_REF)), since that's where
25908 it'll probably end up. Add a penalty for size. */
25909 *total = (COSTS_N_INSNS (1)
25910 + (flag_pic != 0 && !TARGET_64BIT)
25911 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
25917 /* The zero extension is often completely free on x86_64, so make
25918 it as cheap as possible. */
25919 if (TARGET_64BIT && mode == DImode
25920 && GET_MODE (XEXP (x, 0)) == SImode)
25922 else if (TARGET_ZERO_EXTEND_WITH_AND)
25923 *total = cost->add;
25925 *total = cost->movzx;
25929 *total = cost->movsx;
25933 if (CONST_INT_P (XEXP (x, 1))
25934 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
25936 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25939 *total = cost->add;
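/* A left shift by 2 or 3 can also be done with an lea scale of 4 or 8,
   e.g. "leal 0(,%eax,4), %eax" instead of "sall $2, %eax", so prefer
   lea when it is no more expensive than a constant shift. */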
25942 if ((value == 2 || value == 3)
25943 && cost->lea <= cost->shift_const)
25945 *total = cost->lea;
25955 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
25957 if (CONST_INT_P (XEXP (x, 1)))
25959 if (INTVAL (XEXP (x, 1)) > 32)
25960 *total = cost->shift_const + COSTS_N_INSNS (2);
25962 *total = cost->shift_const * 2;
25966 if (GET_CODE (XEXP (x, 1)) == AND)
25967 *total = cost->shift_var * 2;
25969 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
25974 if (CONST_INT_P (XEXP (x, 1)))
25975 *total = cost->shift_const;
25977 *total = cost->shift_var;
25982 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25984 /* ??? SSE scalar cost should be used here. */
25985 *total = cost->fmul;
25988 else if (X87_FLOAT_MODE_P (mode))
25990 *total = cost->fmul;
25993 else if (FLOAT_MODE_P (mode))
25995 /* ??? SSE vector cost should be used here. */
25996 *total = cost->fmul;
26001 rtx op0 = XEXP (x, 0);
26002 rtx op1 = XEXP (x, 1);
26004 if (CONST_INT_P (XEXP (x, 1)))
26006 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
26007 for (nbits = 0; value != 0; value &= value - 1)
26008 nbits++;
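/* The loop above is the classic popcount idiom: value &= value - 1
   clears exactly one set bit per iteration, so NBITS ends up as the
   number of set bits in the multiplier. */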
26011 /* This is arbitrary. */
26014 /* Compute costs correctly for widening multiplication. */
26015 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
26016 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
26017 == GET_MODE_SIZE (mode))
26019 int is_mulwiden = 0;
26020 enum machine_mode inner_mode = GET_MODE (op0);
26022 if (GET_CODE (op0) == GET_CODE (op1))
26023 is_mulwiden = 1, op1 = XEXP (op1, 0);
26024 else if (CONST_INT_P (op1))
26026 if (GET_CODE (op0) == SIGN_EXTEND)
26027 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
26030 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
26034 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
26037 *total = (cost->mult_init[MODE_INDEX (mode)]
26038 + nbits * cost->mult_bit
26039 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
26048 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26049 /* ??? SSE cost should be used here. */
26050 *total = cost->fdiv;
26051 else if (X87_FLOAT_MODE_P (mode))
26052 *total = cost->fdiv;
26053 else if (FLOAT_MODE_P (mode))
26054 /* ??? SSE vector cost should be used here. */
26055 *total = cost->fdiv;
26057 *total = cost->divide[MODE_INDEX (mode)];
26061 if (GET_MODE_CLASS (mode) == MODE_INT
26062 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
26064 if (GET_CODE (XEXP (x, 0)) == PLUS
26065 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
26066 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
26067 && CONSTANT_P (XEXP (x, 1)))
26069 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
26070 if (val == 2 || val == 4 || val == 8)
26072 *total = cost->lea;
26073 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
26074 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
26075 outer_code, speed);
26076 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26080 else if (GET_CODE (XEXP (x, 0)) == MULT
26081 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
26083 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
26084 if (val == 2 || val == 4 || val == 8)
26086 *total = cost->lea;
26087 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
26088 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26092 else if (GET_CODE (XEXP (x, 0)) == PLUS)
26094 *total = cost->lea;
26095 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
26096 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
26097 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26104 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26106 /* ??? SSE cost should be used here. */
26107 *total = cost->fadd;
26110 else if (X87_FLOAT_MODE_P (mode))
26112 *total = cost->fadd;
26115 else if (FLOAT_MODE_P (mode))
26117 /* ??? SSE vector cost should be used here. */
26118 *total = cost->fadd;
26126 if (!TARGET_64BIT && mode == DImode)
26128 *total = (cost->add * 2
26129 + (rtx_cost (XEXP (x, 0), outer_code, speed)
26130 << (GET_MODE (XEXP (x, 0)) != DImode))
26131 + (rtx_cost (XEXP (x, 1), outer_code, speed)
26132 << (GET_MODE (XEXP (x, 1)) != DImode)));
26138 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26140 /* ??? SSE cost should be used here. */
26141 *total = cost->fchs;
26144 else if (X87_FLOAT_MODE_P (mode))
26146 *total = cost->fchs;
26149 else if (FLOAT_MODE_P (mode))
26151 /* ??? SSE vector cost should be used here. */
26152 *total = cost->fchs;
26158 if (!TARGET_64BIT && mode == DImode)
26159 *total = cost->add * 2;
26161 *total = cost->add;
26165 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
26166 && XEXP (XEXP (x, 0), 1) == const1_rtx
26167 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
26168 && XEXP (x, 1) == const0_rtx)
26170 /* This kind of construct is implemented using test[bwl].
26171 Treat it as if we had an AND. */
26172 *total = (cost->add
26173 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
26174 + rtx_cost (const1_rtx, outer_code, speed));
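/* E.g. (compare (zero_extract x 1 2) 0) is a single-bit test and
   assembles to something like "testl $4, %eax". */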
26180 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
26185 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26186 /* ??? SSE cost should be used here. */
26187 *total = cost->fabs;
26188 else if (X87_FLOAT_MODE_P (mode))
26189 *total = cost->fabs;
26190 else if (FLOAT_MODE_P (mode))
26191 /* ??? SSE vector cost should be used here. */
26192 *total = cost->fabs;
26196 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26197 /* ??? SSE cost should be used here. */
26198 *total = cost->fsqrt;
26199 else if (X87_FLOAT_MODE_P (mode))
26200 *total = cost->fsqrt;
26201 else if (FLOAT_MODE_P (mode))
26202 /* ??? SSE vector cost should be used here. */
26203 *total = cost->fsqrt;
26207 if (XINT (x, 1) == UNSPEC_TP)
26214 case VEC_DUPLICATE:
26215 /* ??? Assume all of these vector manipulation patterns are
26216 recognizable, in which case they all pretty much have the same cost. */
26218 *total = COSTS_N_INSNS (1);
26228 static int current_machopic_label_num;
26230 /* Given a symbol name and its associated stub, write out the
26231 definition of the stub. */
26234 machopic_output_stub (FILE *file, const char *symb, const char *stub)
26236 unsigned int length;
26237 char *binder_name, *symbol_name, lazy_ptr_name[32];
26238 int label = ++current_machopic_label_num;
26240 /* For 64-bit we shouldn't get here. */
26241 gcc_assert (!TARGET_64BIT);
26243 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
26244 symb = (*targetm.strip_name_encoding) (symb);
26246 length = strlen (stub);
26247 binder_name = XALLOCAVEC (char, length + 32);
26248 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
26250 length = strlen (symb);
26251 symbol_name = XALLOCAVEC (char, length + 32);
26252 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
26254 sprintf (lazy_ptr_name, "L%d$lz", label);
26257 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
26259 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
26261 fprintf (file, "%s:\n", stub);
26262 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26266 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
26267 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
26268 fprintf (file, "\tjmp\t*%%edx\n");
26271 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
26273 fprintf (file, "%s:\n", binder_name);
26277 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
26278 fputs ("\tpushl\t%eax\n", file);
26281 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
26283 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
26285 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
26286 fprintf (file, "%s:\n", lazy_ptr_name);
26287 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26288 fprintf (file, ASM_LONG "%s\n", binder_name);
26290 #endif /* TARGET_MACHO */
26292 /* Order the registers for the register allocator. */
26295 x86_order_regs_for_local_alloc (void)
26300 /* First allocate the local general purpose registers. */
26301 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26302 if (GENERAL_REGNO_P (i) && call_used_regs[i])
26303 reg_alloc_order [pos++] = i;
26305 /* Global general purpose registers. */
26306 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26307 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
26308 reg_alloc_order [pos++] = i;
26310 /* x87 registers come first in case we are doing FP math using them. */
26312 if (!TARGET_SSE_MATH)
26313 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26314 reg_alloc_order [pos++] = i;
26316 /* SSE registers. */
26317 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
26318 reg_alloc_order [pos++] = i;
26319 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
26320 reg_alloc_order [pos++] = i;
26322 /* x87 registers. */
26323 if (TARGET_SSE_MATH)
26324 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26325 reg_alloc_order [pos++] = i;
26327 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
26328 reg_alloc_order [pos++] = i;
26330 /* Initialize the rest of the array, as we do not allocate some registers at all. */
26332 while (pos < FIRST_PSEUDO_REGISTER)
26333 reg_alloc_order [pos++] = 0;
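/* The resulting order is thus: call-clobbered general registers,
   call-saved general registers, then the x87 stack ahead of the SSE
   registers when doing x87 math (behind them otherwise), and the MMX
   registers last. */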
26336 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
26337 struct attribute_spec.handler. */
26339 ix86_handle_abi_attribute (tree *node, tree name,
26340 tree args ATTRIBUTE_UNUSED,
26341 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26343 if (TREE_CODE (*node) != FUNCTION_TYPE
26344 && TREE_CODE (*node) != METHOD_TYPE
26345 && TREE_CODE (*node) != FIELD_DECL
26346 && TREE_CODE (*node) != TYPE_DECL)
26348 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26350 *no_add_attrs = true;
26355 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
26357 *no_add_attrs = true;
26361 /* The ms_abi and sysv_abi attributes are mutually exclusive. */
26362 if (is_attribute_p ("ms_abi", name))
26364 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
26366 error ("ms_abi and sysv_abi attributes are not compatible");
26371 else if (is_attribute_p ("sysv_abi", name))
26373 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
26375 error ("ms_abi and sysv_abi attributes are not compatible");
26384 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
26385 struct attribute_spec.handler. */
26387 ix86_handle_struct_attribute (tree *node, tree name,
26388 tree args ATTRIBUTE_UNUSED,
26389 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26392 if (DECL_P (*node))
26394 if (TREE_CODE (*node) == TYPE_DECL)
26395 type = &TREE_TYPE (*node);
26400 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
26401 || TREE_CODE (*type) == UNION_TYPE)))
26403 warning (OPT_Wattributes, "%qE attribute ignored",
26405 *no_add_attrs = true;
26408 else if ((is_attribute_p ("ms_struct", name)
26409 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
26410 || ((is_attribute_p ("gcc_struct", name)
26411 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
26413 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
26415 *no_add_attrs = true;
26422 ix86_handle_fndecl_attribute (tree *node, tree name,
26423 tree args ATTRIBUTE_UNUSED,
26424 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26426 if (TREE_CODE (*node) != FUNCTION_DECL)
26428 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26430 *no_add_attrs = true;
26436 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
26441 #ifndef HAVE_AS_IX86_SWAP
26442 sorry ("ms_hook_prologue attribute needs assembler swap suffix support");
26449 ix86_ms_bitfield_layout_p (const_tree record_type)
26451 return ((TARGET_MS_BITFIELD_LAYOUT
26452 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
26453 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
26456 /* Returns an expression indicating where the this parameter is
26457 located on entry to the FUNCTION. */
26460 x86_this_parameter (tree function)
26462 tree type = TREE_TYPE (function);
26463 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
26468 const int *parm_regs;
26470 if (ix86_function_type_abi (type) == MS_ABI)
26471 parm_regs = x86_64_ms_abi_int_parameter_registers;
26473 parm_regs = x86_64_int_parameter_registers;
26474 return gen_rtx_REG (DImode, parm_regs[aggr]);
26477 nregs = ix86_function_regparm (type, function);
26479 if (nregs > 0 && !stdarg_p (type))
26483 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
26484 regno = aggr ? DX_REG : CX_REG;
26485 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
26489 return gen_rtx_MEM (SImode,
26490 plus_constant (stack_pointer_rtx, 4));
26499 return gen_rtx_MEM (SImode,
26500 plus_constant (stack_pointer_rtx, 4));
26503 return gen_rtx_REG (SImode, regno);
26506 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
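/* In the common 32-bit case "this" is therefore the first stack
   argument: the return address occupies (%esp) on entry, so "this"
   is at 4(%esp), or at 8(%esp) when a hidden aggregate-return
   pointer is pushed first. */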
26509 /* Determine whether x86_output_mi_thunk can succeed. */
26512 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
26513 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
26514 HOST_WIDE_INT vcall_offset, const_tree function)
26516 /* 64-bit can handle anything. */
26520 /* For 32-bit, everything's fine if we have one free register. */
26521 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
26524 /* Need a free register for vcall_offset. */
26528 /* Need a free register for GOT references. */
26529 if (flag_pic && !(*targetm.binds_local_p) (function))
26532 /* Otherwise ok. */
26536 /* Output the assembler code for a thunk function. THUNK_DECL is the
26537 declaration for the thunk function itself, FUNCTION is the decl for
26538 the target function. DELTA is an immediate constant offset to be
26539 added to THIS. If VCALL_OFFSET is nonzero, the word at
26540 *(*this + vcall_offset) should be added to THIS. */
26543 x86_output_mi_thunk (FILE *file,
26544 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
26545 HOST_WIDE_INT vcall_offset, tree function)
26548 rtx this_param = x86_this_parameter (function);
26551 /* Make sure unwind info is emitted for the thunk if needed. */
26552 final_start_function (emit_barrier (), file, 1);
26554 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
26555 pull it in now and let DELTA benefit. */
26556 if (REG_P (this_param))
26557 this_reg = this_param;
26558 else if (vcall_offset)
26560 /* Put the this parameter into %eax. */
26561 xops[0] = this_param;
26562 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
26563 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26566 this_reg = NULL_RTX;
26568 /* Adjust the this parameter by a fixed constant. */
26571 xops[0] = GEN_INT (delta);
26572 xops[1] = this_reg ? this_reg : this_param;
26575 if (!x86_64_general_operand (xops[0], DImode))
26577 tmp = gen_rtx_REG (DImode, R10_REG);
26579 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
26581 xops[1] = this_param;
26583 if (x86_maybe_negate_const_int (&xops[0], DImode))
26584 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
26586 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
26588 else if (x86_maybe_negate_const_int (&xops[0], SImode))
26589 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
26591 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
26594 /* Adjust the this parameter by a value stored in the vtable. */
26598 tmp = gen_rtx_REG (DImode, R10_REG);
26601 int tmp_regno = CX_REG;
26602 if (lookup_attribute ("fastcall",
26603 TYPE_ATTRIBUTES (TREE_TYPE (function)))
26604 || lookup_attribute ("thiscall",
26605 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26606 tmp_regno = AX_REG;
26607 tmp = gen_rtx_REG (SImode, tmp_regno);
26610 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26612 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26614 /* Adjust the this parameter. */
26615 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26616 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26618 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26619 xops[0] = GEN_INT (vcall_offset);
26621 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26622 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26624 xops[1] = this_reg;
26625 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26628 /* If necessary, drop THIS back to its stack slot. */
26629 if (this_reg && this_reg != this_param)
26631 xops[0] = this_reg;
26632 xops[1] = this_param;
26633 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26636 xops[0] = XEXP (DECL_RTL (function), 0);
26639 if (!flag_pic || (*targetm.binds_local_p) (function))
26640 output_asm_insn ("jmp\t%P0", xops);
26641 /* All thunks should be in the same object as their target,
26642 and thus binds_local_p should be true. */
26643 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26644 gcc_unreachable ();
26647 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26648 tmp = gen_rtx_CONST (Pmode, tmp);
26649 tmp = gen_rtx_MEM (QImode, tmp);
26651 output_asm_insn ("jmp\t%A0", xops);
26656 if (!flag_pic || (*targetm.binds_local_p) (function))
26657 output_asm_insn ("jmp\t%P0", xops);
26662 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26663 tmp = (gen_rtx_SYMBOL_REF
26665 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26666 tmp = gen_rtx_MEM (QImode, tmp);
26668 output_asm_insn ("jmp\t%0", xops);
26671 #endif /* TARGET_MACHO */
26673 tmp = gen_rtx_REG (SImode, CX_REG);
26674 output_set_got (tmp, NULL_RTX);
26677 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26678 output_asm_insn ("jmp\t{*}%1", xops);
26681 final_end_function ();
26685 x86_file_start (void)
26687 default_file_start ();
26689 darwin_file_start ();
26691 if (X86_FILE_START_VERSION_DIRECTIVE)
26692 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26693 if (X86_FILE_START_FLTUSED)
26694 fputs ("\t.global\t__fltused\n", asm_out_file);
26695 if (ix86_asm_dialect == ASM_INTEL)
26696 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26700 x86_field_alignment (tree field, int computed)
26702 enum machine_mode mode;
26703 tree type = TREE_TYPE (field);
26705 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26707 mode = TYPE_MODE (strip_array_types (type));
26708 if (mode == DFmode || mode == DCmode
26709 || GET_MODE_CLASS (mode) == MODE_INT
26710 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26711 return MIN (32, computed);
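/* E.g. under the default -m32 rules a double field is capped at
   32-bit alignment here, matching the traditional ia32 System V
   struct layout; -malign-double (TARGET_ALIGN_DOUBLE) lifts the cap. */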
26715 /* Output assembler code to FILE to increment profiler label # LABELNO
26716 for profiling a function entry. */
26718 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26722 #ifndef NO_PROFILE_COUNTERS
26723 fprintf (file, "\tleaq\t%sP%d(%%rip),%%r11\n", LPREFIX, labelno);
26726 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26727 fputs ("\tcall\t*" MCOUNT_NAME "@GOTPCREL(%rip)\n", file);
26729 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26733 #ifndef NO_PROFILE_COUNTERS
26734 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
26737 fputs ("\tcall\t*" MCOUNT_NAME "@GOT(%ebx)\n", file);
26741 #ifndef NO_PROFILE_COUNTERS
26742 fprintf (file, "\tmovl\t$%sP%d,%%" PROFILE_COUNT_REGISTER "\n",
26745 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26749 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26750 /* We don't have exact information about the insn sizes, but we may assume
26751 quite safely that we are informed about all 1-byte insns and memory
26752 address sizes. This is enough to eliminate unnecessary padding in most cases. */
26756 min_insn_size (rtx insn)
26760 if (!INSN_P (insn) || !active_insn_p (insn))
26763 /* Discard the alignments we've emitted, and jump-table data. */
26764 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26765 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26767 if (JUMP_TABLE_DATA_P (insn))
26770 /* Important case - calls are always 5 bytes.
26771 It is common to have many calls in a row. */
26772 if (CALL_P (insn)
26773 && symbolic_reference_mentioned_p (PATTERN (insn))
26774 && !SIBLING_CALL_P (insn))
26775 return 5;
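/* A direct near call assembles to opcode 0xE8 plus a 32-bit relative
   displacement, hence the constant 5 above. */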
26776 len = get_attr_length (insn);
26780 /* For normal instructions we rely on get_attr_length being exact,
26781 with a few exceptions. */
26782 if (!JUMP_P (insn))
26784 enum attr_type type = get_attr_type (insn);
26789 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
26790 || asm_noperands (PATTERN (insn)) >= 0)
26797 /* Otherwise trust get_attr_length. */
26801 l = get_attr_length_address (insn);
26802 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
26811 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16-byte window. */
26815 ix86_avoid_jump_mispredicts (void)
26817 rtx insn, start = get_insns ();
26818 int nbytes = 0, njumps = 0;
26821 /* Look for all minimal intervals of instructions containing 4 jumps.
26822 The intervals are bounded by START and INSN. NBYTES is the total
26823 size of the instructions in the interval, including INSN and not
26824 including START. When NBYTES is smaller than 16, it is possible
26825 that the ends of START and INSN land in the same 16-byte page.
26827 The smallest offset in the page at which INSN can start is the case
26828 where START ends at offset 0; the offset of INSN is then NBYTES - sizeof (INSN).
26829 We add a p2align to the 16-byte window with maxskip 15 - NBYTES + sizeof (INSN). */
26831 for (insn = start; insn; insn = NEXT_INSN (insn))
26835 if (LABEL_P (insn))
26837 int align = label_to_alignment (insn);
26838 int max_skip = label_to_max_skip (insn);
26842 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
26843 already in the current 16-byte page, because otherwise
26844 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
26845 bytes to reach a 16-byte boundary. */
26847 || (align <= 3 && max_skip != (1 << align) - 1))
26850 fprintf (dump_file, "Label %i with max_skip %i\n",
26851 INSN_UID (insn), max_skip);
26854 while (nbytes + max_skip >= 16)
26856 start = NEXT_INSN (start);
26857 if ((JUMP_P (start)
26858 && GET_CODE (PATTERN (start)) != ADDR_VEC
26859 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26861 njumps--, isjump = 1;
26864 nbytes -= min_insn_size (start);
26870 min_size = min_insn_size (insn);
26871 nbytes += min_size;
26873 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
26874 INSN_UID (insn), min_size);
26876 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26877 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26885 start = NEXT_INSN (start);
26886 if ((JUMP_P (start)
26887 && GET_CODE (PATTERN (start)) != ADDR_VEC
26888 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26890 njumps--, isjump = 1;
26893 nbytes -= min_insn_size (start);
26895 gcc_assert (njumps >= 0);
26897 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26898 INSN_UID (start), INSN_UID (insn), nbytes);
26900 if (njumps == 3 && isjump && nbytes < 16)
26902 int padsize = 15 - nbytes + min_insn_size (insn);
26905 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26906 INSN_UID (insn), padsize);
26907 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
26913 /* AMD Athlon works faster
26914 when RET is not the destination of a conditional jump or directly preceded
26915 by another jump instruction. We avoid the penalty by inserting a NOP just
26916 before the RET instruction in such cases. */
26918 ix86_pad_returns (void)
26923 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26925 basic_block bb = e->src;
26926 rtx ret = BB_END (bb);
26928 bool replace = false;
26930 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26931 || optimize_bb_for_size_p (bb))
26933 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26934 if (active_insn_p (prev) || LABEL_P (prev))
26936 if (prev && LABEL_P (prev))
26941 FOR_EACH_EDGE (e, ei, bb->preds)
26942 if (EDGE_FREQUENCY (e) && e->src->index >= 0
26943 && !(e->flags & EDGE_FALLTHRU))
26948 prev = prev_active_insn (ret);
26950 && ((JUMP_P (prev) && any_condjump_p (prev))
26953 /* Empty functions get a branch mispredict even when the jump destination
26954 is not visible to us. */
26955 if (!prev && !optimize_function_for_size_p (cfun))
26960 emit_jump_insn_before (gen_return_internal_long (), ret);
26966 /* Implement machine specific optimizations. We implement padding of returns
26967 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
26971 if (optimize && optimize_function_for_speed_p (cfun))
26973 if (TARGET_PAD_RETURNS)
26974 ix86_pad_returns ();
26975 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26976 if (TARGET_FOUR_JUMP_LIMIT)
26977 ix86_avoid_jump_mispredicts ();
26982 /* Return nonzero when a QImode register that must be represented via a REX prefix is used. */
26985 x86_extended_QIreg_mentioned_p (rtx insn)
26988 extract_insn_cached (insn);
26989 for (i = 0; i < recog_data.n_operands; i++)
26990 if (REG_P (recog_data.operand[i])
26991 && REGNO (recog_data.operand[i]) > BX_REG)
26996 /* Return nonzero when P points to a register encoded via a REX prefix.
26997 Called via for_each_rtx. */
26999 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
27001 unsigned int regno;
27004 regno = REGNO (*p);
27005 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
27008 /* Return true when INSN mentions a register that must be encoded using a REX prefix. */
27011 x86_extended_reg_mentioned_p (rtx insn)
27013 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
27014 extended_reg_mentioned_1, NULL);
27017 /* If profitable, negate (without causing overflow) integer constant
27018 of mode MODE at location LOC. Return true in this case. */
27020 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
27024 if (!CONST_INT_P (*loc))
27030 /* DImode x86_64 constants must fit in 32 bits. */
27031 gcc_assert (x86_64_immediate_operand (*loc, mode));
27042 gcc_unreachable ();
27045 /* Avoid overflows. */
27046 if (mode_signbit_p (mode, *loc))
27049 val = INTVAL (*loc);
27051 /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
27052 Exceptions: -128 encodes smaller than 128, so swap sign and op. */
27053 if ((val < 0 && val != -128)
27056 *loc = GEN_INT (-val);
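/* The -128 exception works because the sign-extended imm8 encoding
   covers [-128, 127]: "addl $-128, %eax" fits in a one-byte
   immediate, while "subl $128, %eax" would need a four-byte one. */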
27063 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
27064 optabs would emit if we didn't have TFmode patterns. */
27067 x86_emit_floatuns (rtx operands[2])
27069 rtx neglab, donelab, i0, i1, f0, in, out;
27070 enum machine_mode mode, inmode;
27072 inmode = GET_MODE (operands[1]);
27073 gcc_assert (inmode == SImode || inmode == DImode);
27076 in = force_reg (inmode, operands[1]);
27077 mode = GET_MODE (out);
27078 neglab = gen_label_rtx ();
27079 donelab = gen_label_rtx ();
27080 f0 = gen_reg_rtx (mode);
27082 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
27084 expand_float (out, in, 0);
27086 emit_jump_insn (gen_jump (donelab));
27089 emit_label (neglab);
27091 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
27093 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
27095 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
27097 expand_float (f0, i0, 0);
27099 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
27101 emit_label (donelab);
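/* For inputs with the sign bit set, the path above computes
   (x >> 1) | (x & 1), converts that as a signed value, and doubles
   the result; ORing the low bit back in keeps the final rounding
   correct for odd inputs. */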
27104 /* AVX does not support 32-byte integer vector operations,
27105 thus the longest vector we are faced with is V16QImode. */
27106 #define MAX_VECT_LEN 16
27108 struct expand_vec_perm_d
27110 rtx target, op0, op1;
27111 unsigned char perm[MAX_VECT_LEN];
27112 enum machine_mode vmode;
27113 unsigned char nelt;
27117 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
27118 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
27120 /* Get a vector mode of the same size as the original but with elements
27121 twice as wide. This is only guaranteed to apply to integral vectors. */
27123 static inline enum machine_mode
27124 get_mode_wider_vector (enum machine_mode o)
27126 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
27127 enum machine_mode n = GET_MODE_WIDER_MODE (o);
27128 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
27129 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
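/* E.g. V8HImode (eight 16-bit elements) widens to V4SImode: the same
   16 bytes, half as many elements. */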
27133 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27134 with all elements equal to VAR. Return true if successful. */
27137 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
27138 rtx target, rtx val)
27161 /* First attempt to recognize VAL as-is. */
27162 dup = gen_rtx_VEC_DUPLICATE (mode, val);
27163 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
27164 if (recog_memoized (insn) < 0)
27167 /* If that fails, force VAL into a register. */
27170 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
27171 seq = get_insns ();
27174 emit_insn_before (seq, insn);
27176 ok = recog_memoized (insn) >= 0;
27185 if (TARGET_SSE || TARGET_3DNOW_A)
27189 val = gen_lowpart (SImode, val);
27190 x = gen_rtx_TRUNCATE (HImode, val);
27191 x = gen_rtx_VEC_DUPLICATE (mode, x);
27192 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27205 struct expand_vec_perm_d dperm;
27209 memset (&dperm, 0, sizeof (dperm));
27210 dperm.target = target;
27211 dperm.vmode = mode;
27212 dperm.nelt = GET_MODE_NUNITS (mode);
27213 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
27215 /* Extend to SImode using a paradoxical SUBREG. */
27216 tmp1 = gen_reg_rtx (SImode);
27217 emit_move_insn (tmp1, gen_lowpart (SImode, val));
27219 /* Insert the SImode value as low element of a V4SImode vector. */
27220 tmp2 = gen_lowpart (V4SImode, dperm.op0);
27221 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
27223 ok = (expand_vec_perm_1 (&dperm)
27224 || expand_vec_perm_broadcast_1 (&dperm));
27236 /* Replicate the value once into the next wider mode and recurse. */
27238 enum machine_mode smode, wsmode, wvmode;
27241 smode = GET_MODE_INNER (mode);
27242 wvmode = get_mode_wider_vector (mode);
27243 wsmode = GET_MODE_INNER (wvmode);
27245 val = convert_modes (wsmode, smode, val, true);
27246 x = expand_simple_binop (wsmode, ASHIFT, val,
27247 GEN_INT (GET_MODE_BITSIZE (smode)),
27248 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27249 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
27251 x = gen_lowpart (wvmode, target);
27252 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
27260 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
27261 rtx x = gen_reg_rtx (hvmode);
27263 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
27266 x = gen_rtx_VEC_CONCAT (mode, x, x);
27267 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27276 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27277 whose ONE_VAR element is VAR, and the other elements are zero. Return true if successful. */
27281 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
27282 rtx target, rtx var, int one_var)
27284 enum machine_mode vsimode;
27287 bool use_vector_set = false;
27292 /* For SSE4.1, we normally use vector set. But if the second
27293 element is zero and inter-unit moves are OK, we use movq instead. */
27295 use_vector_set = (TARGET_64BIT
27297 && !(TARGET_INTER_UNIT_MOVES
27303 use_vector_set = TARGET_SSE4_1;
27306 use_vector_set = TARGET_SSE2;
27309 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
27316 use_vector_set = TARGET_AVX;
27319 /* Use ix86_expand_vector_set in 64bit mode only. */
27320 use_vector_set = TARGET_AVX && TARGET_64BIT;
27326 if (use_vector_set)
27328 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
27329 var = force_reg (GET_MODE_INNER (mode), var);
27330 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27346 var = force_reg (GET_MODE_INNER (mode), var);
27347 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
27348 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27353 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
27354 new_target = gen_reg_rtx (mode);
27356 new_target = target;
27357 var = force_reg (GET_MODE_INNER (mode), var);
27358 x = gen_rtx_VEC_DUPLICATE (mode, var);
27359 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
27360 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
27363 /* We need to shuffle the value to the correct position, so
27364 create a new pseudo to store the intermediate result. */
27366 /* With SSE2, we can use the integer shuffle insns. */
27367 if (mode != V4SFmode && TARGET_SSE2)
27369 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
27371 GEN_INT (one_var == 1 ? 0 : 1),
27372 GEN_INT (one_var == 2 ? 0 : 1),
27373 GEN_INT (one_var == 3 ? 0 : 1)));
27374 if (target != new_target)
27375 emit_move_insn (target, new_target);
27379 /* Otherwise convert the intermediate result to V4SFmode and
27380 use the SSE1 shuffle instructions. */
27381 if (mode != V4SFmode)
27383 tmp = gen_reg_rtx (V4SFmode);
27384 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
27389 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
27391 GEN_INT (one_var == 1 ? 0 : 1),
27392 GEN_INT (one_var == 2 ? 0+4 : 1+4),
27393 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
27395 if (mode != V4SFmode)
27396 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
27397 else if (tmp != target)
27398 emit_move_insn (target, tmp);
27400 else if (target != new_target)
27401 emit_move_insn (target, new_target);
27406 vsimode = V4SImode;
27412 vsimode = V2SImode;
27418 /* Zero extend the variable element to SImode and recurse. */
27419 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
27421 x = gen_reg_rtx (vsimode);
27422 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
27424 gcc_unreachable ();
27426 emit_move_insn (target, gen_lowpart (mode, x));
27434 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27435 consisting of the values in VALS. It is known that all elements
27436 except ONE_VAR are constants. Return true if successful. */
27439 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
27440 rtx target, rtx vals, int one_var)
27442 rtx var = XVECEXP (vals, 0, one_var);
27443 enum machine_mode wmode;
27446 const_vec = copy_rtx (vals);
27447 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
27448 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
27456 /* For the two element vectors, it's just as easy to use
27457 the general case. */
27461 /* Use ix86_expand_vector_set in 64bit mode only. */
27484 /* There's no way to set one QImode entry easily. Combine
27485 the variable value with its adjacent constant value, and
27486 promote to an HImode set. */
27487 x = XVECEXP (vals, 0, one_var ^ 1);
27490 var = convert_modes (HImode, QImode, var, true);
27491 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
27492 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27493 x = GEN_INT (INTVAL (x) & 0xff);
27497 var = convert_modes (HImode, QImode, var, true);
27498 x = gen_int_mode (INTVAL (x) << 8, HImode);
27500 if (x != const0_rtx)
27501 var = expand_simple_binop (HImode, IOR, var, x, var,
27502 1, OPTAB_LIB_WIDEN);
27504 x = gen_reg_rtx (wmode);
27505 emit_move_insn (x, gen_lowpart (wmode, const_vec));
27506 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
27508 emit_move_insn (target, gen_lowpart (mode, x));
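/* E.g. setting element 3 of a V16QImode vector combines elements 2
   and 3 into one HImode value and performs the set at position 1
   (one_var >> 1) of the V8HImode view of the vector. */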
27515 emit_move_insn (target, const_vec);
27516 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27520 /* A subroutine of ix86_expand_vector_init_general. Use vector
27521 concatenate to handle the most general case: all values variable,
27522 and none identical. */
27525 ix86_expand_vector_init_concat (enum machine_mode mode,
27526 rtx target, rtx *ops, int n)
27528 enum machine_mode cmode, hmode = VOIDmode;
27529 rtx first[8], second[4];
27569 gcc_unreachable ();
27572 if (!register_operand (ops[1], cmode))
27573 ops[1] = force_reg (cmode, ops[1]);
27574 if (!register_operand (ops[0], cmode))
27575 ops[0] = force_reg (cmode, ops[0]);
27576 emit_insn (gen_rtx_SET (VOIDmode, target,
27577 gen_rtx_VEC_CONCAT (mode, ops[0],
27597 gcc_unreachable ();
27613 gcc_unreachable ();
27618 /* FIXME: We process inputs backward to help RA. PR 36222. */
27621 for (; i > 0; i -= 2, j--)
27623 first[j] = gen_reg_rtx (cmode);
27624 v = gen_rtvec (2, ops[i - 1], ops[i]);
27625 ix86_expand_vector_init (false, first[j],
27626 gen_rtx_PARALLEL (cmode, v));
27632 gcc_assert (hmode != VOIDmode);
27633 for (i = j = 0; i < n; i += 2, j++)
27635 second[j] = gen_reg_rtx (hmode);
27636 ix86_expand_vector_init_concat (hmode, second [j],
27640 ix86_expand_vector_init_concat (mode, target, second, n);
27643 ix86_expand_vector_init_concat (mode, target, first, n);
27647 gcc_unreachable ();
27651 /* A subroutine of ix86_expand_vector_init_general. Use vector
27652 interleave to handle the most general case: all values variable,
27653 and none identical. */
27656 ix86_expand_vector_init_interleave (enum machine_mode mode,
27657 rtx target, rtx *ops, int n)
27659 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
27662 rtx (*gen_load_even) (rtx, rtx, rtx);
27663 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
27664 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
27669 gen_load_even = gen_vec_setv8hi;
27670 gen_interleave_first_low = gen_vec_interleave_lowv4si;
27671 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27672 inner_mode = HImode;
27673 first_imode = V4SImode;
27674 second_imode = V2DImode;
27675 third_imode = VOIDmode;
27678 gen_load_even = gen_vec_setv16qi;
27679 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
27680 gen_interleave_second_low = gen_vec_interleave_lowv4si;
27681 inner_mode = QImode;
27682 first_imode = V8HImode;
27683 second_imode = V4SImode;
27684 third_imode = V2DImode;
27687 gcc_unreachable ();
27690 for (i = 0; i < n; i++)
27692 /* Extend the odd element to SImode using a paradoxical SUBREG. */
27693 op0 = gen_reg_rtx (SImode);
27694 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
27696 /* Insert the SImode value as low element of V4SImode vector. */
27697 op1 = gen_reg_rtx (V4SImode);
27698 op0 = gen_rtx_VEC_MERGE (V4SImode,
27699 gen_rtx_VEC_DUPLICATE (V4SImode,
27701 CONST0_RTX (V4SImode),
27703 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
27705 /* Cast the V4SImode vector back to a vector of the original mode. */
27706 op0 = gen_reg_rtx (mode);
27707 emit_move_insn (op0, gen_lowpart (mode, op1));
27709 /* Load even elements into the second position. */
27710 emit_insn ((*gen_load_even) (op0,
27711 force_reg (inner_mode,
27715 /* Cast vector to FIRST_IMODE vector. */
27716 ops[i] = gen_reg_rtx (first_imode);
27717 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27720 /* Interleave low FIRST_IMODE vectors. */
27721 for (i = j = 0; i < n; i += 2, j++)
27723 op0 = gen_reg_rtx (first_imode);
27724 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27726 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27727 ops[j] = gen_reg_rtx (second_imode);
27728 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27731 /* Interleave low SECOND_IMODE vectors. */
27732 switch (second_imode)
27735 for (i = j = 0; i < n / 2; i += 2, j++)
27737 op0 = gen_reg_rtx (second_imode);
27738 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
27741 /* Cast the SECOND_IMODE vector to the THIRD_IMODE vector. */
27743 ops[j] = gen_reg_rtx (third_imode);
27744 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27746 second_imode = V2DImode;
27747 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27751 op0 = gen_reg_rtx (second_imode);
27752 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
27755 /* Cast the SECOND_IMODE vector back to a vector of the original mode. */
27757 emit_insn (gen_rtx_SET (VOIDmode, target,
27758 gen_lowpart (mode, op0)));
27762 gcc_unreachable ();
27766 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27767 all values variable, and none identical. */
27770 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27771 rtx target, rtx vals)
27773 rtx ops[32], op0, op1;
27774 enum machine_mode half_mode = VOIDmode;
27781 if (!mmx_ok && !TARGET_SSE)
27793 n = GET_MODE_NUNITS (mode);
27794 for (i = 0; i < n; i++)
27795 ops[i] = XVECEXP (vals, 0, i);
27796 ix86_expand_vector_init_concat (mode, target, ops, n);
27800 half_mode = V16QImode;
27804 half_mode = V8HImode;
27808 n = GET_MODE_NUNITS (mode);
27809 for (i = 0; i < n; i++)
27810 ops[i] = XVECEXP (vals, 0, i);
27811 op0 = gen_reg_rtx (half_mode);
27812 op1 = gen_reg_rtx (half_mode);
27813 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27815 ix86_expand_vector_init_interleave (half_mode, op1,
27816 &ops [n >> 1], n >> 2);
27817 emit_insn (gen_rtx_SET (VOIDmode, target,
27818 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27822 if (!TARGET_SSE4_1)
27830 /* Don't use ix86_expand_vector_init_interleave if we can't
27831 move from GPR to SSE register directly. */
27832 if (!TARGET_INTER_UNIT_MOVES)
27835 n = GET_MODE_NUNITS (mode);
27836 for (i = 0; i < n; i++)
27837 ops[i] = XVECEXP (vals, 0, i);
27838 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27846 gcc_unreachable ();
27850 int i, j, n_elts, n_words, n_elt_per_word;
27851 enum machine_mode inner_mode;
27852 rtx words[4], shift;
27854 inner_mode = GET_MODE_INNER (mode);
27855 n_elts = GET_MODE_NUNITS (mode);
27856 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27857 n_elt_per_word = n_elts / n_words;
27858 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
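/* Each word is packed from its highest element down - e.g. four
   QImode elements per SImode word on a 32-bit target - so every
   ASHIFT/IOR pair below appends the next lower element. */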
27860 for (i = 0; i < n_words; ++i)
27862 rtx word = NULL_RTX;
27864 for (j = 0; j < n_elt_per_word; ++j)
27866 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27867 elt = convert_modes (word_mode, inner_mode, elt, true);
27873 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27874 word, 1, OPTAB_LIB_WIDEN);
27875 word = expand_simple_binop (word_mode, IOR, word, elt,
27876 word, 1, OPTAB_LIB_WIDEN);
27884 emit_move_insn (target, gen_lowpart (mode, words[0]));
27885 else if (n_words == 2)
27887 rtx tmp = gen_reg_rtx (mode);
27888 emit_clobber (tmp);
27889 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27890 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27891 emit_move_insn (target, tmp);
27893 else if (n_words == 4)
27895 rtx tmp = gen_reg_rtx (V4SImode);
27896 gcc_assert (word_mode == SImode);
27897 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27898 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27899 emit_move_insn (target, gen_lowpart (mode, tmp));
27902 gcc_unreachable ();
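/* Editorial sketch (not part of the compiler source): the loop above
   packs vector elements into word_mode integers, most significant
   element first.  For 16-bit elements in a 32-bit word the effect is:

       unsigned int
       pack_word_sketch (const unsigned short *elt)
       {
         unsigned int word = elt[1];         /* highest element first */
         word = (word << 16) | elt[0];       /* shift, OR in the next */
         return word;
       }

   Each packed word is then moved into the vector register, either
   directly or via the V4SImode path for four-word vectors.  */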
27906 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27907 instructions unless MMX_OK is true. */
27910 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27912 enum machine_mode mode = GET_MODE (target);
27913 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27914 int n_elts = GET_MODE_NUNITS (mode);
27915 int n_var = 0, one_var = -1;
27916 bool all_same = true, all_const_zero = true;
27920 for (i = 0; i < n_elts; ++i)
27922 x = XVECEXP (vals, 0, i);
27923 if (!(CONST_INT_P (x)
27924 || GET_CODE (x) == CONST_DOUBLE
27925 || GET_CODE (x) == CONST_FIXED))
27926 n_var++, one_var = i;
27927 else if (x != CONST0_RTX (inner_mode))
27928 all_const_zero = false;
27929 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27933 /* Constants are best loaded from the constant pool. */
27936 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27940 /* If all values are identical, broadcast the value. */
27942 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27943 XVECEXP (vals, 0, 0)))
27946 /* Values where only one field is non-constant are best loaded from
27947 the pool and overwritten via move later. */
27951 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27952 XVECEXP (vals, 0, one_var),
27956 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27960 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
27964 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27966 enum machine_mode mode = GET_MODE (target);
27967 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27968 enum machine_mode half_mode;
27969 bool use_vec_merge = false;
27971 static rtx (*gen_extract[6][2]) (rtx, rtx)
27973 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27974 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27975 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27976 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27977 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27978 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27980 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27982 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27983 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27984 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27985 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27986 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27987 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27997 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
27998 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
28000 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
28002 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
28003 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28009 use_vec_merge = TARGET_SSE4_1;
28017 /* For the two element vectors, we implement a VEC_CONCAT with
28018 the extraction of the other element. */
28020 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
28021 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
28024 op0 = val, op1 = tmp;
28026 op0 = tmp, op1 = val;
28028 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
28029 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28034 use_vec_merge = TARGET_SSE4_1;
28041 use_vec_merge = true;
28045 /* tmp = target = A B C D */
28046 tmp = copy_to_reg (target);
28047 /* target = A A B B */
28048 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
28049 /* target = X A B B */
28050 ix86_expand_vector_set (false, target, val, 0);
28051 /* target = A X C D */
28052 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
28053 const1_rtx, const0_rtx,
28054 GEN_INT (2+4), GEN_INT (3+4)));
28058 /* tmp = target = A B C D */
28059 tmp = copy_to_reg (target);
28060 /* tmp = X B C D */
28061 ix86_expand_vector_set (false, tmp, val, 0);
28062 /* target = A B X D */
28063 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
28064 const0_rtx, const1_rtx,
28065 GEN_INT (0+4), GEN_INT (3+4)));
28069 /* tmp = target = A B C D */
28070 tmp = copy_to_reg (target);
28071 /* tmp = X B C D */
28072 ix86_expand_vector_set (false, tmp, val, 0);
28073 /* target = A B X D */
28074 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
28075 const0_rtx, const1_rtx,
28076 GEN_INT (2+4), GEN_INT (0+4)));
28080 gcc_unreachable ();
28085 use_vec_merge = TARGET_SSE4_1;
28089 /* Element 0 handled by vec_merge below. */
28092 use_vec_merge = true;
28098 /* With SSE2, use integer shuffles to swap element 0 and ELT,
28099 store into element 0, then shuffle them back. */
28103 order[0] = GEN_INT (elt);
28104 order[1] = const1_rtx;
28105 order[2] = const2_rtx;
28106 order[3] = GEN_INT (3);
28107 order[elt] = const0_rtx;
28109 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
28110 order[1], order[2], order[3]));
28112 ix86_expand_vector_set (false, target, val, 0);
28114 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
28115 order[1], order[2], order[3]));
28119 /* For SSE1, we have to reuse the V4SF code. */
28120 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
28121 gen_lowpart (SFmode, val), elt);
28126 use_vec_merge = TARGET_SSE2;
28129 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28133 use_vec_merge = TARGET_SSE4_1;
28140 half_mode = V16QImode;
28146 half_mode = V8HImode;
28152 half_mode = V4SImode;
28158 half_mode = V2DImode;
28164 half_mode = V4SFmode;
28170 half_mode = V2DFmode;
28176 /* Compute offset. */
28180 gcc_assert (i <= 1);
28182 /* Extract the half. */
28183 tmp = gen_reg_rtx (half_mode);
28184 emit_insn ((*gen_extract[j][i]) (tmp, target));
28186 /* Put val in tmp at elt. */
28187 ix86_expand_vector_set (false, tmp, val, elt);
28190 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
28199 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
28200 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
28201 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28205 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28207 emit_move_insn (mem, target);
28209 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
28210 emit_move_insn (tmp, val);
28212 emit_move_insn (target, mem);
28217 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
28219 enum machine_mode mode = GET_MODE (vec);
28220 enum machine_mode inner_mode = GET_MODE_INNER (mode);
28221 bool use_vec_extr = false;
28234 use_vec_extr = true;
28238 use_vec_extr = TARGET_SSE4_1;
28250 tmp = gen_reg_rtx (mode);
28251 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
28252 GEN_INT (elt), GEN_INT (elt),
28253 GEN_INT (elt+4), GEN_INT (elt+4)));
28257 tmp = gen_reg_rtx (mode);
28258 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
28262 gcc_unreachable ();
28265 use_vec_extr = true;
28270 use_vec_extr = TARGET_SSE4_1;
28284 tmp = gen_reg_rtx (mode);
28285 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
28286 GEN_INT (elt), GEN_INT (elt),
28287 GEN_INT (elt), GEN_INT (elt)));
28291 tmp = gen_reg_rtx (mode);
28292 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
28296 gcc_unreachable ();
28299 use_vec_extr = true;
28304 /* For SSE1, we have to reuse the V4SF code. */
28305 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
28306 gen_lowpart (V4SFmode, vec), elt);
28312 use_vec_extr = TARGET_SSE2;
28315 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28319 use_vec_extr = TARGET_SSE4_1;
28323 /* ??? Could extract the appropriate HImode element and shift. */
28330 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
28331 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
28333 /* Let the rtl optimizers know about the zero extension performed. */
28334 if (inner_mode == QImode || inner_mode == HImode)
28336 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
28337 target = gen_lowpart (SImode, target);
28340 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28344 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28346 emit_move_insn (mem, vec);
28348 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
28349 emit_move_insn (target, tmp);
28353 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
28354 pattern to reduce; DEST is the destination; IN is the input vector. */
28357 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
28359 rtx tmp1, tmp2, tmp3;
28361 tmp1 = gen_reg_rtx (V4SFmode);
28362 tmp2 = gen_reg_rtx (V4SFmode);
28363 tmp3 = gen_reg_rtx (V4SFmode);
28365 emit_insn (gen_sse_movhlps (tmp1, in, in));
28366 emit_insn (fn (tmp2, tmp1, in));
28368 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
28369 const1_rtx, const1_rtx,
28370 GEN_INT (1+4), GEN_INT (1+4)));
28371 emit_insn (fn (dest, tmp2, tmp3));
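/* Editorial sketch of the reduction above in scalar terms, where FN is
   the binary operation being reduced (e.g. smax):

       tmp1 = { in[2], in[3], ... }                       (movhlps)
       tmp2 = { fn (in[2], in[0]), fn (in[3], in[1]), ... }
       tmp3 = broadcast of tmp2 lane 1                    (shufps)
       dest = fn (tmp2, tmp3)       -- lane 0 holds the result

   so four lanes are reduced with three applications of FN.  */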
28374 /* Target hook for scalar_mode_supported_p. */
28376 ix86_scalar_mode_supported_p (enum machine_mode mode)
28378 if (DECIMAL_FLOAT_MODE_P (mode))
28379 return default_decimal_float_supported_p ();
28380 else if (mode == TFmode)
28383 return default_scalar_mode_supported_p (mode);
28386 /* Implements target hook vector_mode_supported_p. */
28388 ix86_vector_mode_supported_p (enum machine_mode mode)
28390 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
28392 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
28394 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
28396 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
28398 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
28403 /* Target hook for c_mode_for_suffix. */
28404 static enum machine_mode
28405 ix86_c_mode_for_suffix (char suffix)
28415 /* Worker function for TARGET_MD_ASM_CLOBBERS.
28417 We do this in the new i386 backend to maintain source compatibility
28418 with the old cc0-based compiler. */
28421 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
28422 tree inputs ATTRIBUTE_UNUSED,
28425 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
28427 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
28432 /* Implements the target hook targetm.asm.encode_section_info. This
28433 hook is not used by NetWare. */
28435 static void ATTRIBUTE_UNUSED
28436 ix86_encode_section_info (tree decl, rtx rtl, int first)
28438 default_encode_section_info (decl, rtl, first);
28440 if (TREE_CODE (decl) == VAR_DECL
28441 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
28442 && ix86_in_large_data_p (decl))
28443 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
28446 /* Worker function for REVERSE_CONDITION. */
28449 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
28451 return (mode != CCFPmode && mode != CCFPUmode
28452 ? reverse_condition (code)
28453 : reverse_condition_maybe_unordered (code));
28456 /* Output code to perform an x87 FP register move, from OPERANDS[1]
28457 to OPERANDS[0]. */
28460 output_387_reg_move (rtx insn, rtx *operands)
28462 if (REG_P (operands[0]))
28464 if (REG_P (operands[1])
28465 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28467 if (REGNO (operands[0]) == FIRST_STACK_REG)
28468 return output_387_ffreep (operands, 0);
28469 return "fstp\t%y0";
28471 if (STACK_TOP_P (operands[0]))
28472 return "fld%Z1\t%y1";
28475 else if (MEM_P (operands[0]))
28477 gcc_assert (REG_P (operands[1]));
28478 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28479 return "fstp%Z0\t%y0";
28482 /* There is no non-popping store to memory for XFmode.
28483 So if we need one, follow the store with a load. */
28484 if (GET_MODE (operands[0]) == XFmode)
28485 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
28487 return "fst%Z0\t%y0";
28494 /* Output code to perform a conditional jump to LABEL, if C2 flag in
28495 FP status register is set. */
28498 ix86_emit_fp_unordered_jump (rtx label)
28500 rtx reg = gen_reg_rtx (HImode);
28503 emit_insn (gen_x86_fnstsw_1 (reg));
28505 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
28507 emit_insn (gen_x86_sahf_1 (reg));
28509 temp = gen_rtx_REG (CCmode, FLAGS_REG);
28510 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
28514 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
28516 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
28517 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
28520 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
28521 gen_rtx_LABEL_REF (VOIDmode, label),
28523 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
28525 emit_jump_insn (temp);
28526 predict_jump (REG_BR_PROB_BASE * 10 / 100);
28529 /* Output code to perform a log1p XFmode calculation. */
28531 void ix86_emit_i387_log1p (rtx op0, rtx op1)
28533 rtx label1 = gen_label_rtx ();
28534 rtx label2 = gen_label_rtx ();
28536 rtx tmp = gen_reg_rtx (XFmode);
28537 rtx tmp2 = gen_reg_rtx (XFmode);
28540 emit_insn (gen_absxf2 (tmp, op1));
28541 test = gen_rtx_GE (VOIDmode, tmp,
28542 CONST_DOUBLE_FROM_REAL_VALUE (
28543 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
28545 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
28547 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28548 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
28549 emit_jump (label2);
28551 emit_label (label1);
28552 emit_move_insn (tmp, CONST1_RTX (XFmode));
28553 emit_insn (gen_addxf3 (tmp, op1, tmp));
28554 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28555 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
28557 emit_label (label2);
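/* Editorial C model of the sequence above (assumes <math.h>; purely a
   sketch of what the emitted i387 code computes):

       long double log1p_sketch (long double x)
       {
         const long double t = 0.29289321881345247561810596348408353L;
         const long double ln2 = 0.693147180559945309417232121458L;
         if (fabsl (x) < t)                     /* t = 1 - sqrt(2)/2  */
           return ln2 * log2l (x + 1.0L);       /* fyl2xp1 (x, ln2)   */
         return ln2 * log2l (1.0L + x);         /* fyl2x (1 + x, ln2) */
       }

   Both branches compute ln (1 + x) mathematically; fyl2xp1 is used for
   small |x| because it avoids the cancellation in forming 1 + x, but
   it is only specified for |x| < 1 - sqrt(2)/2, hence the branch.  */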
28560 /* Output code to perform a Newton-Raphson approximation of a single precision
28561 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
28563 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
28565 rtx x0, x1, e0, e1, two;
28567 x0 = gen_reg_rtx (mode);
28568 e0 = gen_reg_rtx (mode);
28569 e1 = gen_reg_rtx (mode);
28570 x1 = gen_reg_rtx (mode);
28572 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
28574 if (VECTOR_MODE_P (mode))
28575 two = ix86_build_const_vector (SFmode, true, two);
28577 two = force_reg (mode, two);
28579 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
28581 /* x0 = rcp(b) estimate */
28582 emit_insn (gen_rtx_SET (VOIDmode, x0,
28583 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
28586 emit_insn (gen_rtx_SET (VOIDmode, e0,
28587 gen_rtx_MULT (mode, x0, a)));
28589 emit_insn (gen_rtx_SET (VOIDmode, e1,
28590 gen_rtx_MULT (mode, x0, b)));
28592 emit_insn (gen_rtx_SET (VOIDmode, x1,
28593 gen_rtx_MINUS (mode, two, e1)));
28594 /* res = e0 * x1 */
28595 emit_insn (gen_rtx_SET (VOIDmode, res,
28596 gen_rtx_MULT (mode, e0, x1)));
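/* Editorial scalar sketch of the recipe above; rcp_approx () is a
   hypothetical stand-in for the ~12-bit rcpss estimate:

       float swdiv_sketch (float a, float b)
       {
         float x0 = rcp_approx (b);      /* x0 ~= 1/b                */
         float e0 = x0 * a;
         float e1 = x0 * b;
         float x1 = 2.0f - e1;           /* one Newton-Raphson step  */
         return e0 * x1;                 /* a * x0 * (2 - b * x0)    */
       }
*/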
28599 /* Output code to perform a Newton-Raphson approximation of a
28600 single precision floating point [reciprocal] square root. */
28602 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
28605 rtx x0, e0, e1, e2, e3, mthree, mhalf;
28608 x0 = gen_reg_rtx (mode);
28609 e0 = gen_reg_rtx (mode);
28610 e1 = gen_reg_rtx (mode);
28611 e2 = gen_reg_rtx (mode);
28612 e3 = gen_reg_rtx (mode);
28614 real_from_integer (&r, VOIDmode, -3, -1, 0);
28615 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28617 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
28618 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28620 if (VECTOR_MODE_P (mode))
28622 mthree = ix86_build_const_vector (SFmode, true, mthree);
28623 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
28626 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
28627 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
28629 /* x0 = rsqrt(a) estimate */
28630 emit_insn (gen_rtx_SET (VOIDmode, x0,
28631 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
28634 /* If a == 0.0, filter out the infinite rsqrt estimate to prevent NaN for sqrt (0.0). */
28639 zero = gen_reg_rtx (mode);
28640 mask = gen_reg_rtx (mode);
28642 zero = force_reg (mode, CONST0_RTX(mode));
28643 emit_insn (gen_rtx_SET (VOIDmode, mask,
28644 gen_rtx_NE (mode, zero, a)));
28646 emit_insn (gen_rtx_SET (VOIDmode, x0,
28647 gen_rtx_AND (mode, x0, mask)));
28651 emit_insn (gen_rtx_SET (VOIDmode, e0,
28652 gen_rtx_MULT (mode, x0, a)));
28654 emit_insn (gen_rtx_SET (VOIDmode, e1,
28655 gen_rtx_MULT (mode, e0, x0)));
28658 mthree = force_reg (mode, mthree);
28659 emit_insn (gen_rtx_SET (VOIDmode, e2,
28660 gen_rtx_PLUS (mode, e1, mthree)));
28662 mhalf = force_reg (mode, mhalf);
28664 /* e3 = -.5 * x0 */
28665 emit_insn (gen_rtx_SET (VOIDmode, e3,
28666 gen_rtx_MULT (mode, x0, mhalf)));
28668 /* e3 = -.5 * e0 */
28669 emit_insn (gen_rtx_SET (VOIDmode, e3,
28670 gen_rtx_MULT (mode, e0, mhalf)));
28671 /* ret = e2 * e3 */
28672 emit_insn (gen_rtx_SET (VOIDmode, res,
28673 gen_rtx_MULT (mode, e2, e3)));
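/* Editorial scalar sketch; rsqrt_approx () is a hypothetical stand-in
   for the ~12-bit rsqrtss estimate.  Choosing x0 or e0 as the final
   factor selects rsqrt or sqrt respectively, per the formulas above:

       float swsqrt_sketch (float a, int recip)
       {
         float x0 = rsqrt_approx (a);         /* x0 ~= 1/sqrt(a)  */
         float e0 = x0 * a;
         float e1 = e0 * x0;                  /* a * x0 * x0      */
         float e2 = e1 - 3.0f;
         float e3 = (recip ? x0 : e0) * -0.5f;
         return e2 * e3;
       }
*/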
28676 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
28678 static void ATTRIBUTE_UNUSED
28679 i386_solaris_elf_named_section (const char *name, unsigned int flags,
28682 /* With Binutils 2.15, the "@unwind" marker must be specified on
28683 every occurrence of the ".eh_frame" section, not just the first
28684 one. */
28686 && strcmp (name, ".eh_frame") == 0)
28688 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28689 flags & SECTION_WRITE ? "aw" : "a");
28692 default_elf_asm_named_section (name, flags, decl);
28695 /* Return the mangling of TYPE if it is an extended fundamental type. */
28697 static const char *
28698 ix86_mangle_type (const_tree type)
28700 type = TYPE_MAIN_VARIANT (type);
28702 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28703 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28706 switch (TYPE_MODE (type))
28709 /* __float128 is "g". */
28712 /* "long double" or __float80 is "e". */
28719 /* For 32-bit code we can save PIC register setup by using
28720 __stack_chk_fail_local hidden function instead of calling
28721 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
28722 register, so it is better to call __stack_chk_fail directly. */
28725 ix86_stack_protect_fail (void)
28727 return TARGET_64BIT
28728 ? default_external_stack_protect_fail ()
28729 : default_hidden_stack_protect_fail ();
28732 /* Select a format to encode pointers in exception handling data. CODE
28733 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28734 true if the symbol may be affected by dynamic relocations.
28736 ??? All x86 object file formats are capable of representing this.
28737 After all, the relocation needed is the same as for the call insn.
28738 Whether or not a particular assembler allows us to enter such, I
28739 guess we'll have to see. */
28741 asm_preferred_eh_data_format (int code, int global)
28745 int type = DW_EH_PE_sdata8;
28747 || ix86_cmodel == CM_SMALL_PIC
28748 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28749 type = DW_EH_PE_sdata4;
28750 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28752 if (ix86_cmodel == CM_SMALL
28753 || (ix86_cmodel == CM_MEDIUM && code))
28754 return DW_EH_PE_udata4;
28755 return DW_EH_PE_absptr;
28758 /* Expand copysign from SIGN to the positive value ABS_VALUE
28759 storing in RESULT. If MASK is non-null, it shall be a mask to mask out
28760 the sign bit. */
28762 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28764 enum machine_mode mode = GET_MODE (sign);
28765 rtx sgn = gen_reg_rtx (mode);
28766 if (mask == NULL_RTX)
28768 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28769 if (!VECTOR_MODE_P (mode))
28771 /* We need to generate a scalar mode mask in this case. */
28772 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28773 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28774 mask = gen_reg_rtx (mode);
28775 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28779 mask = gen_rtx_NOT (mode, mask);
28780 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28781 gen_rtx_AND (mode, mask, sign)));
28782 emit_insn (gen_rtx_SET (VOIDmode, result,
28783 gen_rtx_IOR (mode, abs_value, sgn)));
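/* Editorial sketch of the bit operations above for scalar SFmode;
   float_bits ()/bits_float () are hypothetical bit-cast helpers:

       float copysign_pos_sketch (float abs_value, float sign)
       {
         unsigned int s = float_bits (sign) & 0x80000000u;
         return bits_float (float_bits (abs_value) | s);
       }

   When MASK is supplied it is the inverted (~sign-bit) form, so the
   emitted code isolates the sign bit with an andn operation.  */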
28786 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28787 mask for masking out the sign-bit is stored in *SMASK, if that is
28788 non-null. */
28790 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28792 enum machine_mode mode = GET_MODE (op0);
28795 xa = gen_reg_rtx (mode);
28796 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28797 if (!VECTOR_MODE_P (mode))
28799 /* We need to generate a scalar mode mask in this case. */
28800 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28801 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28802 mask = gen_reg_rtx (mode);
28803 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28805 emit_insn (gen_rtx_SET (VOIDmode, xa,
28806 gen_rtx_AND (mode, op0, mask)));
28814 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28815 swapping the operands if SWAP_OPERANDS is true. The expanded
28816 code is a forward jump to a newly created label in case the
28817 comparison is true. The generated label rtx is returned. */
28819 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28820 bool swap_operands)
28831 label = gen_label_rtx ();
28832 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28833 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28834 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28835 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28836 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28837 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28838 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28839 JUMP_LABEL (tmp) = label;
28844 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28845 using comparison code CODE. Operands are swapped for the comparison if
28846 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
28848 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28849 bool swap_operands)
28851 enum machine_mode mode = GET_MODE (op0);
28852 rtx mask = gen_reg_rtx (mode);
28861 if (mode == DFmode)
28862 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28863 gen_rtx_fmt_ee (code, mode, op0, op1)));
28865 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28866 gen_rtx_fmt_ee (code, mode, op0, op1)));
28871 /* Generate and return a rtx of mode MODE for 2**n where n is the number
28872 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
28874 ix86_gen_TWO52 (enum machine_mode mode)
28876 REAL_VALUE_TYPE TWO52r;
28879 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28880 TWO52 = const_double_from_real_value (TWO52r, mode);
28881 TWO52 = force_reg (mode, TWO52);
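/* The add-then-subtract of TWO52 works because doubles at or above
   2**52 have no fraction bits, so the addition forces the fractional
   part to be rounded away in the current rounding mode.  Illustrative:

       double x = 41.7;
       double r = (x + 0x1p52) - 0x1p52;   /* 42.0 (round-to-nearest) */
*/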
28886 /* Expand SSE sequence for computing lround from OP1 storing
28887 into OP0. */
28889 ix86_expand_lround (rtx op0, rtx op1)
28891 /* C code for the stuff we're doing below:
28892 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28895 enum machine_mode mode = GET_MODE (op1);
28896 const struct real_format *fmt;
28897 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28900 /* load nextafter (0.5, 0.0) */
28901 fmt = REAL_MODE_FORMAT (mode);
28902 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28903 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28905 /* adj = copysign (0.5, op1) */
28906 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28907 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28909 /* adj = op1 + adj */
28910 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28912 /* op0 = (imode)adj */
28913 expand_fix (op0, adj, 0);
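/* Editorial note: the adjustment is nextafter (0.5, 0.0), that is
   0.5 - 2**(-p-1), rather than 0.5 itself, so that the largest double
   below 0.5 still rounds to zero.  Adding a full 0.5 to
   0.49999999999999994 would round up to exactly 1.0 under
   round-to-nearest-even, and lround would wrongly return 1.  */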
28916 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing
28917 into OPERAND0. */
28919 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28921 /* C code for the stuff we're doing below (for do_floor):
28923 xi -= (double)xi > op1 ? 1 : 0;
28926 enum machine_mode fmode = GET_MODE (op1);
28927 enum machine_mode imode = GET_MODE (op0);
28928 rtx ireg, freg, label, tmp;
28930 /* reg = (long)op1 */
28931 ireg = gen_reg_rtx (imode);
28932 expand_fix (ireg, op1, 0);
28934 /* freg = (double)reg */
28935 freg = gen_reg_rtx (fmode);
28936 expand_float (freg, ireg, 0);
28938 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28939 label = ix86_expand_sse_compare_and_jump (UNLE,
28940 freg, op1, !do_floor);
28941 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28942 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28943 emit_move_insn (ireg, tmp);
28945 emit_label (label);
28946 LABEL_NUSES (label) = 1;
28948 emit_move_insn (op0, ireg);
28951 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28952 result in OPERAND0. */
28954 ix86_expand_rint (rtx operand0, rtx operand1)
28956 /* C code for the stuff we're doing below:
28957 xa = fabs (operand1);
28958 if (!isless (xa, 2**52))
28960 xa = xa + 2**52 - 2**52;
28961 return copysign (xa, operand1);
28963 enum machine_mode mode = GET_MODE (operand0);
28964 rtx res, xa, label, TWO52, mask;
28966 res = gen_reg_rtx (mode);
28967 emit_move_insn (res, operand1);
28969 /* xa = abs (operand1) */
28970 xa = ix86_expand_sse_fabs (res, &mask);
28972 /* if (!isless (xa, TWO52)) goto label; */
28973 TWO52 = ix86_gen_TWO52 (mode);
28974 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28976 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28977 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28979 ix86_sse_copysign_to_positive (res, xa, res, mask);
28981 emit_label (label);
28982 LABEL_NUSES (label) = 1;
28984 emit_move_insn (operand0, res);
28987 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28988 into OPERAND0. */
28990 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28992 /* C code for the stuff we expand below.
28993 double xa = fabs (x), x2;
28994 if (!isless (xa, TWO52))
28996 xa = xa + TWO52 - TWO52;
28997 x2 = copysign (xa, x);
29006 enum machine_mode mode = GET_MODE (operand0);
29007 rtx xa, TWO52, tmp, label, one, res, mask;
29009 TWO52 = ix86_gen_TWO52 (mode);
29011 /* Temporary for holding the result, initialized to the input
29012 operand to ease control flow. */
29013 res = gen_reg_rtx (mode);
29014 emit_move_insn (res, operand1);
29016 /* xa = abs (operand1) */
29017 xa = ix86_expand_sse_fabs (res, &mask);
29019 /* if (!isless (xa, TWO52)) goto label; */
29020 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29022 /* xa = xa + TWO52 - TWO52; */
29023 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29024 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
29026 /* xa = copysign (xa, operand1) */
29027 ix86_sse_copysign_to_positive (xa, xa, res, mask);
29029 /* generate 1.0 or -1.0 */
29030 one = force_reg (mode,
29031 const_double_from_real_value (do_floor
29032 ? dconst1 : dconstm1, mode));
29034 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
29035 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
29036 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29037 gen_rtx_AND (mode, one, tmp)));
29038 /* We always need to subtract here to preserve signed zero. */
29039 tmp = expand_simple_binop (mode, MINUS,
29040 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29041 emit_move_insn (res, tmp);
29043 emit_label (label);
29044 LABEL_NUSES (label) = 1;
29046 emit_move_insn (operand0, res);
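/* Editorial C model of the floor case above (assumes <math.h>):

       double floor_sketch (double x)
       {
         double xa = fabs (x);
         if (!(xa < 0x1p52))
           return x;                        /* already integral */
         double x2 = copysign ((xa + 0x1p52) - 0x1p52, x);
         /* Unconditional subtract: subtracting 0.0 keeps -0.0.  */
         return x2 - (x2 > x ? 1.0 : 0.0);
       }

   For ceil the comparison operands are swapped and the constant is
   -1.0, so the same subtract adds one when needed.  */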
29049 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
29050 into OPERAND0. */
29052 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
29054 /* C code for the stuff we expand below.
29055 double xa = fabs (x), x2;
29056 if (!isless (xa, TWO52))
29058 x2 = (double)(long)x;
29065 if (HONOR_SIGNED_ZEROS (mode))
29066 return copysign (x2, x);
29069 enum machine_mode mode = GET_MODE (operand0);
29070 rtx xa, xi, TWO52, tmp, label, one, res, mask;
29072 TWO52 = ix86_gen_TWO52 (mode);
29074 /* Temporary for holding the result, initialized to the input
29075 operand to ease control flow. */
29076 res = gen_reg_rtx (mode);
29077 emit_move_insn (res, operand1);
29079 /* xa = abs (operand1) */
29080 xa = ix86_expand_sse_fabs (res, &mask);
29082 /* if (!isless (xa, TWO52)) goto label; */
29083 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29085 /* xa = (double)(long)x */
29086 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29087 expand_fix (xi, res, 0);
29088 expand_float (xa, xi, 0);
29091 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
29093 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
29094 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
29095 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29096 gen_rtx_AND (mode, one, tmp)));
29097 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
29098 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29099 emit_move_insn (res, tmp);
29101 if (HONOR_SIGNED_ZEROS (mode))
29102 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
29104 emit_label (label);
29105 LABEL_NUSES (label) = 1;
29107 emit_move_insn (operand0, res);
29110 /* Expand SSE sequence for computing round from OPERAND1 storing
29111 into OPERAND0, a sequence that works without relying on DImode truncation
29112 via cvttsd2siq, which is only available on 64-bit targets. */
29114 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
29116 /* C code for the stuff we expand below.
29117 double xa = fabs (x), xa2, x2;
29118 if (!isless (xa, TWO52))
29120 Using the absolute value and copying back sign makes
29121 -0.0 -> -0.0 correct.
29122 xa2 = xa + TWO52 - TWO52;
29127 else if (dxa > 0.5)
29129 x2 = copysign (xa2, x);
29132 enum machine_mode mode = GET_MODE (operand0);
29133 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
29135 TWO52 = ix86_gen_TWO52 (mode);
29137 /* Temporary for holding the result, initialized to the input
29138 operand to ease control flow. */
29139 res = gen_reg_rtx (mode);
29140 emit_move_insn (res, operand1);
29142 /* xa = abs (operand1) */
29143 xa = ix86_expand_sse_fabs (res, &mask);
29145 /* if (!isless (xa, TWO52)) goto label; */
29146 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29148 /* xa2 = xa + TWO52 - TWO52; */
29149 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29150 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
29152 /* dxa = xa2 - xa; */
29153 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
29155 /* generate 0.5, 1.0 and -0.5 */
29156 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
29157 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
29158 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
29162 tmp = gen_reg_rtx (mode);
29163 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
29164 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
29165 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29166 gen_rtx_AND (mode, one, tmp)));
29167 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29168 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
29169 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
29170 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29171 gen_rtx_AND (mode, one, tmp)));
29172 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29174 /* res = copysign (xa2, operand1) */
29175 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
29177 emit_label (label);
29178 LABEL_NUSES (label) = 1;
29180 emit_move_insn (operand0, res);
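/* Editorial C model of the sequence above (assumes <math.h>):

       double round_sketch (double x)
       {
         double xa = fabs (x);
         if (!(xa < 0x1p52))
           return x;
         double xa2 = (xa + 0x1p52) - 0x1p52;  /* nearest-even */
         double dxa = xa2 - xa;
         if (dxa > 0.5)
           xa2 -= 1.0;         /* rounded up past the half: undo  */
         else if (dxa <= -0.5)
           xa2 += 1.0;         /* rounded down by a half: undo    */
         return copysign (xa2, x);
       }
*/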
29183 /* Expand SSE sequence for computing trunc from OPERAND1 storing
29184 into OPERAND0. */
29186 ix86_expand_trunc (rtx operand0, rtx operand1)
29188 /* C code for SSE variant we expand below.
29189 double xa = fabs (x), x2;
29190 if (!isless (xa, TWO52))
29192 x2 = (double)(long)x;
29193 if (HONOR_SIGNED_ZEROS (mode))
29194 return copysign (x2, x);
29197 enum machine_mode mode = GET_MODE (operand0);
29198 rtx xa, xi, TWO52, label, res, mask;
29200 TWO52 = ix86_gen_TWO52 (mode);
29202 /* Temporary for holding the result, initialized to the input
29203 operand to ease control flow. */
29204 res = gen_reg_rtx (mode);
29205 emit_move_insn (res, operand1);
29207 /* xa = abs (operand1) */
29208 xa = ix86_expand_sse_fabs (res, &mask);
29210 /* if (!isless (xa, TWO52)) goto label; */
29211 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29213 /* x = (double)(long)x */
29214 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29215 expand_fix (xi, res, 0);
29216 expand_float (res, xi, 0);
29218 if (HONOR_SIGNED_ZEROS (mode))
29219 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
29221 emit_label (label);
29222 LABEL_NUSES (label) = 1;
29224 emit_move_insn (operand0, res);
29227 /* Expand SSE sequence for computing trunc from OPERAND1 storing
29228 into OPERAND0. */
29230 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
29232 enum machine_mode mode = GET_MODE (operand0);
29233 rtx xa, mask, TWO52, label, one, res, smask, tmp;
29235 /* C code for SSE variant we expand below.
29236 double xa = fabs (x), x2;
29237 if (!isless (xa, TWO52))
29239 xa2 = xa + TWO52 - TWO52;
29243 x2 = copysign (xa2, x);
29247 TWO52 = ix86_gen_TWO52 (mode);
29249 /* Temporary for holding the result, initialized to the input
29250 operand to ease control flow. */
29251 res = gen_reg_rtx (mode);
29252 emit_move_insn (res, operand1);
29254 /* xa = abs (operand1) */
29255 xa = ix86_expand_sse_fabs (res, &smask);
29257 /* if (!isless (xa, TWO52)) goto label; */
29258 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29260 /* res = xa + TWO52 - TWO52; */
29261 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29262 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
29263 emit_move_insn (res, tmp);
29266 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
29268 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
29269 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
29270 emit_insn (gen_rtx_SET (VOIDmode, mask,
29271 gen_rtx_AND (mode, mask, one)));
29272 tmp = expand_simple_binop (mode, MINUS,
29273 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
29274 emit_move_insn (res, tmp);
29276 /* res = copysign (res, operand1) */
29277 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
29279 emit_label (label);
29280 LABEL_NUSES (label) = 1;
29282 emit_move_insn (operand0, res);
29285 /* Expand SSE sequence for computing round from OPERAND1 storing
29286 into OPERAND0. */
29288 ix86_expand_round (rtx operand0, rtx operand1)
29290 /* C code for the stuff we're doing below:
29291 double xa = fabs (x);
29292 if (!isless (xa, TWO52))
29294 xa = (double)(long)(xa + nextafter (0.5, 0.0));
29295 return copysign (xa, x);
29297 enum machine_mode mode = GET_MODE (operand0);
29298 rtx res, TWO52, xa, label, xi, half, mask;
29299 const struct real_format *fmt;
29300 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
29302 /* Temporary for holding the result, initialized to the input
29303 operand to ease control flow. */
29304 res = gen_reg_rtx (mode);
29305 emit_move_insn (res, operand1);
29307 TWO52 = ix86_gen_TWO52 (mode);
29308 xa = ix86_expand_sse_fabs (res, &mask);
29309 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29311 /* load nextafter (0.5, 0.0) */
29312 fmt = REAL_MODE_FORMAT (mode);
29313 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
29314 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
29316 /* xa = xa + 0.5 */
29317 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
29318 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
29320 /* xa = (double)(int64_t)xa */
29321 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29322 expand_fix (xi, xa, 0);
29323 expand_float (xa, xi, 0);
29325 /* res = copysign (xa, operand1) */
29326 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
29328 emit_label (label);
29329 LABEL_NUSES (label) = 1;
29331 emit_move_insn (operand0, res);
29335 /* Table of valid machine attributes. */
29336 static const struct attribute_spec ix86_attribute_table[] =
29338 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
29339 /* Stdcall attribute says callee is responsible for popping arguments
29340 if they are not variable. */
29341 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29342 /* Fastcall attribute says callee is responsible for popping arguments
29343 if they are not variable. */
29344 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29345 /* Thiscall attribute says callee is responsible for popping arguments
29346 if they are not variable. */
29347 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29348 /* Cdecl attribute says the callee is a normal C declaration */
29349 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29350 /* Regparm attribute specifies how many integer arguments are to be
29351 passed in registers. */
29352 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
29353 /* Sseregparm attribute says we are using x86_64 calling conventions
29354 for FP arguments. */
29355 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29356 /* force_align_arg_pointer says this function realigns the stack at entry. */
29357 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
29358 false, true, true, ix86_handle_cconv_attribute },
29359 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29360 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
29361 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
29362 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
29364 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29365 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29366 #ifdef SUBTARGET_ATTRIBUTE_TABLE
29367 SUBTARGET_ATTRIBUTE_TABLE,
29369 /* ms_abi and sysv_abi calling convention function attributes. */
29370 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29371 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29372 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute },
29374 { NULL, 0, 0, false, false, false, NULL }
29377 /* Implement targetm.vectorize.builtin_vectorization_cost. */
29379 ix86_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost)
29381 switch (type_of_cost)
29384 return ix86_cost->scalar_stmt_cost;
29387 return ix86_cost->scalar_load_cost;
29390 return ix86_cost->scalar_store_cost;
29393 return ix86_cost->vec_stmt_cost;
29396 return ix86_cost->vec_align_load_cost;
29399 return ix86_cost->vec_store_cost;
29401 case vec_to_scalar:
29402 return ix86_cost->vec_to_scalar_cost;
29404 case scalar_to_vec:
29405 return ix86_cost->scalar_to_vec_cost;
29407 case unaligned_load:
29408 return ix86_cost->vec_unalign_load_cost;
29410 case cond_branch_taken:
29411 return ix86_cost->cond_taken_branch_cost;
29413 case cond_branch_not_taken:
29414 return ix86_cost->cond_not_taken_branch_cost;
29420 gcc_unreachable ();
29425 /* Implement targetm.vectorize.builtin_vec_perm. */
29428 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
29430 tree itype = TREE_TYPE (vec_type);
29431 bool u = TYPE_UNSIGNED (itype);
29432 enum machine_mode vmode = TYPE_MODE (vec_type);
29433 enum ix86_builtins fcode = fcode; /* Silence bogus warning. */
29434 bool ok = TARGET_SSE2;
29440 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
29443 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
29445 itype = ix86_get_builtin_type (IX86_BT_DI);
29450 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
29454 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
29456 itype = ix86_get_builtin_type (IX86_BT_SI);
29460 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
29463 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
29466 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
29469 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
29479 *mask_type = itype;
29480 return ix86_builtins[(int) fcode];
29483 /* Return a vector mode with twice as many elements as VMODE. */
29484 /* ??? Consider moving this to a table generated by genmodes.c. */
29486 static enum machine_mode
29487 doublesize_vector_mode (enum machine_mode vmode)
29491 case V2SFmode: return V4SFmode;
29492 case V1DImode: return V2DImode;
29493 case V2SImode: return V4SImode;
29494 case V4HImode: return V8HImode;
29495 case V8QImode: return V16QImode;
29497 case V2DFmode: return V4DFmode;
29498 case V4SFmode: return V8SFmode;
29499 case V2DImode: return V4DImode;
29500 case V4SImode: return V8SImode;
29501 case V8HImode: return V16HImode;
29502 case V16QImode: return V32QImode;
29504 case V4DFmode: return V8DFmode;
29505 case V8SFmode: return V16SFmode;
29506 case V4DImode: return V8DImode;
29507 case V8SImode: return V16SImode;
29508 case V16HImode: return V32HImode;
29509 case V32QImode: return V64QImode;
29512 gcc_unreachable ();
29516 /* Construct (set target (vec_select op0 (parallel perm))) and
29517 return true if that's a valid instruction in the active ISA. */
29520 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
29522 rtx rperm[MAX_VECT_LEN], x;
29525 for (i = 0; i < nelt; ++i)
29526 rperm[i] = GEN_INT (perm[i]);
29528 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
29529 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
29530 x = gen_rtx_SET (VOIDmode, target, x);
29533 if (recog_memoized (x) < 0)
29541 /* Similar, but generate a vec_concat from op0 and op1 as well. */
29544 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
29545 const unsigned char *perm, unsigned nelt)
29547 enum machine_mode v2mode;
29550 v2mode = doublesize_vector_mode (GET_MODE (op0));
29551 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
29552 return expand_vselect (target, x, perm, nelt);
29555 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29556 in terms of blendp[sd] / pblendw / pblendvb. */
29559 expand_vec_perm_blend (struct expand_vec_perm_d *d)
29561 enum machine_mode vmode = d->vmode;
29562 unsigned i, mask, nelt = d->nelt;
29563 rtx target, op0, op1, x;
29565 if (!TARGET_SSE4_1 || d->op0 == d->op1)
29567 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
29570 /* This is a blend, not a permute. Elements must stay in their
29571 respective lanes. */
29572 for (i = 0; i < nelt; ++i)
29574 unsigned e = d->perm[i];
29575 if (!(e == i || e == i + nelt))
29582 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
29583 decision should be extracted elsewhere, so that we only try that
29584 sequence once all budget==3 options have been tried. */
29586 /* For bytes, see if bytes move in pairs so we can use pblendw with
29587 an immediate argument, rather than pblendvb with a vector argument. */
29588 if (vmode == V16QImode)
29590 bool pblendw_ok = true;
29591 for (i = 0; i < 16 && pblendw_ok; i += 2)
29592 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
29596 rtx rperm[16], vperm;
29598 for (i = 0; i < nelt; ++i)
29599 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
29601 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29602 vperm = force_reg (V16QImode, vperm);
29604 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
29609 target = d->target;
29621 for (i = 0; i < nelt; ++i)
29622 mask |= (d->perm[i] >= nelt) << i;
29626 for (i = 0; i < 2; ++i)
29627 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
29631 for (i = 0; i < 4; ++i)
29632 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
29636 for (i = 0; i < 8; ++i)
29637 mask |= (d->perm[i * 2] >= 16) << i;
29641 target = gen_lowpart (vmode, target);
29642 op0 = gen_lowpart (vmode, op0);
29643 op1 = gen_lowpart (vmode, op1);
29647 gcc_unreachable ();
29650 /* This matches five different patterns with the different modes. */
29651 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
29652 x = gen_rtx_SET (VOIDmode, target, x);
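/* Example (editorial): for V4SFmode with perm = { 0, 5, 2, 7 },
   elements 1 and 3 come from op1, giving mask = 0xa and a single
   blendps with that immediate.  For V16QImode without pairwise byte
   moves, the mask instead becomes a whole constant vector operand for
   pblendvb.  */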
29658 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29659 in terms of the variable form of vpermilps.
29661 Note that we will have already failed the immediate input vpermilps,
29662 which requires that the high and low part shuffle be identical; the
29663 variable form doesn't require that. */
29666 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
29668 rtx rperm[8], vperm;
29671 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
29674 /* We can only permute within the 128-bit lane. */
29675 for (i = 0; i < 8; ++i)
29677 unsigned e = d->perm[i];
29678 if (i < 4 ? e >= 4 : e < 4)
29685 for (i = 0; i < 8; ++i)
29687 unsigned e = d->perm[i];
29689 /* Within each 128-bit lane, the elements of op0 are numbered
29690 from 0 and the elements of op1 are numbered from 4. */
29696 rperm[i] = GEN_INT (e);
29699 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
29700 vperm = force_reg (V8SImode, vperm);
29701 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
29706 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29707 in terms of pshufb or vpperm. */
29710 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
29712 unsigned i, nelt, eltsz;
29713 rtx rperm[16], vperm, target, op0, op1;
29715 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
29717 if (GET_MODE_SIZE (d->vmode) != 16)
29724 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29726 for (i = 0; i < nelt; ++i)
29728 unsigned j, e = d->perm[i];
29729 for (j = 0; j < eltsz; ++j)
29730 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
29733 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29734 vperm = force_reg (V16QImode, vperm);
29736 target = gen_lowpart (V16QImode, d->target);
29737 op0 = gen_lowpart (V16QImode, d->op0);
29738 if (d->op0 == d->op1)
29739 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
29742 op1 = gen_lowpart (V16QImode, d->op1);
29743 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
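/* Example (editorial): a V4SImode permutation { 2, 0, 3, 1 } expands
   to the V16QImode control { 8,9,10,11, 0,1,2,3, 12,13,14,15, 4,5,6,7 },
   since each element contributes eltsz consecutive byte indices.  */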
29749 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
29750 in a single instruction. */
29753 expand_vec_perm_1 (struct expand_vec_perm_d *d)
29755 unsigned i, nelt = d->nelt;
29756 unsigned char perm2[MAX_VECT_LEN];
29758 /* Check plain VEC_SELECT first, because AVX has instructions that could
29759 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
29760 input where SEL+CONCAT may not. */
29761 if (d->op0 == d->op1)
29763 int mask = nelt - 1;
29765 for (i = 0; i < nelt; i++)
29766 perm2[i] = d->perm[i] & mask;
29768 if (expand_vselect (d->target, d->op0, perm2, nelt))
29771 /* There are plenty of patterns in sse.md that are written for
29772 SEL+CONCAT and are not replicated for a single op. Perhaps
29773 that should be changed, to avoid the nastiness here. */
29775 /* Recognize interleave style patterns, which means incrementing
29776 every other permutation operand. */
29777 for (i = 0; i < nelt; i += 2)
29779 perm2[i] = d->perm[i] & mask;
29780 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
29782 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29785 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
29788 for (i = 0; i < nelt; i += 4)
29790 perm2[i + 0] = d->perm[i + 0] & mask;
29791 perm2[i + 1] = d->perm[i + 1] & mask;
29792 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
29793 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
29796 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29801 /* Finally, try the fully general two operand permute. */
29802 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
29805 /* Recognize interleave style patterns with reversed operands. */
29806 if (d->op0 != d->op1)
29808 for (i = 0; i < nelt; ++i)
29810 unsigned e = d->perm[i];
29818 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
29822 /* Try the SSE4.1 blend variable merge instructions. */
29823 if (expand_vec_perm_blend (d))
29826 /* Try one of the AVX vpermil variable permutations. */
29827 if (expand_vec_perm_vpermil (d))
29830 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
29831 if (expand_vec_perm_pshufb (d))
29837 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29838 in terms of a pair of pshuflw + pshufhw instructions. */
29841 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
29843 unsigned char perm2[MAX_VECT_LEN];
29847 if (d->vmode != V8HImode || d->op0 != d->op1)
29850 /* The two permutations only operate in 64-bit lanes. */
29851 for (i = 0; i < 4; ++i)
29852 if (d->perm[i] >= 4)
29854 for (i = 4; i < 8; ++i)
29855 if (d->perm[i] < 4)
29861 /* Emit the pshuflw. */
29862 memcpy (perm2, d->perm, 4);
29863 for (i = 4; i < 8; ++i)
29865 ok = expand_vselect (d->target, d->op0, perm2, 8);
29868 /* Emit the pshufhw. */
29869 memcpy (perm2 + 4, d->perm + 4, 4);
29870 for (i = 0; i < 4; ++i)
29872 ok = expand_vselect (d->target, d->target, perm2, 8);
29878 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29879 the permutation using the SSSE3 palignr instruction. This succeeds
29880 when all of the elements in PERM fit within one vector and we merely
29881 need to shift them down so that a single vector permutation has a
29882 chance to succeed. */
29885 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
29887 unsigned i, nelt = d->nelt;
29892 /* Even with AVX, palignr only operates on 128-bit vectors. */
29893 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29896 min = nelt, max = 0;
29897 for (i = 0; i < nelt; ++i)
29899 unsigned e = d->perm[i];
29905 if (min == 0 || max - min >= nelt)
29908 /* Given that we have SSSE3, we know we'll be able to implement the
29909 single operand permutation after the palignr with pshufb. */
29913 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
29914 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
29915 gen_lowpart (TImode, d->op1),
29916 gen_lowpart (TImode, d->op0), shift));
29918 d->op0 = d->op1 = d->target;
29921 for (i = 0; i < nelt; ++i)
29923 unsigned e = d->perm[i] - min;
29929 /* Test for the degenerate case where the alignment by itself
29930 produces the desired permutation. */
29934 ok = expand_vec_perm_1 (d);
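/* Example (editorial): for V8HImode with perm = { 3 4 5 6 7 8 9 10 },
   min = 3, so the palignr shifts the op1:op0 pair right by 3*16 bits;
   the residual permutation is then the identity and no further pshufb
   is needed.  */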
29940 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29941 a two vector permutation into a single vector permutation by using
29942 an interleave operation to merge the vectors. */
29945 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
29947 struct expand_vec_perm_d dremap, dfinal;
29948 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
29949 unsigned contents, h1, h2, h3, h4;
29950 unsigned char remap[2 * MAX_VECT_LEN];
29954 if (d->op0 == d->op1)
29957 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
29958 lanes. We can use similar techniques with the vperm2f128 instruction,
29959 but it requires slightly different logic. */
29960 if (GET_MODE_SIZE (d->vmode) != 16)
29963 /* Examine from whence the elements come. */
29965 for (i = 0; i < nelt; ++i)
29966 contents |= 1u << d->perm[i];
29968 /* Split the two input vectors into 4 halves. */
29969 h1 = (1u << nelt2) - 1;
29970 h2 = h1 << nelt2;
29971 h3 = h2 << nelt2;
29972 h4 = h3 << nelt2;
29974 memset (remap, 0xff, sizeof (remap));
29977 /* If all the elements come from the low halves, use interleave low;
29978 similarly for interleave high. If the elements are from mismatched
29979 halves, we can use shufps for V4SF/V4SI or do a DImode shuffle. */
29980 if ((contents & (h1 | h3)) == contents)
29982 for (i = 0; i < nelt2; ++i)
29985 remap[i + nelt] = i * 2 + 1;
29986 dremap.perm[i * 2] = i;
29987 dremap.perm[i * 2 + 1] = i + nelt;
29990 else if ((contents & (h2 | h4)) == contents)
29992 for (i = 0; i < nelt2; ++i)
29994 remap[i + nelt2] = i * 2;
29995 remap[i + nelt + nelt2] = i * 2 + 1;
29996 dremap.perm[i * 2] = i + nelt2;
29997 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
30000 else if ((contents & (h1 | h4)) == contents)
30002 for (i = 0; i < nelt2; ++i)
30005 remap[i + nelt + nelt2] = i + nelt2;
30006 dremap.perm[i] = i;
30007 dremap.perm[i + nelt2] = i + nelt + nelt2;
30011 dremap.vmode = V2DImode;
30013 dremap.perm[0] = 0;
30014 dremap.perm[1] = 3;
30017 else if ((contents & (h2 | h3)) == contents)
30019 for (i = 0; i < nelt2; ++i)
30021 remap[i + nelt2] = i;
30022 remap[i + nelt] = i + nelt2;
30023 dremap.perm[i] = i + nelt2;
30024 dremap.perm[i + nelt2] = i + nelt;
30028 dremap.vmode = V2DImode;
30030 dremap.perm[0] = 1;
30031 dremap.perm[1] = 2;
30037 /* Use the remapping array set up above to move the elements from their
30038 swizzled locations into their final destinations. */
30040 for (i = 0; i < nelt; ++i)
30042 unsigned e = remap[d->perm[i]];
30043 gcc_assert (e < nelt);
30044 dfinal.perm[i] = e;
30046 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
30047 dfinal.op1 = dfinal.op0;
30048 dremap.target = dfinal.op0;
30050 /* Test if the final remap can be done with a single insn. For V4SFmode or
30051 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
30053 ok = expand_vec_perm_1 (&dfinal);
30054 seq = get_insns ();
30060 if (dremap.vmode != dfinal.vmode)
30062 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
30063 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
30064 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
30067 ok = expand_vec_perm_1 (&dremap);
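/* Example (editorial): for V4SFmode with perm = { 4, 0, 5, 1 }, all
   elements come from the two low halves (CONTENTS is covered by
   h1 | h3), so DREMAP becomes the interleave-low { 0, 4, 1, 5 } and
   DFINAL the single-operand shuffle { 1, 0, 3, 2 }, which
   expand_vec_perm_1 emits as a single shufps/pshufd.  */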
30074 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
30075 permutation with two pshufb insns and an ior. We should have already
30076 failed all two instruction sequences. */
30079 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
30081 rtx rperm[2][16], vperm, l, h, op, m128;
30082 unsigned int i, nelt, eltsz;
30084 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
30086 gcc_assert (d->op0 != d->op1);
30089 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
30091 /* Generate two permutation masks. If the required element is within
30092 the given vector it is shuffled into the proper lane. If the required
30093 element is in the other vector, force a zero into the lane by setting
30094 bit 7 in the permutation mask. */
30095 m128 = GEN_INT (-128);
30096 for (i = 0; i < nelt; ++i)
30098 unsigned j, e = d->perm[i];
30099 unsigned which = (e >= nelt);
30103 for (j = 0; j < eltsz; ++j)
30105 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
30106 rperm[1-which][i*eltsz + j] = m128;
30110 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
30111 vperm = force_reg (V16QImode, vperm);
30113 l = gen_reg_rtx (V16QImode);
30114 op = gen_lowpart (V16QImode, d->op0);
30115 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
30117 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
30118 vperm = force_reg (V16QImode, vperm);
30120 h = gen_reg_rtx (V16QImode);
30121 op = gen_lowpart (V16QImode, d->op1);
30122 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
30124 op = gen_lowpart (V16QImode, d->target);
30125 emit_insn (gen_iorv16qi3 (op, l, h));
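/* Example (editorial): if d->perm[0] is 19 in a V16QImode permute,
   byte 0 of the op0 control is -128 (bit 7 forces a zero byte) while
   byte 0 of the op1 control is 3, so after both pshufbs exactly one of
   the two intermediate vectors has a live byte in each lane and the
   ior merges them.  */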
30130 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
30131 and extract-odd permutations. */
30134 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
30136 rtx t1, t2, t3, t4;
30141 t1 = gen_reg_rtx (V4DFmode);
30142 t2 = gen_reg_rtx (V4DFmode);
30144 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
30145 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
30146 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
30148 /* Now an unpck[lh]pd will produce the result required. */
30150 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
30152 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
30158 static const unsigned char perm1[8] = { 0, 2, 1, 3, 4, 6, 5, 7 };
30159 static const unsigned char perme[8] = { 0, 1, 8, 9, 4, 5, 12, 13 };
30160 static const unsigned char permo[8] = { 2, 3, 10, 11, 6, 7, 14, 15 };
30162 t1 = gen_reg_rtx (V8SFmode);
30163 t2 = gen_reg_rtx (V8SFmode);
30164 t3 = gen_reg_rtx (V8SFmode);
30165 t4 = gen_reg_rtx (V8SFmode);
30167 /* Shuffle within the 128-bit lanes to produce:
30168 { 0 2 1 3 4 6 5 7 } and { 8 a 9 b c e d f }. */
30169 expand_vselect (t1, d->op0, perm1, 8);
30170 expand_vselect (t2, d->op1, perm1, 8);
30172 /* Shuffle the lanes around to produce:
30173 { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }. */
30174 emit_insn (gen_avx_vperm2f128v8sf3 (t3, t1, t2, GEN_INT (0x20)));
30175 emit_insn (gen_avx_vperm2f128v8sf3 (t4, t1, t2, GEN_INT (0x31)));
30177 /* Now a vpermil2p will produce the result required. */
30178 /* ??? The vpermil2p requires a vector constant. Another option
30179 is an unpck[lh]ps to merge the two vectors to produce
30180 { 0 4 2 6 8 c a e } or { 1 5 3 7 9 d b f }. Then use another
30181 vpermilps to get the elements into the final order. */
30184 memcpy (d->perm, odd ? permo : perme, 8);
30185 expand_vec_perm_vpermil (d);
30193 /* These are always directly implementable by expand_vec_perm_1. */
30194 gcc_unreachable ();
30198 return expand_vec_perm_pshufb2 (d);
30201 /* We need 2*log2(N)-1 operations to achieve odd/even
30202 with interleave. */
30203 t1 = gen_reg_rtx (V8HImode);
30204 t2 = gen_reg_rtx (V8HImode);
30205 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
30206 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
30207 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
30208 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
30209 if (odd)
30210 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
30211 else
30212 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
30213 emit_insn (t3);
30219 return expand_vec_perm_pshufb2 (d);
30222 t1 = gen_reg_rtx (V16QImode);
30223 t2 = gen_reg_rtx (V16QImode);
30224 t3 = gen_reg_rtx (V16QImode);
30225 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
30226 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
30227 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
30228 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
30229 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
30230 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
30231 if (odd)
30232 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
30233 else
30234 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
30235 emit_insn (t3);
30240 gcc_unreachable ();
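
/* Illustrative scalar model of the interleave-based extraction above;
   model_extract_even_odd is a hypothetical helper sketched for
   exposition, not part of GCC.  Each punpckl/punpckh round regroups
   elements by one more low index bit, so after log2(8) = 3 rounds the
   low half holds the even elements and the high half the odd ones; the
   compiler emits only the needed half of the last round, giving the
   2*log2(N)-1 insns noted above.  */

static void
model_extract_even_odd (const unsigned short *op0,
			const unsigned short *op1,
			unsigned short *dst, int odd)
{
  unsigned short a[8], b[8], lo[8], hi[8];
  unsigned int i, round;

  memcpy (a, op0, sizeof a);
  memcpy (b, op1, sizeof b);
  for (round = 0; round < 3; ++round)
    {
      for (i = 0; i < 4; ++i)
	{
	  lo[2 * i] = a[i];		/* punpcklwd */
	  lo[2 * i + 1] = b[i];
	  hi[2 * i] = a[i + 4];		/* punpckhwd */
	  hi[2 * i + 1] = b[i + 4];
	}
      memcpy (a, lo, sizeof a);
      memcpy (b, hi, sizeof b);
    }
  memcpy (dst, odd ? b : a, sizeof a);
}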
30246 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
30247 extract-even and extract-odd permutations. */
30250 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
30252 unsigned i, odd, nelt = d->nelt;
30254 odd = d->perm[0];
30255 if (odd != 0 && odd != 1)
30256 return false;
30258 for (i = 1; i < nelt; ++i)
30259 if (d->perm[i] != 2 * i + odd)
30260 return false;
30262 return expand_vec_perm_even_odd_1 (d, odd);
30265 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
30266 permutations. We assume that expand_vec_perm_1 has already failed. */
30269 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
30271 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
30272 enum machine_mode vmode = d->vmode;
30273 unsigned char perm2[4];
30274 rtx op0 = d->op0;
30275 bool ok;
30281 /* These are special-cased in sse.md so that we can optionally
30282 use the vbroadcast instruction. They expand to two insns
30283 if the input happens to be in a register. */
30284 gcc_unreachable ();
30290 /* These are always implementable using standard shuffle patterns. */
30291 gcc_unreachable ();
30295 /* These can be implemented via interleave. We save one insn by
30296 stopping once we have promoted to V4SImode and then using pshufd. */
30299 optab otab = vec_interleave_low_optab;
30301 if (elt >= nelt2)
30302 {
30303 otab = vec_interleave_high_optab;
30304 elt -= nelt2;
30305 }
30306 nelt2 /= 2;
30308 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
30309 vmode = get_mode_wider_vector (vmode);
30310 op0 = gen_lowpart (vmode, op0);
30312 while (vmode != V4SImode);
30314 memset (perm2, elt, 4);
30315 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
30316 gcc_assert (ok);
30317 return true;
30320 gcc_unreachable ();
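
/* Illustrative scalar model of the interleave broadcast above for a
   16-byte vector; model_byte_broadcast is a hypothetical helper
   sketched for exposition, not part of GCC.  Interleaving a vector
   with itself duplicates every element, so each round doubles the
   width of the run holding the requested element; two rounds reach
   32-bit granularity, and a pshufd-style replication of that 32-bit
   group finishes the broadcast.  */

static void
model_byte_broadcast (const unsigned char *src, unsigned int elt,
		      unsigned char *dst)
{
  unsigned char v[16], t[16];
  unsigned int i, width;

  memcpy (v, src, 16);
  for (width = 1; width < 4; width *= 2)
    {
      unsigned int half = 16 / width / 2;	/* elements per half */
      unsigned int base = 0;

      if (elt >= half)			/* use the punpckh form */
	{
	  base = half;
	  elt -= half;
	}
      for (i = 0; i < half; ++i)	/* duplicate each element */
	{
	  memcpy (t + (2 * i) * width, v + (base + i) * width, width);
	  memcpy (t + (2 * i + 1) * width, v + (base + i) * width, width);
	}
      memcpy (v, t, 16);
    }
  for (i = 0; i < 4; ++i)		/* pshufd-style replication */
    memcpy (dst + 4 * i, v + elt * 4, 4);
}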
30324 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
30325 broadcast permutations. */
30328 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
30330 unsigned i, elt, nelt = d->nelt;
30332 if (d->op0 != d->op1)
30333 return false;
30335 elt = d->perm[0];
30336 for (i = 1; i < nelt; ++i)
30337 if (d->perm[i] != elt)
30338 return false;
30340 return expand_vec_perm_broadcast_1 (d);
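/* For example, on V4SFmode the constant { 2, 2, 2, 2 } replicates
   element 2 into every lane and passes the test above.  */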
30343 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
30344 With all of the interface bits taken care of, perform the expansion
30345 in D and return true on success. */
30348 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
30350 /* Try a single instruction expansion. */
30351 if (expand_vec_perm_1 (d))
30352 return true;
30354 /* Try sequences of two instructions. */
30356 if (expand_vec_perm_pshuflw_pshufhw (d))
30357 return true;
30359 if (expand_vec_perm_palignr (d))
30360 return true;
30362 if (expand_vec_perm_interleave2 (d))
30363 return true;
30365 if (expand_vec_perm_broadcast (d))
30366 return true;
30368 /* Try sequences of three instructions. */
30370 if (expand_vec_perm_pshufb2 (d))
30371 return true;
30373 /* ??? Look for narrow permutations whose element orderings would
30374 allow the promotion to a wider mode. */
30376 /* ??? Look for sequences of interleave or a wider permute that place
30377 the data into the correct lanes for a half-vector shuffle like
30378 pshuf[lh]w or vpermilps. */
30380 /* ??? Look for sequences of interleave that produce the desired results.
30381 The combinatorics of punpck[lh] get pretty ugly... */
30383 if (expand_vec_perm_even_odd (d))
30384 return true;
30386 return false;
30389 /* Extract the values from the vector CST into the permutation array in D.
30390 Return 0 on error, 1 if all values of the permutation come from the
30391 first vector, 2 if they all come from the second vector, and 3 otherwise. */
30394 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
30396 tree list = TREE_VECTOR_CST_ELTS (cst);
30397 unsigned i, nelt = d->nelt;
30398 int ret = 0;
30400 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
30402 unsigned HOST_WIDE_INT e;
30404 if (!host_integerp (TREE_VALUE (list), 1))
30405 return 0;
30406 e = tree_low_cst (TREE_VALUE (list), 1);
30407 if (e >= 2 * nelt)
30408 return 0;
30410 ret |= (e < nelt ? 1 : 2);
30411 d->perm[i] = e;
30413 gcc_assert (list == NULL);
30415 /* If all elements come from the second vector, fold them to the first. */
30416 if (ret == 2)
30417 for (i = 0; i < nelt; ++i)
30418 d->perm[i] -= nelt;
30420 return ret;
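
/* For example, with nelt == 4 the constant { 4, 5, 6, 7 } draws only on
   the second vector and yields 2, after folding the permutation to
   { 0, 1, 2, 3 }; { 0, 5, 2, 7 } blends both vectors and yields 3.  */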
30424 ix86_expand_vec_perm_builtin (tree exp)
30426 struct expand_vec_perm_d d;
30427 tree arg0, arg1, arg2;
30429 arg0 = CALL_EXPR_ARG (exp, 0);
30430 arg1 = CALL_EXPR_ARG (exp, 1);
30431 arg2 = CALL_EXPR_ARG (exp, 2);
30433 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
30434 d.nelt = GET_MODE_NUNITS (d.vmode);
30435 d.testing_p = false;
30436 gcc_assert (VECTOR_MODE_P (d.vmode));
30438 if (TREE_CODE (arg2) != VECTOR_CST)
30440 error_at (EXPR_LOCATION (exp),
30441 "vector permutation requires vector constant");
30445 switch (extract_vec_perm_cst (&d, arg2))
30451 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
30455 if (!operand_equal_p (arg0, arg1, 0))
30457 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30458 d.op0 = force_reg (d.vmode, d.op0);
30459 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30460 d.op1 = force_reg (d.vmode, d.op1);
30464 /* The elements of PERM do not suggest that only the first operand
30465 is used, but both operands are identical. Allow easier matching
30466 of the permutation by folding the permutation into the single operand. */
30469 unsigned i, nelt = d.nelt;
30470 for (i = 0; i < nelt; ++i)
30471 if (d.perm[i] >= nelt)
30472 d.perm[i] -= nelt;
30477 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30478 d.op0 = force_reg (d.vmode, d.op0);
30483 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30484 d.op0 = force_reg (d.vmode, d.op0);
30489 d.target = gen_reg_rtx (d.vmode);
30490 if (ix86_expand_vec_perm_builtin_1 (&d))
30491 return d.target;
30493 /* For compiler-generated permutations we should never get here, because
30494 the compiler should also be checking the ok hook. But since this is a
30495 builtin the user has access to, don't abort. */
30499 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
30502 sorry ("vector permutation (%d %d %d %d)",
30503 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
30506 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
30507 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30508 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
30511 sorry ("vector permutation "
30512 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
30513 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30514 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
30515 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
30516 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
30519 gcc_unreachable ();
30522 return CONST0_RTX (d.vmode);
30525 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
30528 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
30530 struct expand_vec_perm_d d;
30534 d.vmode = TYPE_MODE (vec_type);
30535 d.nelt = GET_MODE_NUNITS (d.vmode);
30536 d.testing_p = true;
30538 /* Given sufficient ISA support we can just return true here
30539 for selected vector modes. */
30540 if (GET_MODE_SIZE (d.vmode) == 16)
30542 /* All implementable with a single vpperm insn. */
30543 if (TARGET_XOP)
30544 return true;
30545 /* All implementable with 2 pshufb + 1 ior. */
30546 if (TARGET_SSSE3)
30547 return true;
30548 /* All implementable with shufpd or unpck[lh]pd. */
30549 if (d.nelt == 2)
30550 return true;
30553 vec_mask = extract_vec_perm_cst (&d, mask);
30555 /* This hook cannot be called in response to something that the
30556 user does (unlike the builtin expander), so we should never see
30557 an error generated from the extract. */
30558 gcc_assert (vec_mask > 0 && vec_mask <= 3);
30559 one_vec = (vec_mask != 3);
30561 /* Implementable with shufps or pshufd. */
30562 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
30563 return true;
30565 /* Otherwise we have to go through the motions and see if we can
30566 figure out how to generate the requested permutation. */
30567 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
30568 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
30569 if (!one_vec)
30570 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
30573 ret = ix86_expand_vec_perm_builtin_1 (&d);
30580 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
30582 struct expand_vec_perm_d d;
30583 unsigned i, nelt;
30585 d.target = targ;
30586 d.op0 = op0;
30587 d.op1 = op1;
30588 d.vmode = GET_MODE (targ);
30589 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
30590 d.testing_p = false;
30592 for (i = 0; i < nelt; ++i)
30593 d.perm[i] = i * 2 + odd;
30595 /* We'll either be able to implement the permutation directly... */
30596 if (expand_vec_perm_1 (&d))
30597 return;
30599 /* ... or we use the special-case patterns. */
30600 expand_vec_perm_even_odd_1 (&d, odd);
30603 /* Return the calling-ABI-specific va_list type node for FNDECL: the MS
30604 or SysV va_list type on 64-bit targets, va_list_type_node otherwise. */
30607 ix86_fn_abi_va_list (tree fndecl)
30609 if (!TARGET_64BIT)
30610 return va_list_type_node;
30611 gcc_assert (fndecl != NULL_TREE);
30613 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
30614 return ms_va_list_type_node;
30616 return sysv_va_list_type_node;
30619 /* Returns the canonical va_list type specified by TYPE. If there
30620 is no valid TYPE provided, it returns NULL_TREE. */
30623 ix86_canonical_va_list_type (tree type)
30627 /* Resolve references and pointers to va_list type. */
30628 if (TREE_CODE (type) == MEM_REF)
30629 type = TREE_TYPE (type);
30630 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE (type)))
30631 type = TREE_TYPE (type);
30632 else if (POINTER_TYPE_P (type) && TREE_CODE (TREE_TYPE (type)) == ARRAY_TYPE)
30633 type = TREE_TYPE (type);
30637 wtype = va_list_type_node;
30638 gcc_assert (wtype != NULL_TREE);
30639 htype = type;
30640 if (TREE_CODE (wtype) == ARRAY_TYPE)
30642 /* If va_list is an array type, the argument may have decayed
30643 to a pointer type, e.g. by being passed to another function.
30644 In that case, unwrap both types so that we can compare the
30645 underlying records. */
30646 if (TREE_CODE (htype) == ARRAY_TYPE
30647 || POINTER_TYPE_P (htype))
30649 wtype = TREE_TYPE (wtype);
30650 htype = TREE_TYPE (htype);
30653 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30654 return va_list_type_node;
30655 wtype = sysv_va_list_type_node;
30656 gcc_assert (wtype != NULL_TREE);
30657 htype = type;
30658 if (TREE_CODE (wtype) == ARRAY_TYPE)
30660 /* If va_list is an array type, the argument may have decayed
30661 to a pointer type, e.g. by being passed to another function.
30662 In that case, unwrap both types so that we can compare the
30663 underlying records. */
30664 if (TREE_CODE (htype) == ARRAY_TYPE
30665 || POINTER_TYPE_P (htype))
30667 wtype = TREE_TYPE (wtype);
30668 htype = TREE_TYPE (htype);
30671 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30672 return sysv_va_list_type_node;
30673 wtype = ms_va_list_type_node;
30674 gcc_assert (wtype != NULL_TREE);
30675 htype = type;
30676 if (TREE_CODE (wtype) == ARRAY_TYPE)
30678 /* If va_list is an array type, the argument may have decayed
30679 to a pointer type, e.g. by being passed to another function.
30680 In that case, unwrap both types so that we can compare the
30681 underlying records. */
30682 if (TREE_CODE (htype) == ARRAY_TYPE
30683 || POINTER_TYPE_P (htype))
30685 wtype = TREE_TYPE (wtype);
30686 htype = TREE_TYPE (htype);
30689 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30690 return ms_va_list_type_node;
30693 return std_canonical_va_list_type (type);
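
/* For reference, the x86-64 SysV va_list is declared as an array of one
   structure:
     typedef struct __va_list_tag __builtin_va_list[1];
   so a va_list forwarded to another function decays to
   struct __va_list_tag *, which is why both sides are unwrapped before
   comparing the main variants above.  */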
30696 /* Iterate through the target-specific builtin types for va_list.
30697 IDX denotes the iterator, *PTREE is set to the result type of
30698 the va_list builtin, and *PNAME to its internal name.
30699 Returns zero if there is no element for this index, otherwise
30700 IDX should be increased upon the next call.
30701 Note: do not iterate a base builtin's name like __builtin_va_list.
30702 Used from c_common_nodes_and_builtins. */
30705 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
30715 *ptree = ms_va_list_type_node;
30716 *pname = "__builtin_ms_va_list";
30720 *ptree = sysv_va_list_type_node;
30721 *pname = "__builtin_sysv_va_list";
30729 /* Initialize the GCC target structure. */
30730 #undef TARGET_RETURN_IN_MEMORY
30731 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
30733 #undef TARGET_LEGITIMIZE_ADDRESS
30734 #define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address
30736 #undef TARGET_ATTRIBUTE_TABLE
30737 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
30738 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30739 # undef TARGET_MERGE_DECL_ATTRIBUTES
30740 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
30741 #endif
30743 #undef TARGET_COMP_TYPE_ATTRIBUTES
30744 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
30746 #undef TARGET_INIT_BUILTINS
30747 #define TARGET_INIT_BUILTINS ix86_init_builtins
30748 #undef TARGET_BUILTIN_DECL
30749 #define TARGET_BUILTIN_DECL ix86_builtin_decl
30750 #undef TARGET_EXPAND_BUILTIN
30751 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
30753 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
30754 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
30755 ix86_builtin_vectorized_function
30757 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
30758 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
30760 #undef TARGET_BUILTIN_RECIPROCAL
30761 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
30763 #undef TARGET_ASM_FUNCTION_EPILOGUE
30764 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
30766 #undef TARGET_ENCODE_SECTION_INFO
30767 #ifndef SUBTARGET_ENCODE_SECTION_INFO
30768 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
30769 #else
30770 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
30771 #endif
30773 #undef TARGET_ASM_OPEN_PAREN
30774 #define TARGET_ASM_OPEN_PAREN ""
30775 #undef TARGET_ASM_CLOSE_PAREN
30776 #define TARGET_ASM_CLOSE_PAREN ""
30778 #undef TARGET_ASM_BYTE_OP
30779 #define TARGET_ASM_BYTE_OP ASM_BYTE
30781 #undef TARGET_ASM_ALIGNED_HI_OP
30782 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
30783 #undef TARGET_ASM_ALIGNED_SI_OP
30784 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
30785 #ifdef ASM_QUAD
30786 #undef TARGET_ASM_ALIGNED_DI_OP
30787 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
30788 #endif
30790 #undef TARGET_ASM_UNALIGNED_HI_OP
30791 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
30792 #undef TARGET_ASM_UNALIGNED_SI_OP
30793 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
30794 #undef TARGET_ASM_UNALIGNED_DI_OP
30795 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
30797 #undef TARGET_PRINT_OPERAND
30798 #define TARGET_PRINT_OPERAND ix86_print_operand
30799 #undef TARGET_PRINT_OPERAND_ADDRESS
30800 #define TARGET_PRINT_OPERAND_ADDRESS ix86_print_operand_address
30801 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
30802 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P ix86_print_operand_punct_valid_p
30804 #undef TARGET_SCHED_ADJUST_COST
30805 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
30806 #undef TARGET_SCHED_ISSUE_RATE
30807 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
30808 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
30809 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
30810 ia32_multipass_dfa_lookahead
30812 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
30813 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
30815 #ifdef HAVE_AS_TLS
30816 #undef TARGET_HAVE_TLS
30817 #define TARGET_HAVE_TLS true
30818 #endif
30819 #undef TARGET_CANNOT_FORCE_CONST_MEM
30820 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
30821 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
30822 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
30824 #undef TARGET_DELEGITIMIZE_ADDRESS
30825 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
30827 #undef TARGET_MS_BITFIELD_LAYOUT_P
30828 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
30830 #if TARGET_MACHO
30831 #undef TARGET_BINDS_LOCAL_P
30832 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
30833 #endif
30834 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30835 #undef TARGET_BINDS_LOCAL_P
30836 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
30837 #endif
30839 #undef TARGET_ASM_OUTPUT_MI_THUNK
30840 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
30841 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
30842 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
30844 #undef TARGET_ASM_FILE_START
30845 #define TARGET_ASM_FILE_START x86_file_start
30847 #undef TARGET_DEFAULT_TARGET_FLAGS
30848 #define TARGET_DEFAULT_TARGET_FLAGS \
30849 (TARGET_DEFAULT \
30850 | TARGET_SUBTARGET_DEFAULT \
30851 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT \
30854 #undef TARGET_HANDLE_OPTION
30855 #define TARGET_HANDLE_OPTION ix86_handle_option
30857 #undef TARGET_REGISTER_MOVE_COST
30858 #define TARGET_REGISTER_MOVE_COST ix86_register_move_cost
30859 #undef TARGET_MEMORY_MOVE_COST
30860 #define TARGET_MEMORY_MOVE_COST ix86_memory_move_cost
30861 #undef TARGET_RTX_COSTS
30862 #define TARGET_RTX_COSTS ix86_rtx_costs
30863 #undef TARGET_ADDRESS_COST
30864 #define TARGET_ADDRESS_COST ix86_address_cost
30866 #undef TARGET_FIXED_CONDITION_CODE_REGS
30867 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
30868 #undef TARGET_CC_MODES_COMPATIBLE
30869 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
30871 #undef TARGET_MACHINE_DEPENDENT_REORG
30872 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
30874 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
30875 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value
30877 #undef TARGET_BUILD_BUILTIN_VA_LIST
30878 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
30880 #undef TARGET_ENUM_VA_LIST_P
30881 #define TARGET_ENUM_VA_LIST_P ix86_enum_va_list
30883 #undef TARGET_FN_ABI_VA_LIST
30884 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
30886 #undef TARGET_CANONICAL_VA_LIST_TYPE
30887 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
30889 #undef TARGET_EXPAND_BUILTIN_VA_START
30890 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
30892 #undef TARGET_MD_ASM_CLOBBERS
30893 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
30895 #undef TARGET_PROMOTE_PROTOTYPES
30896 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
30897 #undef TARGET_STRUCT_VALUE_RTX
30898 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
30899 #undef TARGET_SETUP_INCOMING_VARARGS
30900 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
30901 #undef TARGET_MUST_PASS_IN_STACK
30902 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
30903 #undef TARGET_FUNCTION_ARG_ADVANCE
30904 #define TARGET_FUNCTION_ARG_ADVANCE ix86_function_arg_advance
30905 #undef TARGET_FUNCTION_ARG
30906 #define TARGET_FUNCTION_ARG ix86_function_arg
30907 #undef TARGET_PASS_BY_REFERENCE
30908 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
30909 #undef TARGET_INTERNAL_ARG_POINTER
30910 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
30911 #undef TARGET_UPDATE_STACK_BOUNDARY
30912 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
30913 #undef TARGET_GET_DRAP_RTX
30914 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
30915 #undef TARGET_STRICT_ARGUMENT_NAMING
30916 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
30917 #undef TARGET_STATIC_CHAIN
30918 #define TARGET_STATIC_CHAIN ix86_static_chain
30919 #undef TARGET_TRAMPOLINE_INIT
30920 #define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
30921 #undef TARGET_RETURN_POPS_ARGS
30922 #define TARGET_RETURN_POPS_ARGS ix86_return_pops_args
30924 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
30925 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
30927 #undef TARGET_SCALAR_MODE_SUPPORTED_P
30928 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
30930 #undef TARGET_VECTOR_MODE_SUPPORTED_P
30931 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
30933 #undef TARGET_C_MODE_FOR_SUFFIX
30934 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
30936 #ifdef HAVE_AS_TLS
30937 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
30938 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
30939 #endif
30941 #ifdef SUBTARGET_INSERT_ATTRIBUTES
30942 #undef TARGET_INSERT_ATTRIBUTES
30943 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
30944 #endif
30946 #undef TARGET_MANGLE_TYPE
30947 #define TARGET_MANGLE_TYPE ix86_mangle_type
30949 #undef TARGET_STACK_PROTECT_FAIL
30950 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
30952 #undef TARGET_FUNCTION_VALUE
30953 #define TARGET_FUNCTION_VALUE ix86_function_value
30955 #undef TARGET_FUNCTION_VALUE_REGNO_P
30956 #define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p
30958 #undef TARGET_SECONDARY_RELOAD
30959 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
30961 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
30962 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
30963 ix86_builtin_vectorization_cost
30964 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
30965 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
30966 ix86_vectorize_builtin_vec_perm
30967 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
30968 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
30969 ix86_vectorize_builtin_vec_perm_ok
30971 #undef TARGET_SET_CURRENT_FUNCTION
30972 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
30974 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
30975 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
30977 #undef TARGET_OPTION_SAVE
30978 #define TARGET_OPTION_SAVE ix86_function_specific_save
30980 #undef TARGET_OPTION_RESTORE
30981 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
30983 #undef TARGET_OPTION_PRINT
30984 #define TARGET_OPTION_PRINT ix86_function_specific_print
30986 #undef TARGET_CAN_INLINE_P
30987 #define TARGET_CAN_INLINE_P ix86_can_inline_p
30989 #undef TARGET_EXPAND_TO_RTL_HOOK
30990 #define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi
30992 #undef TARGET_LEGITIMATE_ADDRESS_P
30993 #define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p
30995 #undef TARGET_IRA_COVER_CLASSES
30996 #define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes
30998 #undef TARGET_FRAME_POINTER_REQUIRED
30999 #define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required
31001 #undef TARGET_CAN_ELIMINATE
31002 #define TARGET_CAN_ELIMINATE ix86_can_eliminate
31004 #undef TARGET_ASM_CODE_END
31005 #define TARGET_ASM_CODE_END ix86_code_end
31007 struct gcc_target targetm = TARGET_INITIALIZER;
31009 #include "gt-i386.h"