/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "basic-block.h"
#include "target-def.h"
#include "langhooks.h"
#include "tm-constrs.h"
#include "dwarf2out.h"
static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif
/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode) \
  ((mode) == QImode ? 0 \
   : (mode) == HImode ? 1 \
   : (mode) == SImode ? 2 \
   : (mode) == DImode ? 3 \
   : 4)
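/* QImode..DImode thus map to indices 0..3, and every other mode selects the
   final "other" slot at index 4, matching the five-element multiply and
   divide arrays in the cost tables below.  */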
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)
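/* Worked through: under that assumption COSTS_N_BYTES (2) == 4 ==
   COSTS_N_INSNS (1), so a two-byte add costs exactly one "insn" unit and
   the byte-based size costs stay comparable with the insn-based speed
   costs.  */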
#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
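/* Note on the stringop tables below: each entry names a fallback algorithm
   plus a list of {max_block_size, algorithm} pairs, terminated by a -1
   sentinel that covers all larger (or variable) sizes.  Every cost table
   carries two such pairs, for memcpy and for memset, each with two code-model
   variants (judging by the generic32/generic64 tables, the first variant is
   used for 32-bit and the second for 64-bit code); DUMMY_STRINGOP_ALGS
   fills the slot a given tuning never uses.  */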
struct processor_costs ix86_size_cost = {  /* costs for tuning for size */
  COSTS_N_BYTES (2),       /* cost of an add instruction */
  COSTS_N_BYTES (3),       /* cost of a lea instruction */
  COSTS_N_BYTES (2),       /* variable shift costs */
  COSTS_N_BYTES (3),       /* constant shift costs */
  {COSTS_N_BYTES (3),      /* cost of starting multiply for QI */
   COSTS_N_BYTES (3),      /* HI */
   COSTS_N_BYTES (3),      /* SI */
   COSTS_N_BYTES (3),      /* DI */
   COSTS_N_BYTES (5)},     /* other */
  0,                       /* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),      /* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),      /* HI */
   COSTS_N_BYTES (3),      /* SI */
   COSTS_N_BYTES (3),      /* DI */
   COSTS_N_BYTES (5)},     /* other */
  COSTS_N_BYTES (3),       /* cost of movsx */
  COSTS_N_BYTES (3),       /* cost of movzx */
  2,                       /* cost for loading QImode using movzbl */
  {2, 2, 2},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {2, 2, 2},               /* cost of storing integer registers */
  2,                       /* cost of reg,reg fld/fst */
  {2, 2, 2},               /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {2, 2, 2},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  3,                       /* cost of moving MMX register */
  {3, 3},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {3, 3},                  /* cost of storing MMX registers
                              in SImode and DImode */
  3,                       /* cost of moving SSE register */
  {3, 3, 3},               /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {3, 3, 3},               /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  3,                       /* MMX or SSE register to integer */
  0,                       /* size of l1 cache */
  0,                       /* size of l2 cache */
  0,                       /* size of prefetch block */
  0,                       /* number of parallel prefetches */
  COSTS_N_BYTES (2),       /* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),       /* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),       /* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),       /* cost of FABS instruction.  */
  COSTS_N_BYTES (2),       /* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),       /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1,                       /* scalar_stmt_cost.  */
  1,                       /* scalar load_cost.  */
  1,                       /* scalar_store_cost.  */
  1,                       /* vec_stmt_cost.  */
  1,                       /* vec_to_scalar_cost.  */
  1,                       /* scalar_to_vec_cost.  */
  1,                       /* vec_align_load_cost.  */
  1,                       /* vec_unalign_load_cost.  */
  1,                       /* vec_store_cost.  */
  1,                       /* cond_taken_branch_cost.  */
  1,                       /* cond_not_taken_branch_cost.  */
};
/* Processor costs (relative to an add) */
struct processor_costs i386_cost = {  /* 386 specific costs */
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  COSTS_N_INSNS (1),       /* cost of a lea instruction */
  COSTS_N_INSNS (3),       /* variable shift costs */
  COSTS_N_INSNS (2),       /* constant shift costs */
  {COSTS_N_INSNS (6),      /* cost of starting multiply for QI */
   COSTS_N_INSNS (6),      /* HI */
   COSTS_N_INSNS (6),      /* SI */
   COSTS_N_INSNS (6),      /* DI */
   COSTS_N_INSNS (6)},     /* other */
  COSTS_N_INSNS (1),       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),     /* HI */
   COSTS_N_INSNS (23),     /* SI */
   COSTS_N_INSNS (23),     /* DI */
   COSTS_N_INSNS (23)},    /* other */
  COSTS_N_INSNS (3),       /* cost of movsx */
  COSTS_N_INSNS (2),       /* cost of movzx */
  15,                      /* "large" insn */
  4,                       /* cost for loading QImode using movzbl */
  {2, 4, 2},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {2, 4, 2},               /* cost of storing integer registers */
  2,                       /* cost of reg,reg fld/fst */
  {8, 8, 8},               /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {8, 8, 8},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  2,                       /* cost of moving MMX register */
  {4, 8},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {4, 8},                  /* cost of storing MMX registers
                              in SImode and DImode */
  2,                       /* cost of moving SSE register */
  {4, 8, 16},              /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {4, 8, 16},              /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  3,                       /* MMX or SSE register to integer */
  0,                       /* size of l1 cache */
  0,                       /* size of l2 cache */
  0,                       /* size of prefetch block */
  0,                       /* number of parallel prefetches */
  COSTS_N_INSNS (23),      /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),      /* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),      /* cost of FABS instruction.  */
  COSTS_N_INSNS (24),      /* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),     /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                       /* scalar_stmt_cost.  */
  1,                       /* scalar load_cost.  */
  1,                       /* scalar_store_cost.  */
  1,                       /* vec_stmt_cost.  */
  1,                       /* vec_to_scalar_cost.  */
  1,                       /* scalar_to_vec_cost.  */
  1,                       /* vec_align_load_cost.  */
  2,                       /* vec_unalign_load_cost.  */
  1,                       /* vec_store_cost.  */
  3,                       /* cond_taken_branch_cost.  */
  1,                       /* cond_not_taken_branch_cost.  */
};
struct processor_costs i486_cost = {  /* 486 specific costs */
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  COSTS_N_INSNS (1),       /* cost of a lea instruction */
  COSTS_N_INSNS (3),       /* variable shift costs */
  COSTS_N_INSNS (2),       /* constant shift costs */
  {COSTS_N_INSNS (12),     /* cost of starting multiply for QI */
   COSTS_N_INSNS (12),     /* HI */
   COSTS_N_INSNS (12),     /* SI */
   COSTS_N_INSNS (12),     /* DI */
   COSTS_N_INSNS (12)},    /* other */
  1,                       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),     /* HI */
   COSTS_N_INSNS (40),     /* SI */
   COSTS_N_INSNS (40),     /* DI */
   COSTS_N_INSNS (40)},    /* other */
  COSTS_N_INSNS (3),       /* cost of movsx */
  COSTS_N_INSNS (2),       /* cost of movzx */
  15,                      /* "large" insn */
  4,                       /* cost for loading QImode using movzbl */
  {2, 4, 2},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {2, 4, 2},               /* cost of storing integer registers */
  2,                       /* cost of reg,reg fld/fst */
  {8, 8, 8},               /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {8, 8, 8},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  2,                       /* cost of moving MMX register */
  {4, 8},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {4, 8},                  /* cost of storing MMX registers
                              in SImode and DImode */
  2,                       /* cost of moving SSE register */
  {4, 8, 16},              /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {4, 8, 16},              /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  3,                       /* MMX or SSE register to integer */
  4,                       /* size of l1 cache.  486 has 8kB cache
                              shared for code and data, so 4kB is
                              not really precise.  */
  4,                       /* size of l2 cache */
  0,                       /* size of prefetch block */
  0,                       /* number of parallel prefetches */
  COSTS_N_INSNS (8),       /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),      /* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),       /* cost of FABS instruction.  */
  COSTS_N_INSNS (3),       /* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),      /* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                       /* scalar_stmt_cost.  */
  1,                       /* scalar load_cost.  */
  1,                       /* scalar_store_cost.  */
  1,                       /* vec_stmt_cost.  */
  1,                       /* vec_to_scalar_cost.  */
  1,                       /* scalar_to_vec_cost.  */
  1,                       /* vec_align_load_cost.  */
  2,                       /* vec_unalign_load_cost.  */
  1,                       /* vec_store_cost.  */
  3,                       /* cond_taken_branch_cost.  */
  1,                       /* cond_not_taken_branch_cost.  */
};
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  COSTS_N_INSNS (1),       /* cost of a lea instruction */
  COSTS_N_INSNS (4),       /* variable shift costs */
  COSTS_N_INSNS (1),       /* constant shift costs */
  {COSTS_N_INSNS (11),     /* cost of starting multiply for QI */
   COSTS_N_INSNS (11),     /* HI */
   COSTS_N_INSNS (11),     /* SI */
   COSTS_N_INSNS (11),     /* DI */
   COSTS_N_INSNS (11)},    /* other */
  0,                       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),     /* HI */
   COSTS_N_INSNS (25),     /* SI */
   COSTS_N_INSNS (25),     /* DI */
   COSTS_N_INSNS (25)},    /* other */
  COSTS_N_INSNS (3),       /* cost of movsx */
  COSTS_N_INSNS (2),       /* cost of movzx */
  8,                       /* "large" insn */
  6,                       /* cost for loading QImode using movzbl */
  {2, 4, 2},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {2, 4, 2},               /* cost of storing integer registers */
  2,                       /* cost of reg,reg fld/fst */
  {2, 2, 6},               /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {4, 4, 6},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  8,                       /* cost of moving MMX register */
  {8, 8},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {8, 8},                  /* cost of storing MMX registers
                              in SImode and DImode */
  2,                       /* cost of moving SSE register */
  {4, 8, 16},              /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {4, 8, 16},              /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  3,                       /* MMX or SSE register to integer */
  8,                       /* size of l1 cache.  */
  8,                       /* size of l2 cache */
  0,                       /* size of prefetch block */
  0,                       /* number of parallel prefetches */
  COSTS_N_INSNS (3),       /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),       /* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),       /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),       /* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),      /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                       /* scalar_stmt_cost.  */
  1,                       /* scalar load_cost.  */
  1,                       /* scalar_store_cost.  */
  1,                       /* vec_stmt_cost.  */
  1,                       /* vec_to_scalar_cost.  */
  1,                       /* scalar_to_vec_cost.  */
  1,                       /* vec_align_load_cost.  */
  2,                       /* vec_unalign_load_cost.  */
  1,                       /* vec_store_cost.  */
  3,                       /* cond_taken_branch_cost.  */
  1,                       /* cond_not_taken_branch_cost.  */
};
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  COSTS_N_INSNS (1),       /* cost of a lea instruction */
  COSTS_N_INSNS (1),       /* variable shift costs */
  COSTS_N_INSNS (1),       /* constant shift costs */
  {COSTS_N_INSNS (4),      /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),      /* HI */
   COSTS_N_INSNS (4),      /* SI */
   COSTS_N_INSNS (4),      /* DI */
   COSTS_N_INSNS (4)},     /* other */
  0,                       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),     /* HI */
   COSTS_N_INSNS (17),     /* SI */
   COSTS_N_INSNS (17),     /* DI */
   COSTS_N_INSNS (17)},    /* other */
  COSTS_N_INSNS (1),       /* cost of movsx */
  COSTS_N_INSNS (1),       /* cost of movzx */
  8,                       /* "large" insn */
  2,                       /* cost for loading QImode using movzbl */
  {4, 4, 4},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {2, 2, 2},               /* cost of storing integer registers */
  2,                       /* cost of reg,reg fld/fst */
  {2, 2, 6},               /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {4, 4, 6},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  2,                       /* cost of moving MMX register */
  {2, 2},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {2, 2},                  /* cost of storing MMX registers
                              in SImode and DImode */
  2,                       /* cost of moving SSE register */
  {2, 2, 8},               /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {2, 2, 8},               /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  3,                       /* MMX or SSE register to integer */
  8,                       /* size of l1 cache.  */
  256,                     /* size of l2 cache */
  32,                      /* size of prefetch block */
  6,                       /* number of parallel prefetches */
  COSTS_N_INSNS (3),       /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),       /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),       /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),       /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),      /* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks the inline loop is still a
     noticeable win; for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb apparently has more expensive startup time in the
     CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
                        {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
                        {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
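  /* Decoding the first table above: blocks of at most 128 bytes use an
     inline loop, at most 1024 bytes an unrolled loop, at most 8192 bytes
     rep movsl, and everything larger rep movsb (the -1 sentinel); the
     second table plays the same role for the memset case.  */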
  1,                       /* scalar_stmt_cost.  */
  1,                       /* scalar load_cost.  */
  1,                       /* scalar_store_cost.  */
  1,                       /* vec_stmt_cost.  */
  1,                       /* vec_to_scalar_cost.  */
  1,                       /* scalar_to_vec_cost.  */
  1,                       /* vec_align_load_cost.  */
  2,                       /* vec_unalign_load_cost.  */
  1,                       /* vec_store_cost.  */
  3,                       /* cond_taken_branch_cost.  */
  1,                       /* cond_not_taken_branch_cost.  */
};
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  COSTS_N_INSNS (1),       /* cost of a lea instruction */
  COSTS_N_INSNS (2),       /* variable shift costs */
  COSTS_N_INSNS (1),       /* constant shift costs */
  {COSTS_N_INSNS (3),      /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),      /* HI */
   COSTS_N_INSNS (7),      /* SI */
   COSTS_N_INSNS (7),      /* DI */
   COSTS_N_INSNS (7)},     /* other */
  0,                       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),     /* HI */
   COSTS_N_INSNS (39),     /* SI */
   COSTS_N_INSNS (39),     /* DI */
   COSTS_N_INSNS (39)},    /* other */
  COSTS_N_INSNS (1),       /* cost of movsx */
  COSTS_N_INSNS (1),       /* cost of movzx */
  8,                       /* "large" insn */
  1,                       /* cost for loading QImode using movzbl */
  {1, 1, 1},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {1, 1, 1},               /* cost of storing integer registers */
  1,                       /* cost of reg,reg fld/fst */
  {1, 1, 1},               /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {4, 6, 6},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */

  1,                       /* cost of moving MMX register */
  {1, 1},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {1, 1},                  /* cost of storing MMX registers
                              in SImode and DImode */
  1,                       /* cost of moving SSE register */
  {1, 1, 1},               /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {1, 1, 1},               /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  1,                       /* MMX or SSE register to integer */
  64,                      /* size of l1 cache.  */
  128,                     /* size of l2 cache.  */
  32,                      /* size of prefetch block */
  1,                       /* number of parallel prefetches */
  COSTS_N_INSNS (6),       /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),      /* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),       /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),       /* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),      /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                       /* scalar_stmt_cost.  */
  1,                       /* scalar load_cost.  */
  1,                       /* scalar_store_cost.  */
  1,                       /* vec_stmt_cost.  */
  1,                       /* vec_to_scalar_cost.  */
  1,                       /* scalar_to_vec_cost.  */
  1,                       /* vec_align_load_cost.  */
  2,                       /* vec_unalign_load_cost.  */
  1,                       /* vec_store_cost.  */
  3,                       /* cond_taken_branch_cost.  */
  1,                       /* cond_not_taken_branch_cost.  */
};
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  COSTS_N_INSNS (2),       /* cost of a lea instruction */
  COSTS_N_INSNS (1),       /* variable shift costs */
  COSTS_N_INSNS (1),       /* constant shift costs */
  {COSTS_N_INSNS (3),      /* cost of starting multiply for QI */
   COSTS_N_INSNS (3),      /* HI */
   COSTS_N_INSNS (3),      /* SI */
   COSTS_N_INSNS (3),      /* DI */
   COSTS_N_INSNS (3)},     /* other */
  0,                       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),     /* HI */
   COSTS_N_INSNS (18),     /* SI */
   COSTS_N_INSNS (18),     /* DI */
   COSTS_N_INSNS (18)},    /* other */
  COSTS_N_INSNS (2),       /* cost of movsx */
  COSTS_N_INSNS (2),       /* cost of movzx */
  8,                       /* "large" insn */
  3,                       /* cost for loading QImode using movzbl */
  {4, 5, 4},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {2, 3, 2},               /* cost of storing integer registers */
  4,                       /* cost of reg,reg fld/fst */
  {6, 6, 6},               /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {4, 4, 4},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  2,                       /* cost of moving MMX register */
  {2, 2},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {2, 2},                  /* cost of storing MMX registers
                              in SImode and DImode */
  2,                       /* cost of moving SSE register */
  {2, 2, 8},               /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {2, 2, 8},               /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  6,                       /* MMX or SSE register to integer */
  32,                      /* size of l1 cache.  */
  32,                      /* size of l2 cache.  Some models
                              have integrated l2 cache, but
                              optimizing for k6 is not important
                              enough to worry about that.  */
  32,                      /* size of prefetch block */
  1,                       /* number of parallel prefetches */
  COSTS_N_INSNS (2),       /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),       /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),       /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),       /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),      /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                       /* scalar_stmt_cost.  */
  1,                       /* scalar load_cost.  */
  1,                       /* scalar_store_cost.  */
  1,                       /* vec_stmt_cost.  */
  1,                       /* vec_to_scalar_cost.  */
  1,                       /* scalar_to_vec_cost.  */
  1,                       /* vec_align_load_cost.  */
  2,                       /* vec_unalign_load_cost.  */
  1,                       /* vec_store_cost.  */
  3,                       /* cond_taken_branch_cost.  */
  1,                       /* cond_not_taken_branch_cost.  */
};
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  COSTS_N_INSNS (2),       /* cost of a lea instruction */
  COSTS_N_INSNS (1),       /* variable shift costs */
  COSTS_N_INSNS (1),       /* constant shift costs */
  {COSTS_N_INSNS (5),      /* cost of starting multiply for QI */
   COSTS_N_INSNS (5),      /* HI */
   COSTS_N_INSNS (5),      /* SI */
   COSTS_N_INSNS (5),      /* DI */
   COSTS_N_INSNS (5)},     /* other */
  0,                       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),     /* HI */
   COSTS_N_INSNS (42),     /* SI */
   COSTS_N_INSNS (74),     /* DI */
   COSTS_N_INSNS (74)},    /* other */
  COSTS_N_INSNS (1),       /* cost of movsx */
  COSTS_N_INSNS (1),       /* cost of movzx */
  8,                       /* "large" insn */
  4,                       /* cost for loading QImode using movzbl */
  {3, 4, 3},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {3, 4, 3},               /* cost of storing integer registers */
  4,                       /* cost of reg,reg fld/fst */
  {4, 4, 12},              /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {6, 6, 8},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  2,                       /* cost of moving MMX register */
  {4, 4},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {4, 4},                  /* cost of storing MMX registers
                              in SImode and DImode */
  2,                       /* cost of moving SSE register */
  {4, 4, 6},               /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {4, 4, 5},               /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  5,                       /* MMX or SSE register to integer */
  64,                      /* size of l1 cache.  */
  256,                     /* size of l2 cache.  */
  64,                      /* size of prefetch block */
  6,                       /* number of parallel prefetches */
  COSTS_N_INSNS (4),       /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),       /* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),       /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),       /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),      /* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with the REP prefix (relative to
     loops) than K8 does.  Alignment becomes important after 8 bytes for
     memcpy and 128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                       /* scalar_stmt_cost.  */
  1,                       /* scalar load_cost.  */
  1,                       /* scalar_store_cost.  */
  1,                       /* vec_stmt_cost.  */
  1,                       /* vec_to_scalar_cost.  */
  1,                       /* scalar_to_vec_cost.  */
  1,                       /* vec_align_load_cost.  */
  2,                       /* vec_unalign_load_cost.  */
  1,                       /* vec_store_cost.  */
  3,                       /* cond_taken_branch_cost.  */
  1,                       /* cond_not_taken_branch_cost.  */
};
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  COSTS_N_INSNS (2),       /* cost of a lea instruction */
  COSTS_N_INSNS (1),       /* variable shift costs */
  COSTS_N_INSNS (1),       /* constant shift costs */
  {COSTS_N_INSNS (3),      /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),      /* HI */
   COSTS_N_INSNS (3),      /* SI */
   COSTS_N_INSNS (4),      /* DI */
   COSTS_N_INSNS (5)},     /* other */
  0,                       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),     /* HI */
   COSTS_N_INSNS (42),     /* SI */
   COSTS_N_INSNS (74),     /* DI */
   COSTS_N_INSNS (74)},    /* other */
  COSTS_N_INSNS (1),       /* cost of movsx */
  COSTS_N_INSNS (1),       /* cost of movzx */
  8,                       /* "large" insn */
  4,                       /* cost for loading QImode using movzbl */
  {3, 4, 3},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {3, 4, 3},               /* cost of storing integer registers */
  4,                       /* cost of reg,reg fld/fst */
  {4, 4, 12},              /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {6, 6, 8},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  2,                       /* cost of moving MMX register */
  {3, 3},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {4, 4},                  /* cost of storing MMX registers
                              in SImode and DImode */
  2,                       /* cost of moving SSE register */
  {4, 3, 6},               /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {4, 4, 5},               /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  5,                       /* MMX or SSE register to integer */
  64,                      /* size of l1 cache.  */
  512,                     /* size of l2 cache.  */
  64,                      /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it is probably not a
     good idea to leave the number of prefetches entirely unlimited, as
     their execution also takes some time).  */
  100,                     /* number of parallel prefetches */
  COSTS_N_INSNS (4),       /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),       /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),       /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),       /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),      /* cost of FSQRT instruction.  */
  /* K8 has optimized REP instruction for medium sized blocks, but for very
     small blocks it is better to use a loop.  For large blocks, a libcall
     can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,                       /* scalar_stmt_cost.  */
  2,                       /* scalar load_cost.  */
  2,                       /* scalar_store_cost.  */
  5,                       /* vec_stmt_cost.  */
  0,                       /* vec_to_scalar_cost.  */
  2,                       /* scalar_to_vec_cost.  */
  2,                       /* vec_align_load_cost.  */
  3,                       /* vec_unalign_load_cost.  */
  3,                       /* vec_store_cost.  */
  3,                       /* cond_taken_branch_cost.  */
  2,                       /* cond_not_taken_branch_cost.  */
};
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  COSTS_N_INSNS (2),       /* cost of a lea instruction */
  COSTS_N_INSNS (1),       /* variable shift costs */
  COSTS_N_INSNS (1),       /* constant shift costs */
  {COSTS_N_INSNS (3),      /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),      /* HI */
   COSTS_N_INSNS (3),      /* SI */
   COSTS_N_INSNS (4),      /* DI */
   COSTS_N_INSNS (5)},     /* other */
  0,                       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),     /* HI */
   COSTS_N_INSNS (51),     /* SI */
   COSTS_N_INSNS (83),     /* DI */
   COSTS_N_INSNS (83)},    /* other */
  COSTS_N_INSNS (1),       /* cost of movsx */
  COSTS_N_INSNS (1),       /* cost of movzx */
  8,                       /* "large" insn */
  4,                       /* cost for loading QImode using movzbl */
  {3, 4, 3},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {3, 4, 3},               /* cost of storing integer registers */
  4,                       /* cost of reg,reg fld/fst */
  {4, 4, 12},              /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {6, 6, 8},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  2,                       /* cost of moving MMX register */
  {3, 3},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {4, 4},                  /* cost of storing MMX registers
                              in SImode and DImode */
  2,                       /* cost of moving SSE register */
  {4, 4, 3},               /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {4, 4, 5},               /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  3,                       /* MMX or SSE register to integer */
                           /* On K8:
                               MOVD reg64, xmmreg  Double  FSTORE 4
                               MOVD reg32, xmmreg  Double  FSTORE 4
                              On AMDFAM10:
                               MOVD reg64, xmmreg  Double  FADD 3
                               MOVD reg32, xmmreg  Double  FADD 3  */
  64,                      /* size of l1 cache.  */
  512,                     /* size of l2 cache.  */
  64,                      /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it is probably not a
     good idea to leave the number of prefetches entirely unlimited, as
     their execution also takes some time).  */
  100,                     /* number of parallel prefetches */
  COSTS_N_INSNS (4),       /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),       /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),       /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),       /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),      /* cost of FSQRT instruction.  */

  /* AMDFAM10 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use a loop.  For large blocks, a
     libcall can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,                       /* scalar_stmt_cost.  */
  2,                       /* scalar load_cost.  */
  2,                       /* scalar_store_cost.  */
  6,                       /* vec_stmt_cost.  */
  0,                       /* vec_to_scalar_cost.  */
  2,                       /* scalar_to_vec_cost.  */
  2,                       /* vec_align_load_cost.  */
  2,                       /* vec_unalign_load_cost.  */
  2,                       /* vec_store_cost.  */
  2,                       /* cond_taken_branch_cost.  */
  1,                       /* cond_not_taken_branch_cost.  */
};
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  COSTS_N_INSNS (3),       /* cost of a lea instruction */
  COSTS_N_INSNS (4),       /* variable shift costs */
  COSTS_N_INSNS (4),       /* constant shift costs */
  {COSTS_N_INSNS (15),     /* cost of starting multiply for QI */
   COSTS_N_INSNS (15),     /* HI */
   COSTS_N_INSNS (15),     /* SI */
   COSTS_N_INSNS (15),     /* DI */
   COSTS_N_INSNS (15)},    /* other */
  0,                       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),     /* HI */
   COSTS_N_INSNS (56),     /* SI */
   COSTS_N_INSNS (56),     /* DI */
   COSTS_N_INSNS (56)},    /* other */
  COSTS_N_INSNS (1),       /* cost of movsx */
  COSTS_N_INSNS (1),       /* cost of movzx */
  16,                      /* "large" insn */
  2,                       /* cost for loading QImode using movzbl */
  {4, 5, 4},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {2, 3, 2},               /* cost of storing integer registers */
  2,                       /* cost of reg,reg fld/fst */
  {2, 2, 6},               /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {4, 4, 6},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  2,                       /* cost of moving MMX register */
  {2, 2},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {2, 2},                  /* cost of storing MMX registers
                              in SImode and DImode */
  12,                      /* cost of moving SSE register */
  {12, 12, 12},            /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {2, 2, 8},               /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  10,                      /* MMX or SSE register to integer */
  8,                       /* size of l1 cache.  */
  256,                     /* size of l2 cache.  */
  64,                      /* size of prefetch block */
  6,                       /* number of parallel prefetches */
  COSTS_N_INSNS (5),       /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),       /* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),       /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),       /* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),      /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
              {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                       /* scalar_stmt_cost.  */
  1,                       /* scalar load_cost.  */
  1,                       /* scalar_store_cost.  */
  1,                       /* vec_stmt_cost.  */
  1,                       /* vec_to_scalar_cost.  */
  1,                       /* scalar_to_vec_cost.  */
  1,                       /* vec_align_load_cost.  */
  2,                       /* vec_unalign_load_cost.  */
  1,                       /* vec_store_cost.  */
  3,                       /* cond_taken_branch_cost.  */
  1,                       /* cond_not_taken_branch_cost.  */
};
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  COSTS_N_INSNS (1),       /* cost of a lea instruction */
  COSTS_N_INSNS (1),       /* variable shift costs */
  COSTS_N_INSNS (1),       /* constant shift costs */
  {COSTS_N_INSNS (10),     /* cost of starting multiply for QI */
   COSTS_N_INSNS (10),     /* HI */
   COSTS_N_INSNS (10),     /* SI */
   COSTS_N_INSNS (10),     /* DI */
   COSTS_N_INSNS (10)},    /* other */
  0,                       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),     /* HI */
   COSTS_N_INSNS (66),     /* SI */
   COSTS_N_INSNS (66),     /* DI */
   COSTS_N_INSNS (66)},    /* other */
  COSTS_N_INSNS (1),       /* cost of movsx */
  COSTS_N_INSNS (1),       /* cost of movzx */
  16,                      /* "large" insn */
  4,                       /* cost for loading QImode using movzbl */
  {4, 4, 4},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {4, 4, 4},               /* cost of storing integer registers */
  3,                       /* cost of reg,reg fld/fst */
  {12, 12, 12},            /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {4, 4, 4},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  6,                       /* cost of moving MMX register */
  {12, 12},                /* cost of loading MMX registers
                              in SImode and DImode */
  {12, 12},                /* cost of storing MMX registers
                              in SImode and DImode */
  6,                       /* cost of moving SSE register */
  {12, 12, 12},            /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {12, 12, 12},            /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  8,                       /* MMX or SSE register to integer */
  8,                       /* size of l1 cache.  */
  1024,                    /* size of l2 cache.  */
  128,                     /* size of prefetch block */
  8,                       /* number of parallel prefetches */
  COSTS_N_INSNS (6),       /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),       /* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),       /* cost of FABS instruction.  */
  COSTS_N_INSNS (3),       /* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),      /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
              {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
              {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                       /* scalar_stmt_cost.  */
  1,                       /* scalar load_cost.  */
  1,                       /* scalar_store_cost.  */
  1,                       /* vec_stmt_cost.  */
  1,                       /* vec_to_scalar_cost.  */
  1,                       /* scalar_to_vec_cost.  */
  1,                       /* vec_align_load_cost.  */
  2,                       /* vec_unalign_load_cost.  */
  1,                       /* vec_store_cost.  */
  3,                       /* cond_taken_branch_cost.  */
  1,                       /* cond_not_taken_branch_cost.  */
};
struct processor_costs core2_cost = {
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,   /* cost of a lea instruction */
  COSTS_N_INSNS (1),       /* variable shift costs */
  COSTS_N_INSNS (1),       /* constant shift costs */
  {COSTS_N_INSNS (3),      /* cost of starting multiply for QI */
   COSTS_N_INSNS (3),      /* HI */
   COSTS_N_INSNS (3),      /* SI */
   COSTS_N_INSNS (3),      /* DI */
   COSTS_N_INSNS (3)},     /* other */
  0,                       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (22),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (22),     /* HI */
   COSTS_N_INSNS (22),     /* SI */
   COSTS_N_INSNS (22),     /* DI */
   COSTS_N_INSNS (22)},    /* other */
  COSTS_N_INSNS (1),       /* cost of movsx */
  COSTS_N_INSNS (1),       /* cost of movzx */
  8,                       /* "large" insn */
  2,                       /* cost for loading QImode using movzbl */
  {6, 6, 6},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {4, 4, 4},               /* cost of storing integer registers */
  2,                       /* cost of reg,reg fld/fst */
  {6, 6, 6},               /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {4, 4, 4},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  2,                       /* cost of moving MMX register */
  {6, 6},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {4, 4},                  /* cost of storing MMX registers
                              in SImode and DImode */
  2,                       /* cost of moving SSE register */
  {6, 6, 6},               /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {4, 4, 4},               /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  2,                       /* MMX or SSE register to integer */
  32,                      /* size of l1 cache.  */
  2048,                    /* size of l2 cache.  */
  128,                     /* size of prefetch block */
  8,                       /* number of parallel prefetches */
  3,                       /* Branch cost */
  COSTS_N_INSNS (3),       /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),       /* cost of FMUL instruction.  */
  COSTS_N_INSNS (32),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),       /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),       /* cost of FCHS instruction.  */
  COSTS_N_INSNS (58),      /* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                       /* scalar_stmt_cost.  */
  1,                       /* scalar load_cost.  */
  1,                       /* scalar_store_cost.  */
  1,                       /* vec_stmt_cost.  */
  1,                       /* vec_to_scalar_cost.  */
  1,                       /* scalar_to_vec_cost.  */
  1,                       /* vec_align_load_cost.  */
  2,                       /* vec_unalign_load_cost.  */
  1,                       /* vec_store_cost.  */
  3,                       /* cond_taken_branch_cost.  */
  1,                       /* cond_not_taken_branch_cost.  */
};
struct processor_costs atom_cost = {
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,   /* cost of a lea instruction */
  COSTS_N_INSNS (1),       /* variable shift costs */
  COSTS_N_INSNS (1),       /* constant shift costs */
  {COSTS_N_INSNS (3),      /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),      /* HI */
   COSTS_N_INSNS (3),      /* SI */
   COSTS_N_INSNS (4),      /* DI */
   COSTS_N_INSNS (2)},     /* other */
  0,                       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),     /* HI */
   COSTS_N_INSNS (42),     /* SI */
   COSTS_N_INSNS (74),     /* DI */
   COSTS_N_INSNS (74)},    /* other */
  COSTS_N_INSNS (1),       /* cost of movsx */
  COSTS_N_INSNS (1),       /* cost of movzx */
  8,                       /* "large" insn */
  17,                      /* MOVE_RATIO */
  2,                       /* cost for loading QImode using movzbl */
  {4, 4, 4},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {4, 4, 4},               /* cost of storing integer registers */
  4,                       /* cost of reg,reg fld/fst */
  {12, 12, 12},            /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {6, 6, 8},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  2,                       /* cost of moving MMX register */
  {8, 8},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {8, 8},                  /* cost of storing MMX registers
                              in SImode and DImode */
  2,                       /* cost of moving SSE register */
  {8, 8, 8},               /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {8, 8, 8},               /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  5,                       /* MMX or SSE register to integer */
  32,                      /* size of l1 cache.  */
  256,                     /* size of l2 cache.  */
  64,                      /* size of prefetch block */
  6,                       /* number of parallel prefetches */
  3,                       /* Branch cost */
  COSTS_N_INSNS (8),       /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),       /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),       /* cost of FABS instruction.  */
  COSTS_N_INSNS (8),       /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),      /* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                       /* scalar_stmt_cost.  */
  1,                       /* scalar load_cost.  */
  1,                       /* scalar_store_cost.  */
  1,                       /* vec_stmt_cost.  */
  1,                       /* vec_to_scalar_cost.  */
  1,                       /* scalar_to_vec_cost.  */
  1,                       /* vec_align_load_cost.  */
  2,                       /* vec_unalign_load_cost.  */
  1,                       /* vec_store_cost.  */
  3,                       /* cond_taken_branch_cost.  */
  1,                       /* cond_not_taken_branch_cost.  */
};
/* Generic64 should produce code tuned for Nocona and K8.  */

struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  /* On all chips taken into consideration, lea is 2 cycles or more.  With
     this cost, however, our current implementation of synth_mult results in
     the use of unnecessary temporary registers, causing a regression on
     several SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1,   /* cost of a lea instruction */
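  /* Given COSTS_N_INSNS (N) == (N)*4 (see the COSTS_N_BYTES comment near the
     top of this file), the expression above evaluates to 5, i.e. deliberately
     between one and two add units: lea is discouraged slightly without being
     priced as a full two-instruction sequence.  */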
  COSTS_N_INSNS (1),       /* variable shift costs */
  COSTS_N_INSNS (1),       /* constant shift costs */
  {COSTS_N_INSNS (3),      /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),      /* HI */
   COSTS_N_INSNS (3),      /* SI */
   COSTS_N_INSNS (4),      /* DI */
   COSTS_N_INSNS (2)},     /* other */
  0,                       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),     /* HI */
   COSTS_N_INSNS (42),     /* SI */
   COSTS_N_INSNS (74),     /* DI */
   COSTS_N_INSNS (74)},    /* other */
  COSTS_N_INSNS (1),       /* cost of movsx */
  COSTS_N_INSNS (1),       /* cost of movzx */
  8,                       /* "large" insn */
  17,                      /* MOVE_RATIO */
  4,                       /* cost for loading QImode using movzbl */
  {4, 4, 4},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {4, 4, 4},               /* cost of storing integer registers */
  4,                       /* cost of reg,reg fld/fst */
  {12, 12, 12},            /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {6, 6, 8},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  2,                       /* cost of moving MMX register */
  {8, 8},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {8, 8},                  /* cost of storing MMX registers
                              in SImode and DImode */
  2,                       /* cost of moving SSE register */
  {8, 8, 8},               /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {8, 8, 8},               /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  5,                       /* MMX or SSE register to integer */
  32,                      /* size of l1 cache.  */
  512,                     /* size of l2 cache.  */
  64,                      /* size of prefetch block */
  6,                       /* number of parallel prefetches */
  /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
     value is increased to the perhaps more appropriate value of 5.  */
  3,                       /* Branch cost */
  COSTS_N_INSNS (8),       /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),       /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),       /* cost of FABS instruction.  */
  COSTS_N_INSNS (8),       /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),      /* cost of FSQRT instruction.  */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                       /* scalar_stmt_cost.  */
  1,                       /* scalar load_cost.  */
  1,                       /* scalar_store_cost.  */
  1,                       /* vec_stmt_cost.  */
  1,                       /* vec_to_scalar_cost.  */
  1,                       /* scalar_to_vec_cost.  */
  1,                       /* vec_align_load_cost.  */
  2,                       /* vec_unalign_load_cost.  */
  1,                       /* vec_store_cost.  */
  3,                       /* cond_taken_branch_cost.  */
  1,                       /* cond_not_taken_branch_cost.  */
};
/* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona
   and K8.  */

struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,   /* cost of a lea instruction */
  COSTS_N_INSNS (1),       /* variable shift costs */
  COSTS_N_INSNS (1),       /* constant shift costs */
  {COSTS_N_INSNS (3),      /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),      /* HI */
   COSTS_N_INSNS (3),      /* SI */
   COSTS_N_INSNS (4),      /* DI */
   COSTS_N_INSNS (2)},     /* other */
  0,                       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),     /* HI */
   COSTS_N_INSNS (42),     /* SI */
   COSTS_N_INSNS (74),     /* DI */
   COSTS_N_INSNS (74)},    /* other */
  COSTS_N_INSNS (1),       /* cost of movsx */
  COSTS_N_INSNS (1),       /* cost of movzx */
  8,                       /* "large" insn */
  17,                      /* MOVE_RATIO */
  4,                       /* cost for loading QImode using movzbl */
  {4, 4, 4},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {4, 4, 4},               /* cost of storing integer registers */
  4,                       /* cost of reg,reg fld/fst */
  {12, 12, 12},            /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {6, 6, 8},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  2,                       /* cost of moving MMX register */
  {8, 8},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {8, 8},                  /* cost of storing MMX registers
                              in SImode and DImode */
  2,                       /* cost of moving SSE register */
  {8, 8, 8},               /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {8, 8, 8},               /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  5,                       /* MMX or SSE register to integer */
  32,                      /* size of l1 cache.  */
  256,                     /* size of l2 cache.  */
  64,                      /* size of prefetch block */
  6,                       /* number of parallel prefetches */
  3,                       /* Branch cost */
  COSTS_N_INSNS (8),       /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),       /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),       /* cost of FABS instruction.  */
  COSTS_N_INSNS (8),       /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),      /* cost of FSQRT instruction.  */
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                       /* scalar_stmt_cost.  */
  1,                       /* scalar load_cost.  */
  1,                       /* scalar_store_cost.  */
  1,                       /* vec_stmt_cost.  */
  1,                       /* vec_to_scalar_cost.  */
  1,                       /* scalar_to_vec_cost.  */
  1,                       /* vec_align_load_cost.  */
  2,                       /* vec_unalign_load_cost.  */
  1,                       /* vec_store_cost.  */
  3,                       /* cond_taken_branch_cost.  */
  1,                       /* cond_not_taken_branch_cost.  */
};
const struct processor_costs *ix86_cost = &pentium_cost;
/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
#define m_CORE2 (1<<PROCESSOR_CORE2)
#define m_ATOM (1<<PROCESSOR_ATOM)

#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
#define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10)

#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
/* Generic instruction choice should be a common subset of the supported
   CPUs (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
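/* A rough sketch of how these masks are meant to be consumed (the actual
   initialization happens in the option-override code later in this file;
   the snippet is illustrative, not a verbatim copy):

     unsigned int ix86_tune_mask = 1u << ix86_tune;
     for (i = 0; i < X86_TUNE_LAST; ++i)
       ix86_tune_features[i]
         = !!(initial_ix86_tune_features[i] & ix86_tune_mask);

   So each entry of initial_ix86_tune_features below is simply a bitset of
   the PROCESSOR_* values for which that tuning knob is enabled.  */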
/* Feature tests against the various tunings.  */
unsigned char ix86_tune_features[X86_TUNE_LAST];

/* Feature tests against the various tunings used to create ix86_tune_features
   based on the processor mask.  */
static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
  /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
     negatively, so enabling it for Generic64 seems like a good code-size
     tradeoff.  We can't enable it for 32-bit generic because it does not
     work well with PPro-based chips.  */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,
  /* X86_TUNE_PUSH_MEMORY */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_ZERO_EXTEND_WITH_AND */

  /* X86_TUNE_UNROLL_STRLEN */
  m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
  | m_CORE2 | m_GENERIC,

  /* X86_TUNE_DEEP_BRANCH_PREDICTION */
  m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
  /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
     on simulation results.  But after P4 was made, no performance benefit
     was observed with branch hints; they also increase code size.
     As a result, icc never generates branch hints.  */
  /* X86_TUNE_DOUBLE_WITH_ADD */

  /* X86_TUNE_USE_SAHF */
  m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
     partial dependencies.  */
  m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
  | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
  /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
     register stalls on the Generic32 compilation setting as well.  However,
     in the current implementation partial register stalls are not eliminated
     very well: they can be introduced via subregs synthesized by combine,
     and can happen in caller/callee saving sequences.  Because this option
     pays back little on PPro-based chips and conflicts with the
     partial-register dependencies used by Athlon/P4-based chips, it is
     better to leave it off for generic32 for now.  */
  /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
  m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_HIMODE_FIOP */
  m_386 | m_486 | m_K6_GEODE,

  /* X86_TUNE_USE_SIMODE_FIOP */
  ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_MOV0 */

  /* X86_TUNE_USE_CLTD */
  ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx.  */

  /* X86_TUNE_SPLIT_LONG_MOVES */

  /* X86_TUNE_READ_MODIFY_WRITE */

  /* X86_TUNE_READ_MODIFY */

  /* X86_TUNE_PROMOTE_QIMODE */
  m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
  | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,

  /* X86_TUNE_FAST_PREFIX */
  ~(m_PENT | m_486 | m_386),

  /* X86_TUNE_SINGLE_STRINGOP */
  m_386 | m_PENT4 | m_NOCONA,

  /* X86_TUNE_QIMODE_MATH */
  /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
     register stalls.  Just like X86_TUNE_PARTIAL_REG_STALL, this option
     might be considered for Generic32 if our scheme for avoiding partial
     stalls were more effective.  */
  /* X86_TUNE_PROMOTE_QI_REGS */

  /* X86_TUNE_PROMOTE_HI_REGS */

  /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop.  */
  m_ATOM | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA
  | m_CORE2 | m_GENERIC,

  /* X86_TUNE_ADD_ESP_8 */
  m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_K6_GEODE | m_386
  | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SUB_ESP_4 */
  m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2

  /* X86_TUNE_SUB_ESP_8 */
  m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_386 | m_486
  | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
     for DFmode copies */
  ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
    | m_GENERIC | m_GEODE),

  /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
  m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
  /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
     conflict here between PPro/Pentium4-based chips that treat 128-bit
     SSE registers as single units and K8-based chips that divide SSE
     registers into two 64-bit halves.  This knob promotes all store
     destinations to 128 bits to allow register renaming on 128-bit SSE
     units, but usually results in one extra micro-op on 64-bit SSE units.
     Experimental results show that disabling this option on P4 brings over
     a 20% SPECfp regression, while enabling it on K8 brings a roughly 2.4%
     regression that can be partly masked by careful scheduling of moves.  */
  m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC

  /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
  /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where types and dependencies
     are resolved on SSE register parts instead of whole registers, so we may
     maintain just the lower part of scalar values in the proper format,
     leaving the upper part undefined.  */
  /* X86_TUNE_SSE_TYPELESS_STORES */

  /* X86_TUNE_SSE_LOAD0_BY_PXOR */
  m_PPRO | m_PENT4 | m_NOCONA,

  /* X86_TUNE_MEMORY_MISMATCH_STALL */
  m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_PROLOGUE_USING_MOVE */
  m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_EPILOGUE_USING_MOVE */
  m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SHIFT1 */

  /* X86_TUNE_USE_FFREEP */

  /* X86_TUNE_INTER_UNIT_MOVES */
  ~(m_AMD_MULTIPLE | m_GENERIC),

  /* X86_TUNE_INTER_UNIT_CONVERSIONS */

  /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
     than 4 branch instructions in the 16 byte window.  */
  m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2

  /* X86_TUNE_SCHEDULE */
  m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2

  /* X86_TUNE_USE_BT */
  m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_INCDEC */
  ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),

  /* X86_TUNE_PAD_RETURNS */
  m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,

  /* X86_TUNE_EXT_80387_CONSTANTS */
  m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
  | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SHORTEN_X87_SSE */

  /* X86_TUNE_AVOID_VECTOR_DECODE */
  /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for
     HImode and SImode multiply, but the 386 and 486 do HImode multiply
     faster.  */
  /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of a 32-bit constant and memory is a
     vector path on AMD machines.  */
  m_K8 | m_GENERIC64 | m_AMDFAM10,

  /* X86_TUNE_SLOW_IMUL_IMM8: Imul of an 8-bit constant is a vector path on
     AMD machines.  */
  m_K8 | m_GENERIC64 | m_AMDFAM10,
/* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
   than a MOV.  */
1510 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1511 but one byte longer. */
1514 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
1515 operand that cannot be represented using a modRM byte. The XOR
1516 replacement is long decoded, so this split helps here as well. */
/* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
   from FP to FP.  */
1521 m_AMDFAM10 | m_GENERIC,
1523 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1524 from integer to FP. */
1527 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1528 with a subsequent conditional jump instruction into a single
1529 compare-and-branch uop. */
1532 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
1533 will impact LEA instruction selection. */
1537 /* Feature tests against the various architecture variations. */
1538 unsigned char ix86_arch_features[X86_ARCH_LAST];
1540 /* Feature tests against the various architecture variations, used to create
1541 ix86_arch_features based on the processor mask. */
1542 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1543 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1544 ~(m_386 | m_486 | m_PENT | m_K6),
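  /* Note the complemented form: the feature is marked as present on
     every processor except the ones listed, so newly added m_* entries
     get it by default.  */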
1546 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1549 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1552 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1555 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1559 static const unsigned int x86_accumulate_outgoing_args
1560 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1563 static const unsigned int x86_arch_always_fancy_math_387
1564 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1565 | m_NOCONA | m_CORE2 | m_GENERIC;
1567 static enum stringop_alg stringop_alg = no_stringop;
/* In case the average insn count for single function invocation is
   lower than this constant, emit fast (but longer) prologue and
   epilogue code.  */
1572 #define FAST_PROLOGUE_INSN_COUNT 20
1574 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
1575 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1576 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1577 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1579 /* Array of the smallest class containing reg number REGNO, indexed by
1580 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1582 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1584 /* ax, dx, cx, bx */
1585 AREG, DREG, CREG, BREG,
1586 /* si, di, bp, sp */
1587 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1589 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1590 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1593 /* flags, fpsr, fpcr, frame */
1594 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1596 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1599 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1602 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1603 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1604 /* SSE REX registers */
1605 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1609 /* The "default" register map used in 32bit mode. */
1611 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1613 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1614 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1615 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1616 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1617 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1618 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1619 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1622 /* The "default" register map used in 64bit mode. */
1624 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1626 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1627 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1628 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1629 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1630 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1631 8,9,10,11,12,13,14,15, /* extended integer registers */
1632 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1635 /* Define the register numbers to be used in Dwarf debugging information.
1636 The SVR4 reference port C compiler uses the following register numbers
1637 in its Dwarf output code:
1638 0 for %eax (gcc regno = 0)
1639 1 for %ecx (gcc regno = 2)
1640 2 for %edx (gcc regno = 1)
1641 3 for %ebx (gcc regno = 3)
1642 4 for %esp (gcc regno = 7)
1643 5 for %ebp (gcc regno = 6)
1644 6 for %esi (gcc regno = 4)
1645 7 for %edi (gcc regno = 5)
1646 The following three DWARF register numbers are never generated by
1647 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1648 believes these numbers have these meanings.
1649 8 for %eip (no gcc equivalent)
1650 9 for %eflags (gcc regno = 17)
1651 10 for %trapno (no gcc equivalent)
1652 It is not at all clear how we should number the FP stack registers
1653 for the x86 architecture. If the version of SDB on x86/svr4 were
1654 a bit less brain dead with respect to floating-point then we would
1655 have a precedent to follow with respect to DWARF register numbers
1656 for x86 FP registers, but the SDB on x86/svr4 is so completely
1657 broken with respect to FP registers that it is hardly worth thinking
1658 of it as something to strive for compatibility with.
1659 The version of x86/svr4 SDB I have at the moment does (partially)
1660 seem to believe that DWARF register number 11 is associated with
1661 the x86 register %st(0), but that's about all. Higher DWARF
1662 register numbers don't seem to be associated with anything in
1663 particular, and even for DWARF regno 11, SDB only seems to under-
1664 stand that it should say that a variable lives in %st(0) (when
1665 asked via an `=' command) if we said it was in DWARF regno 11,
1666 but SDB still prints garbage when asked for the value of the
1667 variable in question (via a `/' command).
1668 (Also note that the labels SDB prints for various FP stack regs
1669 when doing an `x' command are all wrong.)
1670 Note that these problems generally don't affect the native SVR4
1671 C compiler because it doesn't allow the use of -O with -g and
1672 because when it is *not* optimizing, it allocates a memory
1673 location for each floating-point variable, and the memory
1674 location is what gets described in the DWARF AT_location
1675 attribute for the variable in question.
1676 Regardless of the severe mental illness of the x86/svr4 SDB, we
1677 do something sensible here and we use the following DWARF
   register numbers.  Note that these are all stack-top-relative
   numbers:
1680 11 for %st(0) (gcc regno = 8)
1681 12 for %st(1) (gcc regno = 9)
1682 13 for %st(2) (gcc regno = 10)
1683 14 for %st(3) (gcc regno = 11)
1684 15 for %st(4) (gcc regno = 12)
1685 16 for %st(5) (gcc regno = 13)
1686 17 for %st(6) (gcc regno = 14)
1687 18 for %st(7) (gcc regno = 15)
1689 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1691 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1692 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1693 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1694 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1695 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1696 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1697 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1700 /* Test and compare insns in i386.md store the information needed to
1701 generate branch and scc insns here. */
1703 rtx ix86_compare_op0 = NULL_RTX;
1704 rtx ix86_compare_op1 = NULL_RTX;
1706 /* Define parameter passing and return registers. */
1708 static int const x86_64_int_parameter_registers[6] =
1710 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
1713 static int const x86_64_ms_abi_int_parameter_registers[4] =
1715 CX_REG, DX_REG, R8_REG, R9_REG
1718 static int const x86_64_int_return_registers[4] =
1720 AX_REG, DX_REG, DI_REG, SI_REG
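/* So under the SysV ABI the first two integer arguments arrive in %rdi
   and %rsi, whereas the MS ABI starts at %rcx and %rdx; integer return
   values use %rax, with %rdx holding a second word when needed.  */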
1723 /* Define the structure for the machine field in struct function. */
1725 struct GTY(()) stack_local_entry {
1726 unsigned short mode;
1729 struct stack_local_entry *next;
/* Structure describing stack frame layout.
   Stack grows downward:

   [arguments]
					      <- ARG_POINTER
   saved pc

   saved frame pointer if frame_pointer_needed
					      <- HARD_FRAME_POINTER
   [saved regs]

   [va_arg registers]  \
   [frame]              > to_allocate	      <- FRAME_POINTER
   [padding]           /
  */
1762 HOST_WIDE_INT frame;
1764 int outgoing_arguments_size;
1767 HOST_WIDE_INT to_allocate;
1768 /* The offsets relative to ARG_POINTER. */
1769 HOST_WIDE_INT frame_pointer_offset;
1770 HOST_WIDE_INT hard_frame_pointer_offset;
1771 HOST_WIDE_INT stack_pointer_offset;
1773 /* When save_regs_using_mov is set, emit prologue using
1774 move instead of push instructions. */
1775 bool save_regs_using_mov;
1778 /* Code model option. */
1779 enum cmodel ix86_cmodel;
1781 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1783 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1785 /* Which unit we are generating floating point math for. */
1786 enum fpmath_unit ix86_fpmath;
1788 /* Which cpu are we scheduling for. */
1789 enum attr_cpu ix86_schedule;
1791 /* Which cpu are we optimizing for. */
1792 enum processor_type ix86_tune;
1794 /* Which instruction set architecture to use. */
1795 enum processor_type ix86_arch;
/* True if the SSE prefetch instruction is not a NOP.  */
1798 int x86_prefetch_sse;
1800 /* ix86_regparm_string as a number */
1801 static int ix86_regparm;
1803 /* -mstackrealign option */
1804 extern int ix86_force_align_arg_pointer;
1805 static const char ix86_force_align_arg_pointer_string[]
1806 = "force_align_arg_pointer";
1808 static rtx (*ix86_gen_leave) (void);
1809 static rtx (*ix86_gen_pop1) (rtx);
1810 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1811 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1812 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
1813 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1814 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1815 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1817 /* Preferred alignment for stack boundary in bits. */
1818 unsigned int ix86_preferred_stack_boundary;
/* Alignment for incoming stack boundary in bits specified at
   command line.  */
1822 static unsigned int ix86_user_incoming_stack_boundary;
1824 /* Default alignment for incoming stack boundary in bits. */
1825 static unsigned int ix86_default_incoming_stack_boundary;
1827 /* Alignment for incoming stack boundary in bits. */
1828 unsigned int ix86_incoming_stack_boundary;
/* The ABI used by the target.  */
1831 enum calling_abi ix86_abi;
1833 /* Values 1-5: see jump.c */
1834 int ix86_branch_cost;
1836 /* Calling abi specific va_list type nodes. */
1837 static GTY(()) tree sysv_va_list_type_node;
1838 static GTY(()) tree ms_va_list_type_node;
1840 /* Variables which are this size or smaller are put in the data/bss
1841 or ldata/lbss sections. */
1843 int ix86_section_threshold = 65536;
1845 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1846 char internal_label_prefix[16];
1847 int internal_label_prefix_len;
1849 /* Fence to use after loop using movnt. */
/* Register class used for passing given 64bit part of the argument.
   These represent classes as documented by the psABI, with the
   exception of the SSESF and SSEDF classes, which are basically the
   SSE class: gcc just uses SFmode or DFmode moves instead of DImode
   to avoid reformatting penalties.

   Similarly we play games with INTEGERSI_CLASS to use cheaper SImode
   moves whenever possible (upper half does contain padding).  */
1859 enum x86_64_reg_class
1862 X86_64_INTEGER_CLASS,
1863 X86_64_INTEGERSI_CLASS,
1870 X86_64_COMPLEX_X87_CLASS,
1874 #define MAX_CLASSES 4
/* Table of constants used by fldpi, fldln2, etc.  */
static REAL_VALUE_TYPE ext_80387_constants_table[5];
static bool ext_80387_constants_init = false;
1881 static struct machine_function * ix86_init_machine_status (void);
1882 static rtx ix86_function_value (const_tree, const_tree, bool);
1883 static rtx ix86_static_chain (const_tree, bool);
1884 static int ix86_function_regparm (const_tree, const_tree);
1885 static void ix86_compute_frame_layout (struct ix86_frame *);
1886 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1888 static void ix86_add_new_builtins (int);
1889 static rtx ix86_expand_vec_perm_builtin (tree);
1891 enum ix86_function_specific_strings
1893 IX86_FUNCTION_SPECIFIC_ARCH,
1894 IX86_FUNCTION_SPECIFIC_TUNE,
1895 IX86_FUNCTION_SPECIFIC_FPMATH,
1896 IX86_FUNCTION_SPECIFIC_MAX
1899 static char *ix86_target_string (int, int, const char *, const char *,
1900 const char *, bool);
1901 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1902 static void ix86_function_specific_save (struct cl_target_option *);
1903 static void ix86_function_specific_restore (struct cl_target_option *);
1904 static void ix86_function_specific_print (FILE *, int,
1905 struct cl_target_option *);
1906 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
1907 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
1908 static bool ix86_can_inline_p (tree, tree);
1909 static void ix86_set_current_function (tree);
1910 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
1912 static enum calling_abi ix86_function_abi (const_tree);
1915 #ifndef SUBTARGET32_DEFAULT_CPU
1916 #define SUBTARGET32_DEFAULT_CPU "i386"
/* The svr4 ABI for the i386 says that records and unions are returned
   in memory.  */
1921 #ifndef DEFAULT_PCC_STRUCT_RETURN
1922 #define DEFAULT_PCC_STRUCT_RETURN 1
/* Whether -mtune= or -march= were specified.  */
1926 static int ix86_tune_defaulted;
1927 static int ix86_arch_specified;
1929 /* Bit flags that specify the ISA we are compiling for. */
1930 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
1932 /* A mask of ix86_isa_flags that includes bit X if X
1933 was set or cleared on the command line. */
1934 static int ix86_isa_flags_explicit;
1936 /* Define a set of ISAs which are available when a given ISA is
1937 enabled. MMX and SSE ISAs are handled separately. */
1939 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
1940 #define OPTION_MASK_ISA_3DNOW_SET \
1941 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
1943 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
1944 #define OPTION_MASK_ISA_SSE2_SET \
1945 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
1946 #define OPTION_MASK_ISA_SSE3_SET \
1947 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
1948 #define OPTION_MASK_ISA_SSSE3_SET \
1949 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
1950 #define OPTION_MASK_ISA_SSE4_1_SET \
1951 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
1952 #define OPTION_MASK_ISA_SSE4_2_SET \
1953 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
1954 #define OPTION_MASK_ISA_AVX_SET \
1955 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
1956 #define OPTION_MASK_ISA_FMA_SET \
1957 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
/* SSE4 includes both SSE4.1 and SSE4.2.  -msse4 should be the same
   as -msse4.2.  */
1961 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
1963 #define OPTION_MASK_ISA_SSE4A_SET \
1964 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
1965 #define OPTION_MASK_ISA_FMA4_SET \
1966 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
1967 | OPTION_MASK_ISA_AVX_SET)
1968 #define OPTION_MASK_ISA_XOP_SET \
1969 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
1970 #define OPTION_MASK_ISA_LWP_SET \
/* AES and PCLMUL need SSE2 because they use xmm registers.  */
1974 #define OPTION_MASK_ISA_AES_SET \
1975 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
1976 #define OPTION_MASK_ISA_PCLMUL_SET \
1977 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
1979 #define OPTION_MASK_ISA_ABM_SET \
  (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT_SET)
1982 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
1983 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
1984 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
1985 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
1986 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
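/* For example, OPTION_MASK_ISA_SSE4_1_SET expands transitively to
   SSE4_1 | SSSE3 | SSE3 | SSE2 | SSE, so a plain -msse4.1 switches on
   the whole chain of ISAs it depends on.  */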
1988 /* Define a set of ISAs which aren't available when a given ISA is
1989 disabled. MMX and SSE ISAs are handled separately. */
1991 #define OPTION_MASK_ISA_MMX_UNSET \
1992 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
1993 #define OPTION_MASK_ISA_3DNOW_UNSET \
1994 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
1995 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
1997 #define OPTION_MASK_ISA_SSE_UNSET \
1998 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
1999 #define OPTION_MASK_ISA_SSE2_UNSET \
2000 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
2001 #define OPTION_MASK_ISA_SSE3_UNSET \
2002 (OPTION_MASK_ISA_SSE3 \
2003 | OPTION_MASK_ISA_SSSE3_UNSET \
2004 | OPTION_MASK_ISA_SSE4A_UNSET )
2005 #define OPTION_MASK_ISA_SSSE3_UNSET \
2006 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2007 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2008 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2009 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2010 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2011 #define OPTION_MASK_ISA_AVX_UNSET \
2012 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2013 | OPTION_MASK_ISA_FMA4_UNSET)
2014 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
/* SSE4 includes both SSE4.1 and SSE4.2.  -mno-sse4 should be the same
   as -mno-sse4.1.  */
2018 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2020 #define OPTION_MASK_ISA_SSE4A_UNSET \
2021 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2023 #define OPTION_MASK_ISA_FMA4_UNSET \
2024 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2025 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2026 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2028 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2029 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2030 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2031 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2032 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2033 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2034 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2035 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
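/* Conversely, OPTION_MASK_ISA_SSE2_UNSET transitively covers everything
   that depends on SSE2, so -mno-sse2 also disables SSE3, SSSE3, SSE4.1,
   SSE4.2, SSE4A, AVX, FMA, FMA4 and XOP.  */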
2037 /* Vectorization library interface and handlers. */
2038 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
2039 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2040 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2042 /* Processor target table, indexed by processor number */
2045 const struct processor_costs *cost; /* Processor costs */
2046 const int align_loop; /* Default alignments. */
2047 const int align_loop_max_skip;
2048 const int align_jump;
2049 const int align_jump_max_skip;
2050 const int align_func;
2053 static const struct ptt processor_target_table[PROCESSOR_max] =
2055 {&i386_cost, 4, 3, 4, 3, 4},
2056 {&i486_cost, 16, 15, 16, 15, 16},
2057 {&pentium_cost, 16, 7, 16, 7, 16},
2058 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2059 {&geode_cost, 0, 0, 0, 0, 0},
2060 {&k6_cost, 32, 7, 32, 7, 32},
2061 {&athlon_cost, 16, 7, 16, 7, 16},
2062 {&pentium4_cost, 0, 0, 0, 0, 0},
2063 {&k8_cost, 16, 7, 16, 7, 16},
2064 {&nocona_cost, 0, 0, 0, 0, 0},
2065 {&core2_cost, 16, 10, 16, 10, 16},
2066 {&generic32_cost, 16, 7, 16, 7, 16},
2067 {&generic64_cost, 16, 10, 16, 10, 16},
2068 {&amdfam10_cost, 32, 24, 32, 7, 32},
2069 {&atom_cost, 16, 7, 16, 7, 16}
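/* Reading one row: the core2 entry requests 16-byte alignment for
   loops, jump targets and functions, padding with at most 10 bytes of
   nops to reach it; rows of zeros (pentium4, nocona, geode) request no
   extra alignment at all.  */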
2072 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2098 /* Implement TARGET_HANDLE_OPTION. */
2101 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
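  /* Every OPT_m<isa> case below follows the same pattern: a nonzero
     VALUE (-m<isa>) ORs the _SET mask into ix86_isa_flags, a zero VALUE
     (-mno-<isa>) clears the _UNSET mask, and in both cases the affected
     bits are recorded in ix86_isa_flags_explicit so that the defaulting
     code in override_options knows the user chose them explicitly.  */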
2108 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2109 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2113 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2114 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2121 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2122 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2126 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2127 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2137 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2138 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2142 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2143 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2150 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2151 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2155 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2156 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2163 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2164 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2168 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2169 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2176 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2177 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2181 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2182 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2189 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2190 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2194 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2195 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2202 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2203 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2207 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2208 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2215 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2216 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2220 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2221 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2228 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2229 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2233 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2234 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2239 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2240 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2244 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2245 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2251 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2252 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2256 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2257 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2264 ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2265 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2269 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2270 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2277 ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2278 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2282 ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2283 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2290 ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2291 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2295 ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2296 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2303 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2304 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2308 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2309 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2316 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2317 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2321 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2322 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2329 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2330 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2334 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2335 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2342 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2343 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2347 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2348 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2355 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2356 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2360 ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2361 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2368 ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2369 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2373 ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2374 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2381 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2382 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2386 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2387 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2394 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2395 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2399 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2400 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
2409 /* Return a string that documents the current -m options. The caller is
2410 responsible for freeing the string. */
2413 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2414 const char *fpmath, bool add_nl_p)
2416 struct ix86_target_opts
2418 const char *option; /* option string */
2419 int mask; /* isa mask options */
  /* This table is ordered so that options like -msse4.2, which imply
     preceding options, are matched first.  */
2424 static struct ix86_target_opts isa_opts[] =
2426 { "-m64", OPTION_MASK_ISA_64BIT },
2427 { "-mfma4", OPTION_MASK_ISA_FMA4 },
2428 { "-mfma", OPTION_MASK_ISA_FMA },
2429 { "-mxop", OPTION_MASK_ISA_XOP },
2430 { "-mlwp", OPTION_MASK_ISA_LWP },
2431 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2432 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2433 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2434 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2435 { "-msse3", OPTION_MASK_ISA_SSE3 },
2436 { "-msse2", OPTION_MASK_ISA_SSE2 },
2437 { "-msse", OPTION_MASK_ISA_SSE },
2438 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2439 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2440 { "-mmmx", OPTION_MASK_ISA_MMX },
2441 { "-mabm", OPTION_MASK_ISA_ABM },
2442 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2443 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2444 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2445 { "-maes", OPTION_MASK_ISA_AES },
2446 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2450 static struct ix86_target_opts flag_opts[] =
2452 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2453 { "-m80387", MASK_80387 },
2454 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2455 { "-malign-double", MASK_ALIGN_DOUBLE },
2456 { "-mcld", MASK_CLD },
2457 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2458 { "-mieee-fp", MASK_IEEE_FP },
2459 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2460 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2461 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2462 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2463 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2464 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2465 { "-mno-red-zone", MASK_NO_RED_ZONE },
2466 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2467 { "-mrecip", MASK_RECIP },
2468 { "-mrtd", MASK_RTD },
2469 { "-msseregparm", MASK_SSEREGPARM },
2470 { "-mstack-arg-probe", MASK_STACK_PROBE },
2471 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2474 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2477 char target_other[40];
2486 memset (opts, '\0', sizeof (opts));
2488 /* Add -march= option. */
2491 opts[num][0] = "-march=";
2492 opts[num++][1] = arch;
2495 /* Add -mtune= option. */
2498 opts[num][0] = "-mtune=";
2499 opts[num++][1] = tune;
  /* Pick out the ISA options.  */
2503 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2505 if ((isa & isa_opts[i].mask) != 0)
2507 opts[num++][0] = isa_opts[i].option;
2508 isa &= ~ isa_opts[i].mask;
2512 if (isa && add_nl_p)
2514 opts[num++][0] = isa_other;
2515 sprintf (isa_other, "(other isa: 0x%x)", isa);
2518 /* Add flag options. */
2519 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2521 if ((flags & flag_opts[i].mask) != 0)
2523 opts[num++][0] = flag_opts[i].option;
2524 flags &= ~ flag_opts[i].mask;
2528 if (flags && add_nl_p)
2530 opts[num++][0] = target_other;
      sprintf (target_other, "(other flags: 0x%x)", flags);
2534 /* Add -fpmath= option. */
2537 opts[num][0] = "-mfpmath=";
2538 opts[num++][1] = fpmath;
2545 gcc_assert (num < ARRAY_SIZE (opts));
2547 /* Size the string. */
2549 sep_len = (add_nl_p) ? 3 : 1;
2550 for (i = 0; i < num; i++)
2553 for (j = 0; j < 2; j++)
2555 len += strlen (opts[i][j]);
2558 /* Build the string. */
2559 ret = ptr = (char *) xmalloc (len);
2562 for (i = 0; i < num; i++)
2566 for (j = 0; j < 2; j++)
2567 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2574 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2582 for (j = 0; j < 2; j++)
2585 memcpy (ptr, opts[i][j], len2[j]);
2587 line_len += len2[j];
2592 gcc_assert (ret + len >= ptr);
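  /* Illustrative sketch of a possible result (the exact contents depend
     on the flags in effect): for -march=core2 the returned string could
     read "-m64 -mssse3 -msse3 -msse2 -msse -mmmx -mcx16 -msahf
     -march=core2".  */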
/* Function that is callable from the debugger to print the current
   options.  */
2600 ix86_debug_options (void)
2602 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2603 ix86_arch_string, ix86_tune_string,
2604 ix86_fpmath_string, true);
2608 fprintf (stderr, "%s\n\n", opts);
2612 fputs ("<no options>\n\n", stderr);
2617 /* Sometimes certain combinations of command options do not make
2618 sense on a particular target machine. You can define a macro
2619 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2620 defined, is executed once just after all the command options have
2623 Don't use this macro to turn on various extra optimizations for
2624 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2627 override_options (bool main_args_p)
2630 unsigned int ix86_arch_mask, ix86_tune_mask;
2631 const bool ix86_tune_specified = (ix86_tune_string != NULL);
2636 /* Comes from final.c -- no real reason to change it. */
2637 #define MAX_CODE_ALIGN 16
2645 PTA_PREFETCH_SSE = 1 << 4,
2647 PTA_3DNOW_A = 1 << 6,
2651 PTA_POPCNT = 1 << 10,
2653 PTA_SSE4A = 1 << 12,
2654 PTA_NO_SAHF = 1 << 13,
2655 PTA_SSE4_1 = 1 << 14,
2656 PTA_SSE4_2 = 1 << 15,
2658 PTA_PCLMUL = 1 << 17,
2661 PTA_MOVBE = 1 << 20,
2669 const char *const name; /* processor name or nickname. */
2670 const enum processor_type processor;
2671 const enum attr_cpu schedule;
2672 const unsigned /*enum pta_flags*/ flags;
2674 const processor_alias_table[] =
2676 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2677 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2678 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2679 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2680 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2681 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2682 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2683 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2684 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2685 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2686 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2687 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2688 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2690 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2692 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2693 PTA_MMX | PTA_SSE | PTA_SSE2},
2694 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2695 PTA_MMX |PTA_SSE | PTA_SSE2},
2696 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2697 PTA_MMX | PTA_SSE | PTA_SSE2},
2698 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2699 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2700 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2701 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2702 | PTA_CX16 | PTA_NO_SAHF},
2703 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2704 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2705 | PTA_SSSE3 | PTA_CX16},
2706 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2707 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2708 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2709 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2710 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A |PTA_PREFETCH_SSE},
2711 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2712 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2713 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2714 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2715 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2716 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2717 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2718 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2719 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2720 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2721 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2722 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2723 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2724 {"x86-64", PROCESSOR_K8, CPU_K8,
2725 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2726 {"k8", PROCESSOR_K8, CPU_K8,
2727 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2728 | PTA_SSE2 | PTA_NO_SAHF},
2729 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2730 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2731 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2732 {"opteron", PROCESSOR_K8, CPU_K8,
2733 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2734 | PTA_SSE2 | PTA_NO_SAHF},
2735 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2736 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2737 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2738 {"athlon64", PROCESSOR_K8, CPU_K8,
2739 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2740 | PTA_SSE2 | PTA_NO_SAHF},
2741 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2742 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2743 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2744 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2745 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2746 | PTA_SSE2 | PTA_NO_SAHF},
2747 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2748 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2749 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2750 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2751 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2752 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2753 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2754 0 /* flags are only used for -march switch. */ },
2755 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2756 PTA_64BIT /* flags are only used for -march switch. */ },
2759 int const pta_size = ARRAY_SIZE (processor_alias_table);
2761 /* Set up prefix/suffix so the error messages refer to either the command
2762 line argument, or the attribute(target). */
2771 prefix = "option(\"";
2776 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2777 SUBTARGET_OVERRIDE_OPTIONS;
2780 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2781 SUBSUBTARGET_OVERRIDE_OPTIONS;
2784 /* -fPIC is the default for x86_64. */
2785 if (TARGET_MACHO && TARGET_64BIT)
2788 /* Set the default values for switches whose default depends on TARGET_64BIT
2789 in case they weren't overwritten by command line options. */
2792 /* Mach-O doesn't support omitting the frame pointer for now. */
2793 if (flag_omit_frame_pointer == 2)
2794 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2795 if (flag_asynchronous_unwind_tables == 2)
2796 flag_asynchronous_unwind_tables = 1;
2797 if (flag_pcc_struct_return == 2)
2798 flag_pcc_struct_return = 0;
2802 if (flag_omit_frame_pointer == 2)
2803 flag_omit_frame_pointer = 0;
2804 if (flag_asynchronous_unwind_tables == 2)
2805 flag_asynchronous_unwind_tables = 0;
2806 if (flag_pcc_struct_return == 2)
2807 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2810 /* Need to check -mtune=generic first. */
2811 if (ix86_tune_string)
2813 if (!strcmp (ix86_tune_string, "generic")
2814 || !strcmp (ix86_tune_string, "i686")
2815 /* As special support for cross compilers we read -mtune=native
	 as -mtune=generic.  With native compilers we won't see
	 -mtune=native, as it will have been changed by the driver.  */
2818 || !strcmp (ix86_tune_string, "native"))
2821 ix86_tune_string = "generic64";
2823 ix86_tune_string = "generic32";
2825 /* If this call is for setting the option attribute, allow the
2826 generic32/generic64 that was previously set. */
2827 else if (!main_args_p
2828 && (!strcmp (ix86_tune_string, "generic32")
2829 || !strcmp (ix86_tune_string, "generic64")))
2831 else if (!strncmp (ix86_tune_string, "generic", 7))
2832 error ("bad value (%s) for %stune=%s %s",
2833 ix86_tune_string, prefix, suffix, sw);
2834 else if (!strcmp (ix86_tune_string, "x86-64"))
2835 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2836 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2837 prefix, suffix, prefix, suffix, prefix, suffix);
2841 if (ix86_arch_string)
2842 ix86_tune_string = ix86_arch_string;
2843 if (!ix86_tune_string)
2845 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2846 ix86_tune_defaulted = 1;
2849 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2850 need to use a sensible tune option. */
2851 if (!strcmp (ix86_tune_string, "generic")
2852 || !strcmp (ix86_tune_string, "x86-64")
2853 || !strcmp (ix86_tune_string, "i686"))
2856 ix86_tune_string = "generic64";
2858 ix86_tune_string = "generic32";
2862 if (ix86_stringop_string)
2864 if (!strcmp (ix86_stringop_string, "rep_byte"))
2865 stringop_alg = rep_prefix_1_byte;
2866 else if (!strcmp (ix86_stringop_string, "libcall"))
2867 stringop_alg = libcall;
2868 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2869 stringop_alg = rep_prefix_4_byte;
2870 else if (!strcmp (ix86_stringop_string, "rep_8byte")
2872 /* rep; movq isn't available in 32-bit code. */
2873 stringop_alg = rep_prefix_8_byte;
2874 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2875 stringop_alg = loop_1_byte;
2876 else if (!strcmp (ix86_stringop_string, "loop"))
2877 stringop_alg = loop;
2878 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2879 stringop_alg = unrolled_loop;
2881 error ("bad value (%s) for %sstringop-strategy=%s %s",
2882 ix86_stringop_string, prefix, suffix, sw);
2885 if (!ix86_arch_string)
2886 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
2888 ix86_arch_specified = 1;
2890 /* Validate -mabi= value. */
2891 if (ix86_abi_string)
2893 if (strcmp (ix86_abi_string, "sysv") == 0)
2894 ix86_abi = SYSV_ABI;
2895 else if (strcmp (ix86_abi_string, "ms") == 0)
2898 error ("unknown ABI (%s) for %sabi=%s %s",
2899 ix86_abi_string, prefix, suffix, sw);
2902 ix86_abi = DEFAULT_ABI;
2904 if (ix86_cmodel_string != 0)
2906 if (!strcmp (ix86_cmodel_string, "small"))
2907 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2908 else if (!strcmp (ix86_cmodel_string, "medium"))
2909 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
2910 else if (!strcmp (ix86_cmodel_string, "large"))
2911 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
2913 error ("code model %s does not support PIC mode", ix86_cmodel_string);
2914 else if (!strcmp (ix86_cmodel_string, "32"))
2915 ix86_cmodel = CM_32;
2916 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
2917 ix86_cmodel = CM_KERNEL;
2919 error ("bad value (%s) for %scmodel=%s %s",
2920 ix86_cmodel_string, prefix, suffix, sw);
2924 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
2925 use of rip-relative addressing. This eliminates fixups that
2926 would otherwise be needed if this object is to be placed in a
2927 DLL, and is essentially just as efficient as direct addressing. */
2928 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
2929 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
2930 else if (TARGET_64BIT)
2931 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2933 ix86_cmodel = CM_32;
2935 if (ix86_asm_string != 0)
2938 && !strcmp (ix86_asm_string, "intel"))
2939 ix86_asm_dialect = ASM_INTEL;
2940 else if (!strcmp (ix86_asm_string, "att"))
2941 ix86_asm_dialect = ASM_ATT;
2943 error ("bad value (%s) for %sasm=%s %s",
2944 ix86_asm_string, prefix, suffix, sw);
2946 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
    error ("code model %qs not supported in the %s-bit mode",
2948 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
2949 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
2950 sorry ("%i-bit mode not compiled in",
2951 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
2953 for (i = 0; i < pta_size; i++)
2954 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
2956 ix86_schedule = processor_alias_table[i].schedule;
2957 ix86_arch = processor_alias_table[i].processor;
2958 /* Default cpu tuning to the architecture. */
2959 ix86_tune = ix86_arch;
2961 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
	  error ("CPU you selected does not support x86-64 "
		 "instruction set");
2965 if (processor_alias_table[i].flags & PTA_MMX
2966 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
2967 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
2968 if (processor_alias_table[i].flags & PTA_3DNOW
2969 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
2970 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
2971 if (processor_alias_table[i].flags & PTA_3DNOW_A
2972 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
2973 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
2974 if (processor_alias_table[i].flags & PTA_SSE
2975 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
2976 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
2977 if (processor_alias_table[i].flags & PTA_SSE2
2978 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2979 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
2980 if (processor_alias_table[i].flags & PTA_SSE3
2981 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
2982 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
2983 if (processor_alias_table[i].flags & PTA_SSSE3
2984 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
2985 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
2986 if (processor_alias_table[i].flags & PTA_SSE4_1
2987 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
2988 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
2989 if (processor_alias_table[i].flags & PTA_SSE4_2
2990 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
2991 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
2992 if (processor_alias_table[i].flags & PTA_AVX
2993 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
2994 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
2995 if (processor_alias_table[i].flags & PTA_FMA
2996 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
2997 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
2998 if (processor_alias_table[i].flags & PTA_SSE4A
2999 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3000 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3001 if (processor_alias_table[i].flags & PTA_FMA4
3002 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3003 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3004 if (processor_alias_table[i].flags & PTA_XOP
3005 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3006 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3007 if (processor_alias_table[i].flags & PTA_LWP
3008 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3009 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3010 if (processor_alias_table[i].flags & PTA_ABM
3011 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3012 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3013 if (processor_alias_table[i].flags & PTA_CX16
3014 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3015 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3016 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3017 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3018 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3019 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3020 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3021 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3022 if (processor_alias_table[i].flags & PTA_MOVBE
3023 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3024 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3025 if (processor_alias_table[i].flags & PTA_AES
3026 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3027 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3028 if (processor_alias_table[i].flags & PTA_PCLMUL
3029 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3030 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3031 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3032 x86_prefetch_sse = true;
3037 if (!strcmp (ix86_arch_string, "generic"))
3038 error ("generic CPU can be used only for %stune=%s %s",
3039 prefix, suffix, sw);
3040 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3041 error ("bad value (%s) for %sarch=%s %s",
3042 ix86_arch_string, prefix, suffix, sw);
3044 ix86_arch_mask = 1u << ix86_arch;
3045 for (i = 0; i < X86_ARCH_LAST; ++i)
3046 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
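  /* E.g. with -march=i486, ix86_arch_mask is 1u << PROCESSOR_I486; that
     bit is excluded from the complemented X86_ARCH_CMOVE mask above, so
     ix86_arch_features[X86_ARCH_CMOVE] comes out 0.  */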
3048 for (i = 0; i < pta_size; i++)
3049 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3051 ix86_schedule = processor_alias_table[i].schedule;
3052 ix86_tune = processor_alias_table[i].processor;
3053 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3055 if (ix86_tune_defaulted)
3057 ix86_tune_string = "x86-64";
3058 for (i = 0; i < pta_size; i++)
3059 if (! strcmp (ix86_tune_string,
3060 processor_alias_table[i].name))
3062 ix86_schedule = processor_alias_table[i].schedule;
3063 ix86_tune = processor_alias_table[i].processor;
	    error ("CPU you selected does not support x86-64 "
		   "instruction set");
3069 /* Intel CPUs have always interpreted SSE prefetch instructions as
3070 NOPs; so, we can enable SSE prefetch instructions even when
3071 -mtune (rather than -march) points us to a processor that has them.
3072 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3073 higher processors. */
3075 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3076 x86_prefetch_sse = true;
3080 if (ix86_tune_specified && i == pta_size)
3081 error ("bad value (%s) for %stune=%s %s",
3082 ix86_tune_string, prefix, suffix, sw);
3084 ix86_tune_mask = 1u << ix86_tune;
3085 for (i = 0; i < X86_TUNE_LAST; ++i)
3086 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3089 ix86_cost = &ix86_size_cost;
3091 ix86_cost = processor_target_table[ix86_tune].cost;
3093 /* Arrange to set up i386_stack_locals for all functions. */
3094 init_machine_status = ix86_init_machine_status;
3096 /* Validate -mregparm= value. */
3097 if (ix86_regparm_string)
3100 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3101 i = atoi (ix86_regparm_string);
3102 if (i < 0 || i > REGPARM_MAX)
3103 error ("%sregparm=%d%s is not between 0 and %d",
3104 prefix, i, suffix, REGPARM_MAX);
3109 ix86_regparm = REGPARM_MAX;
3111 /* If the user has provided any of the -malign-* options,
3112 warn and use that value only if -falign-* is not set.
3113 Remove this code in GCC 3.2 or later. */
3114 if (ix86_align_loops_string)
3116 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3117 prefix, suffix, suffix);
3118 if (align_loops == 0)
3120 i = atoi (ix86_align_loops_string);
3121 if (i < 0 || i > MAX_CODE_ALIGN)
3122 error ("%salign-loops=%d%s is not between 0 and %d",
3123 prefix, i, suffix, MAX_CODE_ALIGN);
3125 align_loops = 1 << i;
3129 if (ix86_align_jumps_string)
3131 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3132 prefix, suffix, suffix);
3133 if (align_jumps == 0)
3135 i = atoi (ix86_align_jumps_string);
3136 if (i < 0 || i > MAX_CODE_ALIGN)
	    error ("%salign-jumps=%d%s is not between 0 and %d",
3138 prefix, i, suffix, MAX_CODE_ALIGN);
3140 align_jumps = 1 << i;
3144 if (ix86_align_funcs_string)
3146 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3147 prefix, suffix, suffix);
3148 if (align_functions == 0)
3150 i = atoi (ix86_align_funcs_string);
3151 if (i < 0 || i > MAX_CODE_ALIGN)
	    error ("%salign-functions=%d%s is not between 0 and %d",
3153 prefix, i, suffix, MAX_CODE_ALIGN);
3155 align_functions = 1 << i;
3159 /* Default align_* from the processor table. */
3160 if (align_loops == 0)
3162 align_loops = processor_target_table[ix86_tune].align_loop;
3163 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3165 if (align_jumps == 0)
3167 align_jumps = processor_target_table[ix86_tune].align_jump;
3168 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3170 if (align_functions == 0)
3172 align_functions = processor_target_table[ix86_tune].align_func;
3175 /* Validate -mbranch-cost= value, or provide default. */
3176 ix86_branch_cost = ix86_cost->branch_cost;
3177 if (ix86_branch_cost_string)
3179 i = atoi (ix86_branch_cost_string);
3181 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3183 ix86_branch_cost = i;
3185 if (ix86_section_threshold_string)
3187 i = atoi (ix86_section_threshold_string);
3189 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3191 ix86_section_threshold = i;
3194 if (ix86_tls_dialect_string)
3196 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3197 ix86_tls_dialect = TLS_DIALECT_GNU;
3198 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3199 ix86_tls_dialect = TLS_DIALECT_GNU2;
3201 error ("bad value (%s) for %stls-dialect=%s %s",
3202 ix86_tls_dialect_string, prefix, suffix, sw);
3205 if (ix87_precision_string)
3207 i = atoi (ix87_precision_string);
3208 if (i != 32 && i != 64 && i != 80)
	error ("pc%d is not a valid precision setting (32, 64 or 80)", i);
3214 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3216 /* Enable by default the SSE and MMX builtins. Do allow the user to
3217 explicitly disable any of these. In particular, disabling SSE and
3218 MMX for kernel code is extremely useful. */
3219 if (!ix86_arch_specified)
3221 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3222 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
	warning (0, "%srtd%s is ignored in 64-bit mode", prefix, suffix);
3229 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3231 if (!ix86_arch_specified)
3233 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
      /* The i386 ABI does not specify a red zone.  It still makes sense
	 to use it when the programmer takes care to keep the stack from
	 being destroyed.  */
3237 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3238 target_flags |= MASK_NO_RED_ZONE;
3241 /* Keep nonleaf frame pointers. */
3242 if (flag_omit_frame_pointer)
3243 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3244 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3245 flag_omit_frame_pointer = 1;
3247 /* If we're doing fast math, we don't care about comparison order
3248 wrt NaNs. This lets us use a shorter comparison sequence. */
3249 if (flag_finite_math_only)
3250 target_flags &= ~MASK_IEEE_FP;
3252 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3253 since the insns won't need emulation. */
3254 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3255 target_flags &= ~MASK_NO_FANCY_MATH_387;
3257 /* Likewise, if the target doesn't have a 387, or we've specified
3258 software floating point, don't use 387 inline intrinsics. */
3260 target_flags |= MASK_NO_FANCY_MATH_387;
3262 /* Turn on MMX builtins for -msse. */
3265 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3266 x86_prefetch_sse = true;
3269 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3270 if (TARGET_SSE4_2 || TARGET_ABM)
3271 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3273 /* Validate -mpreferred-stack-boundary= value or default it to
3274 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3275 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3276 if (ix86_preferred_stack_boundary_string)
3278 i = atoi (ix86_preferred_stack_boundary_string);
3279 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3280 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3281 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3283 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
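      /* E.g. -mpreferred-stack-boundary=4 yields (1 << 4) * 8 = 128
	 bits, i.e. the 16-byte stack alignment the x86-64 ABI
	 requires.  */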
3286 /* Set the default value for -mstackrealign. */
3287 if (ix86_force_align_arg_pointer == -1)
3288 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3290 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3292 /* Validate -mincoming-stack-boundary= value or default it to
3293 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3294 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3295 if (ix86_incoming_stack_boundary_string)
3297 i = atoi (ix86_incoming_stack_boundary_string);
3298 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3299 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3300 i, TARGET_64BIT ? 4 : 2);
3303 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3304 ix86_incoming_stack_boundary
3305 = ix86_user_incoming_stack_boundary;
3309 /* Accept -msseregparm only if at least SSE support is enabled. */
3310 if (TARGET_SSEREGPARM
3312 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3314 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3315 if (ix86_fpmath_string != 0)
3317 if (! strcmp (ix86_fpmath_string, "387"))
3318 ix86_fpmath = FPMATH_387;
3319 else if (! strcmp (ix86_fpmath_string, "sse"))
	      warning (0, "SSE instruction set disabled, using 387 arithmetic");
3324 ix86_fpmath = FPMATH_387;
3327 ix86_fpmath = FPMATH_SSE;
3329 else if (! strcmp (ix86_fpmath_string, "387,sse")
3330 || ! strcmp (ix86_fpmath_string, "387+sse")
3331 || ! strcmp (ix86_fpmath_string, "sse,387")
3332 || ! strcmp (ix86_fpmath_string, "sse+387")
3333 || ! strcmp (ix86_fpmath_string, "both"))
	      warning (0, "SSE instruction set disabled, using 387 arithmetic");
3338 ix86_fpmath = FPMATH_387;
3340 else if (!TARGET_80387)
	      warning (0, "387 instruction set disabled, using SSE arithmetic");
3343 ix86_fpmath = FPMATH_SSE;
3346 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3349 error ("bad value (%s) for %sfpmath=%s %s",
3350 ix86_fpmath_string, prefix, suffix, sw);
3353 /* If the i387 is disabled, then do not return values in it. */
3355 target_flags &= ~MASK_FLOAT_RETURNS;
3357 /* Use external vectorized library in vectorizing intrinsics. */
3358 if (ix86_veclibabi_string)
3360 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3361 ix86_veclib_handler = ix86_veclibabi_svml;
3362 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3363 ix86_veclib_handler = ix86_veclibabi_acml;
3365 error ("unknown vectorization library ABI type (%s) for "
3366 "%sveclibabi=%s %s", ix86_veclibabi_string,
3367 prefix, suffix, sw);
3370 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3371 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3373 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3375 /* ??? Unwind info is not correct around the CFG unless either a frame
3376 pointer is present or M_A_O_A is set. Fixing this requires rewriting
     unwind info generation to be aware of the CFG and propagating states
     around edges.  */
3379 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3380 || flag_exceptions || flag_non_call_exceptions)
3381 && flag_omit_frame_pointer
3382 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3384 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3385 warning (0, "unwind tables currently require either a frame pointer "
3386 "or %saccumulate-outgoing-args%s for correctness",
3388 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3391 /* If stack probes are required, the space used for large function
3392 arguments on the stack must also be probed, so enable
3393 -maccumulate-outgoing-args so this happens in the prologue. */
3394 if (TARGET_STACK_PROBE
3395 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3397 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3398 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3399 "for correctness", prefix, suffix);
3400 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3403 /* For sane SSE instruction set generation we need fcomi instruction.
3404 It is safe to enable all CMOVE instructions. */
3408 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3411 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3412 p = strchr (internal_label_prefix, 'X');
3413 internal_label_prefix_len = p - internal_label_prefix;
3417 /* When scheduling description is not available, disable scheduler pass
3418 so it won't slow down the compilation and make x87 code slower. */
3419 if (!TARGET_SCHEDULE)
3420 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3422 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3423 set_param_value ("simultaneous-prefetches",
3424 ix86_cost->simultaneous_prefetches);
3425 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3426 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3427 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3428 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3429 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3430 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
3432 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3433 can be optimized to ap = __builtin_next_arg (0). */
3435 targetm.expand_builtin_va_start = NULL;
3439 ix86_gen_leave = gen_leave_rex64;
3440 ix86_gen_pop1 = gen_popdi1;
3441 ix86_gen_add3 = gen_adddi3;
3442 ix86_gen_sub3 = gen_subdi3;
3443 ix86_gen_sub3_carry = gen_subdi3_carry;
3444 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3445 ix86_gen_monitor = gen_sse3_monitor64;
3446 ix86_gen_andsp = gen_anddi3;
3450 ix86_gen_leave = gen_leave;
3451 ix86_gen_pop1 = gen_popsi1;
3452 ix86_gen_add3 = gen_addsi3;
3453 ix86_gen_sub3 = gen_subsi3;
3454 ix86_gen_sub3_carry = gen_subsi3_carry;
3455 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3456 ix86_gen_monitor = gen_sse3_monitor;
3457 ix86_gen_andsp = gen_andsi3;
3461 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3463 target_flags |= MASK_CLD & ~target_flags_explicit;
3466 /* Save the initial options in case the user uses function-specific options. */
3468 target_option_default_node = target_option_current_node
3469 = build_target_option_node ();
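/* Illustrative sketch, not part of the original source: the -mfpmath=
   handling in override_options reduces the option string to a bitmask.
   A minimal stand-alone model of that dispatch ("model_fpmath" and the
   MODEL_* enum are hypothetical names mirroring fpmath_unit), kept out
   of compilation: */
#if 0
#include <string.h>

enum model_fpmath_unit { MODEL_FPMATH_387 = 1, MODEL_FPMATH_SSE = 2 };

static enum model_fpmath_unit
model_fpmath (const char *s, int have_sse, int have_387)
{
  if (!strcmp (s, "387"))
    return MODEL_FPMATH_387;
  if (!strcmp (s, "sse"))
    return have_sse ? MODEL_FPMATH_SSE : MODEL_FPMATH_387;
  /* "387,sse", "387+sse", "sse,387", "sse+387" and "both" request both
     units, falling back to whichever unit is actually enabled. */
  if (!have_sse)
    return MODEL_FPMATH_387;
  if (!have_387)
    return MODEL_FPMATH_SSE;
  return (enum model_fpmath_unit) (MODEL_FPMATH_SSE | MODEL_FPMATH_387);
}
#endif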
3472 /* Update register usage after having seen the compiler flags. */
3475 ix86_conditional_register_usage (void)
3480 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3482 if (fixed_regs[i] > 1)
3483 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3484 if (call_used_regs[i] > 1)
3485 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3488 /* The PIC register, if it exists, is fixed. */
3489 j = PIC_OFFSET_TABLE_REGNUM;
3490 if (j != INVALID_REGNUM)
3491 fixed_regs[j] = call_used_regs[j] = 1;
3493 /* The MS_ABI changes the set of call-used registers. */
3494 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
3496 call_used_regs[SI_REG] = 0;
3497 call_used_regs[DI_REG] = 0;
3498 call_used_regs[XMM6_REG] = 0;
3499 call_used_regs[XMM7_REG] = 0;
3500 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3501 call_used_regs[i] = 0;
3504 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3505 other call-clobbered regs for 64-bit. */
3508 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3510 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3511 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3512 && call_used_regs[i])
3513 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3516 /* If MMX is disabled, squash the registers. */
3518 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3519 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3520 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3522 /* If SSE is disabled, squash the registers. */
3524 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3525 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3526 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3528 /* If the FPU is disabled, squash the registers. */
3529 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3530 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3531 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3532 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3534 /* If 32-bit, squash the 64-bit registers. */
3537 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3539 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
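/* Illustrative sketch, not part of the original source: entries greater
   than 1 in the register tables consumed above encode mode-dependent
   defaults -- 2 means "applies for 32-bit only", 3 means "applies for
   64-bit only". A hypothetical stand-alone model of the resolution,
   kept out of compilation: */
#if 0
static void
demo_resolve_conditional (char regs[], int nregs, int target_64bit)
{
  int i;
  for (i = 0; i < nregs; i++)
    if (regs[i] > 1)
      regs[i] = (regs[i] == (target_64bit ? 3 : 2));
}
#endif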
3545 /* Save the current options */
3548 ix86_function_specific_save (struct cl_target_option *ptr)
3550 ptr->arch = ix86_arch;
3551 ptr->schedule = ix86_schedule;
3552 ptr->tune = ix86_tune;
3553 ptr->fpmath = ix86_fpmath;
3554 ptr->branch_cost = ix86_branch_cost;
3555 ptr->tune_defaulted = ix86_tune_defaulted;
3556 ptr->arch_specified = ix86_arch_specified;
3557 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3558 ptr->target_flags_explicit = target_flags_explicit;
3560 /* The fields are char but the variables are not; make sure the
3561 values fit in the fields. */
3562 gcc_assert (ptr->arch == ix86_arch);
3563 gcc_assert (ptr->schedule == ix86_schedule);
3564 gcc_assert (ptr->tune == ix86_tune);
3565 gcc_assert (ptr->fpmath == ix86_fpmath);
3566 gcc_assert (ptr->branch_cost == ix86_branch_cost);
3569 /* Restore the current options */
3572 ix86_function_specific_restore (struct cl_target_option *ptr)
3574 enum processor_type old_tune = ix86_tune;
3575 enum processor_type old_arch = ix86_arch;
3576 unsigned int ix86_arch_mask, ix86_tune_mask;
3579 ix86_arch = (enum processor_type) ptr->arch;
3580 ix86_schedule = (enum attr_cpu) ptr->schedule;
3581 ix86_tune = (enum processor_type) ptr->tune;
3582 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
3583 ix86_branch_cost = ptr->branch_cost;
3584 ix86_tune_defaulted = ptr->tune_defaulted;
3585 ix86_arch_specified = ptr->arch_specified;
3586 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3587 target_flags_explicit = ptr->target_flags_explicit;
3589 /* Recreate the arch feature tests if the arch changed */
3590 if (old_arch != ix86_arch)
3592 ix86_arch_mask = 1u << ix86_arch;
3593 for (i = 0; i < X86_ARCH_LAST; ++i)
3594 ix86_arch_features[i]
3595 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3598 /* Recreate the tune optimization tests */
3599 if (old_tune != ix86_tune)
3601 ix86_tune_mask = 1u << ix86_tune;
3602 for (i = 0; i < X86_TUNE_LAST; ++i)
3603 ix86_tune_features[i]
3604 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3608 /* Print the current options */
3611 ix86_function_specific_print (FILE *file, int indent,
3612 struct cl_target_option *ptr)
3615 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3616 NULL, NULL, NULL, false);
3618 fprintf (file, "%*sarch = %d (%s)\n",
3621 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3622 ? cpu_names[ptr->arch]
3625 fprintf (file, "%*stune = %d (%s)\n",
3628 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3629 ? cpu_names[ptr->tune]
3632 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3633 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3634 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3635 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3639 fprintf (file, "%*s%s\n", indent, "", target_string);
3640 free (target_string);
3645 /* Inner function to process the attribute((target(...))), take an argument and
3646 set the current options from the argument. If we have a list, recursively go over the list. */
3650 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3655 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3656 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3657 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3658 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3673 enum ix86_opt_type type;
3678 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3679 IX86_ATTR_ISA ("abm", OPT_mabm),
3680 IX86_ATTR_ISA ("aes", OPT_maes),
3681 IX86_ATTR_ISA ("avx", OPT_mavx),
3682 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3683 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3684 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3685 IX86_ATTR_ISA ("sse", OPT_msse),
3686 IX86_ATTR_ISA ("sse2", OPT_msse2),
3687 IX86_ATTR_ISA ("sse3", OPT_msse3),
3688 IX86_ATTR_ISA ("sse4", OPT_msse4),
3689 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3690 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3691 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3692 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3693 IX86_ATTR_ISA ("fma4", OPT_mfma4),
3694 IX86_ATTR_ISA ("xop", OPT_mxop),
3695 IX86_ATTR_ISA ("lwp", OPT_mlwp),
3697 /* string options */
3698 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3699 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3700 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3703 IX86_ATTR_YES ("cld",
3707 IX86_ATTR_NO ("fancy-math-387",
3708 OPT_mfancy_math_387,
3709 MASK_NO_FANCY_MATH_387),
3711 IX86_ATTR_YES ("ieee-fp",
3715 IX86_ATTR_YES ("inline-all-stringops",
3716 OPT_minline_all_stringops,
3717 MASK_INLINE_ALL_STRINGOPS),
3719 IX86_ATTR_YES ("inline-stringops-dynamically",
3720 OPT_minline_stringops_dynamically,
3721 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3723 IX86_ATTR_NO ("align-stringops",
3724 OPT_mno_align_stringops,
3725 MASK_NO_ALIGN_STRINGOPS),
3727 IX86_ATTR_YES ("recip",
3733 /* If this is a list, recurse to get the options. */
3734 if (TREE_CODE (args) == TREE_LIST)
3738 for (; args; args = TREE_CHAIN (args))
3739 if (TREE_VALUE (args)
3740 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3746 else if (TREE_CODE (args) != STRING_CST)
3749 /* Handle multiple arguments separated by commas. */
3750 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3752 while (next_optstr && *next_optstr != '\0')
3754 char *p = next_optstr;
3756 char *comma = strchr (next_optstr, ',');
3757 const char *opt_string;
3758 size_t len, opt_len;
3763 enum ix86_opt_type type = ix86_opt_unknown;
3769 len = comma - next_optstr;
3770 next_optstr = comma + 1;
3778 /* Recognize no-xxx. */
3779 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3788 /* Find the option. */
3791 for (i = 0; i < ARRAY_SIZE (attrs); i++)
3793 type = attrs[i].type;
3794 opt_len = attrs[i].len;
3795 if (ch == attrs[i].string[0]
3796 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3797 && memcmp (p, attrs[i].string, opt_len) == 0)
3800 mask = attrs[i].mask;
3801 opt_string = attrs[i].string;
3806 /* Process the option. */
3809 error ("attribute(target(\"%s\")) is unknown", orig_p);
3813 else if (type == ix86_opt_isa)
3814 ix86_handle_option (opt, p, opt_set_p);
3816 else if (type == ix86_opt_yes || type == ix86_opt_no)
3818 if (type == ix86_opt_no)
3819 opt_set_p = !opt_set_p;
3822 target_flags |= mask;
3824 target_flags &= ~mask;
3827 else if (type == ix86_opt_str)
3831 error ("option(\"%s\") was already specified", opt_string);
3835 p_strings[opt] = xstrdup (p + opt_len);
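/* Illustrative user-level input for the parser above (hypothetical
   declarations, kept out of compilation): each comma-separated token
   selects an ISA flag, a string option, or a yes/no mask, and a "no-"
   prefix negates it. */
#if 0
int fast_popcount (int x) __attribute__((target ("sse4.2,popcnt")));
int tuned_version (int x) __attribute__((target ("arch=core2,fpmath=sse")));
int no_cld_version (int x) __attribute__((target ("no-cld")));
#endif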
3845 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3848 ix86_valid_target_attribute_tree (tree args)
3850 const char *orig_arch_string = ix86_arch_string;
3851 const char *orig_tune_string = ix86_tune_string;
3852 const char *orig_fpmath_string = ix86_fpmath_string;
3853 int orig_tune_defaulted = ix86_tune_defaulted;
3854 int orig_arch_specified = ix86_arch_specified;
3855 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3858 struct cl_target_option *def
3859 = TREE_TARGET_OPTION (target_option_default_node);
3861 /* Process each of the options on the chain. */
3862 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3865 /* If the changed options are different from the default, rerun override_options,
3866 and then save the options away. The string options are attribute options,
3867 and will be undone when we copy the save structure. */
3868 if (ix86_isa_flags != def->ix86_isa_flags
3869 || target_flags != def->target_flags
3870 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3871 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3872 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3874 /* If we are using the default tune= or arch=, undo the string assigned,
3875 and use the default. */
3876 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3877 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3878 else if (!orig_arch_specified)
3879 ix86_arch_string = NULL;
3881 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3882 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3883 else if (orig_tune_defaulted)
3884 ix86_tune_string = NULL;
3886 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
3887 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3888 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3889 else if (!TARGET_64BIT && TARGET_SSE)
3890 ix86_fpmath_string = "sse,387";
3892 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3893 override_options (false);
3895 /* Add any builtin functions with the new isa if any. */
3896 ix86_add_new_builtins (ix86_isa_flags);
3898 /* Save the current options unless we are validating options for #pragma. */
3900 t = build_target_option_node ();
3902 ix86_arch_string = orig_arch_string;
3903 ix86_tune_string = orig_tune_string;
3904 ix86_fpmath_string = orig_fpmath_string;
3906 /* Free up memory allocated to hold the strings */
3907 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
3908 if (option_strings[i])
3909 free (option_strings[i]);
3915 /* Hook to validate attribute((target("string"))). */
3918 ix86_valid_target_attribute_p (tree fndecl,
3919 tree ARG_UNUSED (name),
3921 int ARG_UNUSED (flags))
3923 struct cl_target_option cur_target;
3925 tree old_optimize = build_optimization_node ();
3926 tree new_target, new_optimize;
3927 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
3929 /* If the function changed the optimization levels as well as setting target
3930 options, start with the optimizations specified. */
3931 if (func_optimize && func_optimize != old_optimize)
3932 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
3934 /* The target attributes may also change some optimization flags, so update
3935 the optimization options if necessary. */
3936 cl_target_option_save (&cur_target);
3937 new_target = ix86_valid_target_attribute_tree (args);
3938 new_optimize = build_optimization_node ();
3945 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
3947 if (old_optimize != new_optimize)
3948 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
3951 cl_target_option_restore (&cur_target);
3953 if (old_optimize != new_optimize)
3954 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
3960 /* Hook to determine if one function can safely inline another. */
3963 ix86_can_inline_p (tree caller, tree callee)
3966 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
3967 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
3969 /* If callee has no option attributes, then it is ok to inline. */
3973 /* If caller has no option attributes, but callee does then it is not ok to
3975 else if (!caller_tree)
3980 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
3981 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
3983 /* Callee's ISA options should be a subset of the caller's, i.e. an SSE4 function
3984 can inline an SSE2 function, but an SSE2 function can't inline an SSE4 function. */
3986 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
3987 != callee_opts->ix86_isa_flags)
3990 /* See if we have the same non-isa options. */
3991 else if (caller_opts->target_flags != callee_opts->target_flags)
3994 /* See if arch, tune, etc. are the same. */
3995 else if (caller_opts->arch != callee_opts->arch)
3998 else if (caller_opts->tune != callee_opts->tune)
4001 else if (caller_opts->fpmath != callee_opts->fpmath)
4004 else if (caller_opts->branch_cost != callee_opts->branch_cost)
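/* Illustrative consequence of the subset rule above (hypothetical
   user-level declarations, kept out of compilation): */
#if 0
static inline int widget (int x) __attribute__((target ("sse4.2")));

int plain_caller (int x);                                /* no sse4.2 */
int sse42_caller (int x) __attribute__((target ("sse4.2")));

/* widget may be inlined into sse42_caller, whose ISA flags are a
   superset of the callee's, but not into plain_caller. */
#endif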
4015 /* Remember the last target of ix86_set_current_function. */
4016 static GTY(()) tree ix86_previous_fndecl;
4018 /* Establish appropriate back-end context for processing the function
4019 FNDECL. The argument might be NULL to indicate processing at top
4020 level, outside of any function scope. */
4022 ix86_set_current_function (tree fndecl)
4024 /* Only change the context if the function changes. This hook is called
4025 several times in the course of compiling a function, and we don't want to
4026 slow things down too much or call target_reinit when it isn't safe. */
4027 if (fndecl && fndecl != ix86_previous_fndecl)
4029 tree old_tree = (ix86_previous_fndecl
4030 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4033 tree new_tree = (fndecl
4034 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4037 ix86_previous_fndecl = fndecl;
4038 if (old_tree == new_tree)
4043 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
4049 struct cl_target_option *def
4050 = TREE_TARGET_OPTION (target_option_current_node);
4052 cl_target_option_restore (def);
4059 /* Return true if this goes in large data/bss. */
4062 ix86_in_large_data_p (tree exp)
4064 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4067 /* Functions are never large data. */
4068 if (TREE_CODE (exp) == FUNCTION_DECL)
4071 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4073 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4074 if (strcmp (section, ".ldata") == 0
4075 || strcmp (section, ".lbss") == 0)
4081 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4083 /* If this is an incomplete type with size 0, then we can't put it
4084 in data because it might be too big when completed. */
4085 if (!size || size > ix86_section_threshold)
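/* Illustrative example (hypothetical, kept out of compilation): with
   -mcmodel=medium and the default -mlarge-data-threshold=65536, */
#if 0
static char small_buf[256];      /* below the threshold: normal .bss */
static char big_buf[1 << 20];    /* above the threshold: large .lbss */
#endif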
4092 /* Switch to the appropriate section for output of DECL.
4093 DECL is either a `VAR_DECL' node or a constant of some sort.
4094 RELOC indicates whether forming the initial value of DECL requires
4095 link-time relocations. */
4097 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4101 x86_64_elf_select_section (tree decl, int reloc,
4102 unsigned HOST_WIDE_INT align)
4104 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4105 && ix86_in_large_data_p (decl))
4107 const char *sname = NULL;
4108 unsigned int flags = SECTION_WRITE;
4109 switch (categorize_decl_for_section (decl, reloc))
4114 case SECCAT_DATA_REL:
4115 sname = ".ldata.rel";
4117 case SECCAT_DATA_REL_LOCAL:
4118 sname = ".ldata.rel.local";
4120 case SECCAT_DATA_REL_RO:
4121 sname = ".ldata.rel.ro";
4123 case SECCAT_DATA_REL_RO_LOCAL:
4124 sname = ".ldata.rel.ro.local";
4128 flags |= SECTION_BSS;
4131 case SECCAT_RODATA_MERGE_STR:
4132 case SECCAT_RODATA_MERGE_STR_INIT:
4133 case SECCAT_RODATA_MERGE_CONST:
4137 case SECCAT_SRODATA:
4144 /* We don't split these for the medium model. Place them into
4145 default sections and hope for the best. */
4147 case SECCAT_EMUTLS_VAR:
4148 case SECCAT_EMUTLS_TMPL:
4153 /* We might get called with string constants, but get_named_section
4154 doesn't like them as they are not DECLs. Also, we need to set
4155 flags in that case. */
4157 return get_section (sname, flags, NULL);
4158 return get_named_section (decl, sname, reloc);
4161 return default_elf_select_section (decl, reloc, align);
4164 /* Build up a unique section name, expressed as a
4165 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4166 RELOC indicates whether the initial value of EXP requires
4167 link-time relocations. */
4169 static void ATTRIBUTE_UNUSED
4170 x86_64_elf_unique_section (tree decl, int reloc)
4172 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4173 && ix86_in_large_data_p (decl))
4175 const char *prefix = NULL;
4176 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4177 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4179 switch (categorize_decl_for_section (decl, reloc))
4182 case SECCAT_DATA_REL:
4183 case SECCAT_DATA_REL_LOCAL:
4184 case SECCAT_DATA_REL_RO:
4185 case SECCAT_DATA_REL_RO_LOCAL:
4186 prefix = one_only ? ".ld" : ".ldata";
4189 prefix = one_only ? ".lb" : ".lbss";
4192 case SECCAT_RODATA_MERGE_STR:
4193 case SECCAT_RODATA_MERGE_STR_INIT:
4194 case SECCAT_RODATA_MERGE_CONST:
4195 prefix = one_only ? ".lr" : ".lrodata";
4197 case SECCAT_SRODATA:
4204 /* We don't split these for the medium model. Place them into
4205 default sections and hope for the best. */
4207 case SECCAT_EMUTLS_VAR:
4208 prefix = targetm.emutls.var_section;
4210 case SECCAT_EMUTLS_TMPL:
4211 prefix = targetm.emutls.tmpl_section;
4216 const char *name, *linkonce;
4219 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4220 name = targetm.strip_name_encoding (name);
4222 /* If we're using one_only, then there needs to be a .gnu.linkonce
4223 prefix to the section name. */
4224 linkonce = one_only ? ".gnu.linkonce" : "";
4226 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4228 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4232 default_unique_section (decl, reloc);
4235 #ifdef COMMON_ASM_OP
4236 /* This says how to output assembler code to declare an
4237 uninitialized external linkage data object.
4239 For medium model x86-64 we need to use the .largecomm opcode for large objects. */
4242 x86_elf_aligned_common (FILE *file,
4243 const char *name, unsigned HOST_WIDE_INT size,
4246 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4247 && size > (unsigned int)ix86_section_threshold)
4248 fputs (".largecomm\t", file);
4250 fputs (COMMON_ASM_OP, file);
4251 assemble_name (file, name);
4252 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4253 size, align / BITS_PER_UNIT);
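/* Illustrative output of the routine above (hypothetical names and
   sizes): a 1 MiB common symbol under -mcmodel=medium is announced as

       .largecomm  big_buf,1048576,32

   while a small one keeps the plain COMMON_ASM_OP form, e.g.

       .comm       small_buf,256,4  */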
4257 /* Utility function for targets to use in implementing
4258 ASM_OUTPUT_ALIGNED_BSS. */
4261 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4262 const char *name, unsigned HOST_WIDE_INT size,
4265 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4266 && size > (unsigned int)ix86_section_threshold)
4267 switch_to_section (get_named_section (decl, ".lbss", 0));
4269 switch_to_section (bss_section);
4270 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4271 #ifdef ASM_DECLARE_OBJECT_NAME
4272 last_assemble_variable_decl = decl;
4273 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4275 /* The standard thing is just to output a label for the object. */
4276 ASM_OUTPUT_LABEL (file, name);
4277 #endif /* ASM_DECLARE_OBJECT_NAME */
4278 ASM_OUTPUT_SKIP (file, size ? size : 1);
4282 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4284 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4285 make the problem with not enough registers even worse. */
4286 #ifdef INSN_SCHEDULING
4288 flag_schedule_insns = 0;
4292 /* The Darwin libraries never set errno, so we might as well
4293 avoid calling them when that's the only reason we would. */
4294 flag_errno_math = 0;
4296 /* The default values of these switches depend on TARGET_64BIT,
4297 which is not known at this moment. Mark these values with 2 and
4298 let the user override them. In case there is no command line option
4299 specifying them, we will set the defaults in override_options. */
4301 flag_omit_frame_pointer = 2;
4302 flag_pcc_struct_return = 2;
4303 flag_asynchronous_unwind_tables = 2;
4304 flag_vect_cost_model = 1;
4305 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4306 SUBTARGET_OPTIMIZATION_OPTIONS;
4310 /* Decide whether we can make a sibling call to a function. DECL is the
4311 declaration of the function being targeted by the call and EXP is the
4312 CALL_EXPR representing the call. */
4315 ix86_function_ok_for_sibcall (tree decl, tree exp)
4317 tree type, decl_or_type;
4320 /* If we are generating position-independent code, we cannot sibcall
4321 optimize any indirect call, or a direct call to a global function,
4322 as the PLT requires %ebx be live. */
4323 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4326 /* If we need to align the outgoing stack, then sibcalling would
4327 unalign the stack, which may break the called function. */
4328 if (ix86_minimum_incoming_stack_boundary (true)
4329 < PREFERRED_STACK_BOUNDARY)
4334 decl_or_type = decl;
4335 type = TREE_TYPE (decl);
4339 /* We're looking at the CALL_EXPR, we need the type of the function. */
4340 type = CALL_EXPR_FN (exp); /* pointer expression */
4341 type = TREE_TYPE (type); /* pointer type */
4342 type = TREE_TYPE (type); /* function type */
4343 decl_or_type = type;
4346 /* Check that the return value locations are the same. For example,
4347 if we are returning floats on the 80387 register stack, we cannot
4348 make a sibcall from a function that doesn't return a float to a
4349 function that does or, conversely, from a function that does return
4350 a float to a function that doesn't; the necessary stack adjustment
4351 would not be executed. This is also the place we notice
4352 differences in the return value ABI. Note that it is ok for one
4353 of the functions to have void return type as long as the return
4354 value of the other is passed in a register. */
4355 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4356 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4358 if (STACK_REG_P (a) || STACK_REG_P (b))
4360 if (!rtx_equal_p (a, b))
4363 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4365 else if (!rtx_equal_p (a, b))
4370 /* The SYSV ABI has more call-clobbered registers;
4371 disallow sibcalls from MS to SYSV. */
4372 if (cfun->machine->call_abi == MS_ABI
4373 && ix86_function_type_abi (type) == SYSV_ABI)
4378 /* If this call is indirect, we'll need to be able to use a
4379 call-clobbered register for the address of the target function.
4380 Make sure that all such registers are not used for passing
4381 parameters. Note that DLLIMPORT functions are indirect. */
4383 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4385 if (ix86_function_regparm (type, NULL) >= 3)
4387 /* ??? Need to count the actual number of registers to be used,
4388 not the possible number of registers. Fix later. */
4394 /* Otherwise okay. That also includes certain types of indirect calls. */
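/* Illustrative example (hypothetical, kept out of compilation): a call
   position this predicate typically accepts, allowing the call to be
   emitted as a bare jump to the target: */
#if 0
extern long helper (long);

long
tail_position (long x)
{
  return helper (x + 1);   /* same return location, no stack fix-ups */
}
#endif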
4398 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
4399 calling convention attributes;
4400 arguments as in struct attribute_spec.handler. */
4403 ix86_handle_cconv_attribute (tree *node, tree name,
4405 int flags ATTRIBUTE_UNUSED,
4408 if (TREE_CODE (*node) != FUNCTION_TYPE
4409 && TREE_CODE (*node) != METHOD_TYPE
4410 && TREE_CODE (*node) != FIELD_DECL
4411 && TREE_CODE (*node) != TYPE_DECL)
4413 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4415 *no_add_attrs = true;
4419 /* Can combine regparm with all attributes but fastcall. */
4420 if (is_attribute_p ("regparm", name))
4424 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4426 error ("fastcall and regparm attributes are not compatible");
4429 cst = TREE_VALUE (args);
4430 if (TREE_CODE (cst) != INTEGER_CST)
4432 warning (OPT_Wattributes,
4433 "%qE attribute requires an integer constant argument",
4435 *no_add_attrs = true;
4437 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4439 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4441 *no_add_attrs = true;
4449 /* Do not warn when emulating the MS ABI. */
4450 if (TREE_CODE (*node) != FUNCTION_TYPE
4451 || ix86_function_type_abi (*node) != MS_ABI)
4452 warning (OPT_Wattributes, "%qE attribute ignored",
4454 *no_add_attrs = true;
4458 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4459 if (is_attribute_p ("fastcall", name))
4461 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4463 error ("fastcall and cdecl attributes are not compatible");
4465 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4467 error ("fastcall and stdcall attributes are not compatible");
4469 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4471 error ("fastcall and regparm attributes are not compatible");
4475 /* Can combine stdcall with fastcall (redundant), regparm and sseregparm. */
4477 else if (is_attribute_p ("stdcall", name))
4479 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4481 error ("stdcall and cdecl attributes are not compatible");
4483 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4485 error ("stdcall and fastcall attributes are not compatible");
4489 /* Can combine cdecl with regparm and sseregparm. */
4490 else if (is_attribute_p ("cdecl", name))
4492 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4494 error ("stdcall and cdecl attributes are not compatible");
4496 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4498 error ("fastcall and cdecl attributes are not compatible");
4502 /* Can combine sseregparm with all attributes. */
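/* Illustrative combinations for the checks above (hypothetical
   user-level declarations, kept out of compilation): */
#if 0
int __attribute__((fastcall)) f1 (int a, int b);          /* ECX, EDX */
int __attribute__((stdcall)) f2 (int a, int b);           /* callee pops */
int __attribute__((stdcall, sseregparm)) f3 (double a);   /* accepted */
int __attribute__((fastcall, regparm (2))) f4 (int a);    /* error above */
#endif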
4507 /* Return 0 if the attributes for two types are incompatible, 1 if they
4508 are compatible, and 2 if they are nearly compatible (which causes a
4509 warning to be generated). */
4512 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4514 /* Check for mismatch of non-default calling convention. */
4515 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4517 if (TREE_CODE (type1) != FUNCTION_TYPE
4518 && TREE_CODE (type1) != METHOD_TYPE)
4521 /* Check for mismatched fastcall/regparm types. */
4522 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4523 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4524 || (ix86_function_regparm (type1, NULL)
4525 != ix86_function_regparm (type2, NULL)))
4528 /* Check for mismatched sseregparm types. */
4529 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4530 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4533 /* Check for mismatched return types (cdecl vs stdcall). */
4534 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4535 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4541 /* Return the regparm value for a function with the indicated TYPE and DECL.
4542 DECL may be NULL when calling function indirectly
4543 or considering a libcall. */
4546 ix86_function_regparm (const_tree type, const_tree decl)
4552 return (ix86_function_type_abi (type) == SYSV_ABI
4553 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4555 regparm = ix86_regparm;
4556 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4559 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4563 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4566 /* Use register calling convention for local functions when possible. */
4568 && TREE_CODE (decl) == FUNCTION_DECL
4572 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4573 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
4576 int local_regparm, globals = 0, regno;
4578 /* Make sure no regparm register is taken by a
4579 fixed register variable. */
4580 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4581 if (fixed_regs[local_regparm])
4584 /* We don't want to use regparm(3) for nested functions as
4585 these use a static chain pointer in the third argument. */
4586 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
4589 /* Each fixed register usage increases register pressure,
4590 so fewer registers should be used for argument passing.
4591 This functionality can be overridden by an explicit regparm value. */
4593 for (regno = 0; regno <= DI_REG; regno++)
4594 if (fixed_regs[regno])
4598 = globals < local_regparm ? local_regparm - globals : 0;
4600 if (local_regparm > regparm)
4601 regparm = local_regparm;
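/* Illustrative example (hypothetical, kept out of compilation): an
   explicit request; with regparm(3) the first three integer arguments
   travel in EAX, EDX and ECX instead of on the stack: */
#if 0
int __attribute__((regparm (3))) sum3 (int a, int b, int c);
#endif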
4608 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4609 DFmode (2) arguments in SSE registers for a function with the
4610 indicated TYPE and DECL. DECL may be NULL when calling function
4611 indirectly or considering a libcall. Otherwise return 0. */
4614 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4616 gcc_assert (!TARGET_64BIT);
4618 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4619 by the sseregparm attribute. */
4620 if (TARGET_SSEREGPARM
4621 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4628 error ("calling %qD with attribute sseregparm without "
4629 "SSE/SSE2 enabled", decl);
4631 error ("calling %qT with attribute sseregparm without "
4632 "SSE/SSE2 enabled", type);
4640 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4641 (and DFmode for SSE2) arguments in SSE registers. */
4642 if (decl && TARGET_SSE_MATH && optimize && !profile_flag)
4644 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4645 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4647 return TARGET_SSE2 ? 2 : 1;
4653 /* Return true if EAX is live at the start of the function. Used by
4654 ix86_expand_prologue to determine if we need special help before
4655 calling allocate_stack_worker. */
4658 ix86_eax_live_at_start_p (void)
4660 /* Cheat. Don't bother working forward from ix86_function_regparm
4661 to the function type to whether an actual argument is located in
4662 eax. Instead just look at cfg info, which is still close enough
4663 to correct at this point. This gives false positives for broken
4664 functions that might use uninitialized data that happens to be
4665 allocated in eax, but who cares? */
4666 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4669 /* Value is the number of bytes of arguments automatically
4670 popped when returning from a subroutine call.
4671 FUNDECL is the declaration node of the function (as a tree),
4672 FUNTYPE is the data type of the function (as a tree),
4673 or for a library call it is an identifier node for the subroutine name.
4674 SIZE is the number of bytes of arguments passed on the stack.
4676 On the 80386, the RTD insn may be used to pop them if the number
4677 of args is fixed, but if the number is variable then the caller
4678 must pop them all. RTD can't be used for library calls now
4679 because the library is compiled with the Unix compiler.
4680 Use of RTD is a selectable option, since it is incompatible with
4681 standard Unix calling sequences. If the option is not selected,
4682 the caller must always pop the args.
4684 The attribute stdcall is equivalent to RTD on a per module basis. */
4687 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4691 /* None of the 64-bit ABIs pop arguments. */
4695 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4697 /* Cdecl functions override -mrtd, and never pop the stack. */
4698 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4700 /* Stdcall and fastcall functions will pop the stack if not variable args. */
4702 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4703 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
4706 if (rtd && ! stdarg_p (funtype))
4710 /* Lose any fake structure return argument if it is passed on the stack. */
4711 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4712 && !KEEP_AGGREGATE_RETURN_POINTER)
4714 int nregs = ix86_function_regparm (funtype, fundecl);
4716 return GET_MODE_SIZE (Pmode);
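/* Illustrative example (hypothetical, kept out of compilation): for a
   stdcall function taking two ints, this hook returns 8 and the
   epilogue pops the arguments with "ret $8": */
#if 0
int __attribute__((stdcall)) add2 (int a, int b);
#endif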
4722 /* Argument support functions. */
4724 /* Return true when register may be used to pass function parameters. */
4726 ix86_function_arg_regno_p (int regno)
4729 const int *parm_regs;
4734 return (regno < REGPARM_MAX
4735 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4737 return (regno < REGPARM_MAX
4738 || (TARGET_MMX && MMX_REGNO_P (regno)
4739 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4740 || (TARGET_SSE && SSE_REGNO_P (regno)
4741 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4746 if (SSE_REGNO_P (regno) && TARGET_SSE)
4751 if (TARGET_SSE && SSE_REGNO_P (regno)
4752 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4756 /* TODO: The function should depend on current function ABI but
4757 builtins.c would need updating then. Therefore we use the default ABI. */
4760 /* RAX is used as hidden argument to va_arg functions. */
4761 if (ix86_abi == SYSV_ABI && regno == AX_REG)
4764 if (ix86_abi == MS_ABI)
4765 parm_regs = x86_64_ms_abi_int_parameter_registers;
4767 parm_regs = x86_64_int_parameter_registers;
4768 for (i = 0; i < (ix86_abi == MS_ABI
4769 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
4770 if (regno == parm_regs[i])
4775 /* Return true if we do not know how to pass TYPE solely in registers. */
4778 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4780 if (must_pass_in_stack_var_size_or_pad (mode, type))
4783 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4784 The layout_type routine is crafty and tries to trick us into passing
4785 currently unsupported vector types on the stack by using TImode. */
4786 return (!TARGET_64BIT && mode == TImode
4787 && type && TREE_CODE (type) != VECTOR_TYPE);
4790 /* Return the size, in bytes, of the area reserved for arguments passed
4791 in registers for the function represented by FNDECL, which depends on the ABI used. */
4794 ix86_reg_parm_stack_space (const_tree fndecl)
4796 enum calling_abi call_abi = SYSV_ABI;
4797 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
4798 call_abi = ix86_function_abi (fndecl);
4800 call_abi = ix86_function_type_abi (fndecl);
4801 if (call_abi == MS_ABI)
4806 /* Returns SYSV_ABI or MS_ABI, dependent on fntype, specifying the call ABI used. */
4809 ix86_function_type_abi (const_tree fntype)
4811 if (TARGET_64BIT && fntype != NULL)
4813 enum calling_abi abi = ix86_abi;
4814 if (abi == SYSV_ABI)
4816 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
4819 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
4827 ix86_function_ms_hook_prologue (const_tree fntype)
4831 if (lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fntype)))
4833 if (decl_function_context (fntype) != NULL_TREE)
4835 error_at (DECL_SOURCE_LOCATION (fntype),
4836 "ms_hook_prologue is not compatible with nested function");
4845 static enum calling_abi
4846 ix86_function_abi (const_tree fndecl)
4850 return ix86_function_type_abi (TREE_TYPE (fndecl));
4853 /* Returns SYSV_ABI or MS_ABI, dependent on cfun, specifying the call ABI used. */
4856 ix86_cfun_abi (void)
4858 if (! cfun || ! TARGET_64BIT)
4860 return cfun->machine->call_abi;
4864 extern void init_regs (void);
4866 /* Implementation of the call ABI switching target hook. The call register
4867 sets specific to FNDECL are set up here. See also CONDITIONAL_REGISTER_USAGE
4868 for more details. */
4870 ix86_call_abi_override (const_tree fndecl)
4872 if (fndecl == NULL_TREE)
4873 cfun->machine->call_abi = ix86_abi;
4875 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
4878 /* MS and SYSV ABIs have different sets of call-used registers. Avoid expensive
4879 re-initialization of init_regs each time we switch function context since
4880 this is needed only during RTL expansion. */
4882 ix86_maybe_switch_abi (void)
4885 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
4889 /* Initialize a variable CUM of type CUMULATIVE_ARGS
4890 for a call to a function whose data type is FNTYPE.
4891 For a library call, FNTYPE is 0. */
4894 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
4895 tree fntype, /* tree ptr for function decl */
4896 rtx libname, /* SYMBOL_REF of library name or 0 */
4899 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
4900 memset (cum, 0, sizeof (*cum));
4903 cum->call_abi = ix86_function_abi (fndecl);
4905 cum->call_abi = ix86_function_type_abi (fntype);
4906 /* Set up the number of registers to use for passing arguments. */
4908 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
4909 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
4910 "or subtarget optimization implying it");
4911 cum->nregs = ix86_regparm;
4914 if (cum->call_abi != ix86_abi)
4915 cum->nregs = (ix86_abi != SYSV_ABI
4916 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4920 cum->sse_nregs = SSE_REGPARM_MAX;
4923 if (cum->call_abi != ix86_abi)
4924 cum->sse_nregs = (ix86_abi != SYSV_ABI
4925 ? X86_64_SSE_REGPARM_MAX
4926 : X86_64_MS_SSE_REGPARM_MAX);
4930 cum->mmx_nregs = MMX_REGPARM_MAX;
4931 cum->warn_avx = true;
4932 cum->warn_sse = true;
4933 cum->warn_mmx = true;
4935 /* Because the type might mismatch between caller and callee, we need to
4936 use the actual type of the function for local calls.
4937 FIXME: cgraph_analyze can be told to actually record if function uses
4938 va_start so for local functions maybe_vaarg can be made more aggressive.
4940 FIXME: once the type system is fixed, we won't need this code anymore. */
4942 fntype = TREE_TYPE (fndecl);
4943 cum->maybe_vaarg = (fntype
4944 ? (!prototype_p (fntype) || stdarg_p (fntype))
4949 /* If there are variable arguments, then we won't pass anything
4950 in registers in 32-bit mode. */
4951 if (stdarg_p (fntype))
4962 /* Use ecx and edx registers if function has fastcall attribute,
4963 else look for regparm information. */
4966 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
4972 cum->nregs = ix86_function_regparm (fntype, fndecl);
4975 /* Set up the number of SSE registers used for passing SFmode
4976 and DFmode arguments. Warn for mismatching ABI. */
4977 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
4981 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
4982 But in the case of vector types, it is some vector mode.
4984 When we have only some of our vector isa extensions enabled, then there
4985 are some modes for which vector_mode_supported_p is false. For these
4986 modes, the generic vector support in gcc will choose some non-vector mode
4987 in order to implement the type. By computing the natural mode, we'll
4988 select the proper ABI location for the operand and not depend on whatever
4989 the middle-end decides to do with these vector types.
4991 The middle-end can't deal with vector types larger than 16 bytes. In this
4992 case, we return the original mode and warn about the ABI change when CUM permits. */
4995 static enum machine_mode
4996 type_natural_mode (const_tree type, CUMULATIVE_ARGS *cum)
4998 enum machine_mode mode = TYPE_MODE (type);
5000 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
5002 HOST_WIDE_INT size = int_size_in_bytes (type);
5003 if ((size == 8 || size == 16 || size == 32)
5004 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5005 && TYPE_VECTOR_SUBPARTS (type) > 1)
5007 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5009 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5010 mode = MIN_MODE_VECTOR_FLOAT;
5012 mode = MIN_MODE_VECTOR_INT;
5014 /* Get the mode which has this inner mode and number of units. */
5015 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5016 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5017 && GET_MODE_INNER (mode) == innermode)
5019 if (size == 32 && !TARGET_AVX)
5021 static bool warnedavx;
5028 warning (0, "AVX vector argument without AVX "
5029 "enabled changes the ABI");
5031 return TYPE_MODE (type);
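/* Illustrative example (hypothetical, kept out of compilation): a
   32-byte vector argument compiled without -mavx takes the warning
   path above and falls back to the type's original mode: */
#if 0
typedef double v4df __attribute__((vector_size (32)));
v4df vadd (v4df a, v4df b);
#endif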
5044 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5045 this may not agree with the mode that the type system has chosen for the
5046 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5047 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5050 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5055 if (orig_mode != BLKmode)
5056 tmp = gen_rtx_REG (orig_mode, regno);
5059 tmp = gen_rtx_REG (mode, regno);
5060 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5061 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
5067 /* x86-64 register passing implementation. See x86-64 ABI for details. Goal
5068 of this code is to classify each 8-byte chunk of an incoming argument by the
5069 register class and assign registers accordingly. */
5071 /* Return the union class of CLASS1 and CLASS2.
5072 See the x86-64 PS ABI for details. */
5074 static enum x86_64_reg_class
5075 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5077 /* Rule #1: If both classes are equal, this is the resulting class. */
5078 if (class1 == class2)
5081 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
5083 if (class1 == X86_64_NO_CLASS)
5085 if (class2 == X86_64_NO_CLASS)
5088 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5089 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5090 return X86_64_MEMORY_CLASS;
5092 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5093 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5094 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5095 return X86_64_INTEGERSI_CLASS;
5096 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5097 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5098 return X86_64_INTEGER_CLASS;
5100 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
5102 if (class1 == X86_64_X87_CLASS
5103 || class1 == X86_64_X87UP_CLASS
5104 || class1 == X86_64_COMPLEX_X87_CLASS
5105 || class2 == X86_64_X87_CLASS
5106 || class2 == X86_64_X87UP_CLASS
5107 || class2 == X86_64_COMPLEX_X87_CLASS)
5108 return X86_64_MEMORY_CLASS;
5110 /* Rule #6: Otherwise class SSE is used. */
5111 return X86_64_SSE_CLASS;
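/* Worked example for the rules above (hypothetical types, kept out of
   compilation): both fields of each struct land in a single eightbyte,
   so their classes are merged: */
#if 0
struct pairf { float a; float b; };  /* SSESF + SSE -> SSE: one xmm reg */
struct mixed { float f; int i; };    /* SSESF + INTEGERSI -> INTEGERSI
                                        (rule #4): one integer reg */
#endif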
5114 /* Classify the argument of type TYPE and mode MODE.
5115 CLASSES will be filled by the register class used to pass each word
5116 of the operand. The number of words is returned. In case the parameter
5117 should be passed in memory, 0 is returned. As a special case for zero
5118 sized containers, classes[0] will be NO_CLASS and 1 is returned.
5120 BIT_OFFSET is used internally for handling records and specifies the
5121 offset in bits modulo 256 to avoid overflow cases.
5123 See the x86-64 PS ABI for details.
5127 classify_argument (enum machine_mode mode, const_tree type,
5128 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5130 HOST_WIDE_INT bytes =
5131 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5132 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5134 /* Variable sized entities are always passed/returned in memory. */
5138 if (mode != VOIDmode
5139 && targetm.calls.must_pass_in_stack (mode, type))
5142 if (type && AGGREGATE_TYPE_P (type))
5146 enum x86_64_reg_class subclasses[MAX_CLASSES];
5148 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5152 for (i = 0; i < words; i++)
5153 classes[i] = X86_64_NO_CLASS;
5155 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
5156 signal the memory class, so handle it as a special case. */
5159 classes[0] = X86_64_NO_CLASS;
5163 /* Classify each field of record and merge classes. */
5164 switch (TREE_CODE (type))
5167 /* And now merge the fields of the structure. */
5168 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5170 if (TREE_CODE (field) == FIELD_DECL)
5174 if (TREE_TYPE (field) == error_mark_node)
5177 /* Bitfields are always classified as integer. Handle them
5178 early, since later code would consider them to be
5179 misaligned integers. */
5180 if (DECL_BIT_FIELD (field))
5182 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5183 i < ((int_bit_position (field) + (bit_offset % 64))
5184 + tree_low_cst (DECL_SIZE (field), 0)
5187 merge_classes (X86_64_INTEGER_CLASS,
5194 type = TREE_TYPE (field);
5196 /* Flexible array member is ignored. */
5197 if (TYPE_MODE (type) == BLKmode
5198 && TREE_CODE (type) == ARRAY_TYPE
5199 && TYPE_SIZE (type) == NULL_TREE
5200 && TYPE_DOMAIN (type) != NULL_TREE
5201 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5206 if (!warned && warn_psabi)
5209 inform (input_location,
5210 "The ABI of passing struct with"
5211 " a flexible array member has"
5212 " changed in GCC 4.4");
5216 num = classify_argument (TYPE_MODE (type), type,
5218 (int_bit_position (field)
5219 + bit_offset) % 256);
5222 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5223 for (i = 0; i < num && (i + pos) < words; i++)
5225 merge_classes (subclasses[i], classes[i + pos]);
5232 /* Arrays are handled as small records. */
5235 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5236 TREE_TYPE (type), subclasses, bit_offset);
5240 /* The partial classes are now full classes. */
5241 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5242 subclasses[0] = X86_64_SSE_CLASS;
5243 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5244 && !((bit_offset % 64) == 0 && bytes == 4))
5245 subclasses[0] = X86_64_INTEGER_CLASS;
5247 for (i = 0; i < words; i++)
5248 classes[i] = subclasses[i % num];
5253 case QUAL_UNION_TYPE:
5254 /* Unions are similar to RECORD_TYPE but offset is always 0.
5256 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5258 if (TREE_CODE (field) == FIELD_DECL)
5262 if (TREE_TYPE (field) == error_mark_node)
5265 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5266 TREE_TYPE (field), subclasses,
5270 for (i = 0; i < num; i++)
5271 classes[i] = merge_classes (subclasses[i], classes[i]);
5282 /* When size > 16 bytes, if the first one isn't
5283 X86_64_SSE_CLASS or any other ones aren't
5284 X86_64_SSEUP_CLASS, everything should be passed in memory. */
5286 if (classes[0] != X86_64_SSE_CLASS)
5289 for (i = 1; i < words; i++)
5290 if (classes[i] != X86_64_SSEUP_CLASS)
5294 /* Final merger cleanup. */
5295 for (i = 0; i < words; i++)
5297 /* If one class is MEMORY, everything should be passed in memory. */
5299 if (classes[i] == X86_64_MEMORY_CLASS)
5302 /* The X86_64_SSEUP_CLASS should always be preceded by
5303 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5304 if (classes[i] == X86_64_SSEUP_CLASS
5305 && classes[i - 1] != X86_64_SSE_CLASS
5306 && classes[i - 1] != X86_64_SSEUP_CLASS)
5308 /* The first one should never be X86_64_SSEUP_CLASS. */
5309 gcc_assert (i != 0);
5310 classes[i] = X86_64_SSE_CLASS;
5313 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5314 everything should be passed in memory. */
5315 if (classes[i] == X86_64_X87UP_CLASS
5316 && (classes[i - 1] != X86_64_X87_CLASS))
5320 /* The first one should never be X86_64_X87UP_CLASS. */
5321 gcc_assert (i != 0);
5322 if (!warned && warn_psabi)
5325 inform (input_location,
5326 "The ABI of passing union with long double"
5327 " has changed in GCC 4.4");
5335 /* Compute alignment needed. We align all types to natural boundaries with
5336 the exception of XFmode, which is aligned to 64 bits. */
5337 if (mode != VOIDmode && mode != BLKmode)
5339 int mode_alignment = GET_MODE_BITSIZE (mode);
5342 mode_alignment = 128;
5343 else if (mode == XCmode)
5344 mode_alignment = 256;
5345 if (COMPLEX_MODE_P (mode))
5346 mode_alignment /= 2;
5347 /* Misaligned fields are always returned in memory. */
5348 if (bit_offset % mode_alignment)
5352 /* For V1xx modes, just use the base mode. */
5353 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
5354 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5355 mode = GET_MODE_INNER (mode);
5357 /* Classification of atomic types. */
5362 classes[0] = X86_64_SSE_CLASS;
5365 classes[0] = X86_64_SSE_CLASS;
5366 classes[1] = X86_64_SSEUP_CLASS;
5376 int size = (bit_offset % 64)+ (int) GET_MODE_BITSIZE (mode);
5380 classes[0] = X86_64_INTEGERSI_CLASS;
5383 else if (size <= 64)
5385 classes[0] = X86_64_INTEGER_CLASS;
5388 else if (size <= 64+32)
5390 classes[0] = X86_64_INTEGER_CLASS;
5391 classes[1] = X86_64_INTEGERSI_CLASS;
5394 else if (size <= 64+64)
5396 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5404 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5408 /* OImode shouldn't be used directly. */
5413 if (!(bit_offset % 64))
5414 classes[0] = X86_64_SSESF_CLASS;
5416 classes[0] = X86_64_SSE_CLASS;
5419 classes[0] = X86_64_SSEDF_CLASS;
5422 classes[0] = X86_64_X87_CLASS;
5423 classes[1] = X86_64_X87UP_CLASS;
5426 classes[0] = X86_64_SSE_CLASS;
5427 classes[1] = X86_64_SSEUP_CLASS;
5430 classes[0] = X86_64_SSE_CLASS;
5431 if (!(bit_offset % 64))
5437 if (!warned && warn_psabi)
5440 inform (input_location,
5441 "The ABI of passing structure with complex float"
5442 " member has changed in GCC 4.4");
5444 classes[1] = X86_64_SSESF_CLASS;
5448 classes[0] = X86_64_SSEDF_CLASS;
5449 classes[1] = X86_64_SSEDF_CLASS;
5452 classes[0] = X86_64_COMPLEX_X87_CLASS;
5455 /* These modes are larger than 16 bytes. */
5463 classes[0] = X86_64_SSE_CLASS;
5464 classes[1] = X86_64_SSEUP_CLASS;
5465 classes[2] = X86_64_SSEUP_CLASS;
5466 classes[3] = X86_64_SSEUP_CLASS;
5474 classes[0] = X86_64_SSE_CLASS;
5475 classes[1] = X86_64_SSEUP_CLASS;
5483 classes[0] = X86_64_SSE_CLASS;
5489 gcc_assert (VECTOR_MODE_P (mode));
5494 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5496 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5497 classes[0] = X86_64_INTEGERSI_CLASS;
5499 classes[0] = X86_64_INTEGER_CLASS;
5500 classes[1] = X86_64_INTEGER_CLASS;
5501 return 1 + (bytes > 8);
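/* Illustrative classifications under the rules above (hypothetical
   types, kept out of compilation; SysV x86-64 ABI): */
#if 0
struct s1 { double d; long l; };   /* SSE eightbyte + INTEGER eightbyte:
                                      one xmm reg plus one GPR */
struct s2 { long double ld; };     /* X87 + X87UP: memory as argument */
struct s3 { char c[40]; };         /* larger than 32 bytes: memory */
#endif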
5505 /* Examine the argument and set the number of registers required in each
5506 class. Return 0 iff the parameter should be passed in memory. */
5508 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5509 int *int_nregs, int *sse_nregs)
5511 enum x86_64_reg_class regclass[MAX_CLASSES];
5512 int n = classify_argument (mode, type, regclass, 0);
5518 for (n--; n >= 0; n--)
5519 switch (regclass[n])
5521 case X86_64_INTEGER_CLASS:
5522 case X86_64_INTEGERSI_CLASS:
5525 case X86_64_SSE_CLASS:
5526 case X86_64_SSESF_CLASS:
5527 case X86_64_SSEDF_CLASS:
5530 case X86_64_NO_CLASS:
5531 case X86_64_SSEUP_CLASS:
5533 case X86_64_X87_CLASS:
5534 case X86_64_X87UP_CLASS:
5538 case X86_64_COMPLEX_X87_CLASS:
5539 return in_return ? 2 : 0;
5540 case X86_64_MEMORY_CLASS:
5546 /* Construct a container for the argument used by the GCC interface. See
5547 FUNCTION_ARG for the detailed description. */
5550 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5551 const_tree type, int in_return, int nintregs, int nsseregs,
5552 const int *intreg, int sse_regno)
5554 /* The following variables hold the static issued_error state. */
5555 static bool issued_sse_arg_error;
5556 static bool issued_sse_ret_error;
5557 static bool issued_x87_ret_error;
5559 enum machine_mode tmpmode;
5561 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5562 enum x86_64_reg_class regclass[MAX_CLASSES];
5566 int needed_sseregs, needed_intregs;
5567 rtx exp[MAX_CLASSES];
5570 n = classify_argument (mode, type, regclass, 0);
5573 if (!examine_argument (mode, type, in_return, &needed_intregs,
5576 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5579 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5580 some less clueful developer tries to use floating-point anyway. */
5581 if (needed_sseregs && !TARGET_SSE)
5585 if (!issued_sse_ret_error)
5587 error ("SSE register return with SSE disabled");
5588 issued_sse_ret_error = true;
5591 else if (!issued_sse_arg_error)
5593 error ("SSE register argument with SSE disabled");
5594 issued_sse_arg_error = true;
5599 /* Likewise, error if the ABI requires us to return values in the
5600 x87 registers and the user specified -mno-80387. */
5601 if (!TARGET_80387 && in_return)
5602 for (i = 0; i < n; i++)
5603 if (regclass[i] == X86_64_X87_CLASS
5604 || regclass[i] == X86_64_X87UP_CLASS
5605 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5607 if (!issued_x87_ret_error)
5609 error ("x87 register return with x87 disabled");
5610 issued_x87_ret_error = true;
5615 /* First construct simple cases. Avoid SCmode, since we want to use
5616 a single register to pass this type. */
5617 if (n == 1 && mode != SCmode)
5618 switch (regclass[0])
5620 case X86_64_INTEGER_CLASS:
5621 case X86_64_INTEGERSI_CLASS:
5622 return gen_rtx_REG (mode, intreg[0]);
5623 case X86_64_SSE_CLASS:
5624 case X86_64_SSESF_CLASS:
5625 case X86_64_SSEDF_CLASS:
5626 if (mode != BLKmode)
5627 return gen_reg_or_parallel (mode, orig_mode,
5628 SSE_REGNO (sse_regno));
5630 case X86_64_X87_CLASS:
5631 case X86_64_COMPLEX_X87_CLASS:
5632 return gen_rtx_REG (mode, FIRST_STACK_REG);
5633 case X86_64_NO_CLASS:
5634 /* Zero sized array, struct or class. */
5639 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5640 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5641 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5643 && regclass[0] == X86_64_SSE_CLASS
5644 && regclass[1] == X86_64_SSEUP_CLASS
5645 && regclass[2] == X86_64_SSEUP_CLASS
5646 && regclass[3] == X86_64_SSEUP_CLASS
5648 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5651 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5652 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5653 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5654 && regclass[1] == X86_64_INTEGER_CLASS
5655 && (mode == CDImode || mode == TImode || mode == TFmode)
5656 && intreg[0] + 1 == intreg[1])
5657 return gen_rtx_REG (mode, intreg[0]);
5659 /* Otherwise figure out the entries of the PARALLEL. */
5660 for (i = 0; i < n; i++)
5664 switch (regclass[i])
5666 case X86_64_NO_CLASS:
5668 case X86_64_INTEGER_CLASS:
5669 case X86_64_INTEGERSI_CLASS:
5670 /* Merge TImodes on aligned occasions here too. */
5671 if (i * 8 + 8 > bytes)
5672 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5673 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5677 /* We've requested 24 bytes, which we don't have a mode for. Use DImode. */
5678 if (tmpmode == BLKmode)
5680 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5681 gen_rtx_REG (tmpmode, *intreg),
5685 case X86_64_SSESF_CLASS:
5686 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5687 gen_rtx_REG (SFmode,
5688 SSE_REGNO (sse_regno)),
5692 case X86_64_SSEDF_CLASS:
5693 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5694 gen_rtx_REG (DFmode,
5695 SSE_REGNO (sse_regno)),
5699 case X86_64_SSE_CLASS:
5707 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
5717 && regclass[1] == X86_64_SSEUP_CLASS
5718 && regclass[2] == X86_64_SSEUP_CLASS
5719 && regclass[3] == X86_64_SSEUP_CLASS);
5726 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5727 gen_rtx_REG (tmpmode,
5728 SSE_REGNO (sse_regno)),
5737 /* Empty aligned struct, union or class. */
5741 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5742 for (i = 0; i < nexps; i++)
5743 XVECEXP (ret, 0, i) = exp [i];
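 /* For example, a type like

      struct s { double d; long l; };

    classifies as { SSE, INTEGER }, so when returned by value the loop
    above builds, roughly,

      (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                 (expr_list (reg:DI ax)  (const_int 8))])

    i.e. the first eightbyte lives in %xmm0 and the second in %rax.  */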
5747 /* Update the data in CUM to advance over an argument of mode MODE
5748 and data type TYPE. (TYPE is null for libcalls where that information
5749 may not be available.) */
5752 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5753 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5769 cum->words += words;
5770 cum->nregs -= words;
5771 cum->regno += words;
5773 if (cum->nregs <= 0)
5781 /* OImode shouldn't be used directly. */
5785 if (cum->float_in_sse < 2)
5788 if (cum->float_in_sse < 1)
5805 if (!type || !AGGREGATE_TYPE_P (type))
5807 cum->sse_words += words;
5808 cum->sse_nregs -= 1;
5809 cum->sse_regno += 1;
5810 if (cum->sse_nregs <= 0)
5824 if (!type || !AGGREGATE_TYPE_P (type))
5826 cum->mmx_words += words;
5827 cum->mmx_nregs -= 1;
5828 cum->mmx_regno += 1;
5829 if (cum->mmx_nregs <= 0)
5840 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5841 tree type, HOST_WIDE_INT words, int named)
5843 int int_nregs, sse_nregs;
5845 /* Unnamed 256bit vector mode parameters are passed on the stack.  */
5846 if (!named && VALID_AVX256_REG_MODE (mode))
5849 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
5850 cum->words += words;
5851 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
5853 cum->nregs -= int_nregs;
5854 cum->sse_nregs -= sse_nregs;
5855 cum->regno += int_nregs;
5856 cum->sse_regno += sse_nregs;
5859 cum->words += words;
5863 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
5864 HOST_WIDE_INT words)
5866 /* Otherwise, this should be passed indirectly.  */
5867 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
5869 cum->words += words;
5878 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5879 tree type, int named)
5881 HOST_WIDE_INT bytes, words;
5883 if (mode == BLKmode)
5884 bytes = int_size_in_bytes (type);
5886 bytes = GET_MODE_SIZE (mode);
5887 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5890 mode = type_natural_mode (type, NULL);
5892 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
5893 function_arg_advance_ms_64 (cum, bytes, words);
5894 else if (TARGET_64BIT)
5895 function_arg_advance_64 (cum, mode, type, words, named);
5897 function_arg_advance_32 (cum, mode, type, bytes, words);
5900 /* Define where to put the arguments to a function.
5901 Value is zero to push the argument on the stack,
5902 or a hard register in which to store the argument.
5904 MODE is the argument's machine mode.
5905 TYPE is the data type of the argument (as a tree).
5906 This is null for libcalls where that information may not be available.
5908 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5909 the preceding args and about the function being called.
5910 NAMED is nonzero if this argument is a named parameter
5911 (otherwise it is an extra parameter matching an ellipsis). */
5914 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5915 enum machine_mode orig_mode, tree type,
5916 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5918 static bool warnedsse, warnedmmx;
5920 /* Avoid the AL settings for the Unix64 ABI. */
5921 if (mode == VOIDmode)
5937 if (words <= cum->nregs)
5939 int regno = cum->regno;
5941 /* Fastcall allocates the first two DWORD (SImode) or
5942 smaller arguments to ECX and EDX if it isn't an
5943 aggregate type.  */
5948 || (type && AGGREGATE_TYPE_P (type)))
5951 /* ECX, not EAX, is the first allocated register.  */
5952 if (regno == AX_REG)
5955 return gen_rtx_REG (mode, regno);
5960 if (cum->float_in_sse < 2)
5963 if (cum->float_in_sse < 1)
5967 /* In 32bit, we pass TImode in xmm registers. */
5974 if (!type || !AGGREGATE_TYPE_P (type))
5976 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
5979 warning (0, "SSE vector argument without SSE enabled "
5983 return gen_reg_or_parallel (mode, orig_mode,
5984 cum->sse_regno + FIRST_SSE_REG);
5989 /* OImode shouldn't be used directly. */
5998 if (!type || !AGGREGATE_TYPE_P (type))
6001 return gen_reg_or_parallel (mode, orig_mode,
6002 cum->sse_regno + FIRST_SSE_REG);
6012 if (!type || !AGGREGATE_TYPE_P (type))
6014 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6017 warning (0, "MMX vector argument without MMX enabled "
6021 return gen_reg_or_parallel (mode, orig_mode,
6022 cum->mmx_regno + FIRST_MMX_REG);
6031 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6032 enum machine_mode orig_mode, tree type, int named)
6034 /* Handle the hidden AL argument containing the number of SSE registers
6035 used for varargs x86-64 functions.  */
6036 if (mode == VOIDmode)
6037 return GEN_INT (cum->maybe_vaarg
6038 ? (cum->sse_nregs < 0
6039 ? (cum->call_abi == ix86_abi
6041 : (ix86_abi != SYSV_ABI
6042 ? X86_64_SSE_REGPARM_MAX
6043 : X86_64_MS_SSE_REGPARM_MAX))
6058 /* Unnamed 256bit vector mode parameters are passed on the stack.  */
6064 return construct_container (mode, orig_mode, type, 0, cum->nregs,
6066 &x86_64_int_parameter_registers [cum->regno],
6071 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6072 enum machine_mode orig_mode, int named,
6073 HOST_WIDE_INT bytes)
6077 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
6078 We use the value -2 to specify that the current function call is MSABI.  */
6079 if (mode == VOIDmode)
6080 return GEN_INT (-2);
6082 /* If we've run out of registers, it goes on the stack. */
6083 if (cum->nregs == 0)
6086 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6088 /* Only floating point modes are passed in anything but integer regs. */
6089 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6092 regno = cum->regno + FIRST_SSE_REG;
6097 /* Unnamed floating parameters are passed in both the
6098 SSE and integer registers. */
6099 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6100 t2 = gen_rtx_REG (mode, regno);
6101 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6102 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6103 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
6106 /* Handle aggregate types passed in registers.  */
6107 if (orig_mode == BLKmode)
6109 if (bytes > 0 && bytes <= 8)
6110 mode = (bytes > 4 ? DImode : SImode);
6111 if (mode == BLKmode)
6115 return gen_reg_or_parallel (mode, orig_mode, regno);
6119 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
6120 tree type, int named)
6122 enum machine_mode mode = omode;
6123 HOST_WIDE_INT bytes, words;
6125 if (mode == BLKmode)
6126 bytes = int_size_in_bytes (type);
6128 bytes = GET_MODE_SIZE (mode);
6129 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6131 /* To simplify the code below, represent vector types with a vector mode
6132 even if MMX/SSE are not active. */
6133 if (type && TREE_CODE (type) == VECTOR_TYPE)
6134 mode = type_natural_mode (type, cum);
6136 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6137 return function_arg_ms_64 (cum, mode, omode, named, bytes);
6138 else if (TARGET_64BIT)
6139 return function_arg_64 (cum, mode, omode, type, named);
6141 return function_arg_32 (cum, mode, omode, type, bytes, words);
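 /* For example, with the 64bit SysV calling convention a call like

      void f (int i, double d, __m128 v);

    ends up with i in %edi, d in %xmm0 and v in %xmm1, while on 32bit
    (without regparm/fastcall/sseregparm attributes) all three arguments
    are pushed on the stack.  A sketch only; the exact placement follows
    from the classification machinery above.  */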
6144 /* A C expression that indicates when an argument must be passed by
6145 reference. If nonzero for an argument, a copy of that argument is
6146 made in memory and a pointer to the argument is passed instead of
6147 the argument itself. The pointer is passed in whatever way is
6148 appropriate for passing a pointer to that type. */
6151 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6152 enum machine_mode mode ATTRIBUTE_UNUSED,
6153 const_tree type, bool named ATTRIBUTE_UNUSED)
6155 /* See Windows x64 Software Convention. */
6156 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6158 int msize = (int) GET_MODE_SIZE (mode);
6161 /* Arrays are passed by reference. */
6162 if (TREE_CODE (type) == ARRAY_TYPE)
6165 if (AGGREGATE_TYPE_P (type))
6167 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6168 are passed by reference. */
6169 msize = int_size_in_bytes (type);
6173 /* __m128 is passed by reference. */
6175 case 1: case 2: case 4: case 8:
6181 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
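 /* E.g. under the Windows x64 convention the rules above give:

      struct small { int a, b; };    -- 8 bytes, passed by value in a GPR
      struct odd   { char c[3]; };   -- 3 bytes, passed by reference
      __m128                         -- 16 bytes, passed by reference

    (an illustrative sketch of the size test).  */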
6187 /* Return true when TYPE should be 128bit aligned for 32bit argument
6188 passing ABI.  */
6190 contains_aligned_value_p (tree type)
6192 enum machine_mode mode = TYPE_MODE (type);
6193 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6197 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6199 if (TYPE_ALIGN (type) < 128)
6202 if (AGGREGATE_TYPE_P (type))
6204 /* Walk the aggregates recursively. */
6205 switch (TREE_CODE (type))
6209 case QUAL_UNION_TYPE:
6213 /* Walk all the structure fields. */
6214 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6216 if (TREE_CODE (field) == FIELD_DECL
6217 && contains_aligned_value_p (TREE_TYPE (field)))
6224 /* Just in case some language passes arrays by value.  */
6225 if (contains_aligned_value_p (TREE_TYPE (type)))
6236 /* Gives the alignment boundary, in bits, of an argument with the
6237 specified mode and type. */
6240 ix86_function_arg_boundary (enum machine_mode mode, tree type)
6245 /* Since the canonical type is used for the call, convert TYPE to its
6246 canonical type if needed.  */
6247 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
6248 type = TYPE_CANONICAL (type);
6249 align = TYPE_ALIGN (type);
6252 align = GET_MODE_ALIGNMENT (mode);
6253 if (align < PARM_BOUNDARY)
6254 align = PARM_BOUNDARY;
6255 /* In 32bit, only _Decimal128 and __float128 are aligned to their
6256 natural boundaries. */
6257 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
6259 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
6260 make an exception for SSE modes since these require 128bit
6261 alignment.
6263 The handling here differs from field_alignment. ICC aligns MMX
6264 arguments to 4 byte boundaries, while structure fields are aligned
6265 to 8 byte boundaries. */
6268 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6269 align = PARM_BOUNDARY;
6273 if (!contains_aligned_value_p (type))
6274 align = PARM_BOUNDARY;
6277 if (align > BIGGEST_ALIGNMENT)
6278 align = BIGGEST_ALIGNMENT;
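 /* For instance, in 32bit mode a plain

      void f (double d);

    keeps d on a 4 byte (PARM_BOUNDARY) boundary despite the 8 byte
    natural alignment of double, while an __m128 argument (or an
    aggregate containing one) is aligned to 16 bytes when SSE is
    enabled.  A sketch only; the precise rules are the code above.  */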
6282 /* Return true if N is a possible register number of function value. */
6285 ix86_function_value_regno_p (int regno)
6292 case FIRST_FLOAT_REG:
6293 /* TODO: The function should depend on current function ABI but
6294 builtins.c would need updating then.  Therefore we use the
6295 default ABI.  */
6296 if (TARGET_64BIT && ix86_abi == MS_ABI)
6298 return TARGET_FLOAT_RETURNS_IN_80387;
6304 if (TARGET_MACHO || TARGET_64BIT)
6312 /* Define how to find the value returned by a function.
6313 VALTYPE is the data type of the value (as a tree).
6314 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6315 otherwise, FUNC is 0. */
6318 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6319 const_tree fntype, const_tree fn)
6323 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
6324 we normally prevent this case when mmx is not available. However
6325 some ABIs may require the result to be returned like DImode. */
6326 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6327 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
6329 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
6330 we prevent this case when sse is not available. However some ABIs
6331 may require the result to be returned like integer TImode. */
6332 else if (mode == TImode
6333 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6334 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6336 /* 32-byte vector modes in %ymm0. */
6337 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6338 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6340 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6341 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6342 regno = FIRST_FLOAT_REG;
6344 /* Most things go in %eax. */
6347 /* Override FP return register with %xmm0 for local functions when
6348 SSE math is enabled or for functions with sseregparm attribute. */
6349 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6351 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6352 if ((sse_level >= 1 && mode == SFmode)
6353 || (sse_level == 2 && mode == DFmode))
6354 regno = FIRST_SSE_REG;
6357 /* OImode shouldn't be used directly. */
6358 gcc_assert (mode != OImode);
6360 return gen_rtx_REG (orig_mode, regno);
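 /* Putting the cases above together for 32bit code, as a sketch:

      float/double    -> %st(0), or %xmm0 with sseregparm/local SSE math
      v2si (8 byte)   -> %mm0 when MMX is enabled
      __m128/TImode   -> %xmm0 when SSE is enabled
      int, pointers   -> %eax  */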
6364 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6369 /* Handle libcalls, which don't provide a type node. */
6370 if (valtype == NULL)
6382 return gen_rtx_REG (mode, FIRST_SSE_REG);
6385 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6389 return gen_rtx_REG (mode, AX_REG);
6393 ret = construct_container (mode, orig_mode, valtype, 1,
6394 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6395 x86_64_int_return_registers, 0);
6397 /* For zero sized structures, construct_container returns NULL, but we
6398 need to keep the rest of the compiler happy by returning a meaningful value.  */
6400 ret = gen_rtx_REG (orig_mode, AX_REG);
6406 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6408 unsigned int regno = AX_REG;
6412 switch (GET_MODE_SIZE (mode))
6415 if((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6416 && !COMPLEX_MODE_P (mode))
6417 regno = FIRST_SSE_REG;
6421 if (mode == SFmode || mode == DFmode)
6422 regno = FIRST_SSE_REG;
6428 return gen_rtx_REG (orig_mode, regno);
6432 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6433 enum machine_mode orig_mode, enum machine_mode mode)
6435 const_tree fn, fntype;
6438 if (fntype_or_decl && DECL_P (fntype_or_decl))
6439 fn = fntype_or_decl;
6440 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6442 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6443 return function_value_ms_64 (orig_mode, mode);
6444 else if (TARGET_64BIT)
6445 return function_value_64 (orig_mode, mode, valtype);
6447 return function_value_32 (orig_mode, mode, fntype, fn);
6451 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6452 bool outgoing ATTRIBUTE_UNUSED)
6454 enum machine_mode mode, orig_mode;
6456 orig_mode = TYPE_MODE (valtype);
6457 mode = type_natural_mode (valtype, NULL);
6458 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6462 ix86_libcall_value (enum machine_mode mode)
6464 return ix86_function_value_1 (NULL, NULL, mode, mode);
6467 /* Return true iff type is returned in memory. */
6469 static int ATTRIBUTE_UNUSED
6470 return_in_memory_32 (const_tree type, enum machine_mode mode)
6474 if (mode == BLKmode)
6477 size = int_size_in_bytes (type);
6479 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6482 if (VECTOR_MODE_P (mode) || mode == TImode)
6484 /* User-created vectors small enough to fit in EAX. */
6488 /* MMX/3dNow values are returned in MM0,
6489 except when it doesn't exist.  */
6491 return (TARGET_MMX ? 0 : 1);
6493 /* SSE values are returned in XMM0, except when it doesn't exist. */
6495 return (TARGET_SSE ? 0 : 1);
6497 /* AVX values are returned in YMM0, except when it doesn't exist. */
6499 return TARGET_AVX ? 0 : 1;
6508 /* OImode shouldn't be used directly. */
6509 gcc_assert (mode != OImode);
6514 static int ATTRIBUTE_UNUSED
6515 return_in_memory_64 (const_tree type, enum machine_mode mode)
6517 int needed_intregs, needed_sseregs;
6518 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6521 static int ATTRIBUTE_UNUSED
6522 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6524 HOST_WIDE_INT size = int_size_in_bytes (type);
6526 /* __m128 is returned in xmm0. */
6527 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6528 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6531 /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes.  */
6532 return (size != 1 && size != 2 && size != 4 && size != 8);
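 /* So under this convention, for example:

      struct { int a, b; }   (8 bytes)   -> returned in %rax
      struct { char c[3]; }  (3 bytes)   -> returned in memory
      __m128                 (16 bytes)  -> returned in %xmm0

    (illustrative of the tests above).  */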
6536 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6538 #ifdef SUBTARGET_RETURN_IN_MEMORY
6539 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6541 const enum machine_mode mode = type_natural_mode (type, NULL);
6545 if (ix86_function_type_abi (fntype) == MS_ABI)
6546 return return_in_memory_ms_64 (type, mode);
6548 return return_in_memory_64 (type, mode);
6551 return return_in_memory_32 (type, mode);
6555 /* Return true iff TYPE is returned in memory.  This version is used
6556 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6557 but differs notably in that when MMX is available, 8-byte vectors
6558 are returned in memory, rather than in MMX registers. */
6561 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6564 enum machine_mode mode = type_natural_mode (type, NULL);
6567 return return_in_memory_64 (type, mode);
6569 if (mode == BLKmode)
6572 size = int_size_in_bytes (type);
6574 if (VECTOR_MODE_P (mode))
6576 /* Return in memory only if MMX registers *are* available.  This
6577 seems backwards, but it is consistent with the existing
6578 Solaris x86 ABI.  */
6584 else if (mode == TImode)
6586 else if (mode == XFmode)
6592 /* When returning SSE vector types, we have a choice of either
6593 (1) being abi incompatible with a -march switch, or
6594 (2) generating an error.
6595 Given no good solution, I think the safest thing is one warning.
6596 The user won't be able to use -Werror, but....
6598 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6599 called in response to actually generating a caller or callee that
6600 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6601 via aggregate_value_p for general type probing from tree-ssa. */
6604 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6606 static bool warnedsse, warnedmmx;
6608 if (!TARGET_64BIT && type)
6610 /* Look at the return type of the function, not the function type. */
6611 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6613 if (!TARGET_SSE && !warnedsse)
6616 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6619 warning (0, "SSE vector return without SSE enabled "
6624 if (!TARGET_MMX && !warnedmmx)
6626 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6629 warning (0, "MMX vector return without MMX enabled "
6639 /* Create the va_list data type. */
6641 /* Returns the calling convention specific va_list data type.
6642 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6645 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6647 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6649 /* For i386 we use a plain pointer to the argument area.  */
6650 if (!TARGET_64BIT || abi == MS_ABI)
6651 return build_pointer_type (char_type_node);
6653 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6654 type_decl = build_decl (BUILTINS_LOCATION,
6655 TYPE_DECL, get_identifier ("__va_list_tag"), record);
6657 f_gpr = build_decl (BUILTINS_LOCATION,
6658 FIELD_DECL, get_identifier ("gp_offset"),
6659 unsigned_type_node);
6660 f_fpr = build_decl (BUILTINS_LOCATION,
6661 FIELD_DECL, get_identifier ("fp_offset"),
6662 unsigned_type_node);
6663 f_ovf = build_decl (BUILTINS_LOCATION,
6664 FIELD_DECL, get_identifier ("overflow_arg_area"),
6666 f_sav = build_decl (BUILTINS_LOCATION,
6667 FIELD_DECL, get_identifier ("reg_save_area"),
6670 va_list_gpr_counter_field = f_gpr;
6671 va_list_fpr_counter_field = f_fpr;
6673 DECL_FIELD_CONTEXT (f_gpr) = record;
6674 DECL_FIELD_CONTEXT (f_fpr) = record;
6675 DECL_FIELD_CONTEXT (f_ovf) = record;
6676 DECL_FIELD_CONTEXT (f_sav) = record;
6678 TREE_CHAIN (record) = type_decl;
6679 TYPE_NAME (record) = type_decl;
6680 TYPE_FIELDS (record) = f_gpr;
6681 TREE_CHAIN (f_gpr) = f_fpr;
6682 TREE_CHAIN (f_fpr) = f_ovf;
6683 TREE_CHAIN (f_ovf) = f_sav;
6685 layout_type (record);
6687 /* The correct type is an array type of one element. */
6688 return build_array_type (record, build_index_type (size_zero_node));
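 /* The record built above corresponds to the well-known SysV x86-64
    va_list layout, i.e. roughly the C declaration:

      typedef struct __va_list_tag
      {
        unsigned int gp_offset;    -- byte offset into reg_save_area (GPRs)
        unsigned int fp_offset;    -- byte offset into reg_save_area (SSE)
        void *overflow_arg_area;   -- next stack argument
        void *reg_save_area;       -- register save area from the prologue
      } va_list[1];  */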
6691 /* Set up the builtin va_list data type and, for 64-bit, the additional
6692 calling convention specific va_list data types.  */
6695 ix86_build_builtin_va_list (void)
6697 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
6699 /* Initialize abi specific va_list builtin types. */
6703 if (ix86_abi == MS_ABI)
6705 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6706 if (TREE_CODE (t) != RECORD_TYPE)
6707 t = build_variant_type_copy (t);
6708 sysv_va_list_type_node = t;
6713 if (TREE_CODE (t) != RECORD_TYPE)
6714 t = build_variant_type_copy (t);
6715 sysv_va_list_type_node = t;
6717 if (ix86_abi != MS_ABI)
6719 t = ix86_build_builtin_va_list_abi (MS_ABI);
6720 if (TREE_CODE (t) != RECORD_TYPE)
6721 t = build_variant_type_copy (t);
6722 ms_va_list_type_node = t;
6727 if (TREE_CODE (t) != RECORD_TYPE)
6728 t = build_variant_type_copy (t);
6729 ms_va_list_type_node = t;
6736 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6739 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6748 int regparm = ix86_regparm;
6750 if (cum->call_abi != ix86_abi)
6751 regparm = (ix86_abi != SYSV_ABI
6752 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
6754 /* GPR size of varargs save area. */
6755 if (cfun->va_list_gpr_size)
6756 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6758 ix86_varargs_gpr_size = 0;
6760 /* FPR size of varargs save area. We don't need it if we don't pass
6761 anything in SSE registers. */
6762 if (cum->sse_nregs && cfun->va_list_fpr_size)
6763 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6765 ix86_varargs_fpr_size = 0;
6767 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6770 save_area = frame_pointer_rtx;
6771 set = get_varargs_alias_set ();
6773 for (i = cum->regno;
6775 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6778 mem = gen_rtx_MEM (Pmode,
6779 plus_constant (save_area, i * UNITS_PER_WORD));
6780 MEM_NOTRAP_P (mem) = 1;
6781 set_mem_alias_set (mem, set);
6782 emit_move_insn (mem, gen_rtx_REG (Pmode,
6783 x86_64_int_parameter_registers[i]));
6786 if (ix86_varargs_fpr_size)
6788 /* Now emit code to save SSE registers.  The AX parameter contains the
6789 number of SSE parameter registers used to call this function.  We use
6790 the sse_prologue_save insn template, which produces a computed jump
6791 across the SSE saves.  We need some preparation work to get this working.  */
6793 label = gen_label_rtx ();
6794 label_ref = gen_rtx_LABEL_REF (Pmode, label);
6796 /* Compute the address to jump to:
6797 label - eax*4 + nnamed_sse_arguments*4, or
6798 label - eax*5 + nnamed_sse_arguments*5 for AVX.  */
6799 tmp_reg = gen_reg_rtx (Pmode);
6800 nsse_reg = gen_reg_rtx (Pmode);
6801 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6802 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6803 gen_rtx_MULT (Pmode, nsse_reg,
6806 /* vmovaps is one byte longer than movaps. */
6808 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6809 gen_rtx_PLUS (Pmode, tmp_reg,
6815 gen_rtx_CONST (DImode,
6816 gen_rtx_PLUS (DImode,
6818 GEN_INT (cum->sse_regno
6819 * (TARGET_AVX ? 5 : 4)))));
6821 emit_move_insn (nsse_reg, label_ref);
6822 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
6824 /* Compute the address of the memory block we save into.  We always use
6825 a pointer pointing 127 bytes after the first byte to store - this is
6826 needed to keep the instruction size limited to 4 bytes (5 bytes for AVX)
6827 with a one byte displacement.  */
6828 tmp_reg = gen_reg_rtx (Pmode);
6829 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6830 plus_constant (save_area,
6831 ix86_varargs_gpr_size + 127)));
6832 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6833 MEM_NOTRAP_P (mem) = 1;
6834 set_mem_alias_set (mem, set);
6835 set_mem_align (mem, BITS_PER_WORD);
6837 /* And finally do the dirty job! */
6838 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6839 GEN_INT (cum->sse_regno), label));
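 /* The emitted sequence then looks roughly like (a sketch for the
    non-AVX case, where each save insn is exactly 4 bytes long):

	jmp	*%nsse_reg           -- the computed target from above
	movaps	%xmm7, <save area + 0x70>
	movaps	%xmm6, <save area + 0x60>
	...
	movaps	%xmm<sse_regno>, <save area + sse_regno*16>
      .L:

    so the computed jump executes only the saves for the SSE registers
    that may actually hold anonymous arguments.  The authoritative
    template is sse_prologue_save in i386.md.  */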
6844 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6846 alias_set_type set = get_varargs_alias_set ();
6849 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
6853 mem = gen_rtx_MEM (Pmode,
6854 plus_constant (virtual_incoming_args_rtx,
6855 i * UNITS_PER_WORD));
6856 MEM_NOTRAP_P (mem) = 1;
6857 set_mem_alias_set (mem, set);
6859 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6860 emit_move_insn (mem, reg);
6865 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6866 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6869 CUMULATIVE_ARGS next_cum;
6872 /* This argument doesn't appear to be used anymore, which is good,
6873 because the old code here didn't suppress rtl generation.  */
6874 gcc_assert (!no_rtl);
6879 fntype = TREE_TYPE (current_function_decl);
6881 /* For varargs, we do not want to skip the dummy va_dcl argument.
6882 For stdargs, we do want to skip the last named argument. */
6884 if (stdarg_p (fntype))
6885 function_arg_advance (&next_cum, mode, type, 1);
6887 if (cum->call_abi == MS_ABI)
6888 setup_incoming_varargs_ms_64 (&next_cum);
6890 setup_incoming_varargs_64 (&next_cum);
6893 /* Check whether TYPE is a va_list of kind char *.  */
6896 is_va_list_char_pointer (tree type)
6900 /* For 32-bit it is always true. */
6903 canonic = ix86_canonical_va_list_type (type);
6904 return (canonic == ms_va_list_type_node
6905 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
6908 /* Implement va_start. */
6911 ix86_va_start (tree valist, rtx nextarg)
6913 HOST_WIDE_INT words, n_gpr, n_fpr;
6914 tree f_gpr, f_fpr, f_ovf, f_sav;
6915 tree gpr, fpr, ovf, sav, t;
6918 /* Only 64bit targets need something special.  */
6919 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6921 std_expand_builtin_va_start (valist, nextarg);
6925 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6926 f_fpr = TREE_CHAIN (f_gpr);
6927 f_ovf = TREE_CHAIN (f_fpr);
6928 f_sav = TREE_CHAIN (f_ovf);
6930 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
6931 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6932 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6933 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6934 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6936 /* Count number of gp and fp argument registers used. */
6937 words = crtl->args.info.words;
6938 n_gpr = crtl->args.info.regno;
6939 n_fpr = crtl->args.info.sse_regno;
6941 if (cfun->va_list_gpr_size)
6943 type = TREE_TYPE (gpr);
6944 t = build2 (MODIFY_EXPR, type,
6945 gpr, build_int_cst (type, n_gpr * 8));
6946 TREE_SIDE_EFFECTS (t) = 1;
6947 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6950 if (TARGET_SSE && cfun->va_list_fpr_size)
6952 type = TREE_TYPE (fpr);
6953 t = build2 (MODIFY_EXPR, type, fpr,
6954 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
6955 TREE_SIDE_EFFECTS (t) = 1;
6956 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6959 /* Find the overflow area. */
6960 type = TREE_TYPE (ovf);
6961 t = make_tree (type, crtl->args.internal_arg_pointer);
6963 t = build2 (POINTER_PLUS_EXPR, type, t,
6964 size_int (words * UNITS_PER_WORD));
6965 t = build2 (MODIFY_EXPR, type, ovf, t);
6966 TREE_SIDE_EFFECTS (t) = 1;
6967 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6969 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
6971 /* Find the register save area.
6972 The function's prologue saves it right above the stack frame.  */
6973 type = TREE_TYPE (sav);
6974 t = make_tree (type, frame_pointer_rtx);
6975 if (!ix86_varargs_gpr_size)
6976 t = build2 (POINTER_PLUS_EXPR, type, t,
6977 size_int (-8 * X86_64_REGPARM_MAX));
6978 t = build2 (MODIFY_EXPR, type, sav, t);
6979 TREE_SIDE_EFFECTS (t) = 1;
6980 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
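 /* For example, for

      int f (int a, ...)

    one integer register is consumed by the named argument, so the code
    above initializes (with X86_64_REGPARM_MAX == 6):

      gp_offset         = 1 * 8 = 8
      fp_offset         = 0 * 16 + 8 * 6 = 48
      overflow_arg_area = incoming argument pointer
      reg_save_area     = start of the prologue save area  */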
6984 /* Implement va_arg. */
6987 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6990 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
6991 tree f_gpr, f_fpr, f_ovf, f_sav;
6992 tree gpr, fpr, ovf, sav, t;
6994 tree lab_false, lab_over = NULL_TREE;
6999 enum machine_mode nat_mode;
7002 /* Only 64bit targets need something special.  */
7003 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7004 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7006 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7007 f_fpr = TREE_CHAIN (f_gpr);
7008 f_ovf = TREE_CHAIN (f_fpr);
7009 f_sav = TREE_CHAIN (f_ovf);
7011 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7012 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7013 valist = build_va_arg_indirect_ref (valist);
7014 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7015 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7016 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7018 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7020 type = build_pointer_type (type);
7021 size = int_size_in_bytes (type);
7022 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7024 nat_mode = type_natural_mode (type, NULL);
7033 /* Unnamed 256bit vector mode parameters are passed on the stack.  */
7034 if (ix86_cfun_abi () == SYSV_ABI)
7041 container = construct_container (nat_mode, TYPE_MODE (type),
7042 type, 0, X86_64_REGPARM_MAX,
7043 X86_64_SSE_REGPARM_MAX, intreg,
7048 /* Pull the value out of the saved registers. */
7050 addr = create_tmp_var (ptr_type_node, "addr");
7054 int needed_intregs, needed_sseregs;
7056 tree int_addr, sse_addr;
7058 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7059 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7061 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7063 need_temp = (!REG_P (container)
7064 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7065 || TYPE_ALIGN (type) > 128));
7067 /* When passing a structure, verify that it forms a consecutive block
7068 in the register save area.  If not, we need to do moves.  */
7069 if (!need_temp && !REG_P (container))
7071 /* Verify that all registers are strictly consecutive */
7072 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7076 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7078 rtx slot = XVECEXP (container, 0, i);
7079 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7080 || INTVAL (XEXP (slot, 1)) != i * 16)
7088 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7090 rtx slot = XVECEXP (container, 0, i);
7091 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7092 || INTVAL (XEXP (slot, 1)) != i * 8)
7104 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7105 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7108 /* First ensure that we fit completely in registers. */
7111 t = build_int_cst (TREE_TYPE (gpr),
7112 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7113 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7114 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7115 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7116 gimplify_and_add (t, pre_p);
7120 t = build_int_cst (TREE_TYPE (fpr),
7121 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7122 + X86_64_REGPARM_MAX * 8);
7123 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7124 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7125 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7126 gimplify_and_add (t, pre_p);
7129 /* Compute index to start of area used for integer regs. */
7132 /* int_addr = gpr + sav; */
7133 t = fold_convert (sizetype, gpr);
7134 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7135 gimplify_assign (int_addr, t, pre_p);
7139 /* sse_addr = fpr + sav; */
7140 t = fold_convert (sizetype, fpr);
7141 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7142 gimplify_assign (sse_addr, t, pre_p);
7147 tree temp = create_tmp_var (type, "va_arg_tmp");
7150 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
7151 gimplify_assign (addr, t, pre_p);
7153 for (i = 0; i < XVECLEN (container, 0); i++)
7155 rtx slot = XVECEXP (container, 0, i);
7156 rtx reg = XEXP (slot, 0);
7157 enum machine_mode mode = GET_MODE (reg);
7158 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
7159 tree addr_type = build_pointer_type (piece_type);
7160 tree daddr_type = build_pointer_type_for_mode (piece_type,
7164 tree dest_addr, dest;
7166 if (SSE_REGNO_P (REGNO (reg)))
7168 src_addr = sse_addr;
7169 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
7173 src_addr = int_addr;
7174 src_offset = REGNO (reg) * 8;
7176 src_addr = fold_convert (addr_type, src_addr);
7177 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
7178 size_int (src_offset));
7179 src = build_va_arg_indirect_ref (src_addr);
7181 dest_addr = fold_convert (daddr_type, addr);
7182 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
7183 size_int (INTVAL (XEXP (slot, 1))));
7184 dest = build_va_arg_indirect_ref (dest_addr);
7186 gimplify_assign (dest, src, pre_p);
7192 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
7193 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7194 gimplify_assign (gpr, t, pre_p);
7199 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7200 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7201 gimplify_assign (fpr, t, pre_p);
7204 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7206 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7209 /* ... otherwise out of the overflow area. */
7211 /* When the caller aligns a parameter on the stack, a parameter
7212 alignment beyond MAX_SUPPORTED_STACK_ALIGNMENT is clamped to
7213 MAX_SUPPORTED_STACK_ALIGNMENT.  Match the callee with the
7214 caller here.  */
7215 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
7216 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7217 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7219 /* Care for on-stack alignment if needed. */
7220 if (arg_boundary <= 64
7221 || integer_zerop (TYPE_SIZE (type)))
7225 HOST_WIDE_INT align = arg_boundary / 8;
7226 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
7227 size_int (align - 1));
7228 t = fold_convert (sizetype, t);
7229 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7231 t = fold_convert (TREE_TYPE (ovf), t);
7233 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7234 gimplify_assign (addr, t, pre_p);
7236 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
7237 size_int (rsize * UNITS_PER_WORD));
7238 gimplify_assign (unshare_expr (ovf), t, pre_p);
7241 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7243 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
7244 addr = fold_convert (ptrtype, addr);
7247 addr = build_va_arg_indirect_ref (addr);
7248 return build_va_arg_indirect_ref (addr);
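 /* The GIMPLE generated above for, say, va_arg (ap, int) behaves like
    this sketch:

      if (ap->gp_offset >= 48)          -- 48 == 6 GPRs * 8, none left
        {
          addr = ap->overflow_arg_area;
          ap->overflow_arg_area += 8;
        }
      else
        {
          addr = ap->reg_save_area + ap->gp_offset;
          ap->gp_offset += 8;
        }
      result = *(int *) addr;  */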
7251 /* Return nonzero if OPNUM's MEM should be matched
7252 in movabs* patterns. */
7255 ix86_check_movabs (rtx insn, int opnum)
7259 set = PATTERN (insn);
7260 if (GET_CODE (set) == PARALLEL)
7261 set = XVECEXP (set, 0, 0);
7262 gcc_assert (GET_CODE (set) == SET);
7263 mem = XEXP (set, opnum);
7264 while (GET_CODE (mem) == SUBREG)
7265 mem = SUBREG_REG (mem);
7266 gcc_assert (MEM_P (mem));
7267 return (volatile_ok || !MEM_VOLATILE_P (mem));
7270 /* Initialize the table of extra 80387 mathematical constants. */
7273 init_ext_80387_constants (void)
7275 static const char * cst[5] =
7277 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7278 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7279 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7280 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7281 "3.1415926535897932385128089594061862044", /* 4: fldpi */
7285 for (i = 0; i < 5; i++)
7287 real_from_string (&ext_80387_constants_table[i], cst[i]);
7288 /* Ensure each constant is rounded to XFmode precision. */
7289 real_convert (&ext_80387_constants_table[i],
7290 XFmode, &ext_80387_constants_table[i]);
7293 ext_80387_constants_init = 1;
7296 /* Return true if the constant is something that can be loaded with
7297 a special instruction. */
7300 standard_80387_constant_p (rtx x)
7302 enum machine_mode mode = GET_MODE (x);
7306 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7309 if (x == CONST0_RTX (mode))
7311 if (x == CONST1_RTX (mode))
7314 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7316 /* For XFmode constants, try to find a special 80387 instruction when
7317 optimizing for size or on those CPUs that benefit from them. */
7319 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7323 if (! ext_80387_constants_init)
7324 init_ext_80387_constants ();
7326 for (i = 0; i < 5; i++)
7327 if (real_identical (&r, &ext_80387_constants_table[i]))
7331 /* A load of the constant -0.0 or -1.0 will be split into an
7332 fldz;fchs or fld1;fchs sequence.  */
7333 if (real_isnegzero (&r))
7335 if (real_identical (&r, &dconstm1))
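 /* The intended mapping from return values to load sequences (assuming
    the conventional numbering consumed by standard_80387_constant_opcode
    below) is:

      1  fldz          0.0
      2  fld1          1.0
      3  fldlg2        log10(2)
      4  fldln2        ln(2)
      5  fldl2e        log2(e)
      6  fldl2t        log2(10)
      7  fldpi         pi
      8  fldz; fchs   -0.0
      9  fld1; fchs   -1.0  */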
7341 /* Return the opcode of the special instruction to be used to load
7342 the constant X.  */
7345 standard_80387_constant_opcode (rtx x)
7347 switch (standard_80387_constant_p (x))
7371 /* Return the CONST_DOUBLE representing the 80387 constant that is
7372 loaded by the specified special instruction. The argument IDX
7373 matches the return value from standard_80387_constant_p. */
7376 standard_80387_constant_rtx (int idx)
7380 if (! ext_80387_constants_init)
7381 init_ext_80387_constants ();
7397 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
7401 /* Return 1 if X is all 0s and 2 if X is all 1s
7402 in a supported SSE vector mode.  */
7405 standard_sse_constant_p (rtx x)
7407 enum machine_mode mode = GET_MODE (x);
7409 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7411 if (vector_all_ones_operand (x, mode))
7427 /* Return the opcode of the special instruction to be used to load
7428 the constant X.  */
7431 standard_sse_constant_opcode (rtx insn, rtx x)
7433 switch (standard_sse_constant_p (x))
7436 switch (get_attr_mode (insn))
7439 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7441 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7443 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7445 return "vxorps\t%x0, %x0, %x0";
7447 return "vxorpd\t%x0, %x0, %x0";
7449 return "vpxor\t%x0, %x0, %x0";
7454 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
7461 /* Returns 1 if OP contains a symbol reference */
7464 symbolic_reference_mentioned_p (rtx op)
7469 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7472 fmt = GET_RTX_FORMAT (GET_CODE (op));
7473 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7479 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7480 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7484 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7491 /* Return 1 if it is appropriate to emit `ret' instructions in the
7492 body of a function. Do this only if the epilogue is simple, needing a
7493 couple of insns. Prior to reloading, we can't tell how many registers
7494 must be saved, so return 0 then. Return 0 if there is no frame
7495 marker to de-allocate. */
7498 ix86_can_use_return_insn_p (void)
7500 struct ix86_frame frame;
7502 if (! reload_completed || frame_pointer_needed)
7505 /* Don't allow more than 32k pop, since that's all we can do
7506 with one instruction.  */
7507 if (crtl->args.pops_args
7508 && crtl->args.size >= 32768)
7511 ix86_compute_frame_layout (&frame);
7512 return frame.to_allocate == 0 && frame.padding0 == 0
7513 && (frame.nregs + frame.nsseregs) == 0;
7516 /* Value should be nonzero if functions must have frame pointers.
7517 Zero means the frame pointer need not be set up (and parms may
7518 be accessed via the stack pointer) in functions that seem suitable. */
7521 ix86_frame_pointer_required (void)
7523 /* If we accessed previous frames, then the generated code expects
7524 to be able to access the saved ebp value in our frame. */
7525 if (cfun->machine->accesses_prev_frame)
7528 /* Several x86 OSes need a frame pointer for other reasons,
7529 usually pertaining to setjmp.  */
7530 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7533 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7534 the frame pointer by default. Turn it back on now if we've not
7535 got a leaf function. */
7536 if (TARGET_OMIT_LEAF_FRAME_POINTER
7537 && (!current_function_is_leaf
7538 || ix86_current_function_calls_tls_descriptor))
7547 /* Record that the current function accesses previous call frames. */
7550 ix86_setup_frame_addresses (void)
7552 cfun->machine->accesses_prev_frame = 1;
7555 #ifndef USE_HIDDEN_LINKONCE
7556 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7557 # define USE_HIDDEN_LINKONCE 1
7559 # define USE_HIDDEN_LINKONCE 0
7563 static int pic_labels_used;
7565 /* Fills in the label name that should be used for a pc thunk for
7566 the given register. */
7569 get_pc_thunk_name (char name[32], unsigned int regno)
7571 gcc_assert (!TARGET_64BIT);
7573 if (USE_HIDDEN_LINKONCE)
7574 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7576 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
7580 /* This function generates the pc thunks used by -fpic code: each thunk
7581 loads its register with the return address of the caller and then returns.  */
7584 ix86_code_end (void)
7589 for (regno = 0; regno < 8; ++regno)
7594 if (! ((pic_labels_used >> regno) & 1))
7597 get_pc_thunk_name (name, regno);
7599 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
7600 get_identifier (name),
7601 build_function_type (void_type_node, void_list_node));
7602 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
7603 NULL_TREE, void_type_node);
7604 TREE_PUBLIC (decl) = 1;
7605 TREE_STATIC (decl) = 1;
7610 switch_to_section (darwin_sections[text_coal_section]);
7611 fputs ("\t.weak_definition\t", asm_out_file);
7612 assemble_name (asm_out_file, name);
7613 fputs ("\n\t.private_extern\t", asm_out_file);
7614 assemble_name (asm_out_file, name);
7615 fputs ("\n", asm_out_file);
7616 ASM_OUTPUT_LABEL (asm_out_file, name);
7617 DECL_WEAK (decl) = 1;
7621 if (USE_HIDDEN_LINKONCE)
7623 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
7625 (*targetm.asm_out.unique_section) (decl, 0);
7626 switch_to_section (get_named_section (decl, NULL, 0));
7628 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7629 fputs ("\t.hidden\t", asm_out_file);
7630 assemble_name (asm_out_file, name);
7631 putc ('\n', asm_out_file);
7632 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7636 switch_to_section (text_section);
7637 ASM_OUTPUT_LABEL (asm_out_file, name);
7640 DECL_INITIAL (decl) = make_node (BLOCK);
7641 current_function_decl = decl;
7642 init_function_start (decl);
7643 first_function_block_is_cold = false;
7644 /* Make sure unwind info is emitted for the thunk if needed. */
7645 final_start_function (emit_barrier (), asm_out_file, 1);
7647 xops[0] = gen_rtx_REG (Pmode, regno);
7648 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7649 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7650 output_asm_insn ("ret", xops);
7651 final_end_function ();
7652 init_insn_lengths ();
7653 free_after_compilation (cfun);
7655 current_function_decl = NULL;
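 /* The emitted thunk for, e.g., %ebx is simply

      __i686.get_pc_thunk.bx:
	movl	(%esp), %ebx
	ret

    which leaves the caller's return address (the pc) in the register.  */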
7659 /* Emit code for the SET_GOT patterns. */
7662 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7668 if (TARGET_VXWORKS_RTP && flag_pic)
7670 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7671 xops[2] = gen_rtx_MEM (Pmode,
7672 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7673 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7675 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7676 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7677 an unadorned address. */
7678 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7679 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7680 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7684 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7686 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7688 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7691 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7694 output_asm_insn ("call\t%a2", xops);
7695 #ifdef DWARF2_UNWIND_INFO
7696 /* The call to the next label acts as a push.  */
7697 if (dwarf2out_do_frame ())
7701 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7702 gen_rtx_PLUS (Pmode,
7705 RTX_FRAME_RELATED_P (insn) = 1;
7706 dwarf2out_frame_debug (insn, true);
7713 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7714 is what will be referenced by the Mach-O PIC subsystem. */
7716 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7719 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7720 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7724 output_asm_insn ("pop%z0\t%0", xops);
7725 #ifdef DWARF2_UNWIND_INFO
7726 /* The pop clobbers DEST, but doesn't restore it
7727 for unwind info purposes.  */
7728 if (dwarf2out_do_frame ())
7732 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
7733 dwarf2out_frame_debug (insn, true);
7734 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7735 gen_rtx_PLUS (Pmode,
7738 RTX_FRAME_RELATED_P (insn) = 1;
7739 dwarf2out_frame_debug (insn, true);
7748 get_pc_thunk_name (name, REGNO (dest));
7749 pic_labels_used |= 1 << REGNO (dest);
7751 #ifdef DWARF2_UNWIND_INFO
7752 /* Ensure all queued register saves are flushed before the
7753 call.  */
7754 if (dwarf2out_do_frame ())
7758 insn = emit_barrier ();
7760 dwarf2out_frame_debug (insn, false);
7763 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7764 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7765 output_asm_insn ("call\t%X2", xops);
7766 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7767 is what will be referenced by the Mach-O PIC subsystem. */
7770 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7772 targetm.asm_out.internal_label (asm_out_file, "L",
7773 CODE_LABEL_NUMBER (label));
7780 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7781 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7783 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
7788 /* Generate a "push" pattern for input ARG.  */
7793 if (ix86_cfa_state->reg == stack_pointer_rtx)
7794 ix86_cfa_state->offset += UNITS_PER_WORD;
7796 return gen_rtx_SET (VOIDmode,
7798 gen_rtx_PRE_DEC (Pmode,
7799 stack_pointer_rtx)),
7803 /* Return >= 0 if there is an unused call-clobbered register available
7804 for the entire function. */
7807 ix86_select_alt_pic_regnum (void)
7809 if (current_function_is_leaf && !crtl->profile
7810 && !ix86_current_function_calls_tls_descriptor)
7813 /* Can't use the same register for both PIC and DRAP. */
7815 drap = REGNO (crtl->drap_reg);
7818 for (i = 2; i >= 0; --i)
7819 if (i != drap && !df_regs_ever_live_p (i))
7823 return INVALID_REGNUM;
7826 /* Return 1 if we need to save REGNO. */
7828 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7830 if (pic_offset_table_rtx
7831 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7832 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7834 || crtl->calls_eh_return
7835 || crtl->uses_const_pool))
7837 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7842 if (crtl->calls_eh_return && maybe_eh_return)
7847 unsigned test = EH_RETURN_DATA_REGNO (i);
7848 if (test == INVALID_REGNUM)
7855 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
7858 return (df_regs_ever_live_p (regno)
7859 && !call_used_regs[regno]
7860 && !fixed_regs[regno]
7861 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
7864 /* Return the number of saved general purpose registers.  */
7867 ix86_nsaved_regs (void)
7872 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7873 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7878 /* Return the number of saved SSE registers.  */
7881 ix86_nsaved_sseregs (void)
7886 if (ix86_cfun_abi () != MS_ABI)
7888 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7889 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7894 /* Given FROM and TO register numbers, say whether this elimination is
7895 allowed. If stack alignment is needed, we can only replace argument
7896 pointer with hard frame pointer, or replace frame pointer with stack
7897 pointer. Otherwise, frame pointer elimination is automatically
7898 handled and all other eliminations are valid. */
7901 ix86_can_eliminate (const int from, const int to)
7903 if (stack_realign_fp)
7904 return ((from == ARG_POINTER_REGNUM
7905 && to == HARD_FRAME_POINTER_REGNUM)
7906 || (from == FRAME_POINTER_REGNUM
7907 && to == STACK_POINTER_REGNUM));
7909 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
7912 /* Return the offset between two registers, one to be eliminated, and the other
7913 its replacement, at the start of a routine. */
7916 ix86_initial_elimination_offset (int from, int to)
7918 struct ix86_frame frame;
7919 ix86_compute_frame_layout (&frame);
7921 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7922 return frame.hard_frame_pointer_offset;
7923 else if (from == FRAME_POINTER_REGNUM
7924 && to == HARD_FRAME_POINTER_REGNUM)
7925 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
7928 gcc_assert (to == STACK_POINTER_REGNUM);
7930 if (from == ARG_POINTER_REGNUM)
7931 return frame.stack_pointer_offset;
7933 gcc_assert (from == FRAME_POINTER_REGNUM);
7934 return frame.stack_pointer_offset - frame.frame_pointer_offset;
7938 /* In a dynamically-aligned function, we can't know the offset from
7939 stack pointer to frame pointer, so we must ensure that setjmp
7940 eliminates fp against the hard fp (%ebp) rather than trying to
7941 index from %esp up to the top of the frame across a gap that is
7942 of unknown (at compile-time) size. */
7944 ix86_builtin_setjmp_frame_value (void)
7946 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
7949 /* Fill the ix86_frame structure describing the frame of the current function.  */
7952 ix86_compute_frame_layout (struct ix86_frame *frame)
7954 unsigned int stack_alignment_needed;
7955 HOST_WIDE_INT offset;
7956 unsigned int preferred_alignment;
7957 HOST_WIDE_INT size = get_frame_size ();
7959 frame->nregs = ix86_nsaved_regs ();
7960 frame->nsseregs = ix86_nsaved_sseregs ();
7962 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
7963 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
7965 /* The MS ABI seems to require stack alignment to be always 16 except for
7966 function prologues and leaf functions.  */
7967 if (ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
7969 preferred_alignment = 16;
7970 stack_alignment_needed = 16;
7971 crtl->preferred_stack_boundary = 128;
7972 crtl->stack_alignment_needed = 128;
7975 gcc_assert (!size || stack_alignment_needed);
7976 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
7977 gcc_assert (preferred_alignment <= stack_alignment_needed);
7979 /* During reload iteration the number of registers saved can change.
7980 Recompute the value as needed.  Do not recompute when the number of
7981 registers didn't change, as reload does multiple calls to the function
7982 and does not expect the decision to change within a single iteration.  */
7983 if (!optimize_function_for_size_p (cfun)
7984 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
7986 int count = frame->nregs;
7988 cfun->machine->use_fast_prologue_epilogue_nregs = count;
7989 /* The fast prologue uses move instead of push to save registers. This
7990 is significantly longer, but also executes faster as modern hardware
7991 can execute the moves in parallel, but can't do that for push/pop.
7993 Be careful about choosing which prologue to emit: when the function
7994 takes many instructions to execute, we may use the slow version, as
7995 well as when the function is known to be outside a hot spot (this is
7996 known with feedback only).  Weight the size of the function by the
7997 number of registers to save, as it is cheap to use one or two push
7998 instructions but very slow to use many of them.  */
8000 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
8001 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
8002 || (flag_branch_probabilities
8003 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
8004 cfun->machine->use_fast_prologue_epilogue = false;
8006 cfun->machine->use_fast_prologue_epilogue
8007 = !expensive_function_p (count);
8009 if (TARGET_PROLOGUE_USING_MOVE
8010 && cfun->machine->use_fast_prologue_epilogue)
8011 frame->save_regs_using_mov = true;
8013 frame->save_regs_using_mov = false;
8015 /* Skip return address. */
8016 offset = UNITS_PER_WORD;
8018 /* Skip pushed static chain. */
8019 if (ix86_static_chain_on_stack)
8020 offset += UNITS_PER_WORD;
8022 /* Skip saved base pointer. */
8023 if (frame_pointer_needed)
8024 offset += UNITS_PER_WORD;
8026 frame->hard_frame_pointer_offset = offset;
8028 /* Set offset to the aligned value, because the realigned frame
8029 starts from here.  */
8030 if (stack_realign_fp)
8031 offset = (offset + stack_alignment_needed -1) & -stack_alignment_needed;
8033 /* Register save area */
8034 offset += frame->nregs * UNITS_PER_WORD;
8036 /* Align SSE reg save area. */
8037 if (frame->nsseregs)
8038 frame->padding0 = ((offset + 16 - 1) & -16) - offset;
8040 frame->padding0 = 0;
8042 /* SSE register save area. */
8043 offset += frame->padding0 + frame->nsseregs * 16;
8046 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
8047 offset += frame->va_arg_size;
8049 /* Align start of frame for local function. */
8050 frame->padding1 = ((offset + stack_alignment_needed - 1)
8051 & -stack_alignment_needed) - offset;
8053 offset += frame->padding1;
8055 /* Frame pointer points here. */
8056 frame->frame_pointer_offset = offset;
8060 /* Add the outgoing arguments area.  It can be skipped if we eliminated
8061 all the function calls as dead code.
8062 Skipping is however impossible when the function calls alloca.  The
8063 alloca expander assumes that the last crtl->outgoing_args_size
8064 bytes of the stack frame are unused.  */
8065 if (ACCUMULATE_OUTGOING_ARGS
8066 && (!current_function_is_leaf || cfun->calls_alloca
8067 || ix86_current_function_calls_tls_descriptor))
8069 offset += crtl->outgoing_args_size;
8070 frame->outgoing_arguments_size = crtl->outgoing_args_size;
8073 frame->outgoing_arguments_size = 0;
8075 /* Align the stack boundary.  Only needed if we're calling another
8076 function or using alloca.  */
8077 if (!current_function_is_leaf || cfun->calls_alloca
8078 || ix86_current_function_calls_tls_descriptor)
8079 frame->padding2 = ((offset + preferred_alignment - 1)
8080 & -preferred_alignment) - offset;
8082 frame->padding2 = 0;
8084 offset += frame->padding2;
8086 /* We've reached the end of the stack frame.  */
8087 frame->stack_pointer_offset = offset;
8089 /* The size the prologue needs to allocate.  */
8090 frame->to_allocate =
8091 (size + frame->padding1 + frame->padding2
8092 + frame->outgoing_arguments_size + frame->va_arg_size);
8094 if ((!frame->to_allocate && frame->nregs <= 1)
8095 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
8096 frame->save_regs_using_mov = false;
8098 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8099 && current_function_sp_is_unchanging
8100 && current_function_is_leaf
8101 && !ix86_current_function_calls_tls_descriptor)
8103 frame->red_zone_size = frame->to_allocate;
8104 if (frame->save_regs_using_mov)
8105 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
8106 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
8107 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
8110 frame->red_zone_size = 0;
8111 frame->to_allocate -= frame->red_zone_size;
8112 frame->stack_pointer_offset -= frame->red_zone_size;
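 /* Putting it together, the computed layout is, from high to low
    addresses (a sketch; bracketed parts are optional):

      <incoming arguments>
      return address                    <- entry stack pointer
      [saved static chain]
      [saved %ebp/%rbp]                 <- hard_frame_pointer_offset
      saved GP registers
      [padding0] saved SSE registers
      va_arg register save area
      [padding1]                        <- frame_pointer_offset
      local variables (get_frame_size ())
      outgoing arguments
      [padding2]                        <- stack_pointer_offset

    with red_zone_size subtracted from the allocation when the red zone
    is usable.  */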
8115 /* Emit code to save registers in the prologue. */
8118 ix86_emit_save_regs (void)
8123 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
8124 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8126 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
8127 RTX_FRAME_RELATED_P (insn) = 1;
8131 /* Emit code to save registers using MOV insns.  The first register
8132 is saved at POINTER + OFFSET.  */
8134 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8139 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8140 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8142 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
8144 gen_rtx_REG (Pmode, regno));
8145 RTX_FRAME_RELATED_P (insn) = 1;
8146 offset += UNITS_PER_WORD;
8150 /* Emit code to save SSE registers using MOV insns.  The first register
8151 is saved at POINTER + OFFSET.  */
8153 ix86_emit_save_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8159 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8160 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8162 mem = adjust_address (gen_rtx_MEM (TImode, pointer), TImode, offset);
8163 set_mem_align (mem, 128);
8164 insn = emit_move_insn (mem, gen_rtx_REG (TImode, regno));
8165 RTX_FRAME_RELATED_P (insn) = 1;
8170 static GTY(()) rtx queued_cfa_restores;
8172 /* Add a REG_CFA_RESTORE REG note to INSN or queue it until the next stack
8173 manipulation insn.  Don't add it if the previously
8174 saved value will be left untouched within the stack red-zone till return,
8175 as unwinders can find the same value in the register and on the stack.  */
8179 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT red_offset)
8182 && !TARGET_64BIT_MS_ABI
8183 && red_offset + RED_ZONE_SIZE >= 0
8184 && crtl->args.pops_args < 65536)
8189 add_reg_note (insn, REG_CFA_RESTORE, reg);
8190 RTX_FRAME_RELATED_P (insn) = 1;
8194 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
8197 /* Add queued REG_CFA_RESTORE notes if any to INSN. */
8200 ix86_add_queued_cfa_restore_notes (rtx insn)
8203 if (!queued_cfa_restores)
8205 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
8207 XEXP (last, 1) = REG_NOTES (insn);
8208 REG_NOTES (insn) = queued_cfa_restores;
8209 queued_cfa_restores = NULL_RTX;
8210 RTX_FRAME_RELATED_P (insn) = 1;
8213 /* Expand prologue or epilogue stack adjustment.
8214 The pattern exists to put a dependency on all ebp-based memory accesses.
8215 STYLE should be negative if instructions should be marked as frame related,
8216 zero if the %r11 register is live and cannot be freely used, and positive otherwise.  */
8220 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
8221 int style, bool set_cfa)
8226 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
8227 else if (x86_64_immediate_operand (offset, DImode))
8228 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
8232 /* r11 is used by indirect sibcall return as well, set before the
8233 epilogue and used after the epilogue. ATM indirect sibcall
8234 shouldn't be used together with huge frame sizes in one
8235 function because of the frame_size check in sibcall.c. */
8237 r11 = gen_rtx_REG (DImode, R11_REG);
8238 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
8240 RTX_FRAME_RELATED_P (insn) = 1;
8241 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
8246 ix86_add_queued_cfa_restore_notes (insn);
8252 gcc_assert (ix86_cfa_state->reg == src);
8253 ix86_cfa_state->offset += INTVAL (offset);
8254 ix86_cfa_state->reg = dest;
8256 r = gen_rtx_PLUS (Pmode, src, offset);
8257 r = gen_rtx_SET (VOIDmode, dest, r);
8258 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
8259 RTX_FRAME_RELATED_P (insn) = 1;
8262 RTX_FRAME_RELATED_P (insn) = 1;
8265 /* Find an available register to be used as the dynamic realign argument
8266 pointer register.  Such a register will be written in the prologue and
8267 used at the beginning of the body, so it must not be
8268 1. a parameter passing register.
8269 2. the GOT pointer.
8270 We reuse the static-chain register if it is available.  Otherwise, we
8271 use DI for i386 and R13 for x86-64.  We chose R13 since it has a longer encoding.
8274 Return: the regno of the chosen register.  */
8277 find_drap_reg (void)
8279 tree decl = cfun->decl;
8283 /* Use R13 for a nested function or a function that needs a static chain.
8284 Since a function with a tail call may use any caller-saved
8285 register in the epilogue, DRAP must not use a caller-saved
8286 register in that case.  */
8287 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8294 /* Use DI for a nested function or a function that needs a static chain.
8295 Since a function with a tail call may use any caller-saved
8296 register in the epilogue, DRAP must not use a caller-saved
8297 register in that case.  */
8298 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8301 /* Reuse the static chain register if it isn't used for parameter passing.  */
8303 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
8304 && !lookup_attribute ("fastcall",
8305 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
8312 /* Return minimum incoming stack alignment. */
8315 ix86_minimum_incoming_stack_boundary (bool sibcall)
8317 unsigned int incoming_stack_boundary;
8319 /* Prefer the one specified at command line. */
8320 if (ix86_user_incoming_stack_boundary)
8321 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
8322 /* In 32bit, use MIN_STACK_BOUNDARY for the incoming stack boundary
8323 if -mstackrealign is used, this isn't a sibcall check, and the
8324 estimated stack alignment is 128bit.  */
8327 && ix86_force_align_arg_pointer
8328 && crtl->stack_alignment_estimated == 128)
8329 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8331 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
8333 /* Incoming stack alignment can be changed on individual functions
8334 via force_align_arg_pointer attribute. We use the smallest
8335 incoming stack boundary. */
8336 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
8337 && lookup_attribute (ix86_force_align_arg_pointer_string,
8338 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
8339 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8341 /* The incoming stack frame has to be aligned at least at
8342 parm_stack_boundary. */
8343 if (incoming_stack_boundary < crtl->parm_stack_boundary)
8344 incoming_stack_boundary = crtl->parm_stack_boundary;
8346 /* Stack at entrance of main is aligned by runtime. We use the
8347 smallest incoming stack boundary. */
8348 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
8349 && DECL_NAME (current_function_decl)
8350 && MAIN_NAME_P (DECL_NAME (current_function_decl))
8351 && DECL_FILE_SCOPE_P (current_function_decl))
8352 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
8354 return incoming_stack_boundary;
8357 /* Update incoming stack boundary and estimated stack alignment. */
8360 ix86_update_stack_boundary (void)
8362 ix86_incoming_stack_boundary
8363 = ix86_minimum_incoming_stack_boundary (false);
8365 /* x86_64 vararg needs 16-byte stack alignment for the register save area.  */
8369 && crtl->stack_alignment_estimated < 128)
8370 crtl->stack_alignment_estimated = 128;
8373 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
8374 needed or an rtx for DRAP otherwise. */
8377 ix86_get_drap_rtx (void)
8379 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
8380 crtl->need_drap = true;
8382 if (stack_realign_drap)
8384 /* Assign DRAP to vDRAP and return vDRAP.  */
8385 unsigned int regno = find_drap_reg ();
8390 arg_ptr = gen_rtx_REG (Pmode, regno);
8391 crtl->drap_reg = arg_ptr;
8394 drap_vreg = copy_to_reg (arg_ptr);
8398 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
8401 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
8402 RTX_FRAME_RELATED_P (insn) = 1;
8410 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
8413 ix86_internal_arg_pointer (void)
8415 return virtual_incoming_args_rtx;
8418 /* Finalize stack_realign_needed flag, which will guide prologue/epilogue
8419 to be generated in correct form. */
8421 ix86_finalize_stack_realign_flags (void)
8423 /* Check whether stack realignment is really needed after reload,
8424 and store the result in cfun.  */
8425 unsigned int incoming_stack_boundary
8426 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
8427 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
8428 unsigned int stack_realign = (incoming_stack_boundary
8429 < (current_function_is_leaf
8430 ? crtl->max_used_stack_slot_alignment
8431 : crtl->stack_alignment_needed));
8433 if (crtl->stack_realign_finalized)
8435 /* After stack_realign_needed is finalized, we can no longer update it.  */
8437 gcc_assert (crtl->stack_realign_needed == stack_realign);
8441 crtl->stack_realign_needed = stack_realign;
8442 crtl->stack_realign_finalized = true;
8446 /* Expand the prologue into a bunch of separate insns. */
8449 ix86_expand_prologue (void)
8453 struct ix86_frame frame;
8454 HOST_WIDE_INT allocate;
8455 int gen_frame_pointer = frame_pointer_needed;
8457 ix86_finalize_stack_realign_flags ();
8459 /* DRAP should not coexist with stack_realign_fp */
8460 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
8462 /* Initialize CFA state for before the prologue. */
8463 ix86_cfa_state->reg = stack_pointer_rtx;
8464 ix86_cfa_state->offset = INCOMING_FRAME_SP_OFFSET;
8466 ix86_compute_frame_layout (&frame);
8468 if (ix86_function_ms_hook_prologue (current_function_decl))
8472 /* Make sure the function starts with
8473 8b ff movl.s %edi,%edi
8474 55 push %ebp
8475 8b ec movl.s %esp,%ebp
8477 This matches the hookable function prologue in Win32 API
8478 functions in Microsoft Windows XP Service Pack 2 and newer.
8479 Wine uses this to enable Windows apps to hook the Win32 API
8480 functions provided by Wine. */
8481 insn = emit_insn (gen_vswapmov (gen_rtx_REG (SImode, DI_REG),
8482 gen_rtx_REG (SImode, DI_REG)));
8483 push = emit_insn (gen_push (hard_frame_pointer_rtx));
8484 mov = emit_insn (gen_vswapmov (hard_frame_pointer_rtx,
8485 stack_pointer_rtx));
8487 if (frame_pointer_needed && !(crtl->drap_reg
8488 && crtl->stack_realign_needed))
8490 /* The push %ebp and movl.s %esp, %ebp already set up
8491 the frame pointer. No need to do this again. */
8492 gen_frame_pointer = 0;
8493 RTX_FRAME_RELATED_P (push) = 1;
8494 RTX_FRAME_RELATED_P (mov) = 1;
8495 if (ix86_cfa_state->reg == stack_pointer_rtx)
8496 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8499 /* If the frame pointer is not needed, pop %ebp again. This
8500 could be optimized for cases where ebp needs to be backed up
8501 for some other reason. If stack realignment is needed, pop
8502 the base pointer again, align the stack, and later regenerate
8503 the frame pointer setup. The frame pointer generated by the
8504 hook prologue is not aligned, so it can't be used. */
8505 insn = emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8508 /* The first insn of a function that accepts its static chain on the
8509 stack is to push the register that would be filled in by a direct
8510 call. This insn will be skipped by the trampoline. */
8511 if (ix86_static_chain_on_stack)
8515 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
8516 emit_insn (gen_blockage ());
8518 /* We don't want to interpret this push insn as a register save,
8519 only as a stack adjustment. The real copy of the register as
8520 a save will be done later, if needed. */
8521 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
8522 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8523 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8524 RTX_FRAME_RELATED_P (insn) = 1;
8527 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
8528 DRAP is needed and stack realignment is really needed after reload.  */
8529 if (crtl->drap_reg && crtl->stack_realign_needed)
8532 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8533 int param_ptr_offset = UNITS_PER_WORD;
8535 if (ix86_static_chain_on_stack)
8536 param_ptr_offset += UNITS_PER_WORD;
8537 if (!call_used_regs[REGNO (crtl->drap_reg)])
8538 param_ptr_offset += UNITS_PER_WORD;
8540 gcc_assert (stack_realign_drap);
8542 /* Grab the argument pointer. */
8543 x = plus_constant (stack_pointer_rtx, param_ptr_offset);
8546 /* Only need to push the parameter pointer reg if it is caller saved.  */
8548 if (!call_used_regs[REGNO (crtl->drap_reg)])
8550 /* Push arg pointer reg */
8551 insn = emit_insn (gen_push (y));
8552 RTX_FRAME_RELATED_P (insn) = 1;
8555 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
8556 RTX_FRAME_RELATED_P (insn) = 1;
8557 ix86_cfa_state->reg = crtl->drap_reg;
8559 /* Align the stack. */
8560 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8562 GEN_INT (-align_bytes)));
8563 RTX_FRAME_RELATED_P (insn) = 1;
8565 /* Replicate the return address on the stack so that return
8566 address can be reached via (argp - 1) slot. This is needed
8567 to implement macro RETURN_ADDR_RTX and intrinsic function
8568 expand_builtin_return_addr etc. */
8570 x = gen_frame_mem (Pmode,
8571 plus_constant (x, -UNITS_PER_WORD));
8572 insn = emit_insn (gen_push (x));
8573 RTX_FRAME_RELATED_P (insn) = 1;
8576 /* Note: AT&T enter does NOT have reversed args. Enter is probably
8577 slower on all targets. Also sdb doesn't like it. */
8579 if (gen_frame_pointer)
8581 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
8582 RTX_FRAME_RELATED_P (insn) = 1;
8584 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8585 RTX_FRAME_RELATED_P (insn) = 1;
8587 if (ix86_cfa_state->reg == stack_pointer_rtx)
8588 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8591 if (stack_realign_fp)
8593 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8594 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
8596 /* Align the stack. */
8597 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8599 GEN_INT (-align_bytes)));
8600 RTX_FRAME_RELATED_P (insn) = 1;
8603 allocate = frame.to_allocate + frame.nsseregs * 16 + frame.padding0;
8605 if (!frame.save_regs_using_mov)
8606 ix86_emit_save_regs ();
8608 allocate += frame.nregs * UNITS_PER_WORD;
8610 /* When using the red zone we may start register saving before allocating
8611 the stack frame, saving one cycle of the prologue.  However, we
8612 avoid doing this if we are going to have to probe the stack, since
8613 at least on x86_64 the stack probe can turn into a call that clobbers
8614 a red zone location.  */
8615 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
8616 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
8617 ix86_emit_save_regs_using_mov ((frame_pointer_needed
8618 && !crtl->stack_realign_needed)
8619 ? hard_frame_pointer_rtx
8620 : stack_pointer_rtx,
8621 -frame.nregs * UNITS_PER_WORD);
8625 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
8626 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8627 GEN_INT (-allocate), -1,
8628 ix86_cfa_state->reg == stack_pointer_rtx);
8631 rtx eax = gen_rtx_REG (Pmode, AX_REG);
8635 if (cfun->machine->call_abi == MS_ABI)
8638 eax_live = ix86_eax_live_at_start_p ();
8642 emit_insn (gen_push (eax));
8643 allocate -= UNITS_PER_WORD;
8646 emit_move_insn (eax, GEN_INT (allocate));
8649 insn = gen_allocate_stack_worker_64 (eax, eax);
8651 insn = gen_allocate_stack_worker_32 (eax, eax);
8652 insn = emit_insn (insn);
8654 if (ix86_cfa_state->reg == stack_pointer_rtx)
8656 ix86_cfa_state->offset += allocate;
8657 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
8658 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8659 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8660 RTX_FRAME_RELATED_P (insn) = 1;
8665 if (frame_pointer_needed)
8666 t = plus_constant (hard_frame_pointer_rtx,
8669 - frame.nregs * UNITS_PER_WORD);
8671 t = plus_constant (stack_pointer_rtx, allocate);
8672 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8676 if (frame.save_regs_using_mov
8677 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8678 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8680 if (!frame_pointer_needed
8681 || !(frame.to_allocate + frame.padding0)
8682 || crtl->stack_realign_needed)
8683 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8685 + frame.nsseregs * 16 + frame.padding0);
8687 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8688 -frame.nregs * UNITS_PER_WORD);
8690 if (!frame_pointer_needed
8691 || !(frame.to_allocate + frame.padding0)
8692 || crtl->stack_realign_needed)
8693 ix86_emit_save_sse_regs_using_mov (stack_pointer_rtx,
8696 ix86_emit_save_sse_regs_using_mov (hard_frame_pointer_rtx,
8697 - frame.nregs * UNITS_PER_WORD
8698 - frame.nsseregs * 16
8701 pic_reg_used = false;
8702 if (pic_offset_table_rtx
8703 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8706 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8708 if (alt_pic_reg_used != INVALID_REGNUM)
8709 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8711 pic_reg_used = true;
8718 if (ix86_cmodel == CM_LARGE_PIC)
8720 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8721 rtx label = gen_label_rtx ();
8723 LABEL_PRESERVE_P (label) = 1;
8724 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8725 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8726 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8727 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8728 pic_offset_table_rtx, tmp_reg));
8731 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8734 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
8737 /* In the pic_reg_used case, make sure that the got load isn't deleted
8738 when mcount needs it. Blockage to avoid call movement across mcount
8739 call is emitted in generic code after the NOTE_INSN_PROLOGUE_END note.  */
8741 if (crtl->profile && pic_reg_used)
8742 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8744 if (crtl->drap_reg && !crtl->stack_realign_needed)
8746 /* vDRAP is set up, but after reload it turns out stack realignment
8747 isn't necessary; here we emit the prologue to set up DRAP
8748 without the stack realignment adjustment.  */
8750 int drap_bp_offset = UNITS_PER_WORD * 2;
8752 if (ix86_static_chain_on_stack)
8753 drap_bp_offset += UNITS_PER_WORD;
8754 x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8755 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8758 /* Prevent instructions from being scheduled into register save push
8759 sequence when access to the redzone area is done through frame pointer.
8760 The offset between the frame pointer and the stack pointer is calculated
8761 relative to the value of the stack pointer at the end of the function
8762 prologue, and moving instructions that access the redzone area via the frame
8763 pointer inside the push sequence violates this assumption.  */
8764 if (frame_pointer_needed && frame.red_zone_size)
8765 emit_insn (gen_memory_blockage ());
8767 /* Emit cld instruction if stringops are used in the function. */
8768 if (TARGET_CLD && ix86_current_function_needs_cld)
8769 emit_insn (gen_cld ());
8772 /* Emit code to restore REG using a POP insn. */
8775 ix86_emit_restore_reg_using_pop (rtx reg, HOST_WIDE_INT red_offset)
8777 rtx insn = emit_insn (ix86_gen_pop1 (reg));
8779 if (ix86_cfa_state->reg == crtl->drap_reg
8780 && REGNO (reg) == REGNO (crtl->drap_reg))
8782 /* Previously we'd represented the CFA as an expression
8783 like *(%ebp - 8). We've just popped that value from
8784 the stack, which means we need to reset the CFA to
8785 the drap register. This will remain until we restore
8786 the stack pointer. */
8787 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8788 RTX_FRAME_RELATED_P (insn) = 1;
8792 if (ix86_cfa_state->reg == stack_pointer_rtx)
8794 ix86_cfa_state->offset -= UNITS_PER_WORD;
8795 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8796 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
8797 RTX_FRAME_RELATED_P (insn) = 1;
8800 /* When the frame pointer is the CFA, and we pop it, we are
8801 swapping back to the stack pointer as the CFA. This happens
8802 for stack frames that don't allocate other data, so we assume
8803 the stack pointer is now pointing at the return address, i.e.
8804 the function entry state, which makes the offset be 1 word. */
8805 else if (ix86_cfa_state->reg == hard_frame_pointer_rtx
8806 && reg == hard_frame_pointer_rtx)
8808 ix86_cfa_state->reg = stack_pointer_rtx;
8809 ix86_cfa_state->offset -= UNITS_PER_WORD;
8811 add_reg_note (insn, REG_CFA_DEF_CFA,
8812 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8813 GEN_INT (ix86_cfa_state->offset)));
8814 RTX_FRAME_RELATED_P (insn) = 1;
8817 ix86_add_cfa_restore_note (insn, reg, red_offset);
8820 /* Emit code to restore saved registers using POP insns. */
8823 ix86_emit_restore_regs_using_pop (HOST_WIDE_INT red_offset)
8827 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8828 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
8830 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno),
8832 red_offset += UNITS_PER_WORD;
8836 /* Emit code and notes for the LEAVE instruction. */
8839 ix86_emit_leave (HOST_WIDE_INT red_offset)
8841 rtx insn = emit_insn (ix86_gen_leave ());
8843 ix86_add_queued_cfa_restore_notes (insn);
8845 if (ix86_cfa_state->reg == hard_frame_pointer_rtx)
8847 ix86_cfa_state->reg = stack_pointer_rtx;
8848 ix86_cfa_state->offset -= UNITS_PER_WORD;
8850 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8851 copy_rtx (XVECEXP (PATTERN (insn), 0, 0)));
8852 RTX_FRAME_RELATED_P (insn) = 1;
8853 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx, red_offset);
8857 /* Emit code to restore saved registers using MOV insns. First register
8858 is restored from POINTER + OFFSET. */
8860 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8861 HOST_WIDE_INT red_offset,
8862 int maybe_eh_return)
8865 rtx base_address = gen_rtx_MEM (Pmode, pointer);
8868 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8869 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8871 rtx reg = gen_rtx_REG (Pmode, regno);
8873 /* Ensure that adjust_address won't be forced to produce a pointer
8874 out of the range allowed by the x86-64 instruction set.  */
8875 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8879 r11 = gen_rtx_REG (DImode, R11_REG);
8880 emit_move_insn (r11, GEN_INT (offset));
8881 emit_insn (gen_adddi3 (r11, r11, pointer));
8882 base_address = gen_rtx_MEM (Pmode, r11);
8885 insn = emit_move_insn (reg,
8886 adjust_address (base_address, Pmode, offset));
8887 offset += UNITS_PER_WORD;
8889 if (ix86_cfa_state->reg == crtl->drap_reg
8890 && regno == REGNO (crtl->drap_reg))
8892 /* Previously we'd represented the CFA as an expression
8893 like *(%ebp - 8). We've just popped that value from
8894 the stack, which means we need to reset the CFA to
8895 the drap register. This will remain until we restore
8896 the stack pointer. */
8897 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8898 RTX_FRAME_RELATED_P (insn) = 1;
8901 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8903 red_offset += UNITS_PER_WORD;
8907 /* Emit code to restore saved SSE registers using MOV insns.  First register
8908 is restored from POINTER + OFFSET.  */
8910 ix86_emit_restore_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8911 HOST_WIDE_INT red_offset,
8912 int maybe_eh_return)
8915 rtx base_address = gen_rtx_MEM (TImode, pointer);
8918 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8919 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8921 rtx reg = gen_rtx_REG (TImode, regno);
8923 /* Ensure that adjust_address won't be forced to produce a pointer
8924 out of the range allowed by the x86-64 instruction set.  */
8925 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8929 r11 = gen_rtx_REG (DImode, R11_REG);
8930 emit_move_insn (r11, GEN_INT (offset));
8931 emit_insn (gen_adddi3 (r11, r11, pointer));
8932 base_address = gen_rtx_MEM (TImode, r11);
8935 mem = adjust_address (base_address, TImode, offset);
8936 set_mem_align (mem, 128);
8937 emit_move_insn (reg, mem);
8940 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8946 /* Restore function stack, frame, and registers. */
8949 ix86_expand_epilogue (int style)
8952 struct ix86_frame frame;
8953 HOST_WIDE_INT offset, red_offset;
8954 struct machine_cfa_state cfa_state_save = *ix86_cfa_state;
8957 ix86_finalize_stack_realign_flags ();
8959 /* When stack is realigned, SP must be valid. */
8960 sp_valid = (!frame_pointer_needed
8961 || current_function_sp_is_unchanging
8962 || stack_realign_fp);
8964 ix86_compute_frame_layout (&frame);
8966 /* See the comment about red zone and frame
8967 pointer usage in ix86_expand_prologue. */
8968 if (frame_pointer_needed && frame.red_zone_size)
8969 emit_insn (gen_memory_blockage ());
8971 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
8972 gcc_assert (!using_drap || ix86_cfa_state->reg == crtl->drap_reg);
8974 /* Calculate start of saved registers relative to ebp. Special care
8975 must be taken for the normal return case of a function using
8976 eh_return: the eax and edx registers are marked as saved, but not
8977 restored along this path. */
8978 offset = frame.nregs;
8979 if (crtl->calls_eh_return && style != 2)
8981 offset *= -UNITS_PER_WORD;
8982 offset -= frame.nsseregs * 16 + frame.padding0;
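/* Worked example (illustrative): with 3 saved GPRs, 2 saved SSE registers
   and no padding on x86-64, offset == 3 * -8 - 2 * 16 == -56, i.e. the
   register save area starts 56 bytes below the frame pointer.  */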
8984 /* Calculate start of saved registers relative to esp on entry of the
8985 function. When realigning stack, this needs to be the most negative
8986 value possible at runtime. */
8987 red_offset = offset;
8989 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
8991 else if (stack_realign_fp)
8992 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
8994 if (ix86_static_chain_on_stack)
8995 red_offset -= UNITS_PER_WORD;
8996 if (frame_pointer_needed)
8997 red_offset -= UNITS_PER_WORD;
8999 /* If we're only restoring one register and sp is not valid then
9000 restore the register using a move instruction, since it's
9001 less work than reloading sp and popping the register.
9003 The default code results in a stack adjustment using an add/lea instruction,
9004 while this code results in a LEAVE instruction (or discrete equivalent),
9005 so it is profitable in some other cases as well.  Especially when there
9006 are no registers to restore.  We also use this code when TARGET_USE_LEAVE
9007 and there is exactly one register to pop.  This heuristic may need some
9008 tuning in the future.  */
9009 if ((!sp_valid && (frame.nregs + frame.nsseregs) <= 1)
9010 || (TARGET_EPILOGUE_USING_MOVE
9011 && cfun->machine->use_fast_prologue_epilogue
9012 && ((frame.nregs + frame.nsseregs) > 1
9013 || (frame.to_allocate + frame.padding0) != 0))
9014 || (frame_pointer_needed && !(frame.nregs + frame.nsseregs)
9015 && (frame.to_allocate + frame.padding0) != 0)
9016 || (frame_pointer_needed && TARGET_USE_LEAVE
9017 && cfun->machine->use_fast_prologue_epilogue
9018 && (frame.nregs + frame.nsseregs) == 1)
9019 || crtl->calls_eh_return)
9021 /* Restore registers.  We can use ebp or esp to address the memory
9022 locations.  If both are available, default to ebp, since offsets
9023 are known to be small.  The only exception is esp pointing directly
9024 to the end of the block of saved registers, where we may simplify the addressing mode.

9027 If we are realigning the stack with bp and sp, the register restores can't
9028 be addressed by bp; sp must be used instead.  */
9030 if (!frame_pointer_needed
9031 || (sp_valid && !(frame.to_allocate + frame.padding0))
9032 || stack_realign_fp)
9034 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9035 frame.to_allocate, red_offset,
9037 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
9039 + frame.nsseregs * 16
9042 + frame.nsseregs * 16
9043 + frame.padding0, style == 2);
9047 ix86_emit_restore_sse_regs_using_mov (hard_frame_pointer_rtx,
9050 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
9052 + frame.nsseregs * 16
9055 + frame.nsseregs * 16
9056 + frame.padding0, style == 2);
9059 red_offset -= offset;
9061 /* eh_return epilogues need %ecx added to the stack pointer. */
9064 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
9066 /* Stack realignment doesn't work with eh_return.  */
9067 gcc_assert (!crtl->stack_realign_needed);
9068 /* Neither do regparm nested functions.  */
9069 gcc_assert (!ix86_static_chain_on_stack);
9071 if (frame_pointer_needed)
9073 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
9074 tmp = plus_constant (tmp, UNITS_PER_WORD);
9075 tmp = emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
9077 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
9078 tmp = emit_move_insn (hard_frame_pointer_rtx, tmp);
9080 /* Note that we use SA as a temporary CFA, as the return
9081 address is at the proper place relative to it.  We
9082 pretend this happens at the FP restore insn because
9083 prior to this insn the FP would be stored at the wrong
9084 offset relative to SA, and after this insn we have no
9085 other reasonable register to use for the CFA.  We don't
9086 bother resetting the CFA to the SP for the duration of the return insn.  */
9088 add_reg_note (tmp, REG_CFA_DEF_CFA,
9089 plus_constant (sa, UNITS_PER_WORD));
9090 ix86_add_queued_cfa_restore_notes (tmp);
9091 add_reg_note (tmp, REG_CFA_RESTORE, hard_frame_pointer_rtx);
9092 RTX_FRAME_RELATED_P (tmp) = 1;
9093 ix86_cfa_state->reg = sa;
9094 ix86_cfa_state->offset = UNITS_PER_WORD;
9096 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
9097 const0_rtx, style, false);
9101 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
9102 tmp = plus_constant (tmp, (frame.to_allocate
9103 + frame.nregs * UNITS_PER_WORD
9104 + frame.nsseregs * 16
9106 tmp = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
9107 ix86_add_queued_cfa_restore_notes (tmp);
9109 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9110 if (ix86_cfa_state->offset != UNITS_PER_WORD)
9112 ix86_cfa_state->offset = UNITS_PER_WORD;
9113 add_reg_note (tmp, REG_CFA_DEF_CFA,
9114 plus_constant (stack_pointer_rtx,
9116 RTX_FRAME_RELATED_P (tmp) = 1;
9120 else if (!frame_pointer_needed)
9121 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9122 GEN_INT (frame.to_allocate
9123 + frame.nregs * UNITS_PER_WORD
9124 + frame.nsseregs * 16
9126 style, !using_drap);
9127 /* If not an i386, mov & pop is faster than "leave". */
9128 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
9129 || !cfun->machine->use_fast_prologue_epilogue)
9130 ix86_emit_leave (red_offset);
9133 pro_epilogue_adjust_stack (stack_pointer_rtx,
9134 hard_frame_pointer_rtx,
9135 const0_rtx, style, !using_drap);
9137 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx, red_offset);
9142 /* First step is to deallocate the stack frame so that we can restore the registers with simple pop operations.

9145 If we realign the stack with the frame pointer, then the stack pointer
9146 won't be able to recover via lea $offset(%bp), %sp, because
9147 there is a padding area between bp and sp for the realignment.
9148 "add $to_allocate, %sp" must be used instead.  */
9151 gcc_assert (frame_pointer_needed);
9152 gcc_assert (!stack_realign_fp);
9153 pro_epilogue_adjust_stack (stack_pointer_rtx,
9154 hard_frame_pointer_rtx,
9155 GEN_INT (offset), style, false);
9156 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9159 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9160 GEN_INT (frame.nsseregs * 16
9164 else if (frame.to_allocate || frame.padding0 || frame.nsseregs)
9166 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9167 frame.to_allocate, red_offset,
9169 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9170 GEN_INT (frame.to_allocate
9171 + frame.nsseregs * 16
9172 + frame.padding0), style,
9173 !using_drap && !frame_pointer_needed);
9176 ix86_emit_restore_regs_using_pop (red_offset + frame.nsseregs * 16
9178 red_offset -= offset;
9180 if (frame_pointer_needed)
9182 /* Leave results in shorter dependency chains on CPUs that are
9183 able to grok it fast. */
9184 if (TARGET_USE_LEAVE)
9185 ix86_emit_leave (red_offset);
9188 /* When stack realignment really happens, recovering the stack
9189 pointer to the hard frame pointer is a must, if not using leave.  */
9191 if (stack_realign_fp)
9192 pro_epilogue_adjust_stack (stack_pointer_rtx,
9193 hard_frame_pointer_rtx,
9194 const0_rtx, style, !using_drap);
9195 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx,
9203 int param_ptr_offset = UNITS_PER_WORD;
9206 gcc_assert (stack_realign_drap);
9208 if (ix86_static_chain_on_stack)
9209 param_ptr_offset += UNITS_PER_WORD;
9210 if (!call_used_regs[REGNO (crtl->drap_reg)])
9211 param_ptr_offset += UNITS_PER_WORD;
9213 insn = emit_insn ((*ix86_gen_add3) (stack_pointer_rtx,
9215 GEN_INT (-param_ptr_offset)));
9217 ix86_cfa_state->reg = stack_pointer_rtx;
9218 ix86_cfa_state->offset = param_ptr_offset;
9220 add_reg_note (insn, REG_CFA_DEF_CFA,
9221 gen_rtx_PLUS (Pmode, ix86_cfa_state->reg,
9222 GEN_INT (ix86_cfa_state->offset)));
9223 RTX_FRAME_RELATED_P (insn) = 1;
9225 if (!call_used_regs[REGNO (crtl->drap_reg)])
9226 ix86_emit_restore_reg_using_pop (crtl->drap_reg, -UNITS_PER_WORD);
9229 /* Remove the saved static chain from the stack. The use of ECX is
9230 merely as a scratch register, not as the actual static chain. */
9231 if (ix86_static_chain_on_stack)
9235 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9236 ix86_cfa_state->offset += UNITS_PER_WORD;
9238 r = gen_rtx_REG (Pmode, CX_REG);
9239 insn = emit_insn (ix86_gen_pop1 (r));
9241 r = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
9242 r = gen_rtx_SET (VOIDmode, stack_pointer_rtx, r);
9243 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9244 RTX_FRAME_RELATED_P (insn) = 1;
9247 /* Sibcall epilogues don't want a return instruction. */
9250 *ix86_cfa_state = cfa_state_save;
9254 if (crtl->args.pops_args && crtl->args.size)
9256 rtx popc = GEN_INT (crtl->args.pops_args);
9258 /* i386 can only pop 64K bytes.  If asked to pop more, pop the return
9259 address, do an explicit add, and jump indirectly to the caller.  */
9261 if (crtl->args.pops_args >= 65536)
9263 rtx ecx = gen_rtx_REG (SImode, CX_REG);
9266 /* There is no "pascal" calling convention in any 64bit ABI. */
9267 gcc_assert (!TARGET_64BIT);
9269 insn = emit_insn (gen_popsi1 (ecx));
9270 ix86_cfa_state->offset -= UNITS_PER_WORD;
9272 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9273 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
9274 add_reg_note (insn, REG_CFA_REGISTER,
9275 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
9276 RTX_FRAME_RELATED_P (insn) = 1;
9278 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9280 emit_jump_insn (gen_return_indirect_internal (ecx));
9283 emit_jump_insn (gen_return_pop_internal (popc));
9286 emit_jump_insn (gen_return_internal ());
9288 /* Restore the state back to the state from the prologue,
9289 so that it's correct for the next epilogue. */
9290 *ix86_cfa_state = cfa_state_save;
9293 /* Reset from the function's potential modifications. */
9296 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9297 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9299 if (pic_offset_table_rtx)
9300 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
9302 /* Mach-O doesn't support labels at the end of objects, so if
9303 it looks like we might want one, insert a NOP. */
9305 rtx insn = get_last_insn ();
9308 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
9309 insn = PREV_INSN (insn);
9313 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
9314 fputs ("\tnop\n", file);
9320 /* Extract the parts of an RTL expression that is a valid memory address
9321 for an instruction. Return 0 if the structure of the address is
9322 grossly off. Return -1 if the address contains ASHIFT, so it is not
9323 strictly valid, but is still used for computing the length of a lea instruction.  */
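/* For reference (illustrative, editorial): a canonical x86 address has the
   shape base + index*scale + disp, e.g. the RTL
       (plus (plus (mult (reg A) (const_int 4)) (reg B)) (const_int 8))
   decomposes to base == B, index == A, scale == 4, disp == 8.  */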
9326 ix86_decompose_address (rtx addr, struct ix86_address *out)
9328 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
9329 rtx base_reg, index_reg;
9330 HOST_WIDE_INT scale = 1;
9331 rtx scale_rtx = NULL_RTX;
9333 enum ix86_address_seg seg = SEG_DEFAULT;
9335 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
9337 else if (GET_CODE (addr) == PLUS)
9347 addends[n++] = XEXP (op, 1);
9350 while (GET_CODE (op) == PLUS);
9355 for (i = n; i >= 0; --i)
9358 switch (GET_CODE (op))
9363 index = XEXP (op, 0);
9364 scale_rtx = XEXP (op, 1);
9368 if (XINT (op, 1) == UNSPEC_TP
9369 && TARGET_TLS_DIRECT_SEG_REFS
9370 && seg == SEG_DEFAULT)
9371 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
9400 else if (GET_CODE (addr) == MULT)
9402 index = XEXP (addr, 0); /* index*scale */
9403 scale_rtx = XEXP (addr, 1);
9405 else if (GET_CODE (addr) == ASHIFT)
9409 /* We're called for lea too, which implements ashift on occasion. */
9410 index = XEXP (addr, 0);
9411 tmp = XEXP (addr, 1);
9412 if (!CONST_INT_P (tmp))
9414 scale = INTVAL (tmp);
9415 if ((unsigned HOST_WIDE_INT) scale > 3)
9421 disp = addr; /* displacement */
9423 /* Extract the integral value of scale. */
9426 if (!CONST_INT_P (scale_rtx))
9428 scale = INTVAL (scale_rtx);
9431 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
9432 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
9434 /* Avoid useless 0 displacement. */
9435 if (disp == const0_rtx && (base || index))
9438 /* Allow the arg pointer and stack pointer as index if there is no scaling.  */
9439 if (base_reg && index_reg && scale == 1
9440 && (index_reg == arg_pointer_rtx
9441 || index_reg == frame_pointer_rtx
9442 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
9445 tmp = base, base = index, index = tmp;
9446 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
9449 /* Special case: %ebp cannot be encoded as a base without a displacement.  Similarly %r13.  */
9453 && (base_reg == hard_frame_pointer_rtx
9454 || base_reg == frame_pointer_rtx
9455 || base_reg == arg_pointer_rtx
9456 || (REG_P (base_reg)
9457 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
9458 || REGNO (base_reg) == R13_REG))))
9461 /* Special case: on K6, [%esi] makes the instruction vector decoded.
9462 Avoid this by transforming to [%esi+0].
9463 Reload calls address legitimization without cfun defined, so we need
9464 to test cfun for being non-NULL. */
9465 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
9466 && base_reg && !index_reg && !disp
9468 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
9471 /* Special case: encode reg+reg instead of reg*2. */
9472 if (!base && index && scale == 2)
9473 base = index, base_reg = index_reg, scale = 1;
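/* Editor's note (illustrative): (%eax,%eax) encodes in fewer bytes than
   (,%eax,2), because a SIB byte with no base register forces a 32-bit
   displacement -- the same reason scale-only addresses are rejected just
   below.  */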
9475 /* Special case: scaling cannot be encoded without base or displacement. */
9476 if (!base && !disp && index && scale != 1)
9488 /* Return the cost of the memory address x.
9489 For i386, it is better to use a complex address than let gcc copy
9490 the address into a reg and make a new pseudo.  But not if the address
9491 requires two regs - that would mean more pseudos with longer lifetimes.  */
9494 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
9496 struct ix86_address parts;
9498 int ok = ix86_decompose_address (x, &parts);
9502 if (parts.base && GET_CODE (parts.base) == SUBREG)
9503 parts.base = SUBREG_REG (parts.base);
9504 if (parts.index && GET_CODE (parts.index) == SUBREG)
9505 parts.index = SUBREG_REG (parts.index);
9507 /* Attempt to minimize number of registers in the address. */
9509 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
9511 && (!REG_P (parts.index)
9512 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
9516 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
9518 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
9519 && parts.base != parts.index)
9522 /* AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
9523 since its predecode logic can't detect the length of instructions
9524 and it degenerates to vector decoding.  Increase the cost of such
9525 addresses here.  The penalty is minimally 2 cycles.  It may be worthwhile
9526 to split such addresses or even refuse such addresses at all.
9528 The following addressing modes are affected:
	[base+scale*index]
	[scale*index+disp]
	[base+index]
9533 The first and last case may be avoidable by explicitly coding the zero in
9534 the memory address, but I don't have an AMD-K6 machine handy to check this theory.  */
9538 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
9539 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
9540 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
9546 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
9547 this is used to form addresses to local data when -fPIC is in use.  */
9551 darwin_local_data_pic (rtx disp)
9553 return (GET_CODE (disp) == UNSPEC
9554 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
9557 /* Determine if a given RTX is a valid constant. We already know this
9558 satisfies CONSTANT_P. */
9561 legitimate_constant_p (rtx x)
9563 switch (GET_CODE (x))
9568 if (GET_CODE (x) == PLUS)
9570 if (!CONST_INT_P (XEXP (x, 1)))
9575 if (TARGET_MACHO && darwin_local_data_pic (x))
9578 /* Only some unspecs are valid as "constants". */
9579 if (GET_CODE (x) == UNSPEC)
9580 switch (XINT (x, 1))
9585 return TARGET_64BIT;
9588 x = XVECEXP (x, 0, 0);
9589 return (GET_CODE (x) == SYMBOL_REF
9590 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9592 x = XVECEXP (x, 0, 0);
9593 return (GET_CODE (x) == SYMBOL_REF
9594 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
9599 /* We must have drilled down to a symbol. */
9600 if (GET_CODE (x) == LABEL_REF)
9602 if (GET_CODE (x) != SYMBOL_REF)
9607 /* TLS symbols are never valid. */
9608 if (SYMBOL_REF_TLS_MODEL (x))
9611 /* DLLIMPORT symbols are never valid. */
9612 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
9613 && SYMBOL_REF_DLLIMPORT_P (x))
9618 if (GET_MODE (x) == TImode
9619 && x != CONST0_RTX (TImode)
9625 if (!standard_sse_constant_p (x))
9632 /* Otherwise we handle everything else in the move patterns. */
9636 /* Determine if it's legal to put X into the constant pool. This
9637 is not possible for the address of thread-local symbols, which
9638 is checked above. */
9641 ix86_cannot_force_const_mem (rtx x)
9643 /* We can always put integral constants and vectors in memory. */
9644 switch (GET_CODE (x))
9654 return !legitimate_constant_p (x);
9658 /* Nonzero if the constant value X is a legitimate general operand
9659 when generating PIC code. It is given that flag_pic is on and
9660 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
9663 legitimate_pic_operand_p (rtx x)
9667 switch (GET_CODE (x))
9670 inner = XEXP (x, 0);
9671 if (GET_CODE (inner) == PLUS
9672 && CONST_INT_P (XEXP (inner, 1)))
9673 inner = XEXP (inner, 0);
9675 /* Only some unspecs are valid as "constants". */
9676 if (GET_CODE (inner) == UNSPEC)
9677 switch (XINT (inner, 1))
9682 return TARGET_64BIT;
9684 x = XVECEXP (inner, 0, 0);
9685 return (GET_CODE (x) == SYMBOL_REF
9686 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9687 case UNSPEC_MACHOPIC_OFFSET:
9688 return legitimate_pic_address_disp_p (x);
9696 return legitimate_pic_address_disp_p (x);
9703 /* Determine if a given CONST RTX is a valid memory displacement in PIC mode.  */
9707 legitimate_pic_address_disp_p (rtx disp)
9711 /* In 64bit mode we can allow direct addresses of symbols and labels
9712 when they are not dynamic symbols. */
9715 rtx op0 = disp, op1;
9717 switch (GET_CODE (disp))
9723 if (GET_CODE (XEXP (disp, 0)) != PLUS)
9725 op0 = XEXP (XEXP (disp, 0), 0);
9726 op1 = XEXP (XEXP (disp, 0), 1);
9727 if (!CONST_INT_P (op1)
9728 || INTVAL (op1) >= 16*1024*1024
9729 || INTVAL (op1) < -16*1024*1024)
9731 if (GET_CODE (op0) == LABEL_REF)
9733 if (GET_CODE (op0) != SYMBOL_REF)
9738 /* TLS references should always be enclosed in UNSPEC. */
9739 if (SYMBOL_REF_TLS_MODEL (op0))
9741 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
9742 && ix86_cmodel != CM_LARGE_PIC)
9750 if (GET_CODE (disp) != CONST)
9752 disp = XEXP (disp, 0);
9756 /* It is unsafe to allow PLUS expressions here; this limits the allowed
9757 distance of GOT tables.  We should not need these anyway.  */
9758 if (GET_CODE (disp) != UNSPEC
9759 || (XINT (disp, 1) != UNSPEC_GOTPCREL
9760 && XINT (disp, 1) != UNSPEC_GOTOFF
9761 && XINT (disp, 1) != UNSPEC_PLTOFF))
9764 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
9765 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
9771 if (GET_CODE (disp) == PLUS)
9773 if (!CONST_INT_P (XEXP (disp, 1)))
9775 disp = XEXP (disp, 0);
9779 if (TARGET_MACHO && darwin_local_data_pic (disp))
9782 if (GET_CODE (disp) != UNSPEC)
9785 switch (XINT (disp, 1))
9790 /* We need to check for both symbols and labels because VxWorks loads
9791 text labels with @GOT rather than @GOTOFF.  See gotoff_operand for details.  */
9793 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9794 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
9796 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
9797 While the ABI also specifies a 32bit relocation, we don't produce it in
9798 the small PIC model at all.  */
9799 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9800 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
9802 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
9804 case UNSPEC_GOTTPOFF:
9805 case UNSPEC_GOTNTPOFF:
9806 case UNSPEC_INDNTPOFF:
9809 disp = XVECEXP (disp, 0, 0);
9810 return (GET_CODE (disp) == SYMBOL_REF
9811 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
9813 disp = XVECEXP (disp, 0, 0);
9814 return (GET_CODE (disp) == SYMBOL_REF
9815 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
9817 disp = XVECEXP (disp, 0, 0);
9818 return (GET_CODE (disp) == SYMBOL_REF
9819 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
9825 /* Recognizes RTL expressions that are valid memory addresses for an
9826 instruction. The MODE argument is the machine mode for the MEM
9827 expression that wants to use this address.
9829 It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS should
9830 convert common non-canonical forms to canonical form so that they will be recognized.  */
9834 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
9835 rtx addr, bool strict)
9837 struct ix86_address parts;
9838 rtx base, index, disp;
9839 HOST_WIDE_INT scale;
9841 if (ix86_decompose_address (addr, &parts) <= 0)
9842 /* Decomposition failed. */
9846 index = parts.index;
9848 scale = parts.scale;
9850 /* Validate base register.
9852 Don't allow SUBREG's that span more than a word here. It can lead to spill
9853 failures when the base is one word out of a two word structure, which is
9854 represented internally as a DImode int. */
9862 else if (GET_CODE (base) == SUBREG
9863 && REG_P (SUBREG_REG (base))
9864 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
9866 reg = SUBREG_REG (base);
9868 /* Base is not a register. */
9871 if (GET_MODE (base) != Pmode)
9872 /* Base is not in Pmode. */
9875 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
9876 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
9877 /* Base is not valid. */
9881 /* Validate index register.
9883 Don't allow SUBREG's that span more than a word here -- same as above. */
9891 else if (GET_CODE (index) == SUBREG
9892 && REG_P (SUBREG_REG (index))
9893 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
9895 reg = SUBREG_REG (index);
9897 /* Index is not a register. */
9900 if (GET_MODE (index) != Pmode)
9901 /* Index is not in Pmode. */
9904 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
9905 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
9906 /* Index is not valid. */
9910 /* Validate scale factor. */
9914 /* Scale without index. */
9917 if (scale != 2 && scale != 4 && scale != 8)
9918 /* Scale is not a valid multiplier. */
9922 /* Validate displacement. */
9925 if (GET_CODE (disp) == CONST
9926 && GET_CODE (XEXP (disp, 0)) == UNSPEC
9927 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
9928 switch (XINT (XEXP (disp, 0), 1))
9930 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
9931 used.  While the ABI also specifies 32bit relocations, we don't produce
9932 them at all and use IP relative addressing instead.  */
9935 gcc_assert (flag_pic);
9937 goto is_legitimate_pic;
9939 /* 64bit address unspec. */
9942 case UNSPEC_GOTPCREL:
9943 gcc_assert (flag_pic);
9944 goto is_legitimate_pic;
9946 case UNSPEC_GOTTPOFF:
9947 case UNSPEC_GOTNTPOFF:
9948 case UNSPEC_INDNTPOFF:
9954 /* Invalid address unspec. */
9958 else if (SYMBOLIC_CONST (disp)
9962 && MACHOPIC_INDIRECT
9963 && !machopic_operand_p (disp)
9969 if (TARGET_64BIT && (index || base))
9971 /* foo@dtpoff(%rX) is ok. */
9972 if (GET_CODE (disp) != CONST
9973 || GET_CODE (XEXP (disp, 0)) != PLUS
9974 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
9975 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
9976 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
9977 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
9978 /* Non-constant pic memory reference. */
9981 else if (! legitimate_pic_address_disp_p (disp))
9982 /* Displacement is an invalid pic construct. */
9985 /* This code used to verify that a symbolic pic displacement
9986 includes the pic_offset_table_rtx register.
9988 While this is a good idea, unfortunately these constructs may
9989 be created by the "adds using lea" optimization for incorrect code.
9998 Such code is nonsensical, but results in addressing the
9999 GOT table with a pic_offset_table_rtx base.  We can't
10000 just refuse it easily, since it gets matched by the
10001 "addsi3" pattern, which later gets split to lea in the
10002 case the output register differs from the input.  While this
10003 could be handled by a separate addsi pattern for this case
10004 that never results in lea, disabling this test seems to be the
10005 easier and correct fix for the crash.  */
10007 else if (GET_CODE (disp) != LABEL_REF
10008 && !CONST_INT_P (disp)
10009 && (GET_CODE (disp) != CONST
10010 || !legitimate_constant_p (disp))
10011 && (GET_CODE (disp) != SYMBOL_REF
10012 || !legitimate_constant_p (disp)))
10013 /* Displacement is not constant. */
10015 else if (TARGET_64BIT
10016 && !x86_64_immediate_operand (disp, VOIDmode))
10017 /* Displacement is out of range. */
10021 /* Everything looks valid. */
10025 /* Determine if a given RTX is a valid constant address. */
10028 constant_address_p (rtx x)
10030 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
10033 /* Return a unique alias set for the GOT. */
10035 static alias_set_type
10036 ix86_GOT_alias_set (void)
10038 static alias_set_type set = -1;
10040 set = new_alias_set ();
10044 /* Return a legitimate reference for ORIG (an address) using the
10045 register REG. If REG is 0, a new pseudo is generated.
10047 There are two types of references that must be handled:
10049 1. Global data references must load the address from the GOT, via
10050 the PIC reg. An insn is emitted to do this load, and the reg is
10053 2. Static data references, constant pool addresses, and code labels
10054 compute the address as an offset from the GOT, whose base is in
10055 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
10056 differentiate them from global data objects. The returned
10057 address is the PIC reg + an unspec constant.
10059 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
10060 reg also appears in the address. */
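/* Illustrative sketch of the two cases (editorial, not from the original
   source): a global symbol loads its address from its GOT slot,
       new = mem (pic_reg + sym@GOT)
   while a local symbol is formed as an offset from the PIC base,
       new = pic_reg + sym@GOTOFF.  */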
10063 legitimize_pic_address (rtx orig, rtx reg)
10066 rtx new_rtx = orig;
10070 if (TARGET_MACHO && !TARGET_64BIT)
10073 reg = gen_reg_rtx (Pmode);
10074 /* Use the generic Mach-O PIC machinery. */
10075 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
10079 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
10081 else if (TARGET_64BIT
10082 && ix86_cmodel != CM_SMALL_PIC
10083 && gotoff_operand (addr, Pmode))
10086 /* This symbol may be referenced via a displacement from the PIC
10087 base address (@GOTOFF). */
10089 if (reload_in_progress)
10090 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10091 if (GET_CODE (addr) == CONST)
10092 addr = XEXP (addr, 0);
10093 if (GET_CODE (addr) == PLUS)
10095 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10097 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10100 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10101 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10103 tmpreg = gen_reg_rtx (Pmode);
10106 emit_move_insn (tmpreg, new_rtx);
10110 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
10111 tmpreg, 1, OPTAB_DIRECT);
10114 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
10116 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
10118 /* This symbol may be referenced via a displacement from the PIC
10119 base address (@GOTOFF). */
10121 if (reload_in_progress)
10122 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10123 if (GET_CODE (addr) == CONST)
10124 addr = XEXP (addr, 0);
10125 if (GET_CODE (addr) == PLUS)
10127 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10129 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10132 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10133 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10134 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10138 emit_move_insn (reg, new_rtx);
10142 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
10143 /* We can't use @GOTOFF for text labels on VxWorks;
10144 see gotoff_operand. */
10145 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
10147 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10149 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
10150 return legitimize_dllimport_symbol (addr, true);
10151 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
10152 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
10153 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
10155 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
10156 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
10160 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
10162 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
10163 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10164 new_rtx = gen_const_mem (Pmode, new_rtx);
10165 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10168 reg = gen_reg_rtx (Pmode);
10169 /* Use gen_movsi directly, otherwise the address is loaded
10170 into a register for CSE.  We don't want to CSE these addresses;
10171 instead we CSE addresses from the GOT table, so skip this.  */
10172 emit_insn (gen_movsi (reg, new_rtx));
10177 /* This symbol must be referenced via a load from the
10178 Global Offset Table (@GOT). */
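/* Illustrative (editorial): on 32-bit ELF this becomes a load of the form
   movl sym@GOT(%ebx), %reg -- i.e. *(PIC register + sym's GOT slot).  */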
10180 if (reload_in_progress)
10181 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10182 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
10183 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10185 new_rtx = force_reg (Pmode, new_rtx);
10186 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10187 new_rtx = gen_const_mem (Pmode, new_rtx);
10188 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10191 reg = gen_reg_rtx (Pmode);
10192 emit_move_insn (reg, new_rtx);
10198 if (CONST_INT_P (addr)
10199 && !x86_64_immediate_operand (addr, VOIDmode))
10203 emit_move_insn (reg, addr);
10207 new_rtx = force_reg (Pmode, addr);
10209 else if (GET_CODE (addr) == CONST)
10211 addr = XEXP (addr, 0);
10213 /* We must match stuff we generate before. Assume the only
10214 unspecs that can get here are ours. Not that we could do
10215 anything with them anyway.... */
10216 if (GET_CODE (addr) == UNSPEC
10217 || (GET_CODE (addr) == PLUS
10218 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
10220 gcc_assert (GET_CODE (addr) == PLUS);
10222 if (GET_CODE (addr) == PLUS)
10224 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
10226 /* Check first to see if this is a constant offset from a @GOTOFF
10227 symbol reference. */
10228 if (gotoff_operand (op0, Pmode)
10229 && CONST_INT_P (op1))
10233 if (reload_in_progress)
10234 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10235 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
10237 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
10238 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10239 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10243 emit_move_insn (reg, new_rtx);
10249 if (INTVAL (op1) < -16*1024*1024
10250 || INTVAL (op1) >= 16*1024*1024)
10252 if (!x86_64_immediate_operand (op1, Pmode))
10253 op1 = force_reg (Pmode, op1);
10254 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
10260 base = legitimize_pic_address (XEXP (addr, 0), reg);
10261 new_rtx = legitimize_pic_address (XEXP (addr, 1),
10262 base == reg ? NULL_RTX : reg);
10264 if (CONST_INT_P (new_rtx))
10265 new_rtx = plus_constant (base, INTVAL (new_rtx));
10268 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
10270 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
10271 new_rtx = XEXP (new_rtx, 1);
10273 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
10281 /* Load the thread pointer. If TO_REG is true, force it into a register. */
10284 get_thread_pointer (int to_reg)
10288 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
10292 reg = gen_reg_rtx (Pmode);
10293 insn = gen_rtx_SET (VOIDmode, reg, tp);
10294 insn = emit_insn (insn);
10299 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
10300 false if we expect this to be used for a memory address and true if
10301 we expect to load the address into a register. */
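/* Rough summary of the cases below (editorial, hedged): the global- and
   local-dynamic models go through the tls_get_addr machinery; initial-exec
   loads the offset from the GOT (@gottpoff/@gotntpoff/@indntpoff) and adds
   it to the thread pointer; local-exec folds the @tpoff/@ntpoff offset in
   directly, e.g. %fs:sym@tpoff on x86-64 with direct segment references.  */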
10304 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
10306 rtx dest, base, off, pic, tp;
10311 case TLS_MODEL_GLOBAL_DYNAMIC:
10312 dest = gen_reg_rtx (Pmode);
10313 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10315 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10317 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
10320 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
10321 insns = get_insns ();
10324 RTL_CONST_CALL_P (insns) = 1;
10325 emit_libcall_block (insns, dest, rax, x);
10327 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10328 emit_insn (gen_tls_global_dynamic_64 (dest, x));
10330 emit_insn (gen_tls_global_dynamic_32 (dest, x));
10332 if (TARGET_GNU2_TLS)
10334 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
10336 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10340 case TLS_MODEL_LOCAL_DYNAMIC:
10341 base = gen_reg_rtx (Pmode);
10342 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10344 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10346 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
10349 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
10350 insns = get_insns ();
10353 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
10354 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
10355 RTL_CONST_CALL_P (insns) = 1;
10356 emit_libcall_block (insns, base, rax, note);
10358 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10359 emit_insn (gen_tls_local_dynamic_base_64 (base));
10361 emit_insn (gen_tls_local_dynamic_base_32 (base));
10363 if (TARGET_GNU2_TLS)
10365 rtx x = ix86_tls_module_base ();
10367 set_unique_reg_note (get_last_insn (), REG_EQUIV,
10368 gen_rtx_MINUS (Pmode, x, tp));
10371 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
10372 off = gen_rtx_CONST (Pmode, off);
10374 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
10376 if (TARGET_GNU2_TLS)
10378 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
10380 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10385 case TLS_MODEL_INITIAL_EXEC:
10389 type = UNSPEC_GOTNTPOFF;
10393 if (reload_in_progress)
10394 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10395 pic = pic_offset_table_rtx;
10396 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
10398 else if (!TARGET_ANY_GNU_TLS)
10400 pic = gen_reg_rtx (Pmode);
10401 emit_insn (gen_set_got (pic));
10402 type = UNSPEC_GOTTPOFF;
10407 type = UNSPEC_INDNTPOFF;
10410 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
10411 off = gen_rtx_CONST (Pmode, off);
10413 off = gen_rtx_PLUS (Pmode, pic, off);
10414 off = gen_const_mem (Pmode, off);
10415 set_mem_alias_set (off, ix86_GOT_alias_set ());
10417 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10419 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10420 off = force_reg (Pmode, off);
10421 return gen_rtx_PLUS (Pmode, base, off);
10425 base = get_thread_pointer (true);
10426 dest = gen_reg_rtx (Pmode);
10427 emit_insn (gen_subsi3 (dest, base, off));
10431 case TLS_MODEL_LOCAL_EXEC:
10432 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
10433 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10434 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
10435 off = gen_rtx_CONST (Pmode, off);
10437 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10439 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10440 return gen_rtx_PLUS (Pmode, base, off);
10444 base = get_thread_pointer (true);
10445 dest = gen_reg_rtx (Pmode);
10446 emit_insn (gen_subsi3 (dest, base, off));
10451 gcc_unreachable ();
10457 /* Create or return the unique __imp_DECL dllimport symbol corresponding
to DECL. */
10460 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
10461 htab_t dllimport_map;
10464 get_dllimport_decl (tree decl)
10466 struct tree_map *h, in;
10469 const char *prefix;
10470 size_t namelen, prefixlen;
10475 if (!dllimport_map)
10476 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
10478 in.hash = htab_hash_pointer (decl);
10479 in.base.from = decl;
10480 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
10481 h = (struct tree_map *) *loc;
10485 *loc = h = GGC_NEW (struct tree_map);
10487 h->base.from = decl;
10488 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
10489 VAR_DECL, NULL, ptr_type_node);
10490 DECL_ARTIFICIAL (to) = 1;
10491 DECL_IGNORED_P (to) = 1;
10492 DECL_EXTERNAL (to) = 1;
10493 TREE_READONLY (to) = 1;
10495 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10496 name = targetm.strip_name_encoding (name);
10497 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
10498 ? "*__imp_" : "*__imp__";
10499 namelen = strlen (name);
10500 prefixlen = strlen (prefix);
10501 imp_name = (char *) alloca (namelen + prefixlen + 1);
10502 memcpy (imp_name, prefix, prefixlen);
10503 memcpy (imp_name + prefixlen, name, namelen + 1);
10505 name = ggc_alloc_string (imp_name, namelen + prefixlen);
10506 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
10507 SET_SYMBOL_REF_DECL (rtl, to);
10508 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
10510 rtl = gen_const_mem (Pmode, rtl);
10511 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
10513 SET_DECL_RTL (to, rtl);
10514 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
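/* For example, a reference to a variable declared
   "__declspec(dllimport) int foo;" is not addressed as _foo directly;
   instead it loads the pointer stored in the import-table slot, roughly
   "movl __imp__foo, %eax" followed by a dereference of %eax.  */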
10519 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
10520 true if we require the result be a register. */
10523 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
10528 gcc_assert (SYMBOL_REF_DECL (symbol));
10529 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
10531 x = DECL_RTL (imp_decl);
10533 x = force_reg (Pmode, x);
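/* A typical rewrite performed below: an address such as
   (plus (ashift (reg) (const_int 2)) (reg)) becomes
   (plus (mult (reg) (const_int 4)) (reg)), which matches the
   base + index*scale form of the i386 SIB byte and can be emitted
   as e.g. "(%ebx,%ecx,4)".  */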
10537 /* Try machine-dependent ways of modifying an illegitimate address
10538 to be legitimate. If we find one, return the new, valid address.
10539 This macro is used in only one place: `memory_address' in explow.c.
10541 OLDX is the address as it was before break_out_memory_refs was called.
10542 In some cases it is useful to look at this to decide what needs to be done.
10544 It is always safe for this macro to do nothing. It exists to recognize
10545 opportunities to optimize the output.
10547 For the 80386, we handle X+REG by loading X into a register R and
10548 using R+REG. R will go in a general reg and indexing will be used.
10549 However, if REG is a broken-out memory address or multiplication,
10550 nothing needs to be done because REG can certainly go in a general reg.
10552 When -fpic is used, special handling is needed for symbolic references.
10553 See comments by legitimize_pic_address in i386.c for details. */
10556 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
10557 enum machine_mode mode)
10562 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
10564 return legitimize_tls_address (x, (enum tls_model) log, false);
10565 if (GET_CODE (x) == CONST
10566 && GET_CODE (XEXP (x, 0)) == PLUS
10567 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10568 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
10570 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
10571 (enum tls_model) log, false);
10572 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10575 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10577 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
10578 return legitimize_dllimport_symbol (x, true);
10579 if (GET_CODE (x) == CONST
10580 && GET_CODE (XEXP (x, 0)) == PLUS
10581 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10582 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
10584 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
10585 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10589 if (flag_pic && SYMBOLIC_CONST (x))
10590 return legitimize_pic_address (x, 0);
10592 /* Canonicalize shifts by 0, 1, 2, 3 into a multiply. */
10593 if (GET_CODE (x) == ASHIFT
10594 && CONST_INT_P (XEXP (x, 1))
10595 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
10598 log = INTVAL (XEXP (x, 1));
10599 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
10600 GEN_INT (1 << log));
10603 if (GET_CODE (x) == PLUS)
10605 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
10607 if (GET_CODE (XEXP (x, 0)) == ASHIFT
10608 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10609 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
10612 log = INTVAL (XEXP (XEXP (x, 0), 1));
10613 XEXP (x, 0) = gen_rtx_MULT (Pmode,
10614 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
10615 GEN_INT (1 << log));
10618 if (GET_CODE (XEXP (x, 1)) == ASHIFT
10619 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10620 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
10623 log = INTVAL (XEXP (XEXP (x, 1), 1));
10624 XEXP (x, 1) = gen_rtx_MULT (Pmode,
10625 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
10626 GEN_INT (1 << log));
10629 /* Put multiply first if it isn't already. */
10630 if (GET_CODE (XEXP (x, 1)) == MULT)
10632 rtx tmp = XEXP (x, 0);
10633 XEXP (x, 0) = XEXP (x, 1);
10638 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
10639 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
10640 created by virtual register instantiation, register elimination, and
10641 similar optimizations. */
10642 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
10645 x = gen_rtx_PLUS (Pmode,
10646 gen_rtx_PLUS (Pmode, XEXP (x, 0),
10647 XEXP (XEXP (x, 1), 0)),
10648 XEXP (XEXP (x, 1), 1));
10652 /* Canonicalize (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
10653 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
10654 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
10655 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10656 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
10657 && CONSTANT_P (XEXP (x, 1)))
10660 rtx other = NULL_RTX;
10662 if (CONST_INT_P (XEXP (x, 1)))
10664 constant = XEXP (x, 1);
10665 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
10667 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
10669 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
10670 other = XEXP (x, 1);
10678 x = gen_rtx_PLUS (Pmode,
10679 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
10680 XEXP (XEXP (XEXP (x, 0), 1), 0)),
10681 plus_constant (other, INTVAL (constant)));
10685 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10688 if (GET_CODE (XEXP (x, 0)) == MULT)
10691 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
10694 if (GET_CODE (XEXP (x, 1)) == MULT)
10697 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
10701 && REG_P (XEXP (x, 1))
10702 && REG_P (XEXP (x, 0)))
10705 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
10708 x = legitimize_pic_address (x, 0);
10711 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10714 if (REG_P (XEXP (x, 0)))
10716 rtx temp = gen_reg_rtx (Pmode);
10717 rtx val = force_operand (XEXP (x, 1), temp);
10719 emit_move_insn (temp, val);
10721 XEXP (x, 1) = temp;
10725 else if (REG_P (XEXP (x, 1)))
10727 rtx temp = gen_reg_rtx (Pmode);
10728 rtx val = force_operand (XEXP (x, 0), temp);
10730 emit_move_insn (temp, val);
10732 XEXP (x, 0) = temp;
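/* In PIC code the printer below decorates symbols with relocation
   suffixes; e.g. a local data symbol may come out as "foo@GOTOFF" and
   a non-local function called through the PLT as "foo@PLT".  */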
10740 /* Print an integer constant expression in assembler syntax. Addition
10741 and subtraction are the only arithmetic that may appear in these
10742 expressions. FILE is the stdio stream to write to, X is the rtx, and
10743 CODE is the operand print code from the output string. */
10746 output_pic_addr_const (FILE *file, rtx x, int code)
10750 switch (GET_CODE (x))
10753 gcc_assert (flag_pic);
10758 if (! TARGET_MACHO || TARGET_64BIT)
10759 output_addr_const (file, x);
10762 const char *name = XSTR (x, 0);
10764 /* Mark the decl as referenced so that cgraph will
10765 output the function. */
10766 if (SYMBOL_REF_DECL (x))
10767 mark_decl_referenced (SYMBOL_REF_DECL (x));
10770 if (MACHOPIC_INDIRECT
10771 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
10772 name = machopic_indirection_name (x, /*stub_p=*/true);
10774 assemble_name (file, name);
10776 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
10777 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
10778 fputs ("@PLT", file);
10785 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
10786 assemble_name (asm_out_file, buf);
10790 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
10794 /* This used to output parentheses around the expression,
10795 but that does not work on the 386 (either ATT or BSD assembler). */
10796 output_pic_addr_const (file, XEXP (x, 0), code);
10800 if (GET_MODE (x) == VOIDmode)
10802 /* We can use %d if the number is <32 bits and positive. */
10803 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
10804 fprintf (file, "0x%lx%08lx",
10805 (unsigned long) CONST_DOUBLE_HIGH (x),
10806 (unsigned long) CONST_DOUBLE_LOW (x));
10808 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
10811 /* We can't handle floating point constants;
10812 PRINT_OPERAND must handle them. */
10813 output_operand_lossage ("floating constant misused");
10817 /* Some assemblers need integer constants to appear first. */
10818 if (CONST_INT_P (XEXP (x, 0)))
10820 output_pic_addr_const (file, XEXP (x, 0), code);
10822 output_pic_addr_const (file, XEXP (x, 1), code);
10826 gcc_assert (CONST_INT_P (XEXP (x, 1)));
10827 output_pic_addr_const (file, XEXP (x, 1), code);
10829 output_pic_addr_const (file, XEXP (x, 0), code);
10835 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
10836 output_pic_addr_const (file, XEXP (x, 0), code);
10838 output_pic_addr_const (file, XEXP (x, 1), code);
10840 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
10844 gcc_assert (XVECLEN (x, 0) == 1);
10845 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
10846 switch (XINT (x, 1))
10849 fputs ("@GOT", file);
10851 case UNSPEC_GOTOFF:
10852 fputs ("@GOTOFF", file);
10854 case UNSPEC_PLTOFF:
10855 fputs ("@PLTOFF", file);
10857 case UNSPEC_GOTPCREL:
10858 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10859 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
10861 case UNSPEC_GOTTPOFF:
10862 /* FIXME: This might be @TPOFF in Sun ld too. */
10863 fputs ("@gottpoff", file);
10866 fputs ("@tpoff", file);
10868 case UNSPEC_NTPOFF:
10870 fputs ("@tpoff", file);
10872 fputs ("@ntpoff", file);
10874 case UNSPEC_DTPOFF:
10875 fputs ("@dtpoff", file);
10877 case UNSPEC_GOTNTPOFF:
10879 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10880 "@gottpoff(%rip)": "@gottpoff[rip]", file);
10882 fputs ("@gotntpoff", file);
10884 case UNSPEC_INDNTPOFF:
10885 fputs ("@indntpoff", file);
10888 case UNSPEC_MACHOPIC_OFFSET:
10890 machopic_output_function_base_name (file);
10894 output_operand_lossage ("invalid UNSPEC as operand");
10900 output_operand_lossage ("invalid expression as operand");
10904 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10905 We need to emit DTP-relative relocations. */
10907 static void ATTRIBUTE_UNUSED
10908 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
10910 fputs (ASM_LONG, file);
10911 output_addr_const (file, x);
10912 fputs ("@dtpoff", file);
10918 fputs (", 0", file);
10921 gcc_unreachable ();
10925 /* Return true if X is a representation of the PIC register. This copes
10926 with calls from ix86_find_base_term, where the register might have
10927 been replaced by a cselib value. */
10930 ix86_pic_register_p (rtx x)
10932 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
10933 return (pic_offset_table_rtx
10934 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
10936 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
10939 /* In the name of slightly smaller debug output, and to cater to
10940 general assembler lossage, recognize PIC+GOTOFF and turn it back
10941 into a direct symbol reference.
10943 On Darwin, this is necessary to avoid a crash, because Darwin
10944 has a different PIC label for each routine but the DWARF debugging
10945 information is not associated with any particular routine, so it's
10946 necessary to remove references to the PIC label from RTL stored by
10947 the DWARF output code. */
10950 ix86_delegitimize_address (rtx x)
10952 rtx orig_x = delegitimize_mem_from_attrs (x);
10953 /* addend is NULL or some rtx if x is something+GOTOFF where
10954 something doesn't include the PIC register. */
10955 rtx addend = NULL_RTX;
10956 /* reg_addend is NULL or a multiple of some register. */
10957 rtx reg_addend = NULL_RTX;
10958 /* const_addend is NULL or a const_int. */
10959 rtx const_addend = NULL_RTX;
10960 /* This is the result, or NULL. */
10961 rtx result = NULL_RTX;
10970 if (GET_CODE (x) != CONST
10971 || GET_CODE (XEXP (x, 0)) != UNSPEC
10972 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
10973 || !MEM_P (orig_x))
10975 return XVECEXP (XEXP (x, 0), 0, 0);
10978 if (GET_CODE (x) != PLUS
10979 || GET_CODE (XEXP (x, 1)) != CONST)
10982 if (ix86_pic_register_p (XEXP (x, 0)))
10983 /* %ebx + GOT/GOTOFF */
10985 else if (GET_CODE (XEXP (x, 0)) == PLUS)
10987 /* %ebx + %reg * scale + GOT/GOTOFF */
10988 reg_addend = XEXP (x, 0);
10989 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
10990 reg_addend = XEXP (reg_addend, 1);
10991 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
10992 reg_addend = XEXP (reg_addend, 0);
10995 reg_addend = NULL_RTX;
10996 addend = XEXP (x, 0);
11000 addend = XEXP (x, 0);
11002 x = XEXP (XEXP (x, 1), 0);
11003 if (GET_CODE (x) == PLUS
11004 && CONST_INT_P (XEXP (x, 1)))
11006 const_addend = XEXP (x, 1);
11010 if (GET_CODE (x) == UNSPEC
11011 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
11012 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
11013 result = XVECEXP (x, 0, 0);
11015 if (TARGET_MACHO && darwin_local_data_pic (x)
11016 && !MEM_P (orig_x))
11017 result = XVECEXP (x, 0, 0);
11023 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
11025 result = gen_rtx_PLUS (Pmode, reg_addend, result);
11028 /* If the rest of original X doesn't involve the PIC register, add
11029 addend and subtract pic_offset_table_rtx. This can happen e.g.
11031 leal (%ebx, %ecx, 4), %ecx
11033 movl foo@GOTOFF(%ecx), %edx
11034 in which case we return (%ecx - %ebx) + foo. */
11035 if (pic_offset_table_rtx)
11036 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
11037 pic_offset_table_rtx),
11045 /* If X is a machine specific address (i.e. a symbol or label being
11046 referenced as a displacement from the GOT implemented using an
11047 UNSPEC), then return the base term. Otherwise return X. */
11050 ix86_find_base_term (rtx x)
11056 if (GET_CODE (x) != CONST)
11058 term = XEXP (x, 0);
11059 if (GET_CODE (term) == PLUS
11060 && (CONST_INT_P (XEXP (term, 1))
11061 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
11062 term = XEXP (term, 0);
11063 if (GET_CODE (term) != UNSPEC
11064 || XINT (term, 1) != UNSPEC_GOTPCREL)
11067 return XVECEXP (term, 0, 0);
11070 return ix86_delegitimize_address (x);
11074 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
11075 int fp, FILE *file)
11077 const char *suffix;
11079 if (mode == CCFPmode || mode == CCFPUmode)
11081 code = ix86_fp_compare_code_to_integer (code);
11085 code = reverse_condition (code);
11136 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
11140 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
11141 Those same assemblers have the same but opposite lossage on cmov. */
11142 if (mode == CCmode)
11143 suffix = fp ? "nbe" : "a";
11144 else if (mode == CCCmode)
11147 gcc_unreachable ();
11163 gcc_unreachable ();
11167 gcc_assert (mode == CCmode || mode == CCCmode);
11184 gcc_unreachable ();
11188 /* ??? As above. */
11189 gcc_assert (mode == CCmode || mode == CCCmode);
11190 suffix = fp ? "nb" : "ae";
11193 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
11197 /* ??? As above. */
11198 if (mode == CCmode)
11200 else if (mode == CCCmode)
11201 suffix = fp ? "nb" : "ae";
11203 gcc_unreachable ();
11206 suffix = fp ? "u" : "p";
11209 suffix = fp ? "nu" : "np";
11212 gcc_unreachable ();
11214 fputs (suffix, file);
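/* Example: for hard register 0 (the a-register family), code 'b'
   prints "al", 'w' prints "ax", 'k' prints "eax" and 'q' prints
   "rax", each preceded by '%' in AT&T syntax.  */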
11217 /* Print the name of register X to FILE based on its machine mode and number.
11218 If CODE is 'w', pretend the mode is HImode.
11219 If CODE is 'b', pretend the mode is QImode.
11220 If CODE is 'k', pretend the mode is SImode.
11221 If CODE is 'q', pretend the mode is DImode.
11222 If CODE is 'x', pretend the mode is V4SFmode.
11223 If CODE is 't', pretend the mode is V8SFmode.
11224 If CODE is 'h', pretend the reg is the 'high' byte register.
11225 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op.
11226 If CODE is 'd', duplicate the operand for AVX instruction.
11230 print_reg (rtx x, int code, FILE *file)
11233 bool duplicated = code == 'd' && TARGET_AVX;
11235 gcc_assert (x == pc_rtx
11236 || (REGNO (x) != ARG_POINTER_REGNUM
11237 && REGNO (x) != FRAME_POINTER_REGNUM
11238 && REGNO (x) != FLAGS_REG
11239 && REGNO (x) != FPSR_REG
11240 && REGNO (x) != FPCR_REG));
11242 if (ASSEMBLER_DIALECT == ASM_ATT)
11247 gcc_assert (TARGET_64BIT);
11248 fputs ("rip", file);
11252 if (code == 'w' || MMX_REG_P (x))
11254 else if (code == 'b')
11256 else if (code == 'k')
11258 else if (code == 'q')
11260 else if (code == 'y')
11262 else if (code == 'h')
11264 else if (code == 'x')
11266 else if (code == 't')
11269 code = GET_MODE_SIZE (GET_MODE (x));
11271 /* Irritatingly, AMD extended registers use a different naming convention
11272 from the normal registers. */
11273 if (REX_INT_REG_P (x))
11275 gcc_assert (TARGET_64BIT);
11279 error ("extended registers have no high halves");
11282 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
11285 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
11288 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
11291 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
11294 error ("unsupported operand size for extended register");
11304 if (STACK_TOP_P (x))
11313 if (! ANY_FP_REG_P (x))
11314 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
11319 reg = hi_reg_name[REGNO (x)];
11322 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
11324 reg = qi_reg_name[REGNO (x)];
11327 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
11329 reg = qi_high_reg_name[REGNO (x)];
11334 gcc_assert (!duplicated);
11336 fputs (hi_reg_name[REGNO (x)] + 1, file);
11341 gcc_unreachable ();
11347 if (ASSEMBLER_DIALECT == ASM_ATT)
11348 fprintf (file, ", %%%s", reg);
11350 fprintf (file, ", %s", reg);
11354 /* Locate some local-dynamic symbol still in use by this function
11355 so that we can print its name in some tls_local_dynamic_base
pattern. */
11359 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11363 if (GET_CODE (x) == SYMBOL_REF
11364 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11366 cfun->machine->some_ld_name = XSTR (x, 0);
11373 static const char *
11374 get_some_local_dynamic_name (void)
11378 if (cfun->machine->some_ld_name)
11379 return cfun->machine->some_ld_name;
11381 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11383 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
11384 return cfun->machine->some_ld_name;
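/* As an illustration of the codes listed below: in a hypothetical
   template fragment "set%C1\t%b0", with operands[1] an EQ comparison
   and operands[0] the a-register, print_operand would emit
   "sete %al".  */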
11389 /* Meaning of CODE:
11390 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
11391 C -- print opcode suffix for set/cmov insn.
11392 c -- like C, but print reversed condition
11393 F,f -- likewise, but for floating-point.
11394 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.", otherwise nothing
11396 R -- print the prefix for register names.
11397 z -- print the opcode suffix for the size of the current operand.
11398 Z -- likewise, with special suffixes for x87 instructions.
11399 * -- print a star (in certain assembler syntax)
11400 A -- print an absolute memory reference.
11401 w -- print the operand as if it's a "word" (HImode) even if it isn't.
11402 s -- print a shift double count, followed by the assembler's argument delimiter
11404 b -- print the QImode name of the register for the indicated operand.
11405 %b0 would print %al if operands[0] is reg 0.
11406 w -- likewise, print the HImode name of the register.
11407 k -- likewise, print the SImode name of the register.
11408 q -- likewise, print the DImode name of the register.
11409 x -- likewise, print the V4SFmode name of the register.
11410 t -- likewise, print the V8SFmode name of the register.
11411 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
11412 y -- print "st(0)" instead of "st" as a register.
11413 d -- print duplicated register operand for AVX instruction.
11414 D -- print condition for SSE cmp instruction.
11415 P -- if PIC, print an @PLT suffix.
11416 X -- don't print any sort of PIC '@' suffix for a symbol.
11417 & -- print some in-use local-dynamic symbol name.
11418 H -- print a memory address offset by 8; used for sse high-parts
11419 Y -- print condition for XOP pcom* instruction.
11420 + -- print a branch hint as 'cs' or 'ds' prefix
11421 ; -- print a semicolon (after prefixes due to bug in older gas).
11425 print_operand (FILE *file, rtx x, int code)
11432 if (ASSEMBLER_DIALECT == ASM_ATT)
11438 const char *name = get_some_local_dynamic_name ();
11440 output_operand_lossage ("'%%&' used without any "
11441 "local dynamic TLS references");
11443 assemble_name (file, name);
11448 switch (ASSEMBLER_DIALECT)
11455 /* Intel syntax. For absolute addresses, registers should not
11456 be surrounded by braces. */
11460 PRINT_OPERAND (file, x, 0);
11467 gcc_unreachable ();
11470 PRINT_OPERAND (file, x, 0);
11475 if (ASSEMBLER_DIALECT == ASM_ATT)
11480 if (ASSEMBLER_DIALECT == ASM_ATT)
11485 if (ASSEMBLER_DIALECT == ASM_ATT)
11490 if (ASSEMBLER_DIALECT == ASM_ATT)
11495 if (ASSEMBLER_DIALECT == ASM_ATT)
11500 if (ASSEMBLER_DIALECT == ASM_ATT)
11505 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11507 /* Opcodes don't get size suffixes if using Intel opcodes. */
11508 if (ASSEMBLER_DIALECT == ASM_INTEL)
11511 switch (GET_MODE_SIZE (GET_MODE (x)))
11530 output_operand_lossage
11531 ("invalid operand size for operand code '%c'", code);
11536 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11538 (0, "non-integer operand used with operand code '%c'", code);
11542 /* 387 opcodes don't get size suffixes if using Intel opcodes. */
11543 if (ASSEMBLER_DIALECT == ASM_INTEL)
11546 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11548 switch (GET_MODE_SIZE (GET_MODE (x)))
11551 #ifdef HAVE_AS_IX86_FILDS
11561 #ifdef HAVE_AS_IX86_FILDQ
11564 fputs ("ll", file);
11572 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11574 /* 387 opcodes don't get size suffixes
11575 if the operands are registers. */
11576 if (STACK_REG_P (x))
11579 switch (GET_MODE_SIZE (GET_MODE (x)))
11600 output_operand_lossage
11601 ("invalid operand type used with operand code '%c'", code);
11605 output_operand_lossage
11606 ("invalid operand size for operand code '%c'", code);
11623 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
11625 PRINT_OPERAND (file, x, 0);
11626 fputs (", ", file);
11631 /* A little bit of brain damage here: the SSE compare instructions
11632 use completely different names for the comparisons than the
11633 fp conditional moves do. */
11636 switch (GET_CODE (x))
11639 fputs ("eq", file);
11642 fputs ("eq_us", file);
11645 fputs ("lt", file);
11648 fputs ("nge", file);
11651 fputs ("le", file);
11654 fputs ("ngt", file);
11657 fputs ("unord", file);
11660 fputs ("neq", file);
11663 fputs ("neq_oq", file);
11666 fputs ("ge", file);
11669 fputs ("nlt", file);
11672 fputs ("gt", file);
11675 fputs ("nle", file);
11678 fputs ("ord", file);
11681 output_operand_lossage ("operand is not a condition code, "
11682 "invalid operand code 'D'");
11688 switch (GET_CODE (x))
11692 fputs ("eq", file);
11696 fputs ("lt", file);
11700 fputs ("le", file);
11703 fputs ("unord", file);
11707 fputs ("neq", file);
11711 fputs ("nlt", file);
11715 fputs ("nle", file);
11718 fputs ("ord", file);
11721 output_operand_lossage ("operand is not a condition code, "
11722 "invalid operand code 'D'");
11728 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11729 if (ASSEMBLER_DIALECT == ASM_ATT)
11731 switch (GET_MODE (x))
11733 case HImode: putc ('w', file); break;
11735 case SFmode: putc ('l', file); break;
11737 case DFmode: putc ('q', file); break;
11738 default: gcc_unreachable ();
11745 if (!COMPARISON_P (x))
11747 output_operand_lossage ("operand is neither a constant nor a "
11748 "condition code, invalid operand code "
11752 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
11755 if (!COMPARISON_P (x))
11757 output_operand_lossage ("operand is neither a constant nor a "
11758 "condition code, invalid operand code "
11762 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11763 if (ASSEMBLER_DIALECT == ASM_ATT)
11766 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
11769 /* Like above, but reverse condition */
11771 /* Check to see if argument to %c is really a constant
11772 and not a condition code which needs to be reversed. */
11773 if (!COMPARISON_P (x))
11775 output_operand_lossage ("operand is neither a constant nor a "
11776 "condition code, invalid operand "
11780 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
11783 if (!COMPARISON_P (x))
11785 output_operand_lossage ("operand is neither a constant nor a "
11786 "condition code, invalid operand "
11790 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11791 if (ASSEMBLER_DIALECT == ASM_ATT)
11794 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
11798 /* It doesn't actually matter what mode we use here, as we're
11799 only going to use this for printing. */
11800 x = adjust_address_nv (x, DImode, 8);
11808 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
11811 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
11814 int pred_val = INTVAL (XEXP (x, 0));
11816 if (pred_val < REG_BR_PROB_BASE * 45 / 100
11817 || pred_val > REG_BR_PROB_BASE * 55 / 100)
11819 int taken = pred_val > REG_BR_PROB_BASE / 2;
11820 int cputaken = final_forward_branch_p (current_output_insn) == 0;
11822 /* Emit hints only in the case default branch prediction
11823 heuristics would fail. */
11824 if (taken != cputaken)
11826 /* We use 3e (DS) prefix for taken branches and
11827 2e (CS) prefix for not taken branches. */
11829 fputs ("ds ; ", file);
11831 fputs ("cs ; ", file);
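/* These prefix bytes act as static branch hints only on the Pentium 4
   family; other processors simply ignore them, which is why they are
   emitted only when TARGET_BRANCH_PREDICTION_HINTS is set.  */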
11839 switch (GET_CODE (x))
11842 fputs ("neq", file);
11845 fputs ("eq", file);
11849 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
11853 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
11857 fputs ("le", file);
11861 fputs ("lt", file);
11864 fputs ("unord", file);
11867 fputs ("ord", file);
11870 fputs ("ueq", file);
11873 fputs ("nlt", file);
11876 fputs ("nle", file);
11879 fputs ("ule", file);
11882 fputs ("ult", file);
11885 fputs ("une", file);
11888 output_operand_lossage ("operand is not a condition code, "
11889 "invalid operand code 'Y'");
11896 fputs (" ; ", file);
11903 output_operand_lossage ("invalid operand code '%c'", code);
11908 print_reg (x, code, file);
11910 else if (MEM_P (x))
11912 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
11913 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
11914 && GET_MODE (x) != BLKmode)
11917 switch (GET_MODE_SIZE (GET_MODE (x)))
11919 case 1: size = "BYTE"; break;
11920 case 2: size = "WORD"; break;
11921 case 4: size = "DWORD"; break;
11922 case 8: size = "QWORD"; break;
11923 case 12: size = "TBYTE"; break;
11925 if (GET_MODE (x) == XFmode)
11930 case 32: size = "YMMWORD"; break;
11932 gcc_unreachable ();
11935 /* Check for explicit size override (codes 'b', 'w' and 'k') */
11938 else if (code == 'w')
11940 else if (code == 'k')
11943 fputs (size, file);
11944 fputs (" PTR ", file);
11948 /* Avoid (%rip) for call operands. */
11949 if (CONSTANT_ADDRESS_P (x) && code == 'P'
11950 && !CONST_INT_P (x))
11951 output_addr_const (file, x);
11952 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
11953 output_operand_lossage ("invalid constraints for operand");
11955 output_address (x);
11958 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
11963 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
11964 REAL_VALUE_TO_TARGET_SINGLE (r, l);
11966 if (ASSEMBLER_DIALECT == ASM_ATT)
11968 fprintf (file, "0x%08lx", (long unsigned int) l);
11971 /* These float cases don't actually occur as immediate operands. */
11972 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
11976 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
11977 fputs (dstr, file);
11980 else if (GET_CODE (x) == CONST_DOUBLE
11981 && GET_MODE (x) == XFmode)
11985 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
11986 fputs (dstr, file);
11991 /* We have patterns that allow zero sets of memory, for instance.
11992 In 64-bit mode, we should probably support all 8-byte vectors,
11993 since we can in fact encode that into an immediate. */
11994 if (GET_CODE (x) == CONST_VECTOR)
11996 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
12002 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
12004 if (ASSEMBLER_DIALECT == ASM_ATT)
12007 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
12008 || GET_CODE (x) == LABEL_REF)
12010 if (ASSEMBLER_DIALECT == ASM_ATT)
12013 fputs ("OFFSET FLAT:", file);
12016 if (CONST_INT_P (x))
12017 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
12019 output_pic_addr_const (file, x, code);
12021 output_addr_const (file, x);
12025 /* Print a memory operand whose address is ADDR. */
12028 print_operand_address (FILE *file, rtx addr)
12030 struct ix86_address parts;
12031 rtx base, index, disp;
12033 int ok = ix86_decompose_address (addr, &parts);
12038 index = parts.index;
12040 scale = parts.scale;
12048 if (ASSEMBLER_DIALECT == ASM_ATT)
12050 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
12053 gcc_unreachable ();
12056 /* Use the one-byte-shorter RIP-relative addressing in 64-bit mode. */
12057 if (TARGET_64BIT && !base && !index)
12061 if (GET_CODE (disp) == CONST
12062 && GET_CODE (XEXP (disp, 0)) == PLUS
12063 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12064 symbol = XEXP (XEXP (disp, 0), 0);
12066 if (GET_CODE (symbol) == LABEL_REF
12067 || (GET_CODE (symbol) == SYMBOL_REF
12068 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
12071 if (!base && !index)
12073 /* A displacement-only address requires special attention. */
12075 if (CONST_INT_P (disp))
12077 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
12078 fputs ("ds:", file);
12079 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
12082 output_pic_addr_const (file, disp, 0);
12084 output_addr_const (file, disp);
12088 if (ASSEMBLER_DIALECT == ASM_ATT)
12093 output_pic_addr_const (file, disp, 0);
12094 else if (GET_CODE (disp) == LABEL_REF)
12095 output_asm_label (disp);
12097 output_addr_const (file, disp);
12102 print_reg (base, 0, file);
12106 print_reg (index, 0, file);
12108 fprintf (file, ",%d", scale);
12114 rtx offset = NULL_RTX;
12118 /* Pull out the offset of a symbol; print any symbol itself. */
12119 if (GET_CODE (disp) == CONST
12120 && GET_CODE (XEXP (disp, 0)) == PLUS
12121 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12123 offset = XEXP (XEXP (disp, 0), 1);
12124 disp = gen_rtx_CONST (VOIDmode,
12125 XEXP (XEXP (disp, 0), 0));
12129 output_pic_addr_const (file, disp, 0);
12130 else if (GET_CODE (disp) == LABEL_REF)
12131 output_asm_label (disp);
12132 else if (CONST_INT_P (disp))
12135 output_addr_const (file, disp);
12141 print_reg (base, 0, file);
12144 if (INTVAL (offset) >= 0)
12146 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12150 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12157 print_reg (index, 0, file);
12159 fprintf (file, "*%d", scale);
12167 output_addr_const_extra (FILE *file, rtx x)
12171 if (GET_CODE (x) != UNSPEC)
12174 op = XVECEXP (x, 0, 0);
12175 switch (XINT (x, 1))
12177 case UNSPEC_GOTTPOFF:
12178 output_addr_const (file, op);
12179 /* FIXME: This might be @TPOFF in Sun ld. */
12180 fputs ("@gottpoff", file);
12183 output_addr_const (file, op);
12184 fputs ("@tpoff", file);
12186 case UNSPEC_NTPOFF:
12187 output_addr_const (file, op);
12189 fputs ("@tpoff", file);
12191 fputs ("@ntpoff", file);
12193 case UNSPEC_DTPOFF:
12194 output_addr_const (file, op);
12195 fputs ("@dtpoff", file);
12197 case UNSPEC_GOTNTPOFF:
12198 output_addr_const (file, op);
12200 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12201 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
12203 fputs ("@gotntpoff", file);
12205 case UNSPEC_INDNTPOFF:
12206 output_addr_const (file, op);
12207 fputs ("@indntpoff", file);
12210 case UNSPEC_MACHOPIC_OFFSET:
12211 output_addr_const (file, op);
12213 machopic_output_function_base_name (file);
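/* For instance, the DImode constant 0x100000002 splits into the SImode
   pair 2 (low) and 1 (high), and an offsettable DImode MEM splits into
   two SImode MEMs at byte offsets 0 and 4, matching the little-endian
   layout.  */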
12224 /* Split one or more DImode RTL references into pairs of SImode
12225 references. The RTL can be REG, offsettable MEM, integer constant, or
12226 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
12227 split and "num" is its length. lo_half and hi_half are output arrays
12228 that parallel "operands". */
12231 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12235 rtx op = operands[num];
12237 /* simplify_subreg refuses to split volatile memory addresses,
12238 but we still have to handle them. */
12241 lo_half[num] = adjust_address (op, SImode, 0);
12242 hi_half[num] = adjust_address (op, SImode, 4);
12246 lo_half[num] = simplify_gen_subreg (SImode, op,
12247 GET_MODE (op) == VOIDmode
12248 ? DImode : GET_MODE (op), 0);
12249 hi_half[num] = simplify_gen_subreg (SImode, op,
12250 GET_MODE (op) == VOIDmode
12251 ? DImode : GET_MODE (op), 4);
12255 /* Split one or more TImode RTL references into pairs of DImode
12256 references. The RTL can be REG, offsettable MEM, integer constant, or
12257 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
12258 split and "num" is its length. lo_half and hi_half are output arrays
12259 that parallel "operands". */
12262 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12266 rtx op = operands[num];
12268 /* simplify_subreg refuses to split volatile memory addresses, but we
12269 still have to handle them. */
12272 lo_half[num] = adjust_address (op, DImode, 0);
12273 hi_half[num] = adjust_address (op, DImode, 8);
12277 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
12278 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
12283 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
12284 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
12285 is the expression of the binary operation. The output may either be
12286 emitted here, or returned to the caller, like all output_* functions.
12288 There is no guarantee that the operands are the same mode, as they
12289 might be within FLOAT or FLOAT_EXTEND expressions. */
12291 #ifndef SYSV386_COMPAT
12292 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
12293 wants to fix the assemblers because that causes incompatibility
12294 with gcc. No-one wants to fix gcc because that causes
12295 incompatibility with assemblers... You can use the option of
12296 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
12297 #define SYSV386_COMPAT 1
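/* The incompatibility in question: AT&T-derived assemblers swap the
   sense of fsub/fsubr and fdiv/fdivr when the destination register is
   not %st(0).  With SYSV386_COMPAT defined to 1, the templates below
   emit the swapped spelling so that such assemblers assemble the
   opcode the compiler actually wants.  */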
12301 output_387_binary_op (rtx insn, rtx *operands)
12303 static char buf[40];
12306 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
12308 #ifdef ENABLE_CHECKING
12309 /* Even if we do not want to check the inputs, this documents the input
12310 constraints, which helps in understanding the following code. */
12311 if (STACK_REG_P (operands[0])
12312 && ((REG_P (operands[1])
12313 && REGNO (operands[0]) == REGNO (operands[1])
12314 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
12315 || (REG_P (operands[2])
12316 && REGNO (operands[0]) == REGNO (operands[2])
12317 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
12318 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
12321 gcc_assert (is_sse);
12324 switch (GET_CODE (operands[3]))
12327 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12328 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12336 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12337 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12345 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12346 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12354 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12355 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12363 gcc_unreachable ();
12370 strcpy (buf, ssep);
12371 if (GET_MODE (operands[0]) == SFmode)
12372 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
12374 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
12378 strcpy (buf, ssep + 1);
12379 if (GET_MODE (operands[0]) == SFmode)
12380 strcat (buf, "ss\t{%2, %0|%0, %2}");
12382 strcat (buf, "sd\t{%2, %0|%0, %2}");
12388 switch (GET_CODE (operands[3]))
12392 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
12394 rtx temp = operands[2];
12395 operands[2] = operands[1];
12396 operands[1] = temp;
12399 /* Now we know operands[0] == operands[1]. */
12401 if (MEM_P (operands[2]))
12407 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12409 if (STACK_TOP_P (operands[0]))
12410 /* How is it that we are storing to a dead operand[2]?
12411 Well, presumably operands[1] is dead too. We can't
12412 store the result to st(0) as st(0) gets popped on this
12413 instruction. Instead store to operands[2] (which I
12414 think has to be st(1)). st(1) will be popped later.
12415 gcc <= 2.8.1 didn't have this check and generated
12416 assembly code that the Unixware assembler rejected. */
12417 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12419 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12423 if (STACK_TOP_P (operands[0]))
12424 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12426 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12431 if (MEM_P (operands[1]))
12437 if (MEM_P (operands[2]))
12443 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12446 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
12447 derived assemblers, confusingly reverse the direction of
12448 the operation for fsub{r} and fdiv{r} when the
12449 destination register is not st(0). The Intel assembler
12450 doesn't have this brain damage. Read !SYSV386_COMPAT to
12451 figure out what the hardware really does. */
12452 if (STACK_TOP_P (operands[0]))
12453 p = "{p\t%0, %2|rp\t%2, %0}";
12455 p = "{rp\t%2, %0|p\t%0, %2}";
12457 if (STACK_TOP_P (operands[0]))
12458 /* As above for fmul/fadd, we can't store to st(0). */
12459 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12461 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12466 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
12469 if (STACK_TOP_P (operands[0]))
12470 p = "{rp\t%0, %1|p\t%1, %0}";
12472 p = "{p\t%1, %0|rp\t%0, %1}";
12474 if (STACK_TOP_P (operands[0]))
12475 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
12477 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
12482 if (STACK_TOP_P (operands[0]))
12484 if (STACK_TOP_P (operands[1]))
12485 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12487 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
12490 else if (STACK_TOP_P (operands[1]))
12493 p = "{\t%1, %0|r\t%0, %1}";
12495 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
12501 p = "{r\t%2, %0|\t%0, %2}";
12503 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12509 gcc_unreachable ();
12516 /* Return the mode needed for ENTITY in the optimize_mode_switching pass. */
12519 ix86_mode_needed (int entity, rtx insn)
12521 enum attr_i387_cw mode;
12523 /* The mode UNINITIALIZED is used to store the control word after a
12524 function call or ASM pattern. The mode ANY specifies that the function
12525 has no requirements on the control word and makes no changes in the
12526 bits we are interested in. */
12529 || (NONJUMP_INSN_P (insn)
12530 && (asm_noperands (PATTERN (insn)) >= 0
12531 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
12532 return I387_CW_UNINITIALIZED;
12534 if (recog_memoized (insn) < 0)
12535 return I387_CW_ANY;
12537 mode = get_attr_i387_cw (insn);
12542 if (mode == I387_CW_TRUNC)
12547 if (mode == I387_CW_FLOOR)
12552 if (mode == I387_CW_CEIL)
12557 if (mode == I387_CW_MASK_PM)
12562 gcc_unreachable ();
12565 return I387_CW_ANY;
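/* The rounding-control field occupies bits 10 and 11 of the x87
   control word: 00 = round to nearest, 01 = round down, 10 = round up,
   11 = truncate.  Hence the masks used below: 0x0c00 selects the whole
   field, 0x0400 requests round-down, 0x0800 round-up, and bit 5
   (0x0020) masks the precision exception for nearbyint.  */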
12568 /* Output code to initialize control word copies used by trunc?f?i and
12569 rounding patterns. CURRENT_MODE is set to the current control word,
12570 while NEW_MODE is set to the new control word. */
12573 emit_i387_cw_initialization (int mode)
12575 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
12578 enum ix86_stack_slot slot;
12580 rtx reg = gen_reg_rtx (HImode);
12582 emit_insn (gen_x86_fnstcw_1 (stored_mode));
12583 emit_move_insn (reg, copy_rtx (stored_mode));
12585 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
12586 || optimize_function_for_size_p (cfun))
12590 case I387_CW_TRUNC:
12591 /* round toward zero (truncate) */
12592 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
12593 slot = SLOT_CW_TRUNC;
12596 case I387_CW_FLOOR:
12597 /* round down toward -oo */
12598 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12599 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
12600 slot = SLOT_CW_FLOOR;
12604 /* round up toward +oo */
12605 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12606 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
12607 slot = SLOT_CW_CEIL;
12610 case I387_CW_MASK_PM:
12611 /* mask precision exception for nearbyint() */
12612 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12613 slot = SLOT_CW_MASK_PM;
12617 gcc_unreachable ();
12624 case I387_CW_TRUNC:
12625 /* round toward zero (truncate) */
12626 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
12627 slot = SLOT_CW_TRUNC;
12630 case I387_CW_FLOOR:
12631 /* round down toward -oo */
12632 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
12633 slot = SLOT_CW_FLOOR;
12637 /* round up toward +oo */
12638 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
12639 slot = SLOT_CW_CEIL;
12642 case I387_CW_MASK_PM:
12643 /* mask precision exception for nearbyint() */
12644 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12645 slot = SLOT_CW_MASK_PM;
12649 gcc_unreachable ();
12653 gcc_assert (slot < MAX_386_STACK_LOCALS);
12655 new_mode = assign_386_stack_local (HImode, slot);
12656 emit_move_insn (new_mode, reg);
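/* Roughly, a DImode conversion produced via the routine below looks
   like:

     fldcw   new_cw          # switch to round-toward-zero
     fistpll mem             # store the 64-bit integer and pop
     fldcw   saved_cw        # restore the previous control word

   (fisttp, when available, makes the control-word dance unnecessary.)  */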
12659 /* Output code for INSN to convert a float to a signed int. OPERANDS
12660 are the insn operands. The output may be [HSD]Imode and the input
12661 operand may be [SDX]Fmode. */
12664 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
12666 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12667 int dimode_p = GET_MODE (operands[0]) == DImode;
12668 int round_mode = get_attr_i387_cw (insn);
12670 /* Jump through a hoop or two for DImode, since the hardware has no
12671 non-popping instruction. We used to do this a different way, but
12672 that was somewhat fragile and broke with post-reload splitters. */
12673 if ((dimode_p || fisttp) && !stack_top_dies)
12674 output_asm_insn ("fld\t%y1", operands);
12676 gcc_assert (STACK_TOP_P (operands[1]));
12677 gcc_assert (MEM_P (operands[0]));
12678 gcc_assert (GET_MODE (operands[1]) != TFmode);
12681 output_asm_insn ("fisttp%Z0\t%0", operands);
12684 if (round_mode != I387_CW_ANY)
12685 output_asm_insn ("fldcw\t%3", operands);
12686 if (stack_top_dies || dimode_p)
12687 output_asm_insn ("fistp%Z0\t%0", operands);
12689 output_asm_insn ("fist%Z0\t%0", operands);
12690 if (round_mode != I387_CW_ANY)
12691 output_asm_insn ("fldcw\t%2", operands);
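/* When the assembler does not understand the "ffreep" mnemonic, the
   routine below emits the raw encoding instead: ffreep %st(i) is the
   two bytes 0xdf 0xc0+i, which is exactly what the ASM_SHORT word
   0xc<i>df produces on a little-endian target.  */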
12697 /* Output code for x87 ffreep insn. The OPNO argument, which may only
12698 have the values zero or one, indicates the ffreep insn's operand
12699 from the OPERANDS array. */
12701 static const char *
12702 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
12704 if (TARGET_USE_FFREEP)
12705 #ifdef HAVE_AS_IX86_FFREEP
12706 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
12709 static char retval[32];
12710 int regno = REGNO (operands[opno]);
12712 gcc_assert (FP_REGNO_P (regno));
12714 regno -= FIRST_STACK_REG;
12716 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
12721 return opno ? "fstp\t%y1" : "fstp\t%y0";
12725 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
12726 should be used. UNORDERED_P is true when fucom should be used. */
12729 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
12731 int stack_top_dies;
12732 rtx cmp_op0, cmp_op1;
12733 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
12737 cmp_op0 = operands[0];
12738 cmp_op1 = operands[1];
12742 cmp_op0 = operands[1];
12743 cmp_op1 = operands[2];
12748 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
12749 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
12750 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
12751 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
12753 if (GET_MODE (operands[0]) == SFmode)
12755 return &ucomiss[TARGET_AVX ? 0 : 1];
12757 return &comiss[TARGET_AVX ? 0 : 1];
12760 return &ucomisd[TARGET_AVX ? 0 : 1];
12762 return &comisd[TARGET_AVX ? 0 : 1];
12765 gcc_assert (STACK_TOP_P (cmp_op0));
12767 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12769 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
12771 if (stack_top_dies)
12773 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
12774 return output_387_ffreep (operands, 1);
12777 return "ftst\n\tfnstsw\t%0";
12780 if (STACK_REG_P (cmp_op1)
12782 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
12783 && REGNO (cmp_op1) != FIRST_STACK_REG)
12785 /* If the top of the 387 stack dies and the other operand is
12786 also a stack register that dies, then this must be an
12787 `fcompp' float compare. */
12791 /* There is no double-popping fcomi variant. Fortunately,
12792 eflags is immune from the fstp's cc clobbering. */
12794 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
12796 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
12797 return output_387_ffreep (operands, 0);
12802 return "fucompp\n\tfnstsw\t%0";
12804 return "fcompp\n\tfnstsw\t%0";
12809 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
12811 static const char * const alt[16] =
12813 "fcom%Z2\t%y2\n\tfnstsw\t%0",
12814 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
12815 "fucom%Z2\t%y2\n\tfnstsw\t%0",
12816 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
12818 "ficom%Z2\t%y2\n\tfnstsw\t%0",
12819 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
12823 "fcomi\t{%y1, %0|%0, %y1}",
12824 "fcomip\t{%y1, %0|%0, %y1}",
12825 "fucomi\t{%y1, %0|%0, %y1}",
12826 "fucomip\t{%y1, %0|%0, %y1}",
12837 mask = eflags_p << 3;
12838 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
12839 mask |= unordered_p << 1;
12840 mask |= stack_top_dies;
12842 gcc_assert (mask < 16);
12851 ix86_output_addr_vec_elt (FILE *file, int value)
12853 const char *directive = ASM_LONG;
12857 directive = ASM_QUAD;
12859 gcc_assert (!TARGET_64BIT);
12862 fprintf (file, "%s" LPREFIX "%d\n", directive, value);
12866 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
12868 const char *directive = ASM_LONG;
12871 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
12872 directive = ASM_QUAD;
12874 gcc_assert (!TARGET_64BIT);
12876 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
12877 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
12878 fprintf (file, "%s" LPREFIX "%d-" LPREFIX "%d\n",
12879 directive, value, rel);
12880 else if (HAVE_AS_GOTOFF_IN_DATA)
12881 fprintf (file, ASM_LONG LPREFIX "%d@GOTOFF\n", value);
12883 else if (TARGET_MACHO)
12885 fprintf (file, ASM_LONG LPREFIX "%d-", value);
12886 machopic_output_function_base_name (file);
12891 asm_fprintf (file, ASM_LONG "%U%s+[.-" LPREFIX "%d]\n",
12892 GOT_SYMBOL_NAME, value);
12895 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
for the target. */
12899 ix86_expand_clear (rtx dest)
12903 /* We play register width games, which are only valid after reload. */
12904 gcc_assert (reload_completed);
12906 /* Avoid HImode and its attendant prefix byte. */
12907 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
12908 dest = gen_rtx_REG (SImode, REGNO (dest));
12909 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
12911 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
12912 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
12914 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12915 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
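/* "xor reg, reg" is preferred because it is shorter (2 bytes versus 5
   for "mov $0, %eax") and breaks false dependencies on the previous
   register contents, but unlike mov it clobbers the flags -- hence the
   explicit CLOBBER of FLAGS_REG attached here.  */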
12921 /* X is an unchanging MEM. If it is a constant pool reference, return
12922 the constant pool rtx, else NULL. */
12925 maybe_get_pool_constant (rtx x)
12927 x = ix86_delegitimize_address (XEXP (x, 0));
12929 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
12930 return get_pool_constant (x);
12936 ix86_expand_move (enum machine_mode mode, rtx operands[])
12939 enum tls_model model;
12944 if (GET_CODE (op1) == SYMBOL_REF)
12946 model = SYMBOL_REF_TLS_MODEL (op1);
12949 op1 = legitimize_tls_address (op1, model, true);
12950 op1 = force_operand (op1, op0);
12954 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
12955 && SYMBOL_REF_DLLIMPORT_P (op1))
12956 op1 = legitimize_dllimport_symbol (op1, false);
12958 else if (GET_CODE (op1) == CONST
12959 && GET_CODE (XEXP (op1, 0)) == PLUS
12960 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
12962 rtx addend = XEXP (XEXP (op1, 0), 1);
12963 rtx symbol = XEXP (XEXP (op1, 0), 0);
12966 model = SYMBOL_REF_TLS_MODEL (symbol);
12968 tmp = legitimize_tls_address (symbol, model, true);
12969 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
12970 && SYMBOL_REF_DLLIMPORT_P (symbol))
12971 tmp = legitimize_dllimport_symbol (symbol, true);
12975 tmp = force_operand (tmp, NULL);
12976 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
12977 op0, 1, OPTAB_DIRECT);
12983 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
12985 if (TARGET_MACHO && !TARGET_64BIT)
12990 rtx temp = ((reload_in_progress
12991 || ((op0 && REG_P (op0))
12993 ? op0 : gen_reg_rtx (Pmode));
12994 op1 = machopic_indirect_data_reference (op1, temp);
12995 op1 = machopic_legitimize_pic_address (op1, mode,
12996 temp == op1 ? 0 : temp);
12998 else if (MACHOPIC_INDIRECT)
12999 op1 = machopic_indirect_data_reference (op1, 0);
13007 op1 = force_reg (Pmode, op1);
13008 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
13010 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
13011 op1 = legitimize_pic_address (op1, reg);
13020 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
13021 || !push_operand (op0, mode))
13023 op1 = force_reg (mode, op1);
13025 if (push_operand (op0, mode)
13026 && ! general_no_elim_operand (op1, mode))
13027 op1 = copy_to_mode_reg (mode, op1);
13029 /* Force large constants in 64-bit compilation into a register
13030 to get them CSEed. */
13031 if (can_create_pseudo_p ()
13032 && (mode == DImode) && TARGET_64BIT
13033 && immediate_operand (op1, mode)
13034 && !x86_64_zext_immediate_operand (op1, VOIDmode)
13035 && !register_operand (op0, mode)
13037 op1 = copy_to_mode_reg (mode, op1);
13039 if (can_create_pseudo_p ()
13040 && FLOAT_MODE_P (mode)
13041 && GET_CODE (op1) == CONST_DOUBLE)
13043 /* If we are loading a floating point constant to a register,
13044 force the value to memory now, since we'll get better code
13045 out of the back end. */
13047 op1 = validize_mem (force_const_mem (mode, op1));
13048 if (!register_operand (op0, mode))
13050 rtx temp = gen_reg_rtx (mode);
13051 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
13052 emit_move_insn (op0, temp);
13058 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13062 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
13064 rtx op0 = operands[0], op1 = operands[1];
13065 unsigned int align = GET_MODE_ALIGNMENT (mode);
13067 /* Force constants other than zero into memory. We do not know how
13068 the instructions used to build constants modify the upper 64 bits
13069 of the register; once we have that information we may be able
13070 to handle some of them more efficiently. */
13071 if (can_create_pseudo_p ()
13072 && register_operand (op0, mode)
13073 && (CONSTANT_P (op1)
13074 || (GET_CODE (op1) == SUBREG
13075 && CONSTANT_P (SUBREG_REG (op1))))
13076 && !standard_sse_constant_p (op1))
13077 op1 = validize_mem (force_const_mem (mode, op1));
13079 /* We need to check memory alignment for SSE modes since an attribute
13080 can make operands unaligned. */
13081 if (can_create_pseudo_p ()
13082 && SSE_REG_MODE_P (mode)
13083 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
13084 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
13088 /* ix86_expand_vector_move_misalign() does not like constants ... */
13089 if (CONSTANT_P (op1)
13090 || (GET_CODE (op1) == SUBREG
13091 && CONSTANT_P (SUBREG_REG (op1))))
13092 op1 = validize_mem (force_const_mem (mode, op1));
13094 /* ... nor both arguments in memory. */
13095 if (!register_operand (op0, mode)
13096 && !register_operand (op1, mode))
13097 op1 = force_reg (mode, op1);
13099 tmp[0] = op0; tmp[1] = op1;
13100 ix86_expand_vector_move_misalign (mode, tmp);
13104 /* Make operand1 a register if it isn't already. */
13105 if (can_create_pseudo_p ()
13106 && !register_operand (op0, mode)
13107 && !register_operand (op1, mode))
13109 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
13113 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13116 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
13117 straight to ix86_expand_vector_move. */
13118 /* Code generation for scalar reg-reg moves of single and double precision data:
13119 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
13123 if (x86_sse_partial_reg_dependency == true)
13128 Code generation for scalar loads of double precision data:
13129 if (x86_sse_split_regs == true)
13130 movlpd mem, reg (gas syntax)
13134 Code generation for unaligned packed loads of single precision data
13135 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
13136 if (x86_sse_unaligned_move_optimal)
13139 if (x86_sse_partial_reg_dependency == true)
13151 Code generation for unaligned packed loads of double precision data
13152 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
13153 if (x86_sse_unaligned_move_optimal)
13156 if (x86_sse_split_regs == true)
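   As a rough illustration (AT&T syntax), an unaligned V2DF load under
   these tunings, with the source at (%eax), comes out as:
     unaligned-move-optimal:  movupd  (%eax), %xmm0
     split-regs:              movlpd  (%eax), %xmm0
                              movhpd  8(%eax), %xmm0
     otherwise:               clear %xmm0 first (e.g. xorpd) to break
                              the dependency, then load both halves.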
13169 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
13178 switch (GET_MODE_CLASS (mode))
13180 case MODE_VECTOR_INT:
13182 switch (GET_MODE_SIZE (mode))
13185 op0 = gen_lowpart (V16QImode, op0);
13186 op1 = gen_lowpart (V16QImode, op1);
13187 emit_insn (gen_avx_movdqu (op0, op1));
13190 op0 = gen_lowpart (V32QImode, op0);
13191 op1 = gen_lowpart (V32QImode, op1);
13192 emit_insn (gen_avx_movdqu256 (op0, op1));
13195 gcc_unreachable ();
13198 case MODE_VECTOR_FLOAT:
13199 op0 = gen_lowpart (mode, op0);
13200 op1 = gen_lowpart (mode, op1);
13205 emit_insn (gen_avx_movups (op0, op1));
13208 emit_insn (gen_avx_movups256 (op0, op1));
13211 emit_insn (gen_avx_movupd (op0, op1));
13214 emit_insn (gen_avx_movupd256 (op0, op1));
13217 gcc_unreachable ();
13222 gcc_unreachable ();
13230 /* If we're optimizing for size, movups is the smallest. */
13231 if (optimize_insn_for_size_p ())
13233 op0 = gen_lowpart (V4SFmode, op0);
13234 op1 = gen_lowpart (V4SFmode, op1);
13235 emit_insn (gen_sse_movups (op0, op1));
13239       /* ??? If we have typed data, then it would appear that using
13240          movdqu is the only way to get unaligned data loaded with
13241          integer type.  */
13242 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13244 op0 = gen_lowpart (V16QImode, op0);
13245 op1 = gen_lowpart (V16QImode, op1);
13246 emit_insn (gen_sse2_movdqu (op0, op1));
13250 if (TARGET_SSE2 && mode == V2DFmode)
13254 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13256 op0 = gen_lowpart (V2DFmode, op0);
13257 op1 = gen_lowpart (V2DFmode, op1);
13258 emit_insn (gen_sse2_movupd (op0, op1));
13262 /* When SSE registers are split into halves, we can avoid
13263 writing to the top half twice. */
13264 if (TARGET_SSE_SPLIT_REGS)
13266 emit_clobber (op0);
13271 /* ??? Not sure about the best option for the Intel chips.
13272 The following would seem to satisfy; the register is
13273 entirely cleared, breaking the dependency chain. We
13274 then store to the upper half, with a dependency depth
13275 of one. A rumor has it that Intel recommends two movsd
13276 followed by an unpacklpd, but this is unconfirmed. And
13277 given that the dependency depth of the unpacklpd would
13278 still be one, I'm not sure why this would be better. */
13279 zero = CONST0_RTX (V2DFmode);
13282 m = adjust_address (op1, DFmode, 0);
13283 emit_insn (gen_sse2_loadlpd (op0, zero, m));
13284 m = adjust_address (op1, DFmode, 8);
13285 emit_insn (gen_sse2_loadhpd (op0, op0, m));
13289 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13291 op0 = gen_lowpart (V4SFmode, op0);
13292 op1 = gen_lowpart (V4SFmode, op1);
13293 emit_insn (gen_sse_movups (op0, op1));
13297 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
13298 emit_move_insn (op0, CONST0_RTX (mode));
13300 emit_clobber (op0);
13302 if (mode != V4SFmode)
13303 op0 = gen_lowpart (V4SFmode, op0);
13304 m = adjust_address (op1, V2SFmode, 0);
13305 emit_insn (gen_sse_loadlps (op0, op0, m));
13306 m = adjust_address (op1, V2SFmode, 8);
13307 emit_insn (gen_sse_loadhps (op0, op0, m));
13310 else if (MEM_P (op0))
13312 /* If we're optimizing for size, movups is the smallest. */
13313 if (optimize_insn_for_size_p ())
13315 op0 = gen_lowpart (V4SFmode, op0);
13316 op1 = gen_lowpart (V4SFmode, op1);
13317 emit_insn (gen_sse_movups (op0, op1));
13321       /* ??? Similar to above, only less clear because of
13322          "typeless stores".  */
13323 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
13324 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13326 op0 = gen_lowpart (V16QImode, op0);
13327 op1 = gen_lowpart (V16QImode, op1);
13328 emit_insn (gen_sse2_movdqu (op0, op1));
13332 if (TARGET_SSE2 && mode == V2DFmode)
13334 m = adjust_address (op0, DFmode, 0);
13335 emit_insn (gen_sse2_storelpd (m, op1));
13336 m = adjust_address (op0, DFmode, 8);
13337 emit_insn (gen_sse2_storehpd (m, op1));
13341 if (mode != V4SFmode)
13342 op1 = gen_lowpart (V4SFmode, op1);
13343 m = adjust_address (op0, V2SFmode, 0);
13344 emit_insn (gen_sse_storelps (m, op1));
13345 m = adjust_address (op0, V2SFmode, 8);
13346 emit_insn (gen_sse_storehps (m, op1));
13350 gcc_unreachable ();
13353 /* Expand a push in MODE. This is some mode for which we do not support
13354 proper push instructions, at least from the registers that we expect
13355 the value to live in. */
13358 ix86_expand_push (enum machine_mode mode, rtx x)
13362 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
13363 GEN_INT (-GET_MODE_SIZE (mode)),
13364 stack_pointer_rtx, 1, OPTAB_DIRECT);
13365 if (tmp != stack_pointer_rtx)
13366 emit_move_insn (stack_pointer_rtx, tmp);
13368 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
13370   /* When we push an operand onto the stack, it has to be aligned at least
13371      at the function argument boundary.  However since we don't have
13372      the argument type, we can't determine the actual argument
13373      boundary.  */
13374 emit_move_insn (tmp, x);
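/* Illustrative expansion for a 16-byte mode on ia32 (AT&T syntax):

     subl  $16, %esp
     <move the value to (%esp)>

   i.e. the stack pointer is adjusted explicitly and the value is then
   stored through it, instead of using a real push instruction.  */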
13377 /* Helper function of ix86_fixup_binary_operands to canonicalize
13378 operand order. Returns true if the operands should be swapped. */
13381 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
13384 rtx dst = operands[0];
13385 rtx src1 = operands[1];
13386 rtx src2 = operands[2];
13388 /* If the operation is not commutative, we can't do anything. */
13389 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
13392 /* Highest priority is that src1 should match dst. */
13393 if (rtx_equal_p (dst, src1))
13395 if (rtx_equal_p (dst, src2))
13398 /* Next highest priority is that immediate constants come second. */
13399 if (immediate_operand (src2, mode))
13401 if (immediate_operand (src1, mode))
13404 /* Lowest priority is that memory references should come second. */
13414 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
13415 destination to use for the operation. If different from the true
13416 destination in operands[0], a copy operation will be required. */
13419 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
13422 rtx dst = operands[0];
13423 rtx src1 = operands[1];
13424 rtx src2 = operands[2];
13426 /* Canonicalize operand order. */
13427 if (ix86_swap_binary_operands_p (code, mode, operands))
13431 /* It is invalid to swap operands of different modes. */
13432 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
13439 /* Both source operands cannot be in memory. */
13440 if (MEM_P (src1) && MEM_P (src2))
13442 /* Optimization: Only read from memory once. */
13443 if (rtx_equal_p (src1, src2))
13445 src2 = force_reg (mode, src2);
13449 src2 = force_reg (mode, src2);
13452 /* If the destination is memory, and we do not have matching source
13453 operands, do things in registers. */
13454 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13455 dst = gen_reg_rtx (mode);
13457 /* Source 1 cannot be a constant. */
13458 if (CONSTANT_P (src1))
13459 src1 = force_reg (mode, src1);
13461 /* Source 1 cannot be a non-matching memory. */
13462 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13463 src1 = force_reg (mode, src1);
13465 operands[1] = src1;
13466 operands[2] = src2;
13470 /* Similarly, but assume that the destination has already been
13471 set up properly. */
13474 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
13475 enum machine_mode mode, rtx operands[])
13477 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
13478 gcc_assert (dst == operands[0]);
13481 /* Attempt to expand a binary operator.  Make the expansion closer to the
13482    actual machine than just general_operand, which would allow 3 separate
13483    memory references (one output, two input) in a single insn.  */
13486 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
13489 rtx src1, src2, dst, op, clob;
13491 dst = ix86_fixup_binary_operands (code, mode, operands);
13492 src1 = operands[1];
13493 src2 = operands[2];
13495 /* Emit the instruction. */
13497 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
13498 if (reload_in_progress)
13500 /* Reload doesn't know about the flags register, and doesn't know that
13501 it doesn't want to clobber it. We can only do this with PLUS. */
13502 gcc_assert (code == PLUS);
13507 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13508 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13511 /* Fix up the destination if needed. */
13512 if (dst != operands[0])
13513 emit_move_insn (operands[0], dst);
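/* For instance, expanding an SImode addition here yields a pattern of
   the shape

     (parallel [(set (reg:SI dst) (plus:SI (reg:SI src1) (reg:SI src2)))
                (clobber (reg:CC FLAGS_REG))])

   so that later passes know the flags register is overwritten.  */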
13516 /* Return TRUE or FALSE depending on whether the binary operator meets the
13517 appropriate constraints. */
13520 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
13523 rtx dst = operands[0];
13524 rtx src1 = operands[1];
13525 rtx src2 = operands[2];
13527 /* Both source operands cannot be in memory. */
13528 if (MEM_P (src1) && MEM_P (src2))
13531 /* Canonicalize operand order for commutative operators. */
13532 if (ix86_swap_binary_operands_p (code, mode, operands))
13539 /* If the destination is memory, we must have a matching source operand. */
13540 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13543 /* Source 1 cannot be a constant. */
13544 if (CONSTANT_P (src1))
13547 /* Source 1 cannot be a non-matching memory. */
13548 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13554 /* Attempt to expand a unary operator.  Make the expansion closer to the
13555    actual machine than just general_operand, which would allow 2 separate
13556    memory references (one output, one input) in a single insn.  */
13559 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
13562 int matching_memory;
13563 rtx src, dst, op, clob;
13568 /* If the destination is memory, and we do not have matching source
13569 operands, do things in registers. */
13570 matching_memory = 0;
13573 if (rtx_equal_p (dst, src))
13574 matching_memory = 1;
13576 dst = gen_reg_rtx (mode);
13579 /* When source operand is memory, destination must match. */
13580 if (MEM_P (src) && !matching_memory)
13581 src = force_reg (mode, src);
13583 /* Emit the instruction. */
13585 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
13586 if (reload_in_progress || code == NOT)
13588 /* Reload doesn't know about the flags register, and doesn't know that
13589 it doesn't want to clobber it. */
13590 gcc_assert (code == NOT);
13595 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13596 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13599 /* Fix up the destination if needed. */
13600 if (dst != operands[0])
13601 emit_move_insn (operands[0], dst);
13604 #define LEA_SEARCH_THRESHOLD 12
13606 /* Search backward for non-agu definition of register number REGNO1
13607 or register number REGNO2 in INSN's basic block until
13608 1. Pass LEA_SEARCH_THRESHOLD instructions, or
13609 2. Reach BB boundary, or
13610 3. Reach agu definition.
13611 Returns the distance between the non-agu definition point and INSN.
13612 If no definition point, returns -1. */
13615 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
13618 basic_block bb = BLOCK_FOR_INSN (insn);
13621 enum attr_type insn_type;
13623 if (insn != BB_HEAD (bb))
13625 rtx prev = PREV_INSN (insn);
13626 while (prev && distance < LEA_SEARCH_THRESHOLD)
13631 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13632 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13633 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13634 && (regno1 == DF_REF_REGNO (*def_rec)
13635 || regno2 == DF_REF_REGNO (*def_rec)))
13637 insn_type = get_attr_type (prev);
13638 if (insn_type != TYPE_LEA)
13642 if (prev == BB_HEAD (bb))
13644 prev = PREV_INSN (prev);
13648 if (distance < LEA_SEARCH_THRESHOLD)
13652 bool simple_loop = false;
13654 FOR_EACH_EDGE (e, ei, bb->preds)
13657 simple_loop = true;
13663 rtx prev = BB_END (bb);
13666 && distance < LEA_SEARCH_THRESHOLD)
13671 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13672 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13673 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13674 && (regno1 == DF_REF_REGNO (*def_rec)
13675 || regno2 == DF_REF_REGNO (*def_rec)))
13677 insn_type = get_attr_type (prev);
13678 if (insn_type != TYPE_LEA)
13682 prev = PREV_INSN (prev);
13690 /* get_attr_type may modify recog data. We want to make sure
13691 that recog data is valid for instruction INSN, on which
13692 distance_non_agu_define is called. INSN is unchanged here. */
13693 extract_insn_cached (insn);
13697 /* Return the distance between INSN and the next insn that uses
13698    register number REGNO0 in a memory address.  Return -1 if no such
13699    use is found within LEA_SEARCH_THRESHOLD or if REGNO0 is set.  */
13702 distance_agu_use (unsigned int regno0, rtx insn)
13704 basic_block bb = BLOCK_FOR_INSN (insn);
13709 if (insn != BB_END (bb))
13711 rtx next = NEXT_INSN (insn);
13712 while (next && distance < LEA_SEARCH_THRESHOLD)
13718 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13719 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13720 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13721 && regno0 == DF_REF_REGNO (*use_rec))
13723 /* Return DISTANCE if OP0 is used in memory
13724 address in NEXT. */
13728 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13729 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13730 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13731 && regno0 == DF_REF_REGNO (*def_rec))
13733 /* Return -1 if OP0 is set in NEXT. */
13737 if (next == BB_END (bb))
13739 next = NEXT_INSN (next);
13743 if (distance < LEA_SEARCH_THRESHOLD)
13747 bool simple_loop = false;
13749 FOR_EACH_EDGE (e, ei, bb->succs)
13752 simple_loop = true;
13758 rtx next = BB_HEAD (bb);
13761 && distance < LEA_SEARCH_THRESHOLD)
13767 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13768 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13769 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13770 && regno0 == DF_REF_REGNO (*use_rec))
13772 /* Return DISTANCE if OP0 is used in memory
13773 address in NEXT. */
13777 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13778 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13779 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13780 && regno0 == DF_REF_REGNO (*def_rec))
13782 /* Return -1 if OP0 is set in NEXT. */
13787 next = NEXT_INSN (next);
13795 /* Define this macro to tune LEA priority vs. ADD; it takes effect when
13796    there is a dilemma of choosing between LEA and ADD.
13797    Negative value: ADD is preferred over LEA.
13798    Zero: neutral.
13799    Positive value: LEA is preferred over ADD.  */
13800 #define IX86_LEA_PRIORITY 2
13802 /* Return true if it is ok to optimize an ADD operation to a LEA
13803    operation to avoid flag register consumption.  For processors
13804    like ATOM, if the destination register of LEA holds an actual
13805    address which will be used soon, LEA is better and otherwise ADD
13806    is better.  */
13809 ix86_lea_for_add_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13810 rtx insn, rtx operands[])
13812 unsigned int regno0 = true_regnum (operands[0]);
13813 unsigned int regno1 = true_regnum (operands[1]);
13814 unsigned int regno2;
13816 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
13817 return regno0 != regno1;
13819 regno2 = true_regnum (operands[2]);
13821 /* If a = b + c, (a!=b && a!=c), must use lea form. */
13822 if (regno0 != regno1 && regno0 != regno2)
13826 int dist_define, dist_use;
13827 dist_define = distance_non_agu_define (regno1, regno2, insn);
13828 if (dist_define <= 0)
13831       /* If this insn has both backward non-agu dependence and forward
13832          agu dependence, the one with the shorter distance takes effect.  */
13833 dist_use = distance_agu_use (regno0, insn);
13835 || (dist_define + IX86_LEA_PRIORITY) < dist_use)
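/* Illustrative application of the heuristic: with IX86_LEA_PRIORITY
   being 2, if the inputs were set by a non-AGU insn 1 instruction back
   (dist_define == 1) and the result feeds a memory address 4
   instructions ahead (dist_use == 4), then 1 + 2 < 4 holds and the LEA
   form is chosen.  */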
13842 /* Return true if destination reg of SET_BODY is shift count of
13843    USE_BODY.  */
13846 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
13852 /* Retrieve destination of SET_BODY. */
13853 switch (GET_CODE (set_body))
13856 set_dest = SET_DEST (set_body);
13857 if (!set_dest || !REG_P (set_dest))
13861 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
13862 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
13870 /* Retrieve shift count of USE_BODY. */
13871 switch (GET_CODE (use_body))
13874 shift_rtx = XEXP (use_body, 1);
13877 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
13878 if (ix86_dep_by_shift_count_body (set_body,
13879 XVECEXP (use_body, 0, i)))
13887 && (GET_CODE (shift_rtx) == ASHIFT
13888 || GET_CODE (shift_rtx) == LSHIFTRT
13889 || GET_CODE (shift_rtx) == ASHIFTRT
13890 || GET_CODE (shift_rtx) == ROTATE
13891 || GET_CODE (shift_rtx) == ROTATERT))
13893 rtx shift_count = XEXP (shift_rtx, 1);
13895 /* Return true if shift count is dest of SET_BODY. */
13896 if (REG_P (shift_count)
13897 && true_regnum (set_dest) == true_regnum (shift_count))
13904 /* Return true if destination reg of SET_INSN is shift count of
13905    USE_INSN.  */
13908 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
13910 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
13911 PATTERN (use_insn));
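/* For example (AT&T syntax), given

     set insn:  movl %eax, %ecx
     use insn:  sall %cl, %edx

   the destination of the set is %ecx, which also supplies the shift
   count of the use, so this function returns true.  */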
13914 /* Return TRUE or FALSE depending on whether the unary operator meets the
13915 appropriate constraints. */
13918 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13919 enum machine_mode mode ATTRIBUTE_UNUSED,
13920 rtx operands[2] ATTRIBUTE_UNUSED)
13922   /* If one of the operands is memory, source and destination must match.  */
13923 if ((MEM_P (operands[0])
13924 || MEM_P (operands[1]))
13925 && ! rtx_equal_p (operands[0], operands[1]))
13930 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
13931 are ok, keeping in mind the possible movddup alternative. */
13934 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
13936 if (MEM_P (operands[0]))
13937 return rtx_equal_p (operands[0], operands[1 + high]);
13938 if (MEM_P (operands[1]) && MEM_P (operands[2]))
13939 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
13943 /* Post-reload splitter for converting an SF or DFmode value in an
13944 SSE register into an unsigned SImode. */
13947 ix86_split_convert_uns_si_sse (rtx operands[])
13949 enum machine_mode vecmode;
13950 rtx value, large, zero_or_two31, input, two31, x;
13952 large = operands[1];
13953 zero_or_two31 = operands[2];
13954 input = operands[3];
13955 two31 = operands[4];
13956 vecmode = GET_MODE (large);
13957 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
13959 /* Load up the value into the low element. We must ensure that the other
13960 elements are valid floats -- zero is the easiest such value. */
13963 if (vecmode == V4SFmode)
13964 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
13966 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
13970 input = gen_rtx_REG (vecmode, REGNO (input));
13971 emit_move_insn (value, CONST0_RTX (vecmode));
13972 if (vecmode == V4SFmode)
13973 emit_insn (gen_sse_movss (value, value, input));
13975 emit_insn (gen_sse2_movsd (value, value, input));
13978 emit_move_insn (large, two31);
13979 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
13981 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
13982 emit_insn (gen_rtx_SET (VOIDmode, large, x));
13984 x = gen_rtx_AND (vecmode, zero_or_two31, large);
13985 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
13987 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
13988 emit_insn (gen_rtx_SET (VOIDmode, value, x));
13990 large = gen_rtx_REG (V4SImode, REGNO (large));
13991 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
13993 x = gen_rtx_REG (V4SImode, REGNO (value));
13994 if (vecmode == V4SFmode)
13995 emit_insn (gen_sse2_cvttps2dq (x, value));
13997 emit_insn (gen_sse2_cvttpd2dq (x, value));
14000 emit_insn (gen_xorv4si3 (value, value, large));
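/* Worked example for the split above, assuming the V4SFmode input
   3e9f: 3000000000 >= 2^31, so the comparison mask is all ones, 2^31
   is masked in and subtracted giving 852516352.0, cvttps2dq produces
   852516352, and the final xor with the mask shifted into the sign bit
   (0x80000000) adds 2^31 back, yielding 3000000000.  */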
14003 /* Convert an unsigned DImode value into a DFmode, using only SSE.
14004 Expects the 64-bit DImode to be supplied in a pair of integral
14005 registers. Requires SSE2; will use SSE3 if available. For x86_32,
14006 -mfpmath=sse, !optimize_size only. */
14009 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
14011 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
14012 rtx int_xmm, fp_xmm;
14013 rtx biases, exponents;
14016 int_xmm = gen_reg_rtx (V4SImode);
14017 if (TARGET_INTER_UNIT_MOVES)
14018 emit_insn (gen_movdi_to_sse (int_xmm, input));
14019 else if (TARGET_SSE_SPLIT_REGS)
14021 emit_clobber (int_xmm);
14022 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
14026 x = gen_reg_rtx (V2DImode);
14027 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
14028 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
14031 x = gen_rtx_CONST_VECTOR (V4SImode,
14032 gen_rtvec (4, GEN_INT (0x43300000UL),
14033 GEN_INT (0x45300000UL),
14034 const0_rtx, const0_rtx));
14035 exponents = validize_mem (force_const_mem (V4SImode, x));
14037 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
14038 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
14040 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
14041 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
14042 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
14043 (0x1.0p84 + double(fp_value_hi_xmm)).
14044 Note these exponents differ by 32. */
14046 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
14048 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
14049 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
14050 real_ldexp (&bias_lo_rvt, &dconst1, 52);
14051 real_ldexp (&bias_hi_rvt, &dconst1, 84);
14052 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
14053 x = const_double_from_real_value (bias_hi_rvt, DFmode);
14054 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
14055 biases = validize_mem (force_const_mem (V2DFmode, biases));
14056 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
14058 /* Add the upper and lower DFmode values together. */
14060 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
14063 x = copy_to_mode_reg (V2DFmode, fp_xmm);
14064 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
14065 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
14068 ix86_expand_vector_extract (false, target, fp_xmm, 0);
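/* Worked example, assuming the DImode input 0x0000000100000002
   (2^32 + 2): the low word 2 juxtaposed with 0x43300000 is the double
   0x1.0p52 + 2, and the high word 1 juxtaposed with 0x45300000 is
   0x1.0p84 + 2^32.  Subtracting the 0x1.0p52 and 0x1.0p84 biases
   leaves 2.0 and 4294967296.0, whose sum is the expected
   4294967298.0.  */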
14071 /* Not used, but eases macroization of patterns. */
14073 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
14074 rtx input ATTRIBUTE_UNUSED)
14076 gcc_unreachable ();
14079 /* Convert an unsigned SImode value into a DFmode. Only currently used
14080 for SSE, but applicable anywhere. */
14083 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
14085 REAL_VALUE_TYPE TWO31r;
14088 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
14089 NULL, 1, OPTAB_DIRECT);
14091 fp = gen_reg_rtx (DFmode);
14092 emit_insn (gen_floatsidf2 (fp, x));
14094 real_ldexp (&TWO31r, &dconst1, 31);
14095 x = const_double_from_real_value (TWO31r, DFmode);
14097 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
14099 emit_move_insn (target, x);
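/* Worked example, assuming the input 0xffffffff (4294967295):
   adding -2^31 with wraparound gives the signed value 2147483647,
   which floatsidf converts exactly; adding the 0x1.0p31 bias back
   yields 4294967295.0.  */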
14102 /* Convert a signed DImode value into a DFmode. Only used for SSE in
14103 32-bit mode; otherwise we have a direct convert instruction. */
14106 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
14108 REAL_VALUE_TYPE TWO32r;
14109 rtx fp_lo, fp_hi, x;
14111 fp_lo = gen_reg_rtx (DFmode);
14112 fp_hi = gen_reg_rtx (DFmode);
14114 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
14116 real_ldexp (&TWO32r, &dconst1, 32);
14117 x = const_double_from_real_value (TWO32r, DFmode);
14118 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
14120 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
14122 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
14125 emit_move_insn (target, x);
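/* The computation above is value = (double) (signed) hi * 2^32
   + (double) (unsigned) lo.  E.g. for the input -1 (hi == -1,
   lo == 0xffffffff): -1.0 * 2^32 + 4294967295.0 == -1.0.  */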
14128 /* Convert an unsigned SImode value into a SFmode, using only SSE.
14129 For x86_32, -mfpmath=sse, !optimize_size only. */
14131 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
14133 REAL_VALUE_TYPE ONE16r;
14134 rtx fp_hi, fp_lo, int_hi, int_lo, x;
14136 real_ldexp (&ONE16r, &dconst1, 16);
14137 x = const_double_from_real_value (ONE16r, SFmode);
14138 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
14139 NULL, 0, OPTAB_DIRECT);
14140 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
14141 NULL, 0, OPTAB_DIRECT);
14142 fp_hi = gen_reg_rtx (SFmode);
14143 fp_lo = gen_reg_rtx (SFmode);
14144 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
14145 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
14146 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
14148 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
14150 if (!rtx_equal_p (target, fp_hi))
14151 emit_move_insn (target, fp_hi);
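/* The computation above is value = (float) (x >> 16) * 2^16
   + (float) (x & 0xffff); each 16-bit half converts to SFmode
   exactly.  E.g. for the input 0x00030005:
   3.0 * 65536.0 + 5.0 == 196613.0.  */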
14154 /* A subroutine of ix86_build_signbit_mask.  If VECT is true,
14155    then replicate the value for all elements of the vector
14156    register.  */
14159 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
14166 v = gen_rtvec (4, value, value, value, value);
14167 return gen_rtx_CONST_VECTOR (V4SImode, v);
14171 v = gen_rtvec (2, value, value);
14172 return gen_rtx_CONST_VECTOR (V2DImode, v);
14176 v = gen_rtvec (4, value, value, value, value);
14178 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
14179 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
14180 return gen_rtx_CONST_VECTOR (V4SFmode, v);
14184 v = gen_rtvec (2, value, value);
14186 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
14187 return gen_rtx_CONST_VECTOR (V2DFmode, v);
14190 gcc_unreachable ();
14194 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
14195 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
14196 for an SSE register. If VECT is true, then replicate the mask for
14197 all elements of the vector register. If INVERT is true, then create
14198 a mask excluding the sign bit. */
14201 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
14203 enum machine_mode vec_mode, imode;
14204 HOST_WIDE_INT hi, lo;
14209 /* Find the sign bit, sign extended to 2*HWI. */
14215 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
14216 lo = 0x80000000, hi = lo < 0;
14222 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
14223 if (HOST_BITS_PER_WIDE_INT >= 64)
14224 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
14226 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14231 vec_mode = VOIDmode;
14232 if (HOST_BITS_PER_WIDE_INT >= 64)
14235 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
14242 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14246 lo = ~lo, hi = ~hi;
14252 mask = immed_double_const (lo, hi, imode);
14254 vec = gen_rtvec (2, v, mask);
14255 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
14256 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
14263 gcc_unreachable ();
14267 lo = ~lo, hi = ~hi;
14269 /* Force this value into the low part of a fp vector constant. */
14270 mask = immed_double_const (lo, hi, imode);
14271 mask = gen_lowpart (mode, mask);
14273 if (vec_mode == VOIDmode)
14274 return force_reg (mode, mask);
14276 v = ix86_build_const_vector (mode, vect, mask);
14277 return force_reg (vec_mode, v);
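/* For reference, the element values produced here are 0x80000000 for
   SFmode and 0x8000000000000000 for DFmode (just the sign bit); with
   INVERT set, the complements 0x7fffffff and 0x7fffffffffffffff are
   built instead, suitable for implementing abs via AND.  */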
14280 /* Generate code for floating point ABS or NEG. */
14283 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
14286 rtx mask, set, use, clob, dst, src;
14287 bool use_sse = false;
14288 bool vector_mode = VECTOR_MODE_P (mode);
14289 enum machine_mode elt_mode = mode;
14293 elt_mode = GET_MODE_INNER (mode);
14296 else if (mode == TFmode)
14298 else if (TARGET_SSE_MATH)
14299 use_sse = SSE_FLOAT_MODE_P (mode);
14301 /* NEG and ABS performed with SSE use bitwise mask operations.
14302 Create the appropriate mask now. */
14304 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
14313 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
14314 set = gen_rtx_SET (VOIDmode, dst, set);
14319 set = gen_rtx_fmt_e (code, mode, src);
14320 set = gen_rtx_SET (VOIDmode, dst, set);
14323 use = gen_rtx_USE (VOIDmode, mask);
14324 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
14325 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14326 gen_rtvec (3, set, use, clob)));
14333 /* Expand a copysign operation. Special case operand 0 being a constant. */
14336 ix86_expand_copysign (rtx operands[])
14338 enum machine_mode mode;
14339 rtx dest, op0, op1, mask, nmask;
14341 dest = operands[0];
14345 mode = GET_MODE (dest);
14347 if (GET_CODE (op0) == CONST_DOUBLE)
14349 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
14351 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
14352 op0 = simplify_unary_operation (ABS, mode, op0, mode);
14354 if (mode == SFmode || mode == DFmode)
14356 enum machine_mode vmode;
14358 vmode = mode == SFmode ? V4SFmode : V2DFmode;
14360 if (op0 == CONST0_RTX (mode))
14361 op0 = CONST0_RTX (vmode);
14364 rtx v = ix86_build_const_vector (mode, false, op0);
14366 op0 = force_reg (vmode, v);
14369 else if (op0 != CONST0_RTX (mode))
14370 op0 = force_reg (mode, op0);
14372 mask = ix86_build_signbit_mask (mode, 0, 0);
14374 if (mode == SFmode)
14375 copysign_insn = gen_copysignsf3_const;
14376 else if (mode == DFmode)
14377 copysign_insn = gen_copysigndf3_const;
14379 copysign_insn = gen_copysigntf3_const;
14381 emit_insn (copysign_insn (dest, op0, op1, mask));
14385 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
14387 nmask = ix86_build_signbit_mask (mode, 0, 1);
14388 mask = ix86_build_signbit_mask (mode, 0, 0);
14390 if (mode == SFmode)
14391 copysign_insn = gen_copysignsf3_var;
14392 else if (mode == DFmode)
14393 copysign_insn = gen_copysigndf3_var;
14395 copysign_insn = gen_copysigntf3_var;
14397 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
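/* The underlying bit algebra, for reference:

     copysign (x, y) == (x & ~signmask) | (y & signmask)

   where signmask has only the sign bit set in each element, as built
   by ix86_build_signbit_mask above.  */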
14401 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
14402 be a constant, and so has already been expanded into a vector constant. */
14405 ix86_split_copysign_const (rtx operands[])
14407 enum machine_mode mode, vmode;
14408 rtx dest, op0, mask, x;
14410 dest = operands[0];
14412 mask = operands[3];
14414 mode = GET_MODE (dest);
14415 vmode = GET_MODE (mask);
14417 dest = simplify_gen_subreg (vmode, dest, mode, 0);
14418 x = gen_rtx_AND (vmode, dest, mask);
14419 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14421 if (op0 != CONST0_RTX (vmode))
14423 x = gen_rtx_IOR (vmode, dest, op0);
14424 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14428 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
14429 so we have to do two masks. */
14432 ix86_split_copysign_var (rtx operands[])
14434 enum machine_mode mode, vmode;
14435 rtx dest, scratch, op0, op1, mask, nmask, x;
14437 dest = operands[0];
14438 scratch = operands[1];
14441 nmask = operands[4];
14442 mask = operands[5];
14444 mode = GET_MODE (dest);
14445 vmode = GET_MODE (mask);
14447 if (rtx_equal_p (op0, op1))
14449 /* Shouldn't happen often (it's useless, obviously), but when it does
14450 we'd generate incorrect code if we continue below. */
14451 emit_move_insn (dest, op0);
14455 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
14457 gcc_assert (REGNO (op1) == REGNO (scratch));
14459 x = gen_rtx_AND (vmode, scratch, mask);
14460 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14463 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14464 x = gen_rtx_NOT (vmode, dest);
14465 x = gen_rtx_AND (vmode, x, op0);
14466 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14470 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
14472 x = gen_rtx_AND (vmode, scratch, mask);
14474 else /* alternative 2,4 */
14476 gcc_assert (REGNO (mask) == REGNO (scratch));
14477 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
14478 x = gen_rtx_AND (vmode, scratch, op1);
14480 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14482 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
14484 dest = simplify_gen_subreg (vmode, op0, mode, 0);
14485 x = gen_rtx_AND (vmode, dest, nmask);
14487 else /* alternative 3,4 */
14489 gcc_assert (REGNO (nmask) == REGNO (dest));
14491 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14492 x = gen_rtx_AND (vmode, dest, op0);
14494 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14497 x = gen_rtx_IOR (vmode, dest, scratch);
14498 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14501 /* Return TRUE or FALSE depending on whether the first SET in INSN
14502 has source and destination with matching CC modes, and that the
14503 CC mode is at least as constrained as REQ_MODE. */
14506 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
14509 enum machine_mode set_mode;
14511 set = PATTERN (insn);
14512 if (GET_CODE (set) == PARALLEL)
14513 set = XVECEXP (set, 0, 0);
14514 gcc_assert (GET_CODE (set) == SET);
14515 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
14517 set_mode = GET_MODE (SET_DEST (set));
14521 if (req_mode != CCNOmode
14522 && (req_mode != CCmode
14523 || XEXP (SET_SRC (set), 1) != const0_rtx))
14527 if (req_mode == CCGCmode)
14531 if (req_mode == CCGOCmode || req_mode == CCNOmode)
14535 if (req_mode == CCZmode)
14546 gcc_unreachable ();
14549 return (GET_MODE (SET_SRC (set)) == set_mode);
14552 /* Generate insn patterns to do an integer compare of OPERANDS. */
14555 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
14557 enum machine_mode cmpmode;
14560 cmpmode = SELECT_CC_MODE (code, op0, op1);
14561 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
14563 /* This is very simple, but making the interface the same as in the
14564 FP case makes the rest of the code easier. */
14565 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
14566 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
14568 /* Return the test that should be put into the flags user, i.e.
14569 the bcc, scc, or cmov instruction. */
14570 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
14573 /* Figure out whether to use ordered or unordered fp comparisons.
14574 Return the appropriate mode to use. */
14577 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
14579   /* ??? In order to make all comparisons reversible, we do all comparisons
14580      non-trapping when compiling for IEEE.  Once gcc is able to distinguish
14581      all forms of trapping and nontrapping comparisons, we can make inequality
14582      comparisons trapping again, since it results in better code when using
14583      FCOM based compares.  */
14584 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
14588 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
14590 enum machine_mode mode = GET_MODE (op0);
14592 if (SCALAR_FLOAT_MODE_P (mode))
14594 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14595 return ix86_fp_compare_mode (code);
14600 /* Only zero flag is needed. */
14601 case EQ: /* ZF=0 */
14602 case NE: /* ZF!=0 */
14604 /* Codes needing carry flag. */
14605 case GEU: /* CF=0 */
14606 case LTU: /* CF=1 */
14607 /* Detect overflow checks. They need just the carry flag. */
14608 if (GET_CODE (op0) == PLUS
14609 && rtx_equal_p (op1, XEXP (op0, 0)))
14613 case GTU: /* CF=0 & ZF=0 */
14614 case LEU: /* CF=1 | ZF=1 */
14615 /* Detect overflow checks. They need just the carry flag. */
14616 if (GET_CODE (op0) == MINUS
14617 && rtx_equal_p (op1, XEXP (op0, 0)))
14621 /* Codes possibly doable only with sign flag when
14622 comparing against zero. */
14623 case GE: /* SF=OF or SF=0 */
14624 case LT: /* SF<>OF or SF=1 */
14625 if (op1 == const0_rtx)
14628 /* For other cases Carry flag is not required. */
14630 /* Codes doable only with sign flag when comparing
14631 against zero, but we miss jump instruction for it
14632 so we need to use relational tests against overflow
14633 that thus needs to be zero. */
14634 case GT: /* ZF=0 & SF=OF */
14635 case LE: /* ZF=1 | SF<>OF */
14636 if (op1 == const0_rtx)
14640       /* The strcmp pattern does (use flags), and combine may ask us for a
14641          proper comparison.  */
14645 gcc_unreachable ();
14649 /* Return the fixed registers used for condition codes. */
14652 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
14659 /* If two condition code modes are compatible, return a condition code
14660 mode which is compatible with both. Otherwise, return
14663 static enum machine_mode
14664 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
14669 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
14672 if ((m1 == CCGCmode && m2 == CCGOCmode)
14673 || (m1 == CCGOCmode && m2 == CCGCmode))
14679 gcc_unreachable ();
14709       /* These are only compatible with themselves, which we already
14710          checked above.  */
14716 /* Return a comparison we can do that is equivalent to
14717    swap_condition (code), apart possibly from orderedness.
14718    But never change orderedness if TARGET_IEEE_FP, returning
14719    UNKNOWN in that case if necessary.  */
14721 static enum rtx_code
14722 ix86_fp_swap_condition (enum rtx_code code)
14726 case GT: /* GTU - CF=0 & ZF=0 */
14727 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
14728 case GE: /* GEU - CF=0 */
14729 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
14730 case UNLT: /* LTU - CF=1 */
14731 return TARGET_IEEE_FP ? UNKNOWN : GT;
14732 case UNLE: /* LEU - CF=1 | ZF=1 */
14733 return TARGET_IEEE_FP ? UNKNOWN : GE;
14735 return swap_condition (code);
14739 /* Return the cost of comparison CODE using the best strategy for performance.
14740    All following functions use the number of instructions as a cost metric.
14741    In the future this should be tweaked to compute bytes for optimize_size and
14742    take into account the performance of various instructions on various CPUs.  */
14745 ix86_fp_comparison_cost (enum rtx_code code)
14749 /* The cost of code using bit-twiddling on %ah. */
14766 arith_cost = TARGET_IEEE_FP ? 5 : 4;
14770 arith_cost = TARGET_IEEE_FP ? 6 : 4;
14773 gcc_unreachable ();
14776 switch (ix86_fp_comparison_strategy (code))
14778 case IX86_FPCMP_COMI:
14779 return arith_cost > 4 ? 3 : 2;
14780 case IX86_FPCMP_SAHF:
14781 return arith_cost > 4 ? 4 : 3;
14787 /* Return the strategy to use for floating-point comparisons.  We assume
14788    that fcomi is always preferable where available, since that is also true
14789    when looking at size (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test).  */
14791 enum ix86_fpcmp_strategy
14792 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
14794 /* Do fcomi/sahf based test when profitable. */
14797 return IX86_FPCMP_COMI;
14799 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
14800 return IX86_FPCMP_SAHF;
14802 return IX86_FPCMP_ARITH;
14805 /* Swap, force into registers, or otherwise massage the two operands
14806 to a fp comparison. The operands are updated in place; the new
14807 comparison code is returned. */
14809 static enum rtx_code
14810 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
14812 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
14813 rtx op0 = *pop0, op1 = *pop1;
14814 enum machine_mode op_mode = GET_MODE (op0);
14815 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
14817 /* All of the unordered compare instructions only work on registers.
14818 The same is true of the fcomi compare instructions. The XFmode
14819 compare instructions require registers except when comparing
14820      against zero or when converting operand 1 from fixed point to
14821      floating point.  */
14824 && (fpcmp_mode == CCFPUmode
14825 || (op_mode == XFmode
14826 && ! (standard_80387_constant_p (op0) == 1
14827 || standard_80387_constant_p (op1) == 1)
14828 && GET_CODE (op1) != FLOAT)
14829 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
14831 op0 = force_reg (op_mode, op0);
14832 op1 = force_reg (op_mode, op1);
14836 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
14837 things around if they appear profitable, otherwise force op0
14838 into a register. */
14840 if (standard_80387_constant_p (op0) == 0
14842 && ! (standard_80387_constant_p (op1) == 0
14845 enum rtx_code new_code = ix86_fp_swap_condition (code);
14846 if (new_code != UNKNOWN)
14849 tmp = op0, op0 = op1, op1 = tmp;
14855 op0 = force_reg (op_mode, op0);
14857 if (CONSTANT_P (op1))
14859 int tmp = standard_80387_constant_p (op1);
14861 op1 = validize_mem (force_const_mem (op_mode, op1));
14865 op1 = force_reg (op_mode, op1);
14868 op1 = force_reg (op_mode, op1);
14872 /* Try to rearrange the comparison to make it cheaper. */
14873 if (ix86_fp_comparison_cost (code)
14874 > ix86_fp_comparison_cost (swap_condition (code))
14875 && (REG_P (op1) || can_create_pseudo_p ()))
14878 tmp = op0, op0 = op1, op1 = tmp;
14879 code = swap_condition (code);
14881 op0 = force_reg (op_mode, op0);
14889 /* Convert the comparison codes we use to represent FP comparisons to integer
14890    codes that will result in a proper branch.  Return UNKNOWN if no such code
14891    is available.  */
14894 ix86_fp_compare_code_to_integer (enum rtx_code code)
14923 /* Generate insn patterns to do a floating point compare of OPERANDS. */
14926 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
14928 enum machine_mode fpcmp_mode, intcmp_mode;
14931 fpcmp_mode = ix86_fp_compare_mode (code);
14932 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
14934 /* Do fcomi/sahf based test when profitable. */
14935 switch (ix86_fp_comparison_strategy (code))
14937 case IX86_FPCMP_COMI:
14938 intcmp_mode = fpcmp_mode;
14939 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14940 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14945 case IX86_FPCMP_SAHF:
14946 intcmp_mode = fpcmp_mode;
14947 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14948 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14952 scratch = gen_reg_rtx (HImode);
14953 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
14954 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
14957 case IX86_FPCMP_ARITH:
14958 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
14959 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14960 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
14962 scratch = gen_reg_rtx (HImode);
14963 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
14965       /* In the unordered case, we have to check C2 for NaNs, which
14966          doesn't happen to work out to anything nice combination-wise.
14967          So do some bit twiddling on the value we've got in AH to come
14968          up with an appropriate set of condition codes.  */
14970 intcmp_mode = CCNOmode;
14975 if (code == GT || !TARGET_IEEE_FP)
14977 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
14982 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14983 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
14984 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
14985 intcmp_mode = CCmode;
14991 if (code == LT && TARGET_IEEE_FP)
14993 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14994 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
14995 intcmp_mode = CCmode;
15000 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
15006 if (code == GE || !TARGET_IEEE_FP)
15008 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
15013 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15014 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
15020 if (code == LE && TARGET_IEEE_FP)
15022 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15023 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15024 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15025 intcmp_mode = CCmode;
15030 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15036 if (code == EQ && TARGET_IEEE_FP)
15038 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15039 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15040 intcmp_mode = CCmode;
15045 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15051 if (code == NE && TARGET_IEEE_FP)
15053 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15054 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
15060 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15066 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15070 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15075 gcc_unreachable ();
15083 /* Return the test that should be put into the flags user, i.e.
15084 the bcc, scc, or cmov instruction. */
15085 return gen_rtx_fmt_ee (code, VOIDmode,
15086 gen_rtx_REG (intcmp_mode, FLAGS_REG),
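/* Key to the magic constants used above, assuming the usual 8087
   status word layout copied into AH by fnstsw: C0 is 0x01, C2 is 0x04
   and C3 is 0x40 of AH, so e.g. the 0x45 tests inspect C3|C2|C0, the
   bits that distinguish less-than, equal and unordered results.  */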
15091 ix86_expand_compare (enum rtx_code code)
15094 op0 = ix86_compare_op0;
15095 op1 = ix86_compare_op1;
15097 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC)
15098 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_op0, ix86_compare_op1);
15100 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
15102 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
15103 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15106 ret = ix86_expand_int_compare (code, op0, op1);
15112 ix86_expand_branch (enum rtx_code code, rtx label)
15116 switch (GET_MODE (ix86_compare_op0))
15125 tmp = ix86_expand_compare (code);
15126 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
15127 gen_rtx_LABEL_REF (VOIDmode, label),
15129 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
15136 /* Expand DImode branch into multiple compare+branch. */
15138 rtx lo[2], hi[2], label2;
15139 enum rtx_code code1, code2, code3;
15140 enum machine_mode submode;
15142 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
15144 tmp = ix86_compare_op0;
15145 ix86_compare_op0 = ix86_compare_op1;
15146 ix86_compare_op1 = tmp;
15147 code = swap_condition (code);
15149 if (GET_MODE (ix86_compare_op0) == DImode)
15151 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
15152 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
15157 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
15158 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
15162 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
15163 avoid two branches. This costs one extra insn, so disable when
15164 optimizing for size. */
15166 if ((code == EQ || code == NE)
15167 && (!optimize_insn_for_size_p ()
15168 || hi[1] == const0_rtx || lo[1] == const0_rtx))
15173 if (hi[1] != const0_rtx)
15174 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
15175 NULL_RTX, 0, OPTAB_WIDEN);
15178 if (lo[1] != const0_rtx)
15179 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
15180 NULL_RTX, 0, OPTAB_WIDEN);
15182 tmp = expand_binop (submode, ior_optab, xor1, xor0,
15183 NULL_RTX, 0, OPTAB_WIDEN);
15185 ix86_compare_op0 = tmp;
15186 ix86_compare_op1 = const0_rtx;
15187 ix86_expand_branch (code, label);
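/* E.g. a 64-bit a == b on ia32 becomes, in pseudo code,

     t = (hi (a) ^ hi (b)) | (lo (a) ^ lo (b));
     if (t == 0) goto label;

   which needs only a single conditional branch.  */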
15191       /* Otherwise, if we are doing a less-than or greater-or-equal
15192          comparison, op1 is a constant, and the low word is zero, then we
15193          can just examine the high word.  Similarly for a low word of -1
15194          with less-or-equal or greater-than.  */
15196 if (CONST_INT_P (hi[1]))
15199 case LT: case LTU: case GE: case GEU:
15200 if (lo[1] == const0_rtx)
15202 ix86_compare_op0 = hi[0];
15203 ix86_compare_op1 = hi[1];
15204 ix86_expand_branch (code, label);
15208 case LE: case LEU: case GT: case GTU:
15209 if (lo[1] == constm1_rtx)
15211 ix86_compare_op0 = hi[0];
15212 ix86_compare_op1 = hi[1];
15213 ix86_expand_branch (code, label);
15221 /* Otherwise, we need two or three jumps. */
15223 label2 = gen_label_rtx ();
15226 code2 = swap_condition (code);
15227 code3 = unsigned_condition (code);
15231 case LT: case GT: case LTU: case GTU:
15234 case LE: code1 = LT; code2 = GT; break;
15235 case GE: code1 = GT; code2 = LT; break;
15236 case LEU: code1 = LTU; code2 = GTU; break;
15237 case GEU: code1 = GTU; code2 = LTU; break;
15239 case EQ: code1 = UNKNOWN; code2 = NE; break;
15240 case NE: code2 = UNKNOWN; break;
15243 gcc_unreachable ();
15248 * if (hi(a) < hi(b)) goto true;
15249 * if (hi(a) > hi(b)) goto false;
15250 * if (lo(a) < lo(b)) goto true;
15254 ix86_compare_op0 = hi[0];
15255 ix86_compare_op1 = hi[1];
15257 if (code1 != UNKNOWN)
15258 ix86_expand_branch (code1, label);
15259 if (code2 != UNKNOWN)
15260 ix86_expand_branch (code2, label2);
15262 ix86_compare_op0 = lo[0];
15263 ix86_compare_op1 = lo[1];
15264 ix86_expand_branch (code3, label);
15266 if (code2 != UNKNOWN)
15267 emit_label (label2);
15272       /* If we have already emitted a compare insn, go straight to simple.
15273          ix86_expand_compare won't emit anything if ix86_compare_emitted
15274          is non-NULL.  */
15275 gcc_assert (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC);
15280 /* Split branch based on floating point condition. */
15282 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
15283 rtx target1, rtx target2, rtx tmp, rtx pushed)
15288 if (target2 != pc_rtx)
15291 code = reverse_condition_maybe_unordered (code);
15296 condition = ix86_expand_fp_compare (code, op1, op2,
15299 /* Remove pushed operand from stack. */
15301 ix86_free_from_memory (GET_MODE (pushed));
15303 i = emit_jump_insn (gen_rtx_SET
15305 gen_rtx_IF_THEN_ELSE (VOIDmode,
15306 condition, target1, target2)));
15307 if (split_branch_probability >= 0)
15308 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
15312 ix86_expand_setcc (enum rtx_code code, rtx dest)
15316 gcc_assert (GET_MODE (dest) == QImode);
15318 ret = ix86_expand_compare (code);
15319 PUT_MODE (ret, QImode);
15320 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
15323 /* Expand comparison setting or clearing carry flag. Return true when
15324 successful and set pop for the operation. */
15326 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
15328 enum machine_mode mode =
15329 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
15331   /* Do not handle DImode compares that go through a special path.  */
15332 if (mode == (TARGET_64BIT ? TImode : DImode))
15335 if (SCALAR_FLOAT_MODE_P (mode))
15337 rtx compare_op, compare_seq;
15339 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
15341       /* Shortcut:  the following common codes never translate
15342          into carry-flag compares.  */
15343 if (code == EQ || code == NE || code == UNEQ || code == LTGT
15344 || code == ORDERED || code == UNORDERED)
15347 /* These comparisons require zero flag; swap operands so they won't. */
15348 if ((code == GT || code == UNLE || code == LE || code == UNGT)
15349 && !TARGET_IEEE_FP)
15354 code = swap_condition (code);
15357       /* Try to expand the comparison and verify that we end up with a
15358          carry-flag-based comparison.  This fails to be true only when
15359          we decide to expand the comparison using arithmetic, which is
15360          not a common scenario.  */
15362 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15363 compare_seq = get_insns ();
15366 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15367 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15368 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
15370 code = GET_CODE (compare_op);
15372 if (code != LTU && code != GEU)
15375 emit_insn (compare_seq);
15380 if (!INTEGRAL_MODE_P (mode))
15389 /* Convert a==0 into (unsigned)a<1. */
15392 if (op1 != const0_rtx)
15395 code = (code == EQ ? LTU : GEU);
15398       /* Convert a>b into b<a or a>=b+1.  */
15401 if (CONST_INT_P (op1))
15403 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
15404           /* Bail out on overflow.  We still can swap operands but that
15405              would force loading of the constant into a register.  */
15406 if (op1 == const0_rtx
15407 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
15409 code = (code == GTU ? GEU : LTU);
15416 code = (code == GTU ? LTU : GEU);
15420 /* Convert a>=0 into (unsigned)a<0x80000000. */
15423 if (mode == DImode || op1 != const0_rtx)
15425 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15426 code = (code == LT ? GEU : LTU);
15430 if (mode == DImode || op1 != constm1_rtx)
15432 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15433 code = (code == LE ? GEU : LTU);
15439   /* Swapping operands may cause a constant to appear as the first operand.  */
15440 if (!nonimmediate_operand (op0, VOIDmode))
15442 if (!can_create_pseudo_p ())
15444 op0 = force_reg (mode, op0);
15446 ix86_compare_op0 = op0;
15447 ix86_compare_op1 = op1;
15448 *pop = ix86_expand_compare (code);
15449 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
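/* Usage sketch: an unsigned a > 5 is rewritten here as a >= 6 (GEU),
   whose result lands in the carry flag and can be consumed directly
   by sbb-style sequences.  */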
15454 ix86_expand_int_movcc (rtx operands[])
15456 enum rtx_code code = GET_CODE (operands[1]), compare_code;
15457 rtx compare_seq, compare_op;
15458 enum machine_mode mode = GET_MODE (operands[0]);
15459 bool sign_bit_compare_p = false;
15462 ix86_compare_op0 = XEXP (operands[1], 0);
15463 ix86_compare_op1 = XEXP (operands[1], 1);
15464 compare_op = ix86_expand_compare (code);
15465 compare_seq = get_insns ();
15468 compare_code = GET_CODE (compare_op);
15470 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
15471 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
15472 sign_bit_compare_p = true;
15474 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
15475 HImode insns, we'd be swallowed in word prefix ops. */
15477 if ((mode != HImode || TARGET_FAST_PREFIX)
15478 && (mode != (TARGET_64BIT ? TImode : DImode))
15479 && CONST_INT_P (operands[2])
15480 && CONST_INT_P (operands[3]))
15482 rtx out = operands[0];
15483 HOST_WIDE_INT ct = INTVAL (operands[2]);
15484 HOST_WIDE_INT cf = INTVAL (operands[3]);
15485 HOST_WIDE_INT diff;
15488       /* Sign bit compares are better done using shifts than we do by using
15489          an sbb insn.  */
15490 if (sign_bit_compare_p
15491 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15492 ix86_compare_op1, &compare_op))
15494 /* Detect overlap between destination and compare sources. */
15497 if (!sign_bit_compare_p)
15500 bool fpcmp = false;
15502 compare_code = GET_CODE (compare_op);
15504 flags = XEXP (compare_op, 0);
15506 if (GET_MODE (flags) == CCFPmode
15507 || GET_MODE (flags) == CCFPUmode)
15511 = ix86_fp_compare_code_to_integer (compare_code);
15514           /* To simplify the rest of the code, restrict to the GEU case.  */
15515 if (compare_code == LTU)
15517 HOST_WIDE_INT tmp = ct;
15520 compare_code = reverse_condition (compare_code);
15521 code = reverse_condition (code);
15526 PUT_CODE (compare_op,
15527 reverse_condition_maybe_unordered
15528 (GET_CODE (compare_op)));
15530 PUT_CODE (compare_op,
15531 reverse_condition (GET_CODE (compare_op)));
15535 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
15536 || reg_overlap_mentioned_p (out, ix86_compare_op1))
15537 tmp = gen_reg_rtx (mode);
15539 if (mode == DImode)
15540 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
15542 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
15543 flags, compare_op));
15547 if (code == GT || code == GE)
15548 code = reverse_condition (code);
15551 HOST_WIDE_INT tmp = ct;
15556 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
15557 ix86_compare_op1, VOIDmode, 0, -1);
15570 tmp = expand_simple_binop (mode, PLUS,
15572 copy_rtx (tmp), 1, OPTAB_DIRECT);
15583 tmp = expand_simple_binop (mode, IOR,
15585 copy_rtx (tmp), 1, OPTAB_DIRECT);
15587 else if (diff == -1 && ct)
15597 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15599 tmp = expand_simple_binop (mode, PLUS,
15600 copy_rtx (tmp), GEN_INT (cf),
15601 copy_rtx (tmp), 1, OPTAB_DIRECT);
15609 * andl cf - ct, dest
15619 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15622 tmp = expand_simple_binop (mode, AND,
15624 gen_int_mode (cf - ct, mode),
15625 copy_rtx (tmp), 1, OPTAB_DIRECT);
15627 tmp = expand_simple_binop (mode, PLUS,
15628 copy_rtx (tmp), GEN_INT (ct),
15629 copy_rtx (tmp), 1, OPTAB_DIRECT);
15632 if (!rtx_equal_p (tmp, out))
15633 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
15635 return 1; /* DONE */
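      /* Worked example of the sbb-based selection above (AT&T syntax),
         for the unsigned dest = (a < b) ? 5 : 2:

           cmpl  %ebx, %eax        # CF set iff a < b
           sbbl  %edx, %edx        # %edx = a < b ? -1 : 0
           andl  $3, %edx          # 3 == ct - cf
           addl  $2, %edx          # %edx = a < b ? 5 : 2

         The diff == 1 and diff == -1 special cases above get by with a
         shorter tail after the sbb.  */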
15640 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15643 tmp = ct, ct = cf, cf = tmp;
15646 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15648 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15650           /* We may be reversing an unordered compare to a normal compare,
15651              which is not valid in general (we may convert a non-trapping
15652              condition to a trapping one); however, on i386 we currently
15653              emit all comparisons unordered.  */
15654 compare_code = reverse_condition_maybe_unordered (compare_code);
15655 code = reverse_condition_maybe_unordered (code);
15659 compare_code = reverse_condition (compare_code);
15660 code = reverse_condition (code);
15664 compare_code = UNKNOWN;
15665 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
15666 && CONST_INT_P (ix86_compare_op1))
15668 if (ix86_compare_op1 == const0_rtx
15669 && (code == LT || code == GE))
15670 compare_code = code;
15671 else if (ix86_compare_op1 == constm1_rtx)
15675 else if (code == GT)
15680 /* Optimize dest = (op0 < 0) ? -1 : cf. */
15681 if (compare_code != UNKNOWN
15682 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
15683 && (cf == -1 || ct == -1))
15685 /* If lea code below could be used, only optimize
15686 if it results in a 2 insn sequence. */
15688 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
15689 || diff == 3 || diff == 5 || diff == 9)
15690 || (compare_code == LT && ct == -1)
15691 || (compare_code == GE && cf == -1))
15694 /* Emitted sequence: notl op1 (if necessary); sarl $31, op1; orl cf, op1. */
15702 code = reverse_condition (code);
15705 out = emit_store_flag (out, code, ix86_compare_op0,
15706 ix86_compare_op1, VOIDmode, 0, -1);
15708 out = expand_simple_binop (mode, IOR,
15710 out, 1, OPTAB_DIRECT);
15711 if (out != operands[0])
15712 emit_move_insn (operands[0], out);
15714 return 1; /* DONE */
15719 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
15720 || diff == 3 || diff == 5 || diff == 9)
15721 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
15723 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
15729 * Emitted: xorl dest,dest; cmpl op1,op2; setcc dest; lea cf(dest*(ct-cf)),dest
15733 * This also catches the degenerate setcc-only case.
15739 out = emit_store_flag (out, code, ix86_compare_op0,
15740 ix86_compare_op1, VOIDmode, 0, 1);
15743 /* On x86_64 the lea instruction operates on Pmode, so we need
15744 to get arithmetics done in proper mode to match. */
15746 tmp = copy_rtx (out);
15750 out1 = copy_rtx (out);
15751 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
15755 tmp = gen_rtx_PLUS (mode, tmp, out1);
15761 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
15764 if (!rtx_equal_p (tmp, out))
15767 out = force_operand (tmp, copy_rtx (out));
15769 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
15771 if (!rtx_equal_p (out, operands[0]))
15772 emit_move_insn (operands[0], copy_rtx (out));
15774 return 1; /* DONE */
15778 * General case:			Jumpful:
15779 *   xorl dest,dest		cmpl op1, op2
15780 *   cmpl op1, op2		movl ct, dest
15781 *   setcc dest			jcc 1f
15782 *   decl dest			movl cf, dest
15783 *   andl (cf-ct),dest		1:
15784 *   addl ct,dest
15786 * Size 20.			Size 14.
15788 * This is reasonably steep, but branch mispredict costs are
15789 * high on modern cpus, so consider failing only if optimizing
15790 * for space.
15793 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15794 && BRANCH_COST (optimize_insn_for_speed_p (),
15799 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15804 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15806 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15808 /* We may be reversing an unordered compare to a normal compare,
15809 which is not valid in general (we may convert a non-trapping
15810 condition to a trapping one); however, on i386 we currently
15811 emit all comparisons unordered. */
15812 code = reverse_condition_maybe_unordered (code);
15816 code = reverse_condition (code);
15817 if (compare_code != UNKNOWN)
15818 compare_code = reverse_condition (compare_code);
15822 if (compare_code != UNKNOWN)
15824 /* notl op1 (if needed)
15829 For x < 0 (resp. x <= -1) there will be no notl,
15830 so if possible swap the constants to get rid of the
15831 complementation.
15832 True/false will be -1/0 while code below (store flag
15833 followed by decrement) is 0/-1, so the constants need
15834 to be exchanged once more. */
15836 if (compare_code == GE || !cf)
15838 code = reverse_condition (code);
15843 HOST_WIDE_INT tmp = cf;
15844 cf = ct;
15845 ct = tmp;
15848 out = emit_store_flag (out, code, ix86_compare_op0,
15849 ix86_compare_op1, VOIDmode, 0, -1);
15853 out = emit_store_flag (out, code, ix86_compare_op0,
15854 ix86_compare_op1, VOIDmode, 0, 1);
15856 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
15857 copy_rtx (out), 1, OPTAB_DIRECT);
15860 out = expand_simple_binop (mode, AND, copy_rtx (out),
15861 gen_int_mode (cf - ct, mode),
15862 copy_rtx (out), 1, OPTAB_DIRECT);
15864 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
15865 copy_rtx (out), 1, OPTAB_DIRECT);
15866 if (!rtx_equal_p (out, operands[0]))
15867 emit_move_insn (operands[0], copy_rtx (out));
15869 return 1; /* DONE */
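/* Worked example of the store-flag path above, with hypothetical values
   ct = 10 and cf = 20 and an equality test: emit_store_flag produces
   1/0, the PLUS of constm1_rtx turns that into 0/-1, AND with
   (cf - ct) = 10 gives 0/10, and the final PLUS of ct = 10 yields 10
   when the comparison held and 20 when it did not.  */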
15873 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15875 /* Try a few things more with specific constants and a variable. */
15878 rtx var, orig_out, out, tmp;
15880 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
15881 return 0; /* FAIL */
15883 /* If one of the two operands is an interesting constant, load a
15884 constant with the above and mask it in with a logical operation. */
15886 if (CONST_INT_P (operands[2]))
15889 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
15890 operands[3] = constm1_rtx, op = and_optab;
15891 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
15892 operands[3] = const0_rtx, op = ior_optab;
15894 return 0; /* FAIL */
15896 else if (CONST_INT_P (operands[3]))
15899 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
15900 operands[2] = constm1_rtx, op = and_optab;
15901 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
15902 operands[2] = const0_rtx, op = ior_optab;
15904 return 0; /* FAIL */
15907 return 0; /* FAIL */
15909 orig_out = operands[0];
15910 tmp = gen_reg_rtx (mode);
15913 /* Recurse to get the constant loaded. */
15914 if (ix86_expand_int_movcc (operands) == 0)
15915 return 0; /* FAIL */
15917 /* Mask in the interesting variable. */
15918 out = expand_binop (mode, op, var, tmp, orig_out, 0,
15920 if (!rtx_equal_p (out, orig_out))
15921 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
15923 return 1; /* DONE */
15927 * For comparison with the above: movl cf,dest; movl ct,tmp; cmpl op1,op2; cmovcc tmp,dest. Size 15.
15937 if (! nonimmediate_operand (operands[2], mode))
15938 operands[2] = force_reg (mode, operands[2]);
15939 if (! nonimmediate_operand (operands[3], mode))
15940 operands[3] = force_reg (mode, operands[3]);
15942 if (! register_operand (operands[2], VOIDmode)
15943 && (mode == QImode
15944 || ! register_operand (operands[3], VOIDmode)))
15945 operands[2] = force_reg (mode, operands[2]);
15947 if (mode == QImode
15948 && ! register_operand (operands[3], VOIDmode))
15949 operands[3] = force_reg (mode, operands[3]);
15951 emit_insn (compare_seq);
15952 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15953 gen_rtx_IF_THEN_ELSE (mode,
15954 compare_op, operands[2],
15957 return 1; /* DONE */
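/* The fallthrough case above ends in a plain conditional move; the RTL
   emitted has roughly this shape (a sketch, with modes elided):

	(set (reg) (if_then_else (lt (reg:CC flags) (const_int 0))
				 (reg) (reg)))

   which the movcc patterns in i386.md match as a cmov instruction.  */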
15960 /* Swap, force into registers, or otherwise massage the two operands
15961 to an sse comparison with a mask result. Thus we differ a bit from
15962 ix86_prepare_fp_compare_args which expects to produce a flags result.
15964 The DEST operand exists to help determine whether to commute commutative
15965 operators. The POP0/POP1 operands are updated in place. The new
15966 comparison code is returned, or UNKNOWN if not implementable. */
15968 static enum rtx_code
15969 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
15970 rtx *pop0, rtx *pop1)
15978 /* We have no LTGT as an operator. We could implement it with
15979 NE & ORDERED, but this requires an extra temporary. It's
15980 not clear that it's worth it. */
15987 /* These are supported directly. */
15994 /* For commutative operators, try to canonicalize the destination
15995 operand to be first in the comparison - this helps reload to
15996 avoid extra moves. */
15997 if (!dest || !rtx_equal_p (dest, *pop1))
16005 /* These are not supported directly. Swap the comparison operands
16006 to transform into something that is supported. */
16010 code = swap_condition (code);
16014 gcc_unreachable ();
16020 /* Detect conditional moves that exactly match min/max operational
16021 semantics. Note that this is IEEE safe, as long as we don't
16022 interchange the operands.
16024 Returns FALSE if this conditional move doesn't match a MIN/MAX,
16025 and TRUE if the operation is successful and instructions are emitted. */
16028 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
16029 rtx cmp_op1, rtx if_true, rtx if_false)
16031 enum machine_mode mode;
16037 else if (code == UNGE)
16040 if_true = if_false;
16046 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
16048 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
16053 mode = GET_MODE (dest);
16055 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
16056 but MODE may be a vector mode and thus not appropriate. */
16057 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
16059 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
16062 if_true = force_reg (mode, if_true);
16063 v = gen_rtvec (2, if_true, if_false);
16064 tmp = gen_rtx_UNSPEC (mode, v, u);
16068 code = is_min ? SMIN : SMAX;
16069 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
16072 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
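/* Example: "a < b ? a : b" in SFmode reaches this point with code == LT
   and operands matching the comparison, so with -ffinite-math-only and
   -funsafe-math-optimizations it becomes a single SMIN (minss); without
   those flags the UNSPEC_IEEE_MIN path above is taken, which pins the
   operand order so the NaN and -0.0 behaviour of minss is kept.  */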
16076 /* Expand an sse vector comparison. Return the register with the result. */
16079 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
16080 rtx op_true, rtx op_false)
16082 enum machine_mode mode = GET_MODE (dest);
16085 cmp_op0 = force_reg (mode, cmp_op0);
16086 if (!nonimmediate_operand (cmp_op1, mode))
16087 cmp_op1 = force_reg (mode, cmp_op1);
16090 || reg_overlap_mentioned_p (dest, op_true)
16091 || reg_overlap_mentioned_p (dest, op_false))
16092 dest = gen_reg_rtx (mode);
16094 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
16095 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16100 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
16101 operations. This is used for both scalar and vector conditional moves. */
16104 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
16106 enum machine_mode mode = GET_MODE (dest);
16109 if (op_false == CONST0_RTX (mode))
16111 op_true = force_reg (mode, op_true);
16112 x = gen_rtx_AND (mode, cmp, op_true);
16113 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16115 else if (op_true == CONST0_RTX (mode))
16117 op_false = force_reg (mode, op_false);
16118 x = gen_rtx_NOT (mode, cmp);
16119 x = gen_rtx_AND (mode, x, op_false);
16120 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16122 else if (TARGET_XOP)
16124 rtx pcmov = gen_rtx_SET (mode, dest,
16125 gen_rtx_IF_THEN_ELSE (mode, cmp,
16132 op_true = force_reg (mode, op_true);
16133 op_false = force_reg (mode, op_false);
16135 t2 = gen_reg_rtx (mode);
16137 t3 = gen_reg_rtx (mode);
16141 x = gen_rtx_AND (mode, op_true, cmp);
16142 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
16144 x = gen_rtx_NOT (mode, cmp);
16145 x = gen_rtx_AND (mode, x, op_false);
16146 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
16148 x = gen_rtx_IOR (mode, t3, t2);
16149 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
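/* The three-instruction fallback above (pand, pandn, por) is the classic
   mask select.  In scalar terms, for one 32-bit lane (a C sketch; CMP is
   all-ones or all-zeros, as ix86_expand_sse_cmp produces):

     uint32_t sel (uint32_t cmp, uint32_t t, uint32_t f)
     {
       return (cmp & t) | (~cmp & f);
     }
*/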
16153 /* Expand a floating-point conditional move. Return true if successful. */
16156 ix86_expand_fp_movcc (rtx operands[])
16158 enum machine_mode mode = GET_MODE (operands[0]);
16159 enum rtx_code code = GET_CODE (operands[1]);
16160 rtx tmp, compare_op;
16162 ix86_compare_op0 = XEXP (operands[1], 0);
16163 ix86_compare_op1 = XEXP (operands[1], 1);
16164 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
16166 enum machine_mode cmode;
16168 /* Since we've no cmove for sse registers, don't force bad register
16169 allocation just to gain access to it. Deny movcc when the
16170 comparison mode doesn't match the move mode. */
16171 cmode = GET_MODE (ix86_compare_op0);
16172 if (cmode == VOIDmode)
16173 cmode = GET_MODE (ix86_compare_op1);
16177 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16179 &ix86_compare_op1);
16180 if (code == UNKNOWN)
16183 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
16184 ix86_compare_op1, operands[2],
16188 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
16189 ix86_compare_op1, operands[2], operands[3]);
16190 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
16194 /* The floating point conditional move instructions don't directly
16195 support conditions resulting from a signed integer comparison. */
16197 compare_op = ix86_expand_compare (code);
16198 if (!fcmov_comparison_operator (compare_op, VOIDmode))
16200 tmp = gen_reg_rtx (QImode);
16201 ix86_expand_setcc (code, tmp);
16203 ix86_compare_op0 = tmp;
16204 ix86_compare_op1 = const0_rtx;
16205 compare_op = ix86_expand_compare (code);
16208 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16209 gen_rtx_IF_THEN_ELSE (mode, compare_op,
16210 operands[2], operands[3])));
16215 /* Expand a floating-point vector conditional move; a vcond operation
16216 rather than a movcc operation. */
16219 ix86_expand_fp_vcond (rtx operands[])
16221 enum rtx_code code = GET_CODE (operands[3]);
16224 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16225 &operands[4], &operands[5]);
16226 if (code == UNKNOWN)
16229 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
16230 operands[5], operands[1], operands[2]))
16233 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
16234 operands[1], operands[2]);
16235 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
16239 /* Expand a signed/unsigned integral vector conditional move. */
16242 ix86_expand_int_vcond (rtx operands[])
16244 enum machine_mode mode = GET_MODE (operands[0]);
16245 enum rtx_code code = GET_CODE (operands[3]);
16246 bool negate = false;
16249 cop0 = operands[4];
16250 cop1 = operands[5];
16252 /* XOP supports all of the comparisons on all vector int types. */
16255 /* Canonicalize the comparison to EQ, GT, GTU. */
16266 code = reverse_condition (code);
16272 code = reverse_condition (code);
16278 code = swap_condition (code);
16279 x = cop0, cop0 = cop1, cop1 = x;
16283 gcc_unreachable ();
16286 /* Only SSE4.1/SSE4.2 supports V2DImode. */
16287 if (mode == V2DImode)
16292 /* SSE4.1 supports EQ. */
16293 if (!TARGET_SSE4_1)
16299 /* SSE4.2 supports GT/GTU. */
16300 if (!TARGET_SSE4_2)
16305 gcc_unreachable ();
16309 /* Unsigned parallel compare is not supported by the hardware.
16310 Play some tricks to turn this into a signed comparison
16314 cop0 = force_reg (mode, cop0);
16322 rtx (*gen_sub3) (rtx, rtx, rtx);
16324 /* Subtract (-(INT MAX) - 1) from both operands to make
16326 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
16328 gen_sub3 = (mode == V4SImode
16329 ? gen_subv4si3 : gen_subv2di3);
16330 t1 = gen_reg_rtx (mode);
16331 emit_insn (gen_sub3 (t1, cop0, mask));
16333 t2 = gen_reg_rtx (mode);
16334 emit_insn (gen_sub3 (t2, cop1, mask));
16344 /* Perform a parallel unsigned saturating subtraction. */
16345 x = gen_reg_rtx (mode);
16346 emit_insn (gen_rtx_SET (VOIDmode, x,
16347 gen_rtx_US_MINUS (mode, cop0, cop1)));
16350 cop1 = CONST0_RTX (mode);
16356 gcc_unreachable ();
16361 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
16362 operands[1+negate], operands[2-negate]);
16364 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
16365 operands[2-negate]);
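/* The bias trick above rests on a simple identity (one 32-bit lane
   shown): subtracting 0x80000000 is the same as XORing the sign bit, and

     (uint32_t) a < (uint32_t) b
       == (int32_t) (a ^ 0x80000000) < (int32_t) (b ^ 0x80000000)

   so once both operands are biased, the signed pcmpgt that the hardware
   does provide computes the unsigned comparison.  */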
16369 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
16370 true if we should do zero extension, else sign extension. HIGH_P is
16371 true if we want the N/2 high elements, else the low elements. */
16374 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16376 enum machine_mode imode = GET_MODE (operands[1]);
16377 rtx (*unpack)(rtx, rtx, rtx);
16384 unpack = gen_vec_interleave_highv16qi;
16386 unpack = gen_vec_interleave_lowv16qi;
16390 unpack = gen_vec_interleave_highv8hi;
16392 unpack = gen_vec_interleave_lowv8hi;
16396 unpack = gen_vec_interleave_highv4si;
16398 unpack = gen_vec_interleave_lowv4si;
16401 gcc_unreachable ();
16404 dest = gen_lowpart (imode, operands[0]);
16407 se = force_reg (imode, CONST0_RTX (imode));
16409 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
16410 operands[1], pc_rtx, pc_rtx);
16412 emit_insn (unpack (dest, operands[1], se));
16415 /* This function performs the same task as ix86_expand_sse_unpack,
16416 but with SSE4.1 instructions. */
16419 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16421 enum machine_mode imode = GET_MODE (operands[1]);
16422 rtx (*unpack)(rtx, rtx);
16429 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
16431 unpack = gen_sse4_1_extendv8qiv8hi2;
16435 unpack = gen_sse4_1_zero_extendv4hiv4si2;
16437 unpack = gen_sse4_1_extendv4hiv4si2;
16441 unpack = gen_sse4_1_zero_extendv2siv2di2;
16443 unpack = gen_sse4_1_extendv2siv2di2;
16446 gcc_unreachable ();
16449 dest = operands[0];
16452 /* Shift higher 8 bytes to lower 8 bytes. */
16453 src = gen_reg_rtx (imode);
16454 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, src),
16455 gen_lowpart (V1TImode, operands[1]),
16461 emit_insn (unpack (dest, src));
16464 /* Expand conditional increment or decrement using adc/sbb instructions.
16465 The default case using setcc followed by the conditional move can be
16466 done by generic code. */
16468 ix86_expand_int_addcc (rtx operands[])
16470 enum rtx_code code = GET_CODE (operands[1]);
16472 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
16474 rtx val = const0_rtx;
16475 bool fpcmp = false;
16476 enum machine_mode mode;
16478 ix86_compare_op0 = XEXP (operands[1], 0);
16479 ix86_compare_op1 = XEXP (operands[1], 1);
16480 if (operands[3] != const1_rtx
16481 && operands[3] != constm1_rtx)
16483 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
16484 ix86_compare_op1, &compare_op))
16486 code = GET_CODE (compare_op);
16488 flags = XEXP (compare_op, 0);
16490 if (GET_MODE (flags) == CCFPmode
16491 || GET_MODE (flags) == CCFPUmode)
16494 code = ix86_fp_compare_code_to_integer (code);
16501 PUT_CODE (compare_op,
16502 reverse_condition_maybe_unordered
16503 (GET_CODE (compare_op)));
16505 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
16508 mode = GET_MODE (operands[0]);
16510 /* Construct either adc or sbb insn. */
16511 if ((code == LTU) == (operands[3] == constm1_rtx))
16516 insn = gen_subqi3_carry;
16519 insn = gen_subhi3_carry;
16522 insn = gen_subsi3_carry;
16525 insn = gen_subdi3_carry;
16528 gcc_unreachable ();
16536 insn = gen_addqi3_carry;
16539 insn = gen_addhi3_carry;
16542 insn = gen_addsi3_carry;
16545 insn = gen_adddi3_carry;
16548 gcc_unreachable ();
16551 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
16553 return 1; /* DONE */
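/* For illustration: with operands[3] == const1_rtx and a comparison that
   reduces to the carry flag, "x = y + (a <u b)" comes out roughly as

	cmpl	%ecx, %ebx		# carry set iff a <u b
	adcl	$0, %eax		# x = y + carry

   and the constm1_rtx case uses sbbl analogously; no setcc or branch is
   needed.  */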
16557 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
16558 works for floating point parameters and non-offsettable memories.
16559 For pushes, it returns just stack offsets; the values will be saved
16560 in the right order. Maximally four parts are generated. */
16563 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
16568 size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
16570 size = (GET_MODE_SIZE (mode) + 4) / 8;
16572 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
16573 gcc_assert (size >= 2 && size <= 4);
16575 /* Optimize constant pool reference to immediates. This is used by fp
16576 moves, that force all constants to memory to allow combining. */
16577 if (MEM_P (operand) && MEM_READONLY_P (operand))
16579 rtx tmp = maybe_get_pool_constant (operand);
16584 if (MEM_P (operand) && !offsettable_memref_p (operand))
16586 /* The only non-offsettable memories we handle are pushes. */
16587 int ok = push_operand (operand, VOIDmode);
16591 operand = copy_rtx (operand);
16592 PUT_MODE (operand, Pmode);
16593 parts[0] = parts[1] = parts[2] = parts[3] = operand;
16597 if (GET_CODE (operand) == CONST_VECTOR)
16599 enum machine_mode imode = int_mode_for_mode (mode);
16600 /* Caution: if we looked through a constant pool memory above,
16601 the operand may actually have a different mode now. That's
16602 ok, since we want to pun this all the way back to an integer. */
16603 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
16604 gcc_assert (operand != NULL);
16610 if (mode == DImode)
16611 split_di (&operand, 1, &parts[0], &parts[1]);
16616 if (REG_P (operand))
16618 gcc_assert (reload_completed);
16619 for (i = 0; i < size; i++)
16620 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
16622 else if (offsettable_memref_p (operand))
16624 operand = adjust_address (operand, SImode, 0);
16625 parts[0] = operand;
16626 for (i = 1; i < size; i++)
16627 parts[i] = adjust_address (operand, SImode, 4 * i);
16629 else if (GET_CODE (operand) == CONST_DOUBLE)
16634 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16638 real_to_target (l, &r, mode);
16639 parts[3] = gen_int_mode (l[3], SImode);
16640 parts[2] = gen_int_mode (l[2], SImode);
16643 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
16644 parts[2] = gen_int_mode (l[2], SImode);
16647 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
16650 gcc_unreachable ();
16652 parts[1] = gen_int_mode (l[1], SImode);
16653 parts[0] = gen_int_mode (l[0], SImode);
16656 gcc_unreachable ();
16661 if (mode == TImode)
16662 split_ti (&operand, 1, &parts[0], &parts[1]);
16663 if (mode == XFmode || mode == TFmode)
16665 enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
16666 if (REG_P (operand))
16668 gcc_assert (reload_completed);
16669 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
16670 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
16672 else if (offsettable_memref_p (operand))
16674 operand = adjust_address (operand, DImode, 0);
16675 parts[0] = operand;
16676 parts[1] = adjust_address (operand, upper_mode, 8);
16678 else if (GET_CODE (operand) == CONST_DOUBLE)
16683 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16684 real_to_target (l, &r, mode);
16686 /* Do not use shift by 32 to avoid warning on 32bit systems. */
16687 if (HOST_BITS_PER_WIDE_INT >= 64)
16690 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
16691 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
16694 parts[0] = immed_double_const (l[0], l[1], DImode);
16696 if (upper_mode == SImode)
16697 parts[1] = gen_int_mode (l[2], SImode);
16698 else if (HOST_BITS_PER_WIDE_INT >= 64)
16701 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
16702 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
16705 parts[1] = immed_double_const (l[2], l[3], DImode);
16708 gcc_unreachable ();
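/* Example of the CONST_DOUBLE case above: on a 32-bit target a DFmode
   constant 1.0 (IEEE bit pattern 0x3ff0000000000000) comes back as two
   SImode immediates, parts[0] = 0x00000000 and parts[1] = 0x3ff00000,
   low word first.  */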
16715 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
16716 Return false when normal moves are needed; true when all required
16717 insns have been emitted. Operands 2-4 contain the input values
16718 in the correct order; operands 5-7 contain the output values. */
16721 ix86_split_long_move (rtx operands[])
16726 int collisions = 0;
16727 enum machine_mode mode = GET_MODE (operands[0]);
16728 bool collisionparts[4];
16730 /* The DFmode expanders may ask us to move double.
16731 For 64bit target this is single move. By hiding the fact
16732 here we simplify i386.md splitters. */
16733 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
16735 /* Optimize constant pool reference to immediates. This is used by
16736 fp moves, that force all constants to memory to allow combining. */
16738 if (MEM_P (operands[1])
16739 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
16740 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
16741 operands[1] = get_pool_constant (XEXP (operands[1], 0));
16742 if (push_operand (operands[0], VOIDmode))
16744 operands[0] = copy_rtx (operands[0]);
16745 PUT_MODE (operands[0], Pmode);
16748 operands[0] = gen_lowpart (DImode, operands[0]);
16749 operands[1] = gen_lowpart (DImode, operands[1]);
16750 emit_move_insn (operands[0], operands[1]);
16754 /* The only non-offsettable memory we handle is push. */
16755 if (push_operand (operands[0], VOIDmode))
16758 gcc_assert (!MEM_P (operands[0])
16759 || offsettable_memref_p (operands[0]));
16761 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
16762 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
16764 /* When emitting push, take care for source operands on the stack. */
16765 if (push && MEM_P (operands[1])
16766 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
16768 rtx src_base = XEXP (part[1][nparts - 1], 0);
16770 /* Compensate for the stack decrement by 4. */
16771 if (!TARGET_64BIT && nparts == 3
16772 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
16773 src_base = plus_constant (src_base, 4);
16775 /* src_base refers to the stack pointer and is
16776 automatically decreased by emitted push. */
16777 for (i = 0; i < nparts; i++)
16778 part[1][i] = change_address (part[1][i],
16779 GET_MODE (part[1][i]), src_base);
16782 /* We need to do copy in the right order in case an address register
16783 of the source overlaps the destination. */
16784 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
16788 for (i = 0; i < nparts; i++)
16791 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
16792 if (collisionparts[i])
16796 /* Collision in the middle part can be handled by reordering. */
16797 if (collisions == 1 && nparts == 3 && collisionparts [1])
16799 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16800 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16802 else if (collisions == 1
16804 && (collisionparts [1] || collisionparts [2]))
16806 if (collisionparts [1])
16808 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16809 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16813 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
16814 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
16818 /* If there are more collisions, we can't handle it by reordering.
16819 Do an lea to the last part and use only one colliding move. */
16820 else if (collisions > 1)
16826 base = part[0][nparts - 1];
16828 /* Handle the case when the last part isn't valid for lea.
16829 Happens in 64-bit mode storing the 12-byte XFmode. */
16830 if (GET_MODE (base) != Pmode)
16831 base = gen_rtx_REG (Pmode, REGNO (base));
16833 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
16834 part[1][0] = replace_equiv_address (part[1][0], base);
16835 for (i = 1; i < nparts; i++)
16837 tmp = plus_constant (base, UNITS_PER_WORD * i);
16838 part[1][i] = replace_equiv_address (part[1][i], tmp);
16849 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
16850 emit_insn (gen_addsi3 (stack_pointer_rtx,
16851 stack_pointer_rtx, GEN_INT (-4)));
16852 emit_move_insn (part[0][2], part[1][2]);
16854 else if (nparts == 4)
16856 emit_move_insn (part[0][3], part[1][3]);
16857 emit_move_insn (part[0][2], part[1][2]);
16862 /* In 64bit mode we don't have 32bit push available. In case this is a
16863 register, it is OK - we will just use the larger counterpart. We also
16864 retype memory - these come from an attempt to avoid REX prefixes on
16865 moves of the second half of a TFmode value. */
16866 if (GET_MODE (part[1][1]) == SImode)
16868 switch (GET_CODE (part[1][1]))
16871 part[1][1] = adjust_address (part[1][1], DImode, 0);
16875 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
16879 gcc_unreachable ();
16882 if (GET_MODE (part[1][0]) == SImode)
16883 part[1][0] = part[1][1];
16886 emit_move_insn (part[0][1], part[1][1]);
16887 emit_move_insn (part[0][0], part[1][0]);
16891 /* Choose correct order to not overwrite the source before it is copied. */
16892 if ((REG_P (part[0][0])
16893 && REG_P (part[1][1])
16894 && (REGNO (part[0][0]) == REGNO (part[1][1])
16896 && REGNO (part[0][0]) == REGNO (part[1][2]))
16898 && REGNO (part[0][0]) == REGNO (part[1][3]))))
16900 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
16902 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
16904 operands[2 + i] = part[0][j];
16905 operands[6 + i] = part[1][j];
16910 for (i = 0; i < nparts; i++)
16912 operands[2 + i] = part[0][i];
16913 operands[6 + i] = part[1][i];
16917 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
16918 if (optimize_insn_for_size_p ())
16920 for (j = 0; j < nparts - 1; j++)
16921 if (CONST_INT_P (operands[6 + j])
16922 && operands[6 + j] != const0_rtx
16923 && REG_P (operands[2 + j]))
16924 for (i = j; i < nparts - 1; i++)
16925 if (CONST_INT_P (operands[7 + i])
16926 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
16927 operands[7 + i] = operands[2 + j];
16930 for (i = 0; i < nparts; i++)
16931 emit_move_insn (operands[2 + i], operands[6 + i]);
16936 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
16937 left shift by a constant, either using a single shift or
16938 a sequence of add instructions. */
16941 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
16945 emit_insn ((mode == DImode
16947 : gen_adddi3) (operand, operand, operand));
16949 else if (!optimize_insn_for_size_p ()
16950 && count * ix86_cost->add <= ix86_cost->shift_const)
16953 for (i=0; i<count; i++)
16955 emit_insn ((mode == DImode
16957 : gen_adddi3) (operand, operand, operand));
16961 emit_insn ((mode == DImode
16963 : gen_ashldi3) (operand, operand, GEN_INT (count)));
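/* Example: for "x <<= 2", when two adds are no more costly than one
   constant shift on the target, this emits

	addl	%eax, %eax
	addl	%eax, %eax

   instead of "shll $2, %eax"; a count of 1 always uses the single add.  */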
16967 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
16969 rtx low[2], high[2];
16971 const int single_width = mode == DImode ? 32 : 64;
16973 if (CONST_INT_P (operands[2]))
16975 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16976 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16978 if (count >= single_width)
16980 emit_move_insn (high[0], low[1]);
16981 emit_move_insn (low[0], const0_rtx);
16983 if (count > single_width)
16984 ix86_expand_ashl_const (high[0], count - single_width, mode);
16988 if (!rtx_equal_p (operands[0], operands[1]))
16989 emit_move_insn (operands[0], operands[1]);
16990 emit_insn ((mode == DImode
16992 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
16993 ix86_expand_ashl_const (low[0], count, mode);
16998 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17000 if (operands[1] == const1_rtx)
17002 /* Assuming we've chosen QImode-capable registers, 1 << N
17003 can be done with two 32/64-bit shifts, no branches, no cmoves. */
17004 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
17006 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
17008 ix86_expand_clear (low[0]);
17009 ix86_expand_clear (high[0]);
17010 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
17012 d = gen_lowpart (QImode, low[0]);
17013 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17014 s = gen_rtx_EQ (QImode, flags, const0_rtx);
17015 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17017 d = gen_lowpart (QImode, high[0]);
17018 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17019 s = gen_rtx_NE (QImode, flags, const0_rtx);
17020 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17023 /* Otherwise, we can get the same results by manually performing
17024 a bit extract operation on bit 5/6, and then performing the two
17025 shifts. The two methods of getting 0/1 into low/high are exactly
17026 the same size. Avoiding the shift in the bit extract case helps
17027 pentium4 a bit; no one else seems to care much either way. */
17032 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
17033 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
17035 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
17036 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
17038 emit_insn ((mode == DImode
17040 : gen_lshrdi3) (high[0], high[0],
17041 GEN_INT (mode == DImode ? 5 : 6)));
17042 emit_insn ((mode == DImode
17044 : gen_anddi3) (high[0], high[0], const1_rtx));
17045 emit_move_insn (low[0], high[0]);
17046 emit_insn ((mode == DImode
17048 : gen_xordi3) (low[0], low[0], const1_rtx));
17051 emit_insn ((mode == DImode
17053 : gen_ashldi3) (low[0], low[0], operands[2]));
17054 emit_insn ((mode == DImode
17056 : gen_ashldi3) (high[0], high[0], operands[2]));
17060 if (operands[1] == constm1_rtx)
17062 /* For -1 << N, we can avoid the shld instruction, because we
17063 know that we're shifting 0...31/63 ones into a -1. */
17064 emit_move_insn (low[0], constm1_rtx);
17065 if (optimize_insn_for_size_p ())
17066 emit_move_insn (high[0], low[0]);
17068 emit_move_insn (high[0], constm1_rtx);
17072 if (!rtx_equal_p (operands[0], operands[1]))
17073 emit_move_insn (operands[0], operands[1]);
17075 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17076 emit_insn ((mode == DImode
17078 : gen_x86_64_shld) (high[0], low[0], operands[2]));
17081 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
17083 if (TARGET_CMOVE && scratch)
17085 ix86_expand_clear (scratch);
17086 emit_insn ((mode == DImode
17087 ? gen_x86_shift_adj_1
17088 : gen_x86_64_shift_adj_1) (high[0], low[0], operands[2],
17092 emit_insn ((mode == DImode
17093 ? gen_x86_shift_adj_2
17094 : gen_x86_64_shift_adj_2) (high[0], low[0], operands[2]));
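/* The variable-count path above is the standard double-word left shift.
   A C-like sketch of the DImode-on-32-bit case (x86 masks the count to
   5 bits, so n acts mod 32 in the first two steps; the last step is the
   gen_x86_shift_adj_1/_2 fixup):

     hi = (hi << n) | (lo >> (32 - n));	   shld
     lo = lo << n;			   sal
     if (n & 32)
       { hi = lo; lo = 0; }
*/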
17098 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
17100 rtx low[2], high[2];
17102 const int single_width = mode == DImode ? 32 : 64;
17104 if (CONST_INT_P (operands[2]))
17106 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17107 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17109 if (count == single_width * 2 - 1)
17111 emit_move_insn (high[0], high[1]);
17112 emit_insn ((mode == DImode
17114 : gen_ashrdi3) (high[0], high[0],
17115 GEN_INT (single_width - 1)));
17116 emit_move_insn (low[0], high[0]);
17119 else if (count >= single_width)
17121 emit_move_insn (low[0], high[1]);
17122 emit_move_insn (high[0], low[0]);
17123 emit_insn ((mode == DImode
17125 : gen_ashrdi3) (high[0], high[0],
17126 GEN_INT (single_width - 1)));
17127 if (count > single_width)
17128 emit_insn ((mode == DImode
17130 : gen_ashrdi3) (low[0], low[0],
17131 GEN_INT (count - single_width)));
17135 if (!rtx_equal_p (operands[0], operands[1]))
17136 emit_move_insn (operands[0], operands[1]);
17137 emit_insn ((mode == DImode
17139 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17140 emit_insn ((mode == DImode
17142 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
17147 if (!rtx_equal_p (operands[0], operands[1]))
17148 emit_move_insn (operands[0], operands[1]);
17150 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17152 emit_insn ((mode == DImode
17154 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17155 emit_insn ((mode == DImode
17157 : gen_ashrdi3) (high[0], high[0], operands[2]));
17159 if (TARGET_CMOVE && scratch)
17161 emit_move_insn (scratch, high[0]);
17162 emit_insn ((mode == DImode
17164 : gen_ashrdi3) (scratch, scratch,
17165 GEN_INT (single_width - 1)));
17166 emit_insn ((mode == DImode
17167 ? gen_x86_shift_adj_1
17168 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
17172 emit_insn ((mode == DImode
17173 ? gen_x86_shift_adj_3
17174 : gen_x86_64_shift_adj_3) (low[0], high[0], operands[2]));
17179 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
17181 rtx low[2], high[2];
17183 const int single_width = mode == DImode ? 32 : 64;
17185 if (CONST_INT_P (operands[2]))
17187 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17188 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17190 if (count >= single_width)
17192 emit_move_insn (low[0], high[1]);
17193 ix86_expand_clear (high[0]);
17195 if (count > single_width)
17196 emit_insn ((mode == DImode
17198 : gen_lshrdi3) (low[0], low[0],
17199 GEN_INT (count - single_width)));
17203 if (!rtx_equal_p (operands[0], operands[1]))
17204 emit_move_insn (operands[0], operands[1]);
17205 emit_insn ((mode == DImode
17207 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17208 emit_insn ((mode == DImode
17210 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
17215 if (!rtx_equal_p (operands[0], operands[1]))
17216 emit_move_insn (operands[0], operands[1]);
17218 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17220 emit_insn ((mode == DImode
17222 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17223 emit_insn ((mode == DImode
17225 : gen_lshrdi3) (high[0], high[0], operands[2]));
17227 /* Heh. By reversing the arguments, we can reuse this pattern. */
17228 if (TARGET_CMOVE && scratch)
17230 ix86_expand_clear (scratch);
17231 emit_insn ((mode == DImode
17232 ? gen_x86_shift_adj_1
17233 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
17237 emit_insn ((mode == DImode
17238 ? gen_x86_shift_adj_2
17239 : gen_x86_64_shift_adj_2) (low[0], high[0], operands[2]));
17243 /* Predict just emitted jump instruction to be taken with probability PROB. */
17245 predict_jump (int prob)
17247 rtx insn = get_last_insn ();
17248 gcc_assert (JUMP_P (insn));
17249 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
17252 /* Helper function for the string operations below. Test VARIABLE for whether
17253 it is aligned to VALUE bytes. If true, jump to the label. */
17255 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
17257 rtx label = gen_label_rtx ();
17258 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
17259 if (GET_MODE (variable) == DImode)
17260 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
17262 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
17263 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
17266 predict_jump (REG_BR_PROB_BASE * 50 / 100);
17268 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17272 /* Adjust COUNTER by the VALUE. */
17274 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
17276 if (GET_MODE (countreg) == DImode)
17277 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
17279 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
17282 /* Zero extend possibly SImode EXP to Pmode register. */
17284 ix86_zero_extend_to_Pmode (rtx exp)
17287 if (GET_MODE (exp) == VOIDmode)
17288 return force_reg (Pmode, exp);
17289 if (GET_MODE (exp) == Pmode)
17290 return copy_to_mode_reg (Pmode, exp);
17291 r = gen_reg_rtx (Pmode);
17292 emit_insn (gen_zero_extendsidi2 (r, exp));
17296 /* Divide COUNTREG by SCALE. */
17298 scale_counter (rtx countreg, int scale)
17304 if (CONST_INT_P (countreg))
17305 return GEN_INT (INTVAL (countreg) / scale);
17306 gcc_assert (REG_P (countreg));
17308 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
17309 GEN_INT (exact_log2 (scale)),
17310 NULL, 1, OPTAB_DIRECT);
17314 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
17315 DImode for constant loop counts. */
17317 static enum machine_mode
17318 counter_mode (rtx count_exp)
17320 if (GET_MODE (count_exp) != VOIDmode)
17321 return GET_MODE (count_exp);
17322 if (!CONST_INT_P (count_exp))
17324 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
17329 /* When SRCPTR is non-NULL, output a simple loop to move memory
17330 from SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times;
17331 the overall size is COUNT, specified in bytes. When SRCPTR is NULL, output the
17332 equivalent loop to set memory to VALUE (VALUE is supposed to be in MODE).
17334 The size is rounded down to a whole number of chunks moved at once.
17335 SRCMEM and DESTMEM provide MEM rtx to feed proper aliasing info. */
17339 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
17340 rtx destptr, rtx srcptr, rtx value,
17341 rtx count, enum machine_mode mode, int unroll,
17344 rtx out_label, top_label, iter, tmp;
17345 enum machine_mode iter_mode = counter_mode (count);
17346 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
17347 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
17353 top_label = gen_label_rtx ();
17354 out_label = gen_label_rtx ();
17355 iter = gen_reg_rtx (iter_mode);
17357 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
17358 NULL, 1, OPTAB_DIRECT);
17359 /* Those two should combine. */
17360 if (piece_size == const1_rtx)
17362 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
17364 predict_jump (REG_BR_PROB_BASE * 10 / 100);
17366 emit_move_insn (iter, const0_rtx);
17368 emit_label (top_label);
17370 tmp = convert_modes (Pmode, iter_mode, iter, true);
17371 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
17372 destmem = change_address (destmem, mode, x_addr);
17376 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
17377 srcmem = change_address (srcmem, mode, y_addr);
17379 /* When unrolling for chips that reorder memory reads and writes,
17380 we can save registers by using a single temporary.
17381 Also, using 4 temporaries is overkill in 32bit mode. */
17382 if (!TARGET_64BIT && 0)
17384 for (i = 0; i < unroll; i++)
17389 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17391 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17393 emit_move_insn (destmem, srcmem);
17399 gcc_assert (unroll <= 4);
17400 for (i = 0; i < unroll; i++)
17402 tmpreg[i] = gen_reg_rtx (mode);
17406 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17408 emit_move_insn (tmpreg[i], srcmem);
17410 for (i = 0; i < unroll; i++)
17415 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17417 emit_move_insn (destmem, tmpreg[i]);
17422 for (i = 0; i < unroll; i++)
17426 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17427 emit_move_insn (destmem, value);
17430 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
17431 true, OPTAB_LIB_WIDEN);
17433 emit_move_insn (iter, tmp);
17435 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
17437 if (expected_size != -1)
17439 expected_size /= GET_MODE_SIZE (mode) * unroll;
17440 if (expected_size == 0)
17442 else if (expected_size > REG_BR_PROB_BASE)
17443 predict_jump (REG_BR_PROB_BASE - 1);
17445 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
17448 predict_jump (REG_BR_PROB_BASE * 80 / 100);
17449 iter = ix86_zero_extend_to_Pmode (iter);
17450 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
17451 true, OPTAB_LIB_WIDEN);
17452 if (tmp != destptr)
17453 emit_move_insn (destptr, tmp);
17456 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
17457 true, OPTAB_LIB_WIDEN);
17459 emit_move_insn (srcptr, tmp);
17461 emit_label (out_label);
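/* Shape of the loop emitted above, as a sketch (assuming UNROLL = 4 and
   MODE = SImode, so one iteration handles 16 bytes):

     size = count & ~15;
     for (iter = 0; iter < size; iter += 16)
       copy (or store) four SImode words at srcptr/destptr + iter;
     destptr += iter;  srcptr += iter;

   Any remaining tail is left to the epilogue expanders below.  */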
17464 /* Output "rep; mov" instruction.
17465 Arguments have same meaning as for previous function */
17467 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
17468 rtx destptr, rtx srcptr,
17470 enum machine_mode mode)
17476 /* If the size is known, it is shorter to use rep movs. */
17477 if (mode == QImode && CONST_INT_P (count)
17478 && !(INTVAL (count) & 3))
17479 mode = SImode;
17481 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17482 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17483 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
17484 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
17485 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17486 if (mode != QImode)
17488 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17489 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17490 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17491 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
17492 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17493 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
17497 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17498 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
17500 if (CONST_INT_P (count))
17502 count = GEN_INT (INTVAL (count)
17503 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17504 destmem = shallow_copy_rtx (destmem);
17505 srcmem = shallow_copy_rtx (srcmem);
17506 set_mem_size (destmem, count);
17507 set_mem_size (srcmem, count);
17511 if (MEM_SIZE (destmem))
17512 set_mem_size (destmem, NULL_RTX);
17513 if (MEM_SIZE (srcmem))
17514 set_mem_size (srcmem, NULL_RTX);
17516 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
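/* For MODE == SImode and a variable count the net effect is roughly

	movl	count, %ecx
	shrl	$2, %ecx
	rep movsl

   with %esi and %edi already holding srcptr and destptr; the low two
   bits of the count are handled separately by the epilogue.  */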
17520 /* Output "rep; stos" instruction.
17521 Arguments have same meaning as for previous function */
17523 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
17524 rtx count, enum machine_mode mode,
17530 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17531 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17532 value = force_reg (mode, gen_lowpart (mode, value));
17533 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17534 if (mode != QImode)
17536 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17537 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17538 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17541 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17542 if (orig_value == const0_rtx && CONST_INT_P (count))
17544 count = GEN_INT (INTVAL (count)
17545 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17546 destmem = shallow_copy_rtx (destmem);
17547 set_mem_size (destmem, count);
17549 else if (MEM_SIZE (destmem))
17550 set_mem_size (destmem, NULL_RTX);
17551 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
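/* Analogous to the rep-mov case above: VALUE is forced into a register
   (%eax for the stos family) and, for MODE == SImode, this emits roughly

	shrl	$2, %ecx
	rep stosl

   filling the block a dword at a time.  */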
17555 emit_strmov (rtx destmem, rtx srcmem,
17556 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
17558 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
17559 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
17560 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17563 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
17565 expand_movmem_epilogue (rtx destmem, rtx srcmem,
17566 rtx destptr, rtx srcptr, rtx count, int max_size)
17569 if (CONST_INT_P (count))
17571 HOST_WIDE_INT countval = INTVAL (count);
17574 if ((countval & 0x10) && max_size > 16)
17578 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17579 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
17582 gcc_unreachable ();
17585 if ((countval & 0x08) && max_size > 8)
17588 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17591 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17592 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
17596 if ((countval & 0x04) && max_size > 4)
17598 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17601 if ((countval & 0x02) && max_size > 2)
17603 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
17606 if ((countval & 0x01) && max_size > 1)
17608 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
17615 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
17616 count, 1, OPTAB_DIRECT);
17617 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
17618 count, QImode, 1, 4);
17622 /* When there are stringops, we can cheaply increase dest and src pointers.
17623 Otherwise we save code size by maintaining offset (zero is readily
17624 available from preceding rep operation) and using x86 addressing modes.
17626 if (TARGET_SINGLE_STRINGOP)
17630 rtx label = ix86_expand_aligntest (count, 4, true);
17631 src = change_address (srcmem, SImode, srcptr);
17632 dest = change_address (destmem, SImode, destptr);
17633 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17634 emit_label (label);
17635 LABEL_NUSES (label) = 1;
17639 rtx label = ix86_expand_aligntest (count, 2, true);
17640 src = change_address (srcmem, HImode, srcptr);
17641 dest = change_address (destmem, HImode, destptr);
17642 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17643 emit_label (label);
17644 LABEL_NUSES (label) = 1;
17648 rtx label = ix86_expand_aligntest (count, 1, true);
17649 src = change_address (srcmem, QImode, srcptr);
17650 dest = change_address (destmem, QImode, destptr);
17651 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17652 emit_label (label);
17653 LABEL_NUSES (label) = 1;
17658 rtx offset = force_reg (Pmode, const0_rtx);
17663 rtx label = ix86_expand_aligntest (count, 4, true);
17664 src = change_address (srcmem, SImode, srcptr);
17665 dest = change_address (destmem, SImode, destptr);
17666 emit_move_insn (dest, src);
17667 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
17668 true, OPTAB_LIB_WIDEN);
17670 emit_move_insn (offset, tmp);
17671 emit_label (label);
17672 LABEL_NUSES (label) = 1;
17676 rtx label = ix86_expand_aligntest (count, 2, true);
17677 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17678 src = change_address (srcmem, HImode, tmp);
17679 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17680 dest = change_address (destmem, HImode, tmp);
17681 emit_move_insn (dest, src);
17682 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
17683 true, OPTAB_LIB_WIDEN);
17685 emit_move_insn (offset, tmp);
17686 emit_label (label);
17687 LABEL_NUSES (label) = 1;
17691 rtx label = ix86_expand_aligntest (count, 1, true);
17692 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17693 src = change_address (srcmem, QImode, tmp);
17694 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17695 dest = change_address (destmem, QImode, tmp);
17696 emit_move_insn (dest, src);
17697 emit_label (label);
17698 LABEL_NUSES (label) = 1;
17703 /* Output code to set at most count & (max_size - 1) bytes starting by DEST. */
17705 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
17706 rtx count, int max_size)
17709 expand_simple_binop (counter_mode (count), AND, count,
17710 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
17711 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
17712 gen_lowpart (QImode, value), count, QImode,
17716 /* Output code to set at most count & (max_size - 1) bytes starting by DEST. */
17718 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
17722 if (CONST_INT_P (count))
17724 HOST_WIDE_INT countval = INTVAL (count);
17727 if ((countval & 0x10) && max_size > 16)
17731 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17732 emit_insn (gen_strset (destptr, dest, value));
17733 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
17734 emit_insn (gen_strset (destptr, dest, value));
17737 gcc_unreachable ();
17740 if ((countval & 0x08) && max_size > 8)
17744 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17745 emit_insn (gen_strset (destptr, dest, value));
17749 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17750 emit_insn (gen_strset (destptr, dest, value));
17751 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
17752 emit_insn (gen_strset (destptr, dest, value));
17756 if ((countval & 0x04) && max_size > 4)
17758 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17759 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17762 if ((countval & 0x02) && max_size > 2)
17764 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
17765 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17768 if ((countval & 0x01) && max_size > 1)
17770 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
17771 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17778 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
17783 rtx label = ix86_expand_aligntest (count, 16, true);
17786 dest = change_address (destmem, DImode, destptr);
17787 emit_insn (gen_strset (destptr, dest, value));
17788 emit_insn (gen_strset (destptr, dest, value));
17792 dest = change_address (destmem, SImode, destptr);
17793 emit_insn (gen_strset (destptr, dest, value));
17794 emit_insn (gen_strset (destptr, dest, value));
17795 emit_insn (gen_strset (destptr, dest, value));
17796 emit_insn (gen_strset (destptr, dest, value));
17798 emit_label (label);
17799 LABEL_NUSES (label) = 1;
17803 rtx label = ix86_expand_aligntest (count, 8, true);
17806 dest = change_address (destmem, DImode, destptr);
17807 emit_insn (gen_strset (destptr, dest, value));
17811 dest = change_address (destmem, SImode, destptr);
17812 emit_insn (gen_strset (destptr, dest, value));
17813 emit_insn (gen_strset (destptr, dest, value));
17815 emit_label (label);
17816 LABEL_NUSES (label) = 1;
17820 rtx label = ix86_expand_aligntest (count, 4, true);
17821 dest = change_address (destmem, SImode, destptr);
17822 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17823 emit_label (label);
17824 LABEL_NUSES (label) = 1;
17828 rtx label = ix86_expand_aligntest (count, 2, true);
17829 dest = change_address (destmem, HImode, destptr);
17830 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17831 emit_label (label);
17832 LABEL_NUSES (label) = 1;
17836 rtx label = ix86_expand_aligntest (count, 1, true);
17837 dest = change_address (destmem, QImode, destptr);
17838 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17839 emit_label (label);
17840 LABEL_NUSES (label) = 1;
17844 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN,
17845 to DESIRED_ALIGNMENT. */
17847 expand_movmem_prologue (rtx destmem, rtx srcmem,
17848 rtx destptr, rtx srcptr, rtx count,
17849 int align, int desired_alignment)
17851 if (align <= 1 && desired_alignment > 1)
17853 rtx label = ix86_expand_aligntest (destptr, 1, false);
17854 srcmem = change_address (srcmem, QImode, srcptr);
17855 destmem = change_address (destmem, QImode, destptr);
17856 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17857 ix86_adjust_counter (count, 1);
17858 emit_label (label);
17859 LABEL_NUSES (label) = 1;
17861 if (align <= 2 && desired_alignment > 2)
17863 rtx label = ix86_expand_aligntest (destptr, 2, false);
17864 srcmem = change_address (srcmem, HImode, srcptr);
17865 destmem = change_address (destmem, HImode, destptr);
17866 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17867 ix86_adjust_counter (count, 2);
17868 emit_label (label);
17869 LABEL_NUSES (label) = 1;
17871 if (align <= 4 && desired_alignment > 4)
17873 rtx label = ix86_expand_aligntest (destptr, 4, false);
17874 srcmem = change_address (srcmem, SImode, srcptr);
17875 destmem = change_address (destmem, SImode, destptr);
17876 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17877 ix86_adjust_counter (count, 4);
17878 emit_label (label);
17879 LABEL_NUSES (label) = 1;
17881 gcc_assert (desired_alignment <= 8);
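/* E.g. aligning an arbitrarily aligned destination to 8 bytes emits up
   to three guarded copies: a QImode move if destptr is odd, a HImode
   move if bit 1 is set, and an SImode move if bit 2 is set, each wrapped
   in the test-and-skip sequence from ix86_expand_aligntest.  */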
17884 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
17885 ALIGN_BYTES is how many bytes need to be copied. */
17887 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
17888 int desired_align, int align_bytes)
17891 rtx src_size, dst_size;
17893 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
17894 if (src_align_bytes >= 0)
17895 src_align_bytes = desired_align - src_align_bytes;
17896 src_size = MEM_SIZE (src);
17897 dst_size = MEM_SIZE (dst);
17898 if (align_bytes & 1)
17900 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
17901 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
17903 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17905 if (align_bytes & 2)
17907 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
17908 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
17909 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
17910 set_mem_align (dst, 2 * BITS_PER_UNIT);
17911 if (src_align_bytes >= 0
17912 && (src_align_bytes & 1) == (align_bytes & 1)
17913 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
17914 set_mem_align (src, 2 * BITS_PER_UNIT);
17916 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17918 if (align_bytes & 4)
17920 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
17921 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
17922 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
17923 set_mem_align (dst, 4 * BITS_PER_UNIT);
17924 if (src_align_bytes >= 0)
17926 unsigned int src_align = 0;
17927 if ((src_align_bytes & 3) == (align_bytes & 3))
17929 else if ((src_align_bytes & 1) == (align_bytes & 1))
17931 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17932 set_mem_align (src, src_align * BITS_PER_UNIT);
17935 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17937 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
17938 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
17939 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
17940 set_mem_align (dst, desired_align * BITS_PER_UNIT);
17941 if (src_align_bytes >= 0)
17943 unsigned int src_align = 0;
17944 if ((src_align_bytes & 7) == (align_bytes & 7))
17946 else if ((src_align_bytes & 3) == (align_bytes & 3))
17948 else if ((src_align_bytes & 1) == (align_bytes & 1))
17950 if (src_align > (unsigned int) desired_align)
17951 src_align = desired_align;
17952 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17953 set_mem_align (src, src_align * BITS_PER_UNIT);
17956 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
17958 set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
17963 /* Set enough of DEST to align DEST, known to be aligned by ALIGN,
17964 to DESIRED_ALIGNMENT. */
17966 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
17967 int align, int desired_alignment)
17969 if (align <= 1 && desired_alignment > 1)
17971 rtx label = ix86_expand_aligntest (destptr, 1, false);
17972 destmem = change_address (destmem, QImode, destptr);
17973 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
17974 ix86_adjust_counter (count, 1);
17975 emit_label (label);
17976 LABEL_NUSES (label) = 1;
17978 if (align <= 2 && desired_alignment > 2)
17980 rtx label = ix86_expand_aligntest (destptr, 2, false);
17981 destmem = change_address (destmem, HImode, destptr);
17982 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
17983 ix86_adjust_counter (count, 2);
17984 emit_label (label);
17985 LABEL_NUSES (label) = 1;
17987 if (align <= 4 && desired_alignment > 4)
17989 rtx label = ix86_expand_aligntest (destptr, 4, false);
17990 destmem = change_address (destmem, SImode, destptr);
17991 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
17992 ix86_adjust_counter (count, 4);
17993 emit_label (label);
17994 LABEL_NUSES (label) = 1;
17996 gcc_assert (desired_alignment <= 8);
17999 /* Store enough into DST to make it aligned to DESIRED_ALIGN.
18000 ALIGN_BYTES is how many bytes need to be stored. */
18002 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
18003 int desired_align, int align_bytes)
18006 rtx dst_size = MEM_SIZE (dst);
18007 if (align_bytes & 1)
18009 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
18011 emit_insn (gen_strset (destreg, dst,
18012 gen_lowpart (QImode, value)));
18014 if (align_bytes & 2)
18016 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
18017 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
18018 set_mem_align (dst, 2 * BITS_PER_UNIT);
18020 emit_insn (gen_strset (destreg, dst,
18021 gen_lowpart (HImode, value)));
18023 if (align_bytes & 4)
18025 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
18026 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
18027 set_mem_align (dst, 4 * BITS_PER_UNIT);
18029 emit_insn (gen_strset (destreg, dst,
18030 gen_lowpart (SImode, value)));
18032 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
18033 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
18034 set_mem_align (dst, desired_align * BITS_PER_UNIT);
18036 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
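/* A plain-C sketch, for illustration only: with ALIGN_BYTES known at compile
   time, the prologue above degenerates into straight-line stores selected by
   the low bits of ALIGN_BYTES.  The helper name and variables are invented
   for this sketch.  */
#if 0
#include <string.h>

static char *
constant_setmem_prologue_sketch (char *dst, unsigned char v, int align_bytes)
{
  unsigned short v2 = (unsigned short) (v * 0x0101u);    /* 2-byte pattern */
  unsigned int v4 = v * 0x01010101u;                     /* 4-byte pattern */
  if (align_bytes & 1) { memcpy (dst, &v, 1); dst += 1; }
  if (align_bytes & 2) { memcpy (dst, &v2, 2); dst += 2; }
  if (align_bytes & 4) { memcpy (dst, &v4, 4); dst += 4; }
  return dst;
}
#endif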
18040 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
18041 static enum stringop_alg
18042 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
18043 int *dynamic_check)
18045 const struct stringop_algs * algs;
18046 bool optimize_for_speed;
18047 /* Algorithms using the rep prefix want at least edi and ecx;
18048 additionally, memset wants eax and memcpy wants esi. Don't
18049 consider such algorithms if the user has appropriated those
18050 registers for their own purposes. */
18051 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
18053 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
18055 #define ALG_USABLE_P(alg) (rep_prefix_usable \
18056 || (alg != rep_prefix_1_byte \
18057 && alg != rep_prefix_4_byte \
18058 && alg != rep_prefix_8_byte))
18059 const struct processor_costs *cost;
18061 /* Even if the string operation call is cold, we still might spend a lot
18062 of time processing large blocks. */
18063 if (optimize_function_for_size_p (cfun)
18064 || (optimize_insn_for_size_p ()
18065 && expected_size != -1 && expected_size < 256))
18066 optimize_for_speed = false;
18068 optimize_for_speed = true;
18070 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
18072 *dynamic_check = -1;
18074 algs = &cost->memset[TARGET_64BIT != 0];
18076 algs = &cost->memcpy[TARGET_64BIT != 0];
18077 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
18078 return stringop_alg;
18079 /* rep; movq or rep; movl is the smallest variant. */
18080 else if (!optimize_for_speed)
18082 if (!count || (count & 3))
18083 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
18085 return rep_prefix_usable ? rep_prefix_4_byte : loop;
18087 /* Very tiny blocks are best handled via the loop; REP is expensive to set up. */
18089 else if (expected_size != -1 && expected_size < 4)
18090 return loop_1_byte;
18091 else if (expected_size != -1)
18094 enum stringop_alg alg = libcall;
18095 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18097 /* We get here if the algorithms that were not libcall-based
18098 were rep-prefix based and we are unable to use rep prefixes
18099 based on global register usage. Break out of the loop and
18100 use the heuristic below. */
18101 if (algs->size[i].max == 0)
18103 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
18105 enum stringop_alg candidate = algs->size[i].alg;
18107 if (candidate != libcall && ALG_USABLE_P (candidate))
18109 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
18110 last non-libcall inline algorithm. */
18111 if (TARGET_INLINE_ALL_STRINGOPS)
18113 /* When the current size is best copied by a libcall,
18114 but we are still forced to inline, run the heuristic below
18115 that will pick code for medium-sized blocks. */
18116 if (alg != libcall)
18120 else if (ALG_USABLE_P (candidate))
18124 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
18126 /* When asked to inline the call anyway, try to pick a meaningful choice.
18127 We look for the maximal size of block that is faster to copy by hand and
18128 take blocks of at most that size, guessing that the average size will
18129 be roughly half of the block.
18131 If this turns out to be bad, we might simply specify the preferred
18132 choice in ix86_costs. */
18133 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18134 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
18137 enum stringop_alg alg;
18139 bool any_alg_usable_p = true;
18143 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18143 enum stringop_alg candidate = algs->size[i].alg;
18144 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
18146 if (candidate != libcall && candidate
18147 && ALG_USABLE_P (candidate))
18148 max = algs->size[i].max;
18150 /* If there aren't any usable algorithms, then recursing on
18151 smaller sizes isn't going to find anything. Just return the
18152 simple byte-at-a-time copy loop. */
18153 if (!any_alg_usable_p)
18155 /* Pick something reasonable. */
18156 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18157 *dynamic_check = 128;
18158 return loop_1_byte;
18162 alg = decide_alg (count, max / 2, memset, dynamic_check);
18163 gcc_assert (*dynamic_check == -1);
18164 gcc_assert (alg != libcall);
18165 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18166 *dynamic_check = max;
18169 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
18170 #undef ALG_USABLE_P
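/* A plain-C sketch, for illustration only: the table walk above amounts to
   picking the first per-size entry whose MAX covers the expected size, where
   max == -1 means "unbounded".  All names below are invented for this
   sketch, not GCC API.  */
#if 0
struct alg_entry { int max; int alg; };

static int
pick_alg_sketch (const struct alg_entry *table, int n,
		 int expected_size, int fallback_alg)
{
  int i;
  for (i = 0; i < n; i++)
    if (table[i].max == -1 || table[i].max >= expected_size)
      return table[i].alg;
  return fallback_alg;  /* corresponds to unknown_size/libcall above */
}
#endif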
18173 /* Decide on alignment. We know that the operand is already aligned to ALIGN
18174 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
18176 decide_alignment (int align,
18177 enum stringop_alg alg,
18180 int desired_align = 0;
18184 gcc_unreachable ();
18186 case unrolled_loop:
18187 desired_align = GET_MODE_SIZE (Pmode);
18189 case rep_prefix_8_byte:
18192 case rep_prefix_4_byte:
18193 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
18194 copying a whole cache line at once. */
18195 if (TARGET_PENTIUMPRO)
18200 case rep_prefix_1_byte:
18201 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
18202 copying a whole cache line at once. */
18203 if (TARGET_PENTIUMPRO)
18217 if (desired_align < align)
18218 desired_align = align;
18219 if (expected_size != -1 && expected_size < 4)
18220 desired_align = align;
18221 return desired_align;
18224 /* Return the smallest power of 2 greater than VAL. */
18226 smallest_pow2_greater_than (int val)
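/* The body of smallest_pow2_greater_than is elided above; a minimal sketch
   consistent with its comment and its uses (return the smallest power of
   two strictly greater than VAL) would be:  */
#if 0
static int
smallest_pow2_greater_than_sketch (int val)
{
  int ret = 1;
  while (ret <= val)
    ret <<= 1;
  return ret;
}
#endif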
18234 /* Expand string move (memcpy) operation. Use i386 string operations when
18235 profitable. expand_setmem contains similar code. The code depends upon
18236 architecture, block size and alignment, but always has the same
18237 overall structure:
18239 1) Prologue guard: Conditional that jumps up to epilogues for small
18240 blocks that can be handled by epilogue alone. This is faster but
18241 also needed for correctness, since the prologue assumes the block is larger
18242 than the desired alignment.
18244 Optional dynamic check for size and libcall for large
18245 blocks is emitted here too, with -minline-stringops-dynamically.
18247 2) Prologue: copy first few bytes in order to get destination aligned
18248 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
18249 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
18250 We emit either a jump tree on power of two sized blocks, or a byte loop.
18252 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
18253 with specified algorithm.
18255 4) Epilogue: code copying tail of the block that is too small to be
18256 handled by main body (or up to size guarded by prologue guard). */
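/* A plain-C sketch, for illustration only: the four steps enumerated above
   give the generated code roughly this shape.  The helper names are invented
   for this sketch; the real work is emitted as RTL.  */
#if 0
extern int dst_is_misaligned (const char *);
extern void copy_small_piece (char **, const char **, unsigned long *);
extern void copy_chunk (char **, const char **, unsigned long *, unsigned long);
extern void copy_tail (char *, const char *, unsigned long);

static void
movmem_shape_sketch (char *dst, const char *src, unsigned long count,
		     unsigned long size_needed,
		     unsigned long epilogue_size_needed)
{
  /* 1: prologue guard; small blocks go straight to the epilogue.  Since
     epilogue_size_needed is a power of two and count is smaller here,
     count & (epilogue_size_needed - 1) == count on this path.  */
  if (count < epilogue_size_needed)
    goto epilogue;
  /* 2: alignment prologue.  */
  while (dst_is_misaligned (dst))
    copy_small_piece (&dst, &src, &count);
  /* 3: main body, SIZE_NEEDED bytes per iteration.  */
  while (count >= size_needed)
    copy_chunk (&dst, &src, &count, size_needed);
 epilogue:
  /* 4: epilogue for the remaining tail.  */
  copy_tail (dst, src, count & (epilogue_size_needed - 1));
}
#endif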
18259 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
18260 rtx expected_align_exp, rtx expected_size_exp)
18266 rtx jump_around_label = NULL;
18267 HOST_WIDE_INT align = 1;
18268 unsigned HOST_WIDE_INT count = 0;
18269 HOST_WIDE_INT expected_size = -1;
18270 int size_needed = 0, epilogue_size_needed;
18271 int desired_align = 0, align_bytes = 0;
18272 enum stringop_alg alg;
18274 bool need_zero_guard = false;
18276 if (CONST_INT_P (align_exp))
18277 align = INTVAL (align_exp);
18278 /* i386 can do misaligned access at a reasonably increased cost. */
18279 if (CONST_INT_P (expected_align_exp)
18280 && INTVAL (expected_align_exp) > align)
18281 align = INTVAL (expected_align_exp);
18282 /* ALIGN is the minimum of destination and source alignment, but we care here
18283 just about destination alignment. */
18284 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
18285 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
18287 if (CONST_INT_P (count_exp))
18288 count = expected_size = INTVAL (count_exp);
18289 if (CONST_INT_P (expected_size_exp) && count == 0)
18290 expected_size = INTVAL (expected_size_exp);
18292 /* Make sure we don't need to care about overflow later on. */
18293 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18296 /* Step 0: Decide on preferred algorithm, desired alignment and
18297 size of chunks to be copied by main loop. */
18299 alg = decide_alg (count, expected_size, false, &dynamic_check);
18300 desired_align = decide_alignment (align, alg, expected_size);
18302 if (!TARGET_ALIGN_STRINGOPS)
18303 align = desired_align;
18305 if (alg == libcall)
18307 gcc_assert (alg != no_stringop);
18309 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
18310 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18311 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
18316 gcc_unreachable ();
18318 need_zero_guard = true;
18319 size_needed = GET_MODE_SIZE (Pmode);
18321 case unrolled_loop:
18322 need_zero_guard = true;
18323 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
18325 case rep_prefix_8_byte:
18328 case rep_prefix_4_byte:
18331 case rep_prefix_1_byte:
18335 need_zero_guard = true;
18340 epilogue_size_needed = size_needed;
18342 /* Step 1: Prologue guard. */
18344 /* Alignment code needs count to be in register. */
18345 if (CONST_INT_P (count_exp) && desired_align > align)
18347 if (INTVAL (count_exp) > desired_align
18348 && INTVAL (count_exp) > size_needed)
18351 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18352 if (align_bytes <= 0)
18355 align_bytes = desired_align - align_bytes;
18357 if (align_bytes == 0)
18358 count_exp = force_reg (counter_mode (count_exp), count_exp);
18360 gcc_assert (desired_align >= 1 && align >= 1);
18362 /* Ensure that alignment prologue won't copy past end of block. */
18363 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18365 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18366 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18367 Make sure it is a power of 2. */
18368 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18372 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18374 /* If main algorithm works on QImode, no epilogue is needed.
18375 For small sizes just don't align anything. */
18376 if (size_needed == 1)
18377 desired_align = align;
18384 label = gen_label_rtx ();
18385 emit_cmp_and_jump_insns (count_exp,
18386 GEN_INT (epilogue_size_needed),
18387 LTU, 0, counter_mode (count_exp), 1, label);
18388 if (expected_size == -1 || expected_size < epilogue_size_needed)
18389 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18391 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18395 /* Emit code to decide at runtime whether a library call or inline code should be
18396 used. */
18397 if (dynamic_check != -1)
18399 if (CONST_INT_P (count_exp))
18401 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
18403 emit_block_move_via_libcall (dst, src, count_exp, false);
18404 count_exp = const0_rtx;
18410 rtx hot_label = gen_label_rtx ();
18411 jump_around_label = gen_label_rtx ();
18412 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18413 LEU, 0, GET_MODE (count_exp), 1, hot_label);
18414 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18415 emit_block_move_via_libcall (dst, src, count_exp, false);
18416 emit_jump (jump_around_label);
18417 emit_label (hot_label);
18421 /* Step 2: Alignment prologue. */
18423 if (desired_align > align)
18425 if (align_bytes == 0)
18427 /* Except for the first move in the epilogue, we no longer know
18428 the constant offset in the aliasing info. It doesn't seem worth
18429 the pain to maintain it for the first move, so throw away
18430 the info early. */
18431 src = change_address (src, BLKmode, srcreg);
18432 dst = change_address (dst, BLKmode, destreg);
18433 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
18438 /* If we know how many bytes need to be stored before dst is
18439 sufficiently aligned, maintain aliasing info accurately. */
18440 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
18441 desired_align, align_bytes);
18442 count_exp = plus_constant (count_exp, -align_bytes);
18443 count -= align_bytes;
18445 if (need_zero_guard
18446 && (count < (unsigned HOST_WIDE_INT) size_needed
18447 || (align_bytes == 0
18448 && count < ((unsigned HOST_WIDE_INT) size_needed
18449 + desired_align - align))))
18451 /* It is possible that we copied enough so the main loop will not
18452 execute. */
18453 gcc_assert (size_needed > 1);
18454 if (label == NULL_RTX)
18455 label = gen_label_rtx ();
18456 emit_cmp_and_jump_insns (count_exp,
18457 GEN_INT (size_needed),
18458 LTU, 0, counter_mode (count_exp), 1, label);
18459 if (expected_size == -1
18460 || expected_size < (desired_align - align) / 2 + size_needed)
18461 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18463 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18466 if (label && size_needed == 1)
18468 emit_label (label);
18469 LABEL_NUSES (label) = 1;
18471 epilogue_size_needed = 1;
18473 else if (label == NULL_RTX)
18474 epilogue_size_needed = size_needed;
18476 /* Step 3: Main loop. */
18482 gcc_unreachable ();
18484 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18485 count_exp, QImode, 1, expected_size);
18488 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18489 count_exp, Pmode, 1, expected_size);
18491 case unrolled_loop:
18492 /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
18493 registers for 4 temporaries anyway. */
18494 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18495 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
18498 case rep_prefix_8_byte:
18499 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18502 case rep_prefix_4_byte:
18503 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18506 case rep_prefix_1_byte:
18507 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18511 /* Properly adjust the offsets of the src and dest memory for aliasing. */
18512 if (CONST_INT_P (count_exp))
18514 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
18515 (count / size_needed) * size_needed);
18516 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18517 (count / size_needed) * size_needed);
18521 src = change_address (src, BLKmode, srcreg);
18522 dst = change_address (dst, BLKmode, destreg);
18525 /* Step 4: Epilogue to copy the remaining bytes. */
18529 /* When the main loop is done, COUNT_EXP might hold the original count,
18530 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18531 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18532 bytes. Compensate if needed. */
18534 if (size_needed < epilogue_size_needed)
18537 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18538 GEN_INT (size_needed - 1), count_exp, 1,
18540 if (tmp != count_exp)
18541 emit_move_insn (count_exp, tmp);
18543 emit_label (label);
18544 LABEL_NUSES (label) = 1;
18547 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18548 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
18549 epilogue_size_needed);
18550 if (jump_around_label)
18551 emit_label (jump_around_label);
18555 /* Helper function for memset. For the QImode value 0xXY produce
18556 0xXYXYXYXY of the width specified by MODE. This is essentially
18557 a * 0x10101010, but we can do slightly better than
18558 synth_mult by unwinding the sequence by hand on CPUs with
18559 slow multiply. */
18561 promote_duplicated_reg (enum machine_mode mode, rtx val)
18563 enum machine_mode valmode = GET_MODE (val);
18565 int nops = mode == DImode ? 3 : 2;
18567 gcc_assert (mode == SImode || mode == DImode);
18568 if (val == const0_rtx)
18569 return copy_to_mode_reg (mode, const0_rtx);
18570 if (CONST_INT_P (val))
18572 HOST_WIDE_INT v = INTVAL (val) & 255;
18574 v |= v << 8;
18575 v |= v << 16;
18576 if (mode == DImode)
18577 v |= (v << 16) << 16;
18578 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
18581 if (valmode == VOIDmode)
18583 if (valmode != QImode)
18584 val = gen_lowpart (QImode, val);
18585 if (mode == QImode)
18587 if (!TARGET_PARTIAL_REG_STALL)
18589 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
18590 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
18591 <= (ix86_cost->shift_const + ix86_cost->add) * nops
18592 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
18594 rtx reg = convert_modes (mode, QImode, val, true);
18595 tmp = promote_duplicated_reg (mode, const1_rtx);
18596 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
18601 rtx reg = convert_modes (mode, QImode, val, true);
18603 if (!TARGET_PARTIAL_REG_STALL)
18604 if (mode == SImode)
18605 emit_insn (gen_movsi_insv_1 (reg, reg));
18607 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
18610 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
18611 NULL, 1, OPTAB_DIRECT);
18613 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18615 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
18616 NULL, 1, OPTAB_DIRECT);
18617 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18618 if (mode == SImode)
18620 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
18621 NULL, 1, OPTAB_DIRECT);
18622 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
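/* A plain-C sketch, for illustration only: the shift/or sequence above
   broadcasts the low byte through the register.  */
#if 0
static unsigned long long
broadcast_byte_sketch (unsigned long long x, int is_64bit)
{
  x &= 0xff;
  x |= x << 8;		/* 0x00XY -> 0xXYXY */
  x |= x << 16;		/* 0xXYXY -> 0xXYXYXYXY */
  if (is_64bit)
    x |= x << 32;	/* -> 0xXYXYXYXYXYXYXYXY */
  return x;
}
#endif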
18627 /* Duplicate value VAL using promote_duplicated_reg into the maximal size that
18628 will be needed by the main loop copying SIZE_NEEDED chunks and by the
18629 prologue raising the alignment from ALIGN to DESIRED_ALIGN. */
18631 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
18636 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
18637 promoted_val = promote_duplicated_reg (DImode, val);
18638 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
18639 promoted_val = promote_duplicated_reg (SImode, val);
18640 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
18641 promoted_val = promote_duplicated_reg (HImode, val);
18643 promoted_val = val;
18645 return promoted_val;
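/* A plain-C sketch, for illustration only: the width choice above reads as
   "promote VAL only as wide as the main-loop chunk or the alignment prologue
   will actually need" (the 8-byte case is additionally gated on a 64-bit
   target in the code above).  The function name is invented.  */
#if 0
static int
promotion_width_sketch (int size_needed, int desired_align, int align)
{
  if (size_needed > 4 || (desired_align > align && desired_align > 4))
    return 8;			/* DImode (64-bit targets only) */
  if (size_needed > 2 || (desired_align > align && desired_align > 2))
    return 4;			/* SImode */
  if (size_needed > 1 || (desired_align > align && desired_align > 1))
    return 2;			/* HImode */
  return 1;			/* VAL is used unpromoted */
}
#endif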
18648 /* Expand string set operation (memset). Use i386 string operations when
18649 profitable. See the expand_movmem comment for an explanation of the
18650 individual steps performed. */
18652 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
18653 rtx expected_align_exp, rtx expected_size_exp)
18658 rtx jump_around_label = NULL;
18659 HOST_WIDE_INT align = 1;
18660 unsigned HOST_WIDE_INT count = 0;
18661 HOST_WIDE_INT expected_size = -1;
18662 int size_needed = 0, epilogue_size_needed;
18663 int desired_align = 0, align_bytes = 0;
18664 enum stringop_alg alg;
18665 rtx promoted_val = NULL;
18666 bool force_loopy_epilogue = false;
18668 bool need_zero_guard = false;
18670 if (CONST_INT_P (align_exp))
18671 align = INTVAL (align_exp);
18672 /* i386 can do misaligned access at a reasonably increased cost. */
18673 if (CONST_INT_P (expected_align_exp)
18674 && INTVAL (expected_align_exp) > align)
18675 align = INTVAL (expected_align_exp);
18676 if (CONST_INT_P (count_exp))
18677 count = expected_size = INTVAL (count_exp);
18678 if (CONST_INT_P (expected_size_exp) && count == 0)
18679 expected_size = INTVAL (expected_size_exp);
18681 /* Make sure we don't need to care about overflow later on. */
18682 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18685 /* Step 0: Decide on preferred algorithm, desired alignment and
18686 size of chunks to be copied by main loop. */
18688 alg = decide_alg (count, expected_size, true, &dynamic_check);
18689 desired_align = decide_alignment (align, alg, expected_size);
18691 if (!TARGET_ALIGN_STRINGOPS)
18692 align = desired_align;
18694 if (alg == libcall)
18696 gcc_assert (alg != no_stringop);
18698 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
18699 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18704 gcc_unreachable ();
18706 need_zero_guard = true;
18707 size_needed = GET_MODE_SIZE (Pmode);
18709 case unrolled_loop:
18710 need_zero_guard = true;
18711 size_needed = GET_MODE_SIZE (Pmode) * 4;
18713 case rep_prefix_8_byte:
18716 case rep_prefix_4_byte:
18719 case rep_prefix_1_byte:
18723 need_zero_guard = true;
18727 epilogue_size_needed = size_needed;
18729 /* Step 1: Prologue guard. */
18731 /* Alignment code needs count to be in register. */
18732 if (CONST_INT_P (count_exp) && desired_align > align)
18734 if (INTVAL (count_exp) > desired_align
18735 && INTVAL (count_exp) > size_needed)
18738 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18739 if (align_bytes <= 0)
18742 align_bytes = desired_align - align_bytes;
18744 if (align_bytes == 0)
18746 enum machine_mode mode = SImode;
18747 if (TARGET_64BIT && (count & ~0xffffffff))
18749 count_exp = force_reg (mode, count_exp);
18752 /* Do the cheap promotion to allow better CSE across the
18753 main loop and epilogue (i.e., one load of the big constant in
18754 front of all the code). */
18755 if (CONST_INT_P (val_exp))
18756 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18757 desired_align, align);
18758 /* Ensure that alignment prologue won't copy past end of block. */
18759 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18761 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18762 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18763 Make sure it is a power of 2. */
18764 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18766 /* To improve performance of small blocks, we jump around the VAL
18767 promoting mode. This means that if the promoted VAL is not constant,
18768 we might not use it in the epilogue and have to use the byte
18769 loop variant. */
18770 if (epilogue_size_needed > 2 && !promoted_val)
18771 force_loopy_epilogue = true;
18774 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18776 /* If main algorithm works on QImode, no epilogue is needed.
18777 For small sizes just don't align anything. */
18778 if (size_needed == 1)
18779 desired_align = align;
18786 label = gen_label_rtx ();
18787 emit_cmp_and_jump_insns (count_exp,
18788 GEN_INT (epilogue_size_needed),
18789 LTU, 0, counter_mode (count_exp), 1, label);
18790 if (expected_size == -1 || expected_size <= epilogue_size_needed)
18791 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18793 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18796 if (dynamic_check != -1)
18798 rtx hot_label = gen_label_rtx ();
18799 jump_around_label = gen_label_rtx ();
18800 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18801 LEU, 0, counter_mode (count_exp), 1, hot_label);
18802 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18803 set_storage_via_libcall (dst, count_exp, val_exp, false);
18804 emit_jump (jump_around_label);
18805 emit_label (hot_label);
18808 /* Step 2: Alignment prologue. */
18810 /* Do the expensive promotion once we branched off the small blocks. */
18812 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18813 desired_align, align);
18814 gcc_assert (desired_align >= 1 && align >= 1);
18816 if (desired_align > align)
18818 if (align_bytes == 0)
18820 /* Except for the first move in the epilogue, we no longer know
18821 the constant offset in the aliasing info. It doesn't seem worth
18822 the pain to maintain it for the first move, so throw away
18823 the info early. */
18824 dst = change_address (dst, BLKmode, destreg);
18825 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
18830 /* If we know how many bytes need to be stored before dst is
18831 sufficiently aligned, maintain aliasing info accurately. */
18832 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
18833 desired_align, align_bytes);
18834 count_exp = plus_constant (count_exp, -align_bytes);
18835 count -= align_bytes;
18837 if (need_zero_guard
18838 && (count < (unsigned HOST_WIDE_INT) size_needed
18839 || (align_bytes == 0
18840 && count < ((unsigned HOST_WIDE_INT) size_needed
18841 + desired_align - align))))
18843 /* It is possible that we stored enough so the main loop will not
18844 execute. */
18845 gcc_assert (size_needed > 1);
18846 if (label == NULL_RTX)
18847 label = gen_label_rtx ();
18848 emit_cmp_and_jump_insns (count_exp,
18849 GEN_INT (size_needed),
18850 LTU, 0, counter_mode (count_exp), 1, label);
18851 if (expected_size == -1
18852 || expected_size < (desired_align - align) / 2 + size_needed)
18853 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18855 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18858 if (label && size_needed == 1)
18860 emit_label (label);
18861 LABEL_NUSES (label) = 1;
18863 promoted_val = val_exp;
18864 epilogue_size_needed = 1;
18866 else if (label == NULL_RTX)
18867 epilogue_size_needed = size_needed;
18869 /* Step 3: Main loop. */
18875 gcc_unreachable ();
18877 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18878 count_exp, QImode, 1, expected_size);
18881 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18882 count_exp, Pmode, 1, expected_size);
18884 case unrolled_loop:
18885 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18886 count_exp, Pmode, 4, expected_size);
18888 case rep_prefix_8_byte:
18889 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18892 case rep_prefix_4_byte:
18893 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18896 case rep_prefix_1_byte:
18897 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18901 /* Properly adjust the offset of the dest memory for aliasing. */
18902 if (CONST_INT_P (count_exp))
18903 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18904 (count / size_needed) * size_needed);
18906 dst = change_address (dst, BLKmode, destreg);
18908 /* Step 4: Epilogue to copy the remaining bytes. */
18912 /* When the main loop is done, COUNT_EXP might hold original count,
18913 while we want to copy only COUNT_EXP & SIZE_NEEDED bytes.
18914 Epilogue code will actually copy COUNT_EXP & EPILOGUE_SIZE_NEEDED
18915 bytes. Compensate if needed. */
18917 if (size_needed < epilogue_size_needed)
18920 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18921 GEN_INT (size_needed - 1), count_exp, 1,
18923 if (tmp != count_exp)
18924 emit_move_insn (count_exp, tmp);
18926 emit_label (label);
18927 LABEL_NUSES (label) = 1;
18930 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18932 if (force_loopy_epilogue)
18933 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
18934 epilogue_size_needed);
18936 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
18937 epilogue_size_needed);
18939 if (jump_around_label)
18940 emit_label (jump_around_label);
18944 /* Expand the appropriate insns for doing strlen if not just doing
18945 repnz; scasb
18947 out = result, initialized with the start address
18948 align_rtx = alignment of the address.
18949 scratch = scratch register, initialized with the start address when
18950 not aligned, otherwise undefined
18952 This is just the body. It needs the initializations mentioned above and
18953 some address computing at the end. These things are done in i386.md. */
18956 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
18960 rtx align_2_label = NULL_RTX;
18961 rtx align_3_label = NULL_RTX;
18962 rtx align_4_label = gen_label_rtx ();
18963 rtx end_0_label = gen_label_rtx ();
18965 rtx tmpreg = gen_reg_rtx (SImode);
18966 rtx scratch = gen_reg_rtx (SImode);
18970 if (CONST_INT_P (align_rtx))
18971 align = INTVAL (align_rtx);
18973 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
18975 /* Is there a known alignment and is it less than 4? */
18978 rtx scratch1 = gen_reg_rtx (Pmode);
18979 emit_move_insn (scratch1, out);
18980 /* Is there a known alignment and is it not 2? */
18983 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
18984 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
18986 /* Leave just the 3 lower bits. */
18987 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
18988 NULL_RTX, 0, OPTAB_WIDEN);
18990 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
18991 Pmode, 1, align_4_label);
18992 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
18993 Pmode, 1, align_2_label);
18994 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
18995 Pmode, 1, align_3_label);
18999 /* Since the alignment is 2, we have to check 2 or 0 bytes;
19000 check whether it is aligned to a 4-byte boundary. */
19002 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
19003 NULL_RTX, 0, OPTAB_WIDEN);
19005 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19006 Pmode, 1, align_4_label);
19009 mem = change_address (src, QImode, out);
19011 /* Now compare the bytes. */
19013 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
19014 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
19015 QImode, 1, end_0_label);
19017 /* Increment the address. */
19018 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19020 /* Not needed with an alignment of 2 */
19023 emit_label (align_2_label);
19025 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19028 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19030 emit_label (align_3_label);
19033 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19036 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19039 /* Generate loop to check 4 bytes at a time. It is not a good idea to
19040 align this loop; doing so only makes the program huge and does not
19041 help performance. */
19042 emit_label (align_4_label);
19044 mem = change_address (src, SImode, out);
19045 emit_move_insn (scratch, mem);
19046 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
19048 /* This formula yields a nonzero result iff one of the bytes is zero.
19049 This saves three branches inside the loop and many cycles. */
19051 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
19052 emit_insn (gen_one_cmplsi2 (scratch, scratch));
19053 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
19054 emit_insn (gen_andsi3 (tmpreg, tmpreg,
19055 gen_int_mode (0x80808080, SImode)));
19056 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
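/* A plain-C sketch, for illustration only: the three instructions above
   compute the classic "does this word contain a zero byte" test.  */
#if 0
static unsigned int
has_zero_byte_sketch (unsigned int w)
{
  /* Nonzero iff some byte of W is zero: the subtraction borrows into a
     byte's sign bit only when that byte was zero, and ~w masks out bytes
     that already had their sign bit set.  */
  return (w - 0x01010101u) & ~w & 0x80808080u;
}
#endif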
19061 rtx reg = gen_reg_rtx (SImode);
19062 rtx reg2 = gen_reg_rtx (Pmode);
19063 emit_move_insn (reg, tmpreg);
19064 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
19066 /* If zero is not in the first two bytes, move two bytes forward. */
19067 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19068 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19069 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19070 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
19071 gen_rtx_IF_THEN_ELSE (SImode, tmp,
19074 /* Emit lea manually to avoid clobbering of flags. */
19075 emit_insn (gen_rtx_SET (SImode, reg2,
19076 gen_rtx_PLUS (Pmode, out, const2_rtx)));
19078 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19079 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19080 emit_insn (gen_rtx_SET (VOIDmode, out,
19081 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
19087 rtx end_2_label = gen_label_rtx ();
19088 /* Is zero in the first two bytes? */
19090 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19091 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19092 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
19093 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
19094 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
19096 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
19097 JUMP_LABEL (tmp) = end_2_label;
19099 /* Not in the first two. Move two bytes forward. */
19100 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
19101 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
19103 emit_label (end_2_label);
19107 /* Avoid branch in fixing the byte. */
19108 tmpreg = gen_lowpart (QImode, tmpreg);
19109 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
19110 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
19111 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
19112 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), tmp, cmp));
19114 emit_label (end_0_label);
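/* A plain-C sketch, for illustration only: taken together, the code above
   behaves like this word-at-a-time scan (assuming a little-endian target,
   a 32-bit unsigned int, and a nul-terminated string; names are invented
   for the sketch).  */
#if 0
#include <stdint.h>
#include <string.h>

static const char *
find_nul_sketch (const char *p)
{
  while ((uintptr_t) p & 3)	/* 1..3-byte alignment prologue */
    {
      if (*p == 0)
	return p;
      p++;
    }
  for (;;)			/* 4 bytes per iteration */
    {
      unsigned int w;
      memcpy (&w, p, 4);
      if ((w - 0x01010101u) & ~w & 0x80808080u)
	break;			/* some byte in W is zero */
      p += 4;
    }
  while (*p)			/* locate the zero inside the word */
    p++;
  return p;
}
#endif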
19117 /* Expand strlen. */
19120 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
19122 rtx addr, scratch1, scratch2, scratch3, scratch4;
19124 /* The generic case of the strlen expander is long. Avoid its
19125 expansion unless TARGET_INLINE_ALL_STRINGOPS. */
19127 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19128 && !TARGET_INLINE_ALL_STRINGOPS
19129 && !optimize_insn_for_size_p ()
19130 && (!CONST_INT_P (align) || INTVAL (align) < 4))
19133 addr = force_reg (Pmode, XEXP (src, 0));
19134 scratch1 = gen_reg_rtx (Pmode);
19136 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19137 && !optimize_insn_for_size_p ())
19139 /* Well, it seems that some optimizers do not combine a call like
19140 foo(strlen(bar), strlen(bar));
19141 when the move and the subtraction are done here. They do calculate
19142 the length just once when these instructions are done inside
19143 output_strlen_unroll(). But since &bar[strlen(bar)] is
19144 often used and we use one fewer register for the lifetime of
19145 output_strlen_unroll(), this is better. */
19147 emit_move_insn (out, addr);
19149 ix86_expand_strlensi_unroll_1 (out, src, align);
19151 /* strlensi_unroll_1 returns the address of the zero at the end of
19152 the string, like memchr(), so compute the length by subtracting
19153 the start address. */
19154 emit_insn ((*ix86_gen_sub3) (out, out, addr));
19160 /* Can't use this if the user has appropriated eax, ecx, or edi. */
19161 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
19164 scratch2 = gen_reg_rtx (Pmode);
19165 scratch3 = gen_reg_rtx (Pmode);
19166 scratch4 = force_reg (Pmode, constm1_rtx);
19168 emit_move_insn (scratch3, addr);
19169 eoschar = force_reg (QImode, eoschar);
19171 src = replace_equiv_address_nv (src, scratch3);
19173 /* If .md starts supporting :P, this can be done in .md. */
19174 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
19175 scratch4), UNSPEC_SCAS);
19176 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
19177 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
19178 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
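/* A plain-C sketch, for illustration only: "repnz scasb" with ECX preloaded
   with -1 scans len+1 bytes (including the terminator), leaving ECX equal to
   -(len + 2).  The one's complement plus -1 emitted above therefore recovers
   the length.  The function name is invented.  */
#if 0
static long
strlen_from_scas_counter_sketch (long ecx_after_scan)
{
  /* ~ecx - 1 == -ecx - 2 == len when ecx == -(len + 2).  */
  return ~ecx_after_scan - 1;
}
#endif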
19183 /* For a given symbol (function), construct code to compute the address of its
19184 PLT entry in the large x86-64 PIC model. */
19186 construct_plt_address (rtx symbol)
19188 rtx tmp = gen_reg_rtx (Pmode);
19189 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
19191 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
19192 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
19194 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
19195 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
19200 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
19202 rtx pop, int sibcall)
19204 rtx use = NULL, call;
19206 if (pop == const0_rtx)
19208 gcc_assert (!TARGET_64BIT || !pop);
19210 if (TARGET_MACHO && !TARGET_64BIT)
19213 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
19214 fnaddr = machopic_indirect_call_target (fnaddr);
19219 /* Static functions and indirect calls don't need the pic register. */
19220 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
19221 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19222 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
19223 use_reg (&use, pic_offset_table_rtx);
19226 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
19228 rtx al = gen_rtx_REG (QImode, AX_REG);
19229 emit_move_insn (al, callarg2);
19230 use_reg (&use, al);
19233 if (ix86_cmodel == CM_LARGE_PIC
19235 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19236 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
19237 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
19239 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
19240 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
19242 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
19243 fnaddr = gen_rtx_MEM (QImode, fnaddr);
19246 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
19248 call = gen_rtx_SET (VOIDmode, retval, call);
19251 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
19252 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
19253 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
19256 && ix86_cfun_abi () == MS_ABI
19257 && (!callarg2 || INTVAL (callarg2) != -2))
19259 /* We need to represent that SI and DI registers are clobbered
19260 by the function call. */
19261 static int clobbered_registers[] = {
19262 XMM6_REG, XMM7_REG, XMM8_REG,
19263 XMM9_REG, XMM10_REG, XMM11_REG,
19264 XMM12_REG, XMM13_REG, XMM14_REG,
19265 XMM15_REG, SI_REG, DI_REG
19268 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
19269 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
19270 UNSPEC_MS_TO_SYSV_CALL);
19274 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
19275 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
19278 (SSE_REGNO_P (clobbered_registers[i])
19280 clobbered_registers[i]));
19282 call = gen_rtx_PARALLEL (VOIDmode,
19283 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
19287 call = emit_call_insn (call);
19289 CALL_INSN_FUNCTION_USAGE (call) = use;
19293 /* Clear stack slot assignments remembered from previous functions.
19294 This is called from INIT_EXPANDERS once before RTL is emitted for each
19295 function. */
19297 static struct machine_function *
19298 ix86_init_machine_status (void)
19300 struct machine_function *f;
19302 f = GGC_CNEW (struct machine_function);
19303 f->use_fast_prologue_epilogue_nregs = -1;
19304 f->tls_descriptor_call_expanded_p = 0;
19305 f->call_abi = ix86_abi;
19310 /* Return a MEM corresponding to a stack slot with mode MODE.
19311 Allocate a new slot if necessary.
19313 The RTL for a function can have several slots available: N is
19314 which slot to use. */
19317 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
19319 struct stack_local_entry *s;
19321 gcc_assert (n < MAX_386_STACK_LOCALS);
19323 /* Virtual slot is valid only before vregs are instantiated. */
19324 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
19326 for (s = ix86_stack_locals; s; s = s->next)
19327 if (s->mode == mode && s->n == n)
19328 return copy_rtx (s->rtl);
19330 s = (struct stack_local_entry *)
19331 ggc_alloc (sizeof (struct stack_local_entry));
19334 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
19336 s->next = ix86_stack_locals;
19337 ix86_stack_locals = s;
19341 /* Construct the SYMBOL_REF for the tls_get_addr function. */
19343 static GTY(()) rtx ix86_tls_symbol;
19345 ix86_tls_get_addr (void)
19348 if (!ix86_tls_symbol)
19350 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
19351 (TARGET_ANY_GNU_TLS
19353 ? "___tls_get_addr"
19354 : "__tls_get_addr");
19357 return ix86_tls_symbol;
19360 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
19362 static GTY(()) rtx ix86_tls_module_base_symbol;
19364 ix86_tls_module_base (void)
19367 if (!ix86_tls_module_base_symbol)
19369 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
19370 "_TLS_MODULE_BASE_");
19371 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
19372 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
19375 return ix86_tls_module_base_symbol;
19378 /* Calculate the length of the memory address in the instruction
19379 encoding. Does not include the one-byte modrm, opcode, or prefix. */
19382 memory_address_length (rtx addr)
19384 struct ix86_address parts;
19385 rtx base, index, disp;
19389 if (GET_CODE (addr) == PRE_DEC
19390 || GET_CODE (addr) == POST_INC
19391 || GET_CODE (addr) == PRE_MODIFY
19392 || GET_CODE (addr) == POST_MODIFY)
19395 ok = ix86_decompose_address (addr, &parts);
19398 if (parts.base && GET_CODE (parts.base) == SUBREG)
19399 parts.base = SUBREG_REG (parts.base);
19400 if (parts.index && GET_CODE (parts.index) == SUBREG)
19401 parts.index = SUBREG_REG (parts.index);
19404 index = parts.index;
19409 - esp as the base always wants an index,
19410 - ebp as the base always wants a displacement,
19411 - r12 as the base always wants an index,
19412 - r13 as the base always wants a displacement. */
19414 /* Register Indirect. */
19415 if (base && !index && !disp)
19417 /* esp (for its index) and ebp (for its displacement) need
19418 the two-byte modrm form. Similarly for r12 and r13 in 64-bit
19419 mode. */
19421 && (addr == arg_pointer_rtx
19422 || addr == frame_pointer_rtx
19423 || REGNO (addr) == SP_REG
19424 || REGNO (addr) == BP_REG
19425 || REGNO (addr) == R12_REG
19426 || REGNO (addr) == R13_REG))
19430 /* Direct Addressing. In 64-bit mode mod 00 r/m 5
19431 is not disp32, but disp32(%rip), so for disp32
19432 SIB byte is needed, unless print_operand_address
19433 optimizes it into disp32(%rip) or (%rip) is implied
19434 by UNSPEC. */
19435 else if (disp && !base && !index)
19442 if (GET_CODE (disp) == CONST)
19443 symbol = XEXP (disp, 0);
19444 if (GET_CODE (symbol) == PLUS
19445 && CONST_INT_P (XEXP (symbol, 1)))
19446 symbol = XEXP (symbol, 0);
19448 if (GET_CODE (symbol) != LABEL_REF
19449 && (GET_CODE (symbol) != SYMBOL_REF
19450 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
19451 && (GET_CODE (symbol) != UNSPEC
19452 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
19453 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
19460 /* Find the length of the displacement constant. */
19463 if (base && satisfies_constraint_K (disp))
19468 /* ebp always wants a displacement. Similarly r13. */
19469 else if (base && REG_P (base)
19470 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
19473 /* An index requires the two-byte modrm form.... */
19475 /* ...like esp (or r12), which always wants an index. */
19476 || base == arg_pointer_rtx
19477 || base == frame_pointer_rtx
19478 || (base && REG_P (base)
19479 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
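/* A plain-C sketch, for illustration only: the displacement-length rule
   applied above is simply "no displacement: 0 bytes; signed 8-bit
   displacement (constraint K): 1 byte; otherwise: 4 bytes".  The function
   name is invented.  */
#if 0
static int
disp_length_sketch (int have_disp, long disp)
{
  if (!have_disp)
    return 0;
  return (disp >= -128 && disp <= 127) ? 1 : 4;
}
#endif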
19496 /* Compute default value for "length_immediate" attribute. When SHORTFORM
19497 is set, expect that the insn has an 8-bit immediate alternative. */
19499 ix86_attr_length_immediate_default (rtx insn, int shortform)
19503 extract_insn_cached (insn);
19504 for (i = recog_data.n_operands - 1; i >= 0; --i)
19505 if (CONSTANT_P (recog_data.operand[i]))
19507 enum attr_mode mode = get_attr_mode (insn);
19510 if (shortform && CONST_INT_P (recog_data.operand[i]))
19512 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
19519 ival = trunc_int_for_mode (ival, HImode);
19522 ival = trunc_int_for_mode (ival, SImode);
19527 if (IN_RANGE (ival, -128, 127))
19544 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
19549 fatal_insn ("unknown insn mode", insn);
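/* A plain-C sketch, for illustration only: for the common SImode case the
   logic above reduces to "one byte if the short form exists and the value
   fits in a signed byte, else four bytes" (HImode insns use two bytes, and
   DImode immediates are likewise sign-extended imm32).  The function name
   is invented.  */
#if 0
static int
imm_length_si_sketch (int shortform, long ival)
{
  if (shortform && ival >= -128 && ival <= 127)
    return 1;
  return 4;
}
#endif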
19554 /* Compute default value for "length_address" attribute. */
19556 ix86_attr_length_address_default (rtx insn)
19560 if (get_attr_type (insn) == TYPE_LEA)
19562 rtx set = PATTERN (insn), addr;
19564 if (GET_CODE (set) == PARALLEL)
19565 set = XVECEXP (set, 0, 0);
19567 gcc_assert (GET_CODE (set) == SET);
19569 addr = SET_SRC (set);
19570 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
19572 if (GET_CODE (addr) == ZERO_EXTEND)
19573 addr = XEXP (addr, 0);
19574 if (GET_CODE (addr) == SUBREG)
19575 addr = SUBREG_REG (addr);
19578 return memory_address_length (addr);
19581 extract_insn_cached (insn);
19582 for (i = recog_data.n_operands - 1; i >= 0; --i)
19583 if (MEM_P (recog_data.operand[i]))
19585 constrain_operands_cached (reload_completed);
19586 if (which_alternative != -1)
19588 const char *constraints = recog_data.constraints[i];
19589 int alt = which_alternative;
19591 while (*constraints == '=' || *constraints == '+')
19594 while (*constraints++ != ',')
19596 /* Skip ignored operands. */
19597 if (*constraints == 'X')
19600 return memory_address_length (XEXP (recog_data.operand[i], 0));
19605 /* Compute default value for "length_vex" attribute. It includes
19606 the 2- or 3-byte VEX prefix and 1 opcode byte. */
19609 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
19614 /* Only the 0f opcode can use the 2-byte VEX prefix; the VEX W bit requires the
19615 3-byte VEX prefix. */
19616 if (!has_0f_opcode || has_vex_w)
19619 /* We can always use 2 byte VEX prefix in 32bit. */
19623 extract_insn_cached (insn);
19625 for (i = recog_data.n_operands - 1; i >= 0; --i)
19626 if (REG_P (recog_data.operand[i]))
19628 /* REX.W bit uses 3 byte VEX prefix. */
19629 if (GET_MODE (recog_data.operand[i]) == DImode
19630 && GENERAL_REG_P (recog_data.operand[i]))
19635 /* REX.X or REX.B bits use 3 byte VEX prefix. */
19636 if (MEM_P (recog_data.operand[i])
19637 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
19644 /* Return the maximum number of instructions a cpu can issue. */
19647 ix86_issue_rate (void)
19651 case PROCESSOR_PENTIUM:
19652 case PROCESSOR_ATOM:
19656 case PROCESSOR_PENTIUMPRO:
19657 case PROCESSOR_PENTIUM4:
19658 case PROCESSOR_ATHLON:
19660 case PROCESSOR_AMDFAM10:
19661 case PROCESSOR_NOCONA:
19662 case PROCESSOR_GENERIC32:
19663 case PROCESSOR_GENERIC64:
19666 case PROCESSOR_CORE2:
19674 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
19675 by DEP_INSN and nothing set by DEP_INSN. */
19678 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
19682 /* Simplify the test for uninteresting insns. */
19683 if (insn_type != TYPE_SETCC
19684 && insn_type != TYPE_ICMOV
19685 && insn_type != TYPE_FCMOV
19686 && insn_type != TYPE_IBR)
19689 if ((set = single_set (dep_insn)) != 0)
19691 set = SET_DEST (set);
19694 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
19695 && XVECLEN (PATTERN (dep_insn), 0) == 2
19696 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
19697 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
19699 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
19700 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
19705 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
19708 /* This test is true if the dependent insn reads the flags but
19709 not any other potentially set register. */
19710 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
19713 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
19719 /* Return true iff USE_INSN has a memory address with operands set by
19720 SET_INSN. */
19723 ix86_agi_dependent (rtx set_insn, rtx use_insn)
19726 extract_insn_cached (use_insn);
19727 for (i = recog_data.n_operands - 1; i >= 0; --i)
19728 if (MEM_P (recog_data.operand[i]))
19730 rtx addr = XEXP (recog_data.operand[i], 0);
19731 return modified_in_p (addr, set_insn) != 0;
19737 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
19739 enum attr_type insn_type, dep_insn_type;
19740 enum attr_memory memory;
19742 int dep_insn_code_number;
19744 /* Anti and output dependencies have zero cost on all CPUs. */
19745 if (REG_NOTE_KIND (link) != 0)
19748 dep_insn_code_number = recog_memoized (dep_insn);
19750 /* If we can't recognize the insns, we can't really do anything. */
19751 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
19754 insn_type = get_attr_type (insn);
19755 dep_insn_type = get_attr_type (dep_insn);
19759 case PROCESSOR_PENTIUM:
19760 /* Address Generation Interlock adds a cycle of latency. */
19761 if (insn_type == TYPE_LEA)
19763 rtx addr = PATTERN (insn);
19765 if (GET_CODE (addr) == PARALLEL)
19766 addr = XVECEXP (addr, 0, 0);
19768 gcc_assert (GET_CODE (addr) == SET);
19770 addr = SET_SRC (addr);
19771 if (modified_in_p (addr, dep_insn))
19774 else if (ix86_agi_dependent (dep_insn, insn))
19777 /* ??? Compares pair with jump/setcc. */
19778 if (ix86_flags_dependent (insn, dep_insn, insn_type))
19781 /* Floating point stores require value to be ready one cycle earlier. */
19782 if (insn_type == TYPE_FMOV
19783 && get_attr_memory (insn) == MEMORY_STORE
19784 && !ix86_agi_dependent (dep_insn, insn))
19788 case PROCESSOR_PENTIUMPRO:
19789 memory = get_attr_memory (insn);
19791 /* INT->FP conversion is expensive. */
19792 if (get_attr_fp_int_src (dep_insn))
19795 /* There is one cycle extra latency between an FP op and a store. */
19796 if (insn_type == TYPE_FMOV
19797 && (set = single_set (dep_insn)) != NULL_RTX
19798 && (set2 = single_set (insn)) != NULL_RTX
19799 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
19800 && MEM_P (SET_DEST (set2)))
19803 /* Show the ability of the reorder buffer to hide the latency of a load by
19804 executing it in parallel with the previous instruction, in case the
19805 previous instruction is not needed to compute the address. */
19806 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19807 && !ix86_agi_dependent (dep_insn, insn))
19809 /* Claim moves to take one cycle, as the core can issue one load
19810 at a time and the next load can start a cycle later. */
19811 if (dep_insn_type == TYPE_IMOV
19812 || dep_insn_type == TYPE_FMOV)
19820 memory = get_attr_memory (insn);
19822 /* The esp dependency is resolved before the instruction is really
19823 finished. */
19824 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
19825 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
19828 /* INT->FP conversion is expensive. */
19829 if (get_attr_fp_int_src (dep_insn))
19832 /* Show the ability of the reorder buffer to hide the latency of a load by
19833 executing it in parallel with the previous instruction, in case the
19834 previous instruction is not needed to compute the address. */
19835 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19836 && !ix86_agi_dependent (dep_insn, insn))
19838 /* Claim moves to take one cycle, as the core can issue one load
19839 at a time and the next load can start a cycle later. */
19840 if (dep_insn_type == TYPE_IMOV
19841 || dep_insn_type == TYPE_FMOV)
19850 case PROCESSOR_ATHLON:
19852 case PROCESSOR_AMDFAM10:
19853 case PROCESSOR_ATOM:
19854 case PROCESSOR_GENERIC32:
19855 case PROCESSOR_GENERIC64:
19856 memory = get_attr_memory (insn);
19858 /* Show the ability of the reorder buffer to hide the latency of a load by
19859 executing it in parallel with the previous instruction, in case the
19860 previous instruction is not needed to compute the address. */
19861 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19862 && !ix86_agi_dependent (dep_insn, insn))
19864 enum attr_unit unit = get_attr_unit (insn);
19867 /* Because of the difference between the lengths of the integer and
19868 floating-point unit pipeline preparation stages, the memory operands
19869 for floating point are cheaper.
19871 ??? For Athlon the difference is most probably 2. */
19872 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
19875 loadcost = TARGET_ATHLON ? 2 : 0;
19877 if (cost >= loadcost)
19890 /* How many alternative schedules to try. This should be as wide as the
19891 scheduling freedom in the DFA, but no wider. Making this value too
19892 large results in extra work for the scheduler. */
19895 ia32_multipass_dfa_lookahead (void)
19899 case PROCESSOR_PENTIUM:
19902 case PROCESSOR_PENTIUMPRO:
19912 /* Compute the alignment given to a constant that is being placed in memory.
19913 EXP is the constant and ALIGN is the alignment that the object would
19914 ordinarily have.
19915 The value of this function is used instead of that alignment to align
19916 the object. */
19919 ix86_constant_alignment (tree exp, int align)
19921 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
19922 || TREE_CODE (exp) == INTEGER_CST)
19924 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
19926 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
19929 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
19930 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
19931 return BITS_PER_WORD;
19936 /* Compute the alignment for a static variable.
19937 TYPE is the data type, and ALIGN is the alignment that
19938 the object would ordinarily have. The value of this function is used
19939 instead of that alignment to align the object. */
19942 ix86_data_alignment (tree type, int align)
19944 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
19946 if (AGGREGATE_TYPE_P (type)
19947 && TYPE_SIZE (type)
19948 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19949 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
19950 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
19951 && align < max_align)
19954 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
19955 to a 16-byte boundary. */
19958 if (AGGREGATE_TYPE_P (type)
19959 && TYPE_SIZE (type)
19960 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19961 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
19962 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
19966 if (TREE_CODE (type) == ARRAY_TYPE)
19968 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
19970 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
19973 else if (TREE_CODE (type) == COMPLEX_TYPE)
19976 if (TYPE_MODE (type) == DCmode && align < 64)
19978 if ((TYPE_MODE (type) == XCmode
19979 || TYPE_MODE (type) == TCmode) && align < 128)
19982 else if ((TREE_CODE (type) == RECORD_TYPE
19983 || TREE_CODE (type) == UNION_TYPE
19984 || TREE_CODE (type) == QUAL_UNION_TYPE)
19985 && TYPE_FIELDS (type))
19987 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
19989 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
19992 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
19993 || TREE_CODE (type) == INTEGER_TYPE)
19995 if (TYPE_MODE (type) == DFmode && align < 64)
19997 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20004 /* Compute the alignment for a local variable or a stack slot. EXP is
20005 the data type or decl itself, MODE is the widest mode available and
20006 ALIGN is the alignment that the object would ordinarily have. The
20007 value of this macro is used instead of that alignment to align the
20011 ix86_local_alignment (tree exp, enum machine_mode mode,
20012 unsigned int align)
20016 if (exp && DECL_P (exp))
20018 type = TREE_TYPE (exp);
20027 /* Don't do dynamic stack realignment for long long objects with
20028 -mpreferred-stack-boundary=2. */
20031 && ix86_preferred_stack_boundary < 64
20032 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
20033 && (!type || !TYPE_USER_ALIGN (type))
20034 && (!decl || !DECL_USER_ALIGN (decl)))
20037 /* If TYPE is NULL, we are allocating a stack slot for caller-save
20038 register in MODE. We will return the largest alignment of XF
20039 and DF. */
20042 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
20043 align = GET_MODE_ALIGNMENT (DFmode);
20047 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
20048 to a 16-byte boundary. */
20051 if (AGGREGATE_TYPE_P (type)
20052 && TYPE_SIZE (type)
20053 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20054 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
20055 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20058 if (TREE_CODE (type) == ARRAY_TYPE)
20060 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20062 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20065 else if (TREE_CODE (type) == COMPLEX_TYPE)
20067 if (TYPE_MODE (type) == DCmode && align < 64)
20069 if ((TYPE_MODE (type) == XCmode
20070 || TYPE_MODE (type) == TCmode) && align < 128)
20073 else if ((TREE_CODE (type) == RECORD_TYPE
20074 || TREE_CODE (type) == UNION_TYPE
20075 || TREE_CODE (type) == QUAL_UNION_TYPE)
20076 && TYPE_FIELDS (type))
20078 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20080 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20083 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20084 || TREE_CODE (type) == INTEGER_TYPE)
20087 if (TYPE_MODE (type) == DFmode && align < 64)
20089 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
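/* Illustration (user code, not part of the compiler): with -m32
   -mpreferred-stack-boundary=2 the stack is only guaranteed 4-byte
   alignment, so a local such as

       long long acc = 0;

   is deliberately given 32-bit alignment here instead of forcing a
   dynamic realignment of the whole frame; an explicit
   __attribute__ ((aligned (8))) on ACC still wins, because of the
   TYPE_USER_ALIGN / DECL_USER_ALIGN checks above.  */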
/* Compute the minimum required alignment for dynamic stack realignment
   purposes for a local variable, parameter or a stack slot.  EXP is
   the data type or decl itself, MODE is its mode and ALIGN is the
   alignment that the object would ordinarily have.  */

static unsigned int
ix86_minimum_alignment (tree exp, enum machine_mode mode,
			unsigned int align)
{
  tree type, decl;

  if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
    return align;

  if (exp && DECL_P (exp))
    {
      type = TREE_TYPE (exp);
      decl = exp;
    }
  else
    {
      type = exp;
      decl = NULL;
    }

  /* Don't do dynamic stack realignment for long long objects with
     -mpreferred-stack-boundary=2.  */
  if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
      && (!type || !TYPE_USER_ALIGN (type))
      && (!decl || !DECL_USER_ALIGN (decl)))
    return 32;

  return align;
}
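/* A minimal way to see this hook matter (hypothetical compile):

       long long f (long long x) { long long y = x + 1; return y; }

   built with -m32 -mpreferred-stack-boundary=2, the DImode local Y
   reports a minimum alignment of only 32 here, so the frame is not
   dynamically realigned just to give it 8 bytes; at the default
   preferred boundary the early exit above leaves ALIGN untouched.  */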
/* Find a location for the static chain incoming to a nested function.
   This is a register, unless all free registers are used by arguments.  */

static rtx
ix86_static_chain (const_tree fndecl, bool incoming_p)
{
  unsigned regno;

  if (!DECL_STATIC_CHAIN (fndecl))
    return NULL;

  if (TARGET_64BIT)
    {
      /* We always use R10 in 64-bit mode.  */
      regno = R10_REG;
    }
  else
    {
      tree fntype;
      /* By default in 32-bit mode we use ECX to pass the static chain.  */
      regno = CX_REG;

      fntype = TREE_TYPE (fndecl);
      if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
	{
	  /* Fastcall functions use ecx/edx for arguments, which leaves
	     us with EAX for the static chain.  */
	  regno = AX_REG;
	}
      else if (ix86_function_regparm (fntype, fndecl) == 3)
	{
	  /* For regparm 3, we have no free call-clobbered registers in
	     which to store the static chain.  In order to implement this,
	     we have the trampoline push the static chain to the stack.
	     However, we can't push a value below the return address when
	     we call the nested function directly, so we have to use an
	     alternate entry point.  For this we use ESI, and have the
	     alternate entry point push ESI, so that things appear the
	     same once we're executing the nested function.  */
	  if (incoming_p)
	    {
	      if (fndecl == current_function_decl)
		ix86_static_chain_on_stack = true;
	      return gen_frame_mem (SImode,
				    plus_constant (arg_pointer_rtx, -8));
	    }
	  regno = SI_REG;
	}
    }

  return gen_rtx_REG (Pmode, regno);
}
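/* Context, for illustration: the static chain only matters for GNU C
   nested functions whose address escapes, e.g. (user code, not part of
   the compiler):

       int
       count_matches (int *a, int n, int key)
       {
	 int hits = 0;
	 void tally (int v) { if (v == key) hits++; }
	 for (int i = 0; i < n; i++)
	   tally (a[i]);
	 return hits;
       }

   TALLY needs a pointer to COUNT_MATCHES's frame; taking its address
   and calling it indirectly is what forces the trampoline initialized
   below, with the chain arriving in the location chosen here (ECX by
   default on IA-32, R10 in 64-bit mode).  */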
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNDECL is the decl of the target address; M_TRAMP is a MEM for
   the trampoline, and CHAIN_VALUE is an RTX for the static chain
   to be passed to the target function.  */

static void
ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx mem, fnaddr;

  fnaddr = XEXP (DECL_RTL (fndecl), 0);

  if (!TARGET_64BIT)
    {
      rtx disp, chain;
      int opcode;

      /* Depending on the static chain location, either load a register
	 with a constant, or push the constant to the stack.  All of the
	 instructions are the same size.  */
      chain = ix86_static_chain (fndecl, true);
      if (REG_P (chain))
	{
	  if (REGNO (chain) == CX_REG)
	    opcode = 0xb9;
	  else if (REGNO (chain) == AX_REG)
	    opcode = 0xb8;
	  else
	    gcc_unreachable ();
	}
      else
	opcode = 0x68;

      mem = adjust_address (m_tramp, QImode, 0);
      emit_move_insn (mem, gen_int_mode (opcode, QImode));

      mem = adjust_address (m_tramp, SImode, 1);
      emit_move_insn (mem, chain_value);

      /* Compute offset from the end of the jmp to the target function.
	 In the case in which the trampoline stores the static chain on
	 the stack, we need to skip the first insn which pushes the
	 (call-saved) register static chain; this push is 1 byte.  */
      disp = expand_binop (SImode, sub_optab, fnaddr,
			   plus_constant (XEXP (m_tramp, 0),
					  MEM_P (chain) ? 9 : 10),
			   NULL_RTX, 1, OPTAB_DIRECT);

      mem = adjust_address (m_tramp, QImode, 5);
      emit_move_insn (mem, gen_int_mode (0xe9, QImode));

      mem = adjust_address (m_tramp, SImode, 6);
      emit_move_insn (mem, disp);
    }
  else
    {
      int offset = 0;

      /* Load the function address to r11.  Try to load address using
	 the shorter movl instead of movabs.  We may want to support
	 movq for kernel mode, but kernel does not use trampolines at
	 the moment.  */
      if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
	{
	  fnaddr = copy_to_mode_reg (DImode, fnaddr);

	  mem = adjust_address (m_tramp, HImode, offset);
	  emit_move_insn (mem, gen_int_mode (0xbb41, HImode));

	  mem = adjust_address (m_tramp, SImode, offset + 2);
	  emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
	  offset += 6;
	}
      else
	{
	  mem = adjust_address (m_tramp, HImode, offset);
	  emit_move_insn (mem, gen_int_mode (0xbb49, HImode));

	  mem = adjust_address (m_tramp, DImode, offset + 2);
	  emit_move_insn (mem, fnaddr);
	  offset += 10;
	}

      /* Load static chain using movabs to r10.  */
      mem = adjust_address (m_tramp, HImode, offset);
      emit_move_insn (mem, gen_int_mode (0xba49, HImode));

      mem = adjust_address (m_tramp, DImode, offset + 2);
      emit_move_insn (mem, chain_value);
      offset += 10;

      /* Jump to r11; the last (unused) byte is a nop, only there to
	 pad the write out to a single 32-bit store.  */
      mem = adjust_address (m_tramp, SImode, offset);
      emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
      offset += 4;

      gcc_assert (offset <= TRAMPOLINE_SIZE);
    }

#ifdef ENABLE_EXECUTE_STACK
#ifdef CHECK_EXECUTE_STACK_ENABLED
  if (CHECK_EXECUTE_STACK_ENABLED)
#endif
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
		     LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
}
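/* For reference, the byte layout the code above produces (illustrative
   disassembly; ECX static-chain case on IA-32):

       offset 0:  b9 xx xx xx xx      movl  $chain_value, %ecx
       offset 5:  e9 xx xx xx xx      jmp   <target>

   and in 64-bit mode, in the movabs form:

       offset 0:   49 bb <8 bytes>    movabs $fnaddr, %r11
       offset 10:  49 ba <8 bytes>    movabs $chain_value, %r10
       offset 20:  49 ff e3 90        jmp *%r11; nop (padding)  */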
/* The following file contains several enumerations and data structures
   built from the definitions in i386-builtin-types.def.  */

#include "i386-builtin-types.inc"

/* Table for the ix86 builtin non-function types.  */
static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];

/* Retrieve an element from the above table, building some of
   the types lazily.  */

static tree
ix86_get_builtin_type (enum ix86_builtin_type tcode)
{
  unsigned int index;
  tree type, itype;

  gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));

  type = ix86_builtin_type_tab[(int) tcode];
  if (type != NULL)
    return type;

  gcc_assert (tcode > IX86_BT_LAST_PRIM);
  if (tcode <= IX86_BT_LAST_VECT)
    {
      enum machine_mode mode;

      index = tcode - IX86_BT_LAST_PRIM - 1;
      itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
      mode = ix86_builtin_type_vect_mode[index];

      type = build_vector_type_for_mode (itype, mode);
    }
  else
    {
      int quals;

      index = tcode - IX86_BT_LAST_VECT - 1;
      if (tcode <= IX86_BT_LAST_PTR)
	quals = TYPE_UNQUALIFIED;
      else
	quals = TYPE_QUAL_CONST;

      itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
      if (quals != TYPE_UNQUALIFIED)
	itype = build_qualified_type (itype, quals);

      type = build_pointer_type (itype);
    }

  ix86_builtin_type_tab[(int) tcode] = type;
  return type;
}
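/* The function above is a lazily-filled memo table: a NULL slot means
   "not built yet", and the recursion bottoms out at primitive types,
   which must already be in the table (note the assert).  A minimal
   model of the pattern, with hypothetical names:

       static tree cache[LAST_CODE + 1];

       static tree
       get_type (int code)
       {
	 if (cache[code] == NULL)
	   cache[code] = build_type (code);
	 return cache[code];
       }
   */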
/* Table for the ix86 builtin function types.  */
static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];

/* Retrieve an element from the above table, building some of
   the types lazily.  */

static tree
ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
{
  tree type;

  gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));

  type = ix86_builtin_func_type_tab[(int) tcode];
  if (type != NULL)
    return type;

  if (tcode <= IX86_BT_LAST_FUNC)
    {
      unsigned start = ix86_builtin_func_start[(int) tcode];
      unsigned after = ix86_builtin_func_start[(int) tcode + 1];
      tree rtype, atype, args = void_list_node;
      unsigned i;

      rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
      for (i = after - 1; i > start; --i)
	{
	  atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
	  args = tree_cons (NULL, atype, args);
	}

      type = build_function_type (rtype, args);
    }
  else
    {
      unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
      enum ix86_builtin_func_type icode;

      icode = ix86_builtin_func_alias_base[index];
      type = ix86_get_builtin_func_type (icode);
    }

  ix86_builtin_func_type_tab[(int) tcode] = type;
  return type;
}
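/* Worked example (illustrative): a signature such as
   V4SF_FTYPE_V4SF_V4SF is stored in ix86_builtin_func_args as the slot
   sequence { V4SF, V4SF, V4SF }, return type first.  The loop above
   walks the argument slots backward, so consing onto ARGS yields the
   arguments in declaration order, terminated by void_list_node:

       args = V4SF -> V4SF -> void
       type = build_function_type (V4SF, args);
   */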
/* Codes for all the SSE/MMX builtins.  */
enum ix86_builtins
{
  IX86_BUILTIN_ADDPS,
  IX86_BUILTIN_ADDSS,
  IX86_BUILTIN_DIVPS,
  IX86_BUILTIN_DIVSS,
  IX86_BUILTIN_MULPS,
  IX86_BUILTIN_MULSS,
  IX86_BUILTIN_SUBPS,
  IX86_BUILTIN_SUBSS,

  IX86_BUILTIN_CMPEQPS,
  IX86_BUILTIN_CMPLTPS,
  IX86_BUILTIN_CMPLEPS,
  IX86_BUILTIN_CMPGTPS,
  IX86_BUILTIN_CMPGEPS,
  IX86_BUILTIN_CMPNEQPS,
  IX86_BUILTIN_CMPNLTPS,
  IX86_BUILTIN_CMPNLEPS,
  IX86_BUILTIN_CMPNGTPS,
  IX86_BUILTIN_CMPNGEPS,
  IX86_BUILTIN_CMPORDPS,
  IX86_BUILTIN_CMPUNORDPS,
  IX86_BUILTIN_CMPEQSS,
  IX86_BUILTIN_CMPLTSS,
  IX86_BUILTIN_CMPLESS,
  IX86_BUILTIN_CMPNEQSS,
  IX86_BUILTIN_CMPNLTSS,
  IX86_BUILTIN_CMPNLESS,
  IX86_BUILTIN_CMPNGTSS,
  IX86_BUILTIN_CMPNGESS,
  IX86_BUILTIN_CMPORDSS,
  IX86_BUILTIN_CMPUNORDSS,

  IX86_BUILTIN_COMIEQSS,
  IX86_BUILTIN_COMILTSS,
  IX86_BUILTIN_COMILESS,
  IX86_BUILTIN_COMIGTSS,
  IX86_BUILTIN_COMIGESS,
  IX86_BUILTIN_COMINEQSS,
  IX86_BUILTIN_UCOMIEQSS,
  IX86_BUILTIN_UCOMILTSS,
  IX86_BUILTIN_UCOMILESS,
  IX86_BUILTIN_UCOMIGTSS,
  IX86_BUILTIN_UCOMIGESS,
  IX86_BUILTIN_UCOMINEQSS,

  IX86_BUILTIN_CVTPI2PS,
  IX86_BUILTIN_CVTPS2PI,
  IX86_BUILTIN_CVTSI2SS,
  IX86_BUILTIN_CVTSI642SS,
  IX86_BUILTIN_CVTSS2SI,
  IX86_BUILTIN_CVTSS2SI64,
  IX86_BUILTIN_CVTTPS2PI,
  IX86_BUILTIN_CVTTSS2SI,
  IX86_BUILTIN_CVTTSS2SI64,

  IX86_BUILTIN_MAXPS,
  IX86_BUILTIN_MAXSS,
  IX86_BUILTIN_MINPS,
  IX86_BUILTIN_MINSS,

  IX86_BUILTIN_LOADUPS,
  IX86_BUILTIN_STOREUPS,
  IX86_BUILTIN_MOVSS,

  IX86_BUILTIN_MOVHLPS,
  IX86_BUILTIN_MOVLHPS,
  IX86_BUILTIN_LOADHPS,
  IX86_BUILTIN_LOADLPS,
  IX86_BUILTIN_STOREHPS,
  IX86_BUILTIN_STORELPS,

  IX86_BUILTIN_MASKMOVQ,
  IX86_BUILTIN_MOVMSKPS,
  IX86_BUILTIN_PMOVMSKB,

  IX86_BUILTIN_MOVNTPS,
  IX86_BUILTIN_MOVNTQ,

  IX86_BUILTIN_LOADDQU,
  IX86_BUILTIN_STOREDQU,

  IX86_BUILTIN_PACKSSWB,
  IX86_BUILTIN_PACKSSDW,
  IX86_BUILTIN_PACKUSWB,

  IX86_BUILTIN_PADDB,
  IX86_BUILTIN_PADDW,
  IX86_BUILTIN_PADDD,
  IX86_BUILTIN_PADDQ,
  IX86_BUILTIN_PADDSB,
  IX86_BUILTIN_PADDSW,
  IX86_BUILTIN_PADDUSB,
  IX86_BUILTIN_PADDUSW,
  IX86_BUILTIN_PSUBB,
  IX86_BUILTIN_PSUBW,
  IX86_BUILTIN_PSUBD,
  IX86_BUILTIN_PSUBQ,
  IX86_BUILTIN_PSUBSB,
  IX86_BUILTIN_PSUBSW,
  IX86_BUILTIN_PSUBUSB,
  IX86_BUILTIN_PSUBUSW,

  IX86_BUILTIN_PAND,
  IX86_BUILTIN_PANDN,
  IX86_BUILTIN_POR,
  IX86_BUILTIN_PXOR,

  IX86_BUILTIN_PAVGB,
  IX86_BUILTIN_PAVGW,

  IX86_BUILTIN_PCMPEQB,
  IX86_BUILTIN_PCMPEQW,
  IX86_BUILTIN_PCMPEQD,
  IX86_BUILTIN_PCMPGTB,
  IX86_BUILTIN_PCMPGTW,
  IX86_BUILTIN_PCMPGTD,

  IX86_BUILTIN_PMADDWD,

  IX86_BUILTIN_PMAXSW,
  IX86_BUILTIN_PMAXUB,
  IX86_BUILTIN_PMINSW,
  IX86_BUILTIN_PMINUB,

  IX86_BUILTIN_PMULHUW,
  IX86_BUILTIN_PMULHW,
  IX86_BUILTIN_PMULLW,

  IX86_BUILTIN_PSADBW,
  IX86_BUILTIN_PSHUFW,

  IX86_BUILTIN_PSLLW,
  IX86_BUILTIN_PSLLD,
  IX86_BUILTIN_PSLLQ,
  IX86_BUILTIN_PSRAW,
  IX86_BUILTIN_PSRAD,
  IX86_BUILTIN_PSRLW,
  IX86_BUILTIN_PSRLD,
  IX86_BUILTIN_PSRLQ,
  IX86_BUILTIN_PSLLWI,
  IX86_BUILTIN_PSLLDI,
  IX86_BUILTIN_PSLLQI,
  IX86_BUILTIN_PSRAWI,
  IX86_BUILTIN_PSRADI,
  IX86_BUILTIN_PSRLWI,
  IX86_BUILTIN_PSRLDI,
  IX86_BUILTIN_PSRLQI,

  IX86_BUILTIN_PUNPCKHBW,
  IX86_BUILTIN_PUNPCKHWD,
  IX86_BUILTIN_PUNPCKHDQ,
  IX86_BUILTIN_PUNPCKLBW,
  IX86_BUILTIN_PUNPCKLWD,
  IX86_BUILTIN_PUNPCKLDQ,

  IX86_BUILTIN_SHUFPS,

  IX86_BUILTIN_RCPPS,
  IX86_BUILTIN_RCPSS,
  IX86_BUILTIN_RSQRTPS,
  IX86_BUILTIN_RSQRTPS_NR,
  IX86_BUILTIN_RSQRTSS,
  IX86_BUILTIN_RSQRTF,
  IX86_BUILTIN_SQRTPS,
  IX86_BUILTIN_SQRTPS_NR,
  IX86_BUILTIN_SQRTSS,

  IX86_BUILTIN_UNPCKHPS,
  IX86_BUILTIN_UNPCKLPS,

  IX86_BUILTIN_ANDPS,
  IX86_BUILTIN_ANDNPS,
  IX86_BUILTIN_ORPS,
  IX86_BUILTIN_XORPS,

  IX86_BUILTIN_LDMXCSR,
  IX86_BUILTIN_STMXCSR,
  IX86_BUILTIN_SFENCE,

  /* 3DNow! Original */
  IX86_BUILTIN_FEMMS,
  IX86_BUILTIN_PAVGUSB,
  IX86_BUILTIN_PF2ID,
  IX86_BUILTIN_PFACC,
  IX86_BUILTIN_PFADD,
  IX86_BUILTIN_PFCMPEQ,
  IX86_BUILTIN_PFCMPGE,
  IX86_BUILTIN_PFCMPGT,
  IX86_BUILTIN_PFMAX,
  IX86_BUILTIN_PFMIN,
  IX86_BUILTIN_PFMUL,
  IX86_BUILTIN_PFRCP,
  IX86_BUILTIN_PFRCPIT1,
  IX86_BUILTIN_PFRCPIT2,
  IX86_BUILTIN_PFRSQIT1,
  IX86_BUILTIN_PFRSQRT,
  IX86_BUILTIN_PFSUB,
  IX86_BUILTIN_PFSUBR,
  IX86_BUILTIN_PI2FD,
  IX86_BUILTIN_PMULHRW,

  /* 3DNow! Athlon Extensions */
  IX86_BUILTIN_PF2IW,
  IX86_BUILTIN_PFNACC,
  IX86_BUILTIN_PFPNACC,
  IX86_BUILTIN_PI2FW,
  IX86_BUILTIN_PSWAPDSI,
  IX86_BUILTIN_PSWAPDSF,

  /* SSE2 */
  IX86_BUILTIN_ADDPD,
  IX86_BUILTIN_ADDSD,
  IX86_BUILTIN_DIVPD,
  IX86_BUILTIN_DIVSD,
  IX86_BUILTIN_MULPD,
  IX86_BUILTIN_MULSD,
  IX86_BUILTIN_SUBPD,
  IX86_BUILTIN_SUBSD,

  IX86_BUILTIN_CMPEQPD,
  IX86_BUILTIN_CMPLTPD,
  IX86_BUILTIN_CMPLEPD,
  IX86_BUILTIN_CMPGTPD,
  IX86_BUILTIN_CMPGEPD,
  IX86_BUILTIN_CMPNEQPD,
  IX86_BUILTIN_CMPNLTPD,
  IX86_BUILTIN_CMPNLEPD,
  IX86_BUILTIN_CMPNGTPD,
  IX86_BUILTIN_CMPNGEPD,
  IX86_BUILTIN_CMPORDPD,
  IX86_BUILTIN_CMPUNORDPD,
  IX86_BUILTIN_CMPEQSD,
  IX86_BUILTIN_CMPLTSD,
  IX86_BUILTIN_CMPLESD,
  IX86_BUILTIN_CMPNEQSD,
  IX86_BUILTIN_CMPNLTSD,
  IX86_BUILTIN_CMPNLESD,
  IX86_BUILTIN_CMPORDSD,
  IX86_BUILTIN_CMPUNORDSD,

  IX86_BUILTIN_COMIEQSD,
  IX86_BUILTIN_COMILTSD,
  IX86_BUILTIN_COMILESD,
  IX86_BUILTIN_COMIGTSD,
  IX86_BUILTIN_COMIGESD,
  IX86_BUILTIN_COMINEQSD,
  IX86_BUILTIN_UCOMIEQSD,
  IX86_BUILTIN_UCOMILTSD,
  IX86_BUILTIN_UCOMILESD,
  IX86_BUILTIN_UCOMIGTSD,
  IX86_BUILTIN_UCOMIGESD,
  IX86_BUILTIN_UCOMINEQSD,

  IX86_BUILTIN_MAXPD,
  IX86_BUILTIN_MAXSD,
  IX86_BUILTIN_MINPD,
  IX86_BUILTIN_MINSD,

  IX86_BUILTIN_ANDPD,
  IX86_BUILTIN_ANDNPD,
  IX86_BUILTIN_ORPD,
  IX86_BUILTIN_XORPD,

  IX86_BUILTIN_SQRTPD,
  IX86_BUILTIN_SQRTSD,

  IX86_BUILTIN_UNPCKHPD,
  IX86_BUILTIN_UNPCKLPD,

  IX86_BUILTIN_SHUFPD,

  IX86_BUILTIN_LOADUPD,
  IX86_BUILTIN_STOREUPD,
  IX86_BUILTIN_MOVSD,

  IX86_BUILTIN_LOADHPD,
  IX86_BUILTIN_LOADLPD,

  IX86_BUILTIN_CVTDQ2PD,
  IX86_BUILTIN_CVTDQ2PS,

  IX86_BUILTIN_CVTPD2DQ,
  IX86_BUILTIN_CVTPD2PI,
  IX86_BUILTIN_CVTPD2PS,
  IX86_BUILTIN_CVTTPD2DQ,
  IX86_BUILTIN_CVTTPD2PI,

  IX86_BUILTIN_CVTPI2PD,
  IX86_BUILTIN_CVTSI2SD,
  IX86_BUILTIN_CVTSI642SD,

  IX86_BUILTIN_CVTSD2SI,
  IX86_BUILTIN_CVTSD2SI64,
  IX86_BUILTIN_CVTSD2SS,
  IX86_BUILTIN_CVTSS2SD,
  IX86_BUILTIN_CVTTSD2SI,
  IX86_BUILTIN_CVTTSD2SI64,

  IX86_BUILTIN_CVTPS2DQ,
  IX86_BUILTIN_CVTPS2PD,
  IX86_BUILTIN_CVTTPS2DQ,

  IX86_BUILTIN_MOVNTI,
  IX86_BUILTIN_MOVNTPD,
  IX86_BUILTIN_MOVNTDQ,

  IX86_BUILTIN_MOVQ128,

  IX86_BUILTIN_MASKMOVDQU,
  IX86_BUILTIN_MOVMSKPD,
  IX86_BUILTIN_PMOVMSKB128,

  IX86_BUILTIN_PACKSSWB128,
  IX86_BUILTIN_PACKSSDW128,
  IX86_BUILTIN_PACKUSWB128,

  IX86_BUILTIN_PADDB128,
  IX86_BUILTIN_PADDW128,
  IX86_BUILTIN_PADDD128,
  IX86_BUILTIN_PADDQ128,
  IX86_BUILTIN_PADDSB128,
  IX86_BUILTIN_PADDSW128,
  IX86_BUILTIN_PADDUSB128,
  IX86_BUILTIN_PADDUSW128,
  IX86_BUILTIN_PSUBB128,
  IX86_BUILTIN_PSUBW128,
  IX86_BUILTIN_PSUBD128,
  IX86_BUILTIN_PSUBQ128,
  IX86_BUILTIN_PSUBSB128,
  IX86_BUILTIN_PSUBSW128,
  IX86_BUILTIN_PSUBUSB128,
  IX86_BUILTIN_PSUBUSW128,

  IX86_BUILTIN_PAND128,
  IX86_BUILTIN_PANDN128,
  IX86_BUILTIN_POR128,
  IX86_BUILTIN_PXOR128,

  IX86_BUILTIN_PAVGB128,
  IX86_BUILTIN_PAVGW128,

  IX86_BUILTIN_PCMPEQB128,
  IX86_BUILTIN_PCMPEQW128,
  IX86_BUILTIN_PCMPEQD128,
  IX86_BUILTIN_PCMPGTB128,
  IX86_BUILTIN_PCMPGTW128,
  IX86_BUILTIN_PCMPGTD128,

  IX86_BUILTIN_PMADDWD128,

  IX86_BUILTIN_PMAXSW128,
  IX86_BUILTIN_PMAXUB128,
  IX86_BUILTIN_PMINSW128,
  IX86_BUILTIN_PMINUB128,

  IX86_BUILTIN_PMULUDQ,
  IX86_BUILTIN_PMULUDQ128,
  IX86_BUILTIN_PMULHUW128,
  IX86_BUILTIN_PMULHW128,
  IX86_BUILTIN_PMULLW128,

  IX86_BUILTIN_PSADBW128,
  IX86_BUILTIN_PSHUFHW,
  IX86_BUILTIN_PSHUFLW,
  IX86_BUILTIN_PSHUFD,

  IX86_BUILTIN_PSLLDQI128,
  IX86_BUILTIN_PSLLWI128,
  IX86_BUILTIN_PSLLDI128,
  IX86_BUILTIN_PSLLQI128,
  IX86_BUILTIN_PSRAWI128,
  IX86_BUILTIN_PSRADI128,
  IX86_BUILTIN_PSRLDQI128,
  IX86_BUILTIN_PSRLWI128,
  IX86_BUILTIN_PSRLDI128,
  IX86_BUILTIN_PSRLQI128,

  IX86_BUILTIN_PSLLDQ128,
  IX86_BUILTIN_PSLLW128,
  IX86_BUILTIN_PSLLD128,
  IX86_BUILTIN_PSLLQ128,
  IX86_BUILTIN_PSRAW128,
  IX86_BUILTIN_PSRAD128,
  IX86_BUILTIN_PSRLW128,
  IX86_BUILTIN_PSRLD128,
  IX86_BUILTIN_PSRLQ128,

  IX86_BUILTIN_PUNPCKHBW128,
  IX86_BUILTIN_PUNPCKHWD128,
  IX86_BUILTIN_PUNPCKHDQ128,
  IX86_BUILTIN_PUNPCKHQDQ128,
  IX86_BUILTIN_PUNPCKLBW128,
  IX86_BUILTIN_PUNPCKLWD128,
  IX86_BUILTIN_PUNPCKLDQ128,
  IX86_BUILTIN_PUNPCKLQDQ128,

  IX86_BUILTIN_CLFLUSH,
  IX86_BUILTIN_MFENCE,
  IX86_BUILTIN_LFENCE,

  IX86_BUILTIN_BSRSI,
  IX86_BUILTIN_BSRDI,
  IX86_BUILTIN_RDPMC,
  IX86_BUILTIN_RDTSC,
  IX86_BUILTIN_RDTSCP,
  IX86_BUILTIN_ROLQI,
  IX86_BUILTIN_ROLHI,
  IX86_BUILTIN_RORQI,
  IX86_BUILTIN_RORHI,

  /* SSE3 */
  IX86_BUILTIN_ADDSUBPS,
  IX86_BUILTIN_HADDPS,
  IX86_BUILTIN_HSUBPS,
  IX86_BUILTIN_MOVSHDUP,
  IX86_BUILTIN_MOVSLDUP,
  IX86_BUILTIN_ADDSUBPD,
  IX86_BUILTIN_HADDPD,
  IX86_BUILTIN_HSUBPD,
  IX86_BUILTIN_LDDQU,

  IX86_BUILTIN_MONITOR,
  IX86_BUILTIN_MWAIT,

  /* SSSE3 */
  IX86_BUILTIN_PHADDW,
  IX86_BUILTIN_PHADDD,
  IX86_BUILTIN_PHADDSW,
  IX86_BUILTIN_PHSUBW,
  IX86_BUILTIN_PHSUBD,
  IX86_BUILTIN_PHSUBSW,
  IX86_BUILTIN_PMADDUBSW,
  IX86_BUILTIN_PMULHRSW,
  IX86_BUILTIN_PSHUFB,
  IX86_BUILTIN_PSIGNB,
  IX86_BUILTIN_PSIGNW,
  IX86_BUILTIN_PSIGND,
  IX86_BUILTIN_PALIGNR,
  IX86_BUILTIN_PABSB,
  IX86_BUILTIN_PABSW,
  IX86_BUILTIN_PABSD,

  IX86_BUILTIN_PHADDW128,
  IX86_BUILTIN_PHADDD128,
  IX86_BUILTIN_PHADDSW128,
  IX86_BUILTIN_PHSUBW128,
  IX86_BUILTIN_PHSUBD128,
  IX86_BUILTIN_PHSUBSW128,
  IX86_BUILTIN_PMADDUBSW128,
  IX86_BUILTIN_PMULHRSW128,
  IX86_BUILTIN_PSHUFB128,
  IX86_BUILTIN_PSIGNB128,
  IX86_BUILTIN_PSIGNW128,
  IX86_BUILTIN_PSIGND128,
  IX86_BUILTIN_PALIGNR128,
  IX86_BUILTIN_PABSB128,
  IX86_BUILTIN_PABSW128,
  IX86_BUILTIN_PABSD128,

  /* AMDFAM10 - SSE4A New Instructions.  */
  IX86_BUILTIN_MOVNTSD,
  IX86_BUILTIN_MOVNTSS,
  IX86_BUILTIN_EXTRQI,
  IX86_BUILTIN_EXTRQ,
  IX86_BUILTIN_INSERTQI,
  IX86_BUILTIN_INSERTQ,

  /* SSE4.1 */
  IX86_BUILTIN_BLENDPD,
  IX86_BUILTIN_BLENDPS,
  IX86_BUILTIN_BLENDVPD,
  IX86_BUILTIN_BLENDVPS,
  IX86_BUILTIN_PBLENDVB128,
  IX86_BUILTIN_PBLENDW128,

  IX86_BUILTIN_DPPD,
  IX86_BUILTIN_DPPS,

  IX86_BUILTIN_INSERTPS128,

  IX86_BUILTIN_MOVNTDQA,
  IX86_BUILTIN_MPSADBW128,
  IX86_BUILTIN_PACKUSDW128,
  IX86_BUILTIN_PCMPEQQ,
  IX86_BUILTIN_PHMINPOSUW128,

  IX86_BUILTIN_PMAXSB128,
  IX86_BUILTIN_PMAXSD128,
  IX86_BUILTIN_PMAXUD128,
  IX86_BUILTIN_PMAXUW128,

  IX86_BUILTIN_PMINSB128,
  IX86_BUILTIN_PMINSD128,
  IX86_BUILTIN_PMINUD128,
  IX86_BUILTIN_PMINUW128,

  IX86_BUILTIN_PMOVSXBW128,
  IX86_BUILTIN_PMOVSXBD128,
  IX86_BUILTIN_PMOVSXBQ128,
  IX86_BUILTIN_PMOVSXWD128,
  IX86_BUILTIN_PMOVSXWQ128,
  IX86_BUILTIN_PMOVSXDQ128,

  IX86_BUILTIN_PMOVZXBW128,
  IX86_BUILTIN_PMOVZXBD128,
  IX86_BUILTIN_PMOVZXBQ128,
  IX86_BUILTIN_PMOVZXWD128,
  IX86_BUILTIN_PMOVZXWQ128,
  IX86_BUILTIN_PMOVZXDQ128,

  IX86_BUILTIN_PMULDQ128,
  IX86_BUILTIN_PMULLD128,

  IX86_BUILTIN_ROUNDPD,
  IX86_BUILTIN_ROUNDPS,
  IX86_BUILTIN_ROUNDSD,
  IX86_BUILTIN_ROUNDSS,

  IX86_BUILTIN_PTESTZ,
  IX86_BUILTIN_PTESTC,
  IX86_BUILTIN_PTESTNZC,

  IX86_BUILTIN_VEC_INIT_V2SI,
  IX86_BUILTIN_VEC_INIT_V4HI,
  IX86_BUILTIN_VEC_INIT_V8QI,
  IX86_BUILTIN_VEC_EXT_V2DF,
  IX86_BUILTIN_VEC_EXT_V2DI,
  IX86_BUILTIN_VEC_EXT_V4SF,
  IX86_BUILTIN_VEC_EXT_V4SI,
  IX86_BUILTIN_VEC_EXT_V8HI,
  IX86_BUILTIN_VEC_EXT_V2SI,
  IX86_BUILTIN_VEC_EXT_V4HI,
  IX86_BUILTIN_VEC_EXT_V16QI,
  IX86_BUILTIN_VEC_SET_V2DI,
  IX86_BUILTIN_VEC_SET_V4SF,
  IX86_BUILTIN_VEC_SET_V4SI,
  IX86_BUILTIN_VEC_SET_V8HI,
  IX86_BUILTIN_VEC_SET_V4HI,
  IX86_BUILTIN_VEC_SET_V16QI,

  IX86_BUILTIN_VEC_PACK_SFIX,

  /* SSE4.2 */
  IX86_BUILTIN_CRC32QI,
  IX86_BUILTIN_CRC32HI,
  IX86_BUILTIN_CRC32SI,
  IX86_BUILTIN_CRC32DI,

  IX86_BUILTIN_PCMPESTRI128,
  IX86_BUILTIN_PCMPESTRM128,
  IX86_BUILTIN_PCMPESTRA128,
  IX86_BUILTIN_PCMPESTRC128,
  IX86_BUILTIN_PCMPESTRO128,
  IX86_BUILTIN_PCMPESTRS128,
  IX86_BUILTIN_PCMPESTRZ128,
  IX86_BUILTIN_PCMPISTRI128,
  IX86_BUILTIN_PCMPISTRM128,
  IX86_BUILTIN_PCMPISTRA128,
  IX86_BUILTIN_PCMPISTRC128,
  IX86_BUILTIN_PCMPISTRO128,
  IX86_BUILTIN_PCMPISTRS128,
  IX86_BUILTIN_PCMPISTRZ128,

  IX86_BUILTIN_PCMPGTQ,

  /* AES instructions */
  IX86_BUILTIN_AESENC128,
  IX86_BUILTIN_AESENCLAST128,
  IX86_BUILTIN_AESDEC128,
  IX86_BUILTIN_AESDECLAST128,
  IX86_BUILTIN_AESIMC128,
  IX86_BUILTIN_AESKEYGENASSIST128,

  /* PCLMUL instruction */
  IX86_BUILTIN_PCLMULQDQ128,

  /* AVX */
  IX86_BUILTIN_ADDPD256,
  IX86_BUILTIN_ADDPS256,
  IX86_BUILTIN_ADDSUBPD256,
  IX86_BUILTIN_ADDSUBPS256,
  IX86_BUILTIN_ANDPD256,
  IX86_BUILTIN_ANDPS256,
  IX86_BUILTIN_ANDNPD256,
  IX86_BUILTIN_ANDNPS256,
  IX86_BUILTIN_BLENDPD256,
  IX86_BUILTIN_BLENDPS256,
  IX86_BUILTIN_BLENDVPD256,
  IX86_BUILTIN_BLENDVPS256,
  IX86_BUILTIN_DIVPD256,
  IX86_BUILTIN_DIVPS256,
  IX86_BUILTIN_DPPS256,
  IX86_BUILTIN_HADDPD256,
  IX86_BUILTIN_HADDPS256,
  IX86_BUILTIN_HSUBPD256,
  IX86_BUILTIN_HSUBPS256,
  IX86_BUILTIN_MAXPD256,
  IX86_BUILTIN_MAXPS256,
  IX86_BUILTIN_MINPD256,
  IX86_BUILTIN_MINPS256,
  IX86_BUILTIN_MULPD256,
  IX86_BUILTIN_MULPS256,
  IX86_BUILTIN_ORPD256,
  IX86_BUILTIN_ORPS256,
  IX86_BUILTIN_SHUFPD256,
  IX86_BUILTIN_SHUFPS256,
  IX86_BUILTIN_SUBPD256,
  IX86_BUILTIN_SUBPS256,
  IX86_BUILTIN_XORPD256,
  IX86_BUILTIN_XORPS256,
  IX86_BUILTIN_CMPSD,
  IX86_BUILTIN_CMPSS,
  IX86_BUILTIN_CMPPD,
  IX86_BUILTIN_CMPPS,
  IX86_BUILTIN_CMPPD256,
  IX86_BUILTIN_CMPPS256,
  IX86_BUILTIN_CVTDQ2PD256,
  IX86_BUILTIN_CVTDQ2PS256,
  IX86_BUILTIN_CVTPD2PS256,
  IX86_BUILTIN_CVTPS2DQ256,
  IX86_BUILTIN_CVTPS2PD256,
  IX86_BUILTIN_CVTTPD2DQ256,
  IX86_BUILTIN_CVTPD2DQ256,
  IX86_BUILTIN_CVTTPS2DQ256,
  IX86_BUILTIN_EXTRACTF128PD256,
  IX86_BUILTIN_EXTRACTF128PS256,
  IX86_BUILTIN_EXTRACTF128SI256,
  IX86_BUILTIN_VZEROALL,
  IX86_BUILTIN_VZEROUPPER,
  IX86_BUILTIN_VPERMILVARPD,
  IX86_BUILTIN_VPERMILVARPS,
  IX86_BUILTIN_VPERMILVARPD256,
  IX86_BUILTIN_VPERMILVARPS256,
  IX86_BUILTIN_VPERMILPD,
  IX86_BUILTIN_VPERMILPS,
  IX86_BUILTIN_VPERMILPD256,
  IX86_BUILTIN_VPERMILPS256,
  IX86_BUILTIN_VPERMIL2PD,
  IX86_BUILTIN_VPERMIL2PS,
  IX86_BUILTIN_VPERMIL2PD256,
  IX86_BUILTIN_VPERMIL2PS256,
  IX86_BUILTIN_VPERM2F128PD256,
  IX86_BUILTIN_VPERM2F128PS256,
  IX86_BUILTIN_VPERM2F128SI256,
  IX86_BUILTIN_VBROADCASTSS,
  IX86_BUILTIN_VBROADCASTSD256,
  IX86_BUILTIN_VBROADCASTSS256,
  IX86_BUILTIN_VBROADCASTPD256,
  IX86_BUILTIN_VBROADCASTPS256,
  IX86_BUILTIN_VINSERTF128PD256,
  IX86_BUILTIN_VINSERTF128PS256,
  IX86_BUILTIN_VINSERTF128SI256,
  IX86_BUILTIN_LOADUPD256,
  IX86_BUILTIN_LOADUPS256,
  IX86_BUILTIN_STOREUPD256,
  IX86_BUILTIN_STOREUPS256,
  IX86_BUILTIN_LDDQU256,
  IX86_BUILTIN_MOVNTDQ256,
  IX86_BUILTIN_MOVNTPD256,
  IX86_BUILTIN_MOVNTPS256,
  IX86_BUILTIN_LOADDQU256,
  IX86_BUILTIN_STOREDQU256,
  IX86_BUILTIN_MASKLOADPD,
  IX86_BUILTIN_MASKLOADPS,
  IX86_BUILTIN_MASKSTOREPD,
  IX86_BUILTIN_MASKSTOREPS,
  IX86_BUILTIN_MASKLOADPD256,
  IX86_BUILTIN_MASKLOADPS256,
  IX86_BUILTIN_MASKSTOREPD256,
  IX86_BUILTIN_MASKSTOREPS256,
  IX86_BUILTIN_MOVSHDUP256,
  IX86_BUILTIN_MOVSLDUP256,
  IX86_BUILTIN_MOVDDUP256,

  IX86_BUILTIN_SQRTPD256,
  IX86_BUILTIN_SQRTPS256,
  IX86_BUILTIN_SQRTPS_NR256,
  IX86_BUILTIN_RSQRTPS256,
  IX86_BUILTIN_RSQRTPS_NR256,

  IX86_BUILTIN_RCPPS256,

  IX86_BUILTIN_ROUNDPD256,
  IX86_BUILTIN_ROUNDPS256,

  IX86_BUILTIN_UNPCKHPD256,
  IX86_BUILTIN_UNPCKLPD256,
  IX86_BUILTIN_UNPCKHPS256,
  IX86_BUILTIN_UNPCKLPS256,

  IX86_BUILTIN_SI256_SI,
  IX86_BUILTIN_PS256_PS,
  IX86_BUILTIN_PD256_PD,
  IX86_BUILTIN_SI_SI256,
  IX86_BUILTIN_PS_PS256,
  IX86_BUILTIN_PD_PD256,

  IX86_BUILTIN_VTESTZPD,
  IX86_BUILTIN_VTESTCPD,
  IX86_BUILTIN_VTESTNZCPD,
  IX86_BUILTIN_VTESTZPS,
  IX86_BUILTIN_VTESTCPS,
  IX86_BUILTIN_VTESTNZCPS,
  IX86_BUILTIN_VTESTZPD256,
  IX86_BUILTIN_VTESTCPD256,
  IX86_BUILTIN_VTESTNZCPD256,
  IX86_BUILTIN_VTESTZPS256,
  IX86_BUILTIN_VTESTCPS256,
  IX86_BUILTIN_VTESTNZCPS256,
  IX86_BUILTIN_PTESTZ256,
  IX86_BUILTIN_PTESTC256,
  IX86_BUILTIN_PTESTNZC256,

  IX86_BUILTIN_MOVMSKPD256,
  IX86_BUILTIN_MOVMSKPS256,

  /* TFmode support builtins.  */
  IX86_BUILTIN_INFQ,
  IX86_BUILTIN_HUGE_VALQ,
  IX86_BUILTIN_FABSQ,
  IX86_BUILTIN_COPYSIGNQ,

  /* Vectorizer support builtins.  */
  IX86_BUILTIN_CPYSGNPS,
  IX86_BUILTIN_CPYSGNPD,

  IX86_BUILTIN_CVTUDQ2PS,

  IX86_BUILTIN_VEC_PERM_V2DF,
  IX86_BUILTIN_VEC_PERM_V4SF,
  IX86_BUILTIN_VEC_PERM_V2DI,
  IX86_BUILTIN_VEC_PERM_V4SI,
  IX86_BUILTIN_VEC_PERM_V8HI,
  IX86_BUILTIN_VEC_PERM_V16QI,
  IX86_BUILTIN_VEC_PERM_V2DI_U,
  IX86_BUILTIN_VEC_PERM_V4SI_U,
  IX86_BUILTIN_VEC_PERM_V8HI_U,
  IX86_BUILTIN_VEC_PERM_V16QI_U,
  IX86_BUILTIN_VEC_PERM_V4DF,
  IX86_BUILTIN_VEC_PERM_V8SF,

  /* FMA4 and XOP instructions.  */
  IX86_BUILTIN_VFMADDSS,
  IX86_BUILTIN_VFMADDSD,
  IX86_BUILTIN_VFMADDPS,
  IX86_BUILTIN_VFMADDPD,
  IX86_BUILTIN_VFMSUBSS,
  IX86_BUILTIN_VFMSUBSD,
  IX86_BUILTIN_VFMSUBPS,
  IX86_BUILTIN_VFMSUBPD,
  IX86_BUILTIN_VFMADDSUBPS,
  IX86_BUILTIN_VFMADDSUBPD,
  IX86_BUILTIN_VFMSUBADDPS,
  IX86_BUILTIN_VFMSUBADDPD,
  IX86_BUILTIN_VFNMADDSS,
  IX86_BUILTIN_VFNMADDSD,
  IX86_BUILTIN_VFNMADDPS,
  IX86_BUILTIN_VFNMADDPD,
  IX86_BUILTIN_VFNMSUBSS,
  IX86_BUILTIN_VFNMSUBSD,
  IX86_BUILTIN_VFNMSUBPS,
  IX86_BUILTIN_VFNMSUBPD,
  IX86_BUILTIN_VFMADDPS256,
  IX86_BUILTIN_VFMADDPD256,
  IX86_BUILTIN_VFMSUBPS256,
  IX86_BUILTIN_VFMSUBPD256,
  IX86_BUILTIN_VFMADDSUBPS256,
  IX86_BUILTIN_VFMADDSUBPD256,
  IX86_BUILTIN_VFMSUBADDPS256,
  IX86_BUILTIN_VFMSUBADDPD256,
  IX86_BUILTIN_VFNMADDPS256,
  IX86_BUILTIN_VFNMADDPD256,
  IX86_BUILTIN_VFNMSUBPS256,
  IX86_BUILTIN_VFNMSUBPD256,

  IX86_BUILTIN_VPCMOV,
  IX86_BUILTIN_VPCMOV_V2DI,
  IX86_BUILTIN_VPCMOV_V4SI,
  IX86_BUILTIN_VPCMOV_V8HI,
  IX86_BUILTIN_VPCMOV_V16QI,
  IX86_BUILTIN_VPCMOV_V4SF,
  IX86_BUILTIN_VPCMOV_V2DF,
  IX86_BUILTIN_VPCMOV256,
  IX86_BUILTIN_VPCMOV_V4DI256,
  IX86_BUILTIN_VPCMOV_V8SI256,
  IX86_BUILTIN_VPCMOV_V16HI256,
  IX86_BUILTIN_VPCMOV_V32QI256,
  IX86_BUILTIN_VPCMOV_V8SF256,
  IX86_BUILTIN_VPCMOV_V4DF256,

  IX86_BUILTIN_VPPERM,

  IX86_BUILTIN_VPMACSSWW,
  IX86_BUILTIN_VPMACSWW,
  IX86_BUILTIN_VPMACSSWD,
  IX86_BUILTIN_VPMACSWD,
  IX86_BUILTIN_VPMACSSDD,
  IX86_BUILTIN_VPMACSDD,
  IX86_BUILTIN_VPMACSSDQL,
  IX86_BUILTIN_VPMACSSDQH,
  IX86_BUILTIN_VPMACSDQL,
  IX86_BUILTIN_VPMACSDQH,
  IX86_BUILTIN_VPMADCSSWD,
  IX86_BUILTIN_VPMADCSWD,

  IX86_BUILTIN_VPHADDBW,
  IX86_BUILTIN_VPHADDBD,
  IX86_BUILTIN_VPHADDBQ,
  IX86_BUILTIN_VPHADDWD,
  IX86_BUILTIN_VPHADDWQ,
  IX86_BUILTIN_VPHADDDQ,
  IX86_BUILTIN_VPHADDUBW,
  IX86_BUILTIN_VPHADDUBD,
  IX86_BUILTIN_VPHADDUBQ,
  IX86_BUILTIN_VPHADDUWD,
  IX86_BUILTIN_VPHADDUWQ,
  IX86_BUILTIN_VPHADDUDQ,
  IX86_BUILTIN_VPHSUBBW,
  IX86_BUILTIN_VPHSUBWD,
  IX86_BUILTIN_VPHSUBDQ,

  IX86_BUILTIN_VPROTB,
  IX86_BUILTIN_VPROTW,
  IX86_BUILTIN_VPROTD,
  IX86_BUILTIN_VPROTQ,
  IX86_BUILTIN_VPROTB_IMM,
  IX86_BUILTIN_VPROTW_IMM,
  IX86_BUILTIN_VPROTD_IMM,
  IX86_BUILTIN_VPROTQ_IMM,

  IX86_BUILTIN_VPSHLB,
  IX86_BUILTIN_VPSHLW,
  IX86_BUILTIN_VPSHLD,
  IX86_BUILTIN_VPSHLQ,
  IX86_BUILTIN_VPSHAB,
  IX86_BUILTIN_VPSHAW,
  IX86_BUILTIN_VPSHAD,
  IX86_BUILTIN_VPSHAQ,

  IX86_BUILTIN_VFRCZSS,
  IX86_BUILTIN_VFRCZSD,
  IX86_BUILTIN_VFRCZPS,
  IX86_BUILTIN_VFRCZPD,
  IX86_BUILTIN_VFRCZPS256,
  IX86_BUILTIN_VFRCZPD256,

  IX86_BUILTIN_VPCOMEQUB,
  IX86_BUILTIN_VPCOMNEUB,
  IX86_BUILTIN_VPCOMLTUB,
  IX86_BUILTIN_VPCOMLEUB,
  IX86_BUILTIN_VPCOMGTUB,
  IX86_BUILTIN_VPCOMGEUB,
  IX86_BUILTIN_VPCOMFALSEUB,
  IX86_BUILTIN_VPCOMTRUEUB,

  IX86_BUILTIN_VPCOMEQUW,
  IX86_BUILTIN_VPCOMNEUW,
  IX86_BUILTIN_VPCOMLTUW,
  IX86_BUILTIN_VPCOMLEUW,
  IX86_BUILTIN_VPCOMGTUW,
  IX86_BUILTIN_VPCOMGEUW,
  IX86_BUILTIN_VPCOMFALSEUW,
  IX86_BUILTIN_VPCOMTRUEUW,

  IX86_BUILTIN_VPCOMEQUD,
  IX86_BUILTIN_VPCOMNEUD,
  IX86_BUILTIN_VPCOMLTUD,
  IX86_BUILTIN_VPCOMLEUD,
  IX86_BUILTIN_VPCOMGTUD,
  IX86_BUILTIN_VPCOMGEUD,
  IX86_BUILTIN_VPCOMFALSEUD,
  IX86_BUILTIN_VPCOMTRUEUD,

  IX86_BUILTIN_VPCOMEQUQ,
  IX86_BUILTIN_VPCOMNEUQ,
  IX86_BUILTIN_VPCOMLTUQ,
  IX86_BUILTIN_VPCOMLEUQ,
  IX86_BUILTIN_VPCOMGTUQ,
  IX86_BUILTIN_VPCOMGEUQ,
  IX86_BUILTIN_VPCOMFALSEUQ,
  IX86_BUILTIN_VPCOMTRUEUQ,

  IX86_BUILTIN_VPCOMEQB,
  IX86_BUILTIN_VPCOMNEB,
  IX86_BUILTIN_VPCOMLTB,
  IX86_BUILTIN_VPCOMLEB,
  IX86_BUILTIN_VPCOMGTB,
  IX86_BUILTIN_VPCOMGEB,
  IX86_BUILTIN_VPCOMFALSEB,
  IX86_BUILTIN_VPCOMTRUEB,

  IX86_BUILTIN_VPCOMEQW,
  IX86_BUILTIN_VPCOMNEW,
  IX86_BUILTIN_VPCOMLTW,
  IX86_BUILTIN_VPCOMLEW,
  IX86_BUILTIN_VPCOMGTW,
  IX86_BUILTIN_VPCOMGEW,
  IX86_BUILTIN_VPCOMFALSEW,
  IX86_BUILTIN_VPCOMTRUEW,

  IX86_BUILTIN_VPCOMEQD,
  IX86_BUILTIN_VPCOMNED,
  IX86_BUILTIN_VPCOMLTD,
  IX86_BUILTIN_VPCOMLED,
  IX86_BUILTIN_VPCOMGTD,
  IX86_BUILTIN_VPCOMGED,
  IX86_BUILTIN_VPCOMFALSED,
  IX86_BUILTIN_VPCOMTRUED,

  IX86_BUILTIN_VPCOMEQQ,
  IX86_BUILTIN_VPCOMNEQ,
  IX86_BUILTIN_VPCOMLTQ,
  IX86_BUILTIN_VPCOMLEQ,
  IX86_BUILTIN_VPCOMGTQ,
  IX86_BUILTIN_VPCOMGEQ,
  IX86_BUILTIN_VPCOMFALSEQ,
  IX86_BUILTIN_VPCOMTRUEQ,

  /* LWP instructions.  */
  IX86_BUILTIN_LLWPCB,
  IX86_BUILTIN_SLWPCB,
  IX86_BUILTIN_LWPVAL32,
  IX86_BUILTIN_LWPVAL64,
  IX86_BUILTIN_LWPINS32,
  IX86_BUILTIN_LWPINS64,

  IX86_BUILTIN_MAX
};
/* Table for the ix86 builtin decls.  */
static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];

/* Table of all of the builtin functions that are possible with different ISAs
   but are waiting to be built until a function is declared to use that
   ISA.  */
struct builtin_isa {
  const char *name;		/* function name */
  enum ix86_builtin_func_type tcode; /* type to use in the declaration */
  int isa;			/* isa_flags this builtin is defined for */
  bool const_p;			/* true if the declaration is constant */
  bool set_and_not_built_p;	/* true if the decl is still pending */
};

static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
/* Add an ix86 target builtin function with CODE, NAME and TYPE.  Save the MASK
   of which isa_flags to use in the ix86_builtins_isa array.  Stores the
   function decl in the ix86_builtins array.  Returns the function decl or
   NULL_TREE, if the builtin was not added.

   If the front end has a special hook for builtin functions, delay adding
   builtin functions that aren't in the current ISA until the ISA is changed
   with function specific optimization.  Doing so can save about 300K for the
   default compiler.  When the builtin is expanded, check at that time whether
   it is valid.

   If the front end doesn't have a special hook, record all builtins, even if
   it isn't an instruction set in the current ISA in case the user uses
   function specific options for a different ISA, so that we don't get scope
   errors if a builtin is added in the middle of a function scope.  */

static inline tree
def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
	     enum ix86_builtins code)
{
  tree decl = NULL_TREE;

  if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
    {
      ix86_builtins_isa[(int) code].isa = mask;
      mask &= ~OPTION_MASK_ISA_64BIT;
      if (mask == 0
	  || (mask & ix86_isa_flags) != 0
	  || (lang_hooks.builtin_function
	      == lang_hooks.builtin_function_ext_scope))
	{
	  tree type = ix86_get_builtin_func_type (tcode);
	  decl = add_builtin_function (name, type, code, BUILT_IN_MD,
				       NULL, NULL_TREE);
	  ix86_builtins[(int) code] = decl;
	  ix86_builtins_isa[(int) code].set_and_not_built_p = false;
	}
      else
	{
	  ix86_builtins[(int) code] = NULL_TREE;
	  ix86_builtins_isa[(int) code].tcode = tcode;
	  ix86_builtins_isa[(int) code].name = name;
	  ix86_builtins_isa[(int) code].const_p = false;
	  ix86_builtins_isa[(int) code].set_and_not_built_p = true;
	}
    }

  return decl;
}
/* Like def_builtin, but also marks the function decl "const".  */

static inline tree
def_builtin_const (int mask, const char *name,
		   enum ix86_builtin_func_type tcode, enum ix86_builtins code)
{
  tree decl = def_builtin (mask, name, tcode, code);
  if (decl)
    TREE_READONLY (decl) = 1;
  else
    ix86_builtins_isa[(int) code].const_p = true;

  return decl;
}
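/* Illustrative (hypothetical) registration showing how the two helpers
   above are used; the name, function-type code and builtin code follow
   the same scheme as the tables below:

       def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_paddw128",
			  V8HI_FTYPE_V8HI_V8HI, IX86_BUILTIN_PADDW128);

   With -msse2 in effect the decl is created on the spot; otherwise only
   the builtin_isa record is filled in, and the decl is materialized
   later by ix86_add_new_builtins once the ISA becomes available.  */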
/* Add any new builtin functions for a given ISA that may not have been
   declared.  This saves a bit of space compared to adding all of the
   declarations to the tree, even if we didn't use them.  */

static void
ix86_add_new_builtins (int isa)
{
  int i;

  for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
    {
      if ((ix86_builtins_isa[i].isa & isa) != 0
	  && ix86_builtins_isa[i].set_and_not_built_p)
	{
	  tree decl, type;

	  /* Don't define the builtin again.  */
	  ix86_builtins_isa[i].set_and_not_built_p = false;

	  type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
	  decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
						 type, i, BUILT_IN_MD, NULL,
						 NULL_TREE);

	  ix86_builtins[i] = decl;
	  if (ix86_builtins_isa[i].const_p)
	    TREE_READONLY (decl) = 1;
	}
    }
}
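/* For illustration: a function-specific ISA in user code, e.g.

       __attribute__ ((target ("sse4.2")))
       unsigned int
       crc8 (unsigned int c, unsigned char v)
       {
	 return __builtin_ia32_crc32qi (c, v);
       }

   compiled without -msse4.2 relies on this hook: the crc32 builtin was
   only recorded in ix86_builtins_isa at startup, and enabling SSE4.2
   for CRC8 triggers ix86_add_new_builtins to materialize the pending
   decl.  */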
/* Bits for builtin_description.flag.  */

/* Set when we don't support the comparison natively, and should
   swap_comparison in order to support it.  */
#define BUILTIN_DESC_SWAP_OPERANDS 1

struct builtin_description
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum ix86_builtins code;
  const enum rtx_code comparison;
  const int flag;
};
static const struct builtin_description bdesc_comi[] =
{
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
};
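/* User code normally reaches these descriptors through the intrinsic
   wrappers, e.g. (illustrative):

       #include <xmmintrin.h>
       int eq (__m128 a, __m128 b) { return _mm_comieq_ss (a, b); }

   _mm_comieq_ss expands to __builtin_ia32_comieq, the first entry
   above: the sse_comi pattern evaluated with the UNEQ comparison.  */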
static const struct builtin_description bdesc_pcmpestr[] =
{
  /* SSE4.2 */
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
};

static const struct builtin_description bdesc_pcmpistr[] =
{
  /* SSE4.2 */
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
};
/* Special builtins with variable number of arguments.  */
static const struct builtin_description bdesc_special_args[] =
{
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },

  /* MMX */
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },

  /* 3DNow! */
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },

  /* SSE */
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },

  /* SSE or 3DNow!A  */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },

  /* SSE2 */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },

  /* SSE3 */
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },

  /* SSE4.1 */
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },

  /* SSE4A */
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },

  /* AVX */
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },

  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },

  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
};
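/* Example of a "special" builtin from the table above (illustrative):
   the non-temporal store behind _mm_stream_ps:

       #include <xmmintrin.h>
       void spill (float *p, __m128 v) { _mm_stream_ps (p, v); }

   _mm_stream_ps expands to __builtin_ia32_movntps; its
   VOID_FTYPE_PFLOAT_V4SF shape (a pointer argument, no result) is why
   these builtins are described separately from the plain bdesc_args
   entries that follow.  */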
21602 /* Builtins with variable number of arguments. */
21603 static const struct builtin_description bdesc_args[] =
21605 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
21606 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
21607 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
21608 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21609 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21610 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21611 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21614 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21615 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21616 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21617 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21618 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21619 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21621 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21622 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21623 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21624 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21625 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21626 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21627 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21628 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21630 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21631 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21633 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21634 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21635 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21636 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
/* Each entry is { ISA option mask, insn code, builtin name,
   builtin code, rtx comparison code (or UNKNOWN), prototype flag }.  */
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },

{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },

{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
{ OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
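
/* 3DNow! */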
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },

{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
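
/* 3DNow!A */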
{ OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
{ OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
{ OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
{ OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
{ OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
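
/* SSE */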
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },

{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

{ OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

/* For the cmp* entries the rtx comparison code (EQ, LT, UNORDERED, ...)
   selects the condition to generate; *_SWAP prototypes swap the two
   vector operands first, so cmpgt/cmpge are emitted as LT/LE on the
   swapped operands.  */
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },

{ OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

{ OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

{ OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },

{ OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },

{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },

/* SSE MMX or 3Dnow!A */
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },

{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
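
/* SSE2 */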
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
{ OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
{ OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },

{ OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
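
/* SSE2 MMX */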
{ OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
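
/* SSE3 */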
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },

{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
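
/* SSSE3 */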
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },

{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
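
/* SSE4.1 */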
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },

{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },

{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
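
/* SSE4.1 round and ptest */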
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
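
/* SSE4.2 */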
{ OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
{ OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
{ OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
{ OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
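
/* SSE4A */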
{ OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
{ OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
{ OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
{ OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
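
/* AES */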
{ OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
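
/* PCLMUL */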
{ OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
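
/* AVX */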
{ OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },

{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22175 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
22176 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
22177 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },
22179 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22180 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22181 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22183 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22184 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22185 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22186 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22187 { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22189 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22191 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22192 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22194 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22195 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22196 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22197 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22199 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
22200 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
22201 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
22202 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si_si256, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
22203 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps_ps256, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
22204 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd_pd256, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
22206 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22207 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22208 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22209 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22210 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22211 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22212 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22213 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22214 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22215 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22216 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22217 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22218 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22219 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22220 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22222 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
22223 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
22225 { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
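
/* Worked example (hypothetical user code, not part of this file): each
   row above binds one builtin to a named insn pattern, and immintrin.h
   wraps the builtin in an intrinsic.  Assuming -mavx:

     #include <immintrin.h>

     __m256d
     vmax (__m256d a, __m256d b)
     {
       return _mm256_max_pd (a, b);   // expands __builtin_ia32_maxpd256
     }
*/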
/* FMA4 and XOP.  */
#define MULTI_ARG_4_DF2_DI_I	V2DF_FTYPE_V2DF_V2DF_V2DI_INT
#define MULTI_ARG_4_DF2_DI_I1	V4DF_FTYPE_V4DF_V4DF_V4DI_INT
#define MULTI_ARG_4_SF2_SI_I	V4SF_FTYPE_V4SF_V4SF_V4SI_INT
#define MULTI_ARG_4_SF2_SI_I1	V8SF_FTYPE_V8SF_V8SF_V8SI_INT
#define MULTI_ARG_3_SF		V4SF_FTYPE_V4SF_V4SF_V4SF
#define MULTI_ARG_3_DF		V2DF_FTYPE_V2DF_V2DF_V2DF
#define MULTI_ARG_3_SF2		V8SF_FTYPE_V8SF_V8SF_V8SF
#define MULTI_ARG_3_DF2		V4DF_FTYPE_V4DF_V4DF_V4DF
#define MULTI_ARG_3_DI		V2DI_FTYPE_V2DI_V2DI_V2DI
#define MULTI_ARG_3_SI		V4SI_FTYPE_V4SI_V4SI_V4SI
#define MULTI_ARG_3_SI_DI	V4SI_FTYPE_V4SI_V4SI_V2DI
#define MULTI_ARG_3_HI		V8HI_FTYPE_V8HI_V8HI_V8HI
#define MULTI_ARG_3_HI_SI	V8HI_FTYPE_V8HI_V8HI_V4SI
#define MULTI_ARG_3_QI		V16QI_FTYPE_V16QI_V16QI_V16QI
#define MULTI_ARG_3_DI2		V4DI_FTYPE_V4DI_V4DI_V4DI
#define MULTI_ARG_3_SI2		V8SI_FTYPE_V8SI_V8SI_V8SI
#define MULTI_ARG_3_HI2		V16HI_FTYPE_V16HI_V16HI_V16HI
#define MULTI_ARG_3_QI2		V32QI_FTYPE_V32QI_V32QI_V32QI
#define MULTI_ARG_2_SF		V4SF_FTYPE_V4SF_V4SF
#define MULTI_ARG_2_DF		V2DF_FTYPE_V2DF_V2DF
#define MULTI_ARG_2_DI		V2DI_FTYPE_V2DI_V2DI
#define MULTI_ARG_2_SI		V4SI_FTYPE_V4SI_V4SI
#define MULTI_ARG_2_HI		V8HI_FTYPE_V8HI_V8HI
#define MULTI_ARG_2_QI		V16QI_FTYPE_V16QI_V16QI
#define MULTI_ARG_2_DI_IMM	V2DI_FTYPE_V2DI_SI
#define MULTI_ARG_2_SI_IMM	V4SI_FTYPE_V4SI_SI
#define MULTI_ARG_2_HI_IMM	V8HI_FTYPE_V8HI_SI
#define MULTI_ARG_2_QI_IMM	V16QI_FTYPE_V16QI_SI
#define MULTI_ARG_2_DI_CMP	V2DI_FTYPE_V2DI_V2DI_CMP
#define MULTI_ARG_2_SI_CMP	V4SI_FTYPE_V4SI_V4SI_CMP
#define MULTI_ARG_2_HI_CMP	V8HI_FTYPE_V8HI_V8HI_CMP
#define MULTI_ARG_2_QI_CMP	V16QI_FTYPE_V16QI_V16QI_CMP
#define MULTI_ARG_2_SF_TF	V4SF_FTYPE_V4SF_V4SF_TF
#define MULTI_ARG_2_DF_TF	V2DF_FTYPE_V2DF_V2DF_TF
#define MULTI_ARG_2_DI_TF	V2DI_FTYPE_V2DI_V2DI_TF
#define MULTI_ARG_2_SI_TF	V4SI_FTYPE_V4SI_V4SI_TF
#define MULTI_ARG_2_HI_TF	V8HI_FTYPE_V8HI_V8HI_TF
#define MULTI_ARG_2_QI_TF	V16QI_FTYPE_V16QI_V16QI_TF
#define MULTI_ARG_1_SF		V4SF_FTYPE_V4SF
#define MULTI_ARG_1_DF		V2DF_FTYPE_V2DF
#define MULTI_ARG_1_SF2		V8SF_FTYPE_V8SF
#define MULTI_ARG_1_DF2		V4DF_FTYPE_V4DF
#define MULTI_ARG_1_DI		V2DI_FTYPE_V2DI
#define MULTI_ARG_1_SI		V4SI_FTYPE_V4SI
#define MULTI_ARG_1_HI		V8HI_FTYPE_V8HI
#define MULTI_ARG_1_QI		V16QI_FTYPE_V16QI
#define MULTI_ARG_1_SI_DI	V2DI_FTYPE_V4SI
#define MULTI_ARG_1_HI_DI	V2DI_FTYPE_V8HI
#define MULTI_ARG_1_HI_SI	V4SI_FTYPE_V8HI
#define MULTI_ARG_1_QI_DI	V2DI_FTYPE_V16QI
#define MULTI_ARG_1_QI_SI	V4SI_FTYPE_V16QI
#define MULTI_ARG_1_QI_HI	V8HI_FTYPE_V16QI
static const struct builtin_description bdesc_multi_arg[] =
{
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv4sf4, "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv2df4, "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4sf4, "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv2df4, "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv4sf4, "__builtin_ia32_vfmsubss", IX86_BUILTIN_VFMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv2df4, "__builtin_ia32_vfmsubsd", IX86_BUILTIN_VFMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4sf4, "__builtin_ia32_vfmsubps", IX86_BUILTIN_VFMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv2df4, "__builtin_ia32_vfmsubpd", IX86_BUILTIN_VFMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv4sf4, "__builtin_ia32_vfnmaddss", IX86_BUILTIN_VFNMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv2df4, "__builtin_ia32_vfnmaddsd", IX86_BUILTIN_VFNMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4sf4, "__builtin_ia32_vfnmaddps", IX86_BUILTIN_VFNMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv2df4, "__builtin_ia32_vfnmaddpd", IX86_BUILTIN_VFNMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv4sf4, "__builtin_ia32_vfnmsubss", IX86_BUILTIN_VFNMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv2df4, "__builtin_ia32_vfnmsubsd", IX86_BUILTIN_VFNMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4sf4, "__builtin_ia32_vfnmsubps", IX86_BUILTIN_VFNMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv2df4, "__builtin_ia32_vfnmsubpd", IX86_BUILTIN_VFNMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4sf4, "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv2df4, "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4sf4, "__builtin_ia32_vfmsubaddps", IX86_BUILTIN_VFMSUBADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv2df4, "__builtin_ia32_vfmsubaddpd", IX86_BUILTIN_VFMSUBADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv8sf4256, "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4df4256, "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv8sf4256, "__builtin_ia32_vfmsubps256", IX86_BUILTIN_VFMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4df4256, "__builtin_ia32_vfmsubpd256", IX86_BUILTIN_VFMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv8sf4256, "__builtin_ia32_vfnmaddps256", IX86_BUILTIN_VFNMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4df4256, "__builtin_ia32_vfnmaddpd256", IX86_BUILTIN_VFNMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv8sf4256, "__builtin_ia32_vfnmsubps256", IX86_BUILTIN_VFNMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4df4256, "__builtin_ia32_vfnmsubpd256", IX86_BUILTIN_VFNMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv8sf4, "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4df4, "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv8sf4, "__builtin_ia32_vfmsubaddps256", IX86_BUILTIN_VFMSUBADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4df4, "__builtin_ia32_vfmsubaddpd256", IX86_BUILTIN_VFMSUBADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi", IX86_BUILTIN_VPCMOV_V16QI, UNKNOWN, (int)MULTI_ARG_3_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2256, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2256, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub", IX86_BUILTIN_VPCOMFALSEUB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw", IX86_BUILTIN_VPCOMFALSEUW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud", IX86_BUILTIN_VPCOMFALSEUD, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq", IX86_BUILTIN_VPCOMFALSEUQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
};
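
/* Worked example (hypothetical user code, not part of this file): with
   -mfma4, x86intrin.h maps the FMA4 intrinsics onto the three-operand
   builtins registered above:

     #include <x86intrin.h>

     __m128
     madd (__m128 a, __m128 b, __m128 c)
     {
       return _mm_macc_ps (a, b, c);   // __builtin_ia32_vfmaddps (a, b, c)
     }
*/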
/* Set up all the MMX/SSE builtins, even builtins for instructions that are not
   in the current target ISA, to allow the user to compile particular modules
   with different target specific options that differ from the command line
   options.  */
static void
ix86_init_mmx_sse_builtins (void)
{
  const struct builtin_description * d;
  enum ix86_builtin_func_type ftype;
  size_t i;

  /* Add all special builtins with variable number of operands.  */
  for (i = 0, d = bdesc_special_args;
       i < ARRAY_SIZE (bdesc_special_args);
       i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin (d->mask, d->name, ftype, d->code);
    }

  /* Add all builtins with variable number of operands.  */
  for (i = 0, d = bdesc_args;
       i < ARRAY_SIZE (bdesc_args);
       i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpestr[im] insns.  */
  for (i = 0, d = bdesc_pcmpestr;
       i < ARRAY_SIZE (bdesc_pcmpestr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPESTRM128)
	ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
      else
	ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpistr[im] insns.  */
  for (i = 0, d = bdesc_pcmpistr;
       i < ARRAY_SIZE (bdesc_pcmpistr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPISTRM128)
	ftype = V16QI_FTYPE_V16QI_V16QI_INT;
      else
	ftype = INT_FTYPE_V16QI_V16QI_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* comi/ucomi insns.  */
  for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
    {
      if (d->mask == OPTION_MASK_ISA_SSE2)
	ftype = INT_FTYPE_V2DF_V2DF;
      else
	ftype = INT_FTYPE_V4SF_V4SF;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* SSE */
  def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
	       VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
  def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
	       UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);

  /* SSE or 3DNow!A */
  def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
	       "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
	       IX86_BUILTIN_MASKMOVQ);

  /* SSE2 */
  def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
	       VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);

  def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
	       VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
  x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
			    VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);

  /* SSE3.  */
  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
	       VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
	       VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);

  /* AES */
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
		     V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
		     V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);

  /* PCLMUL */
  def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
		     V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);

  /* MMX access to the vec_init patterns.  */
  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
		     V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
		     V4HI_FTYPE_HI_HI_HI_HI,
		     IX86_BUILTIN_VEC_INIT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
		     V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
		     IX86_BUILTIN_VEC_INIT_V8QI);

  /* Access to the vec_extract patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
		     DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
		     DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
  def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
		     FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
		     SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
		     HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
		     "__builtin_ia32_vec_ext_v4hi",
		     HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
		     SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
		     QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);

  /* Access to the vec_set patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
		     "__builtin_ia32_vec_set_v2di",
		     V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
		     V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
		     V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
		     V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
		     "__builtin_ia32_vec_set_v4hi",
		     V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
		     V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);

  /* Add FMA4 multi-arg argument instructions.  */
  for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }
}
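
/* Example (hypothetical user code, a sketch): because every builtin is
   registered even when its ISA is off by default, a single module can
   opt in per function instead of changing the command line:

     __attribute__((target ("avx"))) __m256
     scaled (__m256 x, __m256 y)
     {
       return __builtin_ia32_mulps256 (x, y);
     }
*/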
/* Internal method for ix86_init_builtins.  */

static void
ix86_init_builtins_va_builtins_abi (void)
{
  tree ms_va_ref, sysv_va_ref;
  tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
  tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
  tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
  tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;

  if (!TARGET_64BIT)
    return;
  fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
  fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
  ms_va_ref = build_reference_type (ms_va_list_type_node);
  sysv_va_ref =
    build_pointer_type (TREE_TYPE (sysv_va_list_type_node));

  fnvoid_va_end_ms =
    build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_start_ms =
    build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_end_sysv =
    build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
  fnvoid_va_start_sysv =
    build_varargs_function_type_list (void_type_node, sysv_va_ref,
				      NULL_TREE);
  fnvoid_va_copy_ms =
    build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
			      NULL_TREE);
  fnvoid_va_copy_sysv =
    build_function_type_list (void_type_node, sysv_va_ref,
			      sysv_va_ref, NULL_TREE);

  add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
			BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
			BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
			BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
			BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
			BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
			BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
}
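
/* Example (hypothetical user code, a sketch): on x86-64 these builtins
   let a sysv translation unit define an ms_abi varargs function;
   va_start inside such a function lowers to __builtin_ms_va_start:

     int __attribute__((ms_abi))
     sum (int n, ...)
     {
       __builtin_ms_va_list ap;
       int s = 0;
       __builtin_ms_va_start (ap, n);
       while (n-- > 0)
	 s += __builtin_va_arg (ap, int);
       __builtin_ms_va_end (ap);
       return s;
     }
*/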
static void
ix86_init_builtin_types (void)
{
  tree float128_type_node, float80_type_node;

  /* The __float80 type.  */
  float80_type_node = long_double_type_node;
  if (TYPE_MODE (float80_type_node) != XFmode)
    {
      /* The __float80 type.  */
      float80_type_node = make_node (REAL_TYPE);

      TYPE_PRECISION (float80_type_node) = 80;
      layout_type (float80_type_node);
    }
  (*lang_hooks.types.register_builtin_type) (float80_type_node, "__float80");

  /* The __float128 type.  */
  float128_type_node = make_node (REAL_TYPE);
  TYPE_PRECISION (float128_type_node) = 128;
  layout_type (float128_type_node);
  (*lang_hooks.types.register_builtin_type) (float128_type_node, "__float128");

  /* This macro is built by i386-builtin-types.awk.  */
  DEFINE_BUILTIN_PRIMITIVE_TYPES;
}
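
/* Example (hypothetical user code): the two types registered above are
   usable directly; the 'w' and 'q' literal suffixes are the matching
   GCC extensions:

     __float80  e = 1.0w;   // x87 extended precision (XFmode)
     __float128 q = 1.0q;   // 128-bit float (TFmode)
*/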
static void
ix86_init_builtins (void)
{
  tree t;

  ix86_init_builtin_types ();

  /* TFmode support builtins.  */
  def_builtin_const (0, "__builtin_infq",
		     FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
  def_builtin_const (0, "__builtin_huge_valq",
		     FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);

  /* We will expand them to normal calls if SSE2 isn't available, since
     they are used by libgcc.  */
  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
  t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
			    BUILT_IN_MD, "__fabstf2", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;

  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
  t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
			    BUILT_IN_MD, "__copysigntf3", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;

  ix86_init_mmx_sse_builtins ();

  if (TARGET_64BIT)
    ix86_init_builtins_va_builtins_abi ();
}
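
/* Example (hypothetical user code): the TFmode builtins registered above
   expand inline when SSE2 is available and otherwise fall back to the
   libgcc entry points named in their registrations (__fabstf2,
   __copysigntf3):

     __float128
     magnitude_with_sign (__float128 x, __float128 s)
     {
       return __builtin_copysignq (__builtin_fabsq (x), s);
     }
*/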
/* Return the ix86 builtin for CODE.  */

static tree
ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= IX86_BUILTIN_MAX)
    return error_mark_node;

  return ix86_builtins[code];
}

/* Errors in the source file can cause expand_expr to return const0_rtx
   where we expect a vector.  To avoid crashing, use one of the vector
   clear instructions.  */
static rtx
safe_vector_operand (rtx x, enum machine_mode mode)
{
  if (x == const0_rtx)
    x = CONST0_RTX (mode);
  return x;
}
/* Subroutine of ix86_expand_builtin to take care of binop insns.  */

static rtx
ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (GET_MODE (op1) == SImode && mode1 == TImode)
    {
      /* The pattern wants a TImode operand but we have an SImode value;
	 load it into a vector register and use its TImode low part.  */
      rtx x = gen_reg_rtx (V4SImode);
      emit_insn (gen_sse2_loadd (x, op1));
      op1 = gen_lowpart (TImode, x);
    }

  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns.  */

static rtx
ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
			       enum ix86_builtin_func_type m_type,
			       enum rtx_code sub_code)
{
  rtx pat;
  int i;
  int nargs;
  bool comparison_p = false;
  bool tf_p = false;
  bool last_arg_constant = false;
  int num_memory = 0;
  struct {
    rtx op;
    enum machine_mode mode;
  } args[4];

  enum machine_mode tmode = insn_data[icode].operand[0].mode;

  switch (m_type)
    {
    case MULTI_ARG_4_DF2_DI_I:
    case MULTI_ARG_4_DF2_DI_I1:
    case MULTI_ARG_4_SF2_SI_I:
    case MULTI_ARG_4_SF2_SI_I1:
      nargs = 4;
      last_arg_constant = true;
      break;

    case MULTI_ARG_3_SF:
    case MULTI_ARG_3_DF:
    case MULTI_ARG_3_SF2:
    case MULTI_ARG_3_DF2:
    case MULTI_ARG_3_DI:
    case MULTI_ARG_3_SI:
    case MULTI_ARG_3_SI_DI:
    case MULTI_ARG_3_HI:
    case MULTI_ARG_3_HI_SI:
    case MULTI_ARG_3_QI:
    case MULTI_ARG_3_DI2:
    case MULTI_ARG_3_SI2:
    case MULTI_ARG_3_HI2:
    case MULTI_ARG_3_QI2:
      nargs = 3;
      break;

    case MULTI_ARG_2_SF:
    case MULTI_ARG_2_DF:
    case MULTI_ARG_2_DI:
    case MULTI_ARG_2_SI:
    case MULTI_ARG_2_HI:
    case MULTI_ARG_2_QI:
      nargs = 2;
      break;

    case MULTI_ARG_2_DI_IMM:
    case MULTI_ARG_2_SI_IMM:
    case MULTI_ARG_2_HI_IMM:
    case MULTI_ARG_2_QI_IMM:
      nargs = 2;
      last_arg_constant = true;
      break;

    case MULTI_ARG_1_SF:
    case MULTI_ARG_1_DF:
    case MULTI_ARG_1_SF2:
    case MULTI_ARG_1_DF2:
    case MULTI_ARG_1_DI:
    case MULTI_ARG_1_SI:
    case MULTI_ARG_1_HI:
    case MULTI_ARG_1_QI:
    case MULTI_ARG_1_SI_DI:
    case MULTI_ARG_1_HI_DI:
    case MULTI_ARG_1_HI_SI:
    case MULTI_ARG_1_QI_DI:
    case MULTI_ARG_1_QI_SI:
    case MULTI_ARG_1_QI_HI:
      nargs = 1;
      break;

    case MULTI_ARG_2_DI_CMP:
    case MULTI_ARG_2_SI_CMP:
    case MULTI_ARG_2_HI_CMP:
    case MULTI_ARG_2_QI_CMP:
      nargs = 2;
      comparison_p = true;
      break;

    case MULTI_ARG_2_SF_TF:
    case MULTI_ARG_2_DF_TF:
    case MULTI_ARG_2_DI_TF:
    case MULTI_ARG_2_SI_TF:
    case MULTI_ARG_2_HI_TF:
    case MULTI_ARG_2_QI_TF:
      nargs = 2;
      tf_p = true;
      break;

    default:
      gcc_unreachable ();
    }

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  gcc_assert (nargs <= 4);

  for (i = 0; i < nargs; i++)
    {
      tree arg = CALL_EXPR_ARG (exp, i);
      rtx op = expand_normal (arg);
      int adjust = (comparison_p) ? 1 : 0;
      enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;

      if (last_arg_constant && i == nargs-1)
	{
	  if (!CONST_INT_P (op))
	    {
	      error ("last argument must be an immediate");
	      return gen_reg_rtx (tmode);
	    }
	}
      else
	{
	  if (VECTOR_MODE_P (mode))
	    op = safe_vector_operand (op, mode);

	  /* If we aren't optimizing, only allow one memory operand to be
	     generated.  */
	  if (memory_operand (op, mode))
	    num_memory++;

	  gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);

	  if (optimize
	      || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
	      || num_memory > 1)
	    op = force_reg (mode, op);
	}

      args[i].op = op;
      args[i].mode = mode;
    }

  switch (nargs)
    {
    case 1:
      pat = GEN_FCN (icode) (target, args[0].op);
      break;

    case 2:
      if (tf_p)
	pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
			       GEN_INT ((int)sub_code));
      else if (! comparison_p)
	pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
      else
	{
	  rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
				       args[0].op,
				       args[1].op);

	  pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
	}
      break;

    case 3:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
      break;

    case 4:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
			     args[2].op, args[3].op);
      break;

    default:
      gcc_unreachable ();
    }

  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
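
/* Example (hypothetical user code, a sketch): with -mxop the vpcom
   builtins from bdesc_multi_arg reach this expander with COMPARISON_P
   set, and the LT sub_code becomes the comparison rtx wrapped around
   the two operands:

     #include <x86intrin.h>

     __m128i
     lt (__m128i a, __m128i b)
     {
       return _mm_comlt_epi8 (a, b);   // __builtin_ia32_vpcomltb (a, b)
     }
*/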
/* Subroutine of ix86_expand_args_builtin to take care of scalar unop
   insns with vec_merge.  */

static rtx
ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
				    rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op1, op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);

  if ((optimize && !register_operand (op0, mode0))
      || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  op1 = op0;
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
    op1 = copy_to_mode_reg (mode0, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of comparison insns.  */

static rtx
ix86_expand_sse_compare (const struct builtin_description *d,
			 tree exp, rtx target, bool swap)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2;
  enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
  enum rtx_code comparison = d->comparison;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  /* Swap operands if we have a comparison that isn't available in
     hardware.  */
  if (swap)
    {
      rtx tmp = gen_reg_rtx (mode1);
      emit_move_insn (tmp, op1);
      op1 = op0;
      op0 = tmp;
    }

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if ((optimize && !register_operand (op0, mode0))
      || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
  pat = GEN_FCN (d->icode) (target, op0, op1, op2);
  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
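
/* Example (hypothetical user code): CMPSS has no direct GT/GE encodings,
   so those intrinsics reach this expander with SWAP set and are emitted
   as the LT/LE form with the operands exchanged:

     __m128
     gt (__m128 a, __m128 b)
     {
       return _mm_cmpgt_ss (a, b);   // cmpltss with a and b swapped
     }
*/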
23114 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
23117 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
23121 tree arg0 = CALL_EXPR_ARG (exp, 0);
23122 tree arg1 = CALL_EXPR_ARG (exp, 1);
23123 rtx op0 = expand_normal (arg0);
23124 rtx op1 = expand_normal (arg1);
23125 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23126 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23127 enum rtx_code comparison = d->comparison;
23129 if (VECTOR_MODE_P (mode0))
23130 op0 = safe_vector_operand (op0, mode0);
23131 if (VECTOR_MODE_P (mode1))
23132 op1 = safe_vector_operand (op1, mode1);
23134 /* Swap operands if we have a comparison that isn't available in
23136 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23143 target = gen_reg_rtx (SImode);
23144 emit_move_insn (target, const0_rtx);
23145 target = gen_rtx_SUBREG (QImode, target, 0);
23147 if ((optimize && !register_operand (op0, mode0))
23148 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23149 op0 = copy_to_mode_reg (mode0, op0);
23150 if ((optimize && !register_operand (op1, mode1))
23151 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23152 op1 = copy_to_mode_reg (mode1, op1);
23154 pat = GEN_FCN (d->icode) (op0, op1);
23158 emit_insn (gen_rtx_SET (VOIDmode,
23159 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23160 gen_rtx_fmt_ee (comparison, QImode,
23164 return SUBREG_REG (target);
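/* Sketch of the RTL generated above, for illustration only: the
   comparison result lands in the low byte of a zeroed SImode register,
   e.g.

     (set (strict_low_part (subreg:QI (reg:SI tmp) 0))
          (eq:QI (reg:CCZ flags) (const_int 0)))

   where CCZmode is an assumed example; the actual flags mode depends on
   the comparison.  The same zero-then-STRICT_LOW_PART idiom is reused
   by the ptest and pcmpXstr expanders below.  */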
23167 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
23170 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
23174 tree arg0 = CALL_EXPR_ARG (exp, 0);
23175 tree arg1 = CALL_EXPR_ARG (exp, 1);
23176 rtx op0 = expand_normal (arg0);
23177 rtx op1 = expand_normal (arg1);
23178 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23179 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23180 enum rtx_code comparison = d->comparison;
23182 if (VECTOR_MODE_P (mode0))
23183 op0 = safe_vector_operand (op0, mode0);
23184 if (VECTOR_MODE_P (mode1))
23185 op1 = safe_vector_operand (op1, mode1);
23187 target = gen_reg_rtx (SImode);
23188 emit_move_insn (target, const0_rtx);
23189 target = gen_rtx_SUBREG (QImode, target, 0);
23191 if ((optimize && !register_operand (op0, mode0))
23192 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23193 op0 = copy_to_mode_reg (mode0, op0);
23194 if ((optimize && !register_operand (op1, mode1))
23195 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23196 op1 = copy_to_mode_reg (mode1, op1);
23198 pat = GEN_FCN (d->icode) (op0, op1);
23202 emit_insn (gen_rtx_SET (VOIDmode,
23203 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23204 gen_rtx_fmt_ee (comparison, QImode,
23208 return SUBREG_REG (target);
23211 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
23214 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23215 tree exp, rtx target)
23218 tree arg0 = CALL_EXPR_ARG (exp, 0);
23219 tree arg1 = CALL_EXPR_ARG (exp, 1);
23220 tree arg2 = CALL_EXPR_ARG (exp, 2);
23221 tree arg3 = CALL_EXPR_ARG (exp, 3);
23222 tree arg4 = CALL_EXPR_ARG (exp, 4);
23223 rtx scratch0, scratch1;
23224 rtx op0 = expand_normal (arg0);
23225 rtx op1 = expand_normal (arg1);
23226 rtx op2 = expand_normal (arg2);
23227 rtx op3 = expand_normal (arg3);
23228 rtx op4 = expand_normal (arg4);
23229 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23231 tmode0 = insn_data[d->icode].operand[0].mode;
23232 tmode1 = insn_data[d->icode].operand[1].mode;
23233 modev2 = insn_data[d->icode].operand[2].mode;
23234 modei3 = insn_data[d->icode].operand[3].mode;
23235 modev4 = insn_data[d->icode].operand[4].mode;
23236 modei5 = insn_data[d->icode].operand[5].mode;
23237 modeimm = insn_data[d->icode].operand[6].mode;
23239 if (VECTOR_MODE_P (modev2))
23240 op0 = safe_vector_operand (op0, modev2);
23241 if (VECTOR_MODE_P (modev4))
23242 op2 = safe_vector_operand (op2, modev4);
23244 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23245 op0 = copy_to_mode_reg (modev2, op0);
23246 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
23247 op1 = copy_to_mode_reg (modei3, op1);
23248 if ((optimize && !register_operand (op2, modev4))
23249 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
23250 op2 = copy_to_mode_reg (modev4, op2);
23251 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
23252 op3 = copy_to_mode_reg (modei5, op3);
23254 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
23256 error ("the fifth argument must be a 8-bit immediate");
23260 if (d->code == IX86_BUILTIN_PCMPESTRI128)
23262 if (optimize || !target
23263 || GET_MODE (target) != tmode0
23264 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23265 target = gen_reg_rtx (tmode0);
23267 scratch1 = gen_reg_rtx (tmode1);
23269 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
23271 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
23273 if (optimize || !target
23274 || GET_MODE (target) != tmode1
23275 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23276 target = gen_reg_rtx (tmode1);
23278 scratch0 = gen_reg_rtx (tmode0);
23280 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
23284 gcc_assert (d->flag);
23286 scratch0 = gen_reg_rtx (tmode0);
23287 scratch1 = gen_reg_rtx (tmode1);
23289 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
23299 target = gen_reg_rtx (SImode);
23300 emit_move_insn (target, const0_rtx);
23301 target = gen_rtx_SUBREG (QImode, target, 0);
23304 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23305 gen_rtx_fmt_ee (EQ, QImode,
23306 gen_rtx_REG ((enum machine_mode) d->flag,
23309 return SUBREG_REG (target);
23316 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
23319 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
23320 tree exp, rtx target)
23323 tree arg0 = CALL_EXPR_ARG (exp, 0);
23324 tree arg1 = CALL_EXPR_ARG (exp, 1);
23325 tree arg2 = CALL_EXPR_ARG (exp, 2);
23326 rtx scratch0, scratch1;
23327 rtx op0 = expand_normal (arg0);
23328 rtx op1 = expand_normal (arg1);
23329 rtx op2 = expand_normal (arg2);
23330 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
23332 tmode0 = insn_data[d->icode].operand[0].mode;
23333 tmode1 = insn_data[d->icode].operand[1].mode;
23334 modev2 = insn_data[d->icode].operand[2].mode;
23335 modev3 = insn_data[d->icode].operand[3].mode;
23336 modeimm = insn_data[d->icode].operand[4].mode;
23338 if (VECTOR_MODE_P (modev2))
23339 op0 = safe_vector_operand (op0, modev2);
23340 if (VECTOR_MODE_P (modev3))
23341 op1 = safe_vector_operand (op1, modev3);
23343 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23344 op0 = copy_to_mode_reg (modev2, op0);
23345 if ((optimize && !register_operand (op1, modev3))
23346 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
23347 op1 = copy_to_mode_reg (modev3, op1);
23349 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
23351 error ("the third argument must be a 8-bit immediate");
23355 if (d->code == IX86_BUILTIN_PCMPISTRI128)
23357 if (optimize || !target
23358 || GET_MODE (target) != tmode0
23359 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23360 target = gen_reg_rtx (tmode0);
23362 scratch1 = gen_reg_rtx (tmode1);
23364 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
23366 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
23368 if (optimize || !target
23369 || GET_MODE (target) != tmode1
23370 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23371 target = gen_reg_rtx (tmode1);
23373 scratch0 = gen_reg_rtx (tmode0);
23375 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
23379 gcc_assert (d->flag);
23381 scratch0 = gen_reg_rtx (tmode0);
23382 scratch1 = gen_reg_rtx (tmode1);
23384 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
23394 target = gen_reg_rtx (SImode);
23395 emit_move_insn (target, const0_rtx);
23396 target = gen_rtx_SUBREG (QImode, target, 0);
23399 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23400 gen_rtx_fmt_ee (EQ, QImode,
23401 gen_rtx_REG ((enum machine_mode) d->flag,
23404 return SUBREG_REG (target);
23410 /* Subroutine of ix86_expand_builtin to take care of insns with
23411 variable number of operands. */
23414 ix86_expand_args_builtin (const struct builtin_description *d,
23415 tree exp, rtx target)
23417 rtx pat, real_target;
23418 unsigned int i, nargs;
23419 unsigned int nargs_constant = 0;
23420 int num_memory = 0;
23424 enum machine_mode mode;
23426 bool last_arg_count = false;
23427 enum insn_code icode = d->icode;
23428 const struct insn_data *insn_p = &insn_data[icode];
23429 enum machine_mode tmode = insn_p->operand[0].mode;
23430 enum machine_mode rmode = VOIDmode;
23432 enum rtx_code comparison = d->comparison;
23434 switch ((enum ix86_builtin_func_type) d->flag)
23436 case INT_FTYPE_V8SF_V8SF_PTEST:
23437 case INT_FTYPE_V4DI_V4DI_PTEST:
23438 case INT_FTYPE_V4DF_V4DF_PTEST:
23439 case INT_FTYPE_V4SF_V4SF_PTEST:
23440 case INT_FTYPE_V2DI_V2DI_PTEST:
23441 case INT_FTYPE_V2DF_V2DF_PTEST:
23442 return ix86_expand_sse_ptest (d, exp, target);
23443 case FLOAT128_FTYPE_FLOAT128:
23444 case FLOAT_FTYPE_FLOAT:
23445 case INT_FTYPE_INT:
23446 case UINT64_FTYPE_INT:
23447 case UINT16_FTYPE_UINT16:
23448 case INT64_FTYPE_INT64:
23449 case INT64_FTYPE_V4SF:
23450 case INT64_FTYPE_V2DF:
23451 case INT_FTYPE_V16QI:
23452 case INT_FTYPE_V8QI:
23453 case INT_FTYPE_V8SF:
23454 case INT_FTYPE_V4DF:
23455 case INT_FTYPE_V4SF:
23456 case INT_FTYPE_V2DF:
23457 case V16QI_FTYPE_V16QI:
23458 case V8SI_FTYPE_V8SF:
23459 case V8SI_FTYPE_V4SI:
23460 case V8HI_FTYPE_V8HI:
23461 case V8HI_FTYPE_V16QI:
23462 case V8QI_FTYPE_V8QI:
23463 case V8SF_FTYPE_V8SF:
23464 case V8SF_FTYPE_V8SI:
23465 case V8SF_FTYPE_V4SF:
23466 case V4SI_FTYPE_V4SI:
23467 case V4SI_FTYPE_V16QI:
23468 case V4SI_FTYPE_V4SF:
23469 case V4SI_FTYPE_V8SI:
23470 case V4SI_FTYPE_V8HI:
23471 case V4SI_FTYPE_V4DF:
23472 case V4SI_FTYPE_V2DF:
23473 case V4HI_FTYPE_V4HI:
23474 case V4DF_FTYPE_V4DF:
23475 case V4DF_FTYPE_V4SI:
23476 case V4DF_FTYPE_V4SF:
23477 case V4DF_FTYPE_V2DF:
23478 case V4SF_FTYPE_V4SF:
23479 case V4SF_FTYPE_V4SI:
23480 case V4SF_FTYPE_V8SF:
23481 case V4SF_FTYPE_V4DF:
23482 case V4SF_FTYPE_V2DF:
23483 case V2DI_FTYPE_V2DI:
23484 case V2DI_FTYPE_V16QI:
23485 case V2DI_FTYPE_V8HI:
23486 case V2DI_FTYPE_V4SI:
23487 case V2DF_FTYPE_V2DF:
23488 case V2DF_FTYPE_V4SI:
23489 case V2DF_FTYPE_V4DF:
23490 case V2DF_FTYPE_V4SF:
23491 case V2DF_FTYPE_V2SI:
23492 case V2SI_FTYPE_V2SI:
23493 case V2SI_FTYPE_V4SF:
23494 case V2SI_FTYPE_V2SF:
23495 case V2SI_FTYPE_V2DF:
23496 case V2SF_FTYPE_V2SF:
23497 case V2SF_FTYPE_V2SI:
23500 case V4SF_FTYPE_V4SF_VEC_MERGE:
23501 case V2DF_FTYPE_V2DF_VEC_MERGE:
23502 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
23503 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
23504 case V16QI_FTYPE_V16QI_V16QI:
23505 case V16QI_FTYPE_V8HI_V8HI:
23506 case V8QI_FTYPE_V8QI_V8QI:
23507 case V8QI_FTYPE_V4HI_V4HI:
23508 case V8HI_FTYPE_V8HI_V8HI:
23509 case V8HI_FTYPE_V16QI_V16QI:
23510 case V8HI_FTYPE_V4SI_V4SI:
23511 case V8SF_FTYPE_V8SF_V8SF:
23512 case V8SF_FTYPE_V8SF_V8SI:
23513 case V4SI_FTYPE_V4SI_V4SI:
23514 case V4SI_FTYPE_V8HI_V8HI:
23515 case V4SI_FTYPE_V4SF_V4SF:
23516 case V4SI_FTYPE_V2DF_V2DF:
23517 case V4HI_FTYPE_V4HI_V4HI:
23518 case V4HI_FTYPE_V8QI_V8QI:
23519 case V4HI_FTYPE_V2SI_V2SI:
23520 case V4DF_FTYPE_V4DF_V4DF:
23521 case V4DF_FTYPE_V4DF_V4DI:
23522 case V4SF_FTYPE_V4SF_V4SF:
23523 case V4SF_FTYPE_V4SF_V4SI:
23524 case V4SF_FTYPE_V4SF_V2SI:
23525 case V4SF_FTYPE_V4SF_V2DF:
23526 case V4SF_FTYPE_V4SF_DI:
23527 case V4SF_FTYPE_V4SF_SI:
23528 case V2DI_FTYPE_V2DI_V2DI:
23529 case V2DI_FTYPE_V16QI_V16QI:
23530 case V2DI_FTYPE_V4SI_V4SI:
23531 case V2DI_FTYPE_V2DI_V16QI:
23532 case V2DI_FTYPE_V2DF_V2DF:
23533 case V2SI_FTYPE_V2SI_V2SI:
23534 case V2SI_FTYPE_V4HI_V4HI:
23535 case V2SI_FTYPE_V2SF_V2SF:
23536 case V2DF_FTYPE_V2DF_V2DF:
23537 case V2DF_FTYPE_V2DF_V4SF:
23538 case V2DF_FTYPE_V2DF_V2DI:
23539 case V2DF_FTYPE_V2DF_DI:
23540 case V2DF_FTYPE_V2DF_SI:
23541 case V2SF_FTYPE_V2SF_V2SF:
23542 case V1DI_FTYPE_V1DI_V1DI:
23543 case V1DI_FTYPE_V8QI_V8QI:
23544 case V1DI_FTYPE_V2SI_V2SI:
23545 if (comparison == UNKNOWN)
23546 return ix86_expand_binop_builtin (icode, exp, target);
23549 case V4SF_FTYPE_V4SF_V4SF_SWAP:
23550 case V2DF_FTYPE_V2DF_V2DF_SWAP:
23551 gcc_assert (comparison != UNKNOWN);
23555 case V8HI_FTYPE_V8HI_V8HI_COUNT:
23556 case V8HI_FTYPE_V8HI_SI_COUNT:
23557 case V4SI_FTYPE_V4SI_V4SI_COUNT:
23558 case V4SI_FTYPE_V4SI_SI_COUNT:
23559 case V4HI_FTYPE_V4HI_V4HI_COUNT:
23560 case V4HI_FTYPE_V4HI_SI_COUNT:
23561 case V2DI_FTYPE_V2DI_V2DI_COUNT:
23562 case V2DI_FTYPE_V2DI_SI_COUNT:
23563 case V2SI_FTYPE_V2SI_V2SI_COUNT:
23564 case V2SI_FTYPE_V2SI_SI_COUNT:
23565 case V1DI_FTYPE_V1DI_V1DI_COUNT:
23566 case V1DI_FTYPE_V1DI_SI_COUNT:
23568 last_arg_count = true;
23570 case UINT64_FTYPE_UINT64_UINT64:
23571 case UINT_FTYPE_UINT_UINT:
23572 case UINT_FTYPE_UINT_USHORT:
23573 case UINT_FTYPE_UINT_UCHAR:
23574 case UINT16_FTYPE_UINT16_INT:
23575 case UINT8_FTYPE_UINT8_INT:
23578 case V2DI_FTYPE_V2DI_INT_CONVERT:
23581 nargs_constant = 1;
23583 case V8HI_FTYPE_V8HI_INT:
23584 case V8SF_FTYPE_V8SF_INT:
23585 case V4SI_FTYPE_V4SI_INT:
23586 case V4SI_FTYPE_V8SI_INT:
23587 case V4HI_FTYPE_V4HI_INT:
23588 case V4DF_FTYPE_V4DF_INT:
23589 case V4SF_FTYPE_V4SF_INT:
23590 case V4SF_FTYPE_V8SF_INT:
23591 case V2DI_FTYPE_V2DI_INT:
23592 case V2DF_FTYPE_V2DF_INT:
23593 case V2DF_FTYPE_V4DF_INT:
23595 nargs_constant = 1;
23597 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23598 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23599 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23600 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23601 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23604 case V16QI_FTYPE_V16QI_V16QI_INT:
23605 case V8HI_FTYPE_V8HI_V8HI_INT:
23606 case V8SI_FTYPE_V8SI_V8SI_INT:
23607 case V8SI_FTYPE_V8SI_V4SI_INT:
23608 case V8SF_FTYPE_V8SF_V8SF_INT:
23609 case V8SF_FTYPE_V8SF_V4SF_INT:
23610 case V4SI_FTYPE_V4SI_V4SI_INT:
23611 case V4DF_FTYPE_V4DF_V4DF_INT:
23612 case V4DF_FTYPE_V4DF_V2DF_INT:
23613 case V4SF_FTYPE_V4SF_V4SF_INT:
23614 case V2DI_FTYPE_V2DI_V2DI_INT:
23615 case V2DF_FTYPE_V2DF_V2DF_INT:
23617 nargs_constant = 1;
23619 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
23622 nargs_constant = 1;
23624 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
23627 nargs_constant = 1;
23629 case V2DI_FTYPE_V2DI_UINT_UINT:
23631 nargs_constant = 2;
23633 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
23634 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
23635 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
23636 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
23638 nargs_constant = 1;
23640 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23642 nargs_constant = 2;
23645 gcc_unreachable ();
23648 gcc_assert (nargs <= ARRAY_SIZE (args));
23650 if (comparison != UNKNOWN)
23652 gcc_assert (nargs == 2);
23653 return ix86_expand_sse_compare (d, exp, target, swap);
23656 if (rmode == VOIDmode || rmode == tmode)
23660 || GET_MODE (target) != tmode
23661 || ! (*insn_p->operand[0].predicate) (target, tmode))
23662 target = gen_reg_rtx (tmode);
23663 real_target = target;
23667 target = gen_reg_rtx (rmode);
23668 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
23671 for (i = 0; i < nargs; i++)
23673 tree arg = CALL_EXPR_ARG (exp, i);
23674 rtx op = expand_normal (arg);
23675 enum machine_mode mode = insn_p->operand[i + 1].mode;
23676 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
23678 if (last_arg_count && (i + 1) == nargs)
23680 /* SIMD shift insns take either an 8-bit immediate or a
23681 register as the count.  But builtin functions take int as
23682 the count.  If the count doesn't match, we put it in a register.  */
23685 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
23686 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
23687 op = copy_to_reg (op);
23690 else if ((nargs - i) <= nargs_constant)
23695 case CODE_FOR_sse4_1_roundpd:
23696 case CODE_FOR_sse4_1_roundps:
23697 case CODE_FOR_sse4_1_roundsd:
23698 case CODE_FOR_sse4_1_roundss:
23699 case CODE_FOR_sse4_1_blendps:
23700 case CODE_FOR_avx_blendpd256:
23701 case CODE_FOR_avx_vpermilv4df:
23702 case CODE_FOR_avx_roundpd256:
23703 case CODE_FOR_avx_roundps256:
23704 error ("the last argument must be a 4-bit immediate");
23707 case CODE_FOR_sse4_1_blendpd:
23708 case CODE_FOR_avx_vpermilv2df:
23709 case CODE_FOR_xop_vpermil2v2df3:
23710 case CODE_FOR_xop_vpermil2v4sf3:
23711 case CODE_FOR_xop_vpermil2v4df3:
23712 case CODE_FOR_xop_vpermil2v8sf3:
23713 error ("the last argument must be a 2-bit immediate");
23716 case CODE_FOR_avx_vextractf128v4df:
23717 case CODE_FOR_avx_vextractf128v8sf:
23718 case CODE_FOR_avx_vextractf128v8si:
23719 case CODE_FOR_avx_vinsertf128v4df:
23720 case CODE_FOR_avx_vinsertf128v8sf:
23721 case CODE_FOR_avx_vinsertf128v8si:
23722 error ("the last argument must be a 1-bit immediate");
23725 case CODE_FOR_avx_cmpsdv2df3:
23726 case CODE_FOR_avx_cmpssv4sf3:
23727 case CODE_FOR_avx_cmppdv2df3:
23728 case CODE_FOR_avx_cmppsv4sf3:
23729 case CODE_FOR_avx_cmppdv4df3:
23730 case CODE_FOR_avx_cmppsv8sf3:
23731 error ("the last argument must be a 5-bit immediate");
23735 switch (nargs_constant)
23738 if ((nargs - i) == nargs_constant)
23740 error ("the next to last argument must be an 8-bit immediate");
23744 error ("the last argument must be an 8-bit immediate");
23747 gcc_unreachable ();
23754 if (VECTOR_MODE_P (mode))
23755 op = safe_vector_operand (op, mode);
23757 /* If we aren't optimizing, only allow one memory operand to
23758 be generated.  */
23759 if (memory_operand (op, mode))
23762 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
23764 if (optimize || !match || num_memory > 1)
23765 op = copy_to_mode_reg (mode, op);
23769 op = copy_to_reg (op);
23770 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
23775 args[i].mode = mode;
23781 pat = GEN_FCN (icode) (real_target, args[0].op);
23784 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
23787 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23791 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23792 args[2].op, args[3].op);
23795 gcc_unreachable ();
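/* Worked example (illustrative): for a V8HI_FTYPE_V8HI_SI_COUNT shift
   builtin, the final int argument is used as-is when it already
   satisfies the insn predicate (an 8-bit immediate or a register);
   otherwise the loop above narrows it to SImode via a subreg and copies
   it into a fresh register before the pattern is emitted.  */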
23805 /* Subroutine of ix86_expand_builtin to take care of special insns
23806 with variable number of operands. */
23809 ix86_expand_special_args_builtin (const struct builtin_description *d,
23810 tree exp, rtx target)
23814 unsigned int i, nargs, arg_adjust, memory;
23818 enum machine_mode mode;
23820 enum insn_code icode = d->icode;
23821 bool last_arg_constant = false;
23822 const struct insn_data *insn_p = &insn_data[icode];
23823 enum machine_mode tmode = insn_p->operand[0].mode;
23824 enum { load, store } klass;
23826 switch ((enum ix86_builtin_func_type) d->flag)
23828 case VOID_FTYPE_VOID:
23829 emit_insn (GEN_FCN (icode) (target));
23831 case UINT64_FTYPE_VOID:
23836 case UINT64_FTYPE_PUNSIGNED:
23837 case V2DI_FTYPE_PV2DI:
23838 case V32QI_FTYPE_PCCHAR:
23839 case V16QI_FTYPE_PCCHAR:
23840 case V8SF_FTYPE_PCV4SF:
23841 case V8SF_FTYPE_PCFLOAT:
23842 case V4SF_FTYPE_PCFLOAT:
23843 case V4DF_FTYPE_PCV2DF:
23844 case V4DF_FTYPE_PCDOUBLE:
23845 case V2DF_FTYPE_PCDOUBLE:
23846 case VOID_FTYPE_PVOID:
23851 case VOID_FTYPE_PV2SF_V4SF:
23852 case VOID_FTYPE_PV4DI_V4DI:
23853 case VOID_FTYPE_PV2DI_V2DI:
23854 case VOID_FTYPE_PCHAR_V32QI:
23855 case VOID_FTYPE_PCHAR_V16QI:
23856 case VOID_FTYPE_PFLOAT_V8SF:
23857 case VOID_FTYPE_PFLOAT_V4SF:
23858 case VOID_FTYPE_PDOUBLE_V4DF:
23859 case VOID_FTYPE_PDOUBLE_V2DF:
23860 case VOID_FTYPE_PULONGLONG_ULONGLONG:
23861 case VOID_FTYPE_PINT_INT:
23864 /* Reserve memory operand for target. */
23865 memory = ARRAY_SIZE (args);
23867 case V4SF_FTYPE_V4SF_PCV2SF:
23868 case V2DF_FTYPE_V2DF_PCDOUBLE:
23873 case V8SF_FTYPE_PCV8SF_V8SF:
23874 case V4DF_FTYPE_PCV4DF_V4DF:
23875 case V4SF_FTYPE_PCV4SF_V4SF:
23876 case V2DF_FTYPE_PCV2DF_V2DF:
23881 case VOID_FTYPE_PV8SF_V8SF_V8SF:
23882 case VOID_FTYPE_PV4DF_V4DF_V4DF:
23883 case VOID_FTYPE_PV4SF_V4SF_V4SF:
23884 case VOID_FTYPE_PV2DF_V2DF_V2DF:
23887 /* Reserve memory operand for target. */
23888 memory = ARRAY_SIZE (args);
23890 case VOID_FTYPE_UINT_UINT_UINT:
23891 case VOID_FTYPE_UINT64_UINT_UINT:
23892 case UCHAR_FTYPE_UINT_UINT_UINT:
23893 case UCHAR_FTYPE_UINT64_UINT_UINT:
23896 memory = ARRAY_SIZE (args);
23897 last_arg_constant = true;
23900 gcc_unreachable ();
23903 gcc_assert (nargs <= ARRAY_SIZE (args));
23905 if (klass == store)
23907 arg = CALL_EXPR_ARG (exp, 0);
23908 op = expand_normal (arg);
23909 gcc_assert (target == 0);
23910 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
23918 || GET_MODE (target) != tmode
23919 || ! (*insn_p->operand[0].predicate) (target, tmode))
23920 target = gen_reg_rtx (tmode);
23923 for (i = 0; i < nargs; i++)
23925 enum machine_mode mode = insn_p->operand[i + 1].mode;
23928 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
23929 op = expand_normal (arg);
23930 match = (*insn_p->operand[i + 1].predicate) (op, mode);
23932 if (last_arg_constant && (i + 1) == nargs)
23936 if (icode == CODE_FOR_lwp_lwpvalsi3
23937 || icode == CODE_FOR_lwp_lwpinssi3
23938 || icode == CODE_FOR_lwp_lwpvaldi3
23939 || icode == CODE_FOR_lwp_lwpinsdi3)
23940 error ("the last argument must be a 32-bit immediate");
23942 error ("the last argument must be an 8-bit immediate");
23950 /* This must be the memory operand. */
23951 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
23952 gcc_assert (GET_MODE (op) == mode
23953 || GET_MODE (op) == VOIDmode);
23957 /* This must be a register.  */
23958 if (VECTOR_MODE_P (mode))
23959 op = safe_vector_operand (op, mode);
23961 gcc_assert (GET_MODE (op) == mode
23962 || GET_MODE (op) == VOIDmode);
23963 op = copy_to_mode_reg (mode, op);
23968 args[i].mode = mode;
23974 pat = GEN_FCN (icode) (target);
23977 pat = GEN_FCN (icode) (target, args[0].op);
23980 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
23983 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
23986 gcc_unreachable ();
23992 return klass == store ? 0 : target;
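/* For illustration (assumed example): a VOID_FTYPE_PFLOAT_V8SF store
   builtin such as __builtin_ia32_storeups256 takes the "store" path
   above: its pointer argument becomes the MEM target, the vector value
   is forced into a register, and 0 is returned because a store produces
   no value.  */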
23995 /* Return the integer constant in ARG. Constrain it to be in the range
23996 of the subparts of VEC_TYPE; issue an error if not. */
23999 get_element_number (tree vec_type, tree arg)
24001 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
24003 if (!host_integerp (arg, 1)
24004 || (elt = tree_low_cst (arg, 1), elt > max))
24006 error ("selector must be an integer constant in the range 0..%wi", max);
24013 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24014 ix86_expand_vector_init. We DO have language-level syntax for this, in
24015 the form of (type){ init-list }. Except that since we can't place emms
24016 instructions from inside the compiler, we can't allow the use of MMX
24017 registers unless the user explicitly asks for it. So we do *not* define
24018 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
24019 we have builtins invoked by mmintrin.h that give us license to emit
24020 these sorts of instructions. */
24023 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
24025 enum machine_mode tmode = TYPE_MODE (type);
24026 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
24027 int i, n_elt = GET_MODE_NUNITS (tmode);
24028 rtvec v = rtvec_alloc (n_elt);
24030 gcc_assert (VECTOR_MODE_P (tmode));
24031 gcc_assert (call_expr_nargs (exp) == n_elt);
24033 for (i = 0; i < n_elt; ++i)
24035 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
24036 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
24039 if (!target || !register_operand (target, tmode))
24040 target = gen_reg_rtx (tmode);
24042 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
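/* Illustrative use (assumed example): mmintrin.h's _mm_setr_pi16 is
   implemented with __builtin_ia32_vec_init_v4hi, which reaches this
   routine with four scalar arguments; each is narrowed to HImode and
   the set is handed to ix86_expand_vector_init as a PARALLEL.  */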
24046 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24047 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
24048 had a language-level syntax for referencing vector elements. */
24051 ix86_expand_vec_ext_builtin (tree exp, rtx target)
24053 enum machine_mode tmode, mode0;
24058 arg0 = CALL_EXPR_ARG (exp, 0);
24059 arg1 = CALL_EXPR_ARG (exp, 1);
24061 op0 = expand_normal (arg0);
24062 elt = get_element_number (TREE_TYPE (arg0), arg1);
24064 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24065 mode0 = TYPE_MODE (TREE_TYPE (arg0));
24066 gcc_assert (VECTOR_MODE_P (mode0));
24068 op0 = force_reg (mode0, op0);
24070 if (optimize || !target || !register_operand (target, tmode))
24071 target = gen_reg_rtx (tmode);
24073 ix86_expand_vector_extract (true, target, op0, elt);
24078 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24079 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
24080 a language-level syntax for referencing vector elements. */
24083 ix86_expand_vec_set_builtin (tree exp)
24085 enum machine_mode tmode, mode1;
24086 tree arg0, arg1, arg2;
24088 rtx op0, op1, target;
24090 arg0 = CALL_EXPR_ARG (exp, 0);
24091 arg1 = CALL_EXPR_ARG (exp, 1);
24092 arg2 = CALL_EXPR_ARG (exp, 2);
24094 tmode = TYPE_MODE (TREE_TYPE (arg0));
24095 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24096 gcc_assert (VECTOR_MODE_P (tmode));
24098 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24099 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24100 elt = get_element_number (TREE_TYPE (arg0), arg2);
24102 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24103 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24105 op0 = force_reg (tmode, op0);
24106 op1 = force_reg (mode1, op1);
24108 /* OP0 is the source of these builtin functions and shouldn't be
24109 modified. Create a copy, use it and return it as target. */
24110 target = gen_reg_rtx (tmode);
24111 emit_move_insn (target, op0);
24112 ix86_expand_vector_set (true, target, op1, elt);
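/* Illustrative use (assumed example): _mm_insert_epi16 is implemented
   with __builtin_ia32_vec_set_v8hi (v, x, n); the copy made above keeps
   the source vector V unmodified while the updated copy is returned.  */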
24117 /* Expand an expression EXP that calls a built-in function,
24118 with result going to TARGET if that's convenient
24119 (and in mode MODE if that's convenient).
24120 SUBTARGET may be used as the target for computing one of EXP's operands.
24121 IGNORE is nonzero if the value is to be ignored. */
24124 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24125 enum machine_mode mode ATTRIBUTE_UNUSED,
24126 int ignore ATTRIBUTE_UNUSED)
24128 const struct builtin_description *d;
24130 enum insn_code icode;
24131 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24132 tree arg0, arg1, arg2;
24133 rtx op0, op1, op2, pat;
24134 enum machine_mode mode0, mode1, mode2;
24135 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
24137 /* Determine whether the builtin function is available under the current ISA.
24138 Originally the builtin was not created if it wasn't applicable to the
24139 current ISA based on the command line switches. With function specific
24140 options, we need to check in the context of the function making the call
24141 whether it is supported. */
24142 if (ix86_builtins_isa[fcode].isa
24143 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
24145 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24146 NULL, NULL, false);
24149 error ("%qE needs unknown isa option", fndecl);
24152 gcc_assert (opts != NULL);
24153 error ("%qE needs isa option %s", fndecl, opts);
24161 case IX86_BUILTIN_MASKMOVQ:
24162 case IX86_BUILTIN_MASKMOVDQU:
24163 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24164 ? CODE_FOR_mmx_maskmovq
24165 : CODE_FOR_sse2_maskmovdqu);
24166 /* Note the arg order is different from the operand order. */
24167 arg1 = CALL_EXPR_ARG (exp, 0);
24168 arg2 = CALL_EXPR_ARG (exp, 1);
24169 arg0 = CALL_EXPR_ARG (exp, 2);
24170 op0 = expand_normal (arg0);
24171 op1 = expand_normal (arg1);
24172 op2 = expand_normal (arg2);
24173 mode0 = insn_data[icode].operand[0].mode;
24174 mode1 = insn_data[icode].operand[1].mode;
24175 mode2 = insn_data[icode].operand[2].mode;
24177 op0 = force_reg (Pmode, op0);
24178 op0 = gen_rtx_MEM (mode1, op0);
24180 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24181 op0 = copy_to_mode_reg (mode0, op0);
24182 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24183 op1 = copy_to_mode_reg (mode1, op1);
24184 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24185 op2 = copy_to_mode_reg (mode2, op2);
24186 pat = GEN_FCN (icode) (op0, op1, op2);
24192 case IX86_BUILTIN_LDMXCSR:
24193 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24194 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24195 emit_move_insn (target, op0);
24196 emit_insn (gen_sse_ldmxcsr (target));
24199 case IX86_BUILTIN_STMXCSR:
24200 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24201 emit_insn (gen_sse_stmxcsr (target));
24202 return copy_to_mode_reg (SImode, target);
24204 case IX86_BUILTIN_CLFLUSH:
24205 arg0 = CALL_EXPR_ARG (exp, 0);
24206 op0 = expand_normal (arg0);
24207 icode = CODE_FOR_sse2_clflush;
24208 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24209 op0 = copy_to_mode_reg (Pmode, op0);
24211 emit_insn (gen_sse2_clflush (op0));
24214 case IX86_BUILTIN_MONITOR:
24215 arg0 = CALL_EXPR_ARG (exp, 0);
24216 arg1 = CALL_EXPR_ARG (exp, 1);
24217 arg2 = CALL_EXPR_ARG (exp, 2);
24218 op0 = expand_normal (arg0);
24219 op1 = expand_normal (arg1);
24220 op2 = expand_normal (arg2);
24222 op0 = copy_to_mode_reg (Pmode, op0);
24224 op1 = copy_to_mode_reg (SImode, op1);
24226 op2 = copy_to_mode_reg (SImode, op2);
24227 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
24230 case IX86_BUILTIN_MWAIT:
24231 arg0 = CALL_EXPR_ARG (exp, 0);
24232 arg1 = CALL_EXPR_ARG (exp, 1);
24233 op0 = expand_normal (arg0);
24234 op1 = expand_normal (arg1);
24236 op0 = copy_to_mode_reg (SImode, op0);
24238 op1 = copy_to_mode_reg (SImode, op1);
24239 emit_insn (gen_sse3_mwait (op0, op1));
24242 case IX86_BUILTIN_VEC_INIT_V2SI:
24243 case IX86_BUILTIN_VEC_INIT_V4HI:
24244 case IX86_BUILTIN_VEC_INIT_V8QI:
24245 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24247 case IX86_BUILTIN_VEC_EXT_V2DF:
24248 case IX86_BUILTIN_VEC_EXT_V2DI:
24249 case IX86_BUILTIN_VEC_EXT_V4SF:
24250 case IX86_BUILTIN_VEC_EXT_V4SI:
24251 case IX86_BUILTIN_VEC_EXT_V8HI:
24252 case IX86_BUILTIN_VEC_EXT_V2SI:
24253 case IX86_BUILTIN_VEC_EXT_V4HI:
24254 case IX86_BUILTIN_VEC_EXT_V16QI:
24255 return ix86_expand_vec_ext_builtin (exp, target);
24257 case IX86_BUILTIN_VEC_SET_V2DI:
24258 case IX86_BUILTIN_VEC_SET_V4SF:
24259 case IX86_BUILTIN_VEC_SET_V4SI:
24260 case IX86_BUILTIN_VEC_SET_V8HI:
24261 case IX86_BUILTIN_VEC_SET_V4HI:
24262 case IX86_BUILTIN_VEC_SET_V16QI:
24263 return ix86_expand_vec_set_builtin (exp);
24265 case IX86_BUILTIN_VEC_PERM_V2DF:
24266 case IX86_BUILTIN_VEC_PERM_V4SF:
24267 case IX86_BUILTIN_VEC_PERM_V2DI:
24268 case IX86_BUILTIN_VEC_PERM_V4SI:
24269 case IX86_BUILTIN_VEC_PERM_V8HI:
24270 case IX86_BUILTIN_VEC_PERM_V16QI:
24271 case IX86_BUILTIN_VEC_PERM_V2DI_U:
24272 case IX86_BUILTIN_VEC_PERM_V4SI_U:
24273 case IX86_BUILTIN_VEC_PERM_V8HI_U:
24274 case IX86_BUILTIN_VEC_PERM_V16QI_U:
24275 case IX86_BUILTIN_VEC_PERM_V4DF:
24276 case IX86_BUILTIN_VEC_PERM_V8SF:
24277 return ix86_expand_vec_perm_builtin (exp);
24279 case IX86_BUILTIN_INFQ:
24280 case IX86_BUILTIN_HUGE_VALQ:
24282 REAL_VALUE_TYPE inf;
24286 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24288 tmp = validize_mem (force_const_mem (mode, tmp));
24291 target = gen_reg_rtx (mode);
24293 emit_move_insn (target, tmp);
24297 case IX86_BUILTIN_LLWPCB:
24298 arg0 = CALL_EXPR_ARG (exp, 0);
24299 op0 = expand_normal (arg0);
24300 icode = CODE_FOR_lwp_llwpcb;
24301 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24302 op0 = copy_to_mode_reg (Pmode, op0);
24303 emit_insn (gen_lwp_llwpcb (op0));
24306 case IX86_BUILTIN_SLWPCB:
24307 icode = CODE_FOR_lwp_slwpcb;
24309 || ! (*insn_data[icode].operand[0].predicate) (target, Pmode))
24310 target = gen_reg_rtx (Pmode);
24311 emit_insn (gen_lwp_slwpcb (target));
24318 for (i = 0, d = bdesc_special_args;
24319 i < ARRAY_SIZE (bdesc_special_args);
24321 if (d->code == fcode)
24322 return ix86_expand_special_args_builtin (d, exp, target);
24324 for (i = 0, d = bdesc_args;
24325 i < ARRAY_SIZE (bdesc_args);
24327 if (d->code == fcode)
24330 case IX86_BUILTIN_FABSQ:
24331 case IX86_BUILTIN_COPYSIGNQ:
24333 /* Emit a normal call if SSE2 isn't available. */
24334 return expand_call (exp, target, ignore);
24336 return ix86_expand_args_builtin (d, exp, target);
24339 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24340 if (d->code == fcode)
24341 return ix86_expand_sse_comi (d, exp, target);
24343 for (i = 0, d = bdesc_pcmpestr;
24344 i < ARRAY_SIZE (bdesc_pcmpestr);
24346 if (d->code == fcode)
24347 return ix86_expand_sse_pcmpestr (d, exp, target);
24349 for (i = 0, d = bdesc_pcmpistr;
24350 i < ARRAY_SIZE (bdesc_pcmpistr);
24352 if (d->code == fcode)
24353 return ix86_expand_sse_pcmpistr (d, exp, target);
24355 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
24356 if (d->code == fcode)
24357 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
24358 (enum ix86_builtin_func_type)
24359 d->flag, d->comparison);
24361 gcc_unreachable ();
24364 /* Returns a function decl for a vectorized version of the builtin function
24365 with builtin function code FN and the result vector type TYPE, or NULL_TREE
24366 if it is not available. */
24369 ix86_builtin_vectorized_function (tree fndecl, tree type_out,
24372 enum machine_mode in_mode, out_mode;
24374 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
24376 if (TREE_CODE (type_out) != VECTOR_TYPE
24377 || TREE_CODE (type_in) != VECTOR_TYPE
24378 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
24381 out_mode = TYPE_MODE (TREE_TYPE (type_out));
24382 out_n = TYPE_VECTOR_SUBPARTS (type_out);
24383 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24384 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24388 case BUILT_IN_SQRT:
24389 if (out_mode == DFmode && out_n == 2
24390 && in_mode == DFmode && in_n == 2)
24391 return ix86_builtins[IX86_BUILTIN_SQRTPD];
24394 case BUILT_IN_SQRTF:
24395 if (out_mode == SFmode && out_n == 4
24396 && in_mode == SFmode && in_n == 4)
24397 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
24400 case BUILT_IN_LRINT:
24401 if (out_mode == SImode && out_n == 4
24402 && in_mode == DFmode && in_n == 2)
24403 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
24406 case BUILT_IN_LRINTF:
24407 if (out_mode == SImode && out_n == 4
24408 && in_mode == SFmode && in_n == 4)
24409 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
24412 case BUILT_IN_COPYSIGN:
24413 if (out_mode == DFmode && out_n == 2
24414 && in_mode == DFmode && in_n == 2)
24415 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
24418 case BUILT_IN_COPYSIGNF:
24419 if (out_mode == SFmode && out_n == 4
24420 && in_mode == SFmode && in_n == 4)
24421 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
24428 /* Dispatch to a handler for a vectorization library. */
24429 if (ix86_veclib_handler)
24430 return (*ix86_veclib_handler) ((enum built_in_function) fn, type_out,
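/* Example (illustrative): when the vectorizer asks for a V4SF -> V4SF
   version of sqrtf, the switch above returns the declaration behind
   IX86_BUILTIN_SQRTPS_NR; functions it does not handle fall through to
   the vectorization library handler selected with -mveclibabi, if any.  */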
24436 /* Handler for an SVML-style interface to
24437 a library with vectorized intrinsics. */
24440 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
24443 tree fntype, new_fndecl, args;
24446 enum machine_mode el_mode, in_mode;
24449 /* The SVML is suitable for unsafe math only. */
24450 if (!flag_unsafe_math_optimizations)
24453 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24454 n = TYPE_VECTOR_SUBPARTS (type_out);
24455 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24456 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24457 if (el_mode != in_mode
24465 case BUILT_IN_LOG10:
24467 case BUILT_IN_TANH:
24469 case BUILT_IN_ATAN:
24470 case BUILT_IN_ATAN2:
24471 case BUILT_IN_ATANH:
24472 case BUILT_IN_CBRT:
24473 case BUILT_IN_SINH:
24475 case BUILT_IN_ASINH:
24476 case BUILT_IN_ASIN:
24477 case BUILT_IN_COSH:
24479 case BUILT_IN_ACOSH:
24480 case BUILT_IN_ACOS:
24481 if (el_mode != DFmode || n != 2)
24485 case BUILT_IN_EXPF:
24486 case BUILT_IN_LOGF:
24487 case BUILT_IN_LOG10F:
24488 case BUILT_IN_POWF:
24489 case BUILT_IN_TANHF:
24490 case BUILT_IN_TANF:
24491 case BUILT_IN_ATANF:
24492 case BUILT_IN_ATAN2F:
24493 case BUILT_IN_ATANHF:
24494 case BUILT_IN_CBRTF:
24495 case BUILT_IN_SINHF:
24496 case BUILT_IN_SINF:
24497 case BUILT_IN_ASINHF:
24498 case BUILT_IN_ASINF:
24499 case BUILT_IN_COSHF:
24500 case BUILT_IN_COSF:
24501 case BUILT_IN_ACOSHF:
24502 case BUILT_IN_ACOSF:
24503 if (el_mode != SFmode || n != 4)
24511 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24513 if (fn == BUILT_IN_LOGF)
24514 strcpy (name, "vmlsLn4");
24515 else if (fn == BUILT_IN_LOG)
24516 strcpy (name, "vmldLn2");
24519 sprintf (name, "vmls%s", bname+10);
24520 name[strlen (name)-1] = '4';
24523 sprintf (name, "vmld%s2", bname+10);
24525 /* Convert to uppercase. */
24529 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24530 args = TREE_CHAIN (args))
24534 fntype = build_function_type_list (type_out, type_in, NULL);
24536 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24538 /* Build a function declaration for the vectorized function. */
24539 new_fndecl = build_decl (BUILTINS_LOCATION,
24540 FUNCTION_DECL, get_identifier (name), fntype);
24541 TREE_PUBLIC (new_fndecl) = 1;
24542 DECL_EXTERNAL (new_fndecl) = 1;
24543 DECL_IS_NOVOPS (new_fndecl) = 1;
24544 TREE_READONLY (new_fndecl) = 1;
24549 /* Handler for an ACML-style interface to
24550 a library with vectorized intrinsics. */
24553 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
24555 char name[20] = "__vr.._";
24556 tree fntype, new_fndecl, args;
24559 enum machine_mode el_mode, in_mode;
24562 /* The ACML is 64-bit only and suitable for unsafe math only, as
24563 it does not correctly support parts of IEEE (such as denormals)
24564 with the required precision.  */
24566 || !flag_unsafe_math_optimizations)
24569 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24570 n = TYPE_VECTOR_SUBPARTS (type_out);
24571 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24572 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24573 if (el_mode != in_mode
24583 case BUILT_IN_LOG2:
24584 case BUILT_IN_LOG10:
24587 if (el_mode != DFmode
24592 case BUILT_IN_SINF:
24593 case BUILT_IN_COSF:
24594 case BUILT_IN_EXPF:
24595 case BUILT_IN_POWF:
24596 case BUILT_IN_LOGF:
24597 case BUILT_IN_LOG2F:
24598 case BUILT_IN_LOG10F:
24601 if (el_mode != SFmode
24610 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24611 sprintf (name + 7, "%s", bname+10);
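/* Name mangling example (illustrative): the vector kind and width are
   filled into the "__vr.._" template before this point, so BUILT_IN_SIN
   produces the ACML entry point "__vrd2_sin" and BUILT_IN_SINF produces
   "__vrs4_sinf".  */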
24614 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24615 args = TREE_CHAIN (args))
24619 fntype = build_function_type_list (type_out, type_in, NULL);
24621 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24623 /* Build a function declaration for the vectorized function. */
24624 new_fndecl = build_decl (BUILTINS_LOCATION,
24625 FUNCTION_DECL, get_identifier (name), fntype);
24626 TREE_PUBLIC (new_fndecl) = 1;
24627 DECL_EXTERNAL (new_fndecl) = 1;
24628 DECL_IS_NOVOPS (new_fndecl) = 1;
24629 TREE_READONLY (new_fndecl) = 1;
24635 /* Returns a decl of a function that implements conversion of an integer vector
24636 into a floating-point vector, or vice-versa. TYPE is the type of the integer
24637 side of the conversion.
24638 Return NULL_TREE if it is not available. */
24641 ix86_vectorize_builtin_conversion (unsigned int code, tree type)
24643 if (! (TARGET_SSE2 && TREE_CODE (type) == VECTOR_TYPE))
24649 switch (TYPE_MODE (type))
24652 return TYPE_UNSIGNED (type)
24653 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
24654 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS];
24659 case FIX_TRUNC_EXPR:
24660 switch (TYPE_MODE (type))
24663 return TYPE_UNSIGNED (type)
24665 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ];
24675 /* Returns a code for a target-specific builtin that implements
24676 the reciprocal of the function, or NULL_TREE if not available.  */
24679 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
24680 bool sqrt ATTRIBUTE_UNUSED)
24682 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
24683 && flag_finite_math_only && !flag_trapping_math
24684 && flag_unsafe_math_optimizations))
24688 /* Machine dependent builtins. */
24691 /* Vectorized version of sqrt to rsqrt conversion. */
24692 case IX86_BUILTIN_SQRTPS_NR:
24693 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
24699 /* Normal builtins. */
24702 /* Sqrt to rsqrt conversion. */
24703 case BUILT_IN_SQRTF:
24704 return ix86_builtins[IX86_BUILTIN_RSQRTF];
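/* Example (illustrative): under -ffast-math (which implies the three
   flags tested above), a 1.0f / sqrtf (x) computation can be rewritten
   to use IX86_BUILTIN_RSQRTF, i.e. the rsqrtss estimate refined by a
   Newton-Raphson step, instead of a full square root and divide.  */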
24711 /* Helper for avx_vpermilps256_operand et al. This is also used by
24712 the expansion functions to turn the parallel back into a mask.
24713 The return value is 0 for no match and the imm8+1 for a match. */
24716 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
24718 unsigned i, nelt = GET_MODE_NUNITS (mode);
24720 unsigned char ipar[8];
24722 if (XVECLEN (par, 0) != (int) nelt)
24725 /* Validate that all of the elements are constants, and not totally
24726 out of range. Copy the data into an integral array to make the
24727 subsequent checks easier. */
24728 for (i = 0; i < nelt; ++i)
24730 rtx er = XVECEXP (par, 0, i);
24731 unsigned HOST_WIDE_INT ei;
24733 if (!CONST_INT_P (er))
24744 /* In the 256-bit DFmode case, we can only move elements within
24745 a 128-bit lane.  */
24746 for (i = 0; i < 2; ++i)
24750 mask |= ipar[i] << i;
24752 for (i = 2; i < 4; ++i)
24756 mask |= (ipar[i] - 2) << i;
24761 /* In the 256-bit SFmode case, we have full freedom of movement
24762 within the low 128-bit lane, but the high 128-bit lane must
24763 mirror the exact same pattern. */
24764 for (i = 0; i < 4; ++i)
24765 if (ipar[i] + 4 != ipar[i + 4])
24772 /* In the 128-bit case, we have full freedom in the placement of
24773 the elements from the source operand. */
24774 for (i = 0; i < nelt; ++i)
24775 mask |= ipar[i] << (i * (nelt / 2));
24779 gcc_unreachable ();
24782 /* Make sure success has a non-zero value by adding one. */
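/* Worked example (illustrative): for a V4SF parallel selecting elements
   (1 0 3 2), the 128-bit case above computes
   mask = (1 << 0) | (0 << 2) | (3 << 4) | (2 << 6) = 0xb1,
   so the function returns 0xb2 and callers subtract one to recover the
   vpermilps imm8.  */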
24786 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
24787 the expansion functions to turn the parallel back into a mask.
24788 The return value is 0 for no match and the imm8+1 for a match. */
24791 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
24793 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
24795 unsigned char ipar[8];
24797 if (XVECLEN (par, 0) != (int) nelt)
24800 /* Validate that all of the elements are constants, and not totally
24801 out of range. Copy the data into an integral array to make the
24802 subsequent checks easier. */
24803 for (i = 0; i < nelt; ++i)
24805 rtx er = XVECEXP (par, 0, i);
24806 unsigned HOST_WIDE_INT ei;
24808 if (!CONST_INT_P (er))
24811 if (ei >= 2 * nelt)
24816 /* Validate that each half of the permute selects consecutive elements.  */
24817 for (i = 0; i < nelt2 - 1; ++i)
24818 if (ipar[i] + 1 != ipar[i + 1])
24820 for (i = nelt2; i < nelt - 1; ++i)
24821 if (ipar[i] + 1 != ipar[i + 1])
24824 /* Reconstruct the mask. */
24825 for (i = 0; i < 2; ++i)
24827 unsigned e = ipar[i * nelt2];
24831 mask |= e << (i * 4);
24834 /* Make sure success has a non-zero value by adding one. */
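/* Worked example (illustrative): for V4DF, the parallel (0 1 4 5)
   selects the low half of each source operand; this encodes as the
   vperm2f128 imm8 0x20, so the function returns 0x21.  */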
24839 /* Store OPERAND to memory after reload has completed.  This means
24840 that we can't easily use assign_stack_local.  */
24842 ix86_force_to_memory (enum machine_mode mode, rtx operand)
24846 gcc_assert (reload_completed);
24847 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
24849 result = gen_rtx_MEM (mode,
24850 gen_rtx_PLUS (Pmode,
24852 GEN_INT (-RED_ZONE_SIZE)));
24853 emit_move_insn (result, operand);
24855 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
24861 operand = gen_lowpart (DImode, operand);
24865 gen_rtx_SET (VOIDmode,
24866 gen_rtx_MEM (DImode,
24867 gen_rtx_PRE_DEC (DImode,
24868 stack_pointer_rtx)),
24872 gcc_unreachable ();
24874 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24883 split_di (&operand, 1, operands, operands + 1);
24885 gen_rtx_SET (VOIDmode,
24886 gen_rtx_MEM (SImode,
24887 gen_rtx_PRE_DEC (Pmode,
24888 stack_pointer_rtx)),
24891 gen_rtx_SET (VOIDmode,
24892 gen_rtx_MEM (SImode,
24893 gen_rtx_PRE_DEC (Pmode,
24894 stack_pointer_rtx)),
24899 /* Store HImodes as SImodes. */
24900 operand = gen_lowpart (SImode, operand);
24904 gen_rtx_SET (VOIDmode,
24905 gen_rtx_MEM (GET_MODE (operand),
24906 gen_rtx_PRE_DEC (SImode,
24907 stack_pointer_rtx)),
24911 gcc_unreachable ();
24913 result = gen_rtx_MEM (mode, stack_pointer_rtx);
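/* Summary sketch (illustrative): with the 64-bit red zone available the
   operand is stored below the stack pointer without adjusting it;
   otherwise it is pushed via a PRE_DEC of the stack pointer, widening
   HImode values to SImode first, and the returned MEM addresses the
   freshly written slot.  */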
24918 /* Free the operand from memory.  */
24920 ix86_free_from_memory (enum machine_mode mode)
24922 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
24926 if (mode == DImode || TARGET_64BIT)
24930 /* Use LEA to deallocate stack space.  In peephole2 it will be converted
24931 to a pop or add instruction if registers are available.  */
24932 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
24933 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
24938 /* Implement TARGET_IRA_COVER_CLASSES. If -mfpmath=sse, we prefer
24940 SSE_REGS to FLOAT_REGS if their costs for a pseudo are the
same.  */
24941 static const enum reg_class *
24942 i386_ira_cover_classes (void)
24944 static const enum reg_class sse_fpmath_classes[] = {
24945 GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
24947 static const enum reg_class no_sse_fpmath_classes[] = {
24948 GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
24951 return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
24954 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
24955 QImode must go into class Q_REGS.
24956 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
24957 movdf to do mem-to-mem moves through integer regs. */
24959 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
24961 enum machine_mode mode = GET_MODE (x);
24963 /* We're only allowed to return a subclass of CLASS. Many of the
24964 following checks fail for NO_REGS, so eliminate that early. */
24965 if (regclass == NO_REGS)
24968 /* All classes can load zeros. */
24969 if (x == CONST0_RTX (mode))
24972 /* Force constants into memory if we are loading a (nonzero) constant into
24973 an MMX or SSE register. This is because there are no MMX/SSE instructions
24974 to load from a constant. */
24976 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
24979 /* Prefer SSE regs only, if we can use them for math. */
24980 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
24981 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
24983 /* Floating-point constants need more complex checks. */
24984 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
24986 /* General regs can load everything. */
24987 if (reg_class_subset_p (regclass, GENERAL_REGS))
24990 /* Floats can load 0 and 1 plus some others. Note that we eliminated
24991 zero above. We only want to wind up preferring 80387 registers if
24992 we plan on doing computation with them. */
24994 && standard_80387_constant_p (x))
24996 /* Limit class to non-sse. */
24997 if (regclass == FLOAT_SSE_REGS)
24999 if (regclass == FP_TOP_SSE_REGS)
25001 if (regclass == FP_SECOND_SSE_REGS)
25002 return FP_SECOND_REG;
25003 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
25010 /* Generally when we see PLUS here, it's the function invariant
25011 (plus soft-fp const_int), which can only be computed into general
25012 regs.  */
25013 if (GET_CODE (x) == PLUS)
25014 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
25016 /* QImode constants are easy to load, but non-constant QImode data
25017 must go into Q_REGS. */
25018 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
25020 if (reg_class_subset_p (regclass, Q_REGS))
25022 if (reg_class_subset_p (Q_REGS, regclass))
25030 /* Discourage putting floating-point values in SSE registers unless
25031 SSE math is being used, and likewise for the 387 registers. */
25033 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
25035 enum machine_mode mode = GET_MODE (x);
25037 /* Restrict the output reload class to the register bank that we are doing
25038 math on. If we would like not to return a subset of CLASS, reject this
25039 alternative: if reload cannot do this, it will still use its choice. */
25040 mode = GET_MODE (x);
25041 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
25042 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
25044 if (X87_FLOAT_MODE_P (mode))
25046 if (regclass == FP_TOP_SSE_REGS)
25048 else if (regclass == FP_SECOND_SSE_REGS)
25049 return FP_SECOND_REG;
25051 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
25057 static enum reg_class
25058 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
25059 enum machine_mode mode,
25060 secondary_reload_info *sri ATTRIBUTE_UNUSED)
25062 /* QImode spills from non-QI registers require
25063 an intermediate register on 32-bit targets.  */
25064 if (!in_p && mode == QImode && !TARGET_64BIT
25065 && (rclass == GENERAL_REGS
25066 || rclass == LEGACY_REGS
25067 || rclass == INDEX_REGS))
25076 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
25077 regno = true_regnum (x);
25079 /* Return Q_REGS if the operand is in memory. */
25087 /* If we are copying between general and FP registers, we need a memory
25088 location. The same is true for SSE and MMX registers.
25090 To optimize register_move_cost performance, allow inline variant.
25092 The macro can't work reliably when one of the CLASSES is a class containing
25093 registers from multiple units (SSE, MMX, integer). We avoid this by never
25094 combining those units in single alternative in the machine description.
25095 Ensure that this constraint holds to avoid unexpected surprises.
25097 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
25098 enforce these sanity checks. */
25101 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25102 enum machine_mode mode, int strict)
25104 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
25105 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
25106 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
25107 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
25108 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
25109 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
25111 gcc_assert (!strict);
25115 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
25118 /* ??? This is a lie.  We do have moves between mmx/general, and
25119 between mmx/sse2.  But by saying we need secondary memory we discourage the
25120 register allocator from using the mmx registers unless needed. */
25121 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
25124 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25126 /* SSE1 doesn't have any direct moves from other classes. */
25130 /* If the target says that inter-unit moves are more expensive
25131 than moving through memory, then don't generate them. */
25132 if (!TARGET_INTER_UNIT_MOVES)
25135 /* Between SSE and general, we have moves no larger than word size. */
25136 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
25144 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25145 enum machine_mode mode, int strict)
25147 return inline_secondary_memory_needed (class1, class2, mode, strict);
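/* Example (illustrative): a DFmode copy between FLOAT_REGS and SSE_REGS
   answers true here, since the classes differ in FLOAT_CLASS_P; such
   copies are therefore staged through a stack slot instead of being
   moved directly between register files.  */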
25150 /* Return true if the registers in CLASS cannot represent the change from
25151 modes FROM to TO. */
25154 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
25155 enum reg_class regclass)
25160 /* x87 registers can't do subreg at all, as all values are reformatted
25161 to extended precision. */
25162 if (MAYBE_FLOAT_CLASS_P (regclass))
25165 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25167 /* Vector registers do not support QI or HImode loads. If we don't
25168 disallow a change to these modes, reload will assume it's ok to
25169 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25170 the vec_dupv4hi pattern. */
25171 if (GET_MODE_SIZE (from) < 4)
25174 /* Vector registers do not support subreg with nonzero offsets, which
25175 are otherwise valid for integer registers. Since we can't see
25176 whether we have a nonzero offset from here, prohibit all
25177 nonparadoxical subregs changing size. */
25178 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
25185 /* Return the cost of moving data of mode M between a
25186 register and memory. A value of 2 is the default; this cost is
25187 relative to those in `REGISTER_MOVE_COST'.
25189 This function is used extensively by register_move_cost, which is used to
25190 build tables at startup.  Make it inline in this case.
25191 When IN is 2, return the maximum of the in and out move costs.

25193 If moving between registers and memory is more expensive than
25194 between two registers, you should define this macro to express the
25195 relative cost.

25197 Model also increased moving costs of QImode registers in non
25198 Q_REGS classes.  */
25201 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25205 if (FLOAT_CLASS_P (regclass))
25223 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25224 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25226 if (SSE_CLASS_P (regclass))
25229 switch (GET_MODE_SIZE (mode))
25244 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25245 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25247 if (MMX_CLASS_P (regclass))
25250 switch (GET_MODE_SIZE (mode))
25262 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25263 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25265 switch (GET_MODE_SIZE (mode))
25268 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25271 return ix86_cost->int_store[0];
25272 if (TARGET_PARTIAL_REG_DEPENDENCY
25273 && optimize_function_for_speed_p (cfun))
25274 cost = ix86_cost->movzbl_load;
25276 cost = ix86_cost->int_load[0];
25278 return MAX (cost, ix86_cost->int_store[0]);
25284 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25286 return ix86_cost->movzbl_load;
25288 return ix86_cost->int_store[0] + 4;
25293 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25294 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25296 /* Compute the number of 32-bit moves needed.  TFmode is moved as XFmode.  */
25297 if (mode == TFmode)
25300 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25302 cost = ix86_cost->int_load[2];
25304 cost = ix86_cost->int_store[2];
25305 return (cost * (((int) GET_MODE_SIZE (mode)
25306 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
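/* Worked example (illustrative): loading a DImode value into integer
   registers on a 32-bit target reaches the code above with
   GET_MODE_SIZE == 8, giving (8 + 3) / 4 = 2 word-sized moves, so the
   cost is twice int_load[2].  */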
25311 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
25313 return inline_memory_move_cost (mode, regclass, in);
25317 /* Return the cost of moving data from a register in class CLASS1 to
25318 one in class CLASS2.
25320 It is not required that the cost always equal 2 when FROM is the same as TO;
25321 on some machines it is expensive to move between registers if they are not
25322 general registers. */
25325 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25326 enum reg_class class2)
25328 /* In case we require secondary memory, compute cost of the store followed
25329 by load.  In order to avoid bad register allocation choices, we need
25330 this to be *at least* as high as the symmetric MEMORY_MOVE_COST.  */
25332 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25336 cost += inline_memory_move_cost (mode, class1, 2);
25337 cost += inline_memory_move_cost (mode, class2, 2);
25339 /* When copying from a general purpose register we may emit multiple
25340 stores followed by a single load, causing a memory size mismatch stall.
25341 Count this as an arbitrarily high cost of 20.  */
25342 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25345 /* In the case of FP/MMX moves, the registers actually overlap, and we
25346 have to switch modes in order to treat them differently. */
25347 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25348 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25354 /* Moves between SSE/MMX and integer unit are expensive. */
25355 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25356 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25358 /* ??? By keeping the returned value relatively high, we limit the number
25359 of moves between integer and MMX/SSE registers for all targets.
25360 Additionally, a high value prevents a problem with x86_modes_tieable_p(),
25361 where integer modes in MMX/SSE registers are not tieable
25362 because of missing QImode and HImode moves to, from or between
25363 MMX/SSE registers.  */
25364 return MAX (8, ix86_cost->mmxsse_to_integer);
25366 if (MAYBE_FLOAT_CLASS_P (class1))
25367 return ix86_cost->fp_move;
25368 if (MAYBE_SSE_CLASS_P (class1))
25369 return ix86_cost->sse_move;
25370 if (MAYBE_MMX_CLASS_P (class1))
25371 return ix86_cost->mmx_move;
25375 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25378 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25380 /* Flags, and only flags, can hold CCmode values.  */
25381 if (CC_REGNO_P (regno))
25382 return GET_MODE_CLASS (mode) == MODE_CC;
25383 if (GET_MODE_CLASS (mode) == MODE_CC
25384 || GET_MODE_CLASS (mode) == MODE_RANDOM
25385 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25387 if (FP_REGNO_P (regno))
25388 return VALID_FP_MODE_P (mode);
25389 if (SSE_REGNO_P (regno))
25391 /* We implement the move patterns for all vector modes into and
25392 out of SSE registers, even when no operation instructions
25393 are available.  OImode move is available only when AVX is
25394 enabled.  */
25395 return ((TARGET_AVX && mode == OImode)
25396 || VALID_AVX256_REG_MODE (mode)
25397 || VALID_SSE_REG_MODE (mode)
25398 || VALID_SSE2_REG_MODE (mode)
25399 || VALID_MMX_REG_MODE (mode)
25400 || VALID_MMX_REG_MODE_3DNOW (mode));
25402 if (MMX_REGNO_P (regno))
25404 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25405 so if the register is available at all, then we can move data of
25406 the given mode into or out of it. */
25407 return (VALID_MMX_REG_MODE (mode)
25408 || VALID_MMX_REG_MODE_3DNOW (mode));
25411 if (mode == QImode)
25413 /* Take care with QImode values - they can be in non-QI regs,
25414 but then they do cause partial register stalls. */
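/* Illustration: in 32-bit mode only %eax, %edx, %ecx and %ebx (hard
   regnos up to BX_REG) have addressable low-byte parts, while in
   64-bit mode a REX prefix exposes the low byte of every general
   register, hence the TARGET_64BIT escape below.  */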
25415 if (regno <= BX_REG || TARGET_64BIT)
25417 if (!TARGET_PARTIAL_REG_STALL)
25419 return reload_in_progress || reload_completed;
25421 /* We handle both integers and floats in the general purpose registers. */
25422 else if (VALID_INT_MODE_P (mode))
25424 else if (VALID_FP_MODE_P (mode))
25426 else if (VALID_DFP_MODE_P (mode))
25428 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25429 on to use that value in smaller contexts, this can easily force a
25430 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25431 supporting DImode, allow it. */
25432 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25438 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25439 tieable integer mode. */
25442 ix86_tieable_integer_mode_p (enum machine_mode mode)
25451 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25454 return TARGET_64BIT;
25461 /* Return true if MODE1 is accessible in a register that can hold MODE2
25462 without copying. That is, all register classes that can hold MODE2
25463 can also hold MODE1. */
25466 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25468 if (mode1 == mode2)
25471 if (ix86_tieable_integer_mode_p (mode1)
25472 && ix86_tieable_integer_mode_p (mode2))
25475 /* MODE2 being XFmode implies fp stack or general regs, which means we
25476 can tie any smaller floating point modes to it. Note that we do not
25477 tie this with TFmode. */
25478 if (mode2 == XFmode)
25479 return mode1 == SFmode || mode1 == DFmode;
25481 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
25482 that we can tie it with SFmode. */
25483 if (mode2 == DFmode)
25484 return mode1 == SFmode;
25486 /* If MODE2 is only appropriate for an SSE register, then tie with
25487 any other mode acceptable to SSE registers. */
25488 if (GET_MODE_SIZE (mode2) == 16
25489 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
25490 return (GET_MODE_SIZE (mode1) == 16
25491 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
25493 /* If MODE2 is appropriate for an MMX register, then tie
25494 with any other mode acceptable to MMX registers. */
25495 if (GET_MODE_SIZE (mode2) == 8
25496 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
25497 return (GET_MODE_SIZE (mode1) == 8
25498 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
25503 /* Compute a (partial) cost for rtx X. Return true if the complete
25504 cost has been computed, and false if subexpressions should be
25505 scanned. In either case, *TOTAL contains the cost result. */
25508 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
25510 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
25511 enum machine_mode mode = GET_MODE (x);
25512 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
25520 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
25522 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
25524 else if (flag_pic && SYMBOLIC_CONST (x)
25526 || (GET_CODE (x) != LABEL_REF
25527 && (GET_CODE (x) != SYMBOL_REF
25528 || !SYMBOL_REF_LOCAL_P (x)))))
25535 if (mode == VOIDmode)
25538 switch (standard_80387_constant_p (x))
25543 default: /* Other constants */
25548 /* Start with (MEM (SYMBOL_REF)), since that's where
25549 it'll probably end up. Add a penalty for size. */
25550 *total = (COSTS_N_INSNS (1)
25551 + (flag_pic != 0 && !TARGET_64BIT)
25552 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
25558 /* The zero extension is often completely free on x86_64, so make
25559 it as cheap as possible. */
25560 if (TARGET_64BIT && mode == DImode
25561 && GET_MODE (XEXP (x, 0)) == SImode)
25563 else if (TARGET_ZERO_EXTEND_WITH_AND)
25564 *total = cost->add;
25566 *total = cost->movzx;
25570 *total = cost->movsx;
25574 if (CONST_INT_P (XEXP (x, 1))
25575 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
25577 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25580 *total = cost->add;
25583 if ((value == 2 || value == 3)
25584 && cost->lea <= cost->shift_const)
25586 *total = cost->lea;
25596 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
25598 if (CONST_INT_P (XEXP (x, 1)))
25600 if (INTVAL (XEXP (x, 1)) > 32)
25601 *total = cost->shift_const + COSTS_N_INSNS (2);
25603 *total = cost->shift_const * 2;
25607 if (GET_CODE (XEXP (x, 1)) == AND)
25608 *total = cost->shift_var * 2;
25610 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
25615 if (CONST_INT_P (XEXP (x, 1)))
25616 *total = cost->shift_const;
25618 *total = cost->shift_var;
25623 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25625 /* ??? SSE scalar cost should be used here. */
25626 *total = cost->fmul;
25629 else if (X87_FLOAT_MODE_P (mode))
25631 *total = cost->fmul;
25634 else if (FLOAT_MODE_P (mode))
25636 /* ??? SSE vector cost should be used here. */
25637 *total = cost->fmul;
25642 rtx op0 = XEXP (x, 0);
25643 rtx op1 = XEXP (x, 1);
25645 if (CONST_INT_P (XEXP (x, 1)))
25647 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25648 for (nbits = 0; value != 0; value &= value - 1)
25652 /* This is arbitrary. */
25655 /* Compute costs correctly for widening multiplication. */
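/* For instance, (mult:DI (sign_extend:DI (reg:SI)) (sign_extend:DI (reg:SI)))
   maps to one widening imul, so it should be priced as an SImode multiply
   rather than a DImode one; that is what the mode adjustment below does.  */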
25656 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
25657 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
25658 == GET_MODE_SIZE (mode))
25660 int is_mulwiden = 0;
25661 enum machine_mode inner_mode = GET_MODE (op0);
25663 if (GET_CODE (op0) == GET_CODE (op1))
25664 is_mulwiden = 1, op1 = XEXP (op1, 0);
25665 else if (CONST_INT_P (op1))
25667 if (GET_CODE (op0) == SIGN_EXTEND)
25668 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
25671 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
25675 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
25678 *total = (cost->mult_init[MODE_INDEX (mode)]
25679 + nbits * cost->mult_bit
25680 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
25689 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25690 /* ??? SSE cost should be used here. */
25691 *total = cost->fdiv;
25692 else if (X87_FLOAT_MODE_P (mode))
25693 *total = cost->fdiv;
25694 else if (FLOAT_MODE_P (mode))
25695 /* ??? SSE vector cost should be used here. */
25696 *total = cost->fdiv;
25698 *total = cost->divide[MODE_INDEX (mode)];
25702 if (GET_MODE_CLASS (mode) == MODE_INT
25703 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
25705 if (GET_CODE (XEXP (x, 0)) == PLUS
25706 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
25707 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
25708 && CONSTANT_P (XEXP (x, 1)))
25710 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
25711 if (val == 2 || val == 4 || val == 8)
25713 *total = cost->lea;
25714 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25715 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
25716 outer_code, speed);
25717 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25721 else if (GET_CODE (XEXP (x, 0)) == MULT
25722 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
25724 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
25725 if (val == 2 || val == 4 || val == 8)
25727 *total = cost->lea;
25728 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25729 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25733 else if (GET_CODE (XEXP (x, 0)) == PLUS)
25735 *total = cost->lea;
25736 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25737 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25738 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25745 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25747 /* ??? SSE cost should be used here. */
25748 *total = cost->fadd;
25751 else if (X87_FLOAT_MODE_P (mode))
25753 *total = cost->fadd;
25756 else if (FLOAT_MODE_P (mode))
25758 /* ??? SSE vector cost should be used here. */
25759 *total = cost->fadd;
25767 if (!TARGET_64BIT && mode == DImode)
25769 *total = (cost->add * 2
25770 + (rtx_cost (XEXP (x, 0), outer_code, speed)
25771 << (GET_MODE (XEXP (x, 0)) != DImode))
25772 + (rtx_cost (XEXP (x, 1), outer_code, speed)
25773 << (GET_MODE (XEXP (x, 1)) != DImode)));
25779 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25781 /* ??? SSE cost should be used here. */
25782 *total = cost->fchs;
25785 else if (X87_FLOAT_MODE_P (mode))
25787 *total = cost->fchs;
25790 else if (FLOAT_MODE_P (mode))
25792 /* ??? SSE vector cost should be used here. */
25793 *total = cost->fchs;
25799 if (!TARGET_64BIT && mode == DImode)
25800 *total = cost->add * 2;
25802 *total = cost->add;
25806 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
25807 && XEXP (XEXP (x, 0), 1) == const1_rtx
25808 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
25809 && XEXP (x, 1) == const0_rtx)
25811 /* This kind of construct is implemented using test[bwl].
25812 Treat it as if we had an AND. */
25813 *total = (cost->add
25814 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
25815 + rtx_cost (const1_rtx, outer_code, speed));
25821 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
25826 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25827 /* ??? SSE cost should be used here. */
25828 *total = cost->fabs;
25829 else if (X87_FLOAT_MODE_P (mode))
25830 *total = cost->fabs;
25831 else if (FLOAT_MODE_P (mode))
25832 /* ??? SSE vector cost should be used here. */
25833 *total = cost->fabs;
25837 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25838 /* ??? SSE cost should be used here. */
25839 *total = cost->fsqrt;
25840 else if (X87_FLOAT_MODE_P (mode))
25841 *total = cost->fsqrt;
25842 else if (FLOAT_MODE_P (mode))
25843 /* ??? SSE vector cost should be used here. */
25844 *total = cost->fsqrt;
25848 if (XINT (x, 1) == UNSPEC_TP)
25855 case VEC_DUPLICATE:
25856 /* ??? Assume all of these vector manipulation patterns are
25857 recognizable, in which case they all pretty much have the same cost. */
25859 *total = COSTS_N_INSNS (1);
25869 static int current_machopic_label_num;
25871 /* Given a symbol name and its associated stub, write out the
25872 definition of the stub. */
25875 machopic_output_stub (FILE *file, const char *symb, const char *stub)
25877 unsigned int length;
25878 char *binder_name, *symbol_name, lazy_ptr_name[32];
25879 int label = ++current_machopic_label_num;
25881 /* For 64-bit we shouldn't get here. */
25882 gcc_assert (!TARGET_64BIT);
25884 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
25885 symb = (*targetm.strip_name_encoding) (symb);
25887 length = strlen (stub);
25888 binder_name = XALLOCAVEC (char, length + 32);
25889 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
25891 length = strlen (symb);
25892 symbol_name = XALLOCAVEC (char, length + 32);
25893 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
25895 sprintf (lazy_ptr_name, "L%d$lz", label);
25898 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
25900 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
25902 fprintf (file, "%s:\n", stub);
25903 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25907 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
25908 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
25909 fprintf (file, "\tjmp\t*%%edx\n");
25912 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
25914 fprintf (file, "%s:\n", binder_name);
25918 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
25919 fputs ("\tpushl\t%eax\n", file);
25922 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
25924 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
25926 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
25927 fprintf (file, "%s:\n", lazy_ptr_name);
25928 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25929 fprintf (file, ASM_LONG "%s\n", binder_name);
25931 #endif /* TARGET_MACHO */
25933 /* Order the registers for register allocator. */
25936 x86_order_regs_for_local_alloc (void)
25941 /* First allocate the local general purpose registers. */
25942 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
25943 if (GENERAL_REGNO_P (i) && call_used_regs[i])
25944 reg_alloc_order [pos++] = i;
25946 /* Global general purpose registers. */
25947 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
25948 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
25949 reg_alloc_order [pos++] = i;
25951 /* x87 registers come first in case we are doing FP math using them. */
25953 if (!TARGET_SSE_MATH)
25954 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
25955 reg_alloc_order [pos++] = i;
25957 /* SSE registers. */
25958 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
25959 reg_alloc_order [pos++] = i;
25960 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
25961 reg_alloc_order [pos++] = i;
25963 /* x87 registers. */
25964 if (TARGET_SSE_MATH)
25965 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
25966 reg_alloc_order [pos++] = i;
25968 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
25969 reg_alloc_order [pos++] = i;
25971 /* Initialize the rest of the array, as we do not allocate some registers at all. */
25973 while (pos < FIRST_PSEUDO_REGISTER)
25974 reg_alloc_order [pos++] = 0;
25977 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
25978 struct attribute_spec.handler. */
25980 ix86_handle_abi_attribute (tree *node, tree name,
25981 tree args ATTRIBUTE_UNUSED,
25982 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
25984 if (TREE_CODE (*node) != FUNCTION_TYPE
25985 && TREE_CODE (*node) != METHOD_TYPE
25986 && TREE_CODE (*node) != FIELD_DECL
25987 && TREE_CODE (*node) != TYPE_DECL)
25989 warning (OPT_Wattributes, "%qE attribute only applies to functions",
25991 *no_add_attrs = true;
25996 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
25998 *no_add_attrs = true;
26002 /* Can combine regparm with all attributes but fastcall. */
26003 if (is_attribute_p ("ms_abi", name))
26005 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
26007 error ("ms_abi and sysv_abi attributes are not compatible");
26012 else if (is_attribute_p ("sysv_abi", name))
26014 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
26016 error ("ms_abi and sysv_abi attributes are not compatible");
26025 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
26026 struct attribute_spec.handler. */
26028 ix86_handle_struct_attribute (tree *node, tree name,
26029 tree args ATTRIBUTE_UNUSED,
26030 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26033 if (DECL_P (*node))
26035 if (TREE_CODE (*node) == TYPE_DECL)
26036 type = &TREE_TYPE (*node);
26041 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
26042 || TREE_CODE (*type) == UNION_TYPE)))
26044 warning (OPT_Wattributes, "%qE attribute ignored",
26046 *no_add_attrs = true;
26049 else if ((is_attribute_p ("ms_struct", name)
26050 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
26051 || ((is_attribute_p ("gcc_struct", name)
26052 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
26054 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
26056 *no_add_attrs = true;
26063 ix86_handle_fndecl_attribute (tree *node, tree name,
26064 tree args ATTRIBUTE_UNUSED,
26065 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26067 if (TREE_CODE (*node) != FUNCTION_DECL)
26069 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26071 *no_add_attrs = true;
26077 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
26082 #ifndef HAVE_AS_IX86_SWAP
26083 sorry ("ms_hook_prologue attribute needs assembler swap suffix support");
26090 ix86_ms_bitfield_layout_p (const_tree record_type)
26092 return ((TARGET_MS_BITFIELD_LAYOUT
26093 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
26094 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
26097 /* Returns an expression indicating where the this parameter is
26098 located on entry to the FUNCTION. */
26101 x86_this_parameter (tree function)
26103 tree type = TREE_TYPE (function);
26104 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
26109 const int *parm_regs;
26111 if (ix86_function_type_abi (type) == MS_ABI)
26112 parm_regs = x86_64_ms_abi_int_parameter_registers;
26114 parm_regs = x86_64_int_parameter_registers;
26115 return gen_rtx_REG (DImode, parm_regs[aggr]);
26118 nregs = ix86_function_regparm (type, function);
26120 if (nregs > 0 && !stdarg_p (type))
26124 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
26125 regno = aggr ? DX_REG : CX_REG;
26133 return gen_rtx_MEM (SImode,
26134 plus_constant (stack_pointer_rtx, 4));
26137 return gen_rtx_REG (SImode, regno);
26140 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
26143 /* Determine whether x86_output_mi_thunk can succeed. */
26146 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
26147 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
26148 HOST_WIDE_INT vcall_offset, const_tree function)
26150 /* 64-bit can handle anything. */
26154 /* For 32-bit, everything's fine if we have one free register. */
26155 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
26158 /* Need a free register for vcall_offset. */
26162 /* Need a free register for GOT references. */
26163 if (flag_pic && !(*targetm.binds_local_p) (function))
26166 /* Otherwise ok. */
26170 /* Output the assembler code for a thunk function. THUNK_DECL is the
26171 declaration for the thunk function itself, FUNCTION is the decl for
26172 the target function. DELTA is an immediate constant offset to be
26173 added to THIS. If VCALL_OFFSET is nonzero, the word at
26174 *(*this + vcall_offset) should be added to THIS. */
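/* Sketch of the 32-bit output for a stack-passed THIS with DELTA == -4
   and no VCALL_OFFSET (operand syntax approximate):

       subl  $4, 4(%esp)
       jmp   function  */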
26177 x86_output_mi_thunk (FILE *file,
26178 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
26179 HOST_WIDE_INT vcall_offset, tree function)
26182 rtx this_param = x86_this_parameter (function);
26185 /* Make sure unwind info is emitted for the thunk if needed. */
26186 final_start_function (emit_barrier (), file, 1);
26188 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
26189 pull it in now and let DELTA benefit. */
26190 if (REG_P (this_param))
26191 this_reg = this_param;
26192 else if (vcall_offset)
26194 /* Put the this parameter into %eax. */
26195 xops[0] = this_param;
26196 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
26197 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26200 this_reg = NULL_RTX;
26202 /* Adjust the this parameter by a fixed constant. */
26205 xops[0] = GEN_INT (delta);
26206 xops[1] = this_reg ? this_reg : this_param;
26209 if (!x86_64_general_operand (xops[0], DImode))
26211 tmp = gen_rtx_REG (DImode, R10_REG);
26213 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
26215 xops[1] = this_param;
26217 if (x86_maybe_negate_const_int (&xops[0], DImode))
26218 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
26220 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
26222 else if (x86_maybe_negate_const_int (&xops[0], SImode))
26223 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
26225 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
26228 /* Adjust the this parameter by a value stored in the vtable. */
26232 tmp = gen_rtx_REG (DImode, R10_REG);
26235 int tmp_regno = CX_REG;
26236 if (lookup_attribute ("fastcall",
26237 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26238 tmp_regno = AX_REG;
26239 tmp = gen_rtx_REG (SImode, tmp_regno);
26242 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26244 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26246 /* Adjust the this parameter. */
26247 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26248 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26250 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26251 xops[0] = GEN_INT (vcall_offset);
26253 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26254 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26256 xops[1] = this_reg;
26257 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26260 /* If necessary, drop THIS back to its stack slot. */
26261 if (this_reg && this_reg != this_param)
26263 xops[0] = this_reg;
26264 xops[1] = this_param;
26265 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26268 xops[0] = XEXP (DECL_RTL (function), 0);
26271 if (!flag_pic || (*targetm.binds_local_p) (function))
26272 output_asm_insn ("jmp\t%P0", xops);
26273 /* All thunks should be in the same object as their target,
26274 and thus binds_local_p should be true. */
26275 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26276 gcc_unreachable ();
26279 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26280 tmp = gen_rtx_CONST (Pmode, tmp);
26281 tmp = gen_rtx_MEM (QImode, tmp);
26283 output_asm_insn ("jmp\t%A0", xops);
26288 if (!flag_pic || (*targetm.binds_local_p) (function))
26289 output_asm_insn ("jmp\t%P0", xops);
26294 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26295 tmp = (gen_rtx_SYMBOL_REF
26297 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26298 tmp = gen_rtx_MEM (QImode, tmp);
26300 output_asm_insn ("jmp\t%0", xops);
26303 #endif /* TARGET_MACHO */
26305 tmp = gen_rtx_REG (SImode, CX_REG);
26306 output_set_got (tmp, NULL_RTX);
26309 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26310 output_asm_insn ("jmp\t{*}%1", xops);
26313 final_end_function ();
26317 x86_file_start (void)
26319 default_file_start ();
26321 darwin_file_start ();
26323 if (X86_FILE_START_VERSION_DIRECTIVE)
26324 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26325 if (X86_FILE_START_FLTUSED)
26326 fputs ("\t.global\t__fltused\n", asm_out_file);
26327 if (ix86_asm_dialect == ASM_INTEL)
26328 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26332 x86_field_alignment (tree field, int computed)
26334 enum machine_mode mode;
26335 tree type = TREE_TYPE (field);
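/* Sketch of the rationale: outside TARGET_64BIT/TARGET_ALIGN_DOUBLE the
   traditional ia32 System V ABI aligns double, long long and their
   complex variants to only 4 bytes inside structures, which is why the
   computed alignment is capped at 32 bits below.  */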
26337 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26339 mode = TYPE_MODE (strip_array_types (type));
26340 if (mode == DFmode || mode == DCmode
26341 || GET_MODE_CLASS (mode) == MODE_INT
26342 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26343 return MIN (32, computed);
26347 /* Output assembler code to FILE to increment profiler label # LABELNO
26348 for profiling a function entry. */
26350 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26354 #ifndef NO_PROFILE_COUNTERS
26355 fprintf (file, "\tleaq\t" LPREFIX "P%d(%%rip),%%r11\n", labelno);
26358 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26359 fputs ("\tcall\t*" MCOUNT_NAME "@GOTPCREL(%rip)\n", file);
26361 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26365 #ifndef NO_PROFILE_COUNTERS
26366 fprintf (file, "\tleal\t" LPREFIX "P%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
26369 fputs ("\tcall\t*" MCOUNT_NAME "@GOT(%ebx)\n", file);
26373 #ifndef NO_PROFILE_COUNTERS
26374 fprintf (file, "\tmovl\t$" LPREFIX "P%d,%%" PROFILE_COUNT_REGISTER "\n",
26377 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26381 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26382 /* We don't have exact information about the insn sizes, but we may assume
26383 quite safely that we are informed about all 1 byte insns and memory
26384 address sizes. This is enough to eliminate unnecessary padding in the vast majority of cases. */
26388 min_insn_size (rtx insn)
26392 if (!INSN_P (insn) || !active_insn_p (insn))
26395 /* Discard alignments we have emitted, and jump table data. */
26396 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26397 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26399 if (JUMP_TABLE_DATA_P (insn))
26402 /* Important case - calls are always 5 bytes.
26403 It is common to have many calls in a row. */
26405 && symbolic_reference_mentioned_p (PATTERN (insn))
26406 && !SIBLING_CALL_P (insn))
26408 len = get_attr_length (insn);
26412 /* For normal instructions we rely on get_attr_length being exact,
26413 with a few exceptions. */
26414 if (!JUMP_P (insn))
26416 enum attr_type type = get_attr_type (insn);
26421 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
26422 || asm_noperands (PATTERN (insn)) >= 0)
26429 /* Otherwise trust get_attr_length. */
26433 l = get_attr_length_address (insn);
26434 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
26443 /* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16-byte window. */
26447 ix86_avoid_jump_mispredicts (void)
26449 rtx insn, start = get_insns ();
26450 int nbytes = 0, njumps = 0;
26453 /* Look for all minimal intervals of instructions containing 4 jumps.
26454 The intervals are bounded by START and INSN. NBYTES is the total
26455 size of the instructions in the interval, including INSN and not
26456 including START. When NBYTES is smaller than 16, it is possible
26457 that the end of START and INSN end up in the same 16-byte page.
26459 The smallest offset at which INSN can start is when START ends at
26460 offset 0; the offset of INSN is then NBYTES - sizeof (INSN). We add
26461 a p2align to the 16-byte window with max-skip 15 - NBYTES + sizeof (INSN). */
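/* Worked example: if the three previous jumps span 12 bytes and INSN is
   a 2-byte jump, NBYTES is 14 and the requested max-skip is
   15 - 14 + 2 = 3, just enough to push INSN past the 16-byte boundary
   when all four jumps would otherwise share it.  */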
26463 for (insn = start; insn; insn = NEXT_INSN (insn))
26467 if (LABEL_P (insn))
26469 int align = label_to_alignment (insn);
26470 int max_skip = label_to_max_skip (insn);
26474 /* If align > 3, only up to 16 - max_skip - 1 bytes can already be
26475 in the current 16-byte page, because otherwise
26476 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer bytes
26477 to reach a 16-byte boundary. */
26479 || (align <= 3 && max_skip != (1 << align) - 1))
26482 fprintf (dump_file, "Label %i with max_skip %i\n",
26483 INSN_UID (insn), max_skip);
26486 while (nbytes + max_skip >= 16)
26488 start = NEXT_INSN (start);
26489 if ((JUMP_P (start)
26490 && GET_CODE (PATTERN (start)) != ADDR_VEC
26491 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26493 njumps--, isjump = 1;
26496 nbytes -= min_insn_size (start);
26502 min_size = min_insn_size (insn);
26503 nbytes += min_size;
26505 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
26506 INSN_UID (insn), min_size);
26508 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26509 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26517 start = NEXT_INSN (start);
26518 if ((JUMP_P (start)
26519 && GET_CODE (PATTERN (start)) != ADDR_VEC
26520 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26522 njumps--, isjump = 1;
26525 nbytes -= min_insn_size (start);
26527 gcc_assert (njumps >= 0);
26529 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26530 INSN_UID (start), INSN_UID (insn), nbytes);
26532 if (njumps == 3 && isjump && nbytes < 16)
26534 int padsize = 15 - nbytes + min_insn_size (insn);
26537 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26538 INSN_UID (insn), padsize);
26539 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
26545 /* The AMD Athlon runs faster when RET is not the destination of a
26546 conditional jump or directly preceded by another jump instruction.
26547 We avoid the penalty by inserting a NOP just before the RET
26548 instruction in such cases. */
26550 ix86_pad_returns (void)
26555 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26557 basic_block bb = e->src;
26558 rtx ret = BB_END (bb);
26560 bool replace = false;
26562 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26563 || optimize_bb_for_size_p (bb))
26565 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26566 if (active_insn_p (prev) || LABEL_P (prev))
26568 if (prev && LABEL_P (prev))
26573 FOR_EACH_EDGE (e, ei, bb->preds)
26574 if (EDGE_FREQUENCY (e) && e->src->index >= 0
26575 && !(e->flags & EDGE_FALLTHRU))
26580 prev = prev_active_insn (ret);
26582 && ((JUMP_P (prev) && any_condjump_p (prev))
26585 /* Empty functions get a branch mispredict even when the jump destination
26586 is not visible to us. */
26587 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
26592 emit_jump_insn_before (gen_return_internal_long (), ret);
26598 /* Implement machine-specific optimizations. We implement padding of returns
26599 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
26603 if (optimize && optimize_function_for_speed_p (cfun))
26605 if (TARGET_PAD_RETURNS)
26606 ix86_pad_returns ();
26607 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26608 if (TARGET_FOUR_JUMP_LIMIT)
26609 ix86_avoid_jump_mispredicts ();
26614 /* Return nonzero when a QImode register that must be represented via a REX prefix is used. */
26617 x86_extended_QIreg_mentioned_p (rtx insn)
26620 extract_insn_cached (insn);
26621 for (i = 0; i < recog_data.n_operands; i++)
26622 if (REG_P (recog_data.operand[i])
26623 && REGNO (recog_data.operand[i]) > BX_REG)
26628 /* Return nonzero when P points to a register encoded via a REX prefix.
26629 Called via for_each_rtx. */
26631 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
26633 unsigned int regno;
26636 regno = REGNO (*p);
26637 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
26640 /* Return true when INSN mentions a register that must be encoded using a REX prefix. */
26643 x86_extended_reg_mentioned_p (rtx insn)
26645 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
26646 extended_reg_mentioned_1, NULL);
26649 /* If profitable, negate (without causing overflow) the integer constant
26650 of mode MODE at location LOC. Return true if the constant was negated. */
26652 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
26656 if (!CONST_INT_P (*loc))
26662 /* DImode x86_64 constants must fit in 32 bits. */
26663 gcc_assert (x86_64_immediate_operand (*loc, mode));
26674 gcc_unreachable ();
26677 /* Avoid overflows. */
26678 if (mode_signbit_p (mode, *loc))
26681 val = INTVAL (*loc);
26683 /* Make things pretty: emit `subl $4,%eax' rather than `addl $-4,%eax'.
26684 Exception: -128 encodes smaller than 128, so swap the sign and the op. */
26685 if ((val < 0 && val != -128)
26688 *loc = GEN_INT (-val);
26695 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
26696 optabs would emit if we didn't have TFmode patterns. */
26699 x86_emit_floatuns (rtx operands[2])
26701 rtx neglab, donelab, i0, i1, f0, in, out;
26702 enum machine_mode mode, inmode;
26704 inmode = GET_MODE (operands[1]);
26705 gcc_assert (inmode == SImode || inmode == DImode);
26708 in = force_reg (inmode, operands[1]);
26709 mode = GET_MODE (out);
26710 neglab = gen_label_rtx ();
26711 donelab = gen_label_rtx ();
26712 f0 = gen_reg_rtx (mode);
26714 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
26716 expand_float (out, in, 0);
26718 emit_jump_insn (gen_jump (donelab));
26721 emit_label (neglab);
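/* The input has its sign bit set when read as a signed value, so it
   cannot be converted directly.  Halve it with a logical shift, OR the
   shifted-out bit back in so the final rounding is unbiased, convert,
   and then double the result.  */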
26723 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
26725 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
26727 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
26729 expand_float (f0, i0, 0);
26731 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
26733 emit_label (donelab);
26736 /* AVX does not support 32-byte integer vector operations,
26737 thus the longest vector we are faced with is V16QImode. */
26738 #define MAX_VECT_LEN 16
26740 struct expand_vec_perm_d
26742 rtx target, op0, op1;
26743 unsigned char perm[MAX_VECT_LEN];
26744 enum machine_mode vmode;
26745 unsigned char nelt;
26749 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
26750 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
26752 /* Get a vector mode of the same size as the original but with elements
26753 twice as wide. This is only guaranteed to apply to integral vectors. */
26755 static inline enum machine_mode
26756 get_mode_wider_vector (enum machine_mode o)
26758 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
26759 enum machine_mode n = GET_MODE_WIDER_MODE (o);
26760 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
26761 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
26765 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26766 with all elements equal to VAR. Return true if successful. */
26769 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
26770 rtx target, rtx val)
26793 /* First attempt to recognize VAL as-is. */
26794 dup = gen_rtx_VEC_DUPLICATE (mode, val);
26795 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
26796 if (recog_memoized (insn) < 0)
26799 /* If that fails, force VAL into a register. */
26802 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
26803 seq = get_insns ();
26806 emit_insn_before (seq, insn);
26808 ok = recog_memoized (insn) >= 0;
26817 if (TARGET_SSE || TARGET_3DNOW_A)
26821 val = gen_lowpart (SImode, val);
26822 x = gen_rtx_TRUNCATE (HImode, val);
26823 x = gen_rtx_VEC_DUPLICATE (mode, x);
26824 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26837 struct expand_vec_perm_d dperm;
26841 memset (&dperm, 0, sizeof (dperm));
26842 dperm.target = target;
26843 dperm.vmode = mode;
26844 dperm.nelt = GET_MODE_NUNITS (mode);
26845 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
26847 /* Extend to SImode using a paradoxical SUBREG. */
26848 tmp1 = gen_reg_rtx (SImode);
26849 emit_move_insn (tmp1, gen_lowpart (SImode, val));
26851 /* Insert the SImode value as low element of a V4SImode vector. */
26852 tmp2 = gen_lowpart (V4SImode, dperm.op0);
26853 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
26855 ok = (expand_vec_perm_1 (&dperm)
26856 || expand_vec_perm_broadcast_1 (&dperm));
26868 /* Replicate the value once into the next wider mode and recurse. */
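/* For example, a V8HImode broadcast of X first forms X | (X << 16)
   in SImode and then broadcasts that value as V4SImode.  */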
26870 enum machine_mode smode, wsmode, wvmode;
26873 smode = GET_MODE_INNER (mode);
26874 wvmode = get_mode_wider_vector (mode);
26875 wsmode = GET_MODE_INNER (wvmode);
26877 val = convert_modes (wsmode, smode, val, true);
26878 x = expand_simple_binop (wsmode, ASHIFT, val,
26879 GEN_INT (GET_MODE_BITSIZE (smode)),
26880 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26881 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
26883 x = gen_lowpart (wvmode, target);
26884 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
26892 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
26893 rtx x = gen_reg_rtx (hvmode);
26895 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
26898 x = gen_rtx_VEC_CONCAT (mode, x, x);
26899 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26908 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26909 whose ONE_VAR element is VAR, and other elements are zero. Return true if successful. */
26913 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
26914 rtx target, rtx var, int one_var)
26916 enum machine_mode vsimode;
26919 bool use_vector_set = false;
26924 /* For SSE4.1, we normally use vector set. But if the second
26925 element is zero and inter-unit moves are OK, we use movq instead. */
26927 use_vector_set = (TARGET_64BIT
26929 && !(TARGET_INTER_UNIT_MOVES
26935 use_vector_set = TARGET_SSE4_1;
26938 use_vector_set = TARGET_SSE2;
26941 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
26948 use_vector_set = TARGET_AVX;
26951 /* Use ix86_expand_vector_set in 64-bit mode only. */
26952 use_vector_set = TARGET_AVX && TARGET_64BIT;
26958 if (use_vector_set)
26960 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
26961 var = force_reg (GET_MODE_INNER (mode), var);
26962 ix86_expand_vector_set (mmx_ok, target, var, one_var);
26978 var = force_reg (GET_MODE_INNER (mode), var);
26979 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
26980 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26985 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
26986 new_target = gen_reg_rtx (mode);
26988 new_target = target;
26989 var = force_reg (GET_MODE_INNER (mode), var);
26990 x = gen_rtx_VEC_DUPLICATE (mode, var);
26991 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
26992 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
26995 /* We need to shuffle the value to the correct position, so
26996 create a new pseudo to store the intermediate result. */
26998 /* With SSE2, we can use the integer shuffle insns. */
26999 if (mode != V4SFmode && TARGET_SSE2)
27001 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
27003 GEN_INT (one_var == 1 ? 0 : 1),
27004 GEN_INT (one_var == 2 ? 0 : 1),
27005 GEN_INT (one_var == 3 ? 0 : 1)));
27006 if (target != new_target)
27007 emit_move_insn (target, new_target);
27011 /* Otherwise convert the intermediate result to V4SFmode and
27012 use the SSE1 shuffle instructions. */
27013 if (mode != V4SFmode)
27015 tmp = gen_reg_rtx (V4SFmode);
27016 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
27021 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
27023 GEN_INT (one_var == 1 ? 0 : 1),
27024 GEN_INT (one_var == 2 ? 0+4 : 1+4),
27025 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
27027 if (mode != V4SFmode)
27028 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
27029 else if (tmp != target)
27030 emit_move_insn (target, tmp);
27032 else if (target != new_target)
27033 emit_move_insn (target, new_target);
27038 vsimode = V4SImode;
27044 vsimode = V2SImode;
27050 /* Zero extend the variable element to SImode and recurse. */
27051 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
27053 x = gen_reg_rtx (vsimode);
27054 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
27056 gcc_unreachable ();
27058 emit_move_insn (target, gen_lowpart (mode, x));
27066 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27067 consisting of the values in VALS. It is known that all elements
27068 except ONE_VAR are constants. Return true if successful. */
27071 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
27072 rtx target, rtx vals, int one_var)
27074 rtx var = XVECEXP (vals, 0, one_var);
27075 enum machine_mode wmode;
27078 const_vec = copy_rtx (vals);
27079 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
27080 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
27088 /* For the two element vectors, it's just as easy to use
27089 the general case. */
27093 /* Use ix86_expand_vector_set in 64-bit mode only. */
27116 /* There's no way to set one QImode entry easily. Combine
27117 the variable value with its adjacent constant value, and
27118 promote to an HImode set. */
27119 x = XVECEXP (vals, 0, one_var ^ 1);
27122 var = convert_modes (HImode, QImode, var, true);
27123 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
27124 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27125 x = GEN_INT (INTVAL (x) & 0xff);
27129 var = convert_modes (HImode, QImode, var, true);
27130 x = gen_int_mode (INTVAL (x) << 8, HImode);
27132 if (x != const0_rtx)
27133 var = expand_simple_binop (HImode, IOR, var, x, var,
27134 1, OPTAB_LIB_WIDEN);
27136 x = gen_reg_rtx (wmode);
27137 emit_move_insn (x, gen_lowpart (wmode, const_vec));
27138 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
27140 emit_move_insn (target, gen_lowpart (mode, x));
27147 emit_move_insn (target, const_vec);
27148 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27152 /* A subroutine of ix86_expand_vector_init_general. Use vector
27153 concatenate to handle the most general case: all values variable,
27154 and none identical. */
27157 ix86_expand_vector_init_concat (enum machine_mode mode,
27158 rtx target, rtx *ops, int n)
27160 enum machine_mode cmode, hmode = VOIDmode;
27161 rtx first[8], second[4];
27201 gcc_unreachable ();
27204 if (!register_operand (ops[1], cmode))
27205 ops[1] = force_reg (cmode, ops[1]);
27206 if (!register_operand (ops[0], cmode))
27207 ops[0] = force_reg (cmode, ops[0]);
27208 emit_insn (gen_rtx_SET (VOIDmode, target,
27209 gen_rtx_VEC_CONCAT (mode, ops[0],
27229 gcc_unreachable ();
27245 gcc_unreachable ();
27250 /* FIXME: We process inputs backward to help RA. PR 36222. */
27253 for (; i > 0; i -= 2, j--)
27255 first[j] = gen_reg_rtx (cmode);
27256 v = gen_rtvec (2, ops[i - 1], ops[i]);
27257 ix86_expand_vector_init (false, first[j],
27258 gen_rtx_PARALLEL (cmode, v));
27264 gcc_assert (hmode != VOIDmode);
27265 for (i = j = 0; i < n; i += 2, j++)
27267 second[j] = gen_reg_rtx (hmode);
27268 ix86_expand_vector_init_concat (hmode, second [j],
27272 ix86_expand_vector_init_concat (mode, target, second, n);
27275 ix86_expand_vector_init_concat (mode, target, first, n);
27279 gcc_unreachable ();
27283 /* A subroutine of ix86_expand_vector_init_general. Use vector
27284 interleave to handle the most general case: all values variable,
27285 and none identical. */
27288 ix86_expand_vector_init_interleave (enum machine_mode mode,
27289 rtx target, rtx *ops, int n)
27291 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
27294 rtx (*gen_load_even) (rtx, rtx, rtx);
27295 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
27296 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
27301 gen_load_even = gen_vec_setv8hi;
27302 gen_interleave_first_low = gen_vec_interleave_lowv4si;
27303 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27304 inner_mode = HImode;
27305 first_imode = V4SImode;
27306 second_imode = V2DImode;
27307 third_imode = VOIDmode;
27310 gen_load_even = gen_vec_setv16qi;
27311 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
27312 gen_interleave_second_low = gen_vec_interleave_lowv4si;
27313 inner_mode = QImode;
27314 first_imode = V8HImode;
27315 second_imode = V4SImode;
27316 third_imode = V2DImode;
27319 gcc_unreachable ();
27322 for (i = 0; i < n; i++)
27324 /* Extend the odd element to SImode using a paradoxical SUBREG. */
27325 op0 = gen_reg_rtx (SImode);
27326 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
27328 /* Insert the SImode value as low element of V4SImode vector. */
27329 op1 = gen_reg_rtx (V4SImode);
27330 op0 = gen_rtx_VEC_MERGE (V4SImode,
27331 gen_rtx_VEC_DUPLICATE (V4SImode,
27333 CONST0_RTX (V4SImode),
27335 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
27337 /* Cast the V4SImode vector back to a vector in the original mode. */
27338 op0 = gen_reg_rtx (mode);
27339 emit_move_insn (op0, gen_lowpart (mode, op1));
27341 /* Load even elements into the second position. */
27342 emit_insn ((*gen_load_even) (op0,
27343 force_reg (inner_mode,
27347 /* Cast vector to FIRST_IMODE vector. */
27348 ops[i] = gen_reg_rtx (first_imode);
27349 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27352 /* Interleave low FIRST_IMODE vectors. */
27353 for (i = j = 0; i < n; i += 2, j++)
27355 op0 = gen_reg_rtx (first_imode);
27356 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27358 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27359 ops[j] = gen_reg_rtx (second_imode);
27360 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27363 /* Interleave low SECOND_IMODE vectors. */
27364 switch (second_imode)
27367 for (i = j = 0; i < n / 2; i += 2, j++)
27369 op0 = gen_reg_rtx (second_imode);
27370 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
27373 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
27375 ops[j] = gen_reg_rtx (third_imode);
27376 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27378 second_imode = V2DImode;
27379 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27383 op0 = gen_reg_rtx (second_imode);
27384 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
27387 /* Cast the SECOND_IMODE vector back to a vector of the original mode. */
27389 emit_insn (gen_rtx_SET (VOIDmode, target,
27390 gen_lowpart (mode, op0)));
27394 gcc_unreachable ();
27398 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27399 all values variable, and none identical. */
27402 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27403 rtx target, rtx vals)
27405 rtx ops[32], op0, op1;
27406 enum machine_mode half_mode = VOIDmode;
27413 if (!mmx_ok && !TARGET_SSE)
27425 n = GET_MODE_NUNITS (mode);
27426 for (i = 0; i < n; i++)
27427 ops[i] = XVECEXP (vals, 0, i);
27428 ix86_expand_vector_init_concat (mode, target, ops, n);
27432 half_mode = V16QImode;
27436 half_mode = V8HImode;
27440 n = GET_MODE_NUNITS (mode);
27441 for (i = 0; i < n; i++)
27442 ops[i] = XVECEXP (vals, 0, i);
27443 op0 = gen_reg_rtx (half_mode);
27444 op1 = gen_reg_rtx (half_mode);
27445 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27447 ix86_expand_vector_init_interleave (half_mode, op1,
27448 &ops [n >> 1], n >> 2);
27449 emit_insn (gen_rtx_SET (VOIDmode, target,
27450 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27454 if (!TARGET_SSE4_1)
27462 /* Don't use ix86_expand_vector_init_interleave if we can't
27463 move from GPR to SSE register directly. */
27464 if (!TARGET_INTER_UNIT_MOVES)
27467 n = GET_MODE_NUNITS (mode);
27468 for (i = 0; i < n; i++)
27469 ops[i] = XVECEXP (vals, 0, i);
27470 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27478 gcc_unreachable ();
27482 int i, j, n_elts, n_words, n_elt_per_word;
27483 enum machine_mode inner_mode;
27484 rtx words[4], shift;
27486 inner_mode = GET_MODE_INNER (mode);
27487 n_elts = GET_MODE_NUNITS (mode);
27488 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27489 n_elt_per_word = n_elts / n_words;
27490 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
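/* Pack each group of N_ELT_PER_WORD elements into one word_mode value.
   Elements are visited from the highest index down, so the repeated
   shift-left/IOR below ends with element 0 in the least significant
   bits, matching the little-endian vector layout.  */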
27492 for (i = 0; i < n_words; ++i)
27494 rtx word = NULL_RTX;
27496 for (j = 0; j < n_elt_per_word; ++j)
27498 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27499 elt = convert_modes (word_mode, inner_mode, elt, true);
27505 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27506 word, 1, OPTAB_LIB_WIDEN);
27507 word = expand_simple_binop (word_mode, IOR, word, elt,
27508 word, 1, OPTAB_LIB_WIDEN);
27516 emit_move_insn (target, gen_lowpart (mode, words[0]));
27517 else if (n_words == 2)
27519 rtx tmp = gen_reg_rtx (mode);
27520 emit_clobber (tmp);
27521 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27522 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27523 emit_move_insn (target, tmp);
27525 else if (n_words == 4)
27527 rtx tmp = gen_reg_rtx (V4SImode);
27528 gcc_assert (word_mode == SImode);
27529 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27530 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27531 emit_move_insn (target, gen_lowpart (mode, tmp));
27534 gcc_unreachable ();
27538 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27539 instructions unless MMX_OK is true. */
27542 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27544 enum machine_mode mode = GET_MODE (target);
27545 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27546 int n_elts = GET_MODE_NUNITS (mode);
27547 int n_var = 0, one_var = -1;
27548 bool all_same = true, all_const_zero = true;
27552 for (i = 0; i < n_elts; ++i)
27554 x = XVECEXP (vals, 0, i);
27555 if (!(CONST_INT_P (x)
27556 || GET_CODE (x) == CONST_DOUBLE
27557 || GET_CODE (x) == CONST_FIXED))
27558 n_var++, one_var = i;
27559 else if (x != CONST0_RTX (inner_mode))
27560 all_const_zero = false;
27561 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27565 /* Constants are best loaded from the constant pool. */
27568 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27572 /* If all values are identical, broadcast the value. */
27574 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27575 XVECEXP (vals, 0, 0)))
27578 /* Values where only one field is non-constant are best loaded from
27579 the pool and overwritten with a move later. */
27583 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27584 XVECEXP (vals, 0, one_var),
27588 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27592 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
27596 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27598 enum machine_mode mode = GET_MODE (target);
27599 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27600 enum machine_mode half_mode;
27601 bool use_vec_merge = false;
27603 static rtx (*gen_extract[6][2]) (rtx, rtx)
27605 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27606 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27607 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27608 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27609 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27610 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27612 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27614 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27615 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27616 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27617 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27618 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27619 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27629 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
27630 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
27632 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
27634 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
27635 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27641 use_vec_merge = TARGET_SSE4_1;
27649 /* For the two element vectors, we implement a VEC_CONCAT with
27650 the extraction of the other element. */
27652 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
27653 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
27656 op0 = val, op1 = tmp;
27658 op0 = tmp, op1 = val;
27660 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
27661 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27666 use_vec_merge = TARGET_SSE4_1;
27673 use_vec_merge = true;
27677 /* tmp = target = A B C D */
27678 tmp = copy_to_reg (target);
27679 /* target = A A B B */
27680 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
27681 /* target = X A B B */
27682 ix86_expand_vector_set (false, target, val, 0);
27683 /* target = A X C D */
27684 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27685 const1_rtx, const0_rtx,
27686 GEN_INT (2+4), GEN_INT (3+4)));
27690 /* tmp = target = A B C D */
27691 tmp = copy_to_reg (target);
27692 /* tmp = X B C D */
27693 ix86_expand_vector_set (false, tmp, val, 0);
27694 /* target = A B X D */
27695 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27696 const0_rtx, const1_rtx,
27697 GEN_INT (0+4), GEN_INT (3+4)));
27701 /* tmp = target = A B C D */
27702 tmp = copy_to_reg (target);
27703 /* tmp = X B C D */
27704 ix86_expand_vector_set (false, tmp, val, 0);
27705 /* target = A B X D */
27706 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27707 const0_rtx, const1_rtx,
27708 GEN_INT (2+4), GEN_INT (0+4)));
27712 gcc_unreachable ();
27717 use_vec_merge = TARGET_SSE4_1;
27721 /* Element 0 handled by vec_merge below. */
27724 use_vec_merge = true;
27730 /* With SSE2, use integer shuffles to swap element 0 and ELT,
27731 store into element 0, then shuffle them back. */
27735 order[0] = GEN_INT (elt);
27736 order[1] = const1_rtx;
27737 order[2] = const2_rtx;
27738 order[3] = GEN_INT (3);
27739 order[elt] = const0_rtx;
27741 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27742 order[1], order[2], order[3]));
27744 ix86_expand_vector_set (false, target, val, 0);
27746 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27747 order[1], order[2], order[3]));
27751 /* For SSE1, we have to reuse the V4SF code. */
27752 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
27753 gen_lowpart (SFmode, val), elt);
27758 use_vec_merge = TARGET_SSE2;
27761 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27765 use_vec_merge = TARGET_SSE4_1;
27772 half_mode = V16QImode;
27778 half_mode = V8HImode;
27784 half_mode = V4SImode;
27790 half_mode = V2DImode;
27796 half_mode = V4SFmode;
27802 half_mode = V2DFmode;
27808 /* Compute offset. */
27812 gcc_assert (i <= 1);
27814 /* Extract the half. */
27815 tmp = gen_reg_rtx (half_mode);
27816 emit_insn ((*gen_extract[j][i]) (tmp, target));
27818 /* Put val in tmp at elt. */
27819 ix86_expand_vector_set (false, tmp, val, elt);
27822 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
27831 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
27832 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
27833 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27837 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27839 emit_move_insn (mem, target);
27841 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27842 emit_move_insn (tmp, val);
27844 emit_move_insn (target, mem);
27849 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
27851 enum machine_mode mode = GET_MODE (vec);
27852 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27853 bool use_vec_extr = false;
27866 use_vec_extr = true;
27870 use_vec_extr = TARGET_SSE4_1;
27882 tmp = gen_reg_rtx (mode);
27883 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
27884 GEN_INT (elt), GEN_INT (elt),
27885 GEN_INT (elt+4), GEN_INT (elt+4)));
27889 tmp = gen_reg_rtx (mode);
27890 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
27894 gcc_unreachable ();
27897 use_vec_extr = true;
27902 use_vec_extr = TARGET_SSE4_1;
27916 tmp = gen_reg_rtx (mode);
27917 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
27918 GEN_INT (elt), GEN_INT (elt),
27919 GEN_INT (elt), GEN_INT (elt)));
27923 tmp = gen_reg_rtx (mode);
27924 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
27928 gcc_unreachable ();
27931 use_vec_extr = true;
27936 /* For SSE1, we have to reuse the V4SF code. */
27937 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
27938 gen_lowpart (V4SFmode, vec), elt);
27944 use_vec_extr = TARGET_SSE2;
27947 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27951 use_vec_extr = TARGET_SSE4_1;
27955 /* ??? Could extract the appropriate HImode element and shift. */
27962 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
27963 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
27965 /* Let the rtl optimizers know about the zero extension performed. */
27966 if (inner_mode == QImode || inner_mode == HImode)
27968 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
27969 target = gen_lowpart (SImode, target);
27972 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27976 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27978 emit_move_insn (mem, vec);
27980 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27981 emit_move_insn (target, tmp);
27985 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
27986 pattern to reduce; DEST is the destination; IN is the input vector. */
27989 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
27991 rtx tmp1, tmp2, tmp3;
27993 tmp1 = gen_reg_rtx (V4SFmode);
27994 tmp2 = gen_reg_rtx (V4SFmode);
27995 tmp3 = gen_reg_rtx (V4SFmode);
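/* Dataflow sketch, writing `op' for the reduced (commutative) operation:
   with IN = { a, b, c, d }, movhlps gives { c, d, c, d }, the first FN
   gives { c op a, d op b, ... }, the shuffle moves (d op b) into
   element 0, and the final FN leaves (c op a) op (d op b) there.  */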
27997 emit_insn (gen_sse_movhlps (tmp1, in, in));
27998 emit_insn (fn (tmp2, tmp1, in));
28000 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
28001 const1_rtx, const1_rtx,
28002 GEN_INT (1+4), GEN_INT (1+4)));
28003 emit_insn (fn (dest, tmp2, tmp3));
28006 /* Target hook for scalar_mode_supported_p. */
28008 ix86_scalar_mode_supported_p (enum machine_mode mode)
28010 if (DECIMAL_FLOAT_MODE_P (mode))
28011 return default_decimal_float_supported_p ();
28012 else if (mode == TFmode)
28015 return default_scalar_mode_supported_p (mode);
28018 /* Implements target hook vector_mode_supported_p. */
28020 ix86_vector_mode_supported_p (enum machine_mode mode)
28022 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
28024 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
28026 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
28028 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
28030 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
28035 /* Target hook for c_mode_for_suffix. */
28036 static enum machine_mode
28037 ix86_c_mode_for_suffix (char suffix)
28047 /* Worker function for TARGET_MD_ASM_CLOBBERS.
28049 We do this in the new i386 backend to maintain source compatibility
28050 with the old cc0-based compiler. */
28053 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
28054 tree inputs ATTRIBUTE_UNUSED,
28057 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
28059 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
28064 /* Implements the target vector targetm.asm.encode_section_info. This
28065 is not used by NetWare. */
28067 static void ATTRIBUTE_UNUSED
28068 ix86_encode_section_info (tree decl, rtx rtl, int first)
28070 default_encode_section_info (decl, rtl, first);
28072 if (TREE_CODE (decl) == VAR_DECL
28073 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
28074 && ix86_in_large_data_p (decl))
28075 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
28078 /* Worker function for REVERSE_CONDITION. */
28081 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
28083 return (mode != CCFPmode && mode != CCFPUmode
28084 ? reverse_condition (code)
28085 : reverse_condition_maybe_unordered (code));
28088 /* Output code to perform an x87 FP register move, from OPERANDS[1] to OPERANDS[0]. */
28092 output_387_reg_move (rtx insn, rtx *operands)
28094 if (REG_P (operands[0]))
28096 if (REG_P (operands[1])
28097 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28099 if (REGNO (operands[0]) == FIRST_STACK_REG)
28100 return output_387_ffreep (operands, 0);
28101 return "fstp\t%y0";
28103 if (STACK_TOP_P (operands[0]))
28104 return "fld%Z1\t%y1";
28107 else if (MEM_P (operands[0]))
28109 gcc_assert (REG_P (operands[1]));
28110 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28111 return "fstp%Z0\t%y0";
28114 /* There is no non-popping store to memory for XFmode.
28115 So if we need one, follow the store with a load. */
28116 if (GET_MODE (operands[0]) == XFmode)
28117 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
28119 return "fst%Z0\t%y0";
28126 /* Output code to perform a conditional jump to LABEL if the C2 flag in
28127 the FP status register is set. */
28130 ix86_emit_fp_unordered_jump (rtx label)
28132 rtx reg = gen_reg_rtx (HImode);
28135 emit_insn (gen_x86_fnstsw_1 (reg));
28137 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
28139 emit_insn (gen_x86_sahf_1 (reg));
28141 temp = gen_rtx_REG (CCmode, FLAGS_REG);
28142 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
28146 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
28148 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
28149 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
28152 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
28153 gen_rtx_LABEL_REF (VOIDmode, label),
28155 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
28157 emit_jump_insn (temp);
28158 predict_jump (REG_BR_PROB_BASE * 10 / 100);
28161 /* Output code to perform a log1p XFmode calculation. */
28163 void ix86_emit_i387_log1p (rtx op0, rtx op1)
28165 rtx label1 = gen_label_rtx ();
28166 rtx label2 = gen_label_rtx ();
28168 rtx tmp = gen_reg_rtx (XFmode);
28169 rtx tmp2 = gen_reg_rtx (XFmode);
28172 emit_insn (gen_absxf2 (tmp, op1));
28173 test = gen_rtx_GE (VOIDmode, tmp,
28174 CONST_DOUBLE_FROM_REAL_VALUE (
28175 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
28177 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
28179 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28180 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
28181 emit_jump (label2);
28183 emit_label (label1);
28184 emit_move_insn (tmp, CONST1_RTX (XFmode));
28185 emit_insn (gen_addxf3 (tmp, op1, tmp));
28186 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28187 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
28189 emit_label (label2);
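/* A minimal C sketch of the two-branch strategy above.  Here
   log2_1p () is a hypothetical stand-in for what fyl2xp1 computes:
   log2 (1 + x) taken from X directly, so no low bits of X are lost
   forming 1.0 + x.  The threshold is 1 - sqrt(2)/2, the bound within
   which fyl2xp1 is specified to be accurate.

     #include <math.h>

     double log1p_sketch (double x)
     {
       const double t = 0.29289321881345247561810596348408353;
       if (fabs (x) < t)
         return M_LN2 * log2_1p (x);
       return M_LN2 * log2 (1.0 + x);
     }
*/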
28192 /* Output code to perform a Newton-Raphson approximation of a single precision
28193 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
28195 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
28197 rtx x0, x1, e0, e1, two;
28199 x0 = gen_reg_rtx (mode);
28200 e0 = gen_reg_rtx (mode);
28201 e1 = gen_reg_rtx (mode);
28202 x1 = gen_reg_rtx (mode);
28204 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
28206 if (VECTOR_MODE_P (mode))
28207 two = ix86_build_const_vector (SFmode, true, two);
28209 two = force_reg (mode, two);
28211 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
28213 /* x0 = rcp(b) estimate */
28214 emit_insn (gen_rtx_SET (VOIDmode, x0,
28215 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
28218 emit_insn (gen_rtx_SET (VOIDmode, e0,
28219 gen_rtx_MULT (mode, x0, a)));
28221 emit_insn (gen_rtx_SET (VOIDmode, e1,
28222 gen_rtx_MULT (mode, x0, b)));
28224 emit_insn (gen_rtx_SET (VOIDmode, x1,
28225 gen_rtx_MINUS (mode, two, e1)));
28226 /* res = e0 * x1 */
28227 emit_insn (gen_rtx_SET (VOIDmode, res,
28228 gen_rtx_MULT (mode, e0, x1)));
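/* Scalar C sketch of the sequence above.  The initial division below
   stands in for the ~12-bit rcpps estimate; the single Newton-Raphson
   step roughly doubles the number of correct bits.

     float swdiv_sketch (float a, float b)
     {
       float x0 = 1.0f / b;
       float e0 = x0 * a;
       float e1 = x0 * b;
       float x1 = 2.0f - e1;
       return e0 * x1;
     }
*/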
28231 /* Output code to perform a Newton-Raphson approximation of a
28232 single precision floating point [reciprocal] square root. */
28234 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
28237 rtx x0, e0, e1, e2, e3, mthree, mhalf;
28240 x0 = gen_reg_rtx (mode);
28241 e0 = gen_reg_rtx (mode);
28242 e1 = gen_reg_rtx (mode);
28243 e2 = gen_reg_rtx (mode);
28244 e3 = gen_reg_rtx (mode);
28246 real_from_integer (&r, VOIDmode, -3, -1, 0);
28247 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28249 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
28250 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28252 if (VECTOR_MODE_P (mode))
28254 mthree = ix86_build_const_vector (SFmode, true, mthree);
28255 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
28258 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
28259 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
28261 /* x0 = rsqrt(a) estimate */
28262 emit_insn (gen_rtx_SET (VOIDmode, x0,
28263 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
28266 /* If a == 0.0, filter out the infinite rsqrt estimate so that sqrt(0.0) yields 0.0 rather than NaN. */
28271 zero = gen_reg_rtx (mode);
28272 mask = gen_reg_rtx (mode);
28274 zero = force_reg (mode, CONST0_RTX(mode));
28275 emit_insn (gen_rtx_SET (VOIDmode, mask,
28276 gen_rtx_NE (mode, zero, a)));
28278 emit_insn (gen_rtx_SET (VOIDmode, x0,
28279 gen_rtx_AND (mode, x0, mask)));
28283 emit_insn (gen_rtx_SET (VOIDmode, e0,
28284 gen_rtx_MULT (mode, x0, a)));
28286 emit_insn (gen_rtx_SET (VOIDmode, e1,
28287 gen_rtx_MULT (mode, e0, x0)));
28290 mthree = force_reg (mode, mthree);
28291 emit_insn (gen_rtx_SET (VOIDmode, e2,
28292 gen_rtx_PLUS (mode, e1, mthree)));
28294 mhalf = force_reg (mode, mhalf);
28296 /* e3 = -.5 * x0 */
28297 emit_insn (gen_rtx_SET (VOIDmode, e3,
28298 gen_rtx_MULT (mode, x0, mhalf)));
28300 /* e3 = -.5 * e0 */
28301 emit_insn (gen_rtx_SET (VOIDmode, e3,
28302 gen_rtx_MULT (mode, e0, mhalf)));
28303 /* ret = e2 * e3 */
28304 emit_insn (gen_rtx_SET (VOIDmode, res,
28305 gen_rtx_MULT (mode, e2, e3)));
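/* Scalar C sketch of the sequence above (sqrtf from <math.h> stands in
   for the rsqrtss estimate).  Note that the real expander must also
   zero X0 when A is 0.0 (the masking above), so that sqrt (0.0) does
   not become 0.0 * Inf = NaN.

     float swsqrt_sketch (float a, int recip)
     {
       float x0 = 1.0f / sqrtf (a);
       float e0 = x0 * a;
       float e1 = e0 * x0;
       float e2 = e1 - 3.0f;
       float e3 = (recip ? x0 : e0) * -0.5f;
       return e2 * e3;
     }
*/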
28308 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
28310 static void ATTRIBUTE_UNUSED
28311 i386_solaris_elf_named_section (const char *name, unsigned int flags,
28314 /* With Binutils 2.15, the "@unwind" marker must be specified on
28315 every occurrence of the ".eh_frame" section, not just the first
28316 one. */
28317 if (TARGET_64BIT
28318 && strcmp (name, ".eh_frame") == 0)
28320 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28321 flags & SECTION_WRITE ? "aw" : "a");
28324 default_elf_asm_named_section (name, flags, decl);
28327 /* Return the mangling of TYPE if it is an extended fundamental type. */
28329 static const char *
28330 ix86_mangle_type (const_tree type)
28332 type = TYPE_MAIN_VARIANT (type);
28334 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28335 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28338 switch (TYPE_MODE (type))
28341 /* __float128 is "g". */
28344 /* "long double" or __float80 is "e". */
28351 /* For 32-bit code we can save PIC register setup by using
28352 __stack_chk_fail_local hidden function instead of calling
28353 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
28354 register, so it is better to call __stack_chk_fail directly. */
28357 ix86_stack_protect_fail (void)
28359 return TARGET_64BIT
28360 ? default_external_stack_protect_fail ()
28361 : default_hidden_stack_protect_fail ();
28364 /* Select a format to encode pointers in exception handling data. CODE
28365 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28366 true if the symbol may be affected by dynamic relocations.
28368 ??? All x86 object file formats are capable of representing this.
28369 After all, the relocation needed is the same as for the call insn.
28370 Whether or not a particular assembler allows us to enter such, I
28371 guess we'll have to see. */
28373 asm_preferred_eh_data_format (int code, int global)
28377 int type = DW_EH_PE_sdata8;
28379 || ix86_cmodel == CM_SMALL_PIC
28380 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28381 type = DW_EH_PE_sdata4;
28382 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28384 if (ix86_cmodel == CM_SMALL
28385 || (ix86_cmodel == CM_MEDIUM && code))
28386 return DW_EH_PE_udata4;
28387 return DW_EH_PE_absptr;
28390 /* Expand copysign from SIGN to the positive value ABS_VALUE
28391 storing in RESULT. If MASK is non-null, it shall be a mask to mask out
28392 the sign bit. */
28394 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28396 enum machine_mode mode = GET_MODE (sign);
28397 rtx sgn = gen_reg_rtx (mode);
28398 if (mask == NULL_RTX)
28400 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28401 if (!VECTOR_MODE_P (mode))
28403 /* We need to generate a scalar mode mask in this case. */
28404 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28405 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28406 mask = gen_reg_rtx (mode);
28407 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28411 mask = gen_rtx_NOT (mode, mask);
28412 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28413 gen_rtx_AND (mode, mask, sign)));
28414 emit_insn (gen_rtx_SET (VOIDmode, result,
28415 gen_rtx_IOR (mode, abs_value, sgn)));
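/* The same operation in plain C, as a bit-twiddling sketch:

     #include <string.h>
     #include <stdint.h>

     double copysign_pos_sketch (double abs_value, double sign)
     {
       uint64_t a, s;
       memcpy (&a, &abs_value, 8);
       memcpy (&s, &sign, 8);
       a |= s & ((uint64_t) 1 << 63);
       memcpy (&abs_value, &a, 8);
       return abs_value;
     }

   The AND with the sign-bit mask isolates the sign of SIGN, and the
   IOR transplants it onto the (already positive) ABS_VALUE.  */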
28418 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28419 mask for masking out the sign-bit is stored in *SMASK, if that is
28422 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28424 enum machine_mode mode = GET_MODE (op0);
28427 xa = gen_reg_rtx (mode);
28428 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28429 if (!VECTOR_MODE_P (mode))
28431 /* We need to generate a scalar mode mask in this case. */
28432 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28433 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28434 mask = gen_reg_rtx (mode);
28435 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28437 emit_insn (gen_rtx_SET (VOIDmode, xa,
28438 gen_rtx_AND (mode, op0, mask)));
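/* Plain C sketch of the AND-mask fabs above:

     #include <string.h>
     #include <stdint.h>

     double fabs_sketch (double x)
     {
       uint64_t u;
       memcpy (&u, &x, 8);
       u &= ~((uint64_t) 1 << 63);
       memcpy (&x, &u, 8);
       return x;
     }
*/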
28446 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28447 swapping the operands if SWAP_OPERANDS is true. The expanded
28448 code is a forward jump to a newly created label in case the
28449 comparison is true. The generated label rtx is returned. */
28451 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28452 bool swap_operands)
28463 label = gen_label_rtx ();
28464 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28465 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28466 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28467 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28468 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28469 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28470 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28471 JUMP_LABEL (tmp) = label;
28476 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28477 using comparison code CODE. Operands are swapped for the comparison if
28478 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
28480 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28481 bool swap_operands)
28483 enum machine_mode mode = GET_MODE (op0);
28484 rtx mask = gen_reg_rtx (mode);
28493 if (mode == DFmode)
28494 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28495 gen_rtx_fmt_ee (code, mode, op0, op1)));
28497 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28498 gen_rtx_fmt_ee (code, mode, op0, op1)));
28503 /* Generate and return a rtx of mode MODE for 2**n where n is the number
28504 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
28506 ix86_gen_TWO52 (enum machine_mode mode)
28508 REAL_VALUE_TYPE TWO52r;
28511 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28512 TWO52 = const_double_from_real_value (TWO52r, mode);
28513 TWO52 = force_reg (mode, TWO52);
28518 /* Expand SSE sequence for computing lround from OP1 storing
28519 into OP0. */
28521 ix86_expand_lround (rtx op0, rtx op1)
28523 /* C code for the stuff we're doing below:
28524 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28525 return (long)tmp;
28526 */
28527 enum machine_mode mode = GET_MODE (op1);
28528 const struct real_format *fmt;
28529 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28532 /* load nextafter (0.5, 0.0) */
28533 fmt = REAL_MODE_FORMAT (mode);
28534 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28535 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28537 /* adj = copysign (0.5, op1) */
28538 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28539 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28541 /* adj = op1 + adj */
28542 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28544 /* op0 = (imode)adj */
28545 expand_fix (op0, adj, 0);
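/* The same computation in plain C (nextafter and copysign are from
   <math.h>):

     long lround_sketch (double x)
     {
       double h = nextafter (0.5, 0.0);
       return (long) (x + copysign (h, x));
     }

   Using the largest double below 0.5 matters: for
   x = 0.49999999999999994, x + h stays below 1.0 and truncates to 0,
   whereas adding an exact 0.5 would round up to 1.0 and give the
   wrong answer.  */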
28548 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing
28549 into OPERAND0. */
28551 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28553 /* C code for the stuff we're doing below (for do_floor):
28554 xi = (long)op1;
28555 xi -= (double)xi > op1 ? 1 : 0;
28556 return xi;
28557 */
28558 enum machine_mode fmode = GET_MODE (op1);
28559 enum machine_mode imode = GET_MODE (op0);
28560 rtx ireg, freg, label, tmp;
28562 /* reg = (long)op1 */
28563 ireg = gen_reg_rtx (imode);
28564 expand_fix (ireg, op1, 0);
28566 /* freg = (double)reg */
28567 freg = gen_reg_rtx (fmode);
28568 expand_float (freg, ireg, 0);
28570 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28571 label = ix86_expand_sse_compare_and_jump (UNLE,
28572 freg, op1, !do_floor);
28573 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28574 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28575 emit_move_insn (ireg, tmp);
28577 emit_label (label);
28578 LABEL_NUSES (label) = 1;
28580 emit_move_insn (op0, ireg);
28583 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28584 result in OPERAND0. */
28586 ix86_expand_rint (rtx operand0, rtx operand1)
28588 /* C code for the stuff we're doing below:
28589 xa = fabs (operand1);
28590 if (!isless (xa, 2**52))
28591 return operand1;
28592 xa = xa + 2**52 - 2**52;
28593 return copysign (xa, operand1);
28594 */
28595 enum machine_mode mode = GET_MODE (operand0);
28596 rtx res, xa, label, TWO52, mask;
28598 res = gen_reg_rtx (mode);
28599 emit_move_insn (res, operand1);
28601 /* xa = abs (operand1) */
28602 xa = ix86_expand_sse_fabs (res, &mask);
28604 /* if (!isless (xa, TWO52)) goto label; */
28605 TWO52 = ix86_gen_TWO52 (mode);
28606 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28608 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28609 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28611 ix86_sse_copysign_to_positive (res, xa, res, mask);
28613 emit_label (label);
28614 LABEL_NUSES (label) = 1;
28616 emit_move_insn (operand0, res);
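/* The TWO52 trick in plain C (a sketch; it relies on the default
   round-to-nearest mode, and on the add/sub pair not being folded
   away, which the RTL above guarantees but a C compiler might not):

     double rint_sketch (double x)
     {
       volatile double t;
       if (!(fabs (x) < 4503599627370496.0))   likewise 2**52
         return x;
       t = fabs (x) + 4503599627370496.0;
       return copysign (t - 4503599627370496.0, x);
     }

   Adding 2**52 pushes the fraction bits out of the significand, so
   the rounding to an integer happens inside the addition itself.  */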
28619 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28620 into OPERAND0. Sequence that works without relying on DImode truncation
28621 via cvttsd2siq that is only available on 64bit targets. */
28622 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28624 /* C code for the stuff we expand below.
28625 double xa = fabs (x), x2;
28626 if (!isless (xa, TWO52))
28627 return x;
28628 xa = xa + TWO52 - TWO52;
28629 x2 = copysign (xa, x);
28630 Compensate.  Floor:
28631 if (x2 > x)
28632 x2 -= 1;
28633 Compensate.  Ceil:
28634 if (x2 < x)
28635 x2 += 1;
28636 return x2;
28637 */
28638 enum machine_mode mode = GET_MODE (operand0);
28639 rtx xa, TWO52, tmp, label, one, res, mask;
28641 TWO52 = ix86_gen_TWO52 (mode);
28643 /* Temporary for holding the result, initialized to the input
28644 operand to ease control flow. */
28645 res = gen_reg_rtx (mode);
28646 emit_move_insn (res, operand1);
28648 /* xa = abs (operand1) */
28649 xa = ix86_expand_sse_fabs (res, &mask);
28651 /* if (!isless (xa, TWO52)) goto label; */
28652 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28654 /* xa = xa + TWO52 - TWO52; */
28655 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28656 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28658 /* xa = copysign (xa, operand1) */
28659 ix86_sse_copysign_to_positive (xa, xa, res, mask);
28661 /* generate 1.0 or -1.0 */
28662 one = force_reg (mode,
28663 const_double_from_real_value (do_floor
28664 ? dconst1 : dconstm1, mode));
28666 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28667 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28668 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28669 gen_rtx_AND (mode, one, tmp)));
28670 /* We always need to subtract here to preserve signed zero. */
28671 tmp = expand_simple_binop (mode, MINUS,
28672 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28673 emit_move_insn (res, tmp);
28675 emit_label (label);
28676 LABEL_NUSES (label) = 1;
28678 emit_move_insn (operand0, res);
28681 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28682 into OPERAND0. */
28684 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
28686 /* C code for the stuff we expand below.
28687 double xa = fabs (x), x2;
28688 if (!isless (xa, TWO52))
28689 return x;
28690 x2 = (double)(long)x;
28691 Compensate.  Floor:
28692 if (x2 > x)
28693 x2 -= 1;
28694 Compensate.  Ceil:
28695 if (x2 < x)
28696 x2 += 1;
28697 if (HONOR_SIGNED_ZEROS (mode))
28698 return copysign (x2, x);
28699 return x2;
28700 */
28701 enum machine_mode mode = GET_MODE (operand0);
28702 rtx xa, xi, TWO52, tmp, label, one, res, mask;
28704 TWO52 = ix86_gen_TWO52 (mode);
28706 /* Temporary for holding the result, initialized to the input
28707 operand to ease control flow. */
28708 res = gen_reg_rtx (mode);
28709 emit_move_insn (res, operand1);
28711 /* xa = abs (operand1) */
28712 xa = ix86_expand_sse_fabs (res, &mask);
28714 /* if (!isless (xa, TWO52)) goto label; */
28715 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28717 /* xa = (double)(long)x */
28718 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28719 expand_fix (xi, res, 0);
28720 expand_float (xa, xi, 0);
28723 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28725 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28726 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28727 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28728 gen_rtx_AND (mode, one, tmp)));
28729 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
28730 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28731 emit_move_insn (res, tmp);
28733 if (HONOR_SIGNED_ZEROS (mode))
28734 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28736 emit_label (label);
28737 LABEL_NUSES (label) = 1;
28739 emit_move_insn (operand0, res);
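/* Scalar sketch of the compensation step above (valid while the
   fix/float round trip is exact, i.e. below TWO52):

     double floor_sketch (double x)
     {
       double x2 = (double) (long long) x;
       if (x2 > x)
         x2 -= 1.0;
       return x2;
     }

   The ceil variant tests x2 < x and adds 1.0 instead; the AND of the
   compare mask with 1.0 above is the branch-free form of the same
   adjustment.  */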
28742 /* Expand SSE sequence for computing round from OPERAND1 storing
28743 into OPERAND0. Sequence that works without relying on DImode truncation
28744 via cvttsd2siq that is only available on 64bit targets. */
28746 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
28748 /* C code for the stuff we expand below.
28749 double xa = fabs (x), xa2, x2;
28750 if (!isless (xa, TWO52))
28751 return x;
28752 Using the absolute value and copying back sign makes
28753 -0.0 -> -0.0 correct.
28754 xa2 = xa + TWO52 - TWO52;
28755 Compensate.
28756 dxa = xa2 - xa;
28757 if (dxa <= -0.5)
28758 xa2 += 1;
28759 else if (dxa > 0.5)
28760 xa2 -= 1;
28761 x2 = copysign (xa2, x);
28762 return x2;
28763 */
28764 enum machine_mode mode = GET_MODE (operand0);
28765 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
28767 TWO52 = ix86_gen_TWO52 (mode);
28769 /* Temporary for holding the result, initialized to the input
28770 operand to ease control flow. */
28771 res = gen_reg_rtx (mode);
28772 emit_move_insn (res, operand1);
28774 /* xa = abs (operand1) */
28775 xa = ix86_expand_sse_fabs (res, &mask);
28777 /* if (!isless (xa, TWO52)) goto label; */
28778 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28780 /* xa2 = xa + TWO52 - TWO52; */
28781 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28782 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
28784 /* dxa = xa2 - xa; */
28785 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
28787 /* generate 0.5, 1.0 and -0.5 */
28788 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
28789 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
28790 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
28794 tmp = gen_reg_rtx (mode);
28795 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
28796 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
28797 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28798 gen_rtx_AND (mode, one, tmp)));
28799 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28800 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
28801 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
28802 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28803 gen_rtx_AND (mode, one, tmp)));
28804 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28806 /* res = copysign (xa2, operand1) */
28807 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
28809 emit_label (label);
28810 LABEL_NUSES (label) = 1;
28812 emit_move_insn (operand0, res);
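/* Scalar sketch of the half-compensation above (same fold-away caveat
   as the rint sketch).  XA2 is the round-to-nearest-even result;
   nudging it by 1.0 when DXA crosses the half-way marks converts
   ties-to-even into the round-half-away semantics of round ():

     double round_sketch (double x)
     {
       double xa, xa2, dxa;
       xa = fabs (x);
       if (!(xa < 4503599627370496.0))   likewise 2**52
         return x;
       xa2 = (xa + 4503599627370496.0) - 4503599627370496.0;
       dxa = xa2 - xa;
       if (dxa > 0.5)
         xa2 -= 1.0;
       else if (dxa <= -0.5)
         xa2 += 1.0;
       return copysign (xa2, x);
     }

   E.g. xa = 2.5 gives xa2 = 2.0 and dxa = -0.5, so xa2 becomes 3.0,
   matching round (2.5) == 3.0.  */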
28815 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28816 into OPERAND0. */
28818 ix86_expand_trunc (rtx operand0, rtx operand1)
28820 /* C code for SSE variant we expand below.
28821 double xa = fabs (x), x2;
28822 if (!isless (xa, TWO52))
28823 return x;
28824 x2 = (double)(long)x;
28825 if (HONOR_SIGNED_ZEROS (mode))
28826 return copysign (x2, x);
28827 return x2;
28828 */
28829 enum machine_mode mode = GET_MODE (operand0);
28830 rtx xa, xi, TWO52, label, res, mask;
28832 TWO52 = ix86_gen_TWO52 (mode);
28834 /* Temporary for holding the result, initialized to the input
28835 operand to ease control flow. */
28836 res = gen_reg_rtx (mode);
28837 emit_move_insn (res, operand1);
28839 /* xa = abs (operand1) */
28840 xa = ix86_expand_sse_fabs (res, &mask);
28842 /* if (!isless (xa, TWO52)) goto label; */
28843 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28845 /* x = (double)(long)x */
28846 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28847 expand_fix (xi, res, 0);
28848 expand_float (res, xi, 0);
28850 if (HONOR_SIGNED_ZEROS (mode))
28851 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28853 emit_label (label);
28854 LABEL_NUSES (label) = 1;
28856 emit_move_insn (operand0, res);
28859 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28860 into OPERAND0. Sequence that works without relying on DImode truncation
28861 via cvttsd2siq that is only available on 64bit targets. */
28862 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
28864 enum machine_mode mode = GET_MODE (operand0);
28865 rtx xa, mask, TWO52, label, one, res, smask, tmp;
28867 /* C code for SSE variant we expand below.
28868 double xa = fabs (x), x2;
28869 if (!isless (xa, TWO52))
28870 return x;
28871 xa2 = xa + TWO52 - TWO52;
28872 Compensate:
28873 if (xa2 > xa)
28874 xa2 -= 1.0;
28875 x2 = copysign (xa2, x);
28876 return x2;
28877 */
28879 TWO52 = ix86_gen_TWO52 (mode);
28881 /* Temporary for holding the result, initialized to the input
28882 operand to ease control flow. */
28883 res = gen_reg_rtx (mode);
28884 emit_move_insn (res, operand1);
28886 /* xa = abs (operand1) */
28887 xa = ix86_expand_sse_fabs (res, &smask);
28889 /* if (!isless (xa, TWO52)) goto label; */
28890 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28892 /* res = xa + TWO52 - TWO52; */
28893 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28894 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
28895 emit_move_insn (res, tmp);
28898 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28900 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
28901 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
28902 emit_insn (gen_rtx_SET (VOIDmode, mask,
28903 gen_rtx_AND (mode, mask, one)));
28904 tmp = expand_simple_binop (mode, MINUS,
28905 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
28906 emit_move_insn (res, tmp);
28908 /* res = copysign (res, operand1) */
28909 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
28911 emit_label (label);
28912 LABEL_NUSES (label) = 1;
28914 emit_move_insn (operand0, res);
28917 /* Expand SSE sequence for computing round from OPERAND1 storing
28918 into OPERAND0. */
28920 ix86_expand_round (rtx operand0, rtx operand1)
28922 /* C code for the stuff we're doing below:
28923 double xa = fabs (x);
28924 if (!isless (xa, TWO52))
28925 return x;
28926 xa = (double)(long)(xa + nextafter (0.5, 0.0));
28927 return copysign (xa, x);
28928 */
28929 enum machine_mode mode = GET_MODE (operand0);
28930 rtx res, TWO52, xa, label, xi, half, mask;
28931 const struct real_format *fmt;
28932 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28934 /* Temporary for holding the result, initialized to the input
28935 operand to ease control flow. */
28936 res = gen_reg_rtx (mode);
28937 emit_move_insn (res, operand1);
28939 TWO52 = ix86_gen_TWO52 (mode);
28940 xa = ix86_expand_sse_fabs (res, &mask);
28941 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28943 /* load nextafter (0.5, 0.0) */
28944 fmt = REAL_MODE_FORMAT (mode);
28945 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28946 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28948 /* xa = xa + 0.5 */
28949 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
28950 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
28952 /* xa = (double)(int64_t)xa */
28953 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28954 expand_fix (xi, xa, 0);
28955 expand_float (xa, xi, 0);
28957 /* res = copysign (xa, operand1) */
28958 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
28960 emit_label (label);
28961 LABEL_NUSES (label) = 1;
28963 emit_move_insn (operand0, res);
28967 /* Table of valid machine attributes. */
28968 static const struct attribute_spec ix86_attribute_table[] =
28970 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
28971 /* Stdcall attribute says callee is responsible for popping arguments
28972 if they are not variable. */
28973 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28974 /* Fastcall attribute says callee is responsible for popping arguments
28975 if they are not variable. */
28976 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28977 /* Cdecl attribute says the callee is a normal C declaration */
28978 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28979 /* Regparm attribute specifies how many integer arguments are to be
28980 passed in registers. */
28981 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
28982 /* Sseregparm attribute says we are using x86_64 calling conventions
28983 for FP arguments. */
28984 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28985 /* force_align_arg_pointer says this function realigns the stack at entry. */
28986 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
28987 false, true, true, ix86_handle_cconv_attribute },
28988 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
28989 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
28990 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
28991 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
28993 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
28994 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
28995 #ifdef SUBTARGET_ATTRIBUTE_TABLE
28996 SUBTARGET_ATTRIBUTE_TABLE,
28998 /* ms_abi and sysv_abi calling convention function attributes. */
28999 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29000 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29001 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute },
29003 { NULL, 0, 0, false, false, false, NULL }
29006 /* Implement targetm.vectorize.builtin_vectorization_cost. */
29008 ix86_builtin_vectorization_cost (bool runtime_test)
29010 /* If the branch of the runtime test is taken - i.e. the vectorized
29011 version is skipped - this incurs a misprediction cost (because the
29012 vectorized version is expected to be the fall-through). So we subtract
29013 the latency of a mispredicted branch from the costs that are incurred
29014 when the vectorized version is executed.
29016 TODO: The values in individual target tables have to be tuned or new
29017 fields may be needed. For example, on K8, the default branch path is the
29018 not-taken path. If the taken path is predicted correctly, the minimum
29019 penalty of going down the taken-path is 1 cycle. If the taken-path is
29020 not predicted correctly, then the minimum penalty is 10 cycles. */
29024 return (-(ix86_cost->cond_taken_branch_cost));
29030 /* Implement targetm.vectorize.builtin_vec_perm. */
29033 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
29035 tree itype = TREE_TYPE (vec_type);
29036 bool u = TYPE_UNSIGNED (itype);
29037 enum machine_mode vmode = TYPE_MODE (vec_type);
29038 enum ix86_builtins fcode = fcode; /* Silence bogus warning. */
29039 bool ok = TARGET_SSE2;
29045 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
29048 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
29050 itype = ix86_get_builtin_type (IX86_BT_DI);
29055 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
29059 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
29061 itype = ix86_get_builtin_type (IX86_BT_SI);
29065 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
29068 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
29071 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
29074 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
29084 *mask_type = itype;
29085 return ix86_builtins[(int) fcode];
29088 /* Return a vector mode with twice as many elements as VMODE. */
29089 /* ??? Consider moving this to a table generated by genmodes.c. */
29091 static enum machine_mode
29092 doublesize_vector_mode (enum machine_mode vmode)
29096 case V2SFmode: return V4SFmode;
29097 case V1DImode: return V2DImode;
29098 case V2SImode: return V4SImode;
29099 case V4HImode: return V8HImode;
29100 case V8QImode: return V16QImode;
29102 case V2DFmode: return V4DFmode;
29103 case V4SFmode: return V8SFmode;
29104 case V2DImode: return V4DImode;
29105 case V4SImode: return V8SImode;
29106 case V8HImode: return V16HImode;
29107 case V16QImode: return V32QImode;
29109 case V4DFmode: return V8DFmode;
29110 case V8SFmode: return V16SFmode;
29111 case V4DImode: return V8DImode;
29112 case V8SImode: return V16SImode;
29113 case V16HImode: return V32HImode;
29114 case V32QImode: return V64QImode;
29117 gcc_unreachable ();
29121 /* Construct (set target (vec_select op0 (parallel perm))) and
29122 return true if that's a valid instruction in the active ISA. */
29125 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
29127 rtx rperm[MAX_VECT_LEN], x;
29130 for (i = 0; i < nelt; ++i)
29131 rperm[i] = GEN_INT (perm[i]);
29133 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
29134 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
29135 x = gen_rtx_SET (VOIDmode, target, x);
29138 if (recog_memoized (x) < 0)
29146 /* Similar, but generate a vec_concat from op0 and op1 as well. */
29149 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
29150 const unsigned char *perm, unsigned nelt)
29152 enum machine_mode v2mode;
29155 v2mode = doublesize_vector_mode (GET_MODE (op0));
29156 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
29157 return expand_vselect (target, x, perm, nelt);
29160 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29161 in terms of blendp[sd] / pblendw / pblendvb. */
29164 expand_vec_perm_blend (struct expand_vec_perm_d *d)
29166 enum machine_mode vmode = d->vmode;
29167 unsigned i, mask, nelt = d->nelt;
29168 rtx target, op0, op1, x;
29170 if (!TARGET_SSE4_1 || d->op0 == d->op1)
29172 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
29175 /* This is a blend, not a permute. Elements must stay in their
29176 respective lanes. */
29177 for (i = 0; i < nelt; ++i)
29179 unsigned e = d->perm[i];
29180 if (!(e == i || e == i + nelt))
29187 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
29188 decision should be extracted elsewhere, so that we only try that
29189 sequence once all budget==3 options have been tried. */
29191 /* For bytes, see if bytes move in pairs so we can use pblendw with
29192 an immediate argument, rather than pblendvb with a vector argument. */
29193 if (vmode == V16QImode)
29195 bool pblendw_ok = true;
29196 for (i = 0; i < 16 && pblendw_ok; i += 2)
29197 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
29201 rtx rperm[16], vperm;
29203 for (i = 0; i < nelt; ++i)
29204 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
29206 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29207 vperm = force_reg (V16QImode, vperm);
29209 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
29214 target = d->target;
29226 for (i = 0; i < nelt; ++i)
29227 mask |= (d->perm[i] >= nelt) << i;
29231 for (i = 0; i < 2; ++i)
29232 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
29236 for (i = 0; i < 4; ++i)
29237 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
29241 for (i = 0; i < 8; ++i)
29242 mask |= (d->perm[i * 2] >= 16) << i;
29246 target = gen_lowpart (vmode, target);
29247 op0 = gen_lowpart (vmode, op0);
29248 op1 = gen_lowpart (vmode, op1);
29252 gcc_unreachable ();
29255 /* This matches five different patterns with the different modes. */
29256 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
29257 x = gen_rtx_SET (VOIDmode, target, x);
29263 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29264 in terms of the variable form of vpermilps.
29266 Note that we will have already failed the immediate input vpermilps,
29267 which requires that the high and low part shuffle be identical; the
29268 variable form doesn't require that. */
29271 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
29273 rtx rperm[8], vperm;
29276 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
29279 /* We can only permute within the 128-bit lane. */
29280 for (i = 0; i < 8; ++i)
29282 unsigned e = d->perm[i];
29283 if (i < 4 ? e >= 4 : e < 4)
29290 for (i = 0; i < 8; ++i)
29292 unsigned e = d->perm[i];
29294 /* Within each 128-bit lane, the elements of op0 are numbered
29295 from 0 and the elements of op1 are numbered from 4. */
29301 rperm[i] = GEN_INT (e);
29304 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
29305 vperm = force_reg (V8SImode, vperm);
29306 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
29311 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29312 in terms of pshufb or vpperm. */
29315 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
29317 unsigned i, nelt, eltsz;
29318 rtx rperm[16], vperm, target, op0, op1;
29320 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
29322 if (GET_MODE_SIZE (d->vmode) != 16)
29329 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29331 for (i = 0; i < nelt; ++i)
29333 unsigned j, e = d->perm[i];
29334 for (j = 0; j < eltsz; ++j)
29335 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
29338 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29339 vperm = force_reg (V16QImode, vperm);
29341 target = gen_lowpart (V16QImode, d->target);
29342 op0 = gen_lowpart (V16QImode, d->op0);
29343 if (d->op0 == d->op1)
29344 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
29347 op1 = gen_lowpart (V16QImode, d->op1);
29348 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
29354 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
29355 in a single instruction. */
29358 expand_vec_perm_1 (struct expand_vec_perm_d *d)
29360 unsigned i, nelt = d->nelt;
29361 unsigned char perm2[MAX_VECT_LEN];
29363 /* Check plain VEC_SELECT first, because AVX has instructions that could
29364 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
29365 input where SEL+CONCAT may not. */
29366 if (d->op0 == d->op1)
29368 int mask = nelt - 1;
29370 for (i = 0; i < nelt; i++)
29371 perm2[i] = d->perm[i] & mask;
29373 if (expand_vselect (d->target, d->op0, perm2, nelt))
29376 /* There are plenty of patterns in sse.md that are written for
29377 SEL+CONCAT and are not replicated for a single op. Perhaps
29378 that should be changed, to avoid the nastiness here. */
29380 /* Recognize interleave style patterns, which means incrementing
29381 every other permutation operand. */
29382 for (i = 0; i < nelt; i += 2)
29384 perm2[i] = d->perm[i] & mask;
29385 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
29387 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29390 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
29393 for (i = 0; i < nelt; i += 4)
29395 perm2[i + 0] = d->perm[i + 0] & mask;
29396 perm2[i + 1] = d->perm[i + 1] & mask;
29397 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
29398 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
29401 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29406 /* Finally, try the fully general two operand permute. */
29407 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
29410 /* Recognize interleave style patterns with reversed operands. */
29411 if (d->op0 != d->op1)
29413 for (i = 0; i < nelt; ++i)
29415 unsigned e = d->perm[i];
29423 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
29427 /* Try the SSE4.1 blend variable merge instructions. */
29428 if (expand_vec_perm_blend (d))
29431 /* Try one of the AVX vpermil variable permutations. */
29432 if (expand_vec_perm_vpermil (d))
29435 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
29436 if (expand_vec_perm_pshufb (d))
29442 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29443 in terms of a pair of pshuflw + pshufhw instructions. */
29446 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
29448 unsigned char perm2[MAX_VECT_LEN];
29452 if (d->vmode != V8HImode || d->op0 != d->op1)
29455 /* The two permutations only operate in 64-bit lanes. */
29456 for (i = 0; i < 4; ++i)
29457 if (d->perm[i] >= 4)
29459 for (i = 4; i < 8; ++i)
29460 if (d->perm[i] < 4)
29466 /* Emit the pshuflw. */
29467 memcpy (perm2, d->perm, 4);
29468 for (i = 4; i < 8; ++i)
29470 ok = expand_vselect (d->target, d->op0, perm2, 8);
29473 /* Emit the pshufhw. */
29474 memcpy (perm2 + 4, d->perm + 4, 4);
29475 for (i = 0; i < 4; ++i)
29477 ok = expand_vselect (d->target, d->target, perm2, 8);
29483 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29484 the permutation using the SSSE3 palignr instruction. This succeeds
29485 when all of the elements in PERM fit within one vector and we merely
29486 need to shift them down so that a single vector permutation has a
29487 chance to succeed. */
29490 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
29492 unsigned i, nelt = d->nelt;
29497 /* Even with AVX, palignr only operates on 128-bit vectors. */
29498 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29501 min = nelt, max = 0;
29502 for (i = 0; i < nelt; ++i)
29504 unsigned e = d->perm[i];
29510 if (min == 0 || max - min >= nelt)
29513 /* Given that we have SSSE3, we know we'll be able to implement the
29514 single operand permutation after the palignr with pshufb. */
29518 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
29519 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
29520 gen_lowpart (TImode, d->op1),
29521 gen_lowpart (TImode, d->op0), shift));
29523 d->op0 = d->op1 = d->target;
29526 for (i = 0; i < nelt; ++i)
29528 unsigned e = d->perm[i] - min;
29534 /* Test for the degenerate case where the alignment by itself
29535 produces the desired permutation. */
29539 ok = expand_vec_perm_1 (d);
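/* Editorial sketch of the window test above: the permutation is
   reducible by palignr exactly when every selected index falls inside
   a window of fewer than NELT consecutive elements of {op1:op0}:

     static int palignr_window (const unsigned char *perm,
                                unsigned nelt, unsigned *shift)
     {
       unsigned i, lo = 2 * nelt, hi = 0;
       for (i = 0; i < nelt; ++i)
         {
           if (perm[i] < lo) lo = perm[i];
           if (perm[i] > hi) hi = perm[i];
         }
       if (lo == 0 || hi - lo >= nelt)
         return 0;
       *shift = lo;
       return 1;
     }

   After shifting LO elements off the bottom, each PERM[i] - LO indexes
   a single vector, which pshufb can then handle.  */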
29545 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29546 a two vector permutation into a single vector permutation by using
29547 an interleave operation to merge the vectors. */
29550 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
29552 struct expand_vec_perm_d dremap, dfinal;
29553 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
29554 unsigned contents, h1, h2, h3, h4;
29555 unsigned char remap[2 * MAX_VECT_LEN];
29559 if (d->op0 == d->op1)
29562 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
29563 lanes. We can use similar techniques with the vperm2f128 instruction,
29564 but it requires slightly different logic. */
29565 if (GET_MODE_SIZE (d->vmode) != 16)
29568 /* Examine from whence the elements come. */
29570 for (i = 0; i < nelt; ++i)
29571 contents |= 1u << d->perm[i];
29573 /* Split the two input vectors into 4 halves. */
29574 h1 = (1u << nelt2) - 1;
29579 memset (remap, 0xff, sizeof (remap));
29582 /* If the elements all come from the low halves, use interleave low;
29583 similarly for interleave high. If the elements are from mis-matched halves, we
29584 can use shufps for V4SF/V4SI or do a DImode shuffle. */
29585 if ((contents & (h1 | h3)) == contents)
29587 for (i = 0; i < nelt2; ++i)
29590 remap[i + nelt] = i * 2 + 1;
29591 dremap.perm[i * 2] = i;
29592 dremap.perm[i * 2 + 1] = i + nelt;
29595 else if ((contents & (h2 | h4)) == contents)
29597 for (i = 0; i < nelt2; ++i)
29599 remap[i + nelt2] = i * 2;
29600 remap[i + nelt + nelt2] = i * 2 + 1;
29601 dremap.perm[i * 2] = i + nelt2;
29602 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
29605 else if ((contents & (h1 | h4)) == contents)
29607 for (i = 0; i < nelt2; ++i)
29610 remap[i + nelt + nelt2] = i + nelt2;
29611 dremap.perm[i] = i;
29612 dremap.perm[i + nelt2] = i + nelt + nelt2;
29616 dremap.vmode = V2DImode;
29618 dremap.perm[0] = 0;
29619 dremap.perm[1] = 3;
29622 else if ((contents & (h2 | h3)) == contents)
29624 for (i = 0; i < nelt2; ++i)
29626 remap[i + nelt2] = i;
29627 remap[i + nelt] = i + nelt2;
29628 dremap.perm[i] = i + nelt2;
29629 dremap.perm[i + nelt2] = i + nelt;
29633 dremap.vmode = V2DImode;
29635 dremap.perm[0] = 1;
29636 dremap.perm[1] = 2;
29642 /* Use the remapping array set up above to move the elements from their
29643 swizzled locations into their final destinations. */
29645 for (i = 0; i < nelt; ++i)
29647 unsigned e = remap[d->perm[i]];
29648 gcc_assert (e < nelt);
29649 dfinal.perm[i] = e;
29651 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
29652 dfinal.op1 = dfinal.op0;
29653 dremap.target = dfinal.op0;
29655 /* Test if the final remap can be done with a single insn. For V4SFmode or
29656 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
29658 ok = expand_vec_perm_1 (&dfinal);
29659 seq = get_insns ();
29665 if (dremap.vmode != dfinal.vmode)
29667 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
29668 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
29669 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
29672 ok = expand_vec_perm_1 (&dremap);
29679 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
29680 permutation with two pshufb insns and an ior. We should have already
29681 failed all two instruction sequences. */
29684 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
29686 rtx rperm[2][16], vperm, l, h, op, m128;
29687 unsigned int i, nelt, eltsz;
29689 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29691 gcc_assert (d->op0 != d->op1);
29694 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29696 /* Generate two permutation masks. If the required element is within
29697 the given vector it is shuffled into the proper lane. If the required
29698 element is in the other vector, force a zero into the lane by setting
29699 bit 7 in the permutation mask. */
29700 m128 = GEN_INT (-128);
29701 for (i = 0; i < nelt; ++i)
29703 unsigned j, e = d->perm[i];
29704 unsigned which = (e >= nelt);
29708 for (j = 0; j < eltsz; ++j)
29710 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
29711 rperm[1-which][i*eltsz + j] = m128;
29715 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
29716 vperm = force_reg (V16QImode, vperm);
29718 l = gen_reg_rtx (V16QImode);
29719 op = gen_lowpart (V16QImode, d->op0);
29720 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
29722 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
29723 vperm = force_reg (V16QImode, vperm);
29725 h = gen_reg_rtx (V16QImode);
29726 op = gen_lowpart (V16QImode, d->op1);
29727 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
29729 op = gen_lowpart (V16QImode, d->target);
29730 emit_insn (gen_iorv16qi3 (op, l, h));
29735 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
29736 and extract-odd permutations. */
29739 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
29741 rtx t1, t2, t3, t4;
29746 t1 = gen_reg_rtx (V4DFmode);
29747 t2 = gen_reg_rtx (V4DFmode);
29749 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
29750 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
29751 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
29753 /* Now an unpck[lh]pd will produce the result required. */
29755 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
29757 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
29763 static const unsigned char perm1[8] = { 0, 2, 1, 3, 4, 6, 5, 7 };
29764 static const unsigned char perme[8] = { 0, 1, 8, 9, 4, 5, 12, 13 };
29765 static const unsigned char permo[8] = { 2, 3, 10, 11, 6, 7, 14, 15 };
29767 t1 = gen_reg_rtx (V8SFmode);
29768 t2 = gen_reg_rtx (V8SFmode);
29769 t3 = gen_reg_rtx (V8SFmode);
29770 t4 = gen_reg_rtx (V8SFmode);
29772 /* Shuffle within the 128-bit lanes to produce:
29773 { 0 2 1 3 4 6 5 7 } and { 8 a 9 b c e d f }. */
29774 expand_vselect (t1, d->op0, perm1, 8);
29775 expand_vselect (t2, d->op1, perm1, 8);
29777 /* Shuffle the lanes around to produce:
29778 { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }. */
29779 emit_insn (gen_avx_vperm2f128v8sf3 (t3, t1, t2, GEN_INT (0x20)));
29780 emit_insn (gen_avx_vperm2f128v8sf3 (t4, t1, t2, GEN_INT (0x31)));
29782 /* Now a vpermil2p will produce the result required. */
29783 /* ??? The vpermil2p requires a vector constant. Another option
29784 is a unpck[lh]ps to merge the two vectors to produce
29785 { 0 4 2 6 8 c a e } or { 1 5 3 7 9 d b f }. Then use another
29786 vpermilps to get the elements into the final order. */
29789 memcpy (d->perm, odd ? permo: perme, 8);
29790 expand_vec_perm_vpermil (d);
29798 /* These are always directly implementable by expand_vec_perm_1. */
29799 gcc_unreachable ();
29803 return expand_vec_perm_pshufb2 (d);
29806 /* We need 2*log2(N)-1 operations to achieve odd/even
29807 with interleave. */
29808 t1 = gen_reg_rtx (V8HImode);
29809 t2 = gen_reg_rtx (V8HImode);
29810 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
29811 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
29812 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
29813 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
29815 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
29817 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
29824 return expand_vec_perm_pshufb2 (d);
29827 t1 = gen_reg_rtx (V16QImode);
29828 t2 = gen_reg_rtx (V16QImode);
29829 t3 = gen_reg_rtx (V16QImode);
29830 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
29831 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
29832 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
29833 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
29834 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
29835 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
29837 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
29839 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
29845 gcc_unreachable ();
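/* Worked example of the interleave sequences above, for N = 8 and
   inputs a = {0..7}, b = {8..15} (2*log2(8)-1 = 5 instructions, since
   only one half of the last round is needed):

     round 1:  low = {0,8,1,9,2,10,3,11}    high = {4,12,5,13,6,14,7,15}
     round 2:  low = {0,4,8,12,1,5,9,13}    high = {2,6,10,14,3,7,11,15}
     round 3:  interleave low  -> {0,2,4,6,8,10,12,14}   (the evens)
               interleave high -> {1,3,5,7,9,11,13,15}   (the odds)
*/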
29851 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
29852 extract-even and extract-odd permutations. */
29855 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
29857 unsigned i, odd, nelt = d->nelt;
29860 if (odd != 0 && odd != 1)
29863 for (i = 1; i < nelt; ++i)
29864 if (d->perm[i] != 2 * i + odd)
29867 return expand_vec_perm_even_odd_1 (d, odd);
29870 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
29871 permutations. We assume that expand_vec_perm_1 has already failed. */
29874 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
29876 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
29877 enum machine_mode vmode = d->vmode;
29878 unsigned char perm2[4];
29886 /* These are special-cased in sse.md so that we can optionally
29887 use the vbroadcast instruction. They expand to two insns
29888 if the input happens to be in a register. */
29889 gcc_unreachable ();
29895 /* These are always implementable using standard shuffle patterns. */
29896 gcc_unreachable ();
29900 /* These can be implemented via interleave. We save one insn by
29901 stopping once we have promoted to V4SImode and then use pshufd. */
29904 optab otab = vec_interleave_low_optab;
29908 otab = vec_interleave_high_optab;
29913 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
29914 vmode = get_mode_wider_vector (vmode);
29915 op0 = gen_lowpart (vmode, op0);
29917 while (vmode != V4SImode);
29919 memset (perm2, elt, 4);
29920 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
29925 gcc_unreachable ();
29929 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
29930 broadcast permutations. */
29933 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
29935 unsigned i, elt, nelt = d->nelt;
29937 if (d->op0 != d->op1)
29941 for (i = 1; i < nelt; ++i)
29942 if (d->perm[i] != elt)
29945 return expand_vec_perm_broadcast_1 (d);
29948 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
29949 With all of the interface bits taken care of, perform the expansion
29950 in D and return true on success. */
29953 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
29955 /* Try a single instruction expansion. */
29956 if (expand_vec_perm_1 (d))
29959 /* Try sequences of two instructions. */
29961 if (expand_vec_perm_pshuflw_pshufhw (d))
29964 if (expand_vec_perm_palignr (d))
29967 if (expand_vec_perm_interleave2 (d))
29970 if (expand_vec_perm_broadcast (d))
29973 /* Try sequences of three instructions. */
29975 if (expand_vec_perm_pshufb2 (d))
29978 /* ??? Look for narrow permutations whose element orderings would
29979 allow the promotion to a wider mode. */
29981 /* ??? Look for sequences of interleave or a wider permute that place
29982 the data into the correct lanes for a half-vector shuffle like
29983 pshuf[lh]w or vpermilps. */
29985 /* ??? Look for sequences of interleave that produce the desired results.
29986 The combinatorics of punpck[lh] get pretty ugly... */
29988 if (expand_vec_perm_even_odd (d))
29994 /* Extract the values from the vector CST into the permutation array in D.
29995 Return 0 on error, 1 if all values from the permutation come from the
29996 first vector, 2 if all values from the second vector, and 3 otherwise. */
29999 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
30001 tree list = TREE_VECTOR_CST_ELTS (cst);
30002 unsigned i, nelt = d->nelt;
30005 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
30007 unsigned HOST_WIDE_INT e;
30009 if (!host_integerp (TREE_VALUE (list), 1))
30011 e = tree_low_cst (TREE_VALUE (list), 1);
30015 ret |= (e < nelt ? 1 : 2);
30018 gcc_assert (list == NULL);
30020 /* If all elements are from the second vector, remap them onto the first. */
30022 for (i = 0; i < nelt; ++i)
30023 d->perm[i] -= nelt;
30029 ix86_expand_vec_perm_builtin (tree exp)
30031 struct expand_vec_perm_d d;
30032 tree arg0, arg1, arg2;
30034 arg0 = CALL_EXPR_ARG (exp, 0);
30035 arg1 = CALL_EXPR_ARG (exp, 1);
30036 arg2 = CALL_EXPR_ARG (exp, 2);
30038 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
30039 d.nelt = GET_MODE_NUNITS (d.vmode);
30040 d.testing_p = false;
30041 gcc_assert (VECTOR_MODE_P (d.vmode));
30043 if (TREE_CODE (arg2) != VECTOR_CST)
30045 error_at (EXPR_LOCATION (exp),
30046 "vector permutation requires vector constant");
30050 switch (extract_vec_perm_cst (&d, arg2))
30056 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
30060 if (!operand_equal_p (arg0, arg1, 0))
30062 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30063 d.op0 = force_reg (d.vmode, d.op0);
30064 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30065 d.op1 = force_reg (d.vmode, d.op1);
30069 /* The elements of PERM do not suggest that only the first operand
30070 is used, but both operands are identical. Allow easier matching
30071 of the permutation by folding the permutation into the single
30072 input vector. */
30074 unsigned i, nelt = d.nelt;
30075 for (i = 0; i < nelt; ++i)
30076 if (d.perm[i] >= nelt)
30082 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30083 d.op0 = force_reg (d.vmode, d.op0);
30088 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30089 d.op0 = force_reg (d.vmode, d.op0);
30094 d.target = gen_reg_rtx (d.vmode);
30095 if (ix86_expand_vec_perm_builtin_1 (&d))
30098 /* For compiler-generated permutations, we should never get here, because
30099 the compiler should also be checking the ok hook. But since this is a
30100 builtin the user has access to, don't abort. */
30104 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
30107 sorry ("vector permutation (%d %d %d %d)",
30108 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
30111 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
30112 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30113 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
30116 sorry ("vector permutation "
30117 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
30118 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30119 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
30120 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
30121 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
30124 gcc_unreachable ();
30127 return CONST0_RTX (d.vmode);
30130 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
30133 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
30135 struct expand_vec_perm_d d;
30139 d.vmode = TYPE_MODE (vec_type);
30140 d.nelt = GET_MODE_NUNITS (d.vmode);
30141 d.testing_p = true;
30143 /* Given sufficient ISA support we can just return true here
30144 for selected vector modes. */
30145 if (GET_MODE_SIZE (d.vmode) == 16)
30147 /* All implementable with a single vpperm insn. */
30150 /* All implementable with 2 pshufb + 1 ior. */
30153 /* All implementable with shufpd or unpck[lh]pd. */
30158 vec_mask = extract_vec_perm_cst (&d, mask);
30160 /* This hook cannot be called in response to something that the
30161 user does (unlike the builtin expander) so we shouldn't ever see
30162 an error generated from the extract. */
30163 gcc_assert (vec_mask > 0 && vec_mask <= 3);
30164 one_vec = (vec_mask != 3);
30166 /* Implementable with shufps or pshufd. */
30167 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
30170 /* Otherwise we have to go through the motions and see if we can
30171 figure out how to generate the requested permutation. */
30172 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
30173 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
30175 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
30178 ret = ix86_expand_vec_perm_builtin_1 (&d);
30185 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
30187 struct expand_vec_perm_d d;
30193 d.vmode = GET_MODE (targ);
30194 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
30195 d.testing_p = false;
30197 for (i = 0; i < nelt; ++i)
30198 d.perm[i] = i * 2 + odd;
30200 /* We'll either be able to implement the permutation directly... */
30201 if (expand_vec_perm_1 (&d))
30204 /* ... or we use the special-case patterns. */
30205 expand_vec_perm_even_odd_1 (&d, odd);
30208 /* Return the va_list type node matching the calling ABI of FNDECL.
30209 On 32-bit targets this is always the default va_list type. */
30212 ix86_fn_abi_va_list (tree fndecl)
30215 return va_list_type_node;
30216 gcc_assert (fndecl != NULL_TREE);
30218 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
30219 return ms_va_list_type_node;
30221 return sysv_va_list_type_node;
30224 /* Returns the canonical va_list type specified by TYPE. If there
30225 is no valid TYPE provided, it returns NULL_TREE. */
30228 ix86_canonical_va_list_type (tree type)
30232 /* Resolve references and pointers to va_list type. */
30233 if (INDIRECT_REF_P (type))
30234 type = TREE_TYPE (type);
30235 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE(type)))
30236 type = TREE_TYPE (type);
30240 wtype = va_list_type_node;
30241 gcc_assert (wtype != NULL_TREE);
30243 if (TREE_CODE (wtype) == ARRAY_TYPE)
30245 /* If va_list is an array type, the argument may have decayed
30246 to a pointer type, e.g. by being passed to another function.
30247 In that case, unwrap both types so that we can compare the
30248 underlying records. */
30249 if (TREE_CODE (htype) == ARRAY_TYPE
30250 || POINTER_TYPE_P (htype))
30252 wtype = TREE_TYPE (wtype);
30253 htype = TREE_TYPE (htype);
30256 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30257 return va_list_type_node;
30258 wtype = sysv_va_list_type_node;
30259 gcc_assert (wtype != NULL_TREE);
30261 if (TREE_CODE (wtype) == ARRAY_TYPE)
30263 /* If va_list is an array type, the argument may have decayed
30264 to a pointer type, e.g. by being passed to another function.
30265 In that case, unwrap both types so that we can compare the
30266 underlying records. */
30267 if (TREE_CODE (htype) == ARRAY_TYPE
30268 || POINTER_TYPE_P (htype))
30270 wtype = TREE_TYPE (wtype);
30271 htype = TREE_TYPE (htype);
30274 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30275 return sysv_va_list_type_node;
30276 wtype = ms_va_list_type_node;
30277 gcc_assert (wtype != NULL_TREE);
30279 if (TREE_CODE (wtype) == ARRAY_TYPE)
30281 /* If va_list is an array type, the argument may have decayed
30282 to a pointer type, e.g. by being passed to another function.
30283 In that case, unwrap both types so that we can compare the
30284 underlying records. */
30285 if (TREE_CODE (htype) == ARRAY_TYPE
30286 || POINTER_TYPE_P (htype))
30288 wtype = TREE_TYPE (wtype);
30289 htype = TREE_TYPE (htype);
30292 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30293 return ms_va_list_type_node;
30296 return std_canonical_va_list_type (type);
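
/* An example of the decay case handled above: the 64-bit SysV
   __builtin_va_list is a one-element array of records, so a value
   declared "va_list ap" that was passed on to another function
   arrives here as a pointer to the record.  Unwrapping the
   ARRAY_TYPE wtype and the POINTER_TYPE htype together exposes the
   same underlying RECORD_TYPE, and the TYPE_MAIN_VARIANT comparison
   then matches.  */
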
/* Iterate through the target-specific builtin types for va_list.
   IDX denotes the iterator, *PTREE is set to the result type of
   the va_list builtin, and *PNAME to its internal name.
   Returns zero if there is no element for this index, otherwise
   IDX should be increased upon the next call.
   Note, do not iterate a base builtin's name like __builtin_va_list.
   Used from c_common_nodes_and_builtins.  */

static int
ix86_enum_va_list (int idx, const char **pname, tree *ptree)
{
  if (!TARGET_64BIT)
    return 0;
  switch (idx)
    {
    case 0:
      *ptree = ms_va_list_type_node;
      *pname = "__builtin_ms_va_list";
      break;
    case 1:
      *ptree = sysv_va_list_type_node;
      *pname = "__builtin_sysv_va_list";
      break;
    default:
      return 0;
    }
  return 1;
}
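
/* A minimal sketch of the iteration driving this hook (the loop
   shape is assumed, and record_va_list_builtin is a hypothetical
   stand-in for the registration done in c_common_nodes_and_builtins):

       const char *name;
       tree type;
       int i;

       for (i = 0; targetm.enum_va_list (i, &name, &type); ++i)
	 record_va_list_builtin (name, type);

   Each nonzero return yields one extra va_list flavor; the walk ends
   on the first zero, so nothing is enumerated for 32-bit targets.  */
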
/* Initialize the GCC target structure.  */
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ix86_return_in_memory

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
# undef TARGET_MERGE_DECL_ATTRIBUTES
# define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ix86_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL ix86_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ix86_expand_builtin

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  ix86_builtin_vectorized_function

#undef TARGET_VECTORIZE_BUILTIN_CONVERSION
#define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue

#undef TARGET_ENCODE_SECTION_INFO
#ifndef SUBTARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
#endif

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""
#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP ASM_BYTE

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
#ifdef ASM_QUAD
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  ia32_multipass_dfa_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
#endif

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START x86_file_start

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT \
   | TARGET_SUBTARGET_DEFAULT \
   | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT \
   | MASK_FUSED_MADD)

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION ix86_handle_option

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ix86_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST ix86_address_cost

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list

#undef TARGET_FN_ABI_VA_LIST
#define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list

#undef TARGET_CANONICAL_VA_LIST_TYPE
#define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
#undef TARGET_UPDATE_STACK_BOUNDARY
#define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
#undef TARGET_GET_DRAP_RTX
#define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_STATIC_CHAIN
#define TARGET_STATIC_CHAIN ix86_static_chain
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT ix86_trampoline_init

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
#endif

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE ix86_mangle_type

#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE ix86_function_value

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD ix86_secondary_reload

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  ix86_builtin_vectorization_cost
#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
  ix86_vectorize_builtin_vec_perm
#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
  ix86_vectorize_builtin_vec_perm_ok

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE ix86_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE ix86_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT ix86_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P ix86_can_inline_p

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p

#undef TARGET_IRA_COVER_CLASSES
#define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE ix86_can_eliminate

#undef TARGET_ASM_CODE_END
#define TARGET_ASM_CODE_END ix86_code_end
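
/* The #undef/#define pairs above override the defaults from
   target-def.h, so that expanding TARGET_INITIALIZER below fills the
   targetm hook vector with the ix86 implementations.  A minimal
   sketch of a call site elsewhere in the compiler (illustrative
   only):

       if (targetm.calls.return_in_memory (type, fntype))
	 ... arrange for a hidden return-slot pointer ...

   which on this target dispatches to ix86_return_in_memory.  */
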
struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-i386.h"