/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "basic-block.h"
#include "target-def.h"
#include "langhooks.h"
#include "tm-constrs.h"
#include "dwarf2out.h"
static rtx legitimize_dllimport_symbol (rtx, bool);
#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif
/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode)				\
  ((mode) == QImode ? 0					\
   : (mode) == HImode ? 1				\
   : (mode) == SImode ? 2				\
   : (mode) == DImode ? 3				\
   : 4)
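
/* A sketch of typical usage, assuming the mult_init cost field declared
   for struct processor_costs in i386.h: the cost tables below carry one
   entry per integer mode plus a catch-all slot, so a multiply cost query
   looks like

     int mul_cost = ix86_cost->mult_init[MODE_INDEX (SImode)];

   which selects the SImode entry (index 2) of the active tuning.  */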
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)
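
/* Worked example of the size-cost convention above: with COSTS_N_INSNS
   defined as (N)*4, an add costs COSTS_N_INSNS (1) == 4 in the time-based
   tables, while in the size table the same add costs COSTS_N_BYTES (2) == 4,
   i.e. its 2-byte encoding.  Both conventions therefore agree on the cost
   of an add, and byte counts can be compared directly against
   COSTS_N_INSNS-based costs.  */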
#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
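
/* Shape of the stringop descriptors used throughout the cost tables below:
   each initializer is {alg, {{max, alg}, ...}}, where the leading algorithm
   is used when the block size is unknown at compile time and each {max, alg}
   pair selects ALG for copies of at most MAX bytes (-1 meaning no upper
   bound).  For instance, an entry such as

     {libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}

   uses rep movsl for blocks up to 256 bytes and a library call otherwise.  */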
const
struct processor_costs ix86_size_cost = {/* costs for tuning for size */
  COSTS_N_BYTES (2),		/* cost of an add instruction */
  COSTS_N_BYTES (3),		/* cost of a lea instruction */
  COSTS_N_BYTES (2),		/* variable shift costs */
  COSTS_N_BYTES (3),		/* constant shift costs */
  {COSTS_N_BYTES (3),		/* cost of starting multiply for QI */
   COSTS_N_BYTES (3),		/* HI */
   COSTS_N_BYTES (3),		/* SI */
   COSTS_N_BYTES (3),		/* DI */
   COSTS_N_BYTES (5)},		/* other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),		/* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),		/* HI */
   COSTS_N_BYTES (3),		/* SI */
   COSTS_N_BYTES (3),		/* DI */
   COSTS_N_BYTES (5)},		/* other */
  COSTS_N_BYTES (3),		/* cost of movsx */
  COSTS_N_BYTES (3),		/* cost of movzx */
  0,				/* "large" insn */
  2,				/* MOVE_RATIO */
  2,				/* cost for loading QImode using movzbl */
  {2, 2, 2},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 2, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {2, 2, 2},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {2, 2, 2},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  3,				/* cost of moving MMX register */
  {3, 3},			/* cost of loading MMX registers
				   in SImode and DImode */
  {3, 3},			/* cost of storing MMX registers
				   in SImode and DImode */
  3,				/* cost of moving SSE register */
  {3, 3, 3},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {3, 3, 3},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  3,				/* MMX or SSE register to integer */
  0,				/* size of l1 cache */
  0,				/* size of l2 cache */
  0,				/* size of prefetch block */
  0,				/* number of parallel prefetches */
  2,				/* Branch cost */
  COSTS_N_BYTES (2),		/* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),		/* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),		/* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),		/* cost of FABS instruction.  */
  COSTS_N_BYTES (2),		/* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),		/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  1,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  1,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};

/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = {	/* 386 specific costs */
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1),		/* cost of a lea instruction */
  COSTS_N_INSNS (3),		/* variable shift costs */
  COSTS_N_INSNS (2),		/* constant shift costs */
  {COSTS_N_INSNS (6),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (6),		/* HI */
   COSTS_N_INSNS (6),		/* SI */
   COSTS_N_INSNS (6),		/* DI */
   COSTS_N_INSNS (6)},		/* other */
  COSTS_N_INSNS (1),		/* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),		/* HI */
   COSTS_N_INSNS (23),		/* SI */
   COSTS_N_INSNS (23),		/* DI */
   COSTS_N_INSNS (23)},		/* other */
  COSTS_N_INSNS (3),		/* cost of movsx */
  COSTS_N_INSNS (2),		/* cost of movzx */
  15,				/* "large" insn */
  3,				/* MOVE_RATIO */
  4,				/* cost for loading QImode using movzbl */
  {2, 4, 2},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 4, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {8, 8, 8},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {8, 8, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {4, 8},			/* cost of loading MMX registers
				   in SImode and DImode */
  {4, 8},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {4, 8, 16},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 8, 16},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  3,				/* MMX or SSE register to integer */
  0,				/* size of l1 cache */
  0,				/* size of l2 cache */
  0,				/* size of prefetch block */
  0,				/* number of parallel prefetches */
  1,				/* Branch cost */
  COSTS_N_INSNS (23),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (24),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),		/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs i486_cost = {	/* 486 specific costs */
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1),		/* cost of a lea instruction */
  COSTS_N_INSNS (3),		/* variable shift costs */
  COSTS_N_INSNS (2),		/* constant shift costs */
  {COSTS_N_INSNS (12),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (12),		/* HI */
   COSTS_N_INSNS (12),		/* SI */
   COSTS_N_INSNS (12),		/* DI */
   COSTS_N_INSNS (12)},		/* other */
  1,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),		/* HI */
   COSTS_N_INSNS (40),		/* SI */
   COSTS_N_INSNS (40),		/* DI */
   COSTS_N_INSNS (40)},		/* other */
  COSTS_N_INSNS (3),		/* cost of movsx */
  COSTS_N_INSNS (2),		/* cost of movzx */
  15,				/* "large" insn */
  3,				/* MOVE_RATIO */
  4,				/* cost for loading QImode using movzbl */
  {2, 4, 2},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 4, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {8, 8, 8},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {8, 8, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {4, 8},			/* cost of loading MMX registers
				   in SImode and DImode */
  {4, 8},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {4, 8, 16},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 8, 16},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  3,				/* MMX or SSE register to integer */
  4,				/* size of l1 cache.  486 has 8kB cache
				   shared for code and data, so 4kB is
				   not really precise.  */
  4,				/* size of l2 cache */
  0,				/* size of prefetch block */
  0,				/* number of parallel prefetches */
  1,				/* Branch cost */
  COSTS_N_INSNS (8),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),		/* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1),		/* cost of a lea instruction */
  COSTS_N_INSNS (4),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (11),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (11),		/* HI */
   COSTS_N_INSNS (11),		/* SI */
   COSTS_N_INSNS (11),		/* DI */
   COSTS_N_INSNS (11)},		/* other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),		/* HI */
   COSTS_N_INSNS (25),		/* SI */
   COSTS_N_INSNS (25),		/* DI */
   COSTS_N_INSNS (25)},		/* other */
  COSTS_N_INSNS (3),		/* cost of movsx */
  COSTS_N_INSNS (2),		/* cost of movzx */
  8,				/* "large" insn */
  6,				/* MOVE_RATIO */
  6,				/* cost for loading QImode using movzbl */
  {2, 4, 2},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 4, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {2, 2, 6},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 4, 6},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  8,				/* cost of moving MMX register */
  {8, 8},			/* cost of loading MMX registers
				   in SImode and DImode */
  {8, 8},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {4, 8, 16},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 8, 16},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  3,				/* MMX or SSE register to integer */
  8,				/* size of l1 cache.  */
  8,				/* size of l2 cache */
  0,				/* size of prefetch block */
  0,				/* number of parallel prefetches */
  2,				/* Branch cost */
  COSTS_N_INSNS (3),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),		/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1),		/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (4),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),		/* HI */
   COSTS_N_INSNS (4),		/* SI */
   COSTS_N_INSNS (4),		/* DI */
   COSTS_N_INSNS (4)},		/* other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),		/* HI */
   COSTS_N_INSNS (17),		/* SI */
   COSTS_N_INSNS (17),		/* DI */
   COSTS_N_INSNS (17)},		/* other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  6,				/* MOVE_RATIO */
  2,				/* cost for loading QImode using movzbl */
  {4, 4, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 2, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {2, 2, 6},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 4, 6},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {2, 2},			/* cost of loading MMX registers
				   in SImode and DImode */
  {2, 2},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {2, 2, 8},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {2, 2, 8},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  3,				/* MMX or SSE register to integer */
  8,				/* size of l1 cache.  */
  256,				/* size of l2 cache */
  32,				/* size of prefetch block */
  6,				/* number of parallel prefetches */
  2,				/* Branch cost */
  COSTS_N_INSNS (3),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),		/* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks the inline loop is still a
     noticeable win; for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb apparently has more expensive startup time in the
     CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1),		/* cost of a lea instruction */
  COSTS_N_INSNS (2),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (3),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),		/* HI */
   COSTS_N_INSNS (7),		/* SI */
   COSTS_N_INSNS (7),		/* DI */
   COSTS_N_INSNS (7)},		/* other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),		/* HI */
   COSTS_N_INSNS (39),		/* SI */
   COSTS_N_INSNS (39),		/* DI */
   COSTS_N_INSNS (39)},		/* other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  1,				/* MOVE_RATIO */
  1,				/* cost for loading QImode using movzbl */
  {1, 1, 1},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {1, 1, 1},			/* cost of storing integer registers */
  1,				/* cost of reg,reg fld/fst */
  {1, 1, 1},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 6, 6},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */

  1,				/* cost of moving MMX register */
  {1, 1},			/* cost of loading MMX registers
				   in SImode and DImode */
  {1, 1},			/* cost of storing MMX registers
				   in SImode and DImode */
  1,				/* cost of moving SSE register */
  {1, 1, 1},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {1, 1, 1},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  1,				/* MMX or SSE register to integer */
  64,				/* size of l1 cache.  */
  128,				/* size of l2 cache.  */
  32,				/* size of prefetch block */
  1,				/* number of parallel prefetches */
  1,				/* Branch cost */
  COSTS_N_INSNS (6),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),		/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (2),		/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (3),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),		/* HI */
   COSTS_N_INSNS (3),		/* SI */
   COSTS_N_INSNS (3),		/* DI */
   COSTS_N_INSNS (3)},		/* other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),		/* HI */
   COSTS_N_INSNS (18),		/* SI */
   COSTS_N_INSNS (18),		/* DI */
   COSTS_N_INSNS (18)},		/* other */
  COSTS_N_INSNS (2),		/* cost of movsx */
  COSTS_N_INSNS (2),		/* cost of movzx */
  8,				/* "large" insn */
  4,				/* MOVE_RATIO */
  3,				/* cost for loading QImode using movzbl */
  {4, 5, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 3, 2},			/* cost of storing integer registers */
  4,				/* cost of reg,reg fld/fst */
  {6, 6, 6},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 4, 4},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {2, 2},			/* cost of loading MMX registers
				   in SImode and DImode */
  {2, 2},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {2, 2, 8},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {2, 2, 8},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  6,				/* MMX or SSE register to integer */
  32,				/* size of l1 cache.  */
  32,				/* size of l2 cache.  Some models
				   have integrated l2 cache, but
				   optimizing for k6 is not important
				   enough to worry about that.  */
  32,				/* size of prefetch block */
  1,				/* number of parallel prefetches */
  1,				/* Branch cost */
  COSTS_N_INSNS (2),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),		/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (2),		/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (5),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (5),		/* HI */
   COSTS_N_INSNS (5),		/* SI */
   COSTS_N_INSNS (5),		/* DI */
   COSTS_N_INSNS (5)},		/* other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),		/* HI */
   COSTS_N_INSNS (42),		/* SI */
   COSTS_N_INSNS (74),		/* DI */
   COSTS_N_INSNS (74)},		/* other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  9,				/* MOVE_RATIO */
  4,				/* cost for loading QImode using movzbl */
  {3, 4, 3},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {3, 4, 3},			/* cost of storing integer registers */
  4,				/* cost of reg,reg fld/fst */
  {4, 4, 12},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {6, 6, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {4, 4},			/* cost of loading MMX registers
				   in SImode and DImode */
  {4, 4},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {4, 4, 6},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 4, 5},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  5,				/* MMX or SSE register to integer */
  64,				/* size of l1 cache.  */
  256,				/* size of l2 cache.  */
  64,				/* size of prefetch block */
  6,				/* number of parallel prefetches */
  5,				/* Branch cost */
  COSTS_N_INSNS (4),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),		/* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with the REP prefix (relative to
     loops) compared to K8.  Alignment becomes important after 8 bytes for
     memcpy and 128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (2),		/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (3),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),		/* HI */
   COSTS_N_INSNS (3),		/* SI */
   COSTS_N_INSNS (4),		/* DI */
   COSTS_N_INSNS (5)},		/* other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),		/* HI */
   COSTS_N_INSNS (42),		/* SI */
   COSTS_N_INSNS (74),		/* DI */
   COSTS_N_INSNS (74)},		/* other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  9,				/* MOVE_RATIO */
  4,				/* cost for loading QImode using movzbl */
  {3, 4, 3},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {3, 4, 3},			/* cost of storing integer registers */
  4,				/* cost of reg,reg fld/fst */
  {4, 4, 12},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {6, 6, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {3, 3},			/* cost of loading MMX registers
				   in SImode and DImode */
  {4, 4},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {4, 3, 6},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 4, 5},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  5,				/* MMX or SSE register to integer */
  64,				/* size of l1 cache.  */
  512,				/* size of l2 cache.  */
  64,				/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it is probably not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,				/* number of parallel prefetches */
  3,				/* Branch cost */
  COSTS_N_INSNS (4),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),		/* cost of FSQRT instruction.  */
  /* K8 has an optimized REP instruction for medium-sized blocks, but for
     very small blocks it is better to use a loop.  For large blocks, a
     libcall can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,				/* scalar_stmt_cost.  */
  2,				/* scalar load_cost.  */
  2,				/* scalar_store_cost.  */
  5,				/* vec_stmt_cost.  */
  0,				/* vec_to_scalar_cost.  */
  2,				/* scalar_to_vec_cost.  */
  2,				/* vec_align_load_cost.  */
  3,				/* vec_unalign_load_cost.  */
  3,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  2,				/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (2),		/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (3),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),		/* HI */
   COSTS_N_INSNS (3),		/* SI */
   COSTS_N_INSNS (4),		/* DI */
   COSTS_N_INSNS (5)},		/* other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),		/* HI */
   COSTS_N_INSNS (51),		/* SI */
   COSTS_N_INSNS (83),		/* DI */
   COSTS_N_INSNS (83)},		/* other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  9,				/* MOVE_RATIO */
  4,				/* cost for loading QImode using movzbl */
  {3, 4, 3},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {3, 4, 3},			/* cost of storing integer registers */
  4,				/* cost of reg,reg fld/fst */
  {4, 4, 12},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {6, 6, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {3, 3},			/* cost of loading MMX registers
				   in SImode and DImode */
  {4, 4},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {4, 4, 3},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 4, 5},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  3,				/* MMX or SSE register to integer */
				/* On K8:
				     MOVD reg64, xmmreg Double FSTORE 4
				     MOVD reg32, xmmreg Double FSTORE 4
				   On AMDFAM10:
				     MOVD reg64, xmmreg Double FADD 3
				     MOVD reg32, xmmreg Double FADD 3  */
  64,				/* size of l1 cache.  */
  512,				/* size of l2 cache.  */
  64,				/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it is probably not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,				/* number of parallel prefetches */
  2,				/* Branch cost */
  COSTS_N_INSNS (4),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),		/* cost of FSQRT instruction.  */

  /* AMDFAM10 has an optimized REP instruction for medium-sized blocks, but
     for very small blocks it is better to use a loop.  For large blocks, a
     libcall can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,				/* scalar_stmt_cost.  */
  2,				/* scalar load_cost.  */
  2,				/* scalar_store_cost.  */
  6,				/* vec_stmt_cost.  */
  0,				/* vec_to_scalar_cost.  */
  2,				/* scalar_to_vec_cost.  */
  2,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  2,				/* vec_store_cost.  */
  2,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (3),		/* cost of a lea instruction */
  COSTS_N_INSNS (4),		/* variable shift costs */
  COSTS_N_INSNS (4),		/* constant shift costs */
  {COSTS_N_INSNS (15),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (15),		/* HI */
   COSTS_N_INSNS (15),		/* SI */
   COSTS_N_INSNS (15),		/* DI */
   COSTS_N_INSNS (15)},		/* other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),		/* HI */
   COSTS_N_INSNS (56),		/* SI */
   COSTS_N_INSNS (56),		/* DI */
   COSTS_N_INSNS (56)},		/* other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  16,				/* "large" insn */
  6,				/* MOVE_RATIO */
  2,				/* cost for loading QImode using movzbl */
  {4, 5, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 3, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {2, 2, 6},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 4, 6},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {2, 2},			/* cost of loading MMX registers
				   in SImode and DImode */
  {2, 2},			/* cost of storing MMX registers
				   in SImode and DImode */
  12,				/* cost of moving SSE register */
  {12, 12, 12},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {2, 2, 8},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  10,				/* MMX or SSE register to integer */
  8,				/* size of l1 cache.  */
  256,				/* size of l2 cache.  */
  64,				/* size of prefetch block */
  6,				/* number of parallel prefetches */
  2,				/* Branch cost */
  COSTS_N_INSNS (5),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),		/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1),		/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (10),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (10),		/* HI */
   COSTS_N_INSNS (10),		/* SI */
   COSTS_N_INSNS (10),		/* DI */
   COSTS_N_INSNS (10)},		/* other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),		/* HI */
   COSTS_N_INSNS (66),		/* SI */
   COSTS_N_INSNS (66),		/* DI */
   COSTS_N_INSNS (66)},		/* other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  16,				/* "large" insn */
  17,				/* MOVE_RATIO */
  4,				/* cost for loading QImode using movzbl */
  {4, 4, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {4, 4, 4},			/* cost of storing integer registers */
  3,				/* cost of reg,reg fld/fst */
  {12, 12, 12},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 4, 4},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  6,				/* cost of moving MMX register */
  {12, 12},			/* cost of loading MMX registers
				   in SImode and DImode */
  {12, 12},			/* cost of storing MMX registers
				   in SImode and DImode */
  6,				/* cost of moving SSE register */
  {12, 12, 12},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {12, 12, 12},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  8,				/* MMX or SSE register to integer */
  8,				/* size of l1 cache.  */
  1024,				/* size of l2 cache.  */
  128,				/* size of prefetch block */
  8,				/* number of parallel prefetches */
  1,				/* Branch cost */
  COSTS_N_INSNS (6),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),		/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
	      {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs core2_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,	/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (3),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),		/* HI */
   COSTS_N_INSNS (3),		/* SI */
   COSTS_N_INSNS (3),		/* DI */
   COSTS_N_INSNS (3)},		/* other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (22),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (22),		/* HI */
   COSTS_N_INSNS (22),		/* SI */
   COSTS_N_INSNS (22),		/* DI */
   COSTS_N_INSNS (22)},		/* other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  16,				/* MOVE_RATIO */
  2,				/* cost for loading QImode using movzbl */
  {6, 6, 6},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {4, 4, 4},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {6, 6, 6},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 4, 4},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {6, 6},			/* cost of loading MMX registers
				   in SImode and DImode */
  {4, 4},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {6, 6, 6},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 4, 4},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  2,				/* MMX or SSE register to integer */
  32,				/* size of l1 cache.  */
  2048,				/* size of l2 cache.  */
  128,				/* size of prefetch block */
  8,				/* number of parallel prefetches */
  3,				/* Branch cost */
  COSTS_N_INSNS (3),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (32),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (58),		/* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs atom_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,	/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (3),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),		/* HI */
   COSTS_N_INSNS (3),		/* SI */
   COSTS_N_INSNS (4),		/* DI */
   COSTS_N_INSNS (2)},		/* other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),		/* HI */
   COSTS_N_INSNS (42),		/* SI */
   COSTS_N_INSNS (74),		/* DI */
   COSTS_N_INSNS (74)},		/* other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  17,				/* MOVE_RATIO */
  2,				/* cost for loading QImode using movzbl */
  {4, 4, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {4, 4, 4},			/* cost of storing integer registers */
  4,				/* cost of reg,reg fld/fst */
  {12, 12, 12},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {6, 6, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {8, 8},			/* cost of loading MMX registers
				   in SImode and DImode */
  {8, 8},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {8, 8, 8},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {8, 8, 8},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  5,				/* MMX or SSE register to integer */
  32,				/* size of l1 cache.  */
  256,				/* size of l2 cache.  */
  64,				/* size of prefetch block */
  6,				/* number of parallel prefetches */
  3,				/* Branch cost */
  COSTS_N_INSNS (8),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),		/* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};

/* Generic64 should produce code tuned for Nocona and K8.  */

static const
struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  /* On all chips taken into consideration lea is 2 cycles or more.  With
     this cost, however, our current implementation of synth_mult results in
     use of unnecessary temporary registers, causing regressions on several
     SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1,	/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (3),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),		/* HI */
   COSTS_N_INSNS (3),		/* SI */
   COSTS_N_INSNS (4),		/* DI */
   COSTS_N_INSNS (2)},		/* other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),		/* HI */
   COSTS_N_INSNS (42),		/* SI */
   COSTS_N_INSNS (74),		/* DI */
   COSTS_N_INSNS (74)},		/* other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  17,				/* MOVE_RATIO */
  4,				/* cost for loading QImode using movzbl */
  {4, 4, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {4, 4, 4},			/* cost of storing integer registers */
  4,				/* cost of reg,reg fld/fst */
  {12, 12, 12},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {6, 6, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {8, 8},			/* cost of loading MMX registers
				   in SImode and DImode */
  {8, 8},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {8, 8, 8},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {8, 8, 8},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  5,				/* MMX or SSE register to integer */
  32,				/* size of l1 cache.  */
  512,				/* size of l2 cache.  */
  64,				/* size of prefetch block */
  6,				/* number of parallel prefetches */
  /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
     value is increased to a perhaps more appropriate value of 5.  */
  3,				/* Branch cost */
  COSTS_N_INSNS (8),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),		/* cost of FSQRT instruction.  */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};

/* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona
   and K8.  */

static const
struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,	/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (3),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),		/* HI */
   COSTS_N_INSNS (3),		/* SI */
   COSTS_N_INSNS (4),		/* DI */
   COSTS_N_INSNS (2)},		/* other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),		/* HI */
   COSTS_N_INSNS (42),		/* SI */
   COSTS_N_INSNS (74),		/* DI */
   COSTS_N_INSNS (74)},		/* other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  17,				/* MOVE_RATIO */
  4,				/* cost for loading QImode using movzbl */
  {4, 4, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {4, 4, 4},			/* cost of storing integer registers */
  4,				/* cost of reg,reg fld/fst */
  {12, 12, 12},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {6, 6, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {8, 8},			/* cost of loading MMX registers
				   in SImode and DImode */
  {8, 8},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {8, 8, 8},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {8, 8, 8},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  5,				/* MMX or SSE register to integer */
  32,				/* size of l1 cache.  */
  256,				/* size of l2 cache.  */
  64,				/* size of prefetch block */
  6,				/* number of parallel prefetches */
  3,				/* Branch cost */
  COSTS_N_INSNS (8),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),		/* cost of FSQRT instruction.  */
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};

const struct processor_costs *ix86_cost = &pentium_cost;
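
/* A minimal sketch of how the active table is consulted (the hook wiring
   lives elsewhere in this file): ix86_cost is repointed at one of the
   tables above during option processing, and cost queries then read its
   fields, e.g.

     total = ix86_cost->add + ix86_cost->lea;

   using the field names declared for struct processor_costs in i386.h.  */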

/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
#define m_CORE2 (1<<PROCESSOR_CORE2)
#define m_ATOM (1<<PROCESSOR_ATOM)

#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
#define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10)

#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)

/* Generic instruction choice should be a common subset of supported CPUs
   (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
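
/* These masks are tested by ANDing them against a one-hot mask built from
   the selected processor; ix86_tune (set up during option processing) is
   assumed here:

     unsigned int ix86_tune_mask = 1u << ix86_tune;
     if (mask & ix86_tune_mask)
       {
	 ... the feature applies to the selected tuning ...
       }

   so a table entry like m_ATHLON_K8 | m_GENERIC enables the feature for
   Athlon, K8 and both generic tunings at once.  */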

/* Feature tests against the various tunings.  */
unsigned char ix86_tune_features[X86_TUNE_LAST];

/* Feature tests against the various tunings used to create ix86_tune_features
   based on the processor mask.  */
static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1294 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1295 negatively, so enabling for Generic64 seems like good code size
1296 tradeoff. We can't enable it for 32bit generic because it does not
1297 work well with PPro base chips. */
1298 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,
1300 /* X86_TUNE_PUSH_MEMORY */
1301 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1302 | m_NOCONA | m_CORE2 | m_GENERIC,
1304 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1307 /* X86_TUNE_UNROLL_STRLEN */
1308 m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
1309 | m_CORE2 | m_GENERIC,
1311 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1312 m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
1314 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1315 on simulation result. But after P4 was made, no performance benefit
1316 was observed with branch hints. It also increases the code size.
1317 As a result, icc never generates branch hints. */
1320 /* X86_TUNE_DOUBLE_WITH_ADD */
1323 /* X86_TUNE_USE_SAHF */
1324 m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
1325 | m_NOCONA | m_CORE2 | m_GENERIC,
1327 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1328 partial dependencies. */
1329 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
1330 | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1332 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1333 register stalls on Generic32 compilation setting as well. However
1334 in current implementation the partial register stalls are not eliminated
1335 very well - they can be introduced via subregs synthesized by combine
1336 and can happen in caller/callee saving sequences. Because this option
1337 pays back little on PPro based chips and is in conflict with partial reg
1338 dependencies used by Athlon/P4 based chips, it is better to leave it off
1339 for generic32 for now. */
1342 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1343 m_CORE2 | m_GENERIC,
1345 /* X86_TUNE_USE_HIMODE_FIOP */
1346 m_386 | m_486 | m_K6_GEODE,
1348 /* X86_TUNE_USE_SIMODE_FIOP */
1349 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),
1351 /* X86_TUNE_USE_MOV0 */
1354 /* X86_TUNE_USE_CLTD */
1355 ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),
1357 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1360 /* X86_TUNE_SPLIT_LONG_MOVES */
1363 /* X86_TUNE_READ_MODIFY_WRITE */
1366 /* X86_TUNE_READ_MODIFY */
1369 /* X86_TUNE_PROMOTE_QIMODE */
1370 m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
1371 | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,
1373 /* X86_TUNE_FAST_PREFIX */
1374 ~(m_PENT | m_486 | m_386),
1376 /* X86_TUNE_SINGLE_STRINGOP */
1377 m_386 | m_PENT4 | m_NOCONA,
1379 /* X86_TUNE_QIMODE_MATH */
1382 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1383 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL this option
1384 might be considered for Generic32 if our scheme for avoiding partial
1385 stalls was more effective. */
1388 /* X86_TUNE_PROMOTE_QI_REGS */
1391 /* X86_TUNE_PROMOTE_HI_REGS */
1394 /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop. */
1395 m_ATOM | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA
1396 | m_CORE2 | m_GENERIC,
1398 /* X86_TUNE_ADD_ESP_8 */
1399 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_K6_GEODE | m_386
1400 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1402 /* X86_TUNE_SUB_ESP_4 */
1403 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2
1406 /* X86_TUNE_SUB_ESP_8 */
1407 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_386 | m_486
1408 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1410 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1411 for DFmode copies */
1412 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1413 | m_GENERIC | m_GEODE),
1415 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1416 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1418 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1419 conflict here in between PPro/Pentium4 based chips that thread 128bit
1420 SSE registers as single units versus K8 based chips that divide SSE
1421 registers to two 64bit halves. This knob promotes all store destinations
1422 to be 128bit to allow register renaming on 128bit SSE units, but usually
1423 results in one extra microop on 64bit SSE units. Experimental results
1424 shows that disabling this option on P4 brings over 20% SPECfp regression,
1425 while enabling it on K8 brings roughly 2.4% regression that can be partly
1426 masked by careful scheduling of moves. */
1427 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
1430 /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
1433 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1434 are resolved on SSE register parts instead of whole registers, so we may
1435 maintain just lower part of scalar values in proper format leaving the
1436 upper part undefined. */
1439 /* X86_TUNE_SSE_TYPELESS_STORES */
1442 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1443 m_PPRO | m_PENT4 | m_NOCONA,
1445 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1446 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1448 /* X86_TUNE_PROLOGUE_USING_MOVE */
1449 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1451 /* X86_TUNE_EPILOGUE_USING_MOVE */
1452 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1454 /* X86_TUNE_SHIFT1 */
1457 /* X86_TUNE_USE_FFREEP */
1460 /* X86_TUNE_INTER_UNIT_MOVES */
1461 ~(m_AMD_MULTIPLE | m_GENERIC),
1463 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
1466 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
1467 than 4 branch instructions in the 16 byte window. */
1468 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
1471 /* X86_TUNE_SCHEDULE */
1472 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
1475 /* X86_TUNE_USE_BT */
1476 m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,
1478 /* X86_TUNE_USE_INCDEC */
1479 ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),
1481 /* X86_TUNE_PAD_RETURNS */
1482 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
1484 /* X86_TUNE_EXT_80387_CONSTANTS */
1485 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
1486 | m_CORE2 | m_GENERIC,
1488 /* X86_TUNE_SHORTEN_X87_SSE */
1491 /* X86_TUNE_AVOID_VECTOR_DECODE */
1494 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have same latency for HImode
1495 and SImode multiply, but 386 and 486 do HImode multiply faster. */
1498 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
1499 vector path on AMD machines. */
1500 m_K8 | m_GENERIC64 | m_AMDFAM10,
1502 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
1504 m_K8 | m_GENERIC64 | m_AMDFAM10,
1506 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR than a MOV. */
1510 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1511 but one byte longer. */
1514 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
1515 operand that cannot be represented using a modRM byte. The XOR
1516 replacement is long decoded, so this split helps here as well. */
1519 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion from FP to FP. */
1521 m_AMDFAM10 | m_GENERIC,
1523 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1524 from integer to FP. */
1527 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1528 with a subsequent conditional jump instruction into a single
1529 compare-and-branch uop. */
1532 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
1533 will impact LEA instruction selection. */
1537 /* Feature tests against the various architecture variations. */
1538 unsigned char ix86_arch_features[X86_ARCH_LAST];
1540 /* Feature tests against the various architecture variations, used to create
1541 ix86_arch_features based on the processor mask. */
1542 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1543 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1544 ~(m_386 | m_486 | m_PENT | m_K6),
1546 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1549 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1552 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1555 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1559 static const unsigned int x86_accumulate_outgoing_args
1560 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1563 static const unsigned int x86_arch_always_fancy_math_387
1564 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1565 | m_NOCONA | m_CORE2 | m_GENERIC;
1567 static enum stringop_alg stringop_alg = no_stringop;
1569 /* In case the average insn count for single function invocation is
1570 lower than this constant, emit fast (but longer) prologue and epilogue code. */
1572 #define FAST_PROLOGUE_INSN_COUNT 20
1574 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
1575 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1576 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1577 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1579 /* Array of the smallest class containing reg number REGNO, indexed by
1580 REGNO. Used by REGNO_REG_CLASS in i386.h. */
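/* For instance (a worked example, not part of the original source):
   regclass_map[AX_REG] below is AREG, while regclass_map[SP_REG] is
   NON_Q_REGS because %esp has no QImode (8-bit) subregister.  */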
1582 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1584 /* ax, dx, cx, bx */
1585 AREG, DREG, CREG, BREG,
1586 /* si, di, bp, sp */
1587 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1589 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1590 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1593 /* flags, fpsr, fpcr, frame */
1594 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1596 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1599 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1602 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1603 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1604 /* SSE REX registers */
1605 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1609 /* The "default" register map used in 32bit mode. */
1611 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1613 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1614 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1615 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1616 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1617 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1618 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1619 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1622 /* The "default" register map used in 64bit mode. */
1624 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1626 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1627 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1628 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1629 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1630 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1631 8,9,10,11,12,13,14,15, /* extended integer registers */
1632 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1635 /* Define the register numbers to be used in Dwarf debugging information.
1636 The SVR4 reference port C compiler uses the following register numbers
1637 in its Dwarf output code:
1638 0 for %eax (gcc regno = 0)
1639 1 for %ecx (gcc regno = 2)
1640 2 for %edx (gcc regno = 1)
1641 3 for %ebx (gcc regno = 3)
1642 4 for %esp (gcc regno = 7)
1643 5 for %ebp (gcc regno = 6)
1644 6 for %esi (gcc regno = 4)
1645 7 for %edi (gcc regno = 5)
1646 The following three DWARF register numbers are never generated by
1647 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1648 believes these numbers have these meanings.
1649 8 for %eip (no gcc equivalent)
1650 9 for %eflags (gcc regno = 17)
1651 10 for %trapno (no gcc equivalent)
1652 It is not at all clear how we should number the FP stack registers
1653 for the x86 architecture. If the version of SDB on x86/svr4 were
1654 a bit less brain dead with respect to floating-point then we would
1655 have a precedent to follow with respect to DWARF register numbers
1656 for x86 FP registers, but the SDB on x86/svr4 is so completely
1657 broken with respect to FP registers that it is hardly worth thinking
1658 of it as something to strive for compatibility with.
1659 The version of x86/svr4 SDB I have at the moment does (partially)
1660 seem to believe that DWARF register number 11 is associated with
1661 the x86 register %st(0), but that's about all. Higher DWARF
1662 register numbers don't seem to be associated with anything in
1663 particular, and even for DWARF regno 11, SDB only seems to under-
1664 stand that it should say that a variable lives in %st(0) (when
1665 asked via an `=' command) if we said it was in DWARF regno 11,
1666 but SDB still prints garbage when asked for the value of the
1667 variable in question (via a `/' command).
1668 (Also note that the labels SDB prints for various FP stack regs
1669 when doing an `x' command are all wrong.)
1670 Note that these problems generally don't affect the native SVR4
1671 C compiler because it doesn't allow the use of -O with -g and
1672 because when it is *not* optimizing, it allocates a memory
1673 location for each floating-point variable, and the memory
1674 location is what gets described in the DWARF AT_location
1675 attribute for the variable in question.
1676 Regardless of the severe mental illness of the x86/svr4 SDB, we
1677 do something sensible here and we use the following DWARF
1678 register numbers. Note that these are all stack-top-relative numbers:
1680 11 for %st(0) (gcc regno = 8)
1681 12 for %st(1) (gcc regno = 9)
1682 13 for %st(2) (gcc regno = 10)
1683 14 for %st(3) (gcc regno = 11)
1684 15 for %st(4) (gcc regno = 12)
1685 16 for %st(5) (gcc regno = 13)
1686 17 for %st(6) (gcc regno = 14)
1687 18 for %st(7) (gcc regno = 15)
1689 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1691 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1692 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1693 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1694 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1695 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1696 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1697 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1700 /* Test and compare insns in i386.md store the information needed to
1701 generate branch and scc insns here. */
1703 rtx ix86_compare_op0 = NULL_RTX;
1704 rtx ix86_compare_op1 = NULL_RTX;
1706 /* Define parameter passing and return registers. */
1708 static int const x86_64_int_parameter_registers[6] =
1710 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
1713 static int const x86_64_ms_abi_int_parameter_registers[4] =
1715 CX_REG, DX_REG, R8_REG, R9_REG
1718 static int const x86_64_int_return_registers[4] =
1720 AX_REG, DX_REG, DI_REG, SI_REG
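/* Illustration (a SysV psABI fact, not encoded in this table itself):
   a function returning __int128 uses the first two entries, AX_REG and
   DX_REG, as the low and high halves of the return value.  */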
1723 /* Define the structure for the machine field in struct function. */
1725 struct GTY(()) stack_local_entry {
1726 unsigned short mode;
1729 struct stack_local_entry *next;
1732 /* Structure describing stack frame layout.
1733 Stack grows downward:
1739 saved frame pointer if frame_pointer_needed
1740 <- HARD_FRAME_POINTER
1749 [va_arg registers]
1750 > to_allocate <- FRAME_POINTER
1762 HOST_WIDE_INT frame;
1764 int outgoing_arguments_size;
1767 HOST_WIDE_INT to_allocate;
1768 /* The offsets relative to ARG_POINTER. */
1769 HOST_WIDE_INT frame_pointer_offset;
1770 HOST_WIDE_INT hard_frame_pointer_offset;
1771 HOST_WIDE_INT stack_pointer_offset;
1773 /* When save_regs_using_mov is set, emit prologue using
1774 move instead of push instructions. */
1775 bool save_regs_using_mov;
1778 /* Code model option. */
1779 enum cmodel ix86_cmodel;
1781 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1783 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1785 /* Which unit we are generating floating point math for. */
1786 enum fpmath_unit ix86_fpmath;
1788 /* Which cpu we are scheduling for. */
1789 enum attr_cpu ix86_schedule;
1791 /* Which cpu we are optimizing for. */
1792 enum processor_type ix86_tune;
1794 /* Which instruction set architecture to use. */
1795 enum processor_type ix86_arch;
1797 /* True if the sse prefetch instruction is not a NOOP. */
1798 int x86_prefetch_sse;
1800 /* ix86_regparm_string as a number */
1801 static int ix86_regparm;
1803 /* -mstackrealign option */
1804 extern int ix86_force_align_arg_pointer;
1805 static const char ix86_force_align_arg_pointer_string[]
1806 = "force_align_arg_pointer";
1808 static rtx (*ix86_gen_leave) (void);
1809 static rtx (*ix86_gen_pop1) (rtx);
1810 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1811 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1812 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
1813 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1814 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1815 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1817 /* Preferred alignment for stack boundary in bits. */
1818 unsigned int ix86_preferred_stack_boundary;
1820 /* Alignment for incoming stack boundary in bits specified at the command line. */
1822 static unsigned int ix86_user_incoming_stack_boundary;
1824 /* Default alignment for incoming stack boundary in bits. */
1825 static unsigned int ix86_default_incoming_stack_boundary;
1827 /* Alignment for incoming stack boundary in bits. */
1828 unsigned int ix86_incoming_stack_boundary;
1830 /* The ABI used by the target. */
1831 enum calling_abi ix86_abi;
1833 /* Values 1-5: see jump.c */
1834 int ix86_branch_cost;
1836 /* Calling ABI specific va_list type nodes. */
1837 static GTY(()) tree sysv_va_list_type_node;
1838 static GTY(()) tree ms_va_list_type_node;
1840 /* Variables which are this size or smaller are put in the data/bss
1841 or ldata/lbss sections. */
1843 int ix86_section_threshold = 65536;
1845 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1846 char internal_label_prefix[16];
1847 int internal_label_prefix_len;
1849 /* Fence to use after loop using movnt. */
1852 /* Register class used for passing a given 64bit part of the argument.
1853 These represent classes as documented by the PS ABI, with the exception
1854 of the SSESF and SSEDF classes, which are basically the SSE class, except
1855 that gcc will use SF or DFmode moves instead of DImode to avoid reformatting penalties.
1857 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
1858 whenever possible (i.e. when the upper half contains only padding). */
1859 enum x86_64_reg_class
1862 X86_64_INTEGER_CLASS,
1863 X86_64_INTEGERSI_CLASS,
1870 X86_64_COMPLEX_X87_CLASS,
1874 #define MAX_CLASSES 4
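/* A worked example of the comment above (classification itself is done
   by classify_argument later in this file, not shown here): a 4-byte
   int argument is classified X86_64_INTEGERSI_CLASS and copied with an
   SImode move, while an 8-byte long gets X86_64_INTEGER_CLASS and a
   full DImode move.  */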
1876 /* Table of constants used by fldpi, fldln2, etc. */
1877 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1878 static bool ext_80387_constants_init = false;
1881 static struct machine_function * ix86_init_machine_status (void);
1882 static rtx ix86_function_value (const_tree, const_tree, bool);
1883 static rtx ix86_static_chain (const_tree, bool);
1884 static int ix86_function_regparm (const_tree, const_tree);
1885 static void ix86_compute_frame_layout (struct ix86_frame *);
1886 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1888 static void ix86_add_new_builtins (int);
1889 static rtx ix86_expand_vec_perm_builtin (tree);
1891 enum ix86_function_specific_strings
1893 IX86_FUNCTION_SPECIFIC_ARCH,
1894 IX86_FUNCTION_SPECIFIC_TUNE,
1895 IX86_FUNCTION_SPECIFIC_FPMATH,
1896 IX86_FUNCTION_SPECIFIC_MAX
1899 static char *ix86_target_string (int, int, const char *, const char *,
1900 const char *, bool);
1901 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1902 static void ix86_function_specific_save (struct cl_target_option *);
1903 static void ix86_function_specific_restore (struct cl_target_option *);
1904 static void ix86_function_specific_print (FILE *, int,
1905 struct cl_target_option *);
1906 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
1907 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
1908 static bool ix86_can_inline_p (tree, tree);
1909 static void ix86_set_current_function (tree);
1910 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
1912 static enum calling_abi ix86_function_abi (const_tree);
1915 #ifndef SUBTARGET32_DEFAULT_CPU
1916 #define SUBTARGET32_DEFAULT_CPU "i386"
1919 /* The svr4 ABI for the i386 says that records and unions are returned in memory. */
1921 #ifndef DEFAULT_PCC_STRUCT_RETURN
1922 #define DEFAULT_PCC_STRUCT_RETURN 1
1925 /* Whether -mtune= or -march= were specified */
1926 static int ix86_tune_defaulted;
1927 static int ix86_arch_specified;
1929 /* Bit flags that specify the ISA we are compiling for. */
1930 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
1932 /* A mask of ix86_isa_flags that includes bit X if X
1933 was set or cleared on the command line. */
1934 static int ix86_isa_flags_explicit;
1936 /* Define a set of ISAs which are available when a given ISA is
1937 enabled. MMX and SSE ISAs are handled separately. */
1939 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
1940 #define OPTION_MASK_ISA_3DNOW_SET \
1941 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
1943 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
1944 #define OPTION_MASK_ISA_SSE2_SET \
1945 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
1946 #define OPTION_MASK_ISA_SSE3_SET \
1947 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
1948 #define OPTION_MASK_ISA_SSSE3_SET \
1949 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
1950 #define OPTION_MASK_ISA_SSE4_1_SET \
1951 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
1952 #define OPTION_MASK_ISA_SSE4_2_SET \
1953 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
1954 #define OPTION_MASK_ISA_AVX_SET \
1955 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
1956 #define OPTION_MASK_ISA_FMA_SET \
1957 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
1959 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same as -msse4.2. */
1961 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
1963 #define OPTION_MASK_ISA_SSE4A_SET \
1964 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
1965 #define OPTION_MASK_ISA_FMA4_SET \
1966 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
1967 | OPTION_MASK_ISA_AVX_SET)
1968 #define OPTION_MASK_ISA_XOP_SET \
1969 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
1970 #define OPTION_MASK_ISA_LWP_SET \
1973 /* AES and PCLMUL need SSE2 because they use xmm registers. */
1974 #define OPTION_MASK_ISA_AES_SET \
1975 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
1976 #define OPTION_MASK_ISA_PCLMUL_SET \
1977 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
1979 #define OPTION_MASK_ISA_ABM_SET \
1980 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
1982 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
1983 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
1984 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
1985 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
1986 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
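/* As an illustration of the chaining above (an expansion worked out by
   hand, not a macro from this file), enabling -msse4.2 turns on the
   whole prerequisite chain:

     OPTION_MASK_ISA_SSE4_2_SET
       == OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1
        | OPTION_MASK_ISA_SSSE3  | OPTION_MASK_ISA_SSE3
        | OPTION_MASK_ISA_SSE2   | OPTION_MASK_ISA_SSE.  */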
1988 /* Define a set of ISAs which aren't available when a given ISA is
1989 disabled. MMX and SSE ISAs are handled separately. */
1991 #define OPTION_MASK_ISA_MMX_UNSET \
1992 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
1993 #define OPTION_MASK_ISA_3DNOW_UNSET \
1994 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
1995 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
1997 #define OPTION_MASK_ISA_SSE_UNSET \
1998 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
1999 #define OPTION_MASK_ISA_SSE2_UNSET \
2000 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
2001 #define OPTION_MASK_ISA_SSE3_UNSET \
2002 (OPTION_MASK_ISA_SSE3 \
2003 | OPTION_MASK_ISA_SSSE3_UNSET \
2004 | OPTION_MASK_ISA_SSE4A_UNSET )
2005 #define OPTION_MASK_ISA_SSSE3_UNSET \
2006 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2007 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2008 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2009 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2010 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2011 #define OPTION_MASK_ISA_AVX_UNSET \
2012 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2013 | OPTION_MASK_ISA_FMA4_UNSET)
2014 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
2016 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same as -mno-sse4.1. */
2018 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2020 #define OPTION_MASK_ISA_SSE4A_UNSET \
2021 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2023 #define OPTION_MASK_ISA_FMA4_UNSET \
2024 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2025 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2026 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2028 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2029 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2030 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2031 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2032 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2033 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2034 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2035 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
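/* Conversely (again expanding the macros above by hand), -mno-sse2
   clears everything that depends on SSE2:

     OPTION_MASK_ISA_SSE2_UNSET
       == SSE2 | SSE3 | SSSE3 | SSE4A | SSE4.1 | SSE4.2
        | AVX | FMA | FMA4 | XOP.  */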
2037 /* Vectorization library interface and handlers. */
2038 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
2039 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2040 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2042 /* Processor target table, indexed by processor number */
2045 const struct processor_costs *cost; /* Processor costs */
2046 const int align_loop; /* Default alignments. */
2047 const int align_loop_max_skip;
2048 const int align_jump;
2049 const int align_jump_max_skip;
2050 const int align_func;
2053 static const struct ptt processor_target_table[PROCESSOR_max] =
2055 {&i386_cost, 4, 3, 4, 3, 4},
2056 {&i486_cost, 16, 15, 16, 15, 16},
2057 {&pentium_cost, 16, 7, 16, 7, 16},
2058 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2059 {&geode_cost, 0, 0, 0, 0, 0},
2060 {&k6_cost, 32, 7, 32, 7, 32},
2061 {&athlon_cost, 16, 7, 16, 7, 16},
2062 {&pentium4_cost, 0, 0, 0, 0, 0},
2063 {&k8_cost, 16, 7, 16, 7, 16},
2064 {&nocona_cost, 0, 0, 0, 0, 0},
2065 {&core2_cost, 16, 10, 16, 10, 16},
2066 {&generic32_cost, 16, 7, 16, 7, 16},
2067 {&generic64_cost, 16, 10, 16, 10, 16},
2068 {&amdfam10_cost, 32, 24, 32, 7, 32},
2069 {&atom_cost, 16, 7, 16, 7, 16}
2072 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2098 /* Implement TARGET_HANDLE_OPTION. */
2101 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
2108 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2109 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2113 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2114 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2121 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2122 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2126 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2127 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2137 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2138 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2142 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2143 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2150 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2151 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2155 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2156 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2163 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2164 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2168 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2169 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2176 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2177 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2181 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2182 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2189 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2190 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2194 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2195 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2202 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2203 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2207 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2208 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2215 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2216 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2220 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2221 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2228 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2229 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2233 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2234 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2239 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2240 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2244 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2245 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2251 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2252 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2256 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2257 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2264 ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2265 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2269 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2270 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2277 ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2278 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2282 ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2283 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2290 ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2291 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2295 ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2296 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2303 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2304 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2308 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2309 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2316 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2317 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2321 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2322 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2329 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2330 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2334 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2335 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2342 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2343 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2347 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2348 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2355 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2356 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2360 ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2361 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2368 ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2369 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2373 ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2374 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2381 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2382 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2386 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2387 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2394 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2395 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2399 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2400 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
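/* Each SET/UNSET pair above sits in one arm of the switch (code) in
   ix86_handle_option.  A minimal sketch of the shape of one arm (the
   OPT_mmmx case label comes from i386.opt; shown here only as an
   illustration, not copied from this file):

     case OPT_mmmx:
       if (value)
	 {
	   ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
	   ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
	 }
       else
	 {
	   ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
	   ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
	 }
       return true;  */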
2409 /* Return a string that documents the current -m options. The caller is
2410 responsible for freeing the string. */
2413 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2414 const char *fpmath, bool add_nl_p)
2416 struct ix86_target_opts
2418 const char *option; /* option string */
2419 int mask; /* isa mask options */
2422 /* This table is ordered so that options like -msse4.2, which imply
2423 the preceding options, are matched first. */
2424 static struct ix86_target_opts isa_opts[] =
2426 { "-m64", OPTION_MASK_ISA_64BIT },
2427 { "-mfma4", OPTION_MASK_ISA_FMA4 },
2428 { "-mfma", OPTION_MASK_ISA_FMA },
2429 { "-mxop", OPTION_MASK_ISA_XOP },
2430 { "-mlwp", OPTION_MASK_ISA_LWP },
2431 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2432 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2433 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2434 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2435 { "-msse3", OPTION_MASK_ISA_SSE3 },
2436 { "-msse2", OPTION_MASK_ISA_SSE2 },
2437 { "-msse", OPTION_MASK_ISA_SSE },
2438 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2439 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2440 { "-mmmx", OPTION_MASK_ISA_MMX },
2441 { "-mabm", OPTION_MASK_ISA_ABM },
2442 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2443 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2444 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2445 { "-maes", OPTION_MASK_ISA_AES },
2446 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2450 static struct ix86_target_opts flag_opts[] =
2452 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2453 { "-m80387", MASK_80387 },
2454 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2455 { "-malign-double", MASK_ALIGN_DOUBLE },
2456 { "-mcld", MASK_CLD },
2457 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2458 { "-mieee-fp", MASK_IEEE_FP },
2459 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2460 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2461 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2462 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2463 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2464 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2465 { "-mno-red-zone", MASK_NO_RED_ZONE },
2466 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2467 { "-mrecip", MASK_RECIP },
2468 { "-mrtd", MASK_RTD },
2469 { "-msseregparm", MASK_SSEREGPARM },
2470 { "-mstack-arg-probe", MASK_STACK_PROBE },
2471 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2474 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2477 char target_other[40];
2486 memset (opts, '\0', sizeof (opts));
2488 /* Add -march= option. */
2491 opts[num][0] = "-march=";
2492 opts[num++][1] = arch;
2495 /* Add -mtune= option. */
2498 opts[num][0] = "-mtune=";
2499 opts[num++][1] = tune;
2502 /* Pick out the ISA options. */
2503 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2505 if ((isa & isa_opts[i].mask) != 0)
2507 opts[num++][0] = isa_opts[i].option;
2508 isa &= ~ isa_opts[i].mask;
2512 if (isa && add_nl_p)
2514 opts[num++][0] = isa_other;
2515 sprintf (isa_other, "(other isa: 0x%x)", isa);
2518 /* Add flag options. */
2519 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2521 if ((flags & flag_opts[i].mask) != 0)
2523 opts[num++][0] = flag_opts[i].option;
2524 flags &= ~ flag_opts[i].mask;
2528 if (flags && add_nl_p)
2530 opts[num++][0] = target_other;
2531 sprintf (target_other, "(other flags: 0x%x)", flags);
2534 /* Add -mfpmath= option. */
2537 opts[num][0] = "-mfpmath=";
2538 opts[num++][1] = fpmath;
2545 gcc_assert (num < ARRAY_SIZE (opts));
2547 /* Size the string. */
2549 sep_len = (add_nl_p) ? 3 : 1;
2550 for (i = 0; i < num; i++)
2553 for (j = 0; j < 2; j++)
2555 len += strlen (opts[i][j]);
2558 /* Build the string. */
2559 ret = ptr = (char *) xmalloc (len);
2562 for (i = 0; i < num; i++)
2566 for (j = 0; j < 2; j++)
2567 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2574 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2582 for (j = 0; j < 2; j++)
2585 memcpy (ptr, opts[i][j], len2[j]);
2587 line_len += len2[j];
2592 gcc_assert (ret + len >= ptr);
2597 /* Function that is callable from the debugger to print the current options. */
2600 ix86_debug_options (void)
2602 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2603 ix86_arch_string, ix86_tune_string,
2604 ix86_fpmath_string, true);
2608 fprintf (stderr, "%s\n\n", opts);
2612 fputs ("<no options>\n\n", stderr);
2617 /* Sometimes certain combinations of command options do not make
2618 sense on a particular target machine. You can define a macro
2619 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2620 defined, is executed once just after all the command options have been parsed.
2623 Don't use this macro to turn on various extra optimizations for
2624 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2627 override_options (bool main_args_p)
2630 unsigned int ix86_arch_mask, ix86_tune_mask;
2631 const bool ix86_tune_specified = (ix86_tune_string != NULL);
2636 /* Comes from final.c -- no real reason to change it. */
2637 #define MAX_CODE_ALIGN 16
2645 PTA_PREFETCH_SSE = 1 << 4,
2647 PTA_3DNOW_A = 1 << 6,
2651 PTA_POPCNT = 1 << 10,
2653 PTA_SSE4A = 1 << 12,
2654 PTA_NO_SAHF = 1 << 13,
2655 PTA_SSE4_1 = 1 << 14,
2656 PTA_SSE4_2 = 1 << 15,
2658 PTA_PCLMUL = 1 << 17,
2661 PTA_MOVBE = 1 << 20,
2669 const char *const name; /* processor name or nickname. */
2670 const enum processor_type processor;
2671 const enum attr_cpu schedule;
2672 const unsigned /*enum pta_flags*/ flags;
2674 const processor_alias_table[] =
2676 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2677 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2678 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2679 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2680 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2681 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2682 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2683 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2684 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2685 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2686 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2687 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2688 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2690 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2692 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2693 PTA_MMX | PTA_SSE | PTA_SSE2},
2694 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2695 PTA_MMX |PTA_SSE | PTA_SSE2},
2696 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2697 PTA_MMX | PTA_SSE | PTA_SSE2},
2698 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2699 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2700 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2701 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2702 | PTA_CX16 | PTA_NO_SAHF},
2703 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2704 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2705 | PTA_SSSE3 | PTA_CX16},
2706 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2707 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2708 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2709 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2710 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A |PTA_PREFETCH_SSE},
2711 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2712 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2713 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2714 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2715 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2716 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2717 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2718 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2719 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2720 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2721 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2722 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2723 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2724 {"x86-64", PROCESSOR_K8, CPU_K8,
2725 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2726 {"k8", PROCESSOR_K8, CPU_K8,
2727 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2728 | PTA_SSE2 | PTA_NO_SAHF},
2729 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2730 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2731 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2732 {"opteron", PROCESSOR_K8, CPU_K8,
2733 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2734 | PTA_SSE2 | PTA_NO_SAHF},
2735 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2736 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2737 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2738 {"athlon64", PROCESSOR_K8, CPU_K8,
2739 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2740 | PTA_SSE2 | PTA_NO_SAHF},
2741 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2742 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2743 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2744 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2745 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2746 | PTA_SSE2 | PTA_NO_SAHF},
2747 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2748 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2749 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2750 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2751 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2752 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2753 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2754 0 /* flags are only used for -march switch. */ },
2755 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2756 PTA_64BIT /* flags are only used for -march switch. */ },
2759 int const pta_size = ARRAY_SIZE (processor_alias_table);
2761 /* Set up prefix/suffix so the error messages refer to either the command
2762 line argument, or the attribute(target). */
2771 prefix = "option(\"";
2776 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2777 SUBTARGET_OVERRIDE_OPTIONS;
2780 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2781 SUBSUBTARGET_OVERRIDE_OPTIONS;
2784 /* -fPIC is the default for x86_64. */
2785 if (TARGET_MACHO && TARGET_64BIT)
2788 /* Set the default values for switches whose default depends on TARGET_64BIT
2789 in case they weren't overwritten by command line options. */
2792 /* Mach-O doesn't support omitting the frame pointer for now. */
2793 if (flag_omit_frame_pointer == 2)
2794 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2795 if (flag_asynchronous_unwind_tables == 2)
2796 flag_asynchronous_unwind_tables = 1;
2797 if (flag_pcc_struct_return == 2)
2798 flag_pcc_struct_return = 0;
2802 if (flag_omit_frame_pointer == 2)
2803 flag_omit_frame_pointer = 0;
2804 if (flag_asynchronous_unwind_tables == 2)
2805 flag_asynchronous_unwind_tables = 0;
2806 if (flag_pcc_struct_return == 2)
2807 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2810 /* Need to check -mtune=generic first. */
2811 if (ix86_tune_string)
2813 if (!strcmp (ix86_tune_string, "generic")
2814 || !strcmp (ix86_tune_string, "i686")
2815 /* As special support for cross compilers we read -mtune=native
2816 as -mtune=generic. With native compilers we won't see
2817 -mtune=native, as the driver will already have replaced it. */
2818 || !strcmp (ix86_tune_string, "native"))
2821 ix86_tune_string = "generic64";
2823 ix86_tune_string = "generic32";
2825 /* If this call is for setting the option attribute, allow the
2826 generic32/generic64 that was previously set. */
2827 else if (!main_args_p
2828 && (!strcmp (ix86_tune_string, "generic32")
2829 || !strcmp (ix86_tune_string, "generic64")))
2831 else if (!strncmp (ix86_tune_string, "generic", 7))
2832 error ("bad value (%s) for %stune=%s %s",
2833 ix86_tune_string, prefix, suffix, sw);
2834 else if (!strcmp (ix86_tune_string, "x86-64"))
2835 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2836 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2837 prefix, suffix, prefix, suffix, prefix, suffix);
2841 if (ix86_arch_string)
2842 ix86_tune_string = ix86_arch_string;
2843 if (!ix86_tune_string)
2845 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2846 ix86_tune_defaulted = 1;
2849 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2850 need to use a sensible tune option. */
2851 if (!strcmp (ix86_tune_string, "generic")
2852 || !strcmp (ix86_tune_string, "x86-64")
2853 || !strcmp (ix86_tune_string, "i686"))
2856 ix86_tune_string = "generic64";
2858 ix86_tune_string = "generic32";
2862 if (ix86_stringop_string)
2864 if (!strcmp (ix86_stringop_string, "rep_byte"))
2865 stringop_alg = rep_prefix_1_byte;
2866 else if (!strcmp (ix86_stringop_string, "libcall"))
2867 stringop_alg = libcall;
2868 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2869 stringop_alg = rep_prefix_4_byte;
2870 else if (!strcmp (ix86_stringop_string, "rep_8byte")
2872 /* rep; movq isn't available in 32-bit code. */
2873 stringop_alg = rep_prefix_8_byte;
2874 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2875 stringop_alg = loop_1_byte;
2876 else if (!strcmp (ix86_stringop_string, "loop"))
2877 stringop_alg = loop;
2878 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2879 stringop_alg = unrolled_loop;
2881 error ("bad value (%s) for %sstringop-strategy=%s %s",
2882 ix86_stringop_string, prefix, suffix, sw);
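/* So, for example, -mstringop-strategy=unrolled_loop forces every
   expanded memcpy/memset-style operation to use the unrolled-loop
   algorithm instead of whatever the per-processor cost tables would
   otherwise pick.  */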
2885 if (!ix86_arch_string)
2886 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
2888 ix86_arch_specified = 1;
2890 /* Validate -mabi= value. */
2891 if (ix86_abi_string)
2893 if (strcmp (ix86_abi_string, "sysv") == 0)
2894 ix86_abi = SYSV_ABI;
2895 else if (strcmp (ix86_abi_string, "ms") == 0)
2898 error ("unknown ABI (%s) for %sabi=%s %s",
2899 ix86_abi_string, prefix, suffix, sw);
2902 ix86_abi = DEFAULT_ABI;
2904 if (ix86_cmodel_string != 0)
2906 if (!strcmp (ix86_cmodel_string, "small"))
2907 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2908 else if (!strcmp (ix86_cmodel_string, "medium"))
2909 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
2910 else if (!strcmp (ix86_cmodel_string, "large"))
2911 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
2913 error ("code model %s does not support PIC mode", ix86_cmodel_string);
2914 else if (!strcmp (ix86_cmodel_string, "32"))
2915 ix86_cmodel = CM_32;
2916 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
2917 ix86_cmodel = CM_KERNEL;
2919 error ("bad value (%s) for %scmodel=%s %s",
2920 ix86_cmodel_string, prefix, suffix, sw);
2924 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
2925 use of rip-relative addressing. This eliminates fixups that
2926 would otherwise be needed if this object is to be placed in a
2927 DLL, and is essentially just as efficient as direct addressing. */
2928 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
2929 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
2930 else if (TARGET_64BIT)
2931 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2933 ix86_cmodel = CM_32;
2935 if (ix86_asm_string != 0)
2938 && !strcmp (ix86_asm_string, "intel"))
2939 ix86_asm_dialect = ASM_INTEL;
2940 else if (!strcmp (ix86_asm_string, "att"))
2941 ix86_asm_dialect = ASM_ATT;
2943 error ("bad value (%s) for %sasm=%s %s",
2944 ix86_asm_string, prefix, suffix, sw);
2946 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
2947 error ("code model %qs not supported in the %s bit mode",
2948 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
2949 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
2950 sorry ("%i-bit mode not compiled in",
2951 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
2953 for (i = 0; i < pta_size; i++)
2954 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
2956 ix86_schedule = processor_alias_table[i].schedule;
2957 ix86_arch = processor_alias_table[i].processor;
2958 /* Default cpu tuning to the architecture. */
2959 ix86_tune = ix86_arch;
2961 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2962 error ("CPU you selected does not support x86-64 instruction set");
2965 if (processor_alias_table[i].flags & PTA_MMX
2966 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
2967 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
2968 if (processor_alias_table[i].flags & PTA_3DNOW
2969 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
2970 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
2971 if (processor_alias_table[i].flags & PTA_3DNOW_A
2972 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
2973 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
2974 if (processor_alias_table[i].flags & PTA_SSE
2975 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
2976 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
2977 if (processor_alias_table[i].flags & PTA_SSE2
2978 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2979 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
2980 if (processor_alias_table[i].flags & PTA_SSE3
2981 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
2982 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
2983 if (processor_alias_table[i].flags & PTA_SSSE3
2984 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
2985 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
2986 if (processor_alias_table[i].flags & PTA_SSE4_1
2987 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
2988 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
2989 if (processor_alias_table[i].flags & PTA_SSE4_2
2990 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
2991 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
2992 if (processor_alias_table[i].flags & PTA_AVX
2993 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
2994 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
2995 if (processor_alias_table[i].flags & PTA_FMA
2996 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
2997 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
2998 if (processor_alias_table[i].flags & PTA_SSE4A
2999 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3000 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3001 if (processor_alias_table[i].flags & PTA_FMA4
3002 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3003 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3004 if (processor_alias_table[i].flags & PTA_XOP
3005 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3006 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3007 if (processor_alias_table[i].flags & PTA_LWP
3008 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3009 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3010 if (processor_alias_table[i].flags & PTA_ABM
3011 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3012 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3013 if (processor_alias_table[i].flags & PTA_CX16
3014 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3015 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3016 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3017 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3018 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3019 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3020 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3021 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3022 if (processor_alias_table[i].flags & PTA_MOVBE
3023 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3024 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3025 if (processor_alias_table[i].flags & PTA_AES
3026 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3027 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3028 if (processor_alias_table[i].flags & PTA_PCLMUL
3029 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3030 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3031 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3032 x86_prefetch_sse = true;
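/* Worked example of the propagation above: -march=core2 carries
   PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_SSSE3
   | PTA_CX16 in the alias table, so the matching OPTION_MASK_ISA bits
   are switched on here unless the user set them explicitly.  */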
3037 if (!strcmp (ix86_arch_string, "generic"))
3038 error ("generic CPU can be used only for %stune=%s %s",
3039 prefix, suffix, sw);
3040 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3041 error ("bad value (%s) for %sarch=%s %s",
3042 ix86_arch_string, prefix, suffix, sw);
3044 ix86_arch_mask = 1u << ix86_arch;
3045 for (i = 0; i < X86_ARCH_LAST; ++i)
3046 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
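/* E.g. for -march=core2, ix86_arch_mask is 1u << PROCESSOR_CORE2
   (i.e. m_CORE2); since m_CORE2 is not among the excluded processors
   in ~(m_386 | m_486 | m_PENT | m_K6) above,
   ix86_arch_features[X86_ARCH_CMOVE] becomes 1.  */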
3048 for (i = 0; i < pta_size; i++)
3049 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3051 ix86_schedule = processor_alias_table[i].schedule;
3052 ix86_tune = processor_alias_table[i].processor;
3053 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3055 if (ix86_tune_defaulted)
3057 ix86_tune_string = "x86-64";
3058 for (i = 0; i < pta_size; i++)
3059 if (! strcmp (ix86_tune_string,
3060 processor_alias_table[i].name))
3062 ix86_schedule = processor_alias_table[i].schedule;
3063 ix86_tune = processor_alias_table[i].processor;
3066 error ("CPU you selected does not support x86-64 instruction set");
3069 /* Intel CPUs have always interpreted SSE prefetch instructions as
3070 NOPs; so, we can enable SSE prefetch instructions even when
3071 -mtune (rather than -march) points us to a processor that has them.
3072 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3073 higher processors. */
3075 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3076 x86_prefetch_sse = true;
3080 if (ix86_tune_specified && i == pta_size)
3081 error ("bad value (%s) for %stune=%s %s",
3082 ix86_tune_string, prefix, suffix, sw);
3084 ix86_tune_mask = 1u << ix86_tune;
3085 for (i = 0; i < X86_TUNE_LAST; ++i)
3086 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3089 ix86_cost = &ix86_size_cost;
3091 ix86_cost = processor_target_table[ix86_tune].cost;
3093 /* Arrange to set up i386_stack_locals for all functions. */
3094 init_machine_status = ix86_init_machine_status;
3096 /* Validate -mregparm= value. */
3097 if (ix86_regparm_string)
3100 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3101 i = atoi (ix86_regparm_string);
3102 if (i < 0 || i > REGPARM_MAX)
3103 error ("%sregparm=%d%s is not between 0 and %d",
3104 prefix, i, suffix, REGPARM_MAX);
3109 ix86_regparm = REGPARM_MAX;
3111 /* If the user has provided any of the -malign-* options,
3112 warn and use that value only if -falign-* is not set.
3113 Remove this code in GCC 3.2 or later. */
3114 if (ix86_align_loops_string)
3116 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3117 prefix, suffix, suffix);
3118 if (align_loops == 0)
3120 i = atoi (ix86_align_loops_string);
3121 if (i < 0 || i > MAX_CODE_ALIGN)
3122 error ("%salign-loops=%d%s is not between 0 and %d",
3123 prefix, i, suffix, MAX_CODE_ALIGN);
3125 align_loops = 1 << i;
3129 if (ix86_align_jumps_string)
3131 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3132 prefix, suffix, suffix);
3133 if (align_jumps == 0)
3135 i = atoi (ix86_align_jumps_string);
3136 if (i < 0 || i > MAX_CODE_ALIGN)
3137 error ("%salign-jumps=%d%s is not between 0 and %d",
3138 prefix, i, suffix, MAX_CODE_ALIGN);
3140 align_jumps = 1 << i;
3144 if (ix86_align_funcs_string)
3146 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3147 prefix, suffix, suffix);
3148 if (align_functions == 0)
3150 i = atoi (ix86_align_funcs_string);
3151 if (i < 0 || i > MAX_CODE_ALIGN)
3152 error ("%salign-functions=%d%s is not between 0 and %d",
3153 prefix, i, suffix, MAX_CODE_ALIGN);
3155 align_functions = 1 << i;
3159 /* Default align_* from the processor table. */
3160 if (align_loops == 0)
3162 align_loops = processor_target_table[ix86_tune].align_loop;
3163 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3165 if (align_jumps == 0)
3167 align_jumps = processor_target_table[ix86_tune].align_jump;
3168 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3170 if (align_functions == 0)
3172 align_functions = processor_target_table[ix86_tune].align_func;
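/* With these defaults, e.g. -mtune=core2 gives align_loops == 16 with
   align_loops_max_skip == 10, straight from the {&core2_cost, 16, 10,
   16, 10, 16} entry in processor_target_table above.  */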
3175 /* Validate -mbranch-cost= value, or provide default. */
3176 ix86_branch_cost = ix86_cost->branch_cost;
3177 if (ix86_branch_cost_string)
3179 i = atoi (ix86_branch_cost_string);
3181 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3183 ix86_branch_cost = i;
3185 if (ix86_section_threshold_string)
3187 i = atoi (ix86_section_threshold_string);
3189 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3191 ix86_section_threshold = i;
3194 if (ix86_tls_dialect_string)
3196 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3197 ix86_tls_dialect = TLS_DIALECT_GNU;
3198 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3199 ix86_tls_dialect = TLS_DIALECT_GNU2;
3201 error ("bad value (%s) for %stls-dialect=%s %s",
3202 ix86_tls_dialect_string, prefix, suffix, sw);
3205 if (ix87_precision_string)
3207 i = atoi (ix87_precision_string);
3208 if (i != 32 && i != 64 && i != 80)
3209 error ("pc%d is not a valid precision setting (32, 64 or 80)", i);
3214 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3216 /* Enable by default the SSE and MMX builtins. Do allow the user to
3217 explicitly disable any of these. In particular, disabling SSE and
3218 MMX for kernel code is extremely useful. */
3219 if (!ix86_arch_specified)
3221 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3222 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3225 warning (0, "%srtd%s is ignored in 64-bit mode", prefix, suffix);
3229 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3231 if (!ix86_arch_specified)
3233 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3235 /* The i386 ABI does not specify a red zone. It still makes sense to use it
3236 when the programmer takes care to keep the stack from being destroyed. */
3237 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3238 target_flags |= MASK_NO_RED_ZONE;
3241 /* Keep nonleaf frame pointers. */
3242 if (flag_omit_frame_pointer)
3243 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3244 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3245 flag_omit_frame_pointer = 1;
3247 /* If we're doing fast math, we don't care about comparison order
3248 wrt NaNs. This lets us use a shorter comparison sequence. */
3249 if (flag_finite_math_only)
3250 target_flags &= ~MASK_IEEE_FP;
3252 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3253 since the insns won't need emulation. */
3254 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3255 target_flags &= ~MASK_NO_FANCY_MATH_387;
3257 /* Likewise, if the target doesn't have a 387, or we've specified
3258 software floating point, don't use 387 inline intrinsics. */
3260 target_flags |= MASK_NO_FANCY_MATH_387;
3262 /* Turn on MMX builtins for -msse. */
3265 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3266 x86_prefetch_sse = true;
3269 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3270 if (TARGET_SSE4_2 || TARGET_ABM)
3271 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3273 /* Validate -mpreferred-stack-boundary= value or default it to
3274 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3275 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3276 if (ix86_preferred_stack_boundary_string)
3278 i = atoi (ix86_preferred_stack_boundary_string);
3279 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3280 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3281 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3283 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
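/* E.g. -mpreferred-stack-boundary=4 yields (1 << 4) * BITS_PER_UNIT
   == 128 bits, i.e. the 16-byte stack alignment that the x86-64 psABI
   mandates at call sites.  */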
3286 /* Set the default value for -mstackrealign. */
3287 if (ix86_force_align_arg_pointer == -1)
3288 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3290 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3292 /* Validate -mincoming-stack-boundary= value or default it to
3293 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3294 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3295 if (ix86_incoming_stack_boundary_string)
3297 i = atoi (ix86_incoming_stack_boundary_string);
3298 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3299 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3300 i, TARGET_64BIT ? 4 : 2);
3303 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3304 ix86_incoming_stack_boundary
3305 = ix86_user_incoming_stack_boundary;
3309 /* Accept -msseregparm only if at least SSE support is enabled. */
3310 if (TARGET_SSEREGPARM
3312 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3314 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3315 if (ix86_fpmath_string != 0)
3317 if (! strcmp (ix86_fpmath_string, "387"))
3318 ix86_fpmath = FPMATH_387;
3319 else if (! strcmp (ix86_fpmath_string, "sse"))
3323 warning (0, "SSE instruction set disabled, using 387 arithmetic");
3324 ix86_fpmath = FPMATH_387;
3327 ix86_fpmath = FPMATH_SSE;
3329 else if (! strcmp (ix86_fpmath_string, "387,sse")
3330 || ! strcmp (ix86_fpmath_string, "387+sse")
3331 || ! strcmp (ix86_fpmath_string, "sse,387")
3332 || ! strcmp (ix86_fpmath_string, "sse+387")
3333 || ! strcmp (ix86_fpmath_string, "both"))
3337 warning (0, "SSE instruction set disabled, using 387 arithmetic");
3338 ix86_fpmath = FPMATH_387;
3340 else if (!TARGET_80387)
3342 warning (0, "387 instruction set disabled, using SSE arithmetic");
3343 ix86_fpmath = FPMATH_SSE;
3346 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3349 error ("bad value (%s) for %sfpmath=%s %s",
3350 ix86_fpmath_string, prefix, suffix, sw);
3353 /* If the i387 is disabled, then do not return values in it. */
3355 target_flags &= ~MASK_FLOAT_RETURNS;
3357 /* Use an external vectorized library for vectorizing intrinsics. */
3358 if (ix86_veclibabi_string)
3360 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3361 ix86_veclib_handler = ix86_veclibabi_svml;
3362 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3363 ix86_veclib_handler = ix86_veclibabi_acml;
3365 error ("unknown vectorization library ABI type (%s) for "
3366 "%sveclibabi=%s %s", ix86_veclibabi_string,
3367 prefix, suffix, sw);
3370 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3371 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3373 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3375 /* ??? Unwind info is not correct around the CFG unless either a frame
3376 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3377 unwind info generation to be aware of the CFG and propagating states around edges. */
3379 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3380 || flag_exceptions || flag_non_call_exceptions)
3381 && flag_omit_frame_pointer
3382 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3384 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3385 warning (0, "unwind tables currently require either a frame pointer "
3386 "or %saccumulate-outgoing-args%s for correctness",
3388 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3391 /* If stack probes are required, the space used for large function
3392 arguments on the stack must also be probed, so enable
3393 -maccumulate-outgoing-args so this happens in the prologue. */
3394 if (TARGET_STACK_PROBE
3395 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3397 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3398 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3399 "for correctness", prefix, suffix);
3400 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3403 /* For sane SSE instruction set generation we need the fcomi instruction.
3404 It is safe to enable all CMOVE instructions. */
3408 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3411 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3412 p = strchr (internal_label_prefix, 'X');
3413 internal_label_prefix_len = p - internal_label_prefix;
3417 /* When the scheduling description is not available, disable the scheduler pass
3418 so it won't slow down compilation and make x87 code slower. */
3419 if (!TARGET_SCHEDULE)
3420 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3422 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3423 set_param_value ("simultaneous-prefetches",
3424 ix86_cost->simultaneous_prefetches);
3425 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3426 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3427 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3428 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3429 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3430 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
3432 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3433 can be optimized to ap = __builtin_next_arg (0). */
3435 targetm.expand_builtin_va_start = NULL;
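/* A sketch of the optimization referred to above (hypothetical user code,
   not part of this file): when va_list is a plain "char *",

     va_list ap;
     va_start (ap, last);

   can be folded by the generic code into the equivalent of
   "ap = __builtin_next_arg (last);", which is why no target-specific
   expander is registered here. */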
3439 ix86_gen_leave = gen_leave_rex64;
3440 ix86_gen_pop1 = gen_popdi1;
3441 ix86_gen_add3 = gen_adddi3;
3442 ix86_gen_sub3 = gen_subdi3;
3443 ix86_gen_sub3_carry = gen_subdi3_carry;
3444 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3445 ix86_gen_monitor = gen_sse3_monitor64;
3446 ix86_gen_andsp = gen_anddi3;
3450 ix86_gen_leave = gen_leave;
3451 ix86_gen_pop1 = gen_popsi1;
3452 ix86_gen_add3 = gen_addsi3;
3453 ix86_gen_sub3 = gen_subsi3;
3454 ix86_gen_sub3_carry = gen_subsi3_carry;
3455 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3456 ix86_gen_monitor = gen_sse3_monitor;
3457 ix86_gen_andsp = gen_andsi3;
3461 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3463 target_flags |= MASK_CLD & ~target_flags_explicit;
3466 /* Save the initial options in case the user uses function-specific options. */
3468 target_option_default_node = target_option_current_node
3469 = build_target_option_node ();
3472 /* Update register usage after having seen the compiler flags. */
3475 ix86_conditional_register_usage (void)
3480 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3482 if (fixed_regs[i] > 1)
3483 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3484 if (call_used_regs[i] > 1)
3485 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
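/* Editorial note on the encoding handled above (inferred from the code,
   not documented in the original): table entries greater than 1 are
   conditional -- an entry of 2 makes the register fixed or call-used only
   for 32-bit targets, and an entry of 3 only for 64-bit targets. */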
3488 /* The PIC register, if it exists, is fixed. */
3489 j = PIC_OFFSET_TABLE_REGNUM;
3490 if (j != INVALID_REGNUM)
3491 fixed_regs[j] = call_used_regs[j] = 1;
3493 /* The MS_ABI changes the set of call-used registers. */
3494 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
3496 call_used_regs[SI_REG] = 0;
3497 call_used_regs[DI_REG] = 0;
3498 call_used_regs[XMM6_REG] = 0;
3499 call_used_regs[XMM7_REG] = 0;
3500 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3501 call_used_regs[i] = 0;
3504 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3505 other call-clobbered regs for 64-bit. */
3508 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3510 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3511 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3512 && call_used_regs[i])
3513 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3516 /* If MMX is disabled, squash the registers. */
3518 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3519 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3520 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3522 /* If SSE is disabled, squash the registers. */
3524 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3525 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3526 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3528 /* If the FPU is disabled, squash the registers. */
3529 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3530 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3531 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3532 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3534 /* If 32-bit, squash the 64-bit registers. */
3537 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3539 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3545 /* Save the current options */
3548 ix86_function_specific_save (struct cl_target_option *ptr)
3550 ptr->arch = ix86_arch;
3551 ptr->schedule = ix86_schedule;
3552 ptr->tune = ix86_tune;
3553 ptr->fpmath = ix86_fpmath;
3554 ptr->branch_cost = ix86_branch_cost;
3555 ptr->tune_defaulted = ix86_tune_defaulted;
3556 ptr->arch_specified = ix86_arch_specified;
3557 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3558 ptr->target_flags_explicit = target_flags_explicit;
3560 /* The fields are char but the variables are not; make sure the
3561 values fit in the fields. */
3562 gcc_assert (ptr->arch == ix86_arch);
3563 gcc_assert (ptr->schedule == ix86_schedule);
3564 gcc_assert (ptr->tune == ix86_tune);
3565 gcc_assert (ptr->fpmath == ix86_fpmath);
3566 gcc_assert (ptr->branch_cost == ix86_branch_cost);
3569 /* Restore the current options */
3572 ix86_function_specific_restore (struct cl_target_option *ptr)
3574 enum processor_type old_tune = ix86_tune;
3575 enum processor_type old_arch = ix86_arch;
3576 unsigned int ix86_arch_mask, ix86_tune_mask;
3579 ix86_arch = (enum processor_type) ptr->arch;
3580 ix86_schedule = (enum attr_cpu) ptr->schedule;
3581 ix86_tune = (enum processor_type) ptr->tune;
3582 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
3583 ix86_branch_cost = ptr->branch_cost;
3584 ix86_tune_defaulted = ptr->tune_defaulted;
3585 ix86_arch_specified = ptr->arch_specified;
3586 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3587 target_flags_explicit = ptr->target_flags_explicit;
3589 /* Recreate the arch feature tests if the arch changed */
3590 if (old_arch != ix86_arch)
3592 ix86_arch_mask = 1u << ix86_arch;
3593 for (i = 0; i < X86_ARCH_LAST; ++i)
3594 ix86_arch_features[i]
3595 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3598 /* Recreate the tune optimization tests */
3599 if (old_tune != ix86_tune)
3601 ix86_tune_mask = 1u << ix86_tune;
3602 for (i = 0; i < X86_TUNE_LAST; ++i)
3603 ix86_tune_features[i]
3604 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3608 /* Print the current options */
3611 ix86_function_specific_print (FILE *file, int indent,
3612 struct cl_target_option *ptr)
3615 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3616 NULL, NULL, NULL, false);
3618 fprintf (file, "%*sarch = %d (%s)\n",
3621 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3622 ? cpu_names[ptr->arch]
3625 fprintf (file, "%*stune = %d (%s)\n",
3628 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3629 ? cpu_names[ptr->tune]
3632 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3633 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3634 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3635 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3639 fprintf (file, "%*s%s\n", indent, "", target_string);
3640 free (target_string);
3645 /* Inner function to process the attribute((target(...))); take an argument and
3646 set the current options from the argument. If we have a list, recursively go
3647 over the list. */
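/* For illustration (hypothetical user code, not part of this file), the
   strings parsed here come from declarations such as:

     int foo (int) __attribute__ ((target ("sse4.2,no-avx,arch=core2")));

   Each comma-separated item is matched against the table below, with a
   "no-" prefix inverting the option. */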
3650 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3655 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3656 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3657 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3658 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3673 enum ix86_opt_type type;
3678 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3679 IX86_ATTR_ISA ("abm", OPT_mabm),
3680 IX86_ATTR_ISA ("aes", OPT_maes),
3681 IX86_ATTR_ISA ("avx", OPT_mavx),
3682 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3683 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3684 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3685 IX86_ATTR_ISA ("sse", OPT_msse),
3686 IX86_ATTR_ISA ("sse2", OPT_msse2),
3687 IX86_ATTR_ISA ("sse3", OPT_msse3),
3688 IX86_ATTR_ISA ("sse4", OPT_msse4),
3689 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3690 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3691 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3692 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3693 IX86_ATTR_ISA ("fma4", OPT_mfma4),
3694 IX86_ATTR_ISA ("xop", OPT_mxop),
3695 IX86_ATTR_ISA ("lwp", OPT_mlwp),
3697 /* string options */
3698 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3699 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3700 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3703 IX86_ATTR_YES ("cld",
3707 IX86_ATTR_NO ("fancy-math-387",
3708 OPT_mfancy_math_387,
3709 MASK_NO_FANCY_MATH_387),
3711 IX86_ATTR_YES ("ieee-fp",
3715 IX86_ATTR_YES ("inline-all-stringops",
3716 OPT_minline_all_stringops,
3717 MASK_INLINE_ALL_STRINGOPS),
3719 IX86_ATTR_YES ("inline-stringops-dynamically",
3720 OPT_minline_stringops_dynamically,
3721 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3723 IX86_ATTR_NO ("align-stringops",
3724 OPT_mno_align_stringops,
3725 MASK_NO_ALIGN_STRINGOPS),
3727 IX86_ATTR_YES ("recip",
3733 /* If this is a list, recurse to get the options. */
3734 if (TREE_CODE (args) == TREE_LIST)
3738 for (; args; args = TREE_CHAIN (args))
3739 if (TREE_VALUE (args)
3740 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3746 else if (TREE_CODE (args) != STRING_CST)
3749 /* Handle multiple arguments separated by commas. */
3750 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3752 while (next_optstr && *next_optstr != '\0')
3754 char *p = next_optstr;
3756 char *comma = strchr (next_optstr, ',');
3757 const char *opt_string;
3758 size_t len, opt_len;
3763 enum ix86_opt_type type = ix86_opt_unknown;
3769 len = comma - next_optstr;
3770 next_optstr = comma + 1;
3778 /* Recognize no-xxx. */
3779 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3788 /* Find the option. */
3791 for (i = 0; i < ARRAY_SIZE (attrs); i++)
3793 type = attrs[i].type;
3794 opt_len = attrs[i].len;
3795 if (ch == attrs[i].string[0]
3796 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3797 && memcmp (p, attrs[i].string, opt_len) == 0)
3800 mask = attrs[i].mask;
3801 opt_string = attrs[i].string;
3806 /* Process the option. */
3809 error ("attribute(target(\"%s\")) is unknown", orig_p);
3813 else if (type == ix86_opt_isa)
3814 ix86_handle_option (opt, p, opt_set_p);
3816 else if (type == ix86_opt_yes || type == ix86_opt_no)
3818 if (type == ix86_opt_no)
3819 opt_set_p = !opt_set_p;
3822 target_flags |= mask;
3824 target_flags &= ~mask;
3827 else if (type == ix86_opt_str)
3831 error ("option(\"%s\") was already specified", opt_string);
3835 p_strings[opt] = xstrdup (p + opt_len);
3845 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3848 ix86_valid_target_attribute_tree (tree args)
3850 const char *orig_arch_string = ix86_arch_string;
3851 const char *orig_tune_string = ix86_tune_string;
3852 const char *orig_fpmath_string = ix86_fpmath_string;
3853 int orig_tune_defaulted = ix86_tune_defaulted;
3854 int orig_arch_specified = ix86_arch_specified;
3855 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3858 struct cl_target_option *def
3859 = TREE_TARGET_OPTION (target_option_default_node);
3861 /* Process each of the options on the chain. */
3862 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3865 /* If the changed options are different from the default, rerun override_options,
3866 and then save the options away. The string options are attribute options,
3867 and will be undone when we copy the save structure. */
3868 if (ix86_isa_flags != def->ix86_isa_flags
3869 || target_flags != def->target_flags
3870 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3871 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3872 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3874 /* If we are using the default tune= or arch=, undo the string assigned,
3875 and use the default. */
3876 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3877 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3878 else if (!orig_arch_specified)
3879 ix86_arch_string = NULL;
3881 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3882 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3883 else if (orig_tune_defaulted)
3884 ix86_tune_string = NULL;
3886 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
3887 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3888 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3889 else if (!TARGET_64BIT && TARGET_SSE)
3890 ix86_fpmath_string = "sse,387";
3892 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3893 override_options (false);
3895 /* Add any builtin functions with the new isa if any. */
3896 ix86_add_new_builtins (ix86_isa_flags);
3898 /* Save the current options unless we are validating options for
3899 #pragma. */
3900 t = build_target_option_node ();
3902 ix86_arch_string = orig_arch_string;
3903 ix86_tune_string = orig_tune_string;
3904 ix86_fpmath_string = orig_fpmath_string;
3906 /* Free up memory allocated to hold the strings */
3907 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
3908 if (option_strings[i])
3909 free (option_strings[i]);
3915 /* Hook to validate attribute((target("string"))). */
3918 ix86_valid_target_attribute_p (tree fndecl,
3919 tree ARG_UNUSED (name),
3921 int ARG_UNUSED (flags))
3923 struct cl_target_option cur_target;
3925 tree old_optimize = build_optimization_node ();
3926 tree new_target, new_optimize;
3927 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
3929 /* If the function changed the optimization levels as well as setting target
3930 options, start with the optimizations specified. */
3931 if (func_optimize && func_optimize != old_optimize)
3932 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
3934 /* The target attributes may also change some optimization flags, so update
3935 the optimization options if necessary. */
3936 cl_target_option_save (&cur_target);
3937 new_target = ix86_valid_target_attribute_tree (args);
3938 new_optimize = build_optimization_node ();
3945 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
3947 if (old_optimize != new_optimize)
3948 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
3951 cl_target_option_restore (&cur_target);
3953 if (old_optimize != new_optimize)
3954 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
3960 /* Hook to determine if one function can safely inline another. */
3963 ix86_can_inline_p (tree caller, tree callee)
3966 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
3967 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
3969 /* If callee has no option attributes, then it is ok to inline. */
3973 /* If the caller has no option attributes but the callee does, then it is
3974 not ok to inline. */
3980 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
3981 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
3983 /* Callee's ISA options should be a subset of the caller's, i.e. an SSE4
3984 function can inline an SSE2 function but an SSE2 function can't inline
3985 an SSE4 function. */
3986 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
3987 != callee_opts->ix86_isa_flags)
3990 /* See if we have the same non-isa options. */
3991 else if (caller_opts->target_flags != callee_opts->target_flags)
3994 /* See if arch, tune, etc. are the same. */
3995 else if (caller_opts->arch != callee_opts->arch)
3998 else if (caller_opts->tune != callee_opts->tune)
4001 else if (caller_opts->fpmath != callee_opts->fpmath)
4004 else if (caller_opts->branch_cost != callee_opts->branch_cost)
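/* A worked example of the ISA-subset rule above (hypothetical user code):

     static void __attribute__ ((target ("sse2"))) helper (void) {}
     void __attribute__ ((target ("sse4.2"))) wrapper (void) { helper (); }

   Here helper's ISA flags are a subset of wrapper's, so inlining is
   allowed; the reverse direction is rejected by the check above. */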
4015 /* Remember the last target of ix86_set_current_function. */
4016 static GTY(()) tree ix86_previous_fndecl;
4018 /* Establish appropriate back-end context for processing the function
4019 FNDECL. The argument might be NULL to indicate processing at top
4020 level, outside of any function scope. */
4022 ix86_set_current_function (tree fndecl)
4024 /* Only change the context if the function changes. This hook is called
4025 several times in the course of compiling a function, and we don't want to
4026 slow things down too much or call target_reinit when it isn't safe. */
4027 if (fndecl && fndecl != ix86_previous_fndecl)
4029 tree old_tree = (ix86_previous_fndecl
4030 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4033 tree new_tree = (fndecl
4034 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4037 ix86_previous_fndecl = fndecl;
4038 if (old_tree == new_tree)
4043 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
4049 struct cl_target_option *def
4050 = TREE_TARGET_OPTION (target_option_current_node);
4052 cl_target_option_restore (def);
4059 /* Return true if this goes in large data/bss. */
4062 ix86_in_large_data_p (tree exp)
4064 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4067 /* Functions are never large data. */
4068 if (TREE_CODE (exp) == FUNCTION_DECL)
4071 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4073 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4074 if (strcmp (section, ".ldata") == 0
4075 || strcmp (section, ".lbss") == 0)
4081 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4083 /* If this is an incomplete type with size 0, then we can't put it
4084 in data because it might be too big when completed. */
4085 if (!size || size > ix86_section_threshold)
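/* For example (a sketch of the intended behavior): with -mcmodel=medium
   and the default -mlarge-data-threshold of 65536 bytes, a definition
   such as

     static char big_buffer[1 << 20];

   exceeds ix86_section_threshold and is treated as large data, ending up
   in .ldata/.lbss rather than .data/.bss. */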
4092 /* Switch to the appropriate section for output of DECL.
4093 DECL is either a `VAR_DECL' node or a constant of some sort.
4094 RELOC indicates whether forming the initial value of DECL requires
4095 link-time relocations. */
4097 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4101 x86_64_elf_select_section (tree decl, int reloc,
4102 unsigned HOST_WIDE_INT align)
4104 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4105 && ix86_in_large_data_p (decl))
4107 const char *sname = NULL;
4108 unsigned int flags = SECTION_WRITE;
4109 switch (categorize_decl_for_section (decl, reloc))
4114 case SECCAT_DATA_REL:
4115 sname = ".ldata.rel";
4117 case SECCAT_DATA_REL_LOCAL:
4118 sname = ".ldata.rel.local";
4120 case SECCAT_DATA_REL_RO:
4121 sname = ".ldata.rel.ro";
4123 case SECCAT_DATA_REL_RO_LOCAL:
4124 sname = ".ldata.rel.ro.local";
4128 flags |= SECTION_BSS;
4131 case SECCAT_RODATA_MERGE_STR:
4132 case SECCAT_RODATA_MERGE_STR_INIT:
4133 case SECCAT_RODATA_MERGE_CONST:
4137 case SECCAT_SRODATA:
4144 /* We don't split these for the medium model. Place them into
4145 default sections and hope for the best. */
4147 case SECCAT_EMUTLS_VAR:
4148 case SECCAT_EMUTLS_TMPL:
4153 /* We might get called with string constants, but get_named_section
4154 doesn't like them as they are not DECLs. Also, we need to set
4155 flags in that case. */
4157 return get_section (sname, flags, NULL);
4158 return get_named_section (decl, sname, reloc);
4161 return default_elf_select_section (decl, reloc, align);
4164 /* Build up a unique section name, expressed as a
4165 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4166 RELOC indicates whether the initial value of EXP requires
4167 link-time relocations. */
4169 static void ATTRIBUTE_UNUSED
4170 x86_64_elf_unique_section (tree decl, int reloc)
4172 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4173 && ix86_in_large_data_p (decl))
4175 const char *prefix = NULL;
4176 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4177 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4179 switch (categorize_decl_for_section (decl, reloc))
4182 case SECCAT_DATA_REL:
4183 case SECCAT_DATA_REL_LOCAL:
4184 case SECCAT_DATA_REL_RO:
4185 case SECCAT_DATA_REL_RO_LOCAL:
4186 prefix = one_only ? ".ld" : ".ldata";
4189 prefix = one_only ? ".lb" : ".lbss";
4192 case SECCAT_RODATA_MERGE_STR:
4193 case SECCAT_RODATA_MERGE_STR_INIT:
4194 case SECCAT_RODATA_MERGE_CONST:
4195 prefix = one_only ? ".lr" : ".lrodata";
4197 case SECCAT_SRODATA:
4204 /* We don't split these for the medium model. Place them into
4205 default sections and hope for the best. */
4207 case SECCAT_EMUTLS_VAR:
4208 prefix = targetm.emutls.var_section;
4210 case SECCAT_EMUTLS_TMPL:
4211 prefix = targetm.emutls.tmpl_section;
4216 const char *name, *linkonce;
4219 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4220 name = targetm.strip_name_encoding (name);
4222 /* If we're using one_only, then there needs to be a .gnu.linkonce
4223 prefix to the section name. */
4224 linkonce = one_only ? ".gnu.linkonce" : "";
4226 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4228 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4232 default_unique_section (decl, reloc);
4235 #ifdef COMMON_ASM_OP
4236 /* This says how to output assembler code to declare an
4237 uninitialized external linkage data object.
4239 For medium model x86-64 we need to use the .largecomm directive for
4240 large objects. */
4242 x86_elf_aligned_common (FILE *file,
4243 const char *name, unsigned HOST_WIDE_INT size,
4246 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4247 && size > (unsigned int)ix86_section_threshold)
4248 fputs (".largecomm\t", file);
4250 fputs (COMMON_ASM_OP, file);
4251 assemble_name (file, name);
4252 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4253 size, align / BITS_PER_UNIT);
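/* The emitted directive would look roughly like this for a hypothetical
   128 KiB object under -mcmodel=medium (a sketch, not verified output):

       .largecomm	big_buffer,131072,32

   Smaller objects keep using the regular COMMON_ASM_OP form. */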
4257 /* Utility function for targets to use in implementing
4258 ASM_OUTPUT_ALIGNED_BSS. */
4261 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4262 const char *name, unsigned HOST_WIDE_INT size,
4265 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4266 && size > (unsigned int)ix86_section_threshold)
4267 switch_to_section (get_named_section (decl, ".lbss", 0));
4269 switch_to_section (bss_section);
4270 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4271 #ifdef ASM_DECLARE_OBJECT_NAME
4272 last_assemble_variable_decl = decl;
4273 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4275 /* The standard thing is to just output a label for the object. */
4276 ASM_OUTPUT_LABEL (file, name);
4277 #endif /* ASM_DECLARE_OBJECT_NAME */
4278 ASM_OUTPUT_SKIP (file, size ? size : 1);
4282 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4284 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4285 make the problem of having too few registers even worse. */
4286 #ifdef INSN_SCHEDULING
4288 flag_schedule_insns = 0;
4292 /* The Darwin libraries never set errno, so we might as well
4293 avoid calling them when that's the only reason we would. */
4294 flag_errno_math = 0;
4296 /* The default values of these switches depend on TARGET_64BIT, which is
4297 not known at this point. Mark these values with 2 and let the user
4298 override them. If there is no command line option specifying them,
4299 we will set the defaults in override_options. */
4301 flag_omit_frame_pointer = 2;
4302 flag_pcc_struct_return = 2;
4303 flag_asynchronous_unwind_tables = 2;
4304 flag_vect_cost_model = 1;
4305 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4306 SUBTARGET_OPTIMIZATION_OPTIONS;
4310 /* Decide whether we can make a sibling call to a function. DECL is the
4311 declaration of the function being targeted by the call and EXP is the
4312 CALL_EXPR representing the call. */
4315 ix86_function_ok_for_sibcall (tree decl, tree exp)
4317 tree type, decl_or_type;
4320 /* If we are generating position-independent code, we cannot sibcall
4321 optimize any indirect call, or a direct call to a global function,
4322 as the PLT requires %ebx be live. */
4323 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4326 /* If we need to align the outgoing stack, then sibcalling would
4327 unalign the stack, which may break the called function. */
4328 if (ix86_minimum_incoming_stack_boundary (true)
4329 < PREFERRED_STACK_BOUNDARY)
4334 decl_or_type = decl;
4335 type = TREE_TYPE (decl);
4339 /* We're looking at the CALL_EXPR, we need the type of the function. */
4340 type = CALL_EXPR_FN (exp); /* pointer expression */
4341 type = TREE_TYPE (type); /* pointer type */
4342 type = TREE_TYPE (type); /* function type */
4343 decl_or_type = type;
4346 /* Check that the return value locations are the same. For example,
4347 if we are returning floats on the 80387 register stack, we cannot
4348 make a sibcall from a function that doesn't return a float to a
4349 function that does or, conversely, from a function that does return
4350 a float to a function that doesn't; the necessary stack adjustment
4351 would not be executed. This is also the place we notice
4352 differences in the return value ABI. Note that it is ok for one
4353 of the functions to have void return type as long as the return
4354 value of the other is passed in a register. */
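/* For instance (an editorial sketch of the case described above): a
   function returning double on the 80387 stack cannot sibcall a function
   returning void, because the stack-adjusting fstp for the float return
   value would never be executed. */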
4355 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4356 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4358 if (STACK_REG_P (a) || STACK_REG_P (b))
4360 if (!rtx_equal_p (a, b))
4363 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4365 else if (!rtx_equal_p (a, b))
4370 /* The SYSV ABI has more call-clobbered registers;
4371 disallow sibcalls from MS to SYSV. */
4372 if (cfun->machine->call_abi == MS_ABI
4373 && ix86_function_type_abi (type) == SYSV_ABI)
4378 /* If this call is indirect, we'll need to be able to use a
4379 call-clobbered register for the address of the target function.
4380 Make sure that all such registers are not used for passing
4381 parameters. Note that DLLIMPORT functions are indirect. */
4383 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4385 if (ix86_function_regparm (type, NULL) >= 3)
4387 /* ??? Need to count the actual number of registers to be used,
4388 not the possible number of registers. Fix later. */
4394 /* Otherwise okay. That also includes certain types of indirect calls. */
4398 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
4399 calling convention attributes;
4400 arguments as in struct attribute_spec.handler. */
4403 ix86_handle_cconv_attribute (tree *node, tree name,
4405 int flags ATTRIBUTE_UNUSED,
4408 if (TREE_CODE (*node) != FUNCTION_TYPE
4409 && TREE_CODE (*node) != METHOD_TYPE
4410 && TREE_CODE (*node) != FIELD_DECL
4411 && TREE_CODE (*node) != TYPE_DECL)
4413 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4415 *no_add_attrs = true;
4419 /* Can combine regparm with all attributes but fastcall. */
4420 if (is_attribute_p ("regparm", name))
4424 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4426 error ("fastcall and regparm attributes are not compatible");
4429 cst = TREE_VALUE (args);
4430 if (TREE_CODE (cst) != INTEGER_CST)
4432 warning (OPT_Wattributes,
4433 "%qE attribute requires an integer constant argument",
4435 *no_add_attrs = true;
4437 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4439 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4441 *no_add_attrs = true;
4449 /* Do not warn when emulating the MS ABI. */
4450 if (TREE_CODE (*node) != FUNCTION_TYPE
4451 || ix86_function_type_abi (*node) != MS_ABI)
4452 warning (OPT_Wattributes, "%qE attribute ignored",
4454 *no_add_attrs = true;
4458 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4459 if (is_attribute_p ("fastcall", name))
4461 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4463 error ("fastcall and cdecl attributes are not compatible");
4465 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4467 error ("fastcall and stdcall attributes are not compatible");
4469 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4471 error ("fastcall and regparm attributes are not compatible");
4475 /* Can combine stdcall with fastcall (redundant), regparm and
4476 sseregparm. */
4477 else if (is_attribute_p ("stdcall", name))
4479 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4481 error ("stdcall and cdecl attributes are not compatible");
4483 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4485 error ("stdcall and fastcall attributes are not compatible");
4489 /* Can combine cdecl with regparm and sseregparm. */
4490 else if (is_attribute_p ("cdecl", name))
4492 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4494 error ("stdcall and cdecl attributes are not compatible");
4496 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4498 error ("fastcall and cdecl attributes are not compatible");
4502 /* Can combine sseregparm with all attributes. */
4507 /* Return 0 if the attributes for two types are incompatible, 1 if they
4508 are compatible, and 2 if they are nearly compatible (which causes a
4509 warning to be generated). */
4512 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4514 /* Check for mismatch of non-default calling convention. */
4515 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4517 if (TREE_CODE (type1) != FUNCTION_TYPE
4518 && TREE_CODE (type1) != METHOD_TYPE)
4521 /* Check for mismatched fastcall/regparm types. */
4522 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4523 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4524 || (ix86_function_regparm (type1, NULL)
4525 != ix86_function_regparm (type2, NULL)))
4528 /* Check for mismatched sseregparm types. */
4529 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4530 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4533 /* Check for mismatched return types (cdecl vs stdcall). */
4534 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4535 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4541 /* Return the regparm value for a function with the indicated TYPE and DECL.
4542 DECL may be NULL when calling a function indirectly
4543 or considering a libcall. */
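/* For illustration (hypothetical user code):

     int __attribute__ ((regparm (2))) add (int a, int b);

   passes A in %eax and B in %edx instead of on the stack; the attribute
   argument is what the lookup below extracts. */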
4546 ix86_function_regparm (const_tree type, const_tree decl)
4552 return (ix86_function_type_abi (type) == SYSV_ABI
4553 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4555 regparm = ix86_regparm;
4556 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4559 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4563 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4566 /* Use register calling convention for local functions when possible. */
4568 && TREE_CODE (decl) == FUNCTION_DECL
4572 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4573 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
4576 int local_regparm, globals = 0, regno;
4578 /* Make sure no regparm register is taken by a
4579 fixed register variable. */
4580 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4581 if (fixed_regs[local_regparm])
4584 /* We don't want to use regparm(3) for nested functions as
4585 these use a static chain pointer in the third argument. */
4586 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
4589 /* Each fixed register usage increases register pressure,
4590 so fewer registers should be used for argument passing.
4591 This functionality can be overridden by an explicit
4592 regparm value. */
4593 for (regno = 0; regno <= DI_REG; regno++)
4594 if (fixed_regs[regno])
4598 = globals < local_regparm ? local_regparm - globals : 0;
4600 if (local_regparm > regparm)
4601 regparm = local_regparm;
4608 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4609 DFmode (2) arguments in SSE registers for a function with the
4610 indicated TYPE and DECL. DECL may be NULL when calling function
4611 indirectly or considering a libcall. Otherwise return 0. */
4614 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4616 gcc_assert (!TARGET_64BIT);
4618 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4619 by the sseregparm attribute. */
4620 if (TARGET_SSEREGPARM
4621 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4628 error ("calling %qD with attribute sseregparm without "
4629 "SSE/SSE2 enabled", decl);
4631 error ("calling %qT with attribute sseregparm without "
4632 "SSE/SSE2 enabled", type);
4640 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4641 (and DFmode for SSE2) arguments in SSE registers. */
4642 if (decl && TARGET_SSE_MATH && optimize && !profile_flag)
4644 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4645 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4647 return TARGET_SSE2 ? 2 : 1;
4653 /* Return true if EAX is live at the start of the function. Used by
4654 ix86_expand_prologue to determine if we need special help before
4655 calling allocate_stack_worker. */
4658 ix86_eax_live_at_start_p (void)
4660 /* Cheat. Don't bother working forward from ix86_function_regparm
4661 to the function type to whether an actual argument is located in
4662 eax. Instead just look at cfg info, which is still close enough
4663 to correct at this point. This gives false positives for broken
4664 functions that might use uninitialized data that happens to be
4665 allocated in eax, but who cares? */
4666 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4669 /* Value is the number of bytes of arguments automatically
4670 popped when returning from a subroutine call.
4671 FUNDECL is the declaration node of the function (as a tree),
4672 FUNTYPE is the data type of the function (as a tree),
4673 or for a library call it is an identifier node for the subroutine name.
4674 SIZE is the number of bytes of arguments passed on the stack.
4676 On the 80386, the RTD insn may be used to pop them if the number
4677 of args is fixed, but if the number is variable then the caller
4678 must pop them all. RTD can't be used for library calls now
4679 because the library is compiled with the Unix compiler.
4680 Use of RTD is a selectable option, since it is incompatible with
4681 standard Unix calling sequences. If the option is not selected,
4682 the caller must always pop the args.
4684 The attribute stdcall is equivalent to RTD on a per-module basis. */
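/* A concrete sketch of the callee-pop convention described above
   (hypothetical user code):

     int __attribute__ ((stdcall)) f (int a, int b);

   returns with "ret $8"; SIZE is 8 here, and reporting those 8 bytes
   tells the caller not to pop them again. A plain cdecl function returns
   with a bare "ret" and we report 0. */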
4687 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4691 /* None of the 64-bit ABIs pop arguments. */
4695 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4697 /* Cdecl functions override -mrtd, and never pop the stack. */
4698 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4700 /* Stdcall and fastcall functions will pop the stack if the function
4701 does not take variable arguments. */
4702 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4703 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
4706 if (rtd && ! stdarg_p (funtype))
4710 /* Lose any fake structure return argument if it is passed on the stack. */
4711 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4712 && !KEEP_AGGREGATE_RETURN_POINTER)
4714 int nregs = ix86_function_regparm (funtype, fundecl);
4716 return GET_MODE_SIZE (Pmode);
4722 /* Argument support functions. */
4724 /* Return true when a register may be used to pass function parameters. */
4726 ix86_function_arg_regno_p (int regno)
4729 const int *parm_regs;
4734 return (regno < REGPARM_MAX
4735 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4737 return (regno < REGPARM_MAX
4738 || (TARGET_MMX && MMX_REGNO_P (regno)
4739 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4740 || (TARGET_SSE && SSE_REGNO_P (regno)
4741 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4746 if (SSE_REGNO_P (regno) && TARGET_SSE)
4751 if (TARGET_SSE && SSE_REGNO_P (regno)
4752 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4756 /* TODO: The function should depend on the current function ABI, but
4757 builtins.c would need updating then. Therefore we use the
4758 default ABI. */
4761 if (ix86_abi == SYSV_ABI && regno == AX_REG)
4764 if (ix86_abi == MS_ABI)
4765 parm_regs = x86_64_ms_abi_int_parameter_registers;
4767 parm_regs = x86_64_int_parameter_registers;
4768 for (i = 0; i < (ix86_abi == MS_ABI
4769 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
4770 if (regno == parm_regs[i])
4775 /* Return true if we do not know how to pass TYPE solely in registers. */
4778 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4780 if (must_pass_in_stack_var_size_or_pad (mode, type))
4783 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4784 The layout_type routine is crafty and tries to trick us into passing
4785 currently unsupported vector types on the stack by using TImode. */
4786 return (!TARGET_64BIT && mode == TImode
4787 && type && TREE_CODE (type) != VECTOR_TYPE);
4790 /* Return the size, in bytes, of the area reserved for arguments passed
4791 in registers for the function represented by FNDECL, depending on the
4792 ABI used. */
4794 ix86_reg_parm_stack_space (const_tree fndecl)
4796 enum calling_abi call_abi = SYSV_ABI;
4797 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
4798 call_abi = ix86_function_abi (fndecl);
4800 call_abi = ix86_function_type_abi (fndecl);
4801 if (call_abi == MS_ABI)
4806 /* Return SYSV_ABI or MS_ABI, depending on FNTYPE, specifying the
4807 calling ABI used. */
4809 ix86_function_type_abi (const_tree fntype)
4811 if (TARGET_64BIT && fntype != NULL)
4813 enum calling_abi abi = ix86_abi;
4814 if (abi == SYSV_ABI)
4816 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
4819 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
4827 ix86_function_ms_hook_prologue (const_tree fntype)
4831 if (lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fntype)))
4833 if (decl_function_context (fntype) != NULL_TREE)
4835 error_at (DECL_SOURCE_LOCATION (fntype),
4836 "ms_hook_prologue is not compatible with nested function");
4845 static enum calling_abi
4846 ix86_function_abi (const_tree fndecl)
4850 return ix86_function_type_abi (TREE_TYPE (fndecl));
4853 /* Return SYSV_ABI or MS_ABI, depending on CFUN, specifying the
4854 calling ABI used. */
4856 ix86_cfun_abi (void)
4858 if (! cfun || ! TARGET_64BIT)
4860 return cfun->machine->call_abi;
4864 extern void init_regs (void);
4866 /* Implementation of the call ABI switching target hook. The call-used
4867 register sets specific to FNDECL are set up here. See also
4868 CONDITIONAL_REGISTER_USAGE for more details. */
4870 ix86_call_abi_override (const_tree fndecl)
4872 if (fndecl == NULL_TREE)
4873 cfun->machine->call_abi = ix86_abi;
4875 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
4878 /* The MS and SYSV ABIs have different sets of call-used registers. Avoid
4879 the expensive re-initialization in init_regs each time we switch function
4880 context, since it is needed only during RTL expansion. */
4882 ix86_maybe_switch_abi (void)
4885 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
4889 /* Initialize a variable CUM of type CUMULATIVE_ARGS
4890 for a call to a function whose data type is FNTYPE.
4891 For a library call, FNTYPE is 0. */
4894 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
4895 tree fntype, /* tree ptr for function decl */
4896 rtx libname, /* SYMBOL_REF of library name or 0 */
4899 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
4900 memset (cum, 0, sizeof (*cum));
4903 cum->call_abi = ix86_function_abi (fndecl);
4905 cum->call_abi = ix86_function_type_abi (fntype);
4906 /* Set up the number of registers to use for passing arguments. */
4908 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
4909 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
4910 "or subtarget optimization implying it");
4911 cum->nregs = ix86_regparm;
4914 if (cum->call_abi != ix86_abi)
4915 cum->nregs = (ix86_abi != SYSV_ABI
4916 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4920 cum->sse_nregs = SSE_REGPARM_MAX;
4923 if (cum->call_abi != ix86_abi)
4924 cum->sse_nregs = (ix86_abi != SYSV_ABI
4925 ? X86_64_SSE_REGPARM_MAX
4926 : X86_64_MS_SSE_REGPARM_MAX);
4930 cum->mmx_nregs = MMX_REGPARM_MAX;
4931 cum->warn_avx = true;
4932 cum->warn_sse = true;
4933 cum->warn_mmx = true;
4935 /* Because the type might mismatch between caller and callee, we need to
4936 use the actual type of the function for local calls.
4937 FIXME: cgraph_analyze can be told to actually record if a function uses
4938 va_start, so for local functions maybe_vaarg could be made more
4939 aggressive.
4940 FIXME: once the type system is fixed, we won't need this code anymore. */
4942 fntype = TREE_TYPE (fndecl);
4943 cum->maybe_vaarg = (fntype
4944 ? (!prototype_p (fntype) || stdarg_p (fntype))
4949 /* If there are variable arguments, then we won't pass anything
4950 in registers in 32-bit mode. */
4951 if (stdarg_p (fntype))
4962 /* Use ecx and edx registers if function has fastcall attribute,
4963 else look for regparm information. */
4966 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
4972 cum->nregs = ix86_function_regparm (fntype, fndecl);
4975 /* Set up the number of SSE registers used for passing SFmode
4976 and DFmode arguments. Warn for mismatching ABI. */
4977 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
4981 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
4982 But in the case of vector types, it is some vector mode.
4984 When we have only some of our vector isa extensions enabled, then there
4985 are some modes for which vector_mode_supported_p is false. For these
4986 modes, the generic vector support in gcc will choose some non-vector mode
4987 in order to implement the type. By computing the natural mode, we'll
4988 select the proper ABI location for the operand and not depend on whatever
4989 the middle-end decides to do with these vector types.
4991 The middle-end can't deal with vector types > 16 bytes. In this
4992 case, we return the original mode and warn about the ABI change if
4993 CUM isn't NULL. */
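/* For example (a sketch): a GNU vector type such as

     typedef float v4sf __attribute__ ((vector_size (16)));

   has TYPE_MODE V4SFmode when SSE is enabled; with SSE disabled the type
   system falls back to a non-vector mode, and the search below recovers
   V4SFmode so the ABI location of the argument stays stable. */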
4995 static enum machine_mode
4996 type_natural_mode (const_tree type, CUMULATIVE_ARGS *cum)
4998 enum machine_mode mode = TYPE_MODE (type);
5000 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
5002 HOST_WIDE_INT size = int_size_in_bytes (type);
5003 if ((size == 8 || size == 16 || size == 32)
5004 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5005 && TYPE_VECTOR_SUBPARTS (type) > 1)
5007 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5009 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5010 mode = MIN_MODE_VECTOR_FLOAT;
5012 mode = MIN_MODE_VECTOR_INT;
5014 /* Get the mode which has this inner mode and number of units. */
5015 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5016 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5017 && GET_MODE_INNER (mode) == innermode)
5019 if (size == 32 && !TARGET_AVX)
5021 static bool warnedavx;
5028 warning (0, "AVX vector argument without AVX "
5029 "enabled changes the ABI");
5031 return TYPE_MODE (type);
5044 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5045 this may not agree with the mode that the type system has chosen for the
5046 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5047 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5050 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5055 if (orig_mode != BLKmode)
5056 tmp = gen_rtx_REG (orig_mode, regno);
5059 tmp = gen_rtx_REG (mode, regno);
5060 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5061 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
5067 /* x86-64 register passing implementation. See the x86-64 psABI for details.
5068 The goal of this code is to classify each eightbyte of the incoming argument
5069 by register class and assign registers accordingly. */
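/* A worked example of this classification (per the psABI, as an
   editorial sketch):

     struct s { double d; int i; };

   occupies two eightbytes; the first is classified X86_64_SSEDF_CLASS
   (so D is passed in %xmm0) and the second X86_64_INTEGERSI_CLASS (so I
   is passed in an integer register such as %edi). */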
5071 /* Return the union class of CLASS1 and CLASS2.
5072 See the x86-64 psABI for details. */
5074 static enum x86_64_reg_class
5075 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5077 /* Rule #1: If both classes are equal, this is the resulting class. */
5078 if (class1 == class2)
5081 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
5082 the other class. */
5083 if (class1 == X86_64_NO_CLASS)
5085 if (class2 == X86_64_NO_CLASS)
5088 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5089 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5090 return X86_64_MEMORY_CLASS;
5092 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5093 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5094 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5095 return X86_64_INTEGERSI_CLASS;
5096 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5097 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5098 return X86_64_INTEGER_CLASS;
5100 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
5101 MEMORY is used. */
5102 if (class1 == X86_64_X87_CLASS
5103 || class1 == X86_64_X87UP_CLASS
5104 || class1 == X86_64_COMPLEX_X87_CLASS
5105 || class2 == X86_64_X87_CLASS
5106 || class2 == X86_64_X87UP_CLASS
5107 || class2 == X86_64_COMPLEX_X87_CLASS)
5108 return X86_64_MEMORY_CLASS;
5110 /* Rule #6: Otherwise class SSE is used. */
5111 return X86_64_SSE_CLASS;
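/* Worked examples of the rules above: merging X86_64_INTEGERSI_CLASS with
   X86_64_SSESF_CLASS yields X86_64_INTEGERSI_CLASS (rule #4), while
   merging X86_64_SSEDF_CLASS with X86_64_X87_CLASS yields
   X86_64_MEMORY_CLASS (rule #5). */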
5114 /* Classify the argument of type TYPE and mode MODE.
5115 CLASSES will be filled by the register class used to pass each word
5116 of the operand. The number of words is returned. In case the parameter
5117 should be passed in memory, 0 is returned. As a special case for zero
5118 sized containers, classes[0] will be NO_CLASS and 1 is returned.
5120 BIT_OFFSET is used internally for handling records; it specifies the
5121 offset in bits modulo 256 to avoid overflow cases.
5123 See the x86-64 psABI for details.
5127 classify_argument (enum machine_mode mode, const_tree type,
5128 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5130 HOST_WIDE_INT bytes =
5131 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5132 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5134 /* Variable sized entities are always passed/returned in memory. */
5138 if (mode != VOIDmode
5139 && targetm.calls.must_pass_in_stack (mode, type))
5142 if (type && AGGREGATE_TYPE_P (type))
5146 enum x86_64_reg_class subclasses[MAX_CLASSES];
5148 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5152 for (i = 0; i < words; i++)
5153 classes[i] = X86_64_NO_CLASS;
5155 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
5156 signal the memory class, so handle it as a special case. */
5159 classes[0] = X86_64_NO_CLASS;
5163 /* Classify each field of the record and merge classes. */
5164 switch (TREE_CODE (type))
5167 /* And now merge the fields of the structure. */
5168 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5170 if (TREE_CODE (field) == FIELD_DECL)
5174 if (TREE_TYPE (field) == error_mark_node)
5177 /* Bitfields are always classified as integer. Handle them
5178 early, since later code would consider them to be
5179 misaligned integers. */
5180 if (DECL_BIT_FIELD (field))
5182 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5183 i < ((int_bit_position (field) + (bit_offset % 64))
5184 + tree_low_cst (DECL_SIZE (field), 0)
5187 merge_classes (X86_64_INTEGER_CLASS,
5194 type = TREE_TYPE (field);
5196 /* A flexible array member is ignored. */
5197 if (TYPE_MODE (type) == BLKmode
5198 && TREE_CODE (type) == ARRAY_TYPE
5199 && TYPE_SIZE (type) == NULL_TREE
5200 && TYPE_DOMAIN (type) != NULL_TREE
5201 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5206 if (!warned && warn_psabi)
5209 inform (input_location,
5210 "The ABI of passing struct with"
5211 " a flexible array member has"
5212 " changed in GCC 4.4");
5216 num = classify_argument (TYPE_MODE (type), type,
5218 (int_bit_position (field)
5219 + bit_offset) % 256);
5222 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5223 for (i = 0; i < num && (i + pos) < words; i++)
5225 merge_classes (subclasses[i], classes[i + pos]);
5232 /* Arrays are handled as small records. */
5235 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5236 TREE_TYPE (type), subclasses, bit_offset);
5240 /* The partial classes are now full classes. */
5241 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5242 subclasses[0] = X86_64_SSE_CLASS;
5243 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5244 && !((bit_offset % 64) == 0 && bytes == 4))
5245 subclasses[0] = X86_64_INTEGER_CLASS;
5247 for (i = 0; i < words; i++)
5248 classes[i] = subclasses[i % num];
5253 case QUAL_UNION_TYPE:
5254 /* Unions are similar to RECORD_TYPE but the offset is always 0. */
5256 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5258 if (TREE_CODE (field) == FIELD_DECL)
5262 if (TREE_TYPE (field) == error_mark_node)
5265 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5266 TREE_TYPE (field), subclasses,
5270 for (i = 0; i < num; i++)
5271 classes[i] = merge_classes (subclasses[i], classes[i]);
5282 /* When size > 16 bytes, if the first one isn't
5283 X86_64_SSE_CLASS or any other ones aren't
5284 X86_64_SSEUP_CLASS, everything should be passed in
5285 memory. */
5286 if (classes[0] != X86_64_SSE_CLASS)
5289 for (i = 1; i < words; i++)
5290 if (classes[i] != X86_64_SSEUP_CLASS)
5294 /* Final merger cleanup. */
5295 for (i = 0; i < words; i++)
5297 /* If one class is MEMORY, everything should be passed in
5298 memory. */
5299 if (classes[i] == X86_64_MEMORY_CLASS)
5302 /* The X86_64_SSEUP_CLASS should always be preceded by
5303 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5304 if (classes[i] == X86_64_SSEUP_CLASS
5305 && classes[i - 1] != X86_64_SSE_CLASS
5306 && classes[i - 1] != X86_64_SSEUP_CLASS)
5308 /* The first one should never be X86_64_SSEUP_CLASS. */
5309 gcc_assert (i != 0);
5310 classes[i] = X86_64_SSE_CLASS;
5313 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5314 everything should be passed in memory. */
5315 if (classes[i] == X86_64_X87UP_CLASS
5316 && (classes[i - 1] != X86_64_X87_CLASS))
5320 /* The first one should never be X86_64_X87UP_CLASS. */
5321 gcc_assert (i != 0);
5322 if (!warned && warn_psabi)
5325 inform (input_location,
5326 "The ABI of passing union with long double"
5327 " has changed in GCC 4.4");
5335 /* Compute the alignment needed. We align all types to their natural
5336 boundaries, with the exception of XFmode, which is aligned to 64 bits. */
5337 if (mode != VOIDmode && mode != BLKmode)
5339 int mode_alignment = GET_MODE_BITSIZE (mode);
5342 mode_alignment = 128;
5343 else if (mode == XCmode)
5344 mode_alignment = 256;
5345 if (COMPLEX_MODE_P (mode))
5346 mode_alignment /= 2;
5347 /* Misaligned fields are always returned in memory. */
5348 if (bit_offset % mode_alignment)
5352 /* For V1xx modes, just use the base mode. */
5353 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
5354 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5355 mode = GET_MODE_INNER (mode);
5357 /* Classification of atomic types. */
5362 classes[0] = X86_64_SSE_CLASS;
5365 classes[0] = X86_64_SSE_CLASS;
5366 classes[1] = X86_64_SSEUP_CLASS;
5376 int size = (bit_offset % 64) + (int) GET_MODE_BITSIZE (mode);
5380 classes[0] = X86_64_INTEGERSI_CLASS;
5383 else if (size <= 64)
5385 classes[0] = X86_64_INTEGER_CLASS;
5388 else if (size <= 64+32)
5390 classes[0] = X86_64_INTEGER_CLASS;
5391 classes[1] = X86_64_INTEGERSI_CLASS;
5394 else if (size <= 64+64)
5396 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5404 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5408 /* OImode shouldn't be used directly. */
5413 if (!(bit_offset % 64))
5414 classes[0] = X86_64_SSESF_CLASS;
5416 classes[0] = X86_64_SSE_CLASS;
5419 classes[0] = X86_64_SSEDF_CLASS;
5422 classes[0] = X86_64_X87_CLASS;
5423 classes[1] = X86_64_X87UP_CLASS;
5426 classes[0] = X86_64_SSE_CLASS;
5427 classes[1] = X86_64_SSEUP_CLASS;
5430 classes[0] = X86_64_SSE_CLASS;
5431 if (!(bit_offset % 64))
5437 if (!warned && warn_psabi)
5440 inform (input_location,
5441 "The ABI of passing structure with complex float"
5442 " member has changed in GCC 4.4");
5444 classes[1] = X86_64_SSESF_CLASS;
5448 classes[0] = X86_64_SSEDF_CLASS;
5449 classes[1] = X86_64_SSEDF_CLASS;
5452 classes[0] = X86_64_COMPLEX_X87_CLASS;
5455 /* This mode is larger than 16 bytes. */
5463 classes[0] = X86_64_SSE_CLASS;
5464 classes[1] = X86_64_SSEUP_CLASS;
5465 classes[2] = X86_64_SSEUP_CLASS;
5466 classes[3] = X86_64_SSEUP_CLASS;
5474 classes[0] = X86_64_SSE_CLASS;
5475 classes[1] = X86_64_SSEUP_CLASS;
5483 classes[0] = X86_64_SSE_CLASS;
5489 gcc_assert (VECTOR_MODE_P (mode));
5494 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5496 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5497 classes[0] = X86_64_INTEGERSI_CLASS;
5499 classes[0] = X86_64_INTEGER_CLASS;
5500 classes[1] = X86_64_INTEGER_CLASS;
5501 return 1 + (bytes > 8);
5505 /* Examine the argument and set the number of registers required in each
5506 class. Return 0 iff the parameter should be passed in memory. */
5508 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5509 int *int_nregs, int *sse_nregs)
5511 enum x86_64_reg_class regclass[MAX_CLASSES];
5512 int n = classify_argument (mode, type, regclass, 0);
5518 for (n--; n >= 0; n--)
5519 switch (regclass[n])
5521 case X86_64_INTEGER_CLASS:
5522 case X86_64_INTEGERSI_CLASS:
5525 case X86_64_SSE_CLASS:
5526 case X86_64_SSESF_CLASS:
5527 case X86_64_SSEDF_CLASS:
5530 case X86_64_NO_CLASS:
5531 case X86_64_SSEUP_CLASS:
5533 case X86_64_X87_CLASS:
5534 case X86_64_X87UP_CLASS:
5538 case X86_64_COMPLEX_X87_CLASS:
5539 return in_return ? 2 : 0;
5540 case X86_64_MEMORY_CLASS:
5546 /* Construct a container for the argument as used by the GCC interface.
5547 See FUNCTION_ARG for the detailed description. */
5550 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5551 const_tree type, int in_return, int nintregs, int nsseregs,
5552 const int *intreg, int sse_regno)
5554 /* The following variables hold the static issued_error state. */
5555 static bool issued_sse_arg_error;
5556 static bool issued_sse_ret_error;
5557 static bool issued_x87_ret_error;
5559 enum machine_mode tmpmode;
5561 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5562 enum x86_64_reg_class regclass[MAX_CLASSES];
5566 int needed_sseregs, needed_intregs;
5567 rtx exp[MAX_CLASSES];
5570 n = classify_argument (mode, type, regclass, 0);
5573 if (!examine_argument (mode, type, in_return, &needed_intregs,
5576 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5579 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5580 some less clueful developer tries to use floating-point anyway. */
5581 if (needed_sseregs && !TARGET_SSE)
5585 if (!issued_sse_ret_error)
5587 error ("SSE register return with SSE disabled");
5588 issued_sse_ret_error = true;
5591 else if (!issued_sse_arg_error)
5593 error ("SSE register argument with SSE disabled");
5594 issued_sse_arg_error = true;
5599 /* Likewise, error if the ABI requires us to return values in the
5600 x87 registers and the user specified -mno-80387. */
5601 if (!TARGET_80387 && in_return)
5602 for (i = 0; i < n; i++)
5603 if (regclass[i] == X86_64_X87_CLASS
5604 || regclass[i] == X86_64_X87UP_CLASS
5605 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5607 if (!issued_x87_ret_error)
5609 error ("x87 register return with x87 disabled");
5610 issued_x87_ret_error = true;
5615 /* First construct the simple cases. Avoid SCmode, since we want to use
5616 a single register to pass this type. */
5617 if (n == 1 && mode != SCmode)
5618 switch (regclass[0])
5620 case X86_64_INTEGER_CLASS:
5621 case X86_64_INTEGERSI_CLASS:
5622 return gen_rtx_REG (mode, intreg[0]);
5623 case X86_64_SSE_CLASS:
5624 case X86_64_SSESF_CLASS:
5625 case X86_64_SSEDF_CLASS:
5626 if (mode != BLKmode)
5627 return gen_reg_or_parallel (mode, orig_mode,
5628 SSE_REGNO (sse_regno));
5630 case X86_64_X87_CLASS:
5631 case X86_64_COMPLEX_X87_CLASS:
5632 return gen_rtx_REG (mode, FIRST_STACK_REG);
5633 case X86_64_NO_CLASS:
5634 /* Zero sized array, struct or class. */
5639 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5640 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5641 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5643 && regclass[0] == X86_64_SSE_CLASS
5644 && regclass[1] == X86_64_SSEUP_CLASS
5645 && regclass[2] == X86_64_SSEUP_CLASS
5646 && regclass[3] == X86_64_SSEUP_CLASS
5648 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5651 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5652 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5653 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5654 && regclass[1] == X86_64_INTEGER_CLASS
5655 && (mode == CDImode || mode == TImode || mode == TFmode)
5656 && intreg[0] + 1 == intreg[1])
5657 return gen_rtx_REG (mode, intreg[0]);
5659 /* Otherwise figure out the entries of the PARALLEL. */
5660 for (i = 0; i < n; i++)
5664 switch (regclass[i])
5666 case X86_64_NO_CLASS:
5668 case X86_64_INTEGER_CLASS:
5669 case X86_64_INTEGERSI_CLASS:
5670 /* Merge TImodes on aligned occasions here too. */
5671 if (i * 8 + 8 > bytes)
5672 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5673 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5677 /* We've requested 24 bytes we don't have a mode for. Use DImode. */
5678 if (tmpmode == BLKmode)
5680 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5681 gen_rtx_REG (tmpmode, *intreg),
5685 case X86_64_SSESF_CLASS:
5686 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5687 gen_rtx_REG (SFmode,
5688 SSE_REGNO (sse_regno)),
5692 case X86_64_SSEDF_CLASS:
5693 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5694 gen_rtx_REG (DFmode,
5695 SSE_REGNO (sse_regno)),
5699 case X86_64_SSE_CLASS:
5707 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
5717 && regclass[1] == X86_64_SSEUP_CLASS
5718 && regclass[2] == X86_64_SSEUP_CLASS
5719 && regclass[3] == X86_64_SSEUP_CLASS);
5726 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5727 gen_rtx_REG (tmpmode,
5728 SSE_REGNO (sse_regno)),
5737 /* Empty aligned struct, union or class. */
5741 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5742 for (i = 0; i < nexps; i++)
5743 XVECEXP (ret, 0, i) = exp [i];
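/* For illustration, assuming the standard SysV AMD64 classification: a
   type such as

       struct { double d; long l; }

   is classified as two eightbytes, SSE followed by INTEGER, so for a
   return value the container built above is roughly

       (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                  (expr_list (reg:DI rax) (const_int 8))])  */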
5747 /* Update the data in CUM to advance over an argument of mode MODE
5748 and data type TYPE. (TYPE is null for libcalls where that information
5749 may not be available.) */
5752 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5753 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5769 cum->words += words;
5770 cum->nregs -= words;
5771 cum->regno += words;
5773 if (cum->nregs <= 0)
5781 /* OImode shouldn't be used directly. */
5785 if (cum->float_in_sse < 2)
5788 if (cum->float_in_sse < 1)
5805 if (!type || !AGGREGATE_TYPE_P (type))
5807 cum->sse_words += words;
5808 cum->sse_nregs -= 1;
5809 cum->sse_regno += 1;
5810 if (cum->sse_nregs <= 0)
5824 if (!type || !AGGREGATE_TYPE_P (type))
5826 cum->mmx_words += words;
5827 cum->mmx_nregs -= 1;
5828 cum->mmx_regno += 1;
5829 if (cum->mmx_nregs <= 0)
5840 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5841 tree type, HOST_WIDE_INT words, int named)
5843 int int_nregs, sse_nregs;
5845 /* Unnamed 256-bit vector mode parameters are passed on the stack. */
5846 if (!named && VALID_AVX256_REG_MODE (mode))
5849 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
5850 cum->words += words;
5851 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
5853 cum->nregs -= int_nregs;
5854 cum->sse_nregs -= sse_nregs;
5855 cum->regno += int_nregs;
5856 cum->sse_regno += sse_nregs;
5859 cum->words += words;
5863 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
5864 HOST_WIDE_INT words)
5866 /* Otherwise, this should be passed indirectly. */
5867 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
5869 cum->words += words;
5878 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5879 tree type, int named)
5881 HOST_WIDE_INT bytes, words;
5883 if (mode == BLKmode)
5884 bytes = int_size_in_bytes (type);
5886 bytes = GET_MODE_SIZE (mode);
5887 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5890 mode = type_natural_mode (type, NULL);
5892 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
5893 function_arg_advance_ms_64 (cum, bytes, words);
5894 else if (TARGET_64BIT)
5895 function_arg_advance_64 (cum, mode, type, words, named);
5897 function_arg_advance_32 (cum, mode, type, bytes, words);
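/* A minimal illustration, assuming the SysV AMD64 convention handled by
   function_arg_advance_64: advancing over a plain "int" argument
   consumes one GPR, so cum->nregs drops by one and cum->regno moves
   from e.g. DI to SI; advancing over a "double" instead consumes one
   SSE register via cum->sse_nregs/cum->sse_regno.  */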
5900 /* Define where to put the arguments to a function.
5901 Value is zero to push the argument on the stack,
5902 or a hard register in which to store the argument.
5904 MODE is the argument's machine mode.
5905 TYPE is the data type of the argument (as a tree).
5906 This is null for libcalls where that information may
5908 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5909 the preceding args and about the function being called.
5910 NAMED is nonzero if this argument is a named parameter
5911 (otherwise it is an extra parameter matching an ellipsis). */
5914 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5915 enum machine_mode orig_mode, tree type,
5916 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5918 static bool warnedsse, warnedmmx;
5920 /* Avoid the AL settings for the Unix64 ABI. */
5921 if (mode == VOIDmode)
5937 if (words <= cum->nregs)
5939 int regno = cum->regno;
5941 /* Fastcall allocates the first two DWORD (SImode) or
5942 smaller arguments to ECX and EDX if it isn't an
5948 || (type && AGGREGATE_TYPE_P (type)))
5951 /* ECX, not EAX, is the first allocated register. */
5952 if (regno == AX_REG)
5955 return gen_rtx_REG (mode, regno);
5960 if (cum->float_in_sse < 2)
5963 if (cum->float_in_sse < 1)
5967 /* In 32-bit mode, we pass TImode in xmm registers. */
5974 if (!type || !AGGREGATE_TYPE_P (type))
5976 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
5979 warning (0, "SSE vector argument without SSE enabled "
5983 return gen_reg_or_parallel (mode, orig_mode,
5984 cum->sse_regno + FIRST_SSE_REG);
5989 /* OImode shouldn't be used directly. */
5998 if (!type || !AGGREGATE_TYPE_P (type))
6001 return gen_reg_or_parallel (mode, orig_mode,
6002 cum->sse_regno + FIRST_SSE_REG);
6012 if (!type || !AGGREGATE_TYPE_P (type))
6014 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6017 warning (0, "MMX vector argument without MMX enabled "
6021 return gen_reg_or_parallel (mode, orig_mode,
6022 cum->mmx_regno + FIRST_MMX_REG);
6031 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6032 enum machine_mode orig_mode, tree type, int named)
6034 /* Handle a hidden AL argument containing number of registers
6035 for varargs x86-64 functions. */
6036 if (mode == VOIDmode)
6037 return GEN_INT (cum->maybe_vaarg
6038 ? (cum->sse_nregs < 0
6039 ? (cum->call_abi == ix86_abi
6041 : (ix86_abi != SYSV_ABI
6042 ? X86_64_SSE_REGPARM_MAX
6043 : X86_64_MS_SSE_REGPARM_MAX))
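/* Illustrative note on the hidden argument above, assuming the SysV
   AMD64 varargs convention: the caller loads %al with (an upper bound
   on) the number of SSE registers used, so a call like
   printf ("%f", 1.0) is made with al = 1, and the callee's prologue
   uses that value to decide how many SSE registers to dump into the
   register save area.  */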
6058 /* Unnamed 256-bit vector mode parameters are passed on the stack. */
6064 return construct_container (mode, orig_mode, type, 0, cum->nregs,
6066 &x86_64_int_parameter_registers [cum->regno],
6071 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6072 enum machine_mode orig_mode, int named,
6073 HOST_WIDE_INT bytes)
6077 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
6078 We use the value -2 to specify that the current function call is MS ABI. */
6079 if (mode == VOIDmode)
6080 return GEN_INT (-2);
6082 /* If we've run out of registers, it goes on the stack. */
6083 if (cum->nregs == 0)
6086 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6088 /* Only floating point modes are passed in anything but integer regs. */
6089 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6092 regno = cum->regno + FIRST_SSE_REG;
6097 /* Unnamed floating parameters are passed in both the
6098 SSE and integer registers. */
6099 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6100 t2 = gen_rtx_REG (mode, regno);
6101 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6102 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6103 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
6106 /* Handle aggregate types passed in registers. */
6107 if (orig_mode == BLKmode)
6109 if (bytes > 0 && bytes <= 8)
6110 mode = (bytes > 4 ? DImode : SImode);
6111 if (mode == BLKmode)
6115 return gen_reg_or_parallel (mode, orig_mode, regno);
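/* A worked example, assuming the Windows x64 convention implemented
   above: the first four arguments go, by position, in rcx/rdx/r8/r9 or
   xmm0-xmm3, so for f (int, double, int) the double travels in xmm1
   (and, if unnamed, also in rdx via the PARALLEL built above), while an
   8-byte aggregate is loaded as a DImode value into the slot's integer
   register.  */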
6119 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
6120 tree type, int named)
6122 enum machine_mode mode = omode;
6123 HOST_WIDE_INT bytes, words;
6125 if (mode == BLKmode)
6126 bytes = int_size_in_bytes (type);
6128 bytes = GET_MODE_SIZE (mode);
6129 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6131 /* To simplify the code below, represent vector types with a vector mode
6132 even if MMX/SSE are not active. */
6133 if (type && TREE_CODE (type) == VECTOR_TYPE)
6134 mode = type_natural_mode (type, cum);
6136 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6137 return function_arg_ms_64 (cum, mode, omode, named, bytes);
6138 else if (TARGET_64BIT)
6139 return function_arg_64 (cum, mode, omode, type, named);
6141 return function_arg_32 (cum, mode, omode, type, bytes, words);
6144 /* A C expression that indicates when an argument must be passed by
6145 reference. If nonzero for an argument, a copy of that argument is
6146 made in memory and a pointer to the argument is passed instead of
6147 the argument itself. The pointer is passed in whatever way is
6148 appropriate for passing a pointer to that type. */
6151 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6152 enum machine_mode mode ATTRIBUTE_UNUSED,
6153 const_tree type, bool named ATTRIBUTE_UNUSED)
6155 /* See Windows x64 Software Convention. */
6156 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6158 int msize = (int) GET_MODE_SIZE (mode);
6161 /* Arrays are passed by reference. */
6162 if (TREE_CODE (type) == ARRAY_TYPE)
6165 if (AGGREGATE_TYPE_P (type))
6167 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6168 are passed by reference. */
6169 msize = int_size_in_bytes (type);
6173 /* __m128 is passed by reference. */
6175 case 1: case 2: case 4: case 8:
6181 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
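/* A quick sketch of the rules above, for the Windows x64 case: an
   aggregate of 3, 12 or 32 bytes is passed by reference (a hidden
   pointer), while one of exactly 1, 2, 4 or 8 bytes travels by value in
   its slot; __m128, being 16 bytes, falls outside the size switch and
   is likewise passed by reference.  */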
6187 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
6190 contains_aligned_value_p (tree type)
6192 enum machine_mode mode = TYPE_MODE (type);
6193 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6197 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6199 if (TYPE_ALIGN (type) < 128)
6202 if (AGGREGATE_TYPE_P (type))
6204 /* Walk the aggregates recursively. */
6205 switch (TREE_CODE (type))
6209 case QUAL_UNION_TYPE:
6213 /* Walk all the structure fields. */
6214 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6216 if (TREE_CODE (field) == FIELD_DECL
6217 && contains_aligned_value_p (TREE_TYPE (field)))
6224 /* Just for use if some language passes arrays by value. */
6225 if (contains_aligned_value_p (TREE_TYPE (type)))
6236 /* Gives the alignment boundary, in bits, of an argument with the
6237 specified mode and type. */
6240 ix86_function_arg_boundary (enum machine_mode mode, tree type)
6245 /* Since the canonical type is used for the call, we convert TYPE to
6246 its canonical type if needed. */
6247 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
6248 type = TYPE_CANONICAL (type);
6249 align = TYPE_ALIGN (type);
6252 align = GET_MODE_ALIGNMENT (mode);
6253 if (align < PARM_BOUNDARY)
6254 align = PARM_BOUNDARY;
6255 /* In 32-bit mode, only _Decimal128 and __float128 are aligned to their
6256 natural boundaries. */
6257 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
6259 /* The i386 ABI defines all arguments to be 4-byte aligned.  We have to
6260 make an exception for SSE modes since these require 128-bit alignment.
6263 The handling here differs from field_alignment. ICC aligns MMX
6264 arguments to 4 byte boundaries, while structure fields are aligned
6265 to 8 byte boundaries. */
6268 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6269 align = PARM_BOUNDARY;
6273 if (!contains_aligned_value_p (type))
6274 align = PARM_BOUNDARY;
6277 if (align > BIGGEST_ALIGNMENT)
6278 align = BIGGEST_ALIGNMENT;
6282 /* Return true if N is a possible register number of function value. */
6285 ix86_function_value_regno_p (int regno)
6292 case FIRST_FLOAT_REG:
6293 /* TODO: The function should depend on the current function's ABI, but
6294 builtins.c would need updating then.  Therefore we use the default ABI. */
6296 if (TARGET_64BIT && ix86_abi == MS_ABI)
6298 return TARGET_FLOAT_RETURNS_IN_80387;
6304 if (TARGET_MACHO || TARGET_64BIT)
6312 /* Define how to find the value returned by a function.
6313 VALTYPE is the data type of the value (as a tree).
6314 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6315 otherwise, FUNC is 0. */
6318 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6319 const_tree fntype, const_tree fn)
6323 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
6324 we normally prevent this case when mmx is not available. However
6325 some ABIs may require the result to be returned like DImode. */
6326 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6327 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
6329 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
6330 we prevent this case when sse is not available. However some ABIs
6331 may require the result to be returned like integer TImode. */
6332 else if (mode == TImode
6333 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6334 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6336 /* 32-byte vector modes in %ymm0. */
6337 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6338 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6340 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6341 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6342 regno = FIRST_FLOAT_REG;
6344 /* Most things go in %eax. */
6347 /* Override FP return register with %xmm0 for local functions when
6348 SSE math is enabled or for functions with sseregparm attribute. */
6349 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6351 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6352 if ((sse_level >= 1 && mode == SFmode)
6353 || (sse_level == 2 && mode == DFmode))
6354 regno = FIRST_SSE_REG;
6357 /* OImode shouldn't be used directly. */
6358 gcc_assert (mode != OImode);
6360 return gen_rtx_REG (orig_mode, regno);
6364 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6369 /* Handle libcalls, which don't provide a type node. */
6370 if (valtype == NULL)
6382 return gen_rtx_REG (mode, FIRST_SSE_REG);
6385 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6389 return gen_rtx_REG (mode, AX_REG);
6393 ret = construct_container (mode, orig_mode, valtype, 1,
6394 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6395 x86_64_int_return_registers, 0);
6397 /* For zero-sized structures, construct_container returns NULL, but we
6398 need to keep the rest of the compiler happy by returning a meaningful value. */
6400 ret = gen_rtx_REG (orig_mode, AX_REG);
6406 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6408 unsigned int regno = AX_REG;
6412 switch (GET_MODE_SIZE (mode))
6415 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6416 && !COMPLEX_MODE_P (mode))
6417 regno = FIRST_SSE_REG;
6421 if (mode == SFmode || mode == DFmode)
6422 regno = FIRST_SSE_REG;
6428 return gen_rtx_REG (orig_mode, regno);
6432 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6433 enum machine_mode orig_mode, enum machine_mode mode)
6435 const_tree fn, fntype;
6438 if (fntype_or_decl && DECL_P (fntype_or_decl))
6439 fn = fntype_or_decl;
6440 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6442 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6443 return function_value_ms_64 (orig_mode, mode);
6444 else if (TARGET_64BIT)
6445 return function_value_64 (orig_mode, mode, valtype);
6447 return function_value_32 (orig_mode, mode, fntype, fn);
6451 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6452 bool outgoing ATTRIBUTE_UNUSED)
6454 enum machine_mode mode, orig_mode;
6456 orig_mode = TYPE_MODE (valtype);
6457 mode = type_natural_mode (valtype, NULL);
6458 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6462 ix86_libcall_value (enum machine_mode mode)
6464 return ix86_function_value_1 (NULL, NULL, mode, mode);
6467 /* Return true iff type is returned in memory. */
6469 static int ATTRIBUTE_UNUSED
6470 return_in_memory_32 (const_tree type, enum machine_mode mode)
6474 if (mode == BLKmode)
6477 size = int_size_in_bytes (type);
6479 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6482 if (VECTOR_MODE_P (mode) || mode == TImode)
6484 /* User-created vectors small enough to fit in EAX. */
6488 /* MMX/3dNow values are returned in MM0,
6489 except when it doesn't exist. */
6491 return (TARGET_MMX ? 0 : 1);
6493 /* SSE values are returned in XMM0, except when it doesn't exist. */
6495 return (TARGET_SSE ? 0 : 1);
6497 /* AVX values are returned in YMM0, except when it doesn't exist. */
6499 return TARGET_AVX ? 0 : 1;
6508 /* OImode shouldn't be used directly. */
6509 gcc_assert (mode != OImode);
6514 static int ATTRIBUTE_UNUSED
6515 return_in_memory_64 (const_tree type, enum machine_mode mode)
6517 int needed_intregs, needed_sseregs;
6518 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6521 static int ATTRIBUTE_UNUSED
6522 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6524 HOST_WIDE_INT size = int_size_in_bytes (type);
6526 /* __m128 is returned in xmm0. */
6527 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6528 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6531 /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes. */
6532 return (size != 1 && size != 2 && size != 4 && size != 8);
6536 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6538 #ifdef SUBTARGET_RETURN_IN_MEMORY
6539 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6541 const enum machine_mode mode = type_natural_mode (type, NULL);
6545 if (ix86_function_type_abi (fntype) == MS_ABI)
6546 return return_in_memory_ms_64 (type, mode);
6548 return return_in_memory_64 (type, mode);
6551 return return_in_memory_32 (type, mode);
6555 /* Return false iff TYPE is returned in memory. This version is used
6556 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6557 but differs notably in that when MMX is available, 8-byte vectors
6558 are returned in memory, rather than in MMX registers. */
6561 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6564 enum machine_mode mode = type_natural_mode (type, NULL);
6567 return return_in_memory_64 (type, mode);
6569 if (mode == BLKmode)
6572 size = int_size_in_bytes (type);
6574 if (VECTOR_MODE_P (mode))
6576 /* Return in memory only if MMX registers *are* available.  This
6577 seems backwards, but it is consistent with the existing Solaris x86 ABI. */
6584 else if (mode == TImode)
6586 else if (mode == XFmode)
6592 /* When returning SSE vector types, we have a choice of either
6593 (1) being abi incompatible with a -march switch, or
6594 (2) generating an error.
6595 Given no good solution, I think the safest thing is one warning.
6596 The user won't be able to use -Werror, but....
6598 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6599 called in response to actually generating a caller or callee that
6600 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6601 via aggregate_value_p for general type probing from tree-ssa. */
6604 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6606 static bool warnedsse, warnedmmx;
6608 if (!TARGET_64BIT && type)
6610 /* Look at the return type of the function, not the function type. */
6611 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6613 if (!TARGET_SSE && !warnedsse)
6616 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6619 warning (0, "SSE vector return without SSE enabled "
6624 if (!TARGET_MMX && !warnedmmx)
6626 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6629 warning (0, "MMX vector return without MMX enabled "
6639 /* Create the va_list data type. */
6641 /* Returns the calling-convention-specific va_list data type.
6642 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6645 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6647 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6649 /* For i386 we use a plain pointer to the argument area. */
6650 if (!TARGET_64BIT || abi == MS_ABI)
6651 return build_pointer_type (char_type_node);
6653 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6654 type_decl = build_decl (BUILTINS_LOCATION,
6655 TYPE_DECL, get_identifier ("__va_list_tag"), record);
6657 f_gpr = build_decl (BUILTINS_LOCATION,
6658 FIELD_DECL, get_identifier ("gp_offset"),
6659 unsigned_type_node);
6660 f_fpr = build_decl (BUILTINS_LOCATION,
6661 FIELD_DECL, get_identifier ("fp_offset"),
6662 unsigned_type_node);
6663 f_ovf = build_decl (BUILTINS_LOCATION,
6664 FIELD_DECL, get_identifier ("overflow_arg_area"),
6666 f_sav = build_decl (BUILTINS_LOCATION,
6667 FIELD_DECL, get_identifier ("reg_save_area"),
6670 va_list_gpr_counter_field = f_gpr;
6671 va_list_fpr_counter_field = f_fpr;
6673 DECL_FIELD_CONTEXT (f_gpr) = record;
6674 DECL_FIELD_CONTEXT (f_fpr) = record;
6675 DECL_FIELD_CONTEXT (f_ovf) = record;
6676 DECL_FIELD_CONTEXT (f_sav) = record;
6678 TREE_CHAIN (record) = type_decl;
6679 TYPE_NAME (record) = type_decl;
6680 TYPE_FIELDS (record) = f_gpr;
6681 TREE_CHAIN (f_gpr) = f_fpr;
6682 TREE_CHAIN (f_fpr) = f_ovf;
6683 TREE_CHAIN (f_ovf) = f_sav;
6685 layout_type (record);
6687 /* The correct type is an array type of one element. */
6688 return build_array_type (record, build_index_type (size_zero_node));
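/* The record laid out above corresponds to the familiar SysV AMD64
   va_list, i.e. roughly:

       typedef struct __va_list_tag {
         unsigned int gp_offset;       -- offset into reg_save_area
         unsigned int fp_offset;
         void *overflow_arg_area;      -- stack-passed arguments
         void *reg_save_area;
       } va_list[1];
*/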
6691 /* Set up the builtin va_list data type and, for 64-bit, the additional
6692 calling-convention-specific va_list data types. */
6695 ix86_build_builtin_va_list (void)
6697 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
6699 /* Initialize the ABI-specific va_list builtin types. */
6703 if (ix86_abi == MS_ABI)
6705 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6706 if (TREE_CODE (t) != RECORD_TYPE)
6707 t = build_variant_type_copy (t);
6708 sysv_va_list_type_node = t;
6713 if (TREE_CODE (t) != RECORD_TYPE)
6714 t = build_variant_type_copy (t);
6715 sysv_va_list_type_node = t;
6717 if (ix86_abi != MS_ABI)
6719 t = ix86_build_builtin_va_list_abi (MS_ABI);
6720 if (TREE_CODE (t) != RECORD_TYPE)
6721 t = build_variant_type_copy (t);
6722 ms_va_list_type_node = t;
6727 if (TREE_CODE (t) != RECORD_TYPE)
6728 t = build_variant_type_copy (t);
6729 ms_va_list_type_node = t;
6736 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6739 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6748 int regparm = ix86_regparm;
6750 if (cum->call_abi != ix86_abi)
6751 regparm = (ix86_abi != SYSV_ABI
6752 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
6754 /* GPR size of varargs save area. */
6755 if (cfun->va_list_gpr_size)
6756 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6758 ix86_varargs_gpr_size = 0;
6760 /* FPR size of varargs save area. We don't need it if we don't pass
6761 anything in SSE registers. */
6762 if (cum->sse_nregs && cfun->va_list_fpr_size)
6763 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6765 ix86_varargs_fpr_size = 0;
6767 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6770 save_area = frame_pointer_rtx;
6771 set = get_varargs_alias_set ();
6773 for (i = cum->regno;
6775 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6778 mem = gen_rtx_MEM (Pmode,
6779 plus_constant (save_area, i * UNITS_PER_WORD));
6780 MEM_NOTRAP_P (mem) = 1;
6781 set_mem_alias_set (mem, set);
6782 emit_move_insn (mem, gen_rtx_REG (Pmode,
6783 x86_64_int_parameter_registers[i]));
6786 if (ix86_varargs_fpr_size)
6788 /* Now emit code to save the SSE registers.  The AX parameter contains the
6789 number of SSE parameter registers used to call this function.  We use
6790 the sse_prologue_save insn template, which produces a computed jump
6791 across the SSE saves.  We need some preparation work to get this working. */
6793 label = gen_label_rtx ();
6794 label_ref = gen_rtx_LABEL_REF (Pmode, label);
6796 /* Compute the address to jump to:
6797 label - eax*4 + nnamed_sse_arguments*4, or
6798 label - eax*5 + nnamed_sse_arguments*5 for AVX. */
6799 tmp_reg = gen_reg_rtx (Pmode);
6800 nsse_reg = gen_reg_rtx (Pmode);
6801 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6802 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6803 gen_rtx_MULT (Pmode, nsse_reg,
6806 /* vmovaps is one byte longer than movaps. */
6808 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6809 gen_rtx_PLUS (Pmode, tmp_reg,
6815 gen_rtx_CONST (DImode,
6816 gen_rtx_PLUS (DImode,
6818 GEN_INT (cum->sse_regno
6819 * (TARGET_AVX ? 5 : 4)))));
6821 emit_move_insn (nsse_reg, label_ref);
6822 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
6824 /* Compute the address of the memory block we save into.  We always use a
6825 pointer pointing 127 bytes after the first byte to store - this is needed
6826 to keep the instruction size limited to 4 bytes (5 bytes for AVX) with a
6827 one-byte displacement. */
6828 tmp_reg = gen_reg_rtx (Pmode);
6829 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6830 plus_constant (save_area,
6831 ix86_varargs_gpr_size + 127)));
6832 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6833 MEM_NOTRAP_P (mem) = 1;
6834 set_mem_alias_set (mem, set);
6835 set_mem_align (mem, BITS_PER_WORD);
6837 /* And finally do the dirty job! */
6838 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6839 GEN_INT (cum->sse_regno), label));
6844 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6846 alias_set_type set = get_varargs_alias_set ();
6849 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
6853 mem = gen_rtx_MEM (Pmode,
6854 plus_constant (virtual_incoming_args_rtx,
6855 i * UNITS_PER_WORD));
6856 MEM_NOTRAP_P (mem) = 1;
6857 set_mem_alias_set (mem, set);
6859 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6860 emit_move_insn (mem, reg);
6865 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6866 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6869 CUMULATIVE_ARGS next_cum;
6872 /* This argument doesn't appear to be used anymore, which is good,
6873 because the old code here didn't suppress rtl generation. */
6874 gcc_assert (!no_rtl);
6879 fntype = TREE_TYPE (current_function_decl);
6881 /* For varargs, we do not want to skip the dummy va_dcl argument.
6882 For stdargs, we do want to skip the last named argument. */
6884 if (stdarg_p (fntype))
6885 function_arg_advance (&next_cum, mode, type, 1);
6887 if (cum->call_abi == MS_ABI)
6888 setup_incoming_varargs_ms_64 (&next_cum);
6890 setup_incoming_varargs_64 (&next_cum);
6893 /* Check whether TYPE is a va_list of kind char *. */
6896 is_va_list_char_pointer (tree type)
6900 /* For 32-bit it is always true. */
6903 canonic = ix86_canonical_va_list_type (type);
6904 return (canonic == ms_va_list_type_node
6905 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
6908 /* Implement va_start. */
6911 ix86_va_start (tree valist, rtx nextarg)
6913 HOST_WIDE_INT words, n_gpr, n_fpr;
6914 tree f_gpr, f_fpr, f_ovf, f_sav;
6915 tree gpr, fpr, ovf, sav, t;
6918 /* Only the 64-bit target needs something special. */
6919 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6921 std_expand_builtin_va_start (valist, nextarg);
6925 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6926 f_fpr = TREE_CHAIN (f_gpr);
6927 f_ovf = TREE_CHAIN (f_fpr);
6928 f_sav = TREE_CHAIN (f_ovf);
6930 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
6931 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6932 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6933 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6934 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6936 /* Count number of gp and fp argument registers used. */
6937 words = crtl->args.info.words;
6938 n_gpr = crtl->args.info.regno;
6939 n_fpr = crtl->args.info.sse_regno;
6941 if (cfun->va_list_gpr_size)
6943 type = TREE_TYPE (gpr);
6944 t = build2 (MODIFY_EXPR, type,
6945 gpr, build_int_cst (type, n_gpr * 8));
6946 TREE_SIDE_EFFECTS (t) = 1;
6947 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6950 if (TARGET_SSE && cfun->va_list_fpr_size)
6952 type = TREE_TYPE (fpr);
6953 t = build2 (MODIFY_EXPR, type, fpr,
6954 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
6955 TREE_SIDE_EFFECTS (t) = 1;
6956 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6959 /* Find the overflow area. */
6960 type = TREE_TYPE (ovf);
6961 t = make_tree (type, crtl->args.internal_arg_pointer);
6963 t = build2 (POINTER_PLUS_EXPR, type, t,
6964 size_int (words * UNITS_PER_WORD));
6965 t = build2 (MODIFY_EXPR, type, ovf, t);
6966 TREE_SIDE_EFFECTS (t) = 1;
6967 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6969 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
6971 /* Find the register save area.
6972 The function prologue saves it right above the stack frame. */
6973 type = TREE_TYPE (sav);
6974 t = make_tree (type, frame_pointer_rtx);
6975 if (!ix86_varargs_gpr_size)
6976 t = build2 (POINTER_PLUS_EXPR, type, t,
6977 size_int (-8 * X86_64_REGPARM_MAX));
6978 t = build2 (MODIFY_EXPR, type, sav, t);
6979 TREE_SIDE_EFFECTS (t) = 1;
6980 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
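/* Example of the initialization above, assuming SysV AMD64: for
   int f (int a, ...) one GPR is named, so va_start leaves
   gp_offset = 8, fp_offset = 8 * X86_64_REGPARM_MAX = 48,
   overflow_arg_area pointing just past the named stack words, and
   reg_save_area at the block the prologue spilled.  */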
6984 /* Implement va_arg. */
6987 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6990 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
6991 tree f_gpr, f_fpr, f_ovf, f_sav;
6992 tree gpr, fpr, ovf, sav, t;
6994 tree lab_false, lab_over = NULL_TREE;
6999 enum machine_mode nat_mode;
7002 /* Only the 64-bit target needs something special. */
7003 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7004 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7006 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7007 f_fpr = TREE_CHAIN (f_gpr);
7008 f_ovf = TREE_CHAIN (f_fpr);
7009 f_sav = TREE_CHAIN (f_ovf);
7011 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7012 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7013 valist = build_va_arg_indirect_ref (valist);
7014 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7015 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7016 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7018 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7020 type = build_pointer_type (type);
7021 size = int_size_in_bytes (type);
7022 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7024 nat_mode = type_natural_mode (type, NULL);
7033 /* Unnamed 256-bit vector mode parameters are passed on the stack. */
7034 if (ix86_cfun_abi () == SYSV_ABI)
7041 container = construct_container (nat_mode, TYPE_MODE (type),
7042 type, 0, X86_64_REGPARM_MAX,
7043 X86_64_SSE_REGPARM_MAX, intreg,
7048 /* Pull the value out of the saved registers. */
7050 addr = create_tmp_var (ptr_type_node, "addr");
7054 int needed_intregs, needed_sseregs;
7056 tree int_addr, sse_addr;
7058 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7059 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7061 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7063 need_temp = (!REG_P (container)
7064 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7065 || TYPE_ALIGN (type) > 128));
7067 /* If we are passing a structure, verify that it occupies a consecutive
7068 block of the register save area.  If not, we need to do moves. */
7069 if (!need_temp && !REG_P (container))
7071 /* Verify that all registers are strictly consecutive. */
7072 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7076 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7078 rtx slot = XVECEXP (container, 0, i);
7079 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7080 || INTVAL (XEXP (slot, 1)) != i * 16)
7088 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7090 rtx slot = XVECEXP (container, 0, i);
7091 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7092 || INTVAL (XEXP (slot, 1)) != i * 8)
7104 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7105 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7108 /* First ensure that we fit completely in registers. */
7111 t = build_int_cst (TREE_TYPE (gpr),
7112 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7113 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7114 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7115 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7116 gimplify_and_add (t, pre_p);
7120 t = build_int_cst (TREE_TYPE (fpr),
7121 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7122 + X86_64_REGPARM_MAX * 8);
7123 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7124 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7125 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7126 gimplify_and_add (t, pre_p);
7129 /* Compute index to start of area used for integer regs. */
7132 /* int_addr = gpr + sav; */
7133 t = fold_convert (sizetype, gpr);
7134 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7135 gimplify_assign (int_addr, t, pre_p);
7139 /* sse_addr = fpr + sav; */
7140 t = fold_convert (sizetype, fpr);
7141 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7142 gimplify_assign (sse_addr, t, pre_p);
7147 tree temp = create_tmp_var (type, "va_arg_tmp");
7150 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
7151 gimplify_assign (addr, t, pre_p);
7153 for (i = 0; i < XVECLEN (container, 0); i++)
7155 rtx slot = XVECEXP (container, 0, i);
7156 rtx reg = XEXP (slot, 0);
7157 enum machine_mode mode = GET_MODE (reg);
7158 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
7159 tree addr_type = build_pointer_type (piece_type);
7160 tree daddr_type = build_pointer_type_for_mode (piece_type,
7164 tree dest_addr, dest;
7166 if (SSE_REGNO_P (REGNO (reg)))
7168 src_addr = sse_addr;
7169 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
7173 src_addr = int_addr;
7174 src_offset = REGNO (reg) * 8;
7176 src_addr = fold_convert (addr_type, src_addr);
7177 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
7178 size_int (src_offset));
7179 src = build_va_arg_indirect_ref (src_addr);
7181 dest_addr = fold_convert (daddr_type, addr);
7182 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
7183 size_int (INTVAL (XEXP (slot, 1))));
7184 dest = build_va_arg_indirect_ref (dest_addr);
7186 gimplify_assign (dest, src, pre_p);
7192 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
7193 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7194 gimplify_assign (gpr, t, pre_p);
7199 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7200 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7201 gimplify_assign (fpr, t, pre_p);
7204 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7206 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7209 /* ... otherwise out of the overflow area. */
7211 /* When we align a parameter on the stack for the caller, if the
7212 parameter's alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will
7213 be aligned at MAX_SUPPORTED_STACK_ALIGNMENT.  Here we match the callee
7214 with the caller. */
7215 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
7216 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7217 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7219 /* Care for on-stack alignment if needed. */
7220 if (arg_boundary <= 64
7221 || integer_zerop (TYPE_SIZE (type)))
7225 HOST_WIDE_INT align = arg_boundary / 8;
7226 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
7227 size_int (align - 1));
7228 t = fold_convert (sizetype, t);
7229 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7231 t = fold_convert (TREE_TYPE (ovf), t);
7233 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7234 gimplify_assign (addr, t, pre_p);
7236 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
7237 size_int (rsize * UNITS_PER_WORD));
7238 gimplify_assign (unshare_expr (ovf), t, pre_p);
7241 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7243 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
7244 addr = fold_convert (ptrtype, addr);
7247 addr = build_va_arg_indirect_ref (addr);
7248 return build_va_arg_indirect_ref (addr);
7251 /* Return nonzero if OPNUM's MEM should be matched
7252 in movabs* patterns. */
7255 ix86_check_movabs (rtx insn, int opnum)
7259 set = PATTERN (insn);
7260 if (GET_CODE (set) == PARALLEL)
7261 set = XVECEXP (set, 0, 0);
7262 gcc_assert (GET_CODE (set) == SET);
7263 mem = XEXP (set, opnum);
7264 while (GET_CODE (mem) == SUBREG)
7265 mem = SUBREG_REG (mem);
7266 gcc_assert (MEM_P (mem));
7267 return (volatile_ok || !MEM_VOLATILE_P (mem));
7270 /* Initialize the table of extra 80387 mathematical constants. */
7273 init_ext_80387_constants (void)
7275 static const char * cst[5] =
7277 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7278 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7279 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7280 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7281 "3.1415926535897932385128089594061862044", /* 4: fldpi */
7285 for (i = 0; i < 5; i++)
7287 real_from_string (&ext_80387_constants_table[i], cst[i]);
7288 /* Ensure each constant is rounded to XFmode precision. */
7289 real_convert (&ext_80387_constants_table[i],
7290 XFmode, &ext_80387_constants_table[i]);
7293 ext_80387_constants_init = 1;
7296 /* Return true if the constant is something that can be loaded with
7297 a special instruction. */
7300 standard_80387_constant_p (rtx x)
7302 enum machine_mode mode = GET_MODE (x);
7306 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7309 if (x == CONST0_RTX (mode))
7311 if (x == CONST1_RTX (mode))
7314 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7316 /* For XFmode constants, try to find a special 80387 instruction when
7317 optimizing for size or on those CPUs that benefit from them. */
7319 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7323 if (! ext_80387_constants_init)
7324 init_ext_80387_constants ();
7326 for (i = 0; i < 5; i++)
7327 if (real_identical (&r, &ext_80387_constants_table[i]))
7331 /* A load of the constant -0.0 or -1.0 will be split into an
7332 fldz;fchs or fld1;fchs sequence. */
7333 if (real_isnegzero (&r))
7335 if (real_identical (&r, &dconstm1))
7341 /* Return the opcode of the special instruction to be used to load the constant X. */
7345 standard_80387_constant_opcode (rtx x)
7347 switch (standard_80387_constant_p (x))
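/* The mapping, for reference: a return of 1 from
   standard_80387_constant_p selects "fldz", 2 selects "fld1", the
   values 3..7 select fldlg2, fldln2, fldl2e, fldl2t and fldpi for the
   table entries initialized above, and 8/9 stand for the split
   fldz;fchs and fld1;fchs sequences.  */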
7371 /* Return the CONST_DOUBLE representing the 80387 constant that is
7372 loaded by the specified special instruction. The argument IDX
7373 matches the return value from standard_80387_constant_p. */
7376 standard_80387_constant_rtx (int idx)
7380 if (! ext_80387_constants_init)
7381 init_ext_80387_constants ();
7397 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
7401 /* Return 1 if X is all 0s and 2 if X is all 1s
7402 in a supported SSE vector mode. */
7405 standard_sse_constant_p (rtx x)
7407 enum machine_mode mode = GET_MODE (x);
7409 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7411 if (vector_all_ones_operand (x, mode))
7427 /* Return the opcode of the special instruction to be used to load the SSE constant X. */
7431 standard_sse_constant_opcode (rtx insn, rtx x)
7433 switch (standard_sse_constant_p (x))
7436 switch (get_attr_mode (insn))
7439 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7441 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7443 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7445 return "vxorps\t%x0, %x0, %x0";
7447 return "vxorpd\t%x0, %x0, %x0";
7449 return "vpxor\t%x0, %x0, %x0";
7454 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
7461 /* Returns 1 if OP contains a symbol reference. */
7464 symbolic_reference_mentioned_p (rtx op)
7469 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7472 fmt = GET_RTX_FORMAT (GET_CODE (op));
7473 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7479 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7480 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7484 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7491 /* Return 1 if it is appropriate to emit `ret' instructions in the
7492 body of a function. Do this only if the epilogue is simple, needing a
7493 couple of insns. Prior to reloading, we can't tell how many registers
7494 must be saved, so return 0 then. Return 0 if there is no frame
7495 marker to de-allocate. */
7498 ix86_can_use_return_insn_p (void)
7500 struct ix86_frame frame;
7502 if (! reload_completed || frame_pointer_needed)
7505 /* Don't allow more than 32k pop, since that's all we can do
7506 with one instruction. */
7507 if (crtl->args.pops_args
7508 && crtl->args.size >= 32768)
7511 ix86_compute_frame_layout (&frame);
7512 return frame.to_allocate == 0 && frame.padding0 == 0
7513 && (frame.nregs + frame.nsseregs) == 0;
7516 /* Value should be nonzero if functions must have frame pointers.
7517 Zero means the frame pointer need not be set up (and parms may
7518 be accessed via the stack pointer) in functions that seem suitable. */
7521 ix86_frame_pointer_required (void)
7523 /* If we accessed previous frames, then the generated code expects
7524 to be able to access the saved ebp value in our frame. */
7525 if (cfun->machine->accesses_prev_frame)
7528 /* Several x86 OSes need a frame pointer for other reasons,
7529 usually pertaining to setjmp. */
7530 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7533 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7534 the frame pointer by default. Turn it back on now if we've not
7535 got a leaf function. */
7536 if (TARGET_OMIT_LEAF_FRAME_POINTER
7537 && (!current_function_is_leaf
7538 || ix86_current_function_calls_tls_descriptor))
7547 /* Record that the current function accesses previous call frames. */
7550 ix86_setup_frame_addresses (void)
7552 cfun->machine->accesses_prev_frame = 1;
7555 #ifndef USE_HIDDEN_LINKONCE
7556 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7557 # define USE_HIDDEN_LINKONCE 1
7559 # define USE_HIDDEN_LINKONCE 0
7563 static int pic_labels_used;
7565 /* Fills in the label name that should be used for a pc thunk for
7566 the given register. */
7569 get_pc_thunk_name (char name[32], unsigned int regno)
7571 gcc_assert (!TARGET_64BIT);
7573 if (USE_HIDDEN_LINKONCE)
7574 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7576 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
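/* For example, with USE_HIDDEN_LINKONCE and regno == BX_REG this
   produces the well-known name "__i686.get_pc_thunk.bx"; otherwise an
   internal label of the form "LPR<regno>" is generated.  */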
7580 /* This function generates code for -fpic that loads the target register
7581 with the return address of the caller and then returns. */
7584 ix86_code_end (void)
7589 for (regno = 0; regno < 8; ++regno)
7594 if (! ((pic_labels_used >> regno) & 1))
7597 get_pc_thunk_name (name, regno);
7599 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
7600 get_identifier (name),
7601 build_function_type (void_type_node, void_list_node));
7602 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
7603 NULL_TREE, void_type_node);
7604 TREE_PUBLIC (decl) = 1;
7605 TREE_STATIC (decl) = 1;
7610 switch_to_section (darwin_sections[text_coal_section]);
7611 fputs ("\t.weak_definition\t", asm_out_file);
7612 assemble_name (asm_out_file, name);
7613 fputs ("\n\t.private_extern\t", asm_out_file);
7614 assemble_name (asm_out_file, name);
7615 fputs ("\n", asm_out_file);
7616 ASM_OUTPUT_LABEL (asm_out_file, name);
7617 DECL_WEAK (decl) = 1;
7621 if (USE_HIDDEN_LINKONCE)
7623 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
7625 (*targetm.asm_out.unique_section) (decl, 0);
7626 switch_to_section (get_named_section (decl, NULL, 0));
7628 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7629 fputs ("\t.hidden\t", asm_out_file);
7630 assemble_name (asm_out_file, name);
7631 putc ('\n', asm_out_file);
7632 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7636 switch_to_section (text_section);
7637 ASM_OUTPUT_LABEL (asm_out_file, name);
7640 DECL_INITIAL (decl) = make_node (BLOCK);
7641 current_function_decl = decl;
7642 init_function_start (decl);
7643 first_function_block_is_cold = false;
7644 /* Make sure unwind info is emitted for the thunk if needed. */
7645 final_start_function (emit_barrier (), asm_out_file, 1);
7647 xops[0] = gen_rtx_REG (Pmode, regno);
7648 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7649 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7650 output_asm_insn ("ret", xops);
7651 final_end_function ();
7652 init_insn_lengths ();
7653 free_after_compilation (cfun);
7655 current_function_decl = NULL;
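/* The thunk body emitted by the loop above is just (taking %ebx as an
   example):

       __i686.get_pc_thunk.bx:
               movl (%esp), %ebx
               ret

   i.e. it copies its own return address - the address of the
   instruction following the call - into the destination register.  */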
7659 /* Emit code for the SET_GOT patterns. */
7662 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7668 if (TARGET_VXWORKS_RTP && flag_pic)
7670 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7671 xops[2] = gen_rtx_MEM (Pmode,
7672 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7673 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7675 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7676 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7677 an unadorned address. */
7678 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7679 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7680 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7684 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7686 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7688 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7691 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7694 output_asm_insn ("call\t%a2", xops);
7695 #ifdef DWARF2_UNWIND_INFO
7696 /* The call to the next label acts as a push. */
7697 if (dwarf2out_do_frame ())
7701 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7702 gen_rtx_PLUS (Pmode,
7705 RTX_FRAME_RELATED_P (insn) = 1;
7706 dwarf2out_frame_debug (insn, true);
7713 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7714 is what will be referenced by the Mach-O PIC subsystem. */
7716 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7719 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7720 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7724 output_asm_insn ("pop%z0\t%0", xops);
7725 #ifdef DWARF2_UNWIND_INFO
7726 /* The pop is a pop and clobbers dest, but doesn't restore it
7727 for unwind info purposes. */
7728 if (dwarf2out_do_frame ())
7732 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
7733 dwarf2out_frame_debug (insn, true);
7734 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7735 gen_rtx_PLUS (Pmode,
7738 RTX_FRAME_RELATED_P (insn) = 1;
7739 dwarf2out_frame_debug (insn, true);
7748 get_pc_thunk_name (name, REGNO (dest));
7749 pic_labels_used |= 1 << REGNO (dest);
7751 #ifdef DWARF2_UNWIND_INFO
7752 /* Ensure all queued register saves are flushed before the call. */
7754 if (dwarf2out_do_frame ())
7758 insn = emit_barrier ();
7760 dwarf2out_frame_debug (insn, false);
7763 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7764 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7765 output_asm_insn ("call\t%X2", xops);
7766 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7767 is what will be referenced by the Mach-O PIC subsystem. */
7770 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7772 targetm.asm_out.internal_label (asm_out_file, "L",
7773 CODE_LABEL_NUMBER (label));
7780 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7781 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7783 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
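/* Putting it together, the classic non-thunk sequence emitted here for
   the label case is roughly:

       call 1f
   1:  popl %ebx
       addl $_GLOBAL_OFFSET_TABLE_+[.-1b], %ebx

   while the deep-branch-prediction path calls the pc thunk for the
   destination register and then adds $_GLOBAL_OFFSET_TABLE_ directly.  */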
7788 /* Generate a "push" pattern for input ARG. */
7793 if (ix86_cfa_state->reg == stack_pointer_rtx)
7794 ix86_cfa_state->offset += UNITS_PER_WORD;
7796 return gen_rtx_SET (VOIDmode,
7798 gen_rtx_PRE_DEC (Pmode,
7799 stack_pointer_rtx)),
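/* The RTL generated above is roughly (on a 32-bit target)

       (set (mem:SI (pre_dec:SI (reg:SI sp))) (reg:SI arg))

   i.e. a single "push" instruction; the CFA offset bookkeeping at the
   top accounts for the implicit stack-pointer decrement.  */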
7803 /* Return >= 0 if there is an unused call-clobbered register available
7804 for the entire function. */
7807 ix86_select_alt_pic_regnum (void)
7809 if (current_function_is_leaf && !crtl->profile
7810 && !ix86_current_function_calls_tls_descriptor)
7813 /* Can't use the same register for both PIC and DRAP. */
7815 drap = REGNO (crtl->drap_reg);
7818 for (i = 2; i >= 0; --i)
7819 if (i != drap && !df_regs_ever_live_p (i))
7823 return INVALID_REGNUM;
7826 /* Return 1 if we need to save REGNO. */
7828 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7830 if (pic_offset_table_rtx
7831 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7832 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7834 || crtl->calls_eh_return
7835 || crtl->uses_const_pool))
7837 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7842 if (crtl->calls_eh_return && maybe_eh_return)
7847 unsigned test = EH_RETURN_DATA_REGNO (i);
7848 if (test == INVALID_REGNUM)
7855 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
7858 return (df_regs_ever_live_p (regno)
7859 && !call_used_regs[regno]
7860 && !fixed_regs[regno]
7861 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
7864 /* Return the number of saved general purpose registers. */
7867 ix86_nsaved_regs (void)
7872 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7873 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7878 /* Return the number of saved SSE registers. */
7881 ix86_nsaved_sseregs (void)
7886 if (ix86_cfun_abi () != MS_ABI)
7888 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7889 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7894 /* Given FROM and TO register numbers, say whether this elimination is
7895 allowed. If stack alignment is needed, we can only replace argument
7896 pointer with hard frame pointer, or replace frame pointer with stack
7897 pointer. Otherwise, frame pointer elimination is automatically
7898 handled and all other eliminations are valid. */
7901 ix86_can_eliminate (const int from, const int to)
7903 if (stack_realign_fp)
7904 return ((from == ARG_POINTER_REGNUM
7905 && to == HARD_FRAME_POINTER_REGNUM)
7906 || (from == FRAME_POINTER_REGNUM
7907 && to == STACK_POINTER_REGNUM));
7909 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
7912 /* Return the offset between two registers, one to be eliminated, and the other
7913 its replacement, at the start of a routine. */
7916 ix86_initial_elimination_offset (int from, int to)
7918 struct ix86_frame frame;
7919 ix86_compute_frame_layout (&frame);
7921 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7922 return frame.hard_frame_pointer_offset;
7923 else if (from == FRAME_POINTER_REGNUM
7924 && to == HARD_FRAME_POINTER_REGNUM)
7925 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
7928 gcc_assert (to == STACK_POINTER_REGNUM);
7930 if (from == ARG_POINTER_REGNUM)
7931 return frame.stack_pointer_offset;
7933 gcc_assert (from == FRAME_POINTER_REGNUM);
7934 return frame.stack_pointer_offset - frame.frame_pointer_offset;
7938 /* In a dynamically-aligned function, we can't know the offset from
7939 stack pointer to frame pointer, so we must ensure that setjmp
7940 eliminates fp against the hard fp (%ebp) rather than trying to
7941 index from %esp up to the top of the frame across a gap that is
7942 of unknown (at compile-time) size. */
7944 ix86_builtin_setjmp_frame_value (void)
7946 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
7949 /* Fill the ix86_frame structure describing the frame of the function currently being compiled. */
7952 ix86_compute_frame_layout (struct ix86_frame *frame)
7954 unsigned int stack_alignment_needed;
7955 HOST_WIDE_INT offset;
7956 unsigned int preferred_alignment;
7957 HOST_WIDE_INT size = get_frame_size ();
7959 frame->nregs = ix86_nsaved_regs ();
7960 frame->nsseregs = ix86_nsaved_sseregs ();
7962 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
7963 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
7965 /* The MS ABI seems to require the stack alignment to always be 16, except for function prologues and leaf functions. */
7967 if (ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
7969 preferred_alignment = 16;
7970 stack_alignment_needed = 16;
7971 crtl->preferred_stack_boundary = 128;
7972 crtl->stack_alignment_needed = 128;
7975 gcc_assert (!size || stack_alignment_needed);
7976 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
7977 gcc_assert (preferred_alignment <= stack_alignment_needed);
7979 /* During reload iteration the number of registers saved can change.
7980 Recompute the value as needed.  Do not recompute when the number of
7981 registers didn't change, as reload makes multiple calls to the function
7982 and does not expect the decision to change within a single iteration. */
7983 if (!optimize_function_for_size_p (cfun)
7984 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
7986 int count = frame->nregs;
7988 cfun->machine->use_fast_prologue_epilogue_nregs = count;
7989 /* The fast prologue uses move instead of push to save registers.  This
7990 is significantly longer, but also executes faster, as modern hardware
7991 can execute the moves in parallel but can't do that for push/pop.
7993 Be careful about choosing which prologue to emit: when a function takes
7994 many instructions to execute we may use the slow version, as well as
7995 when the function is known to be outside a hot spot (this is known with
7996 feedback only).  Weight the size of the function by the number of
7997 registers to save, as it is cheap to use one or two push instructions
7998 but very slow to use many of them. */
8000 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
8001 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
8002 || (flag_branch_probabilities
8003 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
8004 cfun->machine->use_fast_prologue_epilogue = false;
8006 cfun->machine->use_fast_prologue_epilogue
8007 = !expensive_function_p (count);
8009 if (TARGET_PROLOGUE_USING_MOVE
8010 && cfun->machine->use_fast_prologue_epilogue)
8011 frame->save_regs_using_mov = true;
8013 frame->save_regs_using_mov = false;
8015 /* Skip return address. */
8016 offset = UNITS_PER_WORD;
8018 /* Skip pushed static chain. */
8019 if (ix86_static_chain_on_stack)
8020 offset += UNITS_PER_WORD;
8022 /* Skip saved base pointer. */
8023 if (frame_pointer_needed)
8024 offset += UNITS_PER_WORD;
8026 frame->hard_frame_pointer_offset = offset;
8028 /* Set offset to aligned because the realigned frame starts from here. */
8030 if (stack_realign_fp)
8031 offset = (offset + stack_alignment_needed - 1) & -stack_alignment_needed;
8033 /* Register save area */
8034 offset += frame->nregs * UNITS_PER_WORD;
8036 /* Align SSE reg save area. */
8037 if (frame->nsseregs)
8038 frame->padding0 = ((offset + 16 - 1) & -16) - offset;
8040 frame->padding0 = 0;
8042 /* SSE register save area. */
8043 offset += frame->padding0 + frame->nsseregs * 16;
8046 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
8047 offset += frame->va_arg_size;
8049 /* Align start of frame for local function. */
8050 frame->padding1 = ((offset + stack_alignment_needed - 1)
8051 & -stack_alignment_needed) - offset;
8053 offset += frame->padding1;
8055 /* Frame pointer points here. */
8056 frame->frame_pointer_offset = offset;
8060 /* Add the outgoing arguments area.  Can be skipped if we eliminated
8061 all the function calls as dead code.
8062 Skipping is however impossible when the function calls alloca, as the
8063 alloca expander assumes that the last crtl->outgoing_args_size bytes
8064 of the stack frame are unused. */
8065 if (ACCUMULATE_OUTGOING_ARGS
8066 && (!current_function_is_leaf || cfun->calls_alloca
8067 || ix86_current_function_calls_tls_descriptor))
8069 offset += crtl->outgoing_args_size;
8070 frame->outgoing_arguments_size = crtl->outgoing_args_size;
8073 frame->outgoing_arguments_size = 0;
8075 /* Align stack boundary.  Only needed if we're calling another function or using alloca. */
8077 if (!current_function_is_leaf || cfun->calls_alloca
8078 || ix86_current_function_calls_tls_descriptor)
8079 frame->padding2 = ((offset + preferred_alignment - 1)
8080 & -preferred_alignment) - offset;
8082 frame->padding2 = 0;
8084 offset += frame->padding2;
8086 /* We've reached end of stack frame. */
8087 frame->stack_pointer_offset = offset;
8089 /* Size prologue needs to allocate. */
8090 frame->to_allocate =
8091 (size + frame->padding1 + frame->padding2
8092 + frame->outgoing_arguments_size + frame->va_arg_size);
8094 if ((!frame->to_allocate && frame->nregs <= 1)
8095 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
8096 frame->save_regs_using_mov = false;
8098 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8099 && current_function_sp_is_unchanging
8100 && current_function_is_leaf
8101 && !ix86_current_function_calls_tls_descriptor)
8103 frame->red_zone_size = frame->to_allocate;
8104 if (frame->save_regs_using_mov)
8105 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
8106 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
8107 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
8110 frame->red_zone_size = 0;
8111 frame->to_allocate -= frame->red_zone_size;
8112 frame->stack_pointer_offset -= frame->red_zone_size;
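/* A sketch of the layout computed above, from high to low addresses:

       return address
       [pushed static chain]
       [saved frame pointer]         <- hard_frame_pointer_offset
       saved GP registers
       [padding0]  saved SSE registers
       va_arg register save area
       [padding1]                    <- frame_pointer_offset
       local variables
       outgoing arguments
       [padding2]                    <- stack_pointer_offset

   to_allocate is the part the prologue must carve out below the
   register saves, minus whatever fits in the red zone.  */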
8115 /* Emit code to save registers in the prologue. */
8118 ix86_emit_save_regs (void)
8123 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
8124 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8126 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
8127 RTX_FRAME_RELATED_P (insn) = 1;
8131 /* Emit code to save registers using MOV insns.  The first register
8132 is saved at POINTER + OFFSET. */
8134 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8139 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8140 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8142 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
8144 gen_rtx_REG (Pmode, regno));
8145 RTX_FRAME_RELATED_P (insn) = 1;
8146 offset += UNITS_PER_WORD;
8150 /* Emit code to save SSE registers using MOV insns.  The first register
8151 is saved at POINTER + OFFSET. */
8153 ix86_emit_save_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8159 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8160 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8162 mem = adjust_address (gen_rtx_MEM (TImode, pointer), TImode, offset);
8163 set_mem_align (mem, 128);
8164 insn = emit_move_insn (mem, gen_rtx_REG (TImode, regno));
8165 RTX_FRAME_RELATED_P (insn) = 1;
8170 static GTY(()) rtx queued_cfa_restores;
/* Add a REG_CFA_RESTORE REG note to INSN, or queue it until the next
   stack manipulation insn.  Don't add the note if the previously saved
   value will be left untouched within the stack red zone till return,
   as unwinders can find the same value in the register and on the
   stack.  */
8179 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT red_offset)
8182 && !TARGET_64BIT_MS_ABI
8183 && red_offset + RED_ZONE_SIZE >= 0
8184 && crtl->args.pops_args < 65536)
8189 add_reg_note (insn, REG_CFA_RESTORE, reg);
8190 RTX_FRAME_RELATED_P (insn) = 1;
8194 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
/* Add queued REG_CFA_RESTORE notes to INSN, if any.  */
8200 ix86_add_queued_cfa_restore_notes (rtx insn)
8203 if (!queued_cfa_restores)
8205 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
8207 XEXP (last, 1) = REG_NOTES (insn);
8208 REG_NOTES (insn) = queued_cfa_restores;
8209 queued_cfa_restores = NULL_RTX;
8210 RTX_FRAME_RELATED_P (insn) = 1;
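/* Aside: the queue above is a note chain; the loop walks to its tail
   and links the insn's existing notes behind it, an O(queue-length)
   splice.  A generic sketch of the same operation on a minimal singly
   linked list (illustration only, hypothetical types):  */
#if 0
struct note { struct note *next; };

static struct note *
splice_queue (struct note *queue, struct note *existing)
{
  struct note *last;
  if (!queue)
    return existing;
  for (last = queue; last->next; last = last->next)
    ;
  last->next = existing;  /* the queue now ends in the old note list */
  return queue;
}
#endif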
/* Expand prologue or epilogue stack adjustment.
   The pattern exists to put a dependency on all ebp-based memory
   accesses.  STYLE should be negative if instructions should be marked
   as frame related, zero if the %r11 register is live and cannot be
   freely used, and positive otherwise.  */
8220 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
8221 int style, bool set_cfa)
8226 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
8227 else if (x86_64_immediate_operand (offset, DImode))
8228 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
8232 /* r11 is used by indirect sibcall return as well, set before the
8233 epilogue and used after the epilogue. ATM indirect sibcall
8234 shouldn't be used together with huge frame sizes in one
8235 function because of the frame_size check in sibcall.c. */
8237 r11 = gen_rtx_REG (DImode, R11_REG);
8238 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
8240 RTX_FRAME_RELATED_P (insn) = 1;
8241 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
8246 ix86_add_queued_cfa_restore_notes (insn);
8252 gcc_assert (ix86_cfa_state->reg == src);
8253 ix86_cfa_state->offset += INTVAL (offset);
8254 ix86_cfa_state->reg = dest;
8256 r = gen_rtx_PLUS (Pmode, src, offset);
8257 r = gen_rtx_SET (VOIDmode, dest, r);
8258 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
8259 RTX_FRAME_RELATED_P (insn) = 1;
8262 RTX_FRAME_RELATED_P (insn) = 1;
/* Find an available register to be used as a dynamic realign argument
   pointer register.  Such a register will be written in the prologue
   and used at the beginning of the body, so it must not be
     1. a parameter passing register.
     2. the frame pointer register.
   We reuse the static-chain register if it is available.  Otherwise,
   we use DI for i386 and R13 for x86-64.  We chose R13 since it has a
   longer encoding.

   Return: the regno of the chosen register.  */
8277 find_drap_reg (void)
8279 tree decl = cfun->decl;
/* Use R13 for a nested function or a function that needs a static
   chain.  Since a function with a tail call may use any caller-saved
   register in the epilogue, DRAP must not use a caller-saved
   register in such a case.  */
8287 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
/* Use DI for a nested function or a function that needs a static
   chain.  Since a function with a tail call may use any caller-saved
   register in the epilogue, DRAP must not use a caller-saved
   register in such a case.  */
8298 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
/* Reuse the static chain register if it isn't used for parameter
   passing.  */
8303 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
8304 && !lookup_attribute ("fastcall",
8305 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
8312 /* Return minimum incoming stack alignment. */
8315 ix86_minimum_incoming_stack_boundary (bool sibcall)
8317 unsigned int incoming_stack_boundary;
8319 /* Prefer the one specified at command line. */
8320 if (ix86_user_incoming_stack_boundary)
8321 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
/* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack
   boundary when -mstackrealign is used, this isn't the sibcall
   check, and the estimated stack alignment is 128 bits.  */
8327 && ix86_force_align_arg_pointer
8328 && crtl->stack_alignment_estimated == 128)
8329 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8331 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
8333 /* Incoming stack alignment can be changed on individual functions
8334 via force_align_arg_pointer attribute. We use the smallest
8335 incoming stack boundary. */
8336 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
8337 && lookup_attribute (ix86_force_align_arg_pointer_string,
8338 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
8339 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8341 /* The incoming stack frame has to be aligned at least at
8342 parm_stack_boundary. */
8343 if (incoming_stack_boundary < crtl->parm_stack_boundary)
8344 incoming_stack_boundary = crtl->parm_stack_boundary;
/* The stack at the entrance of main is aligned by the runtime.  We
   use the smallest incoming stack boundary.  */
8348 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
8349 && DECL_NAME (current_function_decl)
8350 && MAIN_NAME_P (DECL_NAME (current_function_decl))
8351 && DECL_FILE_SCOPE_P (current_function_decl))
8352 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
8354 return incoming_stack_boundary;
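/* Aside: the net effect of the function above is a sequence of clamps
   on the boundary value.  Illustrative sketch only (boundaries in
   bits; names hypothetical, not GCC code):  */
#if 0
static unsigned int
clamp_incoming_boundary (unsigned int boundary,
                         unsigned int parm_boundary,
                         unsigned int main_boundary,
                         int function_is_main)
{
  if (boundary < parm_boundary)          /* at least parm_stack_boundary */
    boundary = parm_boundary;
  if (function_is_main && boundary > main_boundary)
    boundary = main_boundary;            /* runtime aligns main's stack */
  return boundary;
}
#endif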
8357 /* Update incoming stack boundary and estimated stack alignment. */
8360 ix86_update_stack_boundary (void)
8362 ix86_incoming_stack_boundary
8363 = ix86_minimum_incoming_stack_boundary (false);
/* x86_64 varargs need 16-byte stack alignment for the register save
   area.  */
8369 && crtl->stack_alignment_estimated < 128)
8370 crtl->stack_alignment_estimated = 128;
8373 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
8374 needed or an rtx for DRAP otherwise. */
8377 ix86_get_drap_rtx (void)
8379 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
8380 crtl->need_drap = true;
8382 if (stack_realign_drap)
/* Assign DRAP to vDRAP and return vDRAP.  */
8385 unsigned int regno = find_drap_reg ();
8390 arg_ptr = gen_rtx_REG (Pmode, regno);
8391 crtl->drap_reg = arg_ptr;
8394 drap_vreg = copy_to_reg (arg_ptr);
8398 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
8401 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
8402 RTX_FRAME_RELATED_P (insn) = 1;
8410 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
8413 ix86_internal_arg_pointer (void)
8415 return virtual_incoming_args_rtx;
8418 /* Finalize stack_realign_needed flag, which will guide prologue/epilogue
8419 to be generated in correct form. */
8421 ix86_finalize_stack_realign_flags (void)
/* Check whether stack realignment is really needed after reload,
   and store the result in cfun.  */
8425 unsigned int incoming_stack_boundary
8426 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
8427 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
8428 unsigned int stack_realign = (incoming_stack_boundary
8429 < (current_function_is_leaf
8430 ? crtl->max_used_stack_slot_alignment
8431 : crtl->stack_alignment_needed));
8433 if (crtl->stack_realign_finalized)
/* After stack_realign_needed is finalized, we can no longer
   update it.  */
8437 gcc_assert (crtl->stack_realign_needed == stack_realign);
8441 crtl->stack_realign_needed = stack_realign;
8442 crtl->stack_realign_finalized = true;
8446 /* Expand the prologue into a bunch of separate insns. */
8449 ix86_expand_prologue (void)
8453 struct ix86_frame frame;
8454 HOST_WIDE_INT allocate;
8455 int gen_frame_pointer = frame_pointer_needed;
8457 ix86_finalize_stack_realign_flags ();
8459 /* DRAP should not coexist with stack_realign_fp */
8460 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
8462 /* Initialize CFA state for before the prologue. */
8463 ix86_cfa_state->reg = stack_pointer_rtx;
8464 ix86_cfa_state->offset = INCOMING_FRAME_SP_OFFSET;
8466 ix86_compute_frame_layout (&frame);
8468 if (ix86_function_ms_hook_prologue (current_function_decl))
/* Make sure the function starts with
     8b ff	movl.s %edi,%edi
     55		push   %ebp
     8b ec	movl.s %esp,%ebp

   This matches the hookable function prologue in Win32 API
   functions in Microsoft Windows XP Service Pack 2 and newer.
   Wine uses this to enable Windows apps to hook the Win32 API
   functions provided by Wine.  */
8481 insn = emit_insn (gen_vswapmov (gen_rtx_REG (SImode, DI_REG),
8482 gen_rtx_REG (SImode, DI_REG)));
8483 push = emit_insn (gen_push (hard_frame_pointer_rtx));
8484 mov = emit_insn (gen_vswapmov (hard_frame_pointer_rtx,
8485 stack_pointer_rtx));
8487 if (frame_pointer_needed && !(crtl->drap_reg
8488 && crtl->stack_realign_needed))
8490 /* The push %ebp and movl.s %esp, %ebp already set up
8491 the frame pointer. No need to do this again. */
8492 gen_frame_pointer = 0;
8493 RTX_FRAME_RELATED_P (push) = 1;
8494 RTX_FRAME_RELATED_P (mov) = 1;
8495 if (ix86_cfa_state->reg == stack_pointer_rtx)
8496 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8499 /* If the frame pointer is not needed, pop %ebp again. This
8500 could be optimized for cases where ebp needs to be backed up
8501 for some other reason. If stack realignment is needed, pop
8502 the base pointer again, align the stack, and later regenerate
8503 the frame pointer setup. The frame pointer generated by the
8504 hook prologue is not aligned, so it can't be used. */
8505 insn = emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
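/* Aside: the hookable prologue laid down above occupies five bytes
   that a hooking framework can overwrite atomically.  For illustration
   only, the byte pattern is:  */
#if 0
static const unsigned char ms_hook_prologue_bytes[5] = {
  0x8b, 0xff,	/* movl.s %edi,%edi - 2-byte nop, patchable with a jmp */
  0x55,		/* push   %ebp */
  0x8b, 0xec	/* movl.s %esp,%ebp */
};
#endif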
8508 /* The first insn of a function that accepts its static chain on the
8509 stack is to push the register that would be filled in by a direct
8510 call. This insn will be skipped by the trampoline. */
8511 if (ix86_static_chain_on_stack)
8515 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
8516 emit_insn (gen_blockage ());
8518 /* We don't want to interpret this push insn as a register save,
8519 only as a stack adjustment. The real copy of the register as
8520 a save will be done later, if needed. */
8521 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
8522 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8523 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8524 RTX_FRAME_RELATED_P (insn) = 1;
/* Emit prologue code to adjust stack alignment and set up DRAP, in
   case DRAP is needed and stack realignment is really needed after
   reload.  */
8529 if (crtl->drap_reg && crtl->stack_realign_needed)
8532 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8533 int param_ptr_offset = UNITS_PER_WORD;
8535 if (ix86_static_chain_on_stack)
8536 param_ptr_offset += UNITS_PER_WORD;
8537 if (!call_used_regs[REGNO (crtl->drap_reg)])
8538 param_ptr_offset += UNITS_PER_WORD;
8540 gcc_assert (stack_realign_drap);
8542 /* Grab the argument pointer. */
8543 x = plus_constant (stack_pointer_rtx, param_ptr_offset);
/* Only need to push the parameter pointer reg if it is caller
   saved.  */
8548 if (!call_used_regs[REGNO (crtl->drap_reg)])
8550 /* Push arg pointer reg */
8551 insn = emit_insn (gen_push (y));
8552 RTX_FRAME_RELATED_P (insn) = 1;
8555 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
8556 RTX_FRAME_RELATED_P (insn) = 1;
8557 ix86_cfa_state->reg = crtl->drap_reg;
8559 /* Align the stack. */
8560 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8562 GEN_INT (-align_bytes)));
8563 RTX_FRAME_RELATED_P (insn) = 1;
/* Replicate the return address on the stack so that the return
   address can be reached via the (argp - 1) slot.  This is needed
   to implement the macro RETURN_ADDR_RTX and the intrinsic function
   expand_builtin_return_addr, etc.  */
8570 x = gen_frame_mem (Pmode,
8571 plus_constant (x, -UNITS_PER_WORD));
8572 insn = emit_insn (gen_push (x));
8573 RTX_FRAME_RELATED_P (insn) = 1;
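/* Aside: the stack alignment performed above is a plain round-down by
   a power-of-two mask.  A standalone sketch of the same operation on a
   pointer value (illustration only, not GCC code):  */
#if 0
static void *
align_stack_down (void *sp, unsigned long align)
{
  /* Mirrors "and $-align_bytes, %esp"; assumes ALIGN is a power
     of two.  */
  return (void *) ((unsigned long) sp & -align);
}
#endif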
8576 /* Note: AT&T enter does NOT have reversed args. Enter is probably
8577 slower on all targets. Also sdb doesn't like it. */
8579 if (gen_frame_pointer)
8581 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
8582 RTX_FRAME_RELATED_P (insn) = 1;
8584 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8585 RTX_FRAME_RELATED_P (insn) = 1;
8587 if (ix86_cfa_state->reg == stack_pointer_rtx)
8588 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8591 if (stack_realign_fp)
8593 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8594 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
8596 /* Align the stack. */
8597 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8599 GEN_INT (-align_bytes)));
8600 RTX_FRAME_RELATED_P (insn) = 1;
8603 allocate = frame.to_allocate + frame.nsseregs * 16 + frame.padding0;
8605 if (!frame.save_regs_using_mov)
8606 ix86_emit_save_regs ();
8608 allocate += frame.nregs * UNITS_PER_WORD;
/* When using the red zone we may start saving registers before
   allocating the stack frame, saving one cycle of the prologue.
   However, we avoid doing this if we will have to probe the stack,
   since at least on x86_64 the stack probe can turn into a call that
   clobbers a red zone location.  */
8615 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
8616 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
8617 ix86_emit_save_regs_using_mov ((frame_pointer_needed
8618 && !crtl->stack_realign_needed)
8619 ? hard_frame_pointer_rtx
8620 : stack_pointer_rtx,
8621 -frame.nregs * UNITS_PER_WORD);
8625 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
8626 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8627 GEN_INT (-allocate), -1,
8628 ix86_cfa_state->reg == stack_pointer_rtx);
8631 /* Only valid for Win32. */
8632 rtx eax = gen_rtx_REG (Pmode, AX_REG);
8636 gcc_assert (!TARGET_64BIT || cfun->machine->call_abi == MS_ABI);
8638 if (cfun->machine->call_abi == MS_ABI)
8641 eax_live = ix86_eax_live_at_start_p ();
8645 emit_insn (gen_push (eax));
8646 allocate -= UNITS_PER_WORD;
8649 emit_move_insn (eax, GEN_INT (allocate));
8652 insn = gen_allocate_stack_worker_64 (eax, eax);
8654 insn = gen_allocate_stack_worker_32 (eax, eax);
8655 insn = emit_insn (insn);
8657 if (ix86_cfa_state->reg == stack_pointer_rtx)
8659 ix86_cfa_state->offset += allocate;
8660 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
8661 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8662 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8663 RTX_FRAME_RELATED_P (insn) = 1;
8668 if (frame_pointer_needed)
8669 t = plus_constant (hard_frame_pointer_rtx,
8672 - frame.nregs * UNITS_PER_WORD);
8674 t = plus_constant (stack_pointer_rtx, allocate);
8675 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8679 if (frame.save_regs_using_mov
8680 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8681 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8683 if (!frame_pointer_needed
8684 || !(frame.to_allocate + frame.padding0)
8685 || crtl->stack_realign_needed)
8686 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8688 + frame.nsseregs * 16 + frame.padding0);
8690 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8691 -frame.nregs * UNITS_PER_WORD);
8693 if (!frame_pointer_needed
8694 || !(frame.to_allocate + frame.padding0)
8695 || crtl->stack_realign_needed)
8696 ix86_emit_save_sse_regs_using_mov (stack_pointer_rtx,
8699 ix86_emit_save_sse_regs_using_mov (hard_frame_pointer_rtx,
8700 - frame.nregs * UNITS_PER_WORD
8701 - frame.nsseregs * 16
8704 pic_reg_used = false;
8705 if (pic_offset_table_rtx
8706 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8709 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8711 if (alt_pic_reg_used != INVALID_REGNUM)
8712 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8714 pic_reg_used = true;
8721 if (ix86_cmodel == CM_LARGE_PIC)
8723 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8724 rtx label = gen_label_rtx ();
8726 LABEL_PRESERVE_P (label) = 1;
8727 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8728 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8729 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8730 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8731 pic_offset_table_rtx, tmp_reg));
8734 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8737 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
/* In the pic_reg_used case, make sure that the GOT load isn't deleted
   when mcount needs it.  Blockage to avoid call movement across the
   mcount call is emitted in generic code after the
   NOTE_INSN_PROLOGUE_END note.  */
8744 if (crtl->profile && pic_reg_used)
8745 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8747 if (crtl->drap_reg && !crtl->stack_realign_needed)
/* vDRAP is set up, but after reload it turns out stack realignment
   isn't necessary; here we emit the prologue to set up DRAP
   without the stack realignment adjustment.  */
8753 int drap_bp_offset = UNITS_PER_WORD * 2;
8755 if (ix86_static_chain_on_stack)
8756 drap_bp_offset += UNITS_PER_WORD;
8757 x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8758 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8761 /* Prevent instructions from being scheduled into register save push
8762 sequence when access to the redzone area is done through frame pointer.
8763 The offset between the frame pointer and the stack pointer is calculated
8764 relative to the value of the stack pointer at the end of the function
8765 prologue, and moving instructions that access redzone area via frame
8766 pointer inside push sequence violates this assumption. */
8767 if (frame_pointer_needed && frame.red_zone_size)
8768 emit_insn (gen_memory_blockage ());
8770 /* Emit cld instruction if stringops are used in the function. */
8771 if (TARGET_CLD && ix86_current_function_needs_cld)
8772 emit_insn (gen_cld ());
8775 /* Emit code to restore REG using a POP insn. */
8778 ix86_emit_restore_reg_using_pop (rtx reg, HOST_WIDE_INT red_offset)
8780 rtx insn = emit_insn (ix86_gen_pop1 (reg));
8782 if (ix86_cfa_state->reg == crtl->drap_reg
8783 && REGNO (reg) == REGNO (crtl->drap_reg))
8785 /* Previously we'd represented the CFA as an expression
8786 like *(%ebp - 8). We've just popped that value from
8787 the stack, which means we need to reset the CFA to
8788 the drap register. This will remain until we restore
8789 the stack pointer. */
8790 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8791 RTX_FRAME_RELATED_P (insn) = 1;
8795 if (ix86_cfa_state->reg == stack_pointer_rtx)
8797 ix86_cfa_state->offset -= UNITS_PER_WORD;
8798 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8799 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
8800 RTX_FRAME_RELATED_P (insn) = 1;
8803 /* When the frame pointer is the CFA, and we pop it, we are
8804 swapping back to the stack pointer as the CFA. This happens
8805 for stack frames that don't allocate other data, so we assume
8806 the stack pointer is now pointing at the return address, i.e.
8807 the function entry state, which makes the offset be 1 word. */
8808 else if (ix86_cfa_state->reg == hard_frame_pointer_rtx
8809 && reg == hard_frame_pointer_rtx)
8811 ix86_cfa_state->reg = stack_pointer_rtx;
8812 ix86_cfa_state->offset -= UNITS_PER_WORD;
8814 add_reg_note (insn, REG_CFA_DEF_CFA,
8815 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8816 GEN_INT (ix86_cfa_state->offset)));
8817 RTX_FRAME_RELATED_P (insn) = 1;
8820 ix86_add_cfa_restore_note (insn, reg, red_offset);
8823 /* Emit code to restore saved registers using POP insns. */
8826 ix86_emit_restore_regs_using_pop (HOST_WIDE_INT red_offset)
8830 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8831 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
8833 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno),
8835 red_offset += UNITS_PER_WORD;
8839 /* Emit code and notes for the LEAVE instruction. */
8842 ix86_emit_leave (HOST_WIDE_INT red_offset)
8844 rtx insn = emit_insn (ix86_gen_leave ());
8846 ix86_add_queued_cfa_restore_notes (insn);
8848 if (ix86_cfa_state->reg == hard_frame_pointer_rtx)
8850 ix86_cfa_state->reg = stack_pointer_rtx;
8851 ix86_cfa_state->offset -= UNITS_PER_WORD;
8853 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8854 copy_rtx (XVECEXP (PATTERN (insn), 0, 0)));
8855 RTX_FRAME_RELATED_P (insn) = 1;
8856 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx, red_offset);
8860 /* Emit code to restore saved registers using MOV insns. First register
8861 is restored from POINTER + OFFSET. */
8863 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8864 HOST_WIDE_INT red_offset,
8865 int maybe_eh_return)
8868 rtx base_address = gen_rtx_MEM (Pmode, pointer);
8871 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8872 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8874 rtx reg = gen_rtx_REG (Pmode, regno);
/* Ensure that adjust_address won't be forced to produce a pointer
   out of the range allowed by the x86-64 instruction set.  */
8878 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8882 r11 = gen_rtx_REG (DImode, R11_REG);
8883 emit_move_insn (r11, GEN_INT (offset));
8884 emit_insn (gen_adddi3 (r11, r11, pointer));
8885 base_address = gen_rtx_MEM (Pmode, r11);
8888 insn = emit_move_insn (reg,
8889 adjust_address (base_address, Pmode, offset));
8890 offset += UNITS_PER_WORD;
8892 if (ix86_cfa_state->reg == crtl->drap_reg
8893 && regno == REGNO (crtl->drap_reg))
8895 /* Previously we'd represented the CFA as an expression
8896 like *(%ebp - 8). We've just popped that value from
8897 the stack, which means we need to reset the CFA to
8898 the drap register. This will remain until we restore
8899 the stack pointer. */
8900 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8901 RTX_FRAME_RELATED_P (insn) = 1;
8904 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8906 red_offset += UNITS_PER_WORD;
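/* Aside: the r11 detour above exists because x86-64 addressing modes
   carry at most a sign-extended 32-bit displacement.  A sketch of the
   range check that trunc_int_for_mode expresses (illustration only):  */
#if 0
static int
offset_fits_disp32 (HOST_WIDE_INT offset)
{
  /* True iff OFFSET survives truncation to a signed 32-bit value,
     i.e. it can be used directly as a displacement.  */
  return offset == (HOST_WIDE_INT) (int) offset;
}
#endif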
8910 /* Emit code to restore saved registers using MOV insns. First register
8911 is restored from POINTER + OFFSET. */
8913 ix86_emit_restore_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8914 HOST_WIDE_INT red_offset,
8915 int maybe_eh_return)
8918 rtx base_address = gen_rtx_MEM (TImode, pointer);
8921 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8922 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8924 rtx reg = gen_rtx_REG (TImode, regno);
/* Ensure that adjust_address won't be forced to produce a pointer
   out of the range allowed by the x86-64 instruction set.  */
8928 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8932 r11 = gen_rtx_REG (DImode, R11_REG);
8933 emit_move_insn (r11, GEN_INT (offset));
8934 emit_insn (gen_adddi3 (r11, r11, pointer));
8935 base_address = gen_rtx_MEM (TImode, r11);
8938 mem = adjust_address (base_address, TImode, offset);
8939 set_mem_align (mem, 128);
8940 emit_move_insn (reg, mem);
8943 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8949 /* Restore function stack, frame, and registers. */
8952 ix86_expand_epilogue (int style)
8955 struct ix86_frame frame;
8956 HOST_WIDE_INT offset, red_offset;
8957 struct machine_cfa_state cfa_state_save = *ix86_cfa_state;
8960 ix86_finalize_stack_realign_flags ();
8962 /* When stack is realigned, SP must be valid. */
8963 sp_valid = (!frame_pointer_needed
8964 || current_function_sp_is_unchanging
8965 || stack_realign_fp);
8967 ix86_compute_frame_layout (&frame);
8969 /* See the comment about red zone and frame
8970 pointer usage in ix86_expand_prologue. */
8971 if (frame_pointer_needed && frame.red_zone_size)
8972 emit_insn (gen_memory_blockage ());
8974 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
8975 gcc_assert (!using_drap || ix86_cfa_state->reg == crtl->drap_reg);
8977 /* Calculate start of saved registers relative to ebp. Special care
8978 must be taken for the normal return case of a function using
8979 eh_return: the eax and edx registers are marked as saved, but not
8980 restored along this path. */
8981 offset = frame.nregs;
8982 if (crtl->calls_eh_return && style != 2)
8984 offset *= -UNITS_PER_WORD;
8985 offset -= frame.nsseregs * 16 + frame.padding0;
8987 /* Calculate start of saved registers relative to esp on entry of the
8988 function. When realigning stack, this needs to be the most negative
8989 value possible at runtime. */
8990 red_offset = offset;
8992 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
8994 else if (stack_realign_fp)
8995 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
8997 if (ix86_static_chain_on_stack)
8998 red_offset -= UNITS_PER_WORD;
8999 if (frame_pointer_needed)
9000 red_offset -= UNITS_PER_WORD;
/* If we're only restoring one register and sp is not valid then
   use a move instruction to restore the register, since it's
   less work than reloading sp and popping the register.

   The default code results in a stack adjustment using add/lea
   instructions, while this code results in a LEAVE instruction (or
   discrete equivalent), so it is profitable in some other cases as
   well.  Especially when there are no registers to restore.  We also
   use this code when TARGET_USE_LEAVE and there is exactly one
   register to pop.  This heuristic may need some tuning in the
   future.  */
9012 if ((!sp_valid && (frame.nregs + frame.nsseregs) <= 1)
9013 || (TARGET_EPILOGUE_USING_MOVE
9014 && cfun->machine->use_fast_prologue_epilogue
9015 && ((frame.nregs + frame.nsseregs) > 1
9016 || (frame.to_allocate + frame.padding0) != 0))
9017 || (frame_pointer_needed && !(frame.nregs + frame.nsseregs)
9018 && (frame.to_allocate + frame.padding0) != 0)
9019 || (frame_pointer_needed && TARGET_USE_LEAVE
9020 && cfun->machine->use_fast_prologue_epilogue
9021 && (frame.nregs + frame.nsseregs) == 1)
9022 || crtl->calls_eh_return)
/* Restore registers.  We can use ebp or esp to address the memory
   locations.  If both are available, default to ebp, since offsets
   are known to be small.  The only exception is esp pointing directly
   to the end of the block of saved registers, where we may simplify
   the addressing mode.

   If we are realigning the stack with bp and sp, the register
   restores can't be addressed by bp; sp must be used instead.  */
9033 if (!frame_pointer_needed
9034 || (sp_valid && !(frame.to_allocate + frame.padding0))
9035 || stack_realign_fp)
9037 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9038 frame.to_allocate, red_offset,
9040 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
9042 + frame.nsseregs * 16
9045 + frame.nsseregs * 16
9046 + frame.padding0, style == 2);
9050 ix86_emit_restore_sse_regs_using_mov (hard_frame_pointer_rtx,
9053 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
9055 + frame.nsseregs * 16
9058 + frame.nsseregs * 16
9059 + frame.padding0, style == 2);
9062 red_offset -= offset;
9064 /* eh_return epilogues need %ecx added to the stack pointer. */
9067 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
9069 /* Stack align doesn't work with eh_return. */
9070 gcc_assert (!crtl->stack_realign_needed);
/* Neither do regparm nested functions.  */
9072 gcc_assert (!ix86_static_chain_on_stack);
9074 if (frame_pointer_needed)
9076 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
9077 tmp = plus_constant (tmp, UNITS_PER_WORD);
9078 tmp = emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
9080 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
9081 tmp = emit_move_insn (hard_frame_pointer_rtx, tmp);
/* Note that we use SA as a temporary CFA, as the return
   address is at the proper place relative to it.  We
   pretend this happens at the FP restore insn because
   prior to this insn the FP would be stored at the wrong
   offset relative to SA, and after this insn we have no
   other reasonable register to use for the CFA.  We don't
   bother resetting the CFA to the SP for the duration of
   the return insn.  */
9092 plus_constant (sa, UNITS_PER_WORD));
9093 ix86_add_queued_cfa_restore_notes (tmp);
9094 add_reg_note (tmp, REG_CFA_RESTORE, hard_frame_pointer_rtx);
9095 RTX_FRAME_RELATED_P (tmp) = 1;
9096 ix86_cfa_state->reg = sa;
9097 ix86_cfa_state->offset = UNITS_PER_WORD;
9099 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
9100 const0_rtx, style, false);
9104 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
9105 tmp = plus_constant (tmp, (frame.to_allocate
9106 + frame.nregs * UNITS_PER_WORD
9107 + frame.nsseregs * 16
9109 tmp = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
9110 ix86_add_queued_cfa_restore_notes (tmp);
9112 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9113 if (ix86_cfa_state->offset != UNITS_PER_WORD)
9115 ix86_cfa_state->offset = UNITS_PER_WORD;
9116 add_reg_note (tmp, REG_CFA_DEF_CFA,
9117 plus_constant (stack_pointer_rtx,
9119 RTX_FRAME_RELATED_P (tmp) = 1;
9123 else if (!frame_pointer_needed)
9124 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9125 GEN_INT (frame.to_allocate
9126 + frame.nregs * UNITS_PER_WORD
9127 + frame.nsseregs * 16
9129 style, !using_drap);
9130 /* If not an i386, mov & pop is faster than "leave". */
9131 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
9132 || !cfun->machine->use_fast_prologue_epilogue)
9133 ix86_emit_leave (red_offset);
9136 pro_epilogue_adjust_stack (stack_pointer_rtx,
9137 hard_frame_pointer_rtx,
9138 const0_rtx, style, !using_drap);
9140 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx, red_offset);
/* First step is to deallocate the stack frame so that we can
   restore the registers.

   If we realign the stack with the frame pointer, then the stack
   pointer won't be able to recover via lea $offset(%bp), %sp,
   because there is a padding area between bp and sp for the
   realignment.  "add $to_allocate, %sp" must be used instead.  */
9154 gcc_assert (frame_pointer_needed);
9155 gcc_assert (!stack_realign_fp);
9156 pro_epilogue_adjust_stack (stack_pointer_rtx,
9157 hard_frame_pointer_rtx,
9158 GEN_INT (offset), style, false);
9159 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9162 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9163 GEN_INT (frame.nsseregs * 16
9167 else if (frame.to_allocate || frame.padding0 || frame.nsseregs)
9169 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9170 frame.to_allocate, red_offset,
9172 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9173 GEN_INT (frame.to_allocate
9174 + frame.nsseregs * 16
9175 + frame.padding0), style,
9176 !using_drap && !frame_pointer_needed);
9179 ix86_emit_restore_regs_using_pop (red_offset + frame.nsseregs * 16
9181 red_offset -= offset;
9183 if (frame_pointer_needed)
9185 /* Leave results in shorter dependency chains on CPUs that are
9186 able to grok it fast. */
9187 if (TARGET_USE_LEAVE)
9188 ix86_emit_leave (red_offset);
/* If stack realignment really happened, recovering the stack
   pointer from the hard frame pointer is a must if not using
   leave.  */
9194 if (stack_realign_fp)
9195 pro_epilogue_adjust_stack (stack_pointer_rtx,
9196 hard_frame_pointer_rtx,
9197 const0_rtx, style, !using_drap);
9198 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx,
9206 int param_ptr_offset = UNITS_PER_WORD;
9209 gcc_assert (stack_realign_drap);
9211 if (ix86_static_chain_on_stack)
9212 param_ptr_offset += UNITS_PER_WORD;
9213 if (!call_used_regs[REGNO (crtl->drap_reg)])
9214 param_ptr_offset += UNITS_PER_WORD;
9216 insn = emit_insn ((*ix86_gen_add3) (stack_pointer_rtx,
9218 GEN_INT (-param_ptr_offset)));
9220 ix86_cfa_state->reg = stack_pointer_rtx;
9221 ix86_cfa_state->offset = param_ptr_offset;
9223 add_reg_note (insn, REG_CFA_DEF_CFA,
9224 gen_rtx_PLUS (Pmode, ix86_cfa_state->reg,
9225 GEN_INT (ix86_cfa_state->offset)));
9226 RTX_FRAME_RELATED_P (insn) = 1;
9228 if (!call_used_regs[REGNO (crtl->drap_reg)])
9229 ix86_emit_restore_reg_using_pop (crtl->drap_reg, -UNITS_PER_WORD);
9232 /* Remove the saved static chain from the stack. The use of ECX is
9233 merely as a scratch register, not as the actual static chain. */
9234 if (ix86_static_chain_on_stack)
9238 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9239 ix86_cfa_state->offset += UNITS_PER_WORD;
9241 r = gen_rtx_REG (Pmode, CX_REG);
9242 insn = emit_insn (ix86_gen_pop1 (r));
9244 r = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
9245 r = gen_rtx_SET (VOIDmode, stack_pointer_rtx, r);
9246 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9247 RTX_FRAME_RELATED_P (insn) = 1;
9250 /* Sibcall epilogues don't want a return instruction. */
9253 *ix86_cfa_state = cfa_state_save;
9257 if (crtl->args.pops_args && crtl->args.size)
9259 rtx popc = GEN_INT (crtl->args.pops_args);
/* i386 can only pop 64K bytes.  If asked to pop more, pop the
   return address, do an explicit add, and jump indirectly to the
   caller.  */
9264 if (crtl->args.pops_args >= 65536)
9266 rtx ecx = gen_rtx_REG (SImode, CX_REG);
9269 /* There is no "pascal" calling convention in any 64bit ABI. */
9270 gcc_assert (!TARGET_64BIT);
9272 insn = emit_insn (gen_popsi1 (ecx));
9273 ix86_cfa_state->offset -= UNITS_PER_WORD;
9275 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9276 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
9277 add_reg_note (insn, REG_CFA_REGISTER,
9278 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
9279 RTX_FRAME_RELATED_P (insn) = 1;
9281 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9283 emit_jump_insn (gen_return_indirect_internal (ecx));
9286 emit_jump_insn (gen_return_pop_internal (popc));
9289 emit_jump_insn (gen_return_internal ());
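/* Aside: the pops_args >= 65536 path above exists because "ret $imm"
   takes only a 16-bit immediate.  What it emits is, in effect
   (32-bit only, illustration):
     popl  %ecx		; return address into %ecx
     addl  $N, %esp	; pop the N bytes of arguments explicitly
     jmp   *%ecx	; indirect return to the caller  */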
9291 /* Restore the state back to the state from the prologue,
9292 so that it's correct for the next epilogue. */
9293 *ix86_cfa_state = cfa_state_save;
9296 /* Reset from the function's potential modifications. */
9299 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9300 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9302 if (pic_offset_table_rtx)
9303 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
9305 /* Mach-O doesn't support labels at the end of objects, so if
9306 it looks like we might want one, insert a NOP. */
9308 rtx insn = get_last_insn ();
9311 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
9312 insn = PREV_INSN (insn);
9316 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
9317 fputs ("\tnop\n", file);
/* Extract the parts of an RTL expression that is a valid memory address
   for an instruction.  Return 0 if the structure of the address is
   grossly off.  Return -1 if the address contains ASHIFT, so it is not
   strictly valid, but still used for computing the length of an lea
   instruction.  */
9329 ix86_decompose_address (rtx addr, struct ix86_address *out)
9331 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
9332 rtx base_reg, index_reg;
9333 HOST_WIDE_INT scale = 1;
9334 rtx scale_rtx = NULL_RTX;
9336 enum ix86_address_seg seg = SEG_DEFAULT;
9338 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
9340 else if (GET_CODE (addr) == PLUS)
9350 addends[n++] = XEXP (op, 1);
9353 while (GET_CODE (op) == PLUS);
9358 for (i = n; i >= 0; --i)
9361 switch (GET_CODE (op))
9366 index = XEXP (op, 0);
9367 scale_rtx = XEXP (op, 1);
9371 if (XINT (op, 1) == UNSPEC_TP
9372 && TARGET_TLS_DIRECT_SEG_REFS
9373 && seg == SEG_DEFAULT)
9374 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
9403 else if (GET_CODE (addr) == MULT)
9405 index = XEXP (addr, 0); /* index*scale */
9406 scale_rtx = XEXP (addr, 1);
9408 else if (GET_CODE (addr) == ASHIFT)
9412 /* We're called for lea too, which implements ashift on occasion. */
9413 index = XEXP (addr, 0);
9414 tmp = XEXP (addr, 1);
if (!CONST_INT_P (tmp))
  return 0;
scale = INTVAL (tmp);
if ((unsigned HOST_WIDE_INT) scale > 3)
  return 0;
scale = 1 << scale;
9424 disp = addr; /* displacement */
9426 /* Extract the integral value of scale. */
if (!CONST_INT_P (scale_rtx))
  return 0;
scale = INTVAL (scale_rtx);
9434 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
9435 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
9437 /* Avoid useless 0 displacement. */
9438 if (disp == const0_rtx && (base || index))
/* Allow the arg pointer and the stack pointer as an index if there
   is no scaling.  */
9442 if (base_reg && index_reg && scale == 1
9443 && (index_reg == arg_pointer_rtx
9444 || index_reg == frame_pointer_rtx
9445 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
9448 tmp = base, base = index, index = tmp;
9449 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
/* Special case: %ebp cannot be encoded as a base without a
   displacement.  Similarly %r13.  */
9456 && (base_reg == hard_frame_pointer_rtx
9457 || base_reg == frame_pointer_rtx
9458 || base_reg == arg_pointer_rtx
9459 || (REG_P (base_reg)
9460 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
9461 || REGNO (base_reg) == R13_REG))))
9464 /* Special case: on K6, [%esi] makes the instruction vector decoded.
9465 Avoid this by transforming to [%esi+0].
9466 Reload calls address legitimization without cfun defined, so we need
9467 to test cfun for being non-NULL. */
9468 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
9469 && base_reg && !index_reg && !disp
9471 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
9474 /* Special case: encode reg+reg instead of reg*2. */
9475 if (!base && index && scale == 2)
9476 base = index, base_reg = index_reg, scale = 1;
/* Special case: scaling cannot be encoded without base or
   displacement.  */
if (!base && !disp && index && scale != 1)
  disp = const0_rtx;
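/* Aside: an illustrative (hypothetical) use of the decomposition above.
   For an address such as (plus (mult (reg B) (const_int 4))
   (plus (reg A) (const_int 12))), i.e. 12(A,B,4):  */
#if 0
static void
example_decompose (rtx addr)
{
  struct ix86_address parts;
  if (ix86_decompose_address (addr, &parts) > 0)
    {
      /* parts.base  == (reg A), parts.index == (reg B),
         parts.scale == 4,       parts.disp  == (const_int 12).  */
    }
}
#endif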
/* Return the cost of the memory address x.
   For i386, it is better to use a complex address than let gcc copy
   the address into a reg and make a new pseudo.  But not if the
   address requires two regs - that would mean more pseudos with
   longer lifetimes.  */
9497 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
9499 struct ix86_address parts;
9501 int ok = ix86_decompose_address (x, &parts);
9505 if (parts.base && GET_CODE (parts.base) == SUBREG)
9506 parts.base = SUBREG_REG (parts.base);
9507 if (parts.index && GET_CODE (parts.index) == SUBREG)
9508 parts.index = SUBREG_REG (parts.index);
9510 /* Attempt to minimize number of registers in the address. */
9512 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
9514 && (!REG_P (parts.index)
9515 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
9519 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
9521 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
9522 && parts.base != parts.index)
/* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
   since its predecode logic can't detect the length of instructions,
   and it degenerates to vector decoded.  Increase the cost of such
   addresses here.  The penalty is minimally 2 cycles.  It may be
   worthwhile to split such addresses or even refuse them at all.

   The following addressing modes are affected:
     [base+scale*index]
     [scale*index+disp]
     [base+index]

   The first and last case may be avoidable by explicitly coding the
   zero in the memory address, but I don't have an AMD-K6 machine
   handy to check this theory.  */
9541 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
9542 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
9543 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
/* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
   this is used to form addresses to local data when -fPIC is in
   effect.  */
9554 darwin_local_data_pic (rtx disp)
9556 return (GET_CODE (disp) == UNSPEC
9557 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
9560 /* Determine if a given RTX is a valid constant. We already know this
9561 satisfies CONSTANT_P. */
9564 legitimate_constant_p (rtx x)
9566 switch (GET_CODE (x))
9571 if (GET_CODE (x) == PLUS)
9573 if (!CONST_INT_P (XEXP (x, 1)))
9578 if (TARGET_MACHO && darwin_local_data_pic (x))
9581 /* Only some unspecs are valid as "constants". */
9582 if (GET_CODE (x) == UNSPEC)
9583 switch (XINT (x, 1))
9588 return TARGET_64BIT;
9591 x = XVECEXP (x, 0, 0);
9592 return (GET_CODE (x) == SYMBOL_REF
9593 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9595 x = XVECEXP (x, 0, 0);
9596 return (GET_CODE (x) == SYMBOL_REF
9597 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
9602 /* We must have drilled down to a symbol. */
9603 if (GET_CODE (x) == LABEL_REF)
9605 if (GET_CODE (x) != SYMBOL_REF)
9610 /* TLS symbols are never valid. */
9611 if (SYMBOL_REF_TLS_MODEL (x))
9614 /* DLLIMPORT symbols are never valid. */
9615 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
9616 && SYMBOL_REF_DLLIMPORT_P (x))
9621 if (GET_MODE (x) == TImode
9622 && x != CONST0_RTX (TImode)
9628 if (!standard_sse_constant_p (x))
9635 /* Otherwise we handle everything else in the move patterns. */
9639 /* Determine if it's legal to put X into the constant pool. This
9640 is not possible for the address of thread-local symbols, which
9641 is checked above. */
9644 ix86_cannot_force_const_mem (rtx x)
9646 /* We can always put integral constants and vectors in memory. */
9647 switch (GET_CODE (x))
9657 return !legitimate_constant_p (x);
9661 /* Nonzero if the constant value X is a legitimate general operand
9662 when generating PIC code. It is given that flag_pic is on and
9663 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
9666 legitimate_pic_operand_p (rtx x)
9670 switch (GET_CODE (x))
9673 inner = XEXP (x, 0);
9674 if (GET_CODE (inner) == PLUS
9675 && CONST_INT_P (XEXP (inner, 1)))
9676 inner = XEXP (inner, 0);
9678 /* Only some unspecs are valid as "constants". */
9679 if (GET_CODE (inner) == UNSPEC)
9680 switch (XINT (inner, 1))
9685 return TARGET_64BIT;
9687 x = XVECEXP (inner, 0, 0);
9688 return (GET_CODE (x) == SYMBOL_REF
9689 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9690 case UNSPEC_MACHOPIC_OFFSET:
9691 return legitimate_pic_address_disp_p (x);
9699 return legitimate_pic_address_disp_p (x);
/* Determine if a given CONST RTX is a valid memory displacement
   in PIC mode.  */
9710 legitimate_pic_address_disp_p (rtx disp)
9714 /* In 64bit mode we can allow direct addresses of symbols and labels
9715 when they are not dynamic symbols. */
9718 rtx op0 = disp, op1;
9720 switch (GET_CODE (disp))
9726 if (GET_CODE (XEXP (disp, 0)) != PLUS)
9728 op0 = XEXP (XEXP (disp, 0), 0);
9729 op1 = XEXP (XEXP (disp, 0), 1);
9730 if (!CONST_INT_P (op1)
9731 || INTVAL (op1) >= 16*1024*1024
9732 || INTVAL (op1) < -16*1024*1024)
9734 if (GET_CODE (op0) == LABEL_REF)
9736 if (GET_CODE (op0) != SYMBOL_REF)
9741 /* TLS references should always be enclosed in UNSPEC. */
9742 if (SYMBOL_REF_TLS_MODEL (op0))
9744 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
9745 && ix86_cmodel != CM_LARGE_PIC)
9753 if (GET_CODE (disp) != CONST)
9755 disp = XEXP (disp, 0);
/* It is unsafe to allow PLUS expressions; this limited the allowed
   distance of GOT table references.  We should not need these
   anyway.  */
9761 if (GET_CODE (disp) != UNSPEC
9762 || (XINT (disp, 1) != UNSPEC_GOTPCREL
9763 && XINT (disp, 1) != UNSPEC_GOTOFF
9764 && XINT (disp, 1) != UNSPEC_PLTOFF))
9767 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
9768 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
9774 if (GET_CODE (disp) == PLUS)
9776 if (!CONST_INT_P (XEXP (disp, 1)))
9778 disp = XEXP (disp, 0);
9782 if (TARGET_MACHO && darwin_local_data_pic (disp))
9785 if (GET_CODE (disp) != UNSPEC)
9788 switch (XINT (disp, 1))
/* We need to check for both symbols and labels because VxWorks loads
   text labels with @GOT rather than @GOTOFF.  See gotoff_operand for
   details.  */
9797 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
/* Refuse GOTOFF in 64-bit mode since it is always 64-bit when used.
   While the ABI also specifies a 32-bit relocation, we don't produce
   it in the small PIC model at all.  */
9802 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9803 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
9805 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
9807 case UNSPEC_GOTTPOFF:
9808 case UNSPEC_GOTNTPOFF:
9809 case UNSPEC_INDNTPOFF:
9812 disp = XVECEXP (disp, 0, 0);
9813 return (GET_CODE (disp) == SYMBOL_REF
9814 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
9816 disp = XVECEXP (disp, 0, 0);
9817 return (GET_CODE (disp) == SYMBOL_REF
9818 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
9820 disp = XVECEXP (disp, 0, 0);
9821 return (GET_CODE (disp) == SYMBOL_REF
9822 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
/* Recognizes RTL expressions that are valid memory addresses for an
   instruction.  The MODE argument is the machine mode for the MEM
   expression that wants to use this address.

   It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS
   should convert common non-canonical forms to canonical form so that
   they will be recognized.  */
9837 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
9838 rtx addr, bool strict)
9840 struct ix86_address parts;
9841 rtx base, index, disp;
9842 HOST_WIDE_INT scale;
9844 if (ix86_decompose_address (addr, &parts) <= 0)
9845 /* Decomposition failed. */
9849 index = parts.index;
9851 scale = parts.scale;
9853 /* Validate base register.
9855 Don't allow SUBREG's that span more than a word here. It can lead to spill
9856 failures when the base is one word out of a two word structure, which is
9857 represented internally as a DImode int. */
9865 else if (GET_CODE (base) == SUBREG
9866 && REG_P (SUBREG_REG (base))
9867 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
9869 reg = SUBREG_REG (base);
9871 /* Base is not a register. */
9874 if (GET_MODE (base) != Pmode)
9875 /* Base is not in Pmode. */
9878 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
9879 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
9880 /* Base is not valid. */
9884 /* Validate index register.
9886 Don't allow SUBREG's that span more than a word here -- same as above. */
9894 else if (GET_CODE (index) == SUBREG
9895 && REG_P (SUBREG_REG (index))
9896 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
9898 reg = SUBREG_REG (index);
9900 /* Index is not a register. */
9903 if (GET_MODE (index) != Pmode)
9904 /* Index is not in Pmode. */
9907 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
9908 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
9909 /* Index is not valid. */
9913 /* Validate scale factor. */
9917 /* Scale without index. */
9920 if (scale != 2 && scale != 4 && scale != 8)
9921 /* Scale is not a valid multiplier. */
9925 /* Validate displacement. */
9928 if (GET_CODE (disp) == CONST
9929 && GET_CODE (XEXP (disp, 0)) == UNSPEC
9930 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
9931 switch (XINT (XEXP (disp, 0), 1))
/* Refuse GOTOFF and GOT in 64-bit mode since they are always 64-bit
   when used.  While the ABI also specifies 32-bit relocations, we
   don't produce them at all and use IP-relative addressing instead.  */
9938 gcc_assert (flag_pic);
9940 goto is_legitimate_pic;
9942 /* 64bit address unspec. */
9945 case UNSPEC_GOTPCREL:
9946 gcc_assert (flag_pic);
9947 goto is_legitimate_pic;
9949 case UNSPEC_GOTTPOFF:
9950 case UNSPEC_GOTNTPOFF:
9951 case UNSPEC_INDNTPOFF:
9957 /* Invalid address unspec. */
9961 else if (SYMBOLIC_CONST (disp)
9965 && MACHOPIC_INDIRECT
9966 && !machopic_operand_p (disp)
9972 if (TARGET_64BIT && (index || base))
9974 /* foo@dtpoff(%rX) is ok. */
9975 if (GET_CODE (disp) != CONST
9976 || GET_CODE (XEXP (disp, 0)) != PLUS
9977 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
9978 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
9979 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
9980 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
9981 /* Non-constant pic memory reference. */
9984 else if (! legitimate_pic_address_disp_p (disp))
9985 /* Displacement is an invalid pic construct. */
/* This code used to verify that a symbolic pic displacement
   includes the pic_offset_table_rtx register.

   While this is a good idea, unfortunately these constructs may
   be created by the "adds using lea" optimization for incorrect
   code like:

     int a;
     int foo (int i)
     {
       return *(&a + i);
     }

   This code is nonsensical, but results in addressing the
   GOT table with a pic_offset_table_rtx base.  We can't
   just refuse it easily, since it gets matched by the
   "addsi3" pattern, which later gets split to lea in the
   case the output register differs from the input.  While this
   can be handled by a separate addsi pattern for this case
   that never results in lea, it seems to be an easier and
   correct fix for the crash to disable this test.  */
10010 else if (GET_CODE (disp) != LABEL_REF
10011 && !CONST_INT_P (disp)
10012 && (GET_CODE (disp) != CONST
10013 || !legitimate_constant_p (disp))
10014 && (GET_CODE (disp) != SYMBOL_REF
10015 || !legitimate_constant_p (disp)))
10016 /* Displacement is not constant. */
10018 else if (TARGET_64BIT
10019 && !x86_64_immediate_operand (disp, VOIDmode))
10020 /* Displacement is out of range. */
10024 /* Everything looks valid. */
10028 /* Determine if a given RTX is a valid constant address. */
10031 constant_address_p (rtx x)
10033 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
10036 /* Return a unique alias set for the GOT. */
10038 static alias_set_type
10039 ix86_GOT_alias_set (void)
10041 static alias_set_type set = -1;
10043 set = new_alias_set ();
10047 /* Return a legitimate reference for ORIG (an address) using the
10048 register REG. If REG is 0, a new pseudo is generated.
10050 There are two types of references that must be handled:
10052 1. Global data references must load the address from the GOT, via
10053 the PIC reg. An insn is emitted to do this load, and the reg is
10056 2. Static data references, constant pool addresses, and code labels
10057 compute the address as an offset from the GOT, whose base is in
10058 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
10059 differentiate them from global data objects. The returned
10060 address is the PIC reg + an unspec constant.
10062 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
10063 reg also appears in the address. */
10066 legitimize_pic_address (rtx orig, rtx reg)
10069 rtx new_rtx = orig;
10073 if (TARGET_MACHO && !TARGET_64BIT)
10076 reg = gen_reg_rtx (Pmode);
10077 /* Use the generic Mach-O PIC machinery. */
10078 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
10082 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
10084 else if (TARGET_64BIT
10085 && ix86_cmodel != CM_SMALL_PIC
10086 && gotoff_operand (addr, Pmode))
10089 /* This symbol may be referenced via a displacement from the PIC
10090 base address (@GOTOFF). */
10092 if (reload_in_progress)
10093 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10094 if (GET_CODE (addr) == CONST)
10095 addr = XEXP (addr, 0);
10096 if (GET_CODE (addr) == PLUS)
10098 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10100 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10103 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10104 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10106 tmpreg = gen_reg_rtx (Pmode);
10109 emit_move_insn (tmpreg, new_rtx);
10113 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
10114 tmpreg, 1, OPTAB_DIRECT);
10117 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
10119 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
10121 /* This symbol may be referenced via a displacement from the PIC
10122 base address (@GOTOFF). */
10124 if (reload_in_progress)
10125 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10126 if (GET_CODE (addr) == CONST)
10127 addr = XEXP (addr, 0);
10128 if (GET_CODE (addr) == PLUS)
10130 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10132 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10135 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10136 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10137 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10141 emit_move_insn (reg, new_rtx);
10145 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
10146 /* We can't use @GOTOFF for text labels on VxWorks;
10147 see gotoff_operand. */
10148 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
10150 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10152 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
10153 return legitimize_dllimport_symbol (addr, true);
10154 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
10155 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
10156 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
10158 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
10159 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
10163 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
10165 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
10166 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10167 new_rtx = gen_const_mem (Pmode, new_rtx);
10168 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10171 reg = gen_reg_rtx (Pmode);
/* Use gen_movsi directly; otherwise the address is loaded into a
   register for CSE.  We don't want to CSE these addresses; instead
   we CSE addresses from the GOT table, so skip this.  */
10175 emit_insn (gen_movsi (reg, new_rtx));
10180 /* This symbol must be referenced via a load from the
10181 Global Offset Table (@GOT). */
10183 if (reload_in_progress)
10184 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10185 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
10186 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10188 new_rtx = force_reg (Pmode, new_rtx);
10189 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10190 new_rtx = gen_const_mem (Pmode, new_rtx);
10191 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10194 reg = gen_reg_rtx (Pmode);
10195 emit_move_insn (reg, new_rtx);
10201 if (CONST_INT_P (addr)
10202 && !x86_64_immediate_operand (addr, VOIDmode))
10206 emit_move_insn (reg, addr);
10210 new_rtx = force_reg (Pmode, addr);
10212 else if (GET_CODE (addr) == CONST)
10214 addr = XEXP (addr, 0);
10216 /* We must match stuff we generate before. Assume the only
10217 unspecs that can get here are ours. Not that we could do
10218 anything with them anyway.... */
10219 if (GET_CODE (addr) == UNSPEC
10220 || (GET_CODE (addr) == PLUS
10221 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
10223 gcc_assert (GET_CODE (addr) == PLUS);
10225 if (GET_CODE (addr) == PLUS)
10227 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
10229 /* Check first to see if this is a constant offset from a @GOTOFF
10230 symbol reference. */
10231 if (gotoff_operand (op0, Pmode)
10232 && CONST_INT_P (op1))
10236 if (reload_in_progress)
10237 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10238 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
10240 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
10241 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10242 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10246 emit_move_insn (reg, new_rtx);
10252 if (INTVAL (op1) < -16*1024*1024
10253 || INTVAL (op1) >= 16*1024*1024)
10255 if (!x86_64_immediate_operand (op1, Pmode))
10256 op1 = force_reg (Pmode, op1);
10257 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
10263 base = legitimize_pic_address (XEXP (addr, 0), reg);
10264 new_rtx = legitimize_pic_address (XEXP (addr, 1),
10265 base == reg ? NULL_RTX : reg);
10267 if (CONST_INT_P (new_rtx))
10268 new_rtx = plus_constant (base, INTVAL (new_rtx));
10271 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
10273 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
10274 new_rtx = XEXP (new_rtx, 1);
10276 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
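/* Aside: the two main PIC reference styles produced above, as the
   32-bit ELF assembly they ultimately become (illustration only):

     static/local data, @GOTOFF - an offset from the PIC base:
	leal  x@GOTOFF(%ebx), %eax

     global data, @GOT - a load of the address from the GOT slot:
	movl  x@GOT(%ebx), %eax  */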
10284 /* Load the thread pointer. If TO_REG is true, force it into a register. */
10287 get_thread_pointer (int to_reg)
10291 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
10295 reg = gen_reg_rtx (Pmode);
10296 insn = gen_rtx_SET (VOIDmode, reg, tp);
10297 insn = emit_insn (insn);
10302 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
10303 false if we expect this to be used for a memory address and true if
10304 we expect to load the address into a register. */
10307 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
10309 rtx dest, base, off, pic, tp;
10314 case TLS_MODEL_GLOBAL_DYNAMIC:
10315 dest = gen_reg_rtx (Pmode);
10316 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10318 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10320 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
10323 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
10324 insns = get_insns ();
10327 RTL_CONST_CALL_P (insns) = 1;
10328 emit_libcall_block (insns, dest, rax, x);
10330 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10331 emit_insn (gen_tls_global_dynamic_64 (dest, x));
10333 emit_insn (gen_tls_global_dynamic_32 (dest, x));
10335 if (TARGET_GNU2_TLS)
10337 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
10339 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10343 case TLS_MODEL_LOCAL_DYNAMIC:
10344 base = gen_reg_rtx (Pmode);
10345 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10347 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10349 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
10352 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
10353 insns = get_insns ();
10356 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
10357 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
10358 RTL_CONST_CALL_P (insns) = 1;
10359 emit_libcall_block (insns, base, rax, note);
10361 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10362 emit_insn (gen_tls_local_dynamic_base_64 (base));
10364 emit_insn (gen_tls_local_dynamic_base_32 (base));
10366 if (TARGET_GNU2_TLS)
10368 rtx x = ix86_tls_module_base ();
10370 set_unique_reg_note (get_last_insn (), REG_EQUIV,
10371 gen_rtx_MINUS (Pmode, x, tp));
10374 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
10375 off = gen_rtx_CONST (Pmode, off);
10377 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
10379 if (TARGET_GNU2_TLS)
10381 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
10383 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10388 case TLS_MODEL_INITIAL_EXEC:
10392 type = UNSPEC_GOTNTPOFF;
10396 if (reload_in_progress)
10397 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10398 pic = pic_offset_table_rtx;
10399 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
10401 else if (!TARGET_ANY_GNU_TLS)
10403 pic = gen_reg_rtx (Pmode);
10404 emit_insn (gen_set_got (pic));
10405 type = UNSPEC_GOTTPOFF;
10410 type = UNSPEC_INDNTPOFF;
10413 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
10414 off = gen_rtx_CONST (Pmode, off);
10416 off = gen_rtx_PLUS (Pmode, pic, off);
10417 off = gen_const_mem (Pmode, off);
10418 set_mem_alias_set (off, ix86_GOT_alias_set ());
10420 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10422 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10423 off = force_reg (Pmode, off);
10424 return gen_rtx_PLUS (Pmode, base, off);
10428 base = get_thread_pointer (true);
10429 dest = gen_reg_rtx (Pmode);
10430 emit_insn (gen_subsi3 (dest, base, off));
10434 case TLS_MODEL_LOCAL_EXEC:
10435 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
10436 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10437 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
10438 off = gen_rtx_CONST (Pmode, off);
10440 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10442 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10443 return gen_rtx_PLUS (Pmode, base, off);
10447 base = get_thread_pointer (true);
10448 dest = gen_reg_rtx (Pmode);
10449 emit_insn (gen_subsi3 (dest, base, off));
10454 gcc_unreachable ();
10460 /* Create or return the unique __imp_DECL dllimport symbol corresponding
10463 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
10464 htab_t dllimport_map;
10467 get_dllimport_decl (tree decl)
10469 struct tree_map *h, in;
10472 const char *prefix;
10473 size_t namelen, prefixlen;
10478 if (!dllimport_map)
10479 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
10481 in.hash = htab_hash_pointer (decl);
10482 in.base.from = decl;
10483 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
10484 h = (struct tree_map *) *loc;
10488 *loc = h = GGC_NEW (struct tree_map);
10490 h->base.from = decl;
10491 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
10492 VAR_DECL, NULL, ptr_type_node);
10493 DECL_ARTIFICIAL (to) = 1;
10494 DECL_IGNORED_P (to) = 1;
10495 DECL_EXTERNAL (to) = 1;
10496 TREE_READONLY (to) = 1;
10498 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10499 name = targetm.strip_name_encoding (name);
10500 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
10501 ? "*__imp_" : "*__imp__";
10502 namelen = strlen (name);
10503 prefixlen = strlen (prefix);
10504 imp_name = (char *) alloca (namelen + prefixlen + 1);
10505 memcpy (imp_name, prefix, prefixlen);
10506 memcpy (imp_name + prefixlen, name, namelen + 1);
10508 name = ggc_alloc_string (imp_name, namelen + prefixlen);
10509 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
10510 SET_SYMBOL_REF_DECL (rtl, to);
10511 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
10513 rtl = gen_const_mem (Pmode, rtl);
10514 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
10516 SET_DECL_RTL (to, rtl);
10517 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
10522 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
10523 true if we require the result be a register. */
10526 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
10531 gcc_assert (SYMBOL_REF_DECL (symbol));
10532 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
10534 x = DECL_RTL (imp_decl);
10536 x = force_reg (Pmode, x);
10540 /* Try machine-dependent ways of modifying an illegitimate address
10541 to be legitimate. If we find one, return the new, valid address.
10542 This macro is used in only one place: `memory_address' in explow.c.
10544 OLDX is the address as it was before break_out_memory_refs was called.
10545 In some cases it is useful to look at this to decide what needs to be done.
10547 It is always safe for this macro to do nothing. It exists to recognize
10548 opportunities to optimize the output.
10550 For the 80386, we handle X+REG by loading X into a register R and
10551 using R+REG. R will go in a general reg and indexing will be used.
10552 However, if REG is a broken-out memory address or multiplication,
10553 nothing needs to be done because REG can certainly go in a general reg.
10555 When -fpic is used, special handling is needed for symbolic references.
10556 See comments by legitimize_pic_address in i386.c for details. */
10559 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
10560 enum machine_mode mode)
10565 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
10567 return legitimize_tls_address (x, (enum tls_model) log, false);
10568 if (GET_CODE (x) == CONST
10569 && GET_CODE (XEXP (x, 0)) == PLUS
10570 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10571 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
10573 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
10574 (enum tls_model) log, false);
10575 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10578 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10580 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
10581 return legitimize_dllimport_symbol (x, true);
10582 if (GET_CODE (x) == CONST
10583 && GET_CODE (XEXP (x, 0)) == PLUS
10584 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10585 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
10587 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
10588 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10592 if (flag_pic && SYMBOLIC_CONST (x))
10593 return legitimize_pic_address (x, 0);
10595 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
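/* E.g. (ashift reg 3) becomes (mult reg 8); the address-recognition code
   expects the mult form as the index*scale part of an address.  */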
10596 if (GET_CODE (x) == ASHIFT
10597 && CONST_INT_P (XEXP (x, 1))
10598 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
10601 log = INTVAL (XEXP (x, 1));
10602 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
10603 GEN_INT (1 << log));
10606 if (GET_CODE (x) == PLUS)
10608 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
10610 if (GET_CODE (XEXP (x, 0)) == ASHIFT
10611 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10612 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
10615 log = INTVAL (XEXP (XEXP (x, 0), 1));
10616 XEXP (x, 0) = gen_rtx_MULT (Pmode,
10617 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
10618 GEN_INT (1 << log));
10621 if (GET_CODE (XEXP (x, 1)) == ASHIFT
10622 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10623 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
10626 log = INTVAL (XEXP (XEXP (x, 1), 1));
10627 XEXP (x, 1) = gen_rtx_MULT (Pmode,
10628 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
10629 GEN_INT (1 << log));
10632 /* Put multiply first if it isn't already. */
10633 if (GET_CODE (XEXP (x, 1)) == MULT)
10635 rtx tmp = XEXP (x, 0);
10636 XEXP (x, 0) = XEXP (x, 1);
10641 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
10642 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
10643 created by virtual register instantiation, register elimination, and
10644 similar optimizations. */
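/* E.g. (plus (mult r1 4) (plus r2 12)) is rewritten as
   (plus (plus (mult r1 4) r2) 12), which matches the machine's
   base + index*scale + displacement form.  (Illustrative.)  */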
10645 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
10648 x = gen_rtx_PLUS (Pmode,
10649 gen_rtx_PLUS (Pmode, XEXP (x, 0),
10650 XEXP (XEXP (x, 1), 0)),
10651 XEXP (XEXP (x, 1), 1));
10655 /* Canonicalize (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
10656 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
10657 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
10658 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10659 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
10660 && CONSTANT_P (XEXP (x, 1)))
10663 rtx other = NULL_RTX;
10665 if (CONST_INT_P (XEXP (x, 1)))
10667 constant = XEXP (x, 1);
10668 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
10670 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
10672 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
10673 other = XEXP (x, 1);
10681 x = gen_rtx_PLUS (Pmode,
10682 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
10683 XEXP (XEXP (XEXP (x, 0), 1), 0)),
10684 plus_constant (other, INTVAL (constant)));
10688 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10691 if (GET_CODE (XEXP (x, 0)) == MULT)
10694 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
10697 if (GET_CODE (XEXP (x, 1)) == MULT)
10700 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
10704 && REG_P (XEXP (x, 1))
10705 && REG_P (XEXP (x, 0)))
10708 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
10711 x = legitimize_pic_address (x, 0);
10714 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10717 if (REG_P (XEXP (x, 0)))
10719 rtx temp = gen_reg_rtx (Pmode);
10720 rtx val = force_operand (XEXP (x, 1), temp);
10722 emit_move_insn (temp, val);
10724 XEXP (x, 1) = temp;
10728 else if (REG_P (XEXP (x, 1)))
10730 rtx temp = gen_reg_rtx (Pmode);
10731 rtx val = force_operand (XEXP (x, 0), temp);
10733 emit_move_insn (temp, val);
10735 XEXP (x, 0) = temp;
10743 /* Print an integer constant expression in assembler syntax. Addition
10744 and subtraction are the only arithmetic that may appear in these
10745 expressions. FILE is the stdio stream to write to, X is the rtx, and
10746 CODE is the operand print code from the output string. */
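/* E.g. under ELF PIC a non-local function symbol printed with code 'P'
   comes out as "foo@PLT", and GOT-relative unspecs get their "@GOT",
   "@GOTOFF", etc. suffixes from the UNSPEC cases below.  (Illustrative.)  */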
10749 output_pic_addr_const (FILE *file, rtx x, int code)
10753 switch (GET_CODE (x))
10756 gcc_assert (flag_pic);
10761 if (! TARGET_MACHO || TARGET_64BIT)
10762 output_addr_const (file, x);
10765 const char *name = XSTR (x, 0);
10767 /* Mark the decl as referenced so that cgraph will
10768 output the function. */
10769 if (SYMBOL_REF_DECL (x))
10770 mark_decl_referenced (SYMBOL_REF_DECL (x));
10773 if (MACHOPIC_INDIRECT
10774 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
10775 name = machopic_indirection_name (x, /*stub_p=*/true);
10777 assemble_name (file, name);
10779 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
10780 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
10781 fputs ("@PLT", file);
10788 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
10789 assemble_name (asm_out_file, buf);
10793 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
10797 /* This used to output parentheses around the expression,
10798 but that does not work on the 386 (either ATT or BSD assembler). */
10799 output_pic_addr_const (file, XEXP (x, 0), code);
10803 if (GET_MODE (x) == VOIDmode)
10805 /* We can use %d if the number is <32 bits and positive. */
10806 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
10807 fprintf (file, "0x%lx%08lx",
10808 (unsigned long) CONST_DOUBLE_HIGH (x),
10809 (unsigned long) CONST_DOUBLE_LOW (x));
10811 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
10814 /* We can't handle floating point constants;
10815 PRINT_OPERAND must handle them. */
10816 output_operand_lossage ("floating constant misused");
10820 /* Some assemblers need integer constants to appear first. */
10821 if (CONST_INT_P (XEXP (x, 0)))
10823 output_pic_addr_const (file, XEXP (x, 0), code);
10825 output_pic_addr_const (file, XEXP (x, 1), code);
10829 gcc_assert (CONST_INT_P (XEXP (x, 1)));
10830 output_pic_addr_const (file, XEXP (x, 1), code);
10832 output_pic_addr_const (file, XEXP (x, 0), code);
10838 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
10839 output_pic_addr_const (file, XEXP (x, 0), code);
10841 output_pic_addr_const (file, XEXP (x, 1), code);
10843 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
10847 gcc_assert (XVECLEN (x, 0) == 1);
10848 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
10849 switch (XINT (x, 1))
10852 fputs ("@GOT", file);
10854 case UNSPEC_GOTOFF:
10855 fputs ("@GOTOFF", file);
10857 case UNSPEC_PLTOFF:
10858 fputs ("@PLTOFF", file);
10860 case UNSPEC_GOTPCREL:
10861 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10862 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
10864 case UNSPEC_GOTTPOFF:
10865 /* FIXME: This might be @TPOFF in Sun ld too. */
10866 fputs ("@gottpoff", file);
10869 fputs ("@tpoff", file);
10871 case UNSPEC_NTPOFF:
10873 fputs ("@tpoff", file);
10875 fputs ("@ntpoff", file);
10877 case UNSPEC_DTPOFF:
10878 fputs ("@dtpoff", file);
10880 case UNSPEC_GOTNTPOFF:
10882 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10883 "@gottpoff(%rip)": "@gottpoff[rip]", file);
10885 fputs ("@gotntpoff", file);
10887 case UNSPEC_INDNTPOFF:
10888 fputs ("@indntpoff", file);
10891 case UNSPEC_MACHOPIC_OFFSET:
10893 machopic_output_function_base_name (file);
10897 output_operand_lossage ("invalid UNSPEC as operand");
10903 output_operand_lossage ("invalid expression as operand");
10907 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10908 We need to emit DTP-relative relocations. */
10910 static void ATTRIBUTE_UNUSED
10911 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
10913 fputs (ASM_LONG, file);
10914 output_addr_const (file, x);
10915 fputs ("@dtpoff", file);
10921 fputs (", 0", file);
10924 gcc_unreachable ();
10928 /* Return true if X is a representation of the PIC register. This copes
10929 with calls from ix86_find_base_term, where the register might have
10930 been replaced by a cselib value. */
10933 ix86_pic_register_p (rtx x)
10935 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
10936 return (pic_offset_table_rtx
10937 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
10939 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
10942 /* In the name of slightly smaller debug output, and to cater to
10943 general assembler lossage, recognize PIC+GOTOFF and turn it back
10944 into a direct symbol reference.
10946 On Darwin, this is necessary to avoid a crash, because Darwin
10947 has a different PIC label for each routine but the DWARF debugging
10948 information is not associated with any particular routine, so it's
10949 necessary to remove references to the PIC label from RTL stored by
10950 the DWARF output code. */
10953 ix86_delegitimize_address (rtx x)
10955 rtx orig_x = delegitimize_mem_from_attrs (x);
10956 /* addend is NULL or some rtx if x is something+GOTOFF where
10957 something doesn't include the PIC register. */
10958 rtx addend = NULL_RTX;
10959 /* reg_addend is NULL or a multiple of some register. */
10960 rtx reg_addend = NULL_RTX;
10961 /* const_addend is NULL or a const_int. */
10962 rtx const_addend = NULL_RTX;
10963 /* This is the result, or NULL. */
10964 rtx result = NULL_RTX;
10973 if (GET_CODE (x) != CONST
10974 || GET_CODE (XEXP (x, 0)) != UNSPEC
10975 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
10976 || !MEM_P (orig_x))
10978 return XVECEXP (XEXP (x, 0), 0, 0);
10981 if (GET_CODE (x) != PLUS
10982 || GET_CODE (XEXP (x, 1)) != CONST)
10985 if (ix86_pic_register_p (XEXP (x, 0)))
10986 /* %ebx + GOT/GOTOFF */
10988 else if (GET_CODE (XEXP (x, 0)) == PLUS)
10990 /* %ebx + %reg * scale + GOT/GOTOFF */
10991 reg_addend = XEXP (x, 0);
10992 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
10993 reg_addend = XEXP (reg_addend, 1);
10994 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
10995 reg_addend = XEXP (reg_addend, 0);
10998 reg_addend = NULL_RTX;
10999 addend = XEXP (x, 0);
11003 addend = XEXP (x, 0);
11005 x = XEXP (XEXP (x, 1), 0);
11006 if (GET_CODE (x) == PLUS
11007 && CONST_INT_P (XEXP (x, 1)))
11009 const_addend = XEXP (x, 1);
11013 if (GET_CODE (x) == UNSPEC
11014 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
11015 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
11016 result = XVECEXP (x, 0, 0);
11018 if (TARGET_MACHO && darwin_local_data_pic (x)
11019 && !MEM_P (orig_x))
11020 result = XVECEXP (x, 0, 0);
11026 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
11028 result = gen_rtx_PLUS (Pmode, reg_addend, result);
11031 /* If the rest of original X doesn't involve the PIC register, add
11032 addend and subtract pic_offset_table_rtx. This can happen e.g.
11034 leal (%ebx, %ecx, 4), %ecx
11036 movl foo@GOTOFF(%ecx), %edx
11037 in which case we return (%ecx - %ebx) + foo. */
11038 if (pic_offset_table_rtx)
11039 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
11040 pic_offset_table_rtx),
11048 /* If X is a machine specific address (i.e. a symbol or label being
11049 referenced as a displacement from the GOT implemented using an
11050 UNSPEC), then return the base term. Otherwise return X. */
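/* E.g. on 64-bit, (const (plus (unspec [foo] GOTPCREL) 4)) has base term
   "foo"; 32-bit addresses are handed to ix86_delegitimize_address.
   (Illustrative.)  */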
11053 ix86_find_base_term (rtx x)
11059 if (GET_CODE (x) != CONST)
11061 term = XEXP (x, 0);
11062 if (GET_CODE (term) == PLUS
11063 && (CONST_INT_P (XEXP (term, 1))
11064 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
11065 term = XEXP (term, 0);
11066 if (GET_CODE (term) != UNSPEC
11067 || XINT (term, 1) != UNSPEC_GOTPCREL)
11070 return XVECEXP (term, 0, 0);
11073 return ix86_delegitimize_address (x);
11077 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
11078 int fp, FILE *file)
11080 const char *suffix;
11082 if (mode == CCFPmode || mode == CCFPUmode)
11084 code = ix86_fp_compare_code_to_integer (code);
11088 code = reverse_condition (code);
11139 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
11143 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
11144 Those same assemblers have the same but opposite lossage on cmov. */
11145 if (mode == CCmode)
11146 suffix = fp ? "nbe" : "a";
11147 else if (mode == CCCmode)
11150 gcc_unreachable ();
11166 gcc_unreachable ();
11170 gcc_assert (mode == CCmode || mode == CCCmode);
11187 gcc_unreachable ();
11191 /* ??? As above. */
11192 gcc_assert (mode == CCmode || mode == CCCmode);
11193 suffix = fp ? "nb" : "ae";
11196 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
11200 /* ??? As above. */
11201 if (mode == CCmode)
11203 else if (mode == CCCmode)
11204 suffix = fp ? "nb" : "ae";
11206 gcc_unreachable ();
11209 suffix = fp ? "u" : "p";
11212 suffix = fp ? "nu" : "np";
11215 gcc_unreachable ();
11217 fputs (suffix, file);
11220 /* Print the name of register X to FILE based on its machine mode and number.
11221 If CODE is 'w', pretend the mode is HImode.
11222 If CODE is 'b', pretend the mode is QImode.
11223 If CODE is 'k', pretend the mode is SImode.
11224 If CODE is 'q', pretend the mode is DImode.
11225 If CODE is 'x', pretend the mode is V4SFmode.
11226 If CODE is 't', pretend the mode is V8SFmode.
11227 If CODE is 'h', pretend the reg is the 'high' byte register.
11228 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op.
11229 If CODE is 'd', duplicate the operand for AVX instruction.
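/* E.g. hard register 0 prints as "%eax" in ATT syntax ("eax" in Intel);
   code 'w' narrows it to "%ax", 'b' to "%al", 'h' to "%ah", and 'q'
   widens it to "%rax" in 64-bit mode.  (Illustrative.)  */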
11233 print_reg (rtx x, int code, FILE *file)
11236 bool duplicated = code == 'd' && TARGET_AVX;
11238 gcc_assert (x == pc_rtx
11239 || (REGNO (x) != ARG_POINTER_REGNUM
11240 && REGNO (x) != FRAME_POINTER_REGNUM
11241 && REGNO (x) != FLAGS_REG
11242 && REGNO (x) != FPSR_REG
11243 && REGNO (x) != FPCR_REG));
11245 if (ASSEMBLER_DIALECT == ASM_ATT)
11250 gcc_assert (TARGET_64BIT);
11251 fputs ("rip", file);
11255 if (code == 'w' || MMX_REG_P (x))
11257 else if (code == 'b')
11259 else if (code == 'k')
11261 else if (code == 'q')
11263 else if (code == 'y')
11265 else if (code == 'h')
11267 else if (code == 'x')
11269 else if (code == 't')
11272 code = GET_MODE_SIZE (GET_MODE (x));
11274 /* Irritatingly, the AMD extended registers use a different naming
11275 convention from the normal registers. */
11276 if (REX_INT_REG_P (x))
11278 gcc_assert (TARGET_64BIT);
11282 error ("extended registers have no high halves");
11285 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
11288 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
11291 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
11294 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
11297 error ("unsupported operand size for extended register");
11307 if (STACK_TOP_P (x))
11316 if (! ANY_FP_REG_P (x))
11317 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
11322 reg = hi_reg_name[REGNO (x)];
11325 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
11327 reg = qi_reg_name[REGNO (x)];
11330 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
11332 reg = qi_high_reg_name[REGNO (x)];
11337 gcc_assert (!duplicated);
11339 fputs (hi_reg_name[REGNO (x)] + 1, file);
11344 gcc_unreachable ();
11350 if (ASSEMBLER_DIALECT == ASM_ATT)
11351 fprintf (file, ", %%%s", reg);
11353 fprintf (file, ", %s", reg);
11357 /* Locate some local-dynamic symbol still in use by this function
11358 so that we can print its name in some tls_local_dynamic_base pattern. */
11362 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11366 if (GET_CODE (x) == SYMBOL_REF
11367 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11369 cfun->machine->some_ld_name = XSTR (x, 0);
11376 static const char *
11377 get_some_local_dynamic_name (void)
11381 if (cfun->machine->some_ld_name)
11382 return cfun->machine->some_ld_name;
11384 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11386 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
11387 return cfun->machine->some_ld_name;
11392 /* Meaning of CODE:
11393 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
11394 C -- print opcode suffix for set/cmov insn.
11395 c -- like C, but print reversed condition
11396 E,e -- likewise, but for compare-and-branch fused insn.
11397 F,f -- likewise, but for floating-point.
11398 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.", otherwise nothing
11400 R -- print the prefix for register names.
11401 z -- print the opcode suffix for the size of the current operand.
11402 Z -- likewise, with special suffixes for x87 instructions.
11403 * -- print a star (in certain assembler syntax)
11404 A -- print an absolute memory reference.
11405 w -- print the operand as if it's a "word" (HImode) even if it isn't.
11406 s -- print a shift double count, followed by the assembler's argument delimiter
11408 b -- print the QImode name of the register for the indicated operand.
11409 %b0 would print %al if operands[0] is reg 0.
11410 w -- likewise, print the HImode name of the register.
11411 k -- likewise, print the SImode name of the register.
11412 q -- likewise, print the DImode name of the register.
11413 x -- likewise, print the V4SFmode name of the register.
11414 t -- likewise, print the V8SFmode name of the register.
11415 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
11416 y -- print "st(0)" instead of "st" as a register.
11417 d -- print duplicated register operand for AVX instruction.
11418 D -- print condition for SSE cmp instruction.
11419 P -- if PIC, print an @PLT suffix.
11420 X -- don't print any sort of PIC '@' suffix for a symbol.
11421 & -- print some in-use local-dynamic symbol name.
11422 H -- print a memory address offset by 8; used for sse high-parts
11423 Y -- print condition for XOP pcom* instruction.
11424 + -- print a branch hint as 'cs' or 'ds' prefix
11425 ; -- print a semicolon (after prefixes due to bug in older gas).
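/* E.g. for an SImode register operand, "%z0" expands to the size suffix
   "l", so a template such as "add%z0" prints as "addl" (ATT syntax;
   Intel syntax omits the suffix).  (Illustrative.)  */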
11429 print_operand (FILE *file, rtx x, int code)
11436 if (ASSEMBLER_DIALECT == ASM_ATT)
11442 const char *name = get_some_local_dynamic_name ();
11444 output_operand_lossage ("'%%&' used without any "
11445 "local dynamic TLS references");
11447 assemble_name (file, name);
11452 switch (ASSEMBLER_DIALECT)
11459 /* Intel syntax. For absolute addresses, registers should not
11460 be surrounded by braces. */
11464 PRINT_OPERAND (file, x, 0);
11471 gcc_unreachable ();
11474 PRINT_OPERAND (file, x, 0);
11479 if (ASSEMBLER_DIALECT == ASM_ATT)
11484 if (ASSEMBLER_DIALECT == ASM_ATT)
11489 if (ASSEMBLER_DIALECT == ASM_ATT)
11494 if (ASSEMBLER_DIALECT == ASM_ATT)
11499 if (ASSEMBLER_DIALECT == ASM_ATT)
11504 if (ASSEMBLER_DIALECT == ASM_ATT)
11509 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11511 /* Opcodes don't get size suffixes if using Intel opcodes. */
11512 if (ASSEMBLER_DIALECT == ASM_INTEL)
11515 switch (GET_MODE_SIZE (GET_MODE (x)))
11534 output_operand_lossage
11535 ("invalid operand size for operand code '%c'", code);
11540 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11542 warning (0, "non-integer operand used with operand code '%c'", code);
11546 /* 387 opcodes don't get size suffixes if using Intel opcodes. */
11547 if (ASSEMBLER_DIALECT == ASM_INTEL)
11550 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11552 switch (GET_MODE_SIZE (GET_MODE (x)))
11555 #ifdef HAVE_AS_IX86_FILDS
11565 #ifdef HAVE_AS_IX86_FILDQ
11568 fputs ("ll", file);
11576 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11578 /* 387 opcodes don't get size suffixes
11579 if the operands are registers. */
11580 if (STACK_REG_P (x))
11583 switch (GET_MODE_SIZE (GET_MODE (x)))
11604 output_operand_lossage
11605 ("invalid operand type used with operand code '%c'", code);
11609 output_operand_lossage
11610 ("invalid operand size for operand code '%c'", code);
11627 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
11629 PRINT_OPERAND (file, x, 0);
11630 fputs (", ", file);
11635 /* A little bit of braindamage here. The SSE compare instructions
11636 use completely different names for the comparisons than the
11637 fp conditional moves do. */
11640 switch (GET_CODE (x))
11643 fputs ("eq", file);
11646 fputs ("eq_us", file);
11649 fputs ("lt", file);
11652 fputs ("nge", file);
11655 fputs ("le", file);
11658 fputs ("ngt", file);
11661 fputs ("unord", file);
11664 fputs ("neq", file);
11667 fputs ("neq_oq", file);
11670 fputs ("ge", file);
11673 fputs ("nlt", file);
11676 fputs ("gt", file);
11679 fputs ("nle", file);
11682 fputs ("ord", file);
11685 output_operand_lossage ("operand is not a condition code, "
11686 "invalid operand code 'D'");
11692 switch (GET_CODE (x))
11696 fputs ("eq", file);
11700 fputs ("lt", file);
11704 fputs ("le", file);
11707 fputs ("unord", file);
11711 fputs ("neq", file);
11715 fputs ("nlt", file);
11719 fputs ("nle", file);
11722 fputs ("ord", file);
11725 output_operand_lossage ("operand is not a condition code, "
11726 "invalid operand code 'D'");
11732 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11733 if (ASSEMBLER_DIALECT == ASM_ATT)
11735 switch (GET_MODE (x))
11737 case HImode: putc ('w', file); break;
11739 case SFmode: putc ('l', file); break;
11741 case DFmode: putc ('q', file); break;
11742 default: gcc_unreachable ();
11749 if (!COMPARISON_P (x))
11751 output_operand_lossage ("operand is neither a constant nor a "
11752 "condition code, invalid operand code "
11756 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
11759 if (!COMPARISON_P (x))
11761 output_operand_lossage ("operand is neither a constant nor a "
11762 "condition code, invalid operand code "
11766 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11767 if (ASSEMBLER_DIALECT == ASM_ATT)
11770 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
11773 /* Like above, but reverse condition */
11775 /* Check to see if argument to %c is really a constant
11776 and not a condition code which needs to be reversed. */
11777 if (!COMPARISON_P (x))
11779 output_operand_lossage ("operand is neither a constant nor a "
11780 "condition code, invalid operand "
11784 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
11787 if (!COMPARISON_P (x))
11789 output_operand_lossage ("operand is neither a constant nor a "
11790 "condition code, invalid operand "
11794 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11795 if (ASSEMBLER_DIALECT == ASM_ATT)
11798 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
11802 put_condition_code (GET_CODE (x), CCmode, 0, 0, file);
11806 put_condition_code (GET_CODE (x), CCmode, 1, 0, file);
11810 /* It doesn't actually matter what mode we use here, as we're
11811 only going to use this for printing. */
11812 x = adjust_address_nv (x, DImode, 8);
11820 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
11823 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
11826 int pred_val = INTVAL (XEXP (x, 0));
11828 if (pred_val < REG_BR_PROB_BASE * 45 / 100
11829 || pred_val > REG_BR_PROB_BASE * 55 / 100)
11831 int taken = pred_val > REG_BR_PROB_BASE / 2;
11832 int cputaken = final_forward_branch_p (current_output_insn) == 0;
11834 /* Emit hints only when the default branch prediction
11835 heuristics would fail. */
11836 if (taken != cputaken)
11838 /* We use 3e (DS) prefix for taken branches and
11839 2e (CS) prefix for not taken branches. */
11841 fputs ("ds ; ", file);
11843 fputs ("cs ; ", file);
11851 switch (GET_CODE (x))
11854 fputs ("neq", file);
11857 fputs ("eq", file);
11861 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
11865 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
11869 fputs ("le", file);
11873 fputs ("lt", file);
11876 fputs ("unord", file);
11879 fputs ("ord", file);
11882 fputs ("ueq", file);
11885 fputs ("nlt", file);
11888 fputs ("nle", file);
11891 fputs ("ule", file);
11894 fputs ("ult", file);
11897 fputs ("une", file);
11900 output_operand_lossage ("operand is not a condition code, "
11901 "invalid operand code 'Y'");
11908 fputs (" ; ", file);
11915 output_operand_lossage ("invalid operand code '%c'", code);
11920 print_reg (x, code, file);
11922 else if (MEM_P (x))
11924 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
11925 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
11926 && GET_MODE (x) != BLKmode)
11929 switch (GET_MODE_SIZE (GET_MODE (x)))
11931 case 1: size = "BYTE"; break;
11932 case 2: size = "WORD"; break;
11933 case 4: size = "DWORD"; break;
11934 case 8: size = "QWORD"; break;
11935 case 12: size = "TBYTE"; break;
11937 if (GET_MODE (x) == XFmode)
11942 case 32: size = "YMMWORD"; break;
11944 gcc_unreachable ();
11947 /* Check for explicit size override (codes 'b', 'w' and 'k') */
11950 else if (code == 'w')
11952 else if (code == 'k')
11955 fputs (size, file);
11956 fputs (" PTR ", file);
11960 /* Avoid (%rip) for call operands. */
11961 if (CONSTANT_ADDRESS_P (x) && code == 'P'
11962 && !CONST_INT_P (x))
11963 output_addr_const (file, x);
11964 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
11965 output_operand_lossage ("invalid constraints for operand");
11967 output_address (x);
11970 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
11975 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
11976 REAL_VALUE_TO_TARGET_SINGLE (r, l);
11978 if (ASSEMBLER_DIALECT == ASM_ATT)
11980 fprintf (file, "0x%08lx", (long unsigned int) l);
11983 /* These float cases don't actually occur as immediate operands. */
11984 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
11988 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
11989 fputs (dstr, file);
11992 else if (GET_CODE (x) == CONST_DOUBLE
11993 && GET_MODE (x) == XFmode)
11997 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
11998 fputs (dstr, file);
12003 /* We have patterns that allow zero sets of memory, for instance.
12004 In 64-bit mode, we should probably support all 8-byte vectors,
12005 since we can in fact encode that into an immediate. */
12006 if (GET_CODE (x) == CONST_VECTOR)
12008 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
12014 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
12016 if (ASSEMBLER_DIALECT == ASM_ATT)
12019 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
12020 || GET_CODE (x) == LABEL_REF)
12022 if (ASSEMBLER_DIALECT == ASM_ATT)
12025 fputs ("OFFSET FLAT:", file);
12028 if (CONST_INT_P (x))
12029 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
12031 output_pic_addr_const (file, x, code);
12033 output_addr_const (file, x);
12037 /* Print a memory operand whose address is ADDR. */
12040 print_operand_address (FILE *file, rtx addr)
12042 struct ix86_address parts;
12043 rtx base, index, disp;
12045 int ok = ix86_decompose_address (addr, &parts);
12050 index = parts.index;
12052 scale = parts.scale;
12060 if (ASSEMBLER_DIALECT == ASM_ATT)
12062 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
12065 gcc_unreachable ();
12068 /* Use one byte shorter RIP relative addressing for 64bit mode. */
12069 if (TARGET_64BIT && !base && !index)
12073 if (GET_CODE (disp) == CONST
12074 && GET_CODE (XEXP (disp, 0)) == PLUS
12075 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12076 symbol = XEXP (XEXP (disp, 0), 0);
12078 if (GET_CODE (symbol) == LABEL_REF
12079 || (GET_CODE (symbol) == SYMBOL_REF
12080 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
12083 if (!base && !index)
12085 /* Displacement only requires special attention. */
12087 if (CONST_INT_P (disp))
12089 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
12090 fputs ("ds:", file);
12091 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
12094 output_pic_addr_const (file, disp, 0);
12096 output_addr_const (file, disp);
12100 if (ASSEMBLER_DIALECT == ASM_ATT)
12105 output_pic_addr_const (file, disp, 0);
12106 else if (GET_CODE (disp) == LABEL_REF)
12107 output_asm_label (disp);
12109 output_addr_const (file, disp);
12114 print_reg (base, 0, file);
12118 print_reg (index, 0, file);
12120 fprintf (file, ",%d", scale);
12126 rtx offset = NULL_RTX;
12130 /* Pull out the offset of a symbol; print any symbol itself. */
12131 if (GET_CODE (disp) == CONST
12132 && GET_CODE (XEXP (disp, 0)) == PLUS
12133 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12135 offset = XEXP (XEXP (disp, 0), 1);
12136 disp = gen_rtx_CONST (VOIDmode,
12137 XEXP (XEXP (disp, 0), 0));
12141 output_pic_addr_const (file, disp, 0);
12142 else if (GET_CODE (disp) == LABEL_REF)
12143 output_asm_label (disp);
12144 else if (CONST_INT_P (disp))
12147 output_addr_const (file, disp);
12153 print_reg (base, 0, file);
12156 if (INTVAL (offset) >= 0)
12158 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12162 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12169 print_reg (index, 0, file);
12171 fprintf (file, "*%d", scale);
12179 output_addr_const_extra (FILE *file, rtx x)
12183 if (GET_CODE (x) != UNSPEC)
12186 op = XVECEXP (x, 0, 0);
12187 switch (XINT (x, 1))
12189 case UNSPEC_GOTTPOFF:
12190 output_addr_const (file, op);
12191 /* FIXME: This might be @TPOFF in Sun ld. */
12192 fputs ("@gottpoff", file);
12195 output_addr_const (file, op);
12196 fputs ("@tpoff", file);
12198 case UNSPEC_NTPOFF:
12199 output_addr_const (file, op);
12201 fputs ("@tpoff", file);
12203 fputs ("@ntpoff", file);
12205 case UNSPEC_DTPOFF:
12206 output_addr_const (file, op);
12207 fputs ("@dtpoff", file);
12209 case UNSPEC_GOTNTPOFF:
12210 output_addr_const (file, op);
12212 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12213 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
12215 fputs ("@gotntpoff", file);
12217 case UNSPEC_INDNTPOFF:
12218 output_addr_const (file, op);
12219 fputs ("@indntpoff", file);
12222 case UNSPEC_MACHOPIC_OFFSET:
12223 output_addr_const (file, op);
12225 machopic_output_function_base_name (file);
12236 /* Split one or more DImode RTL references into pairs of SImode
12237 references. The RTL can be REG, offsettable MEM, integer constant, or
12238 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
12239 split and "num" is its length. lo_half and hi_half are output arrays
12240 that parallel "operands". */
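/* E.g. splitting the DImode constant 0x1122334455667788 yields
   lo_half = 0x55667788 and hi_half = 0x11223344; for a MEM the halves
   sit at offsets 0 and 4 (little-endian layout, illustrative).  */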
12243 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12247 rtx op = operands[num];
12249 /* simplify_subreg refuses to split volatile memory addresses,
12250 but we still have to handle them. */
12253 lo_half[num] = adjust_address (op, SImode, 0);
12254 hi_half[num] = adjust_address (op, SImode, 4);
12258 lo_half[num] = simplify_gen_subreg (SImode, op,
12259 GET_MODE (op) == VOIDmode
12260 ? DImode : GET_MODE (op), 0);
12261 hi_half[num] = simplify_gen_subreg (SImode, op,
12262 GET_MODE (op) == VOIDmode
12263 ? DImode : GET_MODE (op), 4);
12267 /* Split one or more TImode RTL references into pairs of DImode
12268 references. The RTL can be REG, offsettable MEM, integer constant, or
12269 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
12270 split and "num" is its length. lo_half and hi_half are output arrays
12271 that parallel "operands". */
12274 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12278 rtx op = operands[num];
12280 /* simplify_subreg refuses to split volatile memory addresses, but we
12281 still have to handle them. */
12284 lo_half[num] = adjust_address (op, DImode, 0);
12285 hi_half[num] = adjust_address (op, DImode, 8);
12289 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
12290 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
12295 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
12296 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
12297 is the expression of the binary operation. The output may either be
12298 emitted here, or returned to the caller, like all output_* functions.
12300 There is no guarantee that the operands are the same mode, as they
12301 might be within FLOAT or FLOAT_EXTEND expressions. */
12303 #ifndef SYSV386_COMPAT
12304 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
12305 wants to fix the assemblers because that causes incompatibility
12306 with gcc. No-one wants to fix gcc because that causes
12307 incompatibility with assemblers... You can use the option of
12308 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
12309 #define SYSV386_COMPAT 1
12313 output_387_binary_op (rtx insn, rtx *operands)
12315 static char buf[40];
12318 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
12320 #ifdef ENABLE_CHECKING
12321 /* Even if we do not want to check the inputs, this documents input
12322 constraints. Which helps in understanding the following code. */
12323 if (STACK_REG_P (operands[0])
12324 && ((REG_P (operands[1])
12325 && REGNO (operands[0]) == REGNO (operands[1])
12326 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
12327 || (REG_P (operands[2])
12328 && REGNO (operands[0]) == REGNO (operands[2])
12329 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
12330 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
12333 gcc_assert (is_sse);
12336 switch (GET_CODE (operands[3]))
12339 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12340 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12348 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12349 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12357 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12358 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12366 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12367 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12375 gcc_unreachable ();
12382 strcpy (buf, ssep);
12383 if (GET_MODE (operands[0]) == SFmode)
12384 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
12386 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
12390 strcpy (buf, ssep + 1);
12391 if (GET_MODE (operands[0]) == SFmode)
12392 strcat (buf, "ss\t{%2, %0|%0, %2}");
12394 strcat (buf, "sd\t{%2, %0|%0, %2}");
12400 switch (GET_CODE (operands[3]))
12404 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
12406 rtx temp = operands[2];
12407 operands[2] = operands[1];
12408 operands[1] = temp;
12411 /* Now we know operands[0] == operands[1]. */
12413 if (MEM_P (operands[2]))
12419 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12421 if (STACK_TOP_P (operands[0]))
12422 /* How is it that we are storing to a dead operand[2]?
12423 Well, presumably operands[1] is dead too. We can't
12424 store the result to st(0) as st(0) gets popped on this
12425 instruction. Instead store to operands[2] (which I
12426 think has to be st(1)). st(1) will be popped later.
12427 gcc <= 2.8.1 didn't have this check and generated
12428 assembly code that the Unixware assembler rejected. */
12429 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12431 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12435 if (STACK_TOP_P (operands[0]))
12436 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12438 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12443 if (MEM_P (operands[1]))
12449 if (MEM_P (operands[2]))
12455 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12458 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
12459 derived assemblers, confusingly reverse the direction of
12460 the operation for fsub{r} and fdiv{r} when the
12461 destination register is not st(0). The Intel assembler
12462 doesn't have this brain damage. Read !SYSV386_COMPAT to
12463 figure out what the hardware really does. */
12464 if (STACK_TOP_P (operands[0]))
12465 p = "{p\t%0, %2|rp\t%2, %0}";
12467 p = "{rp\t%2, %0|p\t%0, %2}";
12469 if (STACK_TOP_P (operands[0]))
12470 /* As above for fmul/fadd, we can't store to st(0). */
12471 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12473 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12478 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
12481 if (STACK_TOP_P (operands[0]))
12482 p = "{rp\t%0, %1|p\t%1, %0}";
12484 p = "{p\t%1, %0|rp\t%0, %1}";
12486 if (STACK_TOP_P (operands[0]))
12487 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
12489 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
12494 if (STACK_TOP_P (operands[0]))
12496 if (STACK_TOP_P (operands[1]))
12497 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12499 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
12502 else if (STACK_TOP_P (operands[1]))
12505 p = "{\t%1, %0|r\t%0, %1}";
12507 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
12513 p = "{r\t%2, %0|\t%0, %2}";
12515 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12521 gcc_unreachable ();
12528 /* Return needed mode for entity in optimize_mode_switching pass. */
12531 ix86_mode_needed (int entity, rtx insn)
12533 enum attr_i387_cw mode;
12535 /* The mode UNINITIALIZED is used to store the control word after a
12536 function call or ASM pattern. The mode ANY specifies that the function
12537 has no requirements on the control word and makes no changes in the
12538 bits we are interested in. */
12541 || (NONJUMP_INSN_P (insn)
12542 && (asm_noperands (PATTERN (insn)) >= 0
12543 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
12544 return I387_CW_UNINITIALIZED;
12546 if (recog_memoized (insn) < 0)
12547 return I387_CW_ANY;
12549 mode = get_attr_i387_cw (insn);
12554 if (mode == I387_CW_TRUNC)
12559 if (mode == I387_CW_FLOOR)
12564 if (mode == I387_CW_CEIL)
12569 if (mode == I387_CW_MASK_PM)
12574 gcc_unreachable ();
12577 return I387_CW_ANY;
12580 /* Output code to initialize control word copies used by trunc?f?i and
12581 rounding patterns. CURRENT_MODE is set to the current control word,
12582 while NEW_MODE is set to the new control word. */
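/* Background, as x87 architectural facts rather than anything specific
   to this function: bits 10-11 of the control word select the rounding
   mode -- 00 nearest, 01 down, 10 up, 11 truncate -- hence the
   0x0c00/0x0400/0x0800 masks below, and bit 5 (0x0020) masks the
   precision exception for nearbyint.  */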
12585 emit_i387_cw_initialization (int mode)
12587 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
12590 enum ix86_stack_slot slot;
12592 rtx reg = gen_reg_rtx (HImode);
12594 emit_insn (gen_x86_fnstcw_1 (stored_mode));
12595 emit_move_insn (reg, copy_rtx (stored_mode));
12597 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
12598 || optimize_function_for_size_p (cfun))
12602 case I387_CW_TRUNC:
12603 /* round toward zero (truncate) */
12604 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
12605 slot = SLOT_CW_TRUNC;
12608 case I387_CW_FLOOR:
12609 /* round down toward -oo */
12610 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12611 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
12612 slot = SLOT_CW_FLOOR;
12616 /* round up toward +oo */
12617 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12618 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
12619 slot = SLOT_CW_CEIL;
12622 case I387_CW_MASK_PM:
12623 /* mask precision exception for nearbyint() */
12624 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12625 slot = SLOT_CW_MASK_PM;
12629 gcc_unreachable ();
12636 case I387_CW_TRUNC:
12637 /* round toward zero (truncate) */
12638 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
12639 slot = SLOT_CW_TRUNC;
12642 case I387_CW_FLOOR:
12643 /* round down toward -oo */
12644 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
12645 slot = SLOT_CW_FLOOR;
12649 /* round up toward +oo */
12650 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
12651 slot = SLOT_CW_CEIL;
12654 case I387_CW_MASK_PM:
12655 /* mask precision exception for nearbyint() */
12656 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12657 slot = SLOT_CW_MASK_PM;
12661 gcc_unreachable ();
12665 gcc_assert (slot < MAX_386_STACK_LOCALS);
12667 new_mode = assign_386_stack_local (HImode, slot);
12668 emit_move_insn (new_mode, reg);
12671 /* Output code for INSN to convert a float to a signed int. OPERANDS
12672 are the insn operands. The output may be [HSD]Imode and the input
12673 operand may be [SDX]Fmode. */
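/* A typical emitted sequence when the rounding mode must be forced to
   truncation looks like (a sketch): "fldcw %3" to install the truncating
   control word, "fistp %0" to convert and pop, then "fldcw %2" to
   restore the saved control word.  */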
12676 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
12678 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12679 int dimode_p = GET_MODE (operands[0]) == DImode;
12680 int round_mode = get_attr_i387_cw (insn);
12682 /* Jump through a hoop or two for DImode, since the hardware has no
12683 non-popping instruction. We used to do this a different way, but
12684 that was somewhat fragile and broke with post-reload splitters. */
12685 if ((dimode_p || fisttp) && !stack_top_dies)
12686 output_asm_insn ("fld\t%y1", operands);
12688 gcc_assert (STACK_TOP_P (operands[1]));
12689 gcc_assert (MEM_P (operands[0]));
12690 gcc_assert (GET_MODE (operands[1]) != TFmode);
12693 output_asm_insn ("fisttp%Z0\t%0", operands);
12696 if (round_mode != I387_CW_ANY)
12697 output_asm_insn ("fldcw\t%3", operands);
12698 if (stack_top_dies || dimode_p)
12699 output_asm_insn ("fistp%Z0\t%0", operands);
12701 output_asm_insn ("fist%Z0\t%0", operands);
12702 if (round_mode != I387_CW_ANY)
12703 output_asm_insn ("fldcw\t%2", operands);
12709 /* Output code for x87 ffreep insn. The OPNO argument, which may only
12710 have the values zero or one, indicates the ffreep insn's operand
12711 from the OPERANDS array. */
12713 static const char *
12714 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
12716 if (TARGET_USE_FFREEP)
12717 #ifdef HAVE_AS_IX86_FFREEP
12718 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
12721 static char retval[32];
12722 int regno = REGNO (operands[opno]);
12724 gcc_assert (FP_REGNO_P (regno));
12726 regno -= FIRST_STACK_REG;
12728 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
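/* The short emitted above is the raw encoding of "ffreep %st(N)":
   bytes 0xdf, 0xc0+N once stored little-endian, for assemblers that
   lack the mnemonic.  */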
12733 return opno ? "fstp\t%y1" : "fstp\t%y0";
12737 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
12738 should be used. UNORDERED_P is true when fucom should be used. */
12741 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
12743 int stack_top_dies;
12744 rtx cmp_op0, cmp_op1;
12745 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
12749 cmp_op0 = operands[0];
12750 cmp_op1 = operands[1];
12754 cmp_op0 = operands[1];
12755 cmp_op1 = operands[2];
12760 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
12761 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
12762 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
12763 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
12765 if (GET_MODE (operands[0]) == SFmode)
12767 return &ucomiss[TARGET_AVX ? 0 : 1];
12769 return &comiss[TARGET_AVX ? 0 : 1];
12772 return &ucomisd[TARGET_AVX ? 0 : 1];
12774 return &comisd[TARGET_AVX ? 0 : 1];
12777 gcc_assert (STACK_TOP_P (cmp_op0));
12779 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12781 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
12783 if (stack_top_dies)
12785 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
12786 return output_387_ffreep (operands, 1);
12789 return "ftst\n\tfnstsw\t%0";
12792 if (STACK_REG_P (cmp_op1)
12794 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
12795 && REGNO (cmp_op1) != FIRST_STACK_REG)
12797 /* If the top of the 387 stack dies, and the other operand is
12798 also a stack register that dies, then this must be an
12799 `fcompp' float compare. */
12803 /* There is no double popping fcomi variant. Fortunately,
12804 eflags is immune from the fstp's cc clobbering. */
12806 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
12808 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
12809 return output_387_ffreep (operands, 0);
12814 return "fucompp\n\tfnstsw\t%0";
12816 return "fcompp\n\tfnstsw\t%0";
12821 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
12823 static const char * const alt[16] =
12825 "fcom%Z2\t%y2\n\tfnstsw\t%0",
12826 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
12827 "fucom%Z2\t%y2\n\tfnstsw\t%0",
12828 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
12830 "ficom%Z2\t%y2\n\tfnstsw\t%0",
12831 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
12835 "fcomi\t{%y1, %0|%0, %y1}",
12836 "fcomip\t{%y1, %0|%0, %y1}",
12837 "fucomi\t{%y1, %0|%0, %y1}",
12838 "fucomip\t{%y1, %0|%0, %y1}",
12849 mask = eflags_p << 3;
12850 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
12851 mask |= unordered_p << 1;
12852 mask |= stack_top_dies;
12854 gcc_assert (mask < 16);
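/* E.g. eflags_p = 1 with an FP (non-integer) operand, an ordered
   compare, and a dying stack top gives mask 9, selecting "fcomip".
   (Illustrative.)  */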
12863 ix86_output_addr_vec_elt (FILE *file, int value)
12865 const char *directive = ASM_LONG;
12869 directive = ASM_QUAD;
12871 gcc_assert (!TARGET_64BIT);
12874 fprintf (file, "%s" LPREFIX "%d\n", directive, value);
12878 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
12880 const char *directive = ASM_LONG;
12883 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
12884 directive = ASM_QUAD;
12886 gcc_assert (!TARGET_64BIT);
12888 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
12889 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
12890 fprintf (file, "%s" LPREFIX "%d-" LPREFIX "%d\n",
12891 directive, value, rel);
12892 else if (HAVE_AS_GOTOFF_IN_DATA)
12893 fprintf (file, ASM_LONG LPREFIX "%d@GOTOFF\n", value);
12895 else if (TARGET_MACHO)
12897 fprintf (file, ASM_LONG LPREFIX "%d-", value);
12898 machopic_output_function_base_name (file);
12903 asm_fprintf (file, ASM_LONG "%U%s+[.-" LPREFIX "%d]\n",
12904 GOT_SYMBOL_NAME, value);
12907 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate for the target. */
12911 ix86_expand_clear (rtx dest)
12915 /* We play register width games, which are only valid after reload. */
12916 gcc_assert (reload_completed);
12918 /* Avoid HImode and its attendant prefix byte. */
12919 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
12920 dest = gen_rtx_REG (SImode, REGNO (dest));
12921 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
12923 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
12924 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
12926 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12927 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
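/* "xor reg, reg" is typically three bytes shorter than "mov $0, reg"
   but clobbers the flags, hence the explicit FLAGS_REG clobber.  */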
12933 /* X is an unchanging MEM. If it is a constant pool reference, return
12934 the constant pool rtx, else NULL. */
12937 maybe_get_pool_constant (rtx x)
12939 x = ix86_delegitimize_address (XEXP (x, 0));
12941 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
12942 return get_pool_constant (x);
12948 ix86_expand_move (enum machine_mode mode, rtx operands[])
12951 enum tls_model model;
12956 if (GET_CODE (op1) == SYMBOL_REF)
12958 model = SYMBOL_REF_TLS_MODEL (op1);
12961 op1 = legitimize_tls_address (op1, model, true);
12962 op1 = force_operand (op1, op0);
12966 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
12967 && SYMBOL_REF_DLLIMPORT_P (op1))
12968 op1 = legitimize_dllimport_symbol (op1, false);
12970 else if (GET_CODE (op1) == CONST
12971 && GET_CODE (XEXP (op1, 0)) == PLUS
12972 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
12974 rtx addend = XEXP (XEXP (op1, 0), 1);
12975 rtx symbol = XEXP (XEXP (op1, 0), 0);
12978 model = SYMBOL_REF_TLS_MODEL (symbol);
12980 tmp = legitimize_tls_address (symbol, model, true);
12981 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
12982 && SYMBOL_REF_DLLIMPORT_P (symbol))
12983 tmp = legitimize_dllimport_symbol (symbol, true);
12987 tmp = force_operand (tmp, NULL);
12988 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
12989 op0, 1, OPTAB_DIRECT);
12995 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
12997 if (TARGET_MACHO && !TARGET_64BIT)
13002 rtx temp = ((reload_in_progress
13003 || ((op0 && REG_P (op0))
13005 ? op0 : gen_reg_rtx (Pmode));
13006 op1 = machopic_indirect_data_reference (op1, temp);
13007 op1 = machopic_legitimize_pic_address (op1, mode,
13008 temp == op1 ? 0 : temp);
13010 else if (MACHOPIC_INDIRECT)
13011 op1 = machopic_indirect_data_reference (op1, 0);
13019 op1 = force_reg (Pmode, op1);
13020 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
13022 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
13023 op1 = legitimize_pic_address (op1, reg);
13032 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
13033 || !push_operand (op0, mode))
13035 op1 = force_reg (mode, op1);
13037 if (push_operand (op0, mode)
13038 && ! general_no_elim_operand (op1, mode))
13039 op1 = copy_to_mode_reg (mode, op1);
13041 /* Force large constants in 64bit compilation into register
13042 to get them CSEed. */
13043 if (can_create_pseudo_p ()
13044 && (mode == DImode) && TARGET_64BIT
13045 && immediate_operand (op1, mode)
13046 && !x86_64_zext_immediate_operand (op1, VOIDmode)
13047 && !register_operand (op0, mode)
13049 op1 = copy_to_mode_reg (mode, op1);
13051 if (can_create_pseudo_p ()
13052 && FLOAT_MODE_P (mode)
13053 && GET_CODE (op1) == CONST_DOUBLE)
13055 /* If we are loading a floating point constant to a register,
13056 force the value to memory now, since we'll get better code
13057 out the back end. */
13059 op1 = validize_mem (force_const_mem (mode, op1));
13060 if (!register_operand (op0, mode))
13062 rtx temp = gen_reg_rtx (mode);
13063 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
13064 emit_move_insn (op0, temp);
13070 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13074 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
13076 rtx op0 = operands[0], op1 = operands[1];
13077 unsigned int align = GET_MODE_ALIGNMENT (mode);
13079 /* Force constants other than zero into memory. We do not know how
13080 the instructions used to build constants modify the upper 64 bits
13081 of the register; once we have that information we may be able
13082 to handle some of them more efficiently. */
13083 if (can_create_pseudo_p ()
13084 && register_operand (op0, mode)
13085 && (CONSTANT_P (op1)
13086 || (GET_CODE (op1) == SUBREG
13087 && CONSTANT_P (SUBREG_REG (op1))))
13088 && !standard_sse_constant_p (op1))
13089 op1 = validize_mem (force_const_mem (mode, op1));
13091 /* We need to check memory alignment for SSE mode since attributes
13092 can make operands unaligned. */
13093 if (can_create_pseudo_p ()
13094 && SSE_REG_MODE_P (mode)
13095 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
13096 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
13100 /* ix86_expand_vector_move_misalign() does not like constants ... */
13101 if (CONSTANT_P (op1)
13102 || (GET_CODE (op1) == SUBREG
13103 && CONSTANT_P (SUBREG_REG (op1))))
13104 op1 = validize_mem (force_const_mem (mode, op1));
13106 /* ... nor both arguments in memory. */
13107 if (!register_operand (op0, mode)
13108 && !register_operand (op1, mode))
13109 op1 = force_reg (mode, op1);
13111 tmp[0] = op0; tmp[1] = op1;
13112 ix86_expand_vector_move_misalign (mode, tmp);
13116 /* Make operand1 a register if it isn't already. */
13117 if (can_create_pseudo_p ()
13118 && !register_operand (op0, mode)
13119 && !register_operand (op1, mode))
13121 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
13125 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13128 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
13129 straight to ix86_expand_vector_move. */
13130 /* Code generation for scalar reg-reg moves of single and double precision data:
13131 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
13135 if (x86_sse_partial_reg_dependency == true)
13140 Code generation for scalar loads of double precision data:
13141 if (x86_sse_split_regs == true)
13142 movlpd mem, reg (gas syntax)
13146 Code generation for unaligned packed loads of single precision data
13147 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
13148 if (x86_sse_unaligned_move_optimal)
13151 if (x86_sse_partial_reg_dependency == true)
13163 Code generation for unaligned packed loads of double precision data
13164 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
13165 if (x86_sse_unaligned_move_optimal)
13168 if (x86_sse_split_regs == true)
13181 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
13190 switch (GET_MODE_CLASS (mode))
13192 case MODE_VECTOR_INT:
13194 switch (GET_MODE_SIZE (mode))
13197 op0 = gen_lowpart (V16QImode, op0);
13198 op1 = gen_lowpart (V16QImode, op1);
13199 emit_insn (gen_avx_movdqu (op0, op1));
13202 op0 = gen_lowpart (V32QImode, op0);
13203 op1 = gen_lowpart (V32QImode, op1);
13204 emit_insn (gen_avx_movdqu256 (op0, op1));
13207 gcc_unreachable ();
13210 case MODE_VECTOR_FLOAT:
13211 op0 = gen_lowpart (mode, op0);
13212 op1 = gen_lowpart (mode, op1);
13217 emit_insn (gen_avx_movups (op0, op1));
13220 emit_insn (gen_avx_movups256 (op0, op1));
13223 emit_insn (gen_avx_movupd (op0, op1));
13226 emit_insn (gen_avx_movupd256 (op0, op1));
13229 gcc_unreachable ();
13234 gcc_unreachable ();
13242 /* If we're optimizing for size, movups is the smallest. */
13243 if (optimize_insn_for_size_p ())
13245 op0 = gen_lowpart (V4SFmode, op0);
13246 op1 = gen_lowpart (V4SFmode, op1);
13247 emit_insn (gen_sse_movups (op0, op1));
13251 /* ??? If we have typed data, then it would appear that using
13252 movdqu is the only way to get unaligned data loaded with integer registers. */
13254 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13256 op0 = gen_lowpart (V16QImode, op0);
13257 op1 = gen_lowpart (V16QImode, op1);
13258 emit_insn (gen_sse2_movdqu (op0, op1));
13262 if (TARGET_SSE2 && mode == V2DFmode)
13266 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13268 op0 = gen_lowpart (V2DFmode, op0);
13269 op1 = gen_lowpart (V2DFmode, op1);
13270 emit_insn (gen_sse2_movupd (op0, op1));
13274 /* When SSE registers are split into halves, we can avoid
13275 writing to the top half twice. */
13276 if (TARGET_SSE_SPLIT_REGS)
13278 emit_clobber (op0);
13283 /* ??? Not sure about the best option for the Intel chips.
13284 The following would seem to satisfy; the register is
13285 entirely cleared, breaking the dependency chain. We
13286 then store to the upper half, with a dependency depth
13287 of one. A rumor has it that Intel recommends two movsd
13288 followed by an unpacklpd, but this is unconfirmed. And
13289 given that the dependency depth of the unpacklpd would
13290 still be one, I'm not sure why this would be better. */
13291 zero = CONST0_RTX (V2DFmode);
13294 m = adjust_address (op1, DFmode, 0);
13295 emit_insn (gen_sse2_loadlpd (op0, zero, m));
13296 m = adjust_address (op1, DFmode, 8);
13297 emit_insn (gen_sse2_loadhpd (op0, op0, m));
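/* Illustrative sketch (not the literal output; register allocation and
   final mnemonics may differ): the sequence built above corresponds
   roughly to
       movsd  (mem), %xmm0      ; load low half, clearing the upper half
       movhpd 8(mem), %xmm0     ; load upper half, dependency depth one
   so no use of the register's previous contents remains.  */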
13301 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13303 op0 = gen_lowpart (V4SFmode, op0);
13304 op1 = gen_lowpart (V4SFmode, op1);
13305 emit_insn (gen_sse_movups (op0, op1));
13309 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
13310 emit_move_insn (op0, CONST0_RTX (mode));
13312 emit_clobber (op0);
13314 if (mode != V4SFmode)
13315 op0 = gen_lowpart (V4SFmode, op0);
13316 m = adjust_address (op1, V2SFmode, 0);
13317 emit_insn (gen_sse_loadlps (op0, op0, m));
13318 m = adjust_address (op1, V2SFmode, 8);
13319 emit_insn (gen_sse_loadhps (op0, op0, m));
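/* A comparable sketch for the SFmode path above, assuming the
   clear-register variant is taken (the exact choice depends on the
   tuning flags checked above):
       xorps  %xmm0, %xmm0
       movlps (mem), %xmm0
       movhps 8(mem), %xmm0  */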
13322 else if (MEM_P (op0))
13324 /* If we're optimizing for size, movups is the smallest. */
13325 if (optimize_insn_for_size_p ())
13327 op0 = gen_lowpart (V4SFmode, op0);
13328 op1 = gen_lowpart (V4SFmode, op1);
13329 emit_insn (gen_sse_movups (op0, op1));
13333 /* ??? Similar to above, only less clear because of
13334 "typeless stores". */
13335 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
13336 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13338 op0 = gen_lowpart (V16QImode, op0);
13339 op1 = gen_lowpart (V16QImode, op1);
13340 emit_insn (gen_sse2_movdqu (op0, op1));
13344 if (TARGET_SSE2 && mode == V2DFmode)
13346 m = adjust_address (op0, DFmode, 0);
13347 emit_insn (gen_sse2_storelpd (m, op1));
13348 m = adjust_address (op0, DFmode, 8);
13349 emit_insn (gen_sse2_storehpd (m, op1));
13353 if (mode != V4SFmode)
13354 op1 = gen_lowpart (V4SFmode, op1);
13355 m = adjust_address (op0, V2SFmode, 0);
13356 emit_insn (gen_sse_storelps (m, op1));
13357 m = adjust_address (op0, V2SFmode, 8);
13358 emit_insn (gen_sse_storehps (m, op1));
13362 gcc_unreachable ();
13365 /* Expand a push in MODE. This is some mode for which we do not support
13366 proper push instructions, at least from the registers that we expect
13367 the value to live in. */
13370 ix86_expand_push (enum machine_mode mode, rtx x)
13374 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
13375 GEN_INT (-GET_MODE_SIZE (mode)),
13376 stack_pointer_rtx, 1, OPTAB_DIRECT);
13377 if (tmp != stack_pointer_rtx)
13378 emit_move_insn (stack_pointer_rtx, tmp);
13380 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
13382 /* When we push an operand onto stack, it has to be aligned at least
13383 at the function argument boundary. However since we don't have
13384 the argument type, we can't determine the actual argument boundary. */
13386 emit_move_insn (tmp, x);
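/* For illustration, on ia32 a DFmode push through this routine amounts
   roughly to (a sketch; the pointer arithmetic is done by the
   expand_simple_binop call above):
       subl $8, %esp
       movl x_lo, (%esp)
       movl x_hi, 4(%esp)  */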
13389 /* Helper function of ix86_fixup_binary_operands to canonicalize
13390 operand order. Returns true if the operands should be swapped. */
13393 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
13396 rtx dst = operands[0];
13397 rtx src1 = operands[1];
13398 rtx src2 = operands[2];
13400 /* If the operation is not commutative, we can't do anything. */
13401 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
13404 /* Highest priority is that src1 should match dst. */
13405 if (rtx_equal_p (dst, src1))
13407 if (rtx_equal_p (dst, src2))
13410 /* Next highest priority is that immediate constants come second. */
13411 if (immediate_operand (src2, mode))
13413 if (immediate_operand (src1, mode))
13416 /* Lowest priority is that memory references should come second. */
13426 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
13427 destination to use for the operation. If different from the true
13428 destination in operands[0], a copy operation will be required. */
13431 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
13434 rtx dst = operands[0];
13435 rtx src1 = operands[1];
13436 rtx src2 = operands[2];
13438 /* Canonicalize operand order. */
13439 if (ix86_swap_binary_operands_p (code, mode, operands))
13443 /* It is invalid to swap operands of different modes. */
13444 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
13451 /* Both source operands cannot be in memory. */
13452 if (MEM_P (src1) && MEM_P (src2))
13454 /* Optimization: Only read from memory once. */
13455 if (rtx_equal_p (src1, src2))
13457 src2 = force_reg (mode, src2);
13461 src2 = force_reg (mode, src2);
13464 /* If the destination is memory, and we do not have matching source
13465 operands, do things in registers. */
13466 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13467 dst = gen_reg_rtx (mode);
13469 /* Source 1 cannot be a constant. */
13470 if (CONSTANT_P (src1))
13471 src1 = force_reg (mode, src1);
13473 /* Source 1 cannot be a non-matching memory. */
13474 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13475 src1 = force_reg (mode, src1);
13477 operands[1] = src1;
13478 operands[2] = src2;
13482 /* Similarly, but assume that the destination has already been
13483 set up properly. */
13486 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
13487 enum machine_mode mode, rtx operands[])
13489 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
13490 gcc_assert (dst == operands[0]);
13493 /* Attempt to expand a binary operator. Make the expansion closer to the
13494 actual machine than just general_operand, which will allow 3 separate
13495 memory references (one output, two input) in a single insn. */
13498 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
13501 rtx src1, src2, dst, op, clob;
13503 dst = ix86_fixup_binary_operands (code, mode, operands);
13504 src1 = operands[1];
13505 src2 = operands[2];
13507 /* Emit the instruction. */
13509 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
13510 if (reload_in_progress)
13512 /* Reload doesn't know about the flags register, and doesn't know that
13513 it doesn't want to clobber it. We can only do this with PLUS. */
13514 gcc_assert (code == PLUS);
13519 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13520 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13523 /* Fix up the destination if needed. */
13524 if (dst != operands[0])
13525 emit_move_insn (operands[0], dst);
13528 /* Return TRUE or FALSE depending on whether the binary operator meets the
13529 appropriate constraints. */
13532 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
13535 rtx dst = operands[0];
13536 rtx src1 = operands[1];
13537 rtx src2 = operands[2];
13539 /* Both source operands cannot be in memory. */
13540 if (MEM_P (src1) && MEM_P (src2))
13543 /* Canonicalize operand order for commutative operators. */
13544 if (ix86_swap_binary_operands_p (code, mode, operands))
13551 /* If the destination is memory, we must have a matching source operand. */
13552 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13555 /* Source 1 cannot be a constant. */
13556 if (CONSTANT_P (src1))
13559 /* Source 1 cannot be a non-matching memory. */
13560 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13566 /* Attempt to expand a unary operator. Make the expansion closer to the
13567 actual machine than just general_operand, which will allow 2 separate
13568 memory references (one output, one input) in a single insn. */
13571 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
13574 int matching_memory;
13575 rtx src, dst, op, clob;
13580 /* If the destination is memory, and we do not have matching source
13581 operands, do things in registers. */
13582 matching_memory = 0;
13585 if (rtx_equal_p (dst, src))
13586 matching_memory = 1;
13588 dst = gen_reg_rtx (mode);
13591 /* When source operand is memory, destination must match. */
13592 if (MEM_P (src) && !matching_memory)
13593 src = force_reg (mode, src);
13595 /* Emit the instruction. */
13597 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
13598 if (reload_in_progress || code == NOT)
13600 /* Reload doesn't know about the flags register, and doesn't know that
13601 it doesn't want to clobber it. */
13602 gcc_assert (code == NOT);
13607 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13608 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13611 /* Fix up the destination if needed. */
13612 if (dst != operands[0])
13613 emit_move_insn (operands[0], dst);
13616 #define LEA_SEARCH_THRESHOLD 12
13618 /* Search backward for non-agu definition of register number REGNO1
13619 or register number REGNO2 in INSN's basic block until
13620 1. Pass LEA_SEARCH_THRESHOLD instructions, or
13621 2. Reach BB boundary, or
13622 3. Reach agu definition.
13623 Returns the distance between the non-agu definition point and INSN.
13624 If no definition point, returns -1. */
13627 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
13630 basic_block bb = BLOCK_FOR_INSN (insn);
13633 enum attr_type insn_type;
13635 if (insn != BB_HEAD (bb))
13637 rtx prev = PREV_INSN (insn);
13638 while (prev && distance < LEA_SEARCH_THRESHOLD)
13643 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13644 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13645 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13646 && (regno1 == DF_REF_REGNO (*def_rec)
13647 || regno2 == DF_REF_REGNO (*def_rec)))
13649 insn_type = get_attr_type (prev);
13650 if (insn_type != TYPE_LEA)
13654 if (prev == BB_HEAD (bb))
13656 prev = PREV_INSN (prev);
13660 if (distance < LEA_SEARCH_THRESHOLD)
13664 bool simple_loop = false;
13666 FOR_EACH_EDGE (e, ei, bb->preds)
13669 simple_loop = true;
13675 rtx prev = BB_END (bb);
13678 && distance < LEA_SEARCH_THRESHOLD)
13683 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13684 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13685 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13686 && (regno1 == DF_REF_REGNO (*def_rec)
13687 || regno2 == DF_REF_REGNO (*def_rec)))
13689 insn_type = get_attr_type (prev);
13690 if (insn_type != TYPE_LEA)
13694 prev = PREV_INSN (prev);
13702 /* get_attr_type may modify recog data. We want to make sure
13703 that recog data is valid for instruction INSN, on which
13704 distance_non_agu_define is called. INSN is unchanged here. */
13705 extract_insn_cached (insn);
13709 /* Return the distance between INSN and the next insn that uses
13710 register number REGNO0 in a memory address. Return -1 if no such
13711 use is found within LEA_SEARCH_THRESHOLD or REGNO0 is set. */
13714 distance_agu_use (unsigned int regno0, rtx insn)
13716 basic_block bb = BLOCK_FOR_INSN (insn);
13721 if (insn != BB_END (bb))
13723 rtx next = NEXT_INSN (insn);
13724 while (next && distance < LEA_SEARCH_THRESHOLD)
13730 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13731 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13732 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13733 && regno0 == DF_REF_REGNO (*use_rec))
13735 /* Return DISTANCE if OP0 is used in a memory
13736 address in NEXT. */
13740 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13741 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13742 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13743 && regno0 == DF_REF_REGNO (*def_rec))
13745 /* Return -1 if OP0 is set in NEXT. */
13749 if (next == BB_END (bb))
13751 next = NEXT_INSN (next);
13755 if (distance < LEA_SEARCH_THRESHOLD)
13759 bool simple_loop = false;
13761 FOR_EACH_EDGE (e, ei, bb->succs)
13764 simple_loop = true;
13770 rtx next = BB_HEAD (bb);
13773 && distance < LEA_SEARCH_THRESHOLD)
13779 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13780 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13781 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13782 && regno0 == DF_REF_REGNO (*use_rec))
13784 /* Return DISTANCE if OP0 is used in a memory
13785 address in NEXT. */
13789 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13790 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13791 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13792 && regno0 == DF_REF_REGNO (*def_rec))
13794 /* Return -1 if OP0 is set in NEXT. */
13799 next = NEXT_INSN (next);
13807 /* Define this macro to tune LEA priority vs ADD; it takes effect when
13808 there is a dilemma of choosing LEA or ADD.
13809 Negative value: ADD is preferred over LEA.
13811 Positive value: LEA is preferred over ADD. */
13812 #define IX86_LEA_PRIORITY 2
13814 /* Return true if it is ok to optimize an ADD operation to LEA
13815 operation to avoid flag register consumption. For processors
13816 like ATOM, if the destination register of LEA holds an actual
13817 address which will be used soon, LEA is better; otherwise ADD is better. */
13821 ix86_lea_for_add_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13822 rtx insn, rtx operands[])
13824 unsigned int regno0 = true_regnum (operands[0]);
13825 unsigned int regno1 = true_regnum (operands[1]);
13826 unsigned int regno2;
13828 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
13829 return regno0 != regno1;
13831 regno2 = true_regnum (operands[2]);
13833 /* If a = b + c, (a != b && a != c), we must use the lea form. */
13834 if (regno0 != regno1 && regno0 != regno2)
13838 int dist_define, dist_use;
13839 dist_define = distance_non_agu_define (regno1, regno2, insn);
13840 if (dist_define <= 0)
13843 /* If this insn has both a backward non-agu dependence and a forward
13844 agu dependence, the one with the shorter distance takes effect. */
13845 dist_use = distance_agu_use (regno0, insn);
13847 || (dist_define + IX86_LEA_PRIORITY) < dist_use)
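/* Worked example (illustrative; the enclosing condition's return is
   what decides): with dist_define == 2 (a non-agu instruction defined
   an input two insns back) and dist_use == 5 (the result feeds an
   address five insns ahead), dist_define + IX86_LEA_PRIORITY == 4 is
   less than dist_use, so the backward dependence wins and the plain
   ADD form is kept; LEA is preferred only when its address use is at
   least as close as the defining instruction.  */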
13854 /* Return true if the destination reg of SET_BODY is the shift count of USE_BODY. */
13858 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
13864 /* Retrieve destination of SET_BODY. */
13865 switch (GET_CODE (set_body))
13868 set_dest = SET_DEST (set_body);
13869 if (!set_dest || !REG_P (set_dest))
13873 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
13874 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
13882 /* Retrieve shift count of USE_BODY. */
13883 switch (GET_CODE (use_body))
13886 shift_rtx = XEXP (use_body, 1);
13889 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
13890 if (ix86_dep_by_shift_count_body (set_body,
13891 XVECEXP (use_body, 0, i)))
13899 && (GET_CODE (shift_rtx) == ASHIFT
13900 || GET_CODE (shift_rtx) == LSHIFTRT
13901 || GET_CODE (shift_rtx) == ASHIFTRT
13902 || GET_CODE (shift_rtx) == ROTATE
13903 || GET_CODE (shift_rtx) == ROTATERT))
13905 rtx shift_count = XEXP (shift_rtx, 1);
13907 /* Return true if shift count is dest of SET_BODY. */
13908 if (REG_P (shift_count)
13909 && true_regnum (set_dest) == true_regnum (shift_count))
13916 /* Return true if the destination reg of SET_INSN is the shift count of USE_INSN. */
13920 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
13922 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
13923 PATTERN (use_insn));
13926 /* Return TRUE or FALSE depending on whether the unary operator meets the
13927 appropriate constraints. */
13930 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13931 enum machine_mode mode ATTRIBUTE_UNUSED,
13932 rtx operands[2] ATTRIBUTE_UNUSED)
13934 /* If one of the operands is memory, source and destination must match. */
13935 if ((MEM_P (operands[0])
13936 || MEM_P (operands[1]))
13937 && ! rtx_equal_p (operands[0], operands[1]))
13942 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
13943 are ok, keeping in mind the possible movddup alternative. */
13946 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
13948 if (MEM_P (operands[0]))
13949 return rtx_equal_p (operands[0], operands[1 + high]);
13950 if (MEM_P (operands[1]) && MEM_P (operands[2]))
13951 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
13955 /* Post-reload splitter for converting an SF or DFmode value in an
13956 SSE register into an unsigned SImode. */
13959 ix86_split_convert_uns_si_sse (rtx operands[])
13961 enum machine_mode vecmode;
13962 rtx value, large, zero_or_two31, input, two31, x;
13964 large = operands[1];
13965 zero_or_two31 = operands[2];
13966 input = operands[3];
13967 two31 = operands[4];
13968 vecmode = GET_MODE (large);
13969 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
13971 /* Load up the value into the low element. We must ensure that the other
13972 elements are valid floats -- zero is the easiest such value. */
13975 if (vecmode == V4SFmode)
13976 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
13978 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
13982 input = gen_rtx_REG (vecmode, REGNO (input));
13983 emit_move_insn (value, CONST0_RTX (vecmode));
13984 if (vecmode == V4SFmode)
13985 emit_insn (gen_sse_movss (value, value, input));
13987 emit_insn (gen_sse2_movsd (value, value, input));
13990 emit_move_insn (large, two31);
13991 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
13993 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
13994 emit_insn (gen_rtx_SET (VOIDmode, large, x));
13996 x = gen_rtx_AND (vecmode, zero_or_two31, large);
13997 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
13999 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
14000 emit_insn (gen_rtx_SET (VOIDmode, value, x));
14002 large = gen_rtx_REG (V4SImode, REGNO (large));
14003 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
14005 x = gen_rtx_REG (V4SImode, REGNO (value));
14006 if (vecmode == V4SFmode)
14007 emit_insn (gen_sse2_cvttps2dq (x, value));
14009 emit_insn (gen_sse2_cvttpd2dq (x, value));
14012 emit_insn (gen_xorv4si3 (value, value, large));
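/* A scalar model of the vector sequence above (an illustrative sketch;
   dtoui is a hypothetical name, not part of GCC):

     uint32_t dtoui (double x)
     {
       double two31 = 0x1.0p31;
       int big = x >= two31;                          // the LE mask
       int32_t t = (int32_t) (big ? x - two31 : x);   // cvttps2dq/cvttpd2dq
       return (uint32_t) t ^ (big ? 0x80000000u : 0); // the closing xor
     }  */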
14015 /* Convert an unsigned DImode value into a DFmode, using only SSE.
14016 Expects the 64-bit DImode to be supplied in a pair of integral
14017 registers. Requires SSE2; will use SSE3 if available. For x86_32,
14018 -mfpmath=sse, !optimize_size only. */
14021 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
14023 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
14024 rtx int_xmm, fp_xmm;
14025 rtx biases, exponents;
14028 int_xmm = gen_reg_rtx (V4SImode);
14029 if (TARGET_INTER_UNIT_MOVES)
14030 emit_insn (gen_movdi_to_sse (int_xmm, input));
14031 else if (TARGET_SSE_SPLIT_REGS)
14033 emit_clobber (int_xmm);
14034 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
14038 x = gen_reg_rtx (V2DImode);
14039 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
14040 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
14043 x = gen_rtx_CONST_VECTOR (V4SImode,
14044 gen_rtvec (4, GEN_INT (0x43300000UL),
14045 GEN_INT (0x45300000UL),
14046 const0_rtx, const0_rtx));
14047 exponents = validize_mem (force_const_mem (V4SImode, x));
14049 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
14050 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
14052 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
14053 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
14054 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
14055 (0x1.0p84 + double(fp_value_hi_xmm)).
14056 Note these exponents differ by 32. */
14058 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
14060 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
14061 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
14062 real_ldexp (&bias_lo_rvt, &dconst1, 52);
14063 real_ldexp (&bias_hi_rvt, &dconst1, 84);
14064 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
14065 x = const_double_from_real_value (bias_hi_rvt, DFmode);
14066 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
14067 biases = validize_mem (force_const_mem (V2DFmode, biases));
14068 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
14070 /* Add the upper and lower DFmode values together. */
14072 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
14075 x = copy_to_mode_reg (V2DFmode, fp_xmm);
14076 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
14077 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
14080 ix86_expand_vector_extract (false, target, fp_xmm, 0);
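/* A self-contained scalar sketch of the bias trick (illustrative only;
   uns64_to_double is a hypothetical name):

     #include <stdint.h>
     #include <string.h>

     double uns64_to_double (uint64_t u)
     {
       uint64_t lo_bits = 0x4330000000000000ull | (u & 0xffffffffull);
       uint64_t hi_bits = 0x4530000000000000ull | (u >> 32);
       double lo, hi;
       memcpy (&lo, &lo_bits, sizeof lo);  // 0x1.0p52 + low 32 bits
       memcpy (&hi, &hi_bits, sizeof hi);  // 0x1.0p84 + high 32 bits * 2**32
       return (hi - 0x1.0p84) + (lo - 0x1.0p52);
     }

   Both subtractions are exact; only the final addition rounds.  */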
14083 /* Not used, but eases macroization of patterns. */
14085 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
14086 rtx input ATTRIBUTE_UNUSED)
14088 gcc_unreachable ();
14091 /* Convert an unsigned SImode value into a DFmode. Only currently used
14092 for SSE, but applicable anywhere. */
14095 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
14097 REAL_VALUE_TYPE TWO31r;
14100 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
14101 NULL, 1, OPTAB_DIRECT);
14103 fp = gen_reg_rtx (DFmode);
14104 emit_insn (gen_floatsidf2 (fp, x));
14106 real_ldexp (&TWO31r, &dconst1, 31);
14107 x = const_double_from_real_value (TWO31r, DFmode);
14109 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
14111 emit_move_insn (target, x);
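/* Scalar equivalent (a sketch; uns32_to_double is a hypothetical
   name): flip the sign bit, convert as signed, add 2**31 back:

     double uns32_to_double (uint32_t u)
     {
       return (double) (int32_t) (u + 0x80000000u) + 0x1.0p31;
     }

   The result is exact, since every 32-bit integer fits in a double.  */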
14114 /* Convert a signed DImode value into a DFmode. Only used for SSE in
14115 32-bit mode; otherwise we have a direct convert instruction. */
14118 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
14120 REAL_VALUE_TYPE TWO32r;
14121 rtx fp_lo, fp_hi, x;
14123 fp_lo = gen_reg_rtx (DFmode);
14124 fp_hi = gen_reg_rtx (DFmode);
14126 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
14128 real_ldexp (&TWO32r, &dconst1, 32);
14129 x = const_double_from_real_value (TWO32r, DFmode);
14130 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
14132 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
14134 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
14137 emit_move_insn (target, x);
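/* Scalar model (a sketch, reusing the hypothetical uns32_to_double
   from above): the high word converts as signed and scales by 2**32
   exactly; the low word converts as unsigned:

     double sign64_to_double (int64_t s)
     {
       return (double) (int32_t) (s >> 32) * 0x1.0p32
              + uns32_to_double ((uint32_t) s);
     }  */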
14140 /* Convert an unsigned SImode value into a SFmode, using only SSE.
14141 For x86_32, -mfpmath=sse, !optimize_size only. */
14143 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
14145 REAL_VALUE_TYPE ONE16r;
14146 rtx fp_hi, fp_lo, int_hi, int_lo, x;
14148 real_ldexp (&ONE16r, &dconst1, 16);
14149 x = const_double_from_real_value (ONE16r, SFmode);
14150 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
14151 NULL, 0, OPTAB_DIRECT);
14152 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
14153 NULL, 0, OPTAB_DIRECT);
14154 fp_hi = gen_reg_rtx (SFmode);
14155 fp_lo = gen_reg_rtx (SFmode);
14156 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
14157 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
14158 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
14160 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
14162 if (!rtx_equal_p (target, fp_hi))
14163 emit_move_insn (target, fp_hi);
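/* Scalar model (a sketch; uns32_to_float is a hypothetical name):
   each 16-bit half converts exactly to SFmode, so only the final
   addition rounds:

     float uns32_to_float (uint32_t u)
     {
       return (float) (u >> 16) * 65536.0f + (float) (u & 0xffff);
     }  */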
14166 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
14167 then replicate the value for all elements of the vector
14171 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
14178 v = gen_rtvec (4, value, value, value, value);
14179 return gen_rtx_CONST_VECTOR (V4SImode, v);
14183 v = gen_rtvec (2, value, value);
14184 return gen_rtx_CONST_VECTOR (V2DImode, v);
14188 v = gen_rtvec (4, value, value, value, value);
14190 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
14191 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
14192 return gen_rtx_CONST_VECTOR (V4SFmode, v);
14196 v = gen_rtvec (2, value, value);
14198 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
14199 return gen_rtx_CONST_VECTOR (V2DFmode, v);
14202 gcc_unreachable ();
14206 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
14207 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
14208 for an SSE register. If VECT is true, then replicate the mask for
14209 all elements of the vector register. If INVERT is true, then create
14210 a mask excluding the sign bit. */
14213 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
14215 enum machine_mode vec_mode, imode;
14216 HOST_WIDE_INT hi, lo;
14221 /* Find the sign bit, sign extended to 2*HWI. */
14227 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
14228 lo = 0x80000000, hi = lo < 0;
14234 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
14235 if (HOST_BITS_PER_WIDE_INT >= 64)
14236 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
14238 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14243 vec_mode = VOIDmode;
14244 if (HOST_BITS_PER_WIDE_INT >= 64)
14247 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
14254 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14258 lo = ~lo, hi = ~hi;
14264 mask = immed_double_const (lo, hi, imode);
14266 vec = gen_rtvec (2, v, mask);
14267 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
14268 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
14275 gcc_unreachable ();
14279 lo = ~lo, hi = ~hi;
14281 /* Force this value into the low part of a fp vector constant. */
14282 mask = immed_double_const (lo, hi, imode);
14283 mask = gen_lowpart (mode, mask);
14285 if (vec_mode == VOIDmode)
14286 return force_reg (mode, mask);
14288 v = ix86_build_const_vector (mode, vect, mask);
14289 return force_reg (vec_mode, v);
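/* For example (illustrative): for SFmode with VECT set this builds
   { -0.0f, -0.0f, -0.0f, -0.0f }, i.e. four copies of 0x80000000;
   with INVERT set it builds four copies of 0x7fffffff instead.  */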
14292 /* Generate code for floating point ABS or NEG. */
14295 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
14298 rtx mask, set, use, clob, dst, src;
14299 bool use_sse = false;
14300 bool vector_mode = VECTOR_MODE_P (mode);
14301 enum machine_mode elt_mode = mode;
14305 elt_mode = GET_MODE_INNER (mode);
14308 else if (mode == TFmode)
14310 else if (TARGET_SSE_MATH)
14311 use_sse = SSE_FLOAT_MODE_P (mode);
14313 /* NEG and ABS performed with SSE use bitwise mask operations.
14314 Create the appropriate mask now. */
14316 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
14325 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
14326 set = gen_rtx_SET (VOIDmode, dst, set);
14331 set = gen_rtx_fmt_e (code, mode, src);
14332 set = gen_rtx_SET (VOIDmode, dst, set);
14335 use = gen_rtx_USE (VOIDmode, mask);
14336 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
14337 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14338 gen_rtvec (3, set, use, clob)));
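/* In SSE terms the operations above amount to (a sketch):
       xorps .LC_signmask, %xmm0     ; NEG: 0x3f800000 -> 0xbf800000
       andps .LC_notsignmask, %xmm0  ; ABS: clears only the sign bit
   with the mask constants coming from ix86_build_signbit_mask.  */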
14345 /* Expand a copysign operation. Special case operand 0 being a constant. */
14348 ix86_expand_copysign (rtx operands[])
14350 enum machine_mode mode;
14351 rtx dest, op0, op1, mask, nmask;
14353 dest = operands[0];
14357 mode = GET_MODE (dest);
14359 if (GET_CODE (op0) == CONST_DOUBLE)
14361 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
14363 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
14364 op0 = simplify_unary_operation (ABS, mode, op0, mode);
14366 if (mode == SFmode || mode == DFmode)
14368 enum machine_mode vmode;
14370 vmode = mode == SFmode ? V4SFmode : V2DFmode;
14372 if (op0 == CONST0_RTX (mode))
14373 op0 = CONST0_RTX (vmode);
14376 rtx v = ix86_build_const_vector (mode, false, op0);
14378 op0 = force_reg (vmode, v);
14381 else if (op0 != CONST0_RTX (mode))
14382 op0 = force_reg (mode, op0);
14384 mask = ix86_build_signbit_mask (mode, 0, 0);
14386 if (mode == SFmode)
14387 copysign_insn = gen_copysignsf3_const;
14388 else if (mode == DFmode)
14389 copysign_insn = gen_copysigndf3_const;
14391 copysign_insn = gen_copysigntf3_const;
14393 emit_insn (copysign_insn (dest, op0, op1, mask));
14397 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
14399 nmask = ix86_build_signbit_mask (mode, 0, 1);
14400 mask = ix86_build_signbit_mask (mode, 0, 0);
14402 if (mode == SFmode)
14403 copysign_insn = gen_copysignsf3_var;
14404 else if (mode == DFmode)
14405 copysign_insn = gen_copysigndf3_var;
14407 copysign_insn = gen_copysigntf3_var;
14409 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
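/* The underlying identity (illustrative):
     copysign (x, y) == (x & ~signmask) | (y & signmask)
   where signmask has only the sign bit set (0x80000000 for SFmode);
   the const and var splitters below materialize these AND/IOR
   operations.  */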
14413 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
14414 be a constant, and so has already been expanded into a vector constant. */
14417 ix86_split_copysign_const (rtx operands[])
14419 enum machine_mode mode, vmode;
14420 rtx dest, op0, mask, x;
14422 dest = operands[0];
14424 mask = operands[3];
14426 mode = GET_MODE (dest);
14427 vmode = GET_MODE (mask);
14429 dest = simplify_gen_subreg (vmode, dest, mode, 0);
14430 x = gen_rtx_AND (vmode, dest, mask);
14431 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14433 if (op0 != CONST0_RTX (vmode))
14435 x = gen_rtx_IOR (vmode, dest, op0);
14436 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14440 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
14441 so we have to do two masks. */
14444 ix86_split_copysign_var (rtx operands[])
14446 enum machine_mode mode, vmode;
14447 rtx dest, scratch, op0, op1, mask, nmask, x;
14449 dest = operands[0];
14450 scratch = operands[1];
14453 nmask = operands[4];
14454 mask = operands[5];
14456 mode = GET_MODE (dest);
14457 vmode = GET_MODE (mask);
14459 if (rtx_equal_p (op0, op1))
14461 /* Shouldn't happen often (it's useless, obviously), but when it does
14462 we'd generate incorrect code if we continue below. */
14463 emit_move_insn (dest, op0);
14467 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
14469 gcc_assert (REGNO (op1) == REGNO (scratch));
14471 x = gen_rtx_AND (vmode, scratch, mask);
14472 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14475 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14476 x = gen_rtx_NOT (vmode, dest);
14477 x = gen_rtx_AND (vmode, x, op0);
14478 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14482 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
14484 x = gen_rtx_AND (vmode, scratch, mask);
14486 else /* alternative 2,4 */
14488 gcc_assert (REGNO (mask) == REGNO (scratch));
14489 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
14490 x = gen_rtx_AND (vmode, scratch, op1);
14492 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14494 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
14496 dest = simplify_gen_subreg (vmode, op0, mode, 0);
14497 x = gen_rtx_AND (vmode, dest, nmask);
14499 else /* alternative 3,4 */
14501 gcc_assert (REGNO (nmask) == REGNO (dest));
14503 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14504 x = gen_rtx_AND (vmode, dest, op0);
14506 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14509 x = gen_rtx_IOR (vmode, dest, scratch);
14510 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14513 /* Return TRUE or FALSE depending on whether the first SET in INSN
14514 has source and destination with matching CC modes, and the
14515 CC mode is at least as constrained as REQ_MODE. */
14518 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
14521 enum machine_mode set_mode;
14523 set = PATTERN (insn);
14524 if (GET_CODE (set) == PARALLEL)
14525 set = XVECEXP (set, 0, 0);
14526 gcc_assert (GET_CODE (set) == SET);
14527 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
14529 set_mode = GET_MODE (SET_DEST (set));
14533 if (req_mode != CCNOmode
14534 && (req_mode != CCmode
14535 || XEXP (SET_SRC (set), 1) != const0_rtx))
14539 if (req_mode == CCGCmode)
14543 if (req_mode == CCGOCmode || req_mode == CCNOmode)
14547 if (req_mode == CCZmode)
14558 gcc_unreachable ();
14561 return (GET_MODE (SET_SRC (set)) == set_mode);
14564 /* Generate insn patterns to do an integer compare of OPERANDS. */
14567 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
14569 enum machine_mode cmpmode;
14572 cmpmode = SELECT_CC_MODE (code, op0, op1);
14573 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
14575 /* This is very simple, but making the interface the same as in the
14576 FP case makes the rest of the code easier. */
14577 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
14578 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
14580 /* Return the test that should be put into the flags user, i.e.
14581 the bcc, scc, or cmov instruction. */
14582 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
14585 /* Figure out whether to use ordered or unordered fp comparisons.
14586 Return the appropriate mode to use. */
14589 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
14591 /* ??? In order to make all comparisons reversible, we do all comparisons
14592 non-trapping when compiling for IEEE. Once gcc is able to distinguish
14593 between trapping and nontrapping comparisons, we can make inequality
14594 comparisons trapping again, since it results in better code when using
14595 FCOM based compares. */
14596 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
14600 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
14602 enum machine_mode mode = GET_MODE (op0);
14604 if (SCALAR_FLOAT_MODE_P (mode))
14606 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14607 return ix86_fp_compare_mode (code);
14612 /* Only zero flag is needed. */
14613 case EQ: /* ZF=0 */
14614 case NE: /* ZF!=0 */
14616 /* Codes needing carry flag. */
14617 case GEU: /* CF=0 */
14618 case LTU: /* CF=1 */
14619 /* Detect overflow checks. They need just the carry flag. */
14620 if (GET_CODE (op0) == PLUS
14621 && rtx_equal_p (op1, XEXP (op0, 0)))
14625 case GTU: /* CF=0 & ZF=0 */
14626 case LEU: /* CF=1 | ZF=1 */
14627 /* Detect overflow checks. They need just the carry flag. */
14628 if (GET_CODE (op0) == MINUS
14629 && rtx_equal_p (op1, XEXP (op0, 0)))
14633 /* Codes possibly doable only with sign flag when
14634 comparing against zero. */
14635 case GE: /* SF=OF or SF=0 */
14636 case LT: /* SF<>OF or SF=1 */
14637 if (op1 == const0_rtx)
14640 /* For other cases Carry flag is not required. */
14642 /* Codes doable only with sign flag when comparing
14643 against zero, but we lack a jump instruction for it,
14644 so we need to use relational tests against the overflow
14645 flag, which thus needs to be zero. */
14646 case GT: /* ZF=0 & SF=OF */
14647 case LE: /* ZF=1 | SF<>OF */
14648 if (op1 == const0_rtx)
14652 /* strcmp patterns do (use flags), and combine may ask us for a proper mode. */
14657 gcc_unreachable ();
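/* Example of the overflow-check detection above (illustrative): for
   source like `if (a + b < a)' the comparison is (ltu (plus a b) a),
   so op0 is a PLUS whose first operand equals op1 and only the carry
   flag needs to be computed.  */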
14661 /* Return the fixed registers used for condition codes. */
14664 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
14671 /* If two condition code modes are compatible, return a condition code
14672 mode which is compatible with both. Otherwise, return VOIDmode. */
14675 static enum machine_mode
14676 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
14681 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
14684 if ((m1 == CCGCmode && m2 == CCGOCmode)
14685 || (m1 == CCGOCmode && m2 == CCGCmode))
14691 gcc_unreachable ();
14721 /* These are only compatible with themselves, which we already know. */
14728 /* Return a comparison we can do that is equivalent to
14729 swap_condition (code), except possibly for orderedness.
14730 But, never change orderedness if TARGET_IEEE_FP, returning
14731 UNKNOWN in that case if necessary. */
14733 static enum rtx_code
14734 ix86_fp_swap_condition (enum rtx_code code)
14738 case GT: /* GTU - CF=0 & ZF=0 */
14739 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
14740 case GE: /* GEU - CF=0 */
14741 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
14742 case UNLT: /* LTU - CF=1 */
14743 return TARGET_IEEE_FP ? UNKNOWN : GT;
14744 case UNLE: /* LEU - CF=1 | ZF=1 */
14745 return TARGET_IEEE_FP ? UNKNOWN : GE;
14747 return swap_condition (code);
14751 /* Return cost of comparison CODE using the best strategy for performance.
14752 All following functions use the number of instructions as the cost metric.
14753 In future this should be tweaked to compute bytes for optimize_size and
14754 take into account performance of various instructions on various CPUs. */
14757 ix86_fp_comparison_cost (enum rtx_code code)
14761 /* The cost of code using bit-twiddling on %ah. */
14778 arith_cost = TARGET_IEEE_FP ? 5 : 4;
14782 arith_cost = TARGET_IEEE_FP ? 6 : 4;
14785 gcc_unreachable ();
14788 switch (ix86_fp_comparison_strategy (code))
14790 case IX86_FPCMP_COMI:
14791 return arith_cost > 4 ? 3 : 2;
14792 case IX86_FPCMP_SAHF:
14793 return arith_cost > 4 ? 4 : 3;
14799 /* Return strategy to use for floating-point. We assume that fcomi is always
14800 preferable where available, since that is also true when looking at size
14801 (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test). */
14803 enum ix86_fpcmp_strategy
14804 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
14806 /* Do fcomi/sahf based test when profitable. */
14809 return IX86_FPCMP_COMI;
14811 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
14812 return IX86_FPCMP_SAHF;
14814 return IX86_FPCMP_ARITH;
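/* The three strategies correspond roughly to (a sketch):
     IX86_FPCMP_COMI:   fcomi %st(1), %st        ; sets flags directly
     IX86_FPCMP_SAHF:   fcomp; fnstsw %ax; sahf
     IX86_FPCMP_ARITH:  fcomp; fnstsw %ax; test $0x45, %ah  */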
14817 /* Swap, force into registers, or otherwise massage the two operands
14818 to a fp comparison. The operands are updated in place; the new
14819 comparison code is returned. */
14821 static enum rtx_code
14822 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
14824 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
14825 rtx op0 = *pop0, op1 = *pop1;
14826 enum machine_mode op_mode = GET_MODE (op0);
14827 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
14829 /* All of the unordered compare instructions only work on registers.
14830 The same is true of the fcomi compare instructions. The XFmode
14831 compare instructions require registers except when comparing
14832 against zero or when converting operand 1 from fixed point to floating point. */
14836 && (fpcmp_mode == CCFPUmode
14837 || (op_mode == XFmode
14838 && ! (standard_80387_constant_p (op0) == 1
14839 || standard_80387_constant_p (op1) == 1)
14840 && GET_CODE (op1) != FLOAT)
14841 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
14843 op0 = force_reg (op_mode, op0);
14844 op1 = force_reg (op_mode, op1);
14848 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
14849 things around if they appear profitable, otherwise force op0
14850 into a register. */
14852 if (standard_80387_constant_p (op0) == 0
14854 && ! (standard_80387_constant_p (op1) == 0
14857 enum rtx_code new_code = ix86_fp_swap_condition (code);
14858 if (new_code != UNKNOWN)
14861 tmp = op0, op0 = op1, op1 = tmp;
14867 op0 = force_reg (op_mode, op0);
14869 if (CONSTANT_P (op1))
14871 int tmp = standard_80387_constant_p (op1);
14873 op1 = validize_mem (force_const_mem (op_mode, op1));
14877 op1 = force_reg (op_mode, op1);
14880 op1 = force_reg (op_mode, op1);
14884 /* Try to rearrange the comparison to make it cheaper. */
14885 if (ix86_fp_comparison_cost (code)
14886 > ix86_fp_comparison_cost (swap_condition (code))
14887 && (REG_P (op1) || can_create_pseudo_p ()))
14890 tmp = op0, op0 = op1, op1 = tmp;
14891 code = swap_condition (code);
14893 op0 = force_reg (op_mode, op0);
14901 /* Convert comparison codes we use to represent FP comparison to integer
14902 code that will result in a proper branch. Return UNKNOWN if no such code is available. */
14906 ix86_fp_compare_code_to_integer (enum rtx_code code)
14935 /* Generate insn patterns to do a floating point compare of OPERANDS. */
14938 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
14940 enum machine_mode fpcmp_mode, intcmp_mode;
14943 fpcmp_mode = ix86_fp_compare_mode (code);
14944 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
14946 /* Do fcomi/sahf based test when profitable. */
14947 switch (ix86_fp_comparison_strategy (code))
14949 case IX86_FPCMP_COMI:
14950 intcmp_mode = fpcmp_mode;
14951 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14952 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14957 case IX86_FPCMP_SAHF:
14958 intcmp_mode = fpcmp_mode;
14959 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14960 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14964 scratch = gen_reg_rtx (HImode);
14965 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
14966 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
14969 case IX86_FPCMP_ARITH:
14970 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
14971 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14972 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
14974 scratch = gen_reg_rtx (HImode);
14975 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
14977 /* In the unordered case, we have to check C2 for NaN's, which
14978 doesn't happen to work out to anything nice combination-wise.
14979 So do some bit twiddling on the value we've got in AH to come
14980 up with an appropriate set of condition codes. */
14982 intcmp_mode = CCNOmode;
14987 if (code == GT || !TARGET_IEEE_FP)
14989 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
14994 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14995 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
14996 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
14997 intcmp_mode = CCmode;
15003 if (code == LT && TARGET_IEEE_FP)
15005 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15006 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
15007 intcmp_mode = CCmode;
15012 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
15018 if (code == GE || !TARGET_IEEE_FP)
15020 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
15025 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15026 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
15032 if (code == LE && TARGET_IEEE_FP)
15034 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15035 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15036 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15037 intcmp_mode = CCmode;
15042 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15048 if (code == EQ && TARGET_IEEE_FP)
15050 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15051 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15052 intcmp_mode = CCmode;
15057 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15063 if (code == NE && TARGET_IEEE_FP)
15065 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15066 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
15072 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15078 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15082 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15087 gcc_unreachable ();
15095 /* Return the test that should be put into the flags user, i.e.
15096 the bcc, scc, or cmov instruction. */
15097 return gen_rtx_fmt_ee (code, VOIDmode,
15098 gen_rtx_REG (intcmp_mode, FLAGS_REG),
15103 ix86_expand_compare (enum rtx_code code)
15106 op0 = ix86_compare_op0;
15107 op1 = ix86_compare_op1;
15109 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC)
15110 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_op0, ix86_compare_op1);
15112 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
15114 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
15115 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15118 ret = ix86_expand_int_compare (code, op0, op1);
15124 ix86_expand_branch (enum rtx_code code, rtx label)
15128 switch (GET_MODE (ix86_compare_op0))
15137 tmp = ix86_expand_compare (code);
15138 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
15139 gen_rtx_LABEL_REF (VOIDmode, label),
15141 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
15148 /* Expand DImode branch into multiple compare+branch. */
15150 rtx lo[2], hi[2], label2;
15151 enum rtx_code code1, code2, code3;
15152 enum machine_mode submode;
15154 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
15156 tmp = ix86_compare_op0;
15157 ix86_compare_op0 = ix86_compare_op1;
15158 ix86_compare_op1 = tmp;
15159 code = swap_condition (code);
15161 if (GET_MODE (ix86_compare_op0) == DImode)
15163 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
15164 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
15169 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
15170 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
15174 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
15175 avoid two branches. This costs one extra insn, so disable when
15176 optimizing for size. */
15178 if ((code == EQ || code == NE)
15179 && (!optimize_insn_for_size_p ()
15180 || hi[1] == const0_rtx || lo[1] == const0_rtx))
15185 if (hi[1] != const0_rtx)
15186 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
15187 NULL_RTX, 0, OPTAB_WIDEN);
15190 if (lo[1] != const0_rtx)
15191 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
15192 NULL_RTX, 0, OPTAB_WIDEN);
15194 tmp = expand_binop (submode, ior_optab, xor1, xor0,
15195 NULL_RTX, 0, OPTAB_WIDEN);
15197 ix86_compare_op0 = tmp;
15198 ix86_compare_op1 = const0_rtx;
15199 ix86_expand_branch (code, label);
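/* E.g. a DImode equality test on ia32 becomes roughly (a sketch;
   actual register choices differ):
       xorl hi1, hi0
       xorl lo1, lo0
       orl  lo0, hi0
       jz   label
   i.e. one conditional branch instead of two.  */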
15203 /* Otherwise, if we are doing less-than or greater-or-equal-than,
15204 op1 is a constant and the low word is zero, then we can just
15205 examine the high word. Similarly for low word -1 and
15206 less-or-equal-than or greater-than. */
15208 if (CONST_INT_P (hi[1]))
15211 case LT: case LTU: case GE: case GEU:
15212 if (lo[1] == const0_rtx)
15214 ix86_compare_op0 = hi[0];
15215 ix86_compare_op1 = hi[1];
15216 ix86_expand_branch (code, label);
15220 case LE: case LEU: case GT: case GTU:
15221 if (lo[1] == constm1_rtx)
15223 ix86_compare_op0 = hi[0];
15224 ix86_compare_op1 = hi[1];
15225 ix86_expand_branch (code, label);
15233 /* Otherwise, we need two or three jumps. */
15235 label2 = gen_label_rtx ();
15238 code2 = swap_condition (code);
15239 code3 = unsigned_condition (code);
15243 case LT: case GT: case LTU: case GTU:
15246 case LE: code1 = LT; code2 = GT; break;
15247 case GE: code1 = GT; code2 = LT; break;
15248 case LEU: code1 = LTU; code2 = GTU; break;
15249 case GEU: code1 = GTU; code2 = LTU; break;
15251 case EQ: code1 = UNKNOWN; code2 = NE; break;
15252 case NE: code2 = UNKNOWN; break;
15255 gcc_unreachable ();
15260 * if (hi(a) < hi(b)) goto true;
15261 * if (hi(a) > hi(b)) goto false;
15262 * if (lo(a) < lo(b)) goto true;
15266 ix86_compare_op0 = hi[0];
15267 ix86_compare_op1 = hi[1];
15269 if (code1 != UNKNOWN)
15270 ix86_expand_branch (code1, label);
15271 if (code2 != UNKNOWN)
15272 ix86_expand_branch (code2, label2);
15274 ix86_compare_op0 = lo[0];
15275 ix86_compare_op1 = lo[1];
15276 ix86_expand_branch (code3, label);
15278 if (code2 != UNKNOWN)
15279 emit_label (label2);
15284 /* If we have already emitted a compare insn, go straight to simple.
15285 ix86_expand_compare won't emit anything if ix86_compare_emitted is non-NULL. */
15287 gcc_assert (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC);
15292 /* Split branch based on floating point condition. */
15294 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
15295 rtx target1, rtx target2, rtx tmp, rtx pushed)
15300 if (target2 != pc_rtx)
15303 code = reverse_condition_maybe_unordered (code);
15308 condition = ix86_expand_fp_compare (code, op1, op2,
15311 /* Remove pushed operand from stack. */
15313 ix86_free_from_memory (GET_MODE (pushed));
15315 i = emit_jump_insn (gen_rtx_SET
15317 gen_rtx_IF_THEN_ELSE (VOIDmode,
15318 condition, target1, target2)));
15319 if (split_branch_probability >= 0)
15320 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
15324 ix86_expand_setcc (enum rtx_code code, rtx dest)
15328 gcc_assert (GET_MODE (dest) == QImode);
15330 ret = ix86_expand_compare (code);
15331 PUT_MODE (ret, QImode);
15332 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
15335 /* Expand a comparison setting or clearing the carry flag. Return true
15336 when successful and set *POP for the operation. */
15338 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
15340 enum machine_mode mode =
15341 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
15343 /* Do not handle DImode compares that go through special path. */
15344 if (mode == (TARGET_64BIT ? TImode : DImode))
15347 if (SCALAR_FLOAT_MODE_P (mode))
15349 rtx compare_op, compare_seq;
15351 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
15353 /* Shortcut: the following common codes never translate
15354 into carry flag compares. */
15355 if (code == EQ || code == NE || code == UNEQ || code == LTGT
15356 || code == ORDERED || code == UNORDERED)
15359 /* These comparisons require the zero flag; swap operands so they won't. */
15360 if ((code == GT || code == UNLE || code == LE || code == UNGT)
15361 && !TARGET_IEEE_FP)
15366 code = swap_condition (code);
15369 /* Try to expand the comparison and verify that we end up with
15370 carry flag based comparison. This fails to be true only when
15371 we decide to expand the comparison using arithmetic, which is not
15372 a common scenario.
15374 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15375 compare_seq = get_insns ();
15378 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15379 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15380 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
15382 code = GET_CODE (compare_op);
15384 if (code != LTU && code != GEU)
15387 emit_insn (compare_seq);
15392 if (!INTEGRAL_MODE_P (mode))
15401 /* Convert a==0 into (unsigned)a<1. */
15404 if (op1 != const0_rtx)
15407 code = (code == EQ ? LTU : GEU);
15410 /* Convert a>b into b<a or a>=b-1. */
15413 if (CONST_INT_P (op1))
15415 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
15416 /* Bail out on overflow. We could still swap the operands, but that
15417 would force loading the constant into a register. */
15418 if (op1 == const0_rtx
15419 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
15421 code = (code == GTU ? GEU : LTU);
15428 code = (code == GTU ? LTU : GEU);
15432 /* Convert a>=0 into (unsigned)a<0x80000000. */
15435 if (mode == DImode || op1 != const0_rtx)
15437 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15438 code = (code == LT ? GEU : LTU);
15442 if (mode == DImode || op1 != constm1_rtx)
15444 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15445 code = (code == LE ? GEU : LTU);
15451 /* Swapping operands may cause a constant to appear as the first operand. */
15452 if (!nonimmediate_operand (op0, VOIDmode))
15454 if (!can_create_pseudo_p ())
15456 op0 = force_reg (mode, op0);
15458 ix86_compare_op0 = op0;
15459 ix86_compare_op1 = op1;
15460 *pop = ix86_expand_compare (code);
15461 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
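/* E.g. (illustrative) `a == 0' is rewritten above into the unsigned
   test `a < 1', i.e. `cmpl $1, a', after which the carry flag alone
   encodes the result and can feed an sbb/adc based sequence.  */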
15466 ix86_expand_int_movcc (rtx operands[])
15468 enum rtx_code code = GET_CODE (operands[1]), compare_code;
15469 rtx compare_seq, compare_op;
15470 enum machine_mode mode = GET_MODE (operands[0]);
15471 bool sign_bit_compare_p = false;
15474 ix86_compare_op0 = XEXP (operands[1], 0);
15475 ix86_compare_op1 = XEXP (operands[1], 1);
15476 compare_op = ix86_expand_compare (code);
15477 compare_seq = get_insns ();
15480 compare_code = GET_CODE (compare_op);
15482 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
15483 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
15484 sign_bit_compare_p = true;
15486 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
15487 HImode insns, we'd be swallowed in word prefix ops. */
15489 if ((mode != HImode || TARGET_FAST_PREFIX)
15490 && (mode != (TARGET_64BIT ? TImode : DImode))
15491 && CONST_INT_P (operands[2])
15492 && CONST_INT_P (operands[3]))
15494 rtx out = operands[0];
15495 HOST_WIDE_INT ct = INTVAL (operands[2]);
15496 HOST_WIDE_INT cf = INTVAL (operands[3]);
15497 HOST_WIDE_INT diff;
15500 /* Sign bit compares are better done using shifts than by using the sbb insn. */
15502 if (sign_bit_compare_p
15503 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15504 ix86_compare_op1, &compare_op))
15506 /* Detect overlap between destination and compare sources. */
15509 if (!sign_bit_compare_p)
15512 bool fpcmp = false;
15514 compare_code = GET_CODE (compare_op);
15516 flags = XEXP (compare_op, 0);
15518 if (GET_MODE (flags) == CCFPmode
15519 || GET_MODE (flags) == CCFPUmode)
15523 = ix86_fp_compare_code_to_integer (compare_code);
15526 /* To simplify rest of code, restrict to the GEU case. */
15527 if (compare_code == LTU)
15529 HOST_WIDE_INT tmp = ct;
15532 compare_code = reverse_condition (compare_code);
15533 code = reverse_condition (code);
15538 PUT_CODE (compare_op,
15539 reverse_condition_maybe_unordered
15540 (GET_CODE (compare_op)));
15542 PUT_CODE (compare_op,
15543 reverse_condition (GET_CODE (compare_op)));
15547 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
15548 || reg_overlap_mentioned_p (out, ix86_compare_op1))
15549 tmp = gen_reg_rtx (mode);
15551 if (mode == DImode)
15552 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
15554 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
15555 flags, compare_op));
15559 if (code == GT || code == GE)
15560 code = reverse_condition (code);
15563 HOST_WIDE_INT tmp = ct;
15568 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
15569 ix86_compare_op1, VOIDmode, 0, -1);
15582 tmp = expand_simple_binop (mode, PLUS,
15584 copy_rtx (tmp), 1, OPTAB_DIRECT);
15595 tmp = expand_simple_binop (mode, IOR,
15597 copy_rtx (tmp), 1, OPTAB_DIRECT);
15599 else if (diff == -1 && ct)
15609 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15611 tmp = expand_simple_binop (mode, PLUS,
15612 copy_rtx (tmp), GEN_INT (cf),
15613 copy_rtx (tmp), 1, OPTAB_DIRECT);
15621 * andl cf - ct, dest
15631 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15634 tmp = expand_simple_binop (mode, AND,
15636 gen_int_mode (cf - ct, mode),
15637 copy_rtx (tmp), 1, OPTAB_DIRECT);
15639 tmp = expand_simple_binop (mode, PLUS,
15640 copy_rtx (tmp), GEN_INT (ct),
15641 copy_rtx (tmp), 1, OPTAB_DIRECT);
15644 if (!rtx_equal_p (tmp, out))
15645 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
15647 return 1; /* DONE */
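/* Worked example of the mask sequence above (a sketch): for
   dest = cond ? 7 : 12, i.e. cf = 7 and ct = 12 after the
   canonicalization above:
       sbbl %eax, %eax      ; %eax = -1 when the carry is set, else 0
       andl $-5, %eax       ; cf - ct = -5, giving -5 or 0
       addl $12, %eax       ; ct, giving 7 or 12  */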
15652 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15655 tmp = ct, ct = cf, cf = tmp;
15658 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15660 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15662 /* We may be reversing an unordered compare to a normal compare, which
15663 is not valid in general (we may convert a non-trapping condition
15664 to a trapping one); however, on i386 we currently emit all
15665 comparisons unordered. */
15666 compare_code = reverse_condition_maybe_unordered (compare_code);
15667 code = reverse_condition_maybe_unordered (code);
15671 compare_code = reverse_condition (compare_code);
15672 code = reverse_condition (code);
15676 compare_code = UNKNOWN;
15677 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
15678 && CONST_INT_P (ix86_compare_op1))
15680 if (ix86_compare_op1 == const0_rtx
15681 && (code == LT || code == GE))
15682 compare_code = code;
15683 else if (ix86_compare_op1 == constm1_rtx)
15687 else if (code == GT)
15692 /* Optimize dest = (op0 < 0) ? -1 : cf. */
15693 if (compare_code != UNKNOWN
15694 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
15695 && (cf == -1 || ct == -1))
15697 /* If lea code below could be used, only optimize
15698 if it results in a 2 insn sequence. */
15700 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
15701 || diff == 3 || diff == 5 || diff == 9)
15702 || (compare_code == LT && ct == -1)
15703 || (compare_code == GE && cf == -1))
15706 /* notl op1 (if necessary); sarl $31, dest; orl cf, dest. */
15714 code = reverse_condition (code);
15717 out = emit_store_flag (out, code, ix86_compare_op0,
15718 ix86_compare_op1, VOIDmode, 0, -1);
15720 out = expand_simple_binop (mode, IOR,
15721 out, GEN_INT (cf),
15722 out, 1, OPTAB_DIRECT);
15723 if (out != operands[0])
15724 emit_move_insn (operands[0], out);
15726 return 1; /* DONE */
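/* Illustrative sketch (hypothetical helper, not part of this file):
   the shift/or sequence used above for dest = (x < 0) ? -1 : cf.
   Assumes ">>" on a negative int is an arithmetic shift, as GCC
   guarantees for its own code on x86.  */
static int
sar_or_sketch (int x, int cf)
{
  int mask = x >> 31;   /* sarl $31: -1 if x < 0, else 0 */
  return mask | cf;     /* orl cf: yields -1 when negative, else cf */
}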
15731 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
15732 || diff == 3 || diff == 5 || diff == 9)
15733 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
15735 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
15741 * lea cf(dest*(ct-cf)),dest
15745 * This also catches the degenerate setcc-only case.
15751 out = emit_store_flag (out, code, ix86_compare_op0,
15752 ix86_compare_op1, VOIDmode, 0, 1);
15755 /* On x86_64 the lea instruction operates on Pmode, so we need
15756 to get the arithmetic done in the proper mode to match. */
15758 tmp = copy_rtx (out);
15762 out1 = copy_rtx (out);
15763 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
15767 tmp = gen_rtx_PLUS (mode, tmp, out1);
15773 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
15776 if (!rtx_equal_p (tmp, out))
15779 out = force_operand (tmp, copy_rtx (out));
15781 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
15783 if (!rtx_equal_p (out, operands[0]))
15784 emit_move_insn (operands[0], copy_rtx (out));
15786 return 1; /* DONE */
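/* Illustrative sketch (hypothetical helper): the setcc + lea sequence
   this case emits.  With diff = ct - cf in {1, 2, 3, 4, 5, 8, 9},
   cf + cond * diff is a single lea on x86, e.g.
   "lea cf(%eax,%eax,4), %eax" for diff == 5.  */
static unsigned int
setcc_lea_sketch (int cond /* 0 or 1, from setcc */,
                  unsigned int cf, unsigned int diff)
{
  return cf + (unsigned int) cond * diff;
}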
15790 * General case:                  Jumpful:
15791 *   xorl dest,dest               cmpl op1, op2
15792 *   cmpl op1, op2                movl ct, dest
15793 *   setcc dest                   jcc 1f
15794 *   decl dest                    movl cf, dest
15795 *   andl (cf-ct),dest            1:
15796 *   addl ct,dest
15798 * Size 20.                       Size 14.
15800 * This is reasonably steep, but branch mispredict costs are
15801 * high on modern cpus, so consider failing only if optimizing
15802 * for space.
15805 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15806 && BRANCH_COST (optimize_insn_for_speed_p (),
15811 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15816 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15818 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15820 /* We may be reversing an unordered compare to a normal compare,
15821 which is not valid in general (we may convert a non-trapping
15822 condition into a trapping one); however, on i386 we currently
15823 emit all comparisons unordered. */
15824 code = reverse_condition_maybe_unordered (code);
15828 code = reverse_condition (code);
15829 if (compare_code != UNKNOWN)
15830 compare_code = reverse_condition (compare_code);
15834 if (compare_code != UNKNOWN)
15836 /* notl op1 (if needed)
15837 sarl $31, op1
15838 andl (cf-ct), op1
15839 addl ct, op1
15841 For x < 0 (resp. x <= -1) there will be no notl,
15842 so if possible swap the constants to get rid of the
15843 complement.
15844 True/false will be -1/0 while code below (store flag
15845 followed by decrement) is 0/-1, so the constants need
15846 to be exchanged once more. */
15848 if (compare_code == GE || !cf)
15850 code = reverse_condition (code);
15855 HOST_WIDE_INT tmp = cf;
15856 cf = ct;
15857 ct = tmp;
15860 out = emit_store_flag (out, code, ix86_compare_op0,
15861 ix86_compare_op1, VOIDmode, 0, -1);
15865 out = emit_store_flag (out, code, ix86_compare_op0,
15866 ix86_compare_op1, VOIDmode, 0, 1);
15868 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
15869 copy_rtx (out), 1, OPTAB_DIRECT);
15872 out = expand_simple_binop (mode, AND, copy_rtx (out),
15873 gen_int_mode (cf - ct, mode),
15874 copy_rtx (out), 1, OPTAB_DIRECT);
15876 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
15877 copy_rtx (out), 1, OPTAB_DIRECT);
15878 if (!rtx_equal_p (out, operands[0]))
15879 emit_move_insn (operands[0], copy_rtx (out));
15881 return 1; /* DONE */
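/* Illustrative sketch (hypothetical helper) of the jumpless sequence
   listed in the comment above: setcc, decl, andl, addl.  */
static unsigned int
setcc_mask_sketch (int cond /* 0 or 1 */, unsigned int ct, unsigned int cf)
{
  unsigned int t = (unsigned int) cond;  /* setcc dest */
  t -= 1;                                /* decl: 1 -> 0, 0 -> -1 */
  t &= cf - ct;                          /* andl (cf-ct),dest */
  return t + ct;                         /* addl ct,dest: ct or cf */
}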
15885 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15887 /* Try a few more things with specific constants and a variable. */
15890 rtx var, orig_out, out, tmp;
15892 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
15893 return 0; /* FAIL */
15895 /* If one of the two operands is an interesting constant, load a
15896 constant with the above and mask it in with a logical operation. */
15898 if (CONST_INT_P (operands[2]))
15899 {
15900 var = operands[3];
15901 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
15902 operands[3] = constm1_rtx, op = and_optab;
15903 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
15904 operands[3] = const0_rtx, op = ior_optab;
15906 return 0; /* FAIL */
15908 else if (CONST_INT_P (operands[3]))
15909 {
15910 var = operands[2];
15911 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
15912 operands[2] = constm1_rtx, op = and_optab;
15913 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
15914 operands[2] = const0_rtx, op = ior_optab;
15916 return 0; /* FAIL */
15919 return 0; /* FAIL */
15921 orig_out = operands[0];
15922 tmp = gen_reg_rtx (mode);
15923 operands[0] = tmp;
15925 /* Recurse to get the constant loaded. */
15926 if (ix86_expand_int_movcc (operands) == 0)
15927 return 0; /* FAIL */
15929 /* Mask in the interesting variable. */
15930 out = expand_binop (mode, op, var, tmp, orig_out, 0,
15931 OPTAB_WIDEN);
15932 if (!rtx_equal_p (out, orig_out))
15933 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
15935 return 1; /* DONE */
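/* Illustrative sketch (hypothetical helper): once one arm is 0 or -1,
   the recursive call above loads a 0/-1 mask and a single logical op
   merges in the variable arm, e.g. cond ? 0 : v == (cond ? 0 : -1) & v
   and cond ? -1 : v == (cond ? -1 : 0) | v.  Shown for the AND case.  */
static unsigned int
mask_in_var_sketch (int cond, unsigned int v)
{
  unsigned int mask = cond ? 0U : ~0U;  /* loaded by the recursion */
  return mask & v;                      /* cond ? 0 : v */
}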
15939 /* For comparison with the above: movl cf,dest; movl ct,tmp; cmpl op1,op2; cmovcc tmp,dest. Size 15. */
15949 if (! nonimmediate_operand (operands[2], mode))
15950 operands[2] = force_reg (mode, operands[2]);
15951 if (! nonimmediate_operand (operands[3], mode))
15952 operands[3] = force_reg (mode, operands[3]);
15954 if (! register_operand (operands[2], VOIDmode)
15955 && (mode == QImode
15956 || ! register_operand (operands[3], VOIDmode)))
15957 operands[2] = force_reg (mode, operands[2]);
15959 if (mode == QImode
15960 && ! register_operand (operands[3], VOIDmode))
15961 operands[3] = force_reg (mode, operands[3]);
15963 emit_insn (compare_seq);
15964 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15965 gen_rtx_IF_THEN_ELSE (mode,
15966 compare_op, operands[2],
15967 operands[3])));
15969 return 1; /* DONE */
15972 /* Swap, force into registers, or otherwise massage the two operands
15973 to an sse comparison with a mask result. Thus we differ a bit from
15974 ix86_prepare_fp_compare_args which expects to produce a flags result.
15976 The DEST operand exists to help determine whether to commute commutative
15977 operators. The POP0/POP1 operands are updated in place. The new
15978 comparison code is returned, or UNKNOWN if not implementable. */
15980 static enum rtx_code
15981 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
15982 rtx *pop0, rtx *pop1)
15990 /* We have no LTGT as an operator. We could implement it with
15991 NE & ORDERED, but this requires an extra temporary. It's
15992 not clear that it's worth it. */
15999 /* These are supported directly. */
16006 /* For commutative operators, try to canonicalize the destination
16007 operand to be first in the comparison - this helps reload to
16008 avoid extra moves. */
16009 if (!dest || !rtx_equal_p (dest, *pop1))
16017 /* These are not supported directly. Swap the comparison operands
16018 to transform into something that is supported. */
16022 code = swap_condition (code);
16026 gcc_unreachable ();
16032 /* Detect conditional moves that exactly match min/max operational
16033 semantics. Note that this is IEEE safe, as long as we don't
16034 interchange the operands.
16036 Returns FALSE if this conditional move doesn't match a MIN/MAX,
16037 and TRUE if the operation is successful and instructions are emitted. */
16040 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
16041 rtx cmp_op1, rtx if_true, rtx if_false)
16043 enum machine_mode mode;
16049 else if (code == UNGE)
16052 if_true = if_false;
16058 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
16060 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
16065 mode = GET_MODE (dest);
16067 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
16068 but MODE may be a vector mode and thus not appropriate. */
16069 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
16071 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
16074 if_true = force_reg (mode, if_true);
16075 v = gen_rtvec (2, if_true, if_false);
16076 tmp = gen_rtx_UNSPEC (mode, v, u);
16080 code = is_min ? SMIN : SMAX;
16081 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
16084 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
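/* Illustrative sketch (not part of this file's logic): scalar model of
   the SSE min/max semantics relied on above.  minss (a, b) returns b
   whenever the comparison is false OR unordered, so operand order is
   significant and must not be interchanged for IEEE safety.  */
static float
sse_minss_sketch (float a, float b)
{
  return a < b ? a : b;   /* a NaN in either operand selects b */
}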
16088 /* Expand an sse vector comparison. Return the register with the result. */
16091 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
16092 rtx op_true, rtx op_false)
16094 enum machine_mode mode = GET_MODE (dest);
16097 cmp_op0 = force_reg (mode, cmp_op0);
16098 if (!nonimmediate_operand (cmp_op1, mode))
16099 cmp_op1 = force_reg (mode, cmp_op1);
16102 || reg_overlap_mentioned_p (dest, op_true)
16103 || reg_overlap_mentioned_p (dest, op_false))
16104 dest = gen_reg_rtx (mode);
16106 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
16107 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16112 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
16113 operations. This is used for both scalar and vector conditional moves. */
16116 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
16118 enum machine_mode mode = GET_MODE (dest);
16121 if (op_false == CONST0_RTX (mode))
16123 op_true = force_reg (mode, op_true);
16124 x = gen_rtx_AND (mode, cmp, op_true);
16125 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16127 else if (op_true == CONST0_RTX (mode))
16129 op_false = force_reg (mode, op_false);
16130 x = gen_rtx_NOT (mode, cmp);
16131 x = gen_rtx_AND (mode, x, op_false);
16132 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16134 else if (TARGET_XOP)
16136 rtx pcmov = gen_rtx_SET (mode, dest,
16137 gen_rtx_IF_THEN_ELSE (mode, cmp,
16144 op_true = force_reg (mode, op_true);
16145 op_false = force_reg (mode, op_false);
16147 t2 = gen_reg_rtx (mode);
16149 t3 = gen_reg_rtx (mode);
16153 x = gen_rtx_AND (mode, op_true, cmp);
16154 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
16156 x = gen_rtx_NOT (mode, cmp);
16157 x = gen_rtx_AND (mode, x, op_false);
16158 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
16160 x = gen_rtx_IOR (mode, t3, t2);
16161 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
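/* Illustrative sketch (hypothetical helper): the three-instruction
   fallback above computes dest = (cmp & t) | (~cmp & f), where cmp is
   an all-ones/all-zeros comparison mask, shown here per 32-bit lane.  */
static unsigned int
sse_movcc_sketch (unsigned int cmp, unsigned int t, unsigned int f)
{
  return (cmp & t) | (~cmp & f);   /* pand / pandn / por */
}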
16165 /* Expand a floating-point conditional move. Return true if successful. */
16168 ix86_expand_fp_movcc (rtx operands[])
16170 enum machine_mode mode = GET_MODE (operands[0]);
16171 enum rtx_code code = GET_CODE (operands[1]);
16172 rtx tmp, compare_op;
16174 ix86_compare_op0 = XEXP (operands[1], 0);
16175 ix86_compare_op1 = XEXP (operands[1], 1);
16176 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
16178 enum machine_mode cmode;
16180 /* Since we have no cmove for SSE registers, don't force bad register
16181 allocation just to gain access to it. Deny movcc when the
16182 comparison mode doesn't match the move mode. */
16183 cmode = GET_MODE (ix86_compare_op0);
16184 if (cmode == VOIDmode)
16185 cmode = GET_MODE (ix86_compare_op1);
16189 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16191 &ix86_compare_op1);
16192 if (code == UNKNOWN)
16195 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
16196 ix86_compare_op1, operands[2],
16200 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
16201 ix86_compare_op1, operands[2], operands[3]);
16202 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
16206 /* The floating point conditional move instructions don't directly
16207 support conditions resulting from a signed integer comparison. */
16209 compare_op = ix86_expand_compare (code);
16210 if (!fcmov_comparison_operator (compare_op, VOIDmode))
16212 tmp = gen_reg_rtx (QImode);
16213 ix86_expand_setcc (code, tmp);
16215 ix86_compare_op0 = tmp;
16216 ix86_compare_op1 = const0_rtx;
16217 compare_op = ix86_expand_compare (code);
16220 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16221 gen_rtx_IF_THEN_ELSE (mode, compare_op,
16222 operands[2], operands[3])));
16227 /* Expand a floating-point vector conditional move; a vcond operation
16228 rather than a movcc operation. */
16231 ix86_expand_fp_vcond (rtx operands[])
16233 enum rtx_code code = GET_CODE (operands[3]);
16236 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16237 &operands[4], &operands[5]);
16238 if (code == UNKNOWN)
16241 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
16242 operands[5], operands[1], operands[2]))
16245 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
16246 operands[1], operands[2]);
16247 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
16251 /* Expand a signed/unsigned integral vector conditional move. */
16254 ix86_expand_int_vcond (rtx operands[])
16256 enum machine_mode mode = GET_MODE (operands[0]);
16257 enum rtx_code code = GET_CODE (operands[3]);
16258 bool negate = false;
16261 cop0 = operands[4];
16262 cop1 = operands[5];
16264 /* XOP supports all of the comparisons on all vector int types. */
16267 /* Canonicalize the comparison to EQ, GT, GTU. */
16278 code = reverse_condition (code);
16284 code = reverse_condition (code);
16290 code = swap_condition (code);
16291 x = cop0, cop0 = cop1, cop1 = x;
16295 gcc_unreachable ();
16298 /* Only SSE4.1/SSE4.2 supports V2DImode. */
16299 if (mode == V2DImode)
16304 /* SSE4.1 supports EQ. */
16305 if (!TARGET_SSE4_1)
16311 /* SSE4.2 supports GT/GTU. */
16312 if (!TARGET_SSE4_2)
16317 gcc_unreachable ();
16321 /* Unsigned parallel compare is not supported by the hardware.
16322 Play some tricks to turn this into a signed comparison
16326 cop0 = force_reg (mode, cop0);
16334 rtx (*gen_sub3) (rtx, rtx, rtx);
16336 /* Subtract (-(INT MAX) - 1) from both operands to make them signed. */
16338 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
16340 gen_sub3 = (mode == V4SImode
16341 ? gen_subv4si3 : gen_subv2di3);
16342 t1 = gen_reg_rtx (mode);
16343 emit_insn (gen_sub3 (t1, cop0, mask));
16345 t2 = gen_reg_rtx (mode);
16346 emit_insn (gen_sub3 (t2, cop1, mask));
16356 /* Perform a parallel unsigned saturating subtraction. */
16357 x = gen_reg_rtx (mode);
16358 emit_insn (gen_rtx_SET (VOIDmode, x,
16359 gen_rtx_US_MINUS (mode, cop0, cop1)));
16362 cop1 = CONST0_RTX (mode);
16368 gcc_unreachable ();
16373 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
16374 operands[1+negate], operands[2-negate]);
16376 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
16377 operands[2-negate]);
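/* Illustrative sketch (hypothetical helper): the signbit-mask
   subtraction above biases both operands by INT_MIN, after which a
   signed compare gives the unsigned result; subtracting 0x80000000 is
   the same as xor-ing it in 32-bit arithmetic.  Assumes the usual x86
   wrapping conversion from unsigned to int.  */
static int
unsigned_gt_sketch (unsigned int a, unsigned int b)
{
  int sa = (int) (a ^ 0x80000000u);
  int sb = (int) (b ^ 0x80000000u);
  return sa > sb;   /* equals (a > b) as unsigned */
}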
16381 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
16382 true if we should do zero extension, else sign extension. HIGH_P is
16383 true if we want the N/2 high elements, else the low elements. */
16386 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16388 enum machine_mode imode = GET_MODE (operands[1]);
16389 rtx (*unpack)(rtx, rtx, rtx);
16396 unpack = gen_vec_interleave_highv16qi;
16398 unpack = gen_vec_interleave_lowv16qi;
16402 unpack = gen_vec_interleave_highv8hi;
16404 unpack = gen_vec_interleave_lowv8hi;
16408 unpack = gen_vec_interleave_highv4si;
16410 unpack = gen_vec_interleave_lowv4si;
16413 gcc_unreachable ();
16416 dest = gen_lowpart (imode, operands[0]);
16419 se = force_reg (imode, CONST0_RTX (imode));
16421 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
16422 operands[1], pc_rtx, pc_rtx);
16424 emit_insn (unpack (dest, operands[1], se));
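/* Illustrative sketch (hypothetical helper): scalar model of the
   interleave-based widening above, for the low half of a HImode
   vector.  Zero extension interleaves each element with zero; sign
   extension interleaves it with the per-element sign mask produced by
   the GT comparison against zero.  */
static void
unpack_lo_sketch (const short *src, unsigned int *dst, int nelts)
{
  int i;
  for (i = 0; i < nelts / 2; i++)
    {
      unsigned short se = src[i] < 0 ? 0xffffu : 0u;  /* pcmpgtw (0, x) */
      dst[i] = (unsigned short) src[i]        /* low half of the pair */
               | ((unsigned int) se << 16);   /* interleaved mask     */
    }
}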
16427 /* This function performs the same task as ix86_expand_sse_unpack,
16428 but with SSE4.1 instructions. */
16431 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16433 enum machine_mode imode = GET_MODE (operands[1]);
16434 rtx (*unpack)(rtx, rtx);
16441 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
16443 unpack = gen_sse4_1_extendv8qiv8hi2;
16447 unpack = gen_sse4_1_zero_extendv4hiv4si2;
16449 unpack = gen_sse4_1_extendv4hiv4si2;
16453 unpack = gen_sse4_1_zero_extendv2siv2di2;
16455 unpack = gen_sse4_1_extendv2siv2di2;
16458 gcc_unreachable ();
16461 dest = operands[0];
16464 /* Shift higher 8 bytes to lower 8 bytes. */
16465 src = gen_reg_rtx (imode);
16466 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, src),
16467 gen_lowpart (V1TImode, operands[1]),
16473 emit_insn (unpack (dest, src));
16476 /* Expand conditional increment or decrement using adc/sbb instructions.
16477 The default case using setcc followed by the conditional move can be
16478 done by generic code. */
16480 ix86_expand_int_addcc (rtx operands[])
16482 enum rtx_code code = GET_CODE (operands[1]);
16484 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
16486 rtx val = const0_rtx;
16487 bool fpcmp = false;
16488 enum machine_mode mode;
16490 ix86_compare_op0 = XEXP (operands[1], 0);
16491 ix86_compare_op1 = XEXP (operands[1], 1);
16492 if (operands[3] != const1_rtx
16493 && operands[3] != constm1_rtx)
16495 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
16496 ix86_compare_op1, &compare_op))
16498 code = GET_CODE (compare_op);
16500 flags = XEXP (compare_op, 0);
16502 if (GET_MODE (flags) == CCFPmode
16503 || GET_MODE (flags) == CCFPUmode)
16506 code = ix86_fp_compare_code_to_integer (code);
16512 if (fpcmp)
16513 PUT_CODE (compare_op,
16514 reverse_condition_maybe_unordered
16515 (GET_CODE (compare_op)));
16516 else
16517 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
16520 mode = GET_MODE (operands[0]);
16522 /* Construct either adc or sbb insn. */
16523 if ((code == LTU) == (operands[3] == constm1_rtx))
16528 insn = gen_subqi3_carry;
16531 insn = gen_subhi3_carry;
16534 insn = gen_subsi3_carry;
16537 insn = gen_subdi3_carry;
16540 gcc_unreachable ();
16548 insn = gen_addqi3_carry;
16551 insn = gen_addhi3_carry;
16554 insn = gen_addsi3_carry;
16557 insn = gen_adddi3_carry;
16560 gcc_unreachable ();
16563 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
16565 return 1; /* DONE */
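/* Illustrative sketch (hypothetical helper): the adc/sbb forms emitted
   above compute a conditional increment or decrement with no branch,
   consuming the carry produced by the compare.  */
static unsigned int
addcc_sketch (unsigned int a, unsigned int b, unsigned int op2)
{
  return op2 + (a < b ? 1U : 0U);   /* cmp a, b ; adc op2, 0 */
}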
16569 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
16570 works for floating point parameters and non-offsettable memories.
16571 For pushes, it returns just stack offsets; the values will be saved
16572 in the right order. Maximally four parts are generated. */
16575 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
16580 size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
16582 size = (GET_MODE_SIZE (mode) + 4) / 8;
16584 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
16585 gcc_assert (size >= 2 && size <= 4);
16587 /* Optimize constant pool reference to immediates. This is used by fp
16588 moves, that force all constants to memory to allow combining. */
16589 if (MEM_P (operand) && MEM_READONLY_P (operand))
16591 rtx tmp = maybe_get_pool_constant (operand);
16596 if (MEM_P (operand) && !offsettable_memref_p (operand))
16598 /* The only non-offsetable memories we handle are pushes. */
16599 int ok = push_operand (operand, VOIDmode);
16603 operand = copy_rtx (operand);
16604 PUT_MODE (operand, Pmode);
16605 parts[0] = parts[1] = parts[2] = parts[3] = operand;
16609 if (GET_CODE (operand) == CONST_VECTOR)
16611 enum machine_mode imode = int_mode_for_mode (mode);
16612 /* Caution: if we looked through a constant pool memory above,
16613 the operand may actually have a different mode now. That's
16614 ok, since we want to pun this all the way back to an integer. */
16615 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
16616 gcc_assert (operand != NULL);
16622 if (mode == DImode)
16623 split_di (&operand, 1, &parts[0], &parts[1]);
16628 if (REG_P (operand))
16630 gcc_assert (reload_completed);
16631 for (i = 0; i < size; i++)
16632 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
16634 else if (offsettable_memref_p (operand))
16636 operand = adjust_address (operand, SImode, 0);
16637 parts[0] = operand;
16638 for (i = 1; i < size; i++)
16639 parts[i] = adjust_address (operand, SImode, 4 * i);
16641 else if (GET_CODE (operand) == CONST_DOUBLE)
16646 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16650 real_to_target (l, &r, mode);
16651 parts[3] = gen_int_mode (l[3], SImode);
16652 parts[2] = gen_int_mode (l[2], SImode);
16655 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
16656 parts[2] = gen_int_mode (l[2], SImode);
16659 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
16662 gcc_unreachable ();
16664 parts[1] = gen_int_mode (l[1], SImode);
16665 parts[0] = gen_int_mode (l[0], SImode);
16668 gcc_unreachable ();
16673 if (mode == TImode)
16674 split_ti (&operand, 1, &parts[0], &parts[1]);
16675 if (mode == XFmode || mode == TFmode)
16677 enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
16678 if (REG_P (operand))
16680 gcc_assert (reload_completed);
16681 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
16682 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
16684 else if (offsettable_memref_p (operand))
16686 operand = adjust_address (operand, DImode, 0);
16687 parts[0] = operand;
16688 parts[1] = adjust_address (operand, upper_mode, 8);
16690 else if (GET_CODE (operand) == CONST_DOUBLE)
16695 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16696 real_to_target (l, &r, mode);
16698 /* Do not use shift by 32 to avoid warning on 32bit systems. */
16699 if (HOST_BITS_PER_WIDE_INT >= 64)
16702 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
16703 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
16706 parts[0] = immed_double_const (l[0], l[1], DImode);
16708 if (upper_mode == SImode)
16709 parts[1] = gen_int_mode (l[2], SImode);
16710 else if (HOST_BITS_PER_WIDE_INT >= 64)
16713 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
16714 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
16717 parts[1] = immed_double_const (l[2], l[3], DImode);
16720 gcc_unreachable ();
16727 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
16728 Return false when normal moves are needed; true when all required
16729 insns have been emitted. Operands 2-5 receive the destination parts
16730 in the correct order; operands 6-9 receive the source parts. */
16733 ix86_split_long_move (rtx operands[])
16738 int collisions = 0;
16739 enum machine_mode mode = GET_MODE (operands[0]);
16740 bool collisionparts[4];
16742 /* The DFmode expanders may ask us to move a double.
16743 For a 64-bit target this is a single move. By hiding the fact
16744 here we simplify the i386.md splitters. */
16745 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
16747 /* Optimize constant pool reference to immediates. This is used by
16748 fp moves, that force all constants to memory to allow combining. */
16750 if (MEM_P (operands[1])
16751 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
16752 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
16753 operands[1] = get_pool_constant (XEXP (operands[1], 0));
16754 if (push_operand (operands[0], VOIDmode))
16756 operands[0] = copy_rtx (operands[0]);
16757 PUT_MODE (operands[0], Pmode);
16760 operands[0] = gen_lowpart (DImode, operands[0]);
16761 operands[1] = gen_lowpart (DImode, operands[1]);
16762 emit_move_insn (operands[0], operands[1]);
16766 /* The only non-offsettable memory we handle is push. */
16767 if (push_operand (operands[0], VOIDmode))
16770 gcc_assert (!MEM_P (operands[0])
16771 || offsettable_memref_p (operands[0]));
16773 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
16774 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
16776 /* When emitting a push, take care with source operands on the stack. */
16777 if (push && MEM_P (operands[1])
16778 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
16780 rtx src_base = XEXP (part[1][nparts - 1], 0);
16782 /* Compensate for the stack decrement by 4. */
16783 if (!TARGET_64BIT && nparts == 3
16784 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
16785 src_base = plus_constant (src_base, 4);
16787 /* src_base refers to the stack pointer and is
16788 automatically decreased by emitted push. */
16789 for (i = 0; i < nparts; i++)
16790 part[1][i] = change_address (part[1][i],
16791 GET_MODE (part[1][i]), src_base);
16794 /* We need to do the copy in the right order in case an address register
16795 of the source overlaps the destination. */
16796 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
16800 for (i = 0; i < nparts; i++)
16803 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
16804 if (collisionparts[i])
16808 /* Collision in the middle part can be handled by reordering. */
16809 if (collisions == 1 && nparts == 3 && collisionparts [1])
16811 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16812 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16814 else if (collisions == 1
16816 && (collisionparts [1] || collisionparts [2]))
16818 if (collisionparts [1])
16820 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16821 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16825 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
16826 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
16830 /* If there are more collisions, we can't handle them by reordering.
16831 Do an lea to the last part and use only one colliding move. */
16832 else if (collisions > 1)
16838 base = part[0][nparts - 1];
16840 /* Handle the case when the last part isn't valid for lea.
16841 Happens in 64-bit mode storing the 12-byte XFmode. */
16842 if (GET_MODE (base) != Pmode)
16843 base = gen_rtx_REG (Pmode, REGNO (base));
16845 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
16846 part[1][0] = replace_equiv_address (part[1][0], base);
16847 for (i = 1; i < nparts; i++)
16849 tmp = plus_constant (base, UNITS_PER_WORD * i);
16850 part[1][i] = replace_equiv_address (part[1][i], tmp);
16861 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
16862 emit_insn (gen_addsi3 (stack_pointer_rtx,
16863 stack_pointer_rtx, GEN_INT (-4)));
16864 emit_move_insn (part[0][2], part[1][2]);
16866 else if (nparts == 4)
16868 emit_move_insn (part[0][3], part[1][3]);
16869 emit_move_insn (part[0][2], part[1][2]);
16874 /* In 64-bit mode we don't have a 32-bit push available. If this is a
16875 register, that is OK - we will just use the larger counterpart. We also
16876 retype memory - these come from an attempt to avoid a REX prefix on
16877 moving the second half of a TFmode value. */
16878 if (GET_MODE (part[1][1]) == SImode)
16880 switch (GET_CODE (part[1][1]))
16883 part[1][1] = adjust_address (part[1][1], DImode, 0);
16887 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
16891 gcc_unreachable ();
16894 if (GET_MODE (part[1][0]) == SImode)
16895 part[1][0] = part[1][1];
16898 emit_move_insn (part[0][1], part[1][1]);
16899 emit_move_insn (part[0][0], part[1][0]);
16903 /* Choose the correct order so as not to overwrite the source before it is copied. */
16904 if ((REG_P (part[0][0])
16905 && REG_P (part[1][1])
16906 && (REGNO (part[0][0]) == REGNO (part[1][1])
16908 && REGNO (part[0][0]) == REGNO (part[1][2]))
16910 && REGNO (part[0][0]) == REGNO (part[1][3]))))
16912 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
16914 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
16916 operands[2 + i] = part[0][j];
16917 operands[6 + i] = part[1][j];
16922 for (i = 0; i < nparts; i++)
16924 operands[2 + i] = part[0][i];
16925 operands[6 + i] = part[1][i];
16929 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
16930 if (optimize_insn_for_size_p ())
16932 for (j = 0; j < nparts - 1; j++)
16933 if (CONST_INT_P (operands[6 + j])
16934 && operands[6 + j] != const0_rtx
16935 && REG_P (operands[2 + j]))
16936 for (i = j; i < nparts - 1; i++)
16937 if (CONST_INT_P (operands[7 + i])
16938 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
16939 operands[7 + i] = operands[2 + j];
16942 for (i = 0; i < nparts; i++)
16943 emit_move_insn (operands[2 + i], operands[6 + i]);
16948 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
16949 left shift by a constant, either using a single shift or
16950 a sequence of add instructions. */
16953 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
16957 emit_insn ((mode == DImode
16959 : gen_adddi3) (operand, operand, operand));
16961 else if (!optimize_insn_for_size_p ()
16962 && count * ix86_cost->add <= ix86_cost->shift_const)
16965 for (i = 0; i < count; i++)
16967 emit_insn ((mode == DImode
16969 : gen_adddi3) (operand, operand, operand));
16973 emit_insn ((mode == DImode
16975 : gen_ashldi3) (operand, operand, GEN_INT (count)));
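/* Illustrative sketch (hypothetical helper): a small constant left
   shift as a chain of additions, which the code above prefers when
   count * add cost <= constant-shift cost.  Shown for count == 2.  */
static unsigned int
ashl_via_add_sketch (unsigned int x)
{
  x += x;   /* x << 1 */
  x += x;   /* x << 2 */
  return x;
}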
16979 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
16981 rtx low[2], high[2];
16983 const int single_width = mode == DImode ? 32 : 64;
16985 if (CONST_INT_P (operands[2]))
16987 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16988 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16990 if (count >= single_width)
16992 emit_move_insn (high[0], low[1]);
16993 emit_move_insn (low[0], const0_rtx);
16995 if (count > single_width)
16996 ix86_expand_ashl_const (high[0], count - single_width, mode);
17000 if (!rtx_equal_p (operands[0], operands[1]))
17001 emit_move_insn (operands[0], operands[1]);
17002 emit_insn ((mode == DImode
17004 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
17005 ix86_expand_ashl_const (low[0], count, mode);
17010 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17012 if (operands[1] == const1_rtx)
17014 /* Assuming we've chosen QImode-capable registers, then 1 << N
17015 can be done with two 32/64-bit shifts, no branches, no cmoves. */
17016 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
17018 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
17020 ix86_expand_clear (low[0]);
17021 ix86_expand_clear (high[0]);
17022 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
17024 d = gen_lowpart (QImode, low[0]);
17025 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17026 s = gen_rtx_EQ (QImode, flags, const0_rtx);
17027 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17029 d = gen_lowpart (QImode, high[0]);
17030 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17031 s = gen_rtx_NE (QImode, flags, const0_rtx);
17032 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17035 /* Otherwise, we can get the same results by manually performing
17036 a bit extract operation on bit 5/6, and then performing the two
17037 shifts. The two methods of getting 0/1 into low/high are exactly
17038 the same size. Avoiding the shift in the bit extract case helps
17039 pentium4 a bit; no one else seems to care much either way. */
17044 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
17045 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
17047 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
17048 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
17050 emit_insn ((mode == DImode
17052 : gen_lshrdi3) (high[0], high[0],
17053 GEN_INT (mode == DImode ? 5 : 6)));
17054 emit_insn ((mode == DImode
17056 : gen_anddi3) (high[0], high[0], const1_rtx));
17057 emit_move_insn (low[0], high[0]);
17058 emit_insn ((mode == DImode
17060 : gen_xordi3) (low[0], low[0], const1_rtx));
17063 emit_insn ((mode == DImode
17065 : gen_ashldi3) (low[0], low[0], operands[2]));
17066 emit_insn ((mode == DImode
17068 : gen_ashldi3) (high[0], high[0], operands[2]));
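/* Illustrative sketch (hypothetical helper): branch-free 1 << n split
   across two 32-bit halves, modeling the DImode case above.  Bit 5 of
   n selects the half that receives the 1; the hardware masks 32-bit
   shift counts to 5 bits, which the "& 31" below makes explicit.  */
static void
one_shl_sketch (unsigned int n /* 0..63 */, unsigned int *lo,
                unsigned int *hi)
{
  unsigned int h = (n >> 5) & 1;   /* 1 iff n >= 32 */
  unsigned int l = h ^ 1;
  *lo = l << (n & 31);
  *hi = h << (n & 31);
}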
17072 if (operands[1] == constm1_rtx)
17074 /* For -1 << N, we can avoid the shld instruction, because we
17075 know that we're shifting 0...31/63 ones into a -1. */
17076 emit_move_insn (low[0], constm1_rtx);
17077 if (optimize_insn_for_size_p ())
17078 emit_move_insn (high[0], low[0]);
17080 emit_move_insn (high[0], constm1_rtx);
17084 if (!rtx_equal_p (operands[0], operands[1]))
17085 emit_move_insn (operands[0], operands[1]);
17087 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17088 emit_insn ((mode == DImode
17090 : gen_x86_64_shld) (high[0], low[0], operands[2]));
17093 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
17095 if (TARGET_CMOVE && scratch)
17097 ix86_expand_clear (scratch);
17098 emit_insn ((mode == DImode
17099 ? gen_x86_shift_adj_1
17100 : gen_x86_64_shift_adj_1) (high[0], low[0], operands[2],
17104 emit_insn ((mode == DImode
17105 ? gen_x86_shift_adj_2
17106 : gen_x86_64_shift_adj_2) (high[0], low[0], operands[2]));
17110 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
17112 rtx low[2], high[2];
17114 const int single_width = mode == DImode ? 32 : 64;
17116 if (CONST_INT_P (operands[2]))
17118 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17119 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17121 if (count == single_width * 2 - 1)
17123 emit_move_insn (high[0], high[1]);
17124 emit_insn ((mode == DImode
17126 : gen_ashrdi3) (high[0], high[0],
17127 GEN_INT (single_width - 1)));
17128 emit_move_insn (low[0], high[0]);
17131 else if (count >= single_width)
17133 emit_move_insn (low[0], high[1]);
17134 emit_move_insn (high[0], low[0]);
17135 emit_insn ((mode == DImode
17137 : gen_ashrdi3) (high[0], high[0],
17138 GEN_INT (single_width - 1)));
17139 if (count > single_width)
17140 emit_insn ((mode == DImode
17142 : gen_ashrdi3) (low[0], low[0],
17143 GEN_INT (count - single_width)));
17147 if (!rtx_equal_p (operands[0], operands[1]))
17148 emit_move_insn (operands[0], operands[1]);
17149 emit_insn ((mode == DImode
17151 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17152 emit_insn ((mode == DImode
17154 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
17159 if (!rtx_equal_p (operands[0], operands[1]))
17160 emit_move_insn (operands[0], operands[1]);
17162 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17164 emit_insn ((mode == DImode
17166 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17167 emit_insn ((mode == DImode
17169 : gen_ashrdi3) (high[0], high[0], operands[2]));
17171 if (TARGET_CMOVE && scratch)
17173 emit_move_insn (scratch, high[0]);
17174 emit_insn ((mode == DImode
17176 : gen_ashrdi3) (scratch, scratch,
17177 GEN_INT (single_width - 1)));
17178 emit_insn ((mode == DImode
17179 ? gen_x86_shift_adj_1
17180 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
17184 emit_insn ((mode == DImode
17185 ? gen_x86_shift_adj_3
17186 : gen_x86_64_shift_adj_3) (low[0], high[0], operands[2]));
17191 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
17193 rtx low[2], high[2];
17195 const int single_width = mode == DImode ? 32 : 64;
17197 if (CONST_INT_P (operands[2]))
17199 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17200 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17202 if (count >= single_width)
17204 emit_move_insn (low[0], high[1]);
17205 ix86_expand_clear (high[0]);
17207 if (count > single_width)
17208 emit_insn ((mode == DImode
17210 : gen_lshrdi3) (low[0], low[0],
17211 GEN_INT (count - single_width)));
17215 if (!rtx_equal_p (operands[0], operands[1]))
17216 emit_move_insn (operands[0], operands[1]);
17217 emit_insn ((mode == DImode
17219 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17220 emit_insn ((mode == DImode
17222 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
17227 if (!rtx_equal_p (operands[0], operands[1]))
17228 emit_move_insn (operands[0], operands[1]);
17230 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17232 emit_insn ((mode == DImode
17234 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17235 emit_insn ((mode == DImode
17237 : gen_lshrdi3) (high[0], high[0], operands[2]));
17239 /* Heh. By reversing the arguments, we can reuse this pattern. */
17240 if (TARGET_CMOVE && scratch)
17242 ix86_expand_clear (scratch);
17243 emit_insn ((mode == DImode
17244 ? gen_x86_shift_adj_1
17245 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
17249 emit_insn ((mode == DImode
17250 ? gen_x86_shift_adj_2
17251 : gen_x86_64_shift_adj_2) (low[0], high[0], operands[2]));
17255 /* Predict just emitted jump instruction to be taken with probability PROB. */
17257 predict_jump (int prob)
17259 rtx insn = get_last_insn ();
17260 gcc_assert (JUMP_P (insn));
17261 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
17264 /* Helper function for the string operations below. Test whether
17265 VARIABLE is aligned to VALUE bytes; if so, jump to the label. */
17267 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
17269 rtx label = gen_label_rtx ();
17270 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
17271 if (GET_MODE (variable) == DImode)
17272 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
17274 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
17275 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
17278 predict_jump (REG_BR_PROB_BASE * 50 / 100);
17280 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17284 /* Adjust COUNTER by the VALUE. */
17286 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
17288 if (GET_MODE (countreg) == DImode)
17289 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
17291 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
17294 /* Zero-extend EXP, which may be SImode, into a Pmode register. */
17296 ix86_zero_extend_to_Pmode (rtx exp)
17299 if (GET_MODE (exp) == VOIDmode)
17300 return force_reg (Pmode, exp);
17301 if (GET_MODE (exp) == Pmode)
17302 return copy_to_mode_reg (Pmode, exp);
17303 r = gen_reg_rtx (Pmode);
17304 emit_insn (gen_zero_extendsidi2 (r, exp));
17308 /* Divide COUNTREG by SCALE. */
17310 scale_counter (rtx countreg, int scale)
17316 if (CONST_INT_P (countreg))
17317 return GEN_INT (INTVAL (countreg) / scale);
17318 gcc_assert (REG_P (countreg));
17320 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
17321 GEN_INT (exact_log2 (scale)),
17322 NULL, 1, OPTAB_DIRECT);
17326 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
17327 DImode for constant loop counts. */
17329 static enum machine_mode
17330 counter_mode (rtx count_exp)
17332 if (GET_MODE (count_exp) != VOIDmode)
17333 return GET_MODE (count_exp);
17334 if (!CONST_INT_P (count_exp))
17336 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
17341 /* When SRCPTR is non-NULL, output a simple loop that copies memory
17342 from SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times; the
17343 overall size is COUNT, specified in bytes. When SRCPTR is NULL, output
17344 the equivalent loop to set memory by VALUE (supposed to be in MODE).
17346 The size is rounded down to a whole number of chunks moved at once.
17347 SRCMEM and DESTMEM provide MEMrtx to feed proper aliasing info. */
17351 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
17352 rtx destptr, rtx srcptr, rtx value,
17353 rtx count, enum machine_mode mode, int unroll,
17356 rtx out_label, top_label, iter, tmp;
17357 enum machine_mode iter_mode = counter_mode (count);
17358 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
17359 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
17365 top_label = gen_label_rtx ();
17366 out_label = gen_label_rtx ();
17367 iter = gen_reg_rtx (iter_mode);
17369 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
17370 NULL, 1, OPTAB_DIRECT);
17371 /* Those two should combine. */
17372 if (piece_size == const1_rtx)
17374 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
17376 predict_jump (REG_BR_PROB_BASE * 10 / 100);
17378 emit_move_insn (iter, const0_rtx);
17380 emit_label (top_label);
17382 tmp = convert_modes (Pmode, iter_mode, iter, true);
17383 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
17384 destmem = change_address (destmem, mode, x_addr);
17388 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
17389 srcmem = change_address (srcmem, mode, y_addr);
17391 /* When unrolling for chips that reorder memory reads and writes,
17392 we can save registers by using a single temporary.
17393 Also, using 4 temporaries is overkill in 32-bit mode. */
17394 if (!TARGET_64BIT && 0)
17396 for (i = 0; i < unroll; i++)
17401 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17403 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17405 emit_move_insn (destmem, srcmem);
17411 gcc_assert (unroll <= 4);
17412 for (i = 0; i < unroll; i++)
17414 tmpreg[i] = gen_reg_rtx (mode);
17418 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17420 emit_move_insn (tmpreg[i], srcmem);
17422 for (i = 0; i < unroll; i++)
17427 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17429 emit_move_insn (destmem, tmpreg[i]);
17434 for (i = 0; i < unroll; i++)
17438 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17439 emit_move_insn (destmem, value);
17442 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
17443 true, OPTAB_LIB_WIDEN);
17445 emit_move_insn (iter, tmp);
17447 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
17449 if (expected_size != -1)
17451 expected_size /= GET_MODE_SIZE (mode) * unroll;
17452 if (expected_size == 0)
17454 else if (expected_size > REG_BR_PROB_BASE)
17455 predict_jump (REG_BR_PROB_BASE - 1);
17457 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
17460 predict_jump (REG_BR_PROB_BASE * 80 / 100);
17461 iter = ix86_zero_extend_to_Pmode (iter);
17462 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
17463 true, OPTAB_LIB_WIDEN);
17464 if (tmp != destptr)
17465 emit_move_insn (destptr, tmp);
17468 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
17469 true, OPTAB_LIB_WIDEN);
17471 emit_move_insn (srcptr, tmp);
17473 emit_label (out_label);
17476 /* Output "rep; mov" instruction.
17477 Arguments have the same meaning as for the previous function. */
17479 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
17480 rtx destptr, rtx srcptr,
17482 enum machine_mode mode)
17488 /* If the size is known, it is shorter to use rep movs. */
17489 if (mode == QImode && CONST_INT_P (count)
17490 && !(INTVAL (count) & 3))
17493 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17494 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17495 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
17496 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
17497 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17498 if (mode != QImode)
17500 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17501 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17502 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17503 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
17504 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17505 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
17509 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17510 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
17512 if (CONST_INT_P (count))
17514 count = GEN_INT (INTVAL (count)
17515 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17516 destmem = shallow_copy_rtx (destmem);
17517 srcmem = shallow_copy_rtx (srcmem);
17518 set_mem_size (destmem, count);
17519 set_mem_size (srcmem, count);
17523 if (MEM_SIZE (destmem))
17524 set_mem_size (destmem, NULL_RTX);
17525 if (MEM_SIZE (srcmem))
17526 set_mem_size (srcmem, NULL_RTX);
17528 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
17529 destexp, srcexp));
17532 /* Output "rep; stos" instruction.
17533 Arguments have the same meaning as for the previous function. */
17535 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
17536 rtx count, enum machine_mode mode,
17542 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17543 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17544 value = force_reg (mode, gen_lowpart (mode, value));
17545 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17546 if (mode != QImode)
17548 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17549 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17550 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17553 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17554 if (orig_value == const0_rtx && CONST_INT_P (count))
17556 count = GEN_INT (INTVAL (count)
17557 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17558 destmem = shallow_copy_rtx (destmem);
17559 set_mem_size (destmem, count);
17561 else if (MEM_SIZE (destmem))
17562 set_mem_size (destmem, NULL_RTX);
17563 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
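/* Illustrative sketch (hypothetical helper): what the emitted rep stos
   amounts to, shown byte-wise (the QImode case) -- countreg stores of
   VALUE, with the destination pointer left just past the filled area,
   as the destexp PLUS computed above records.  */
static unsigned char *
rep_stos_sketch (unsigned char *dst, unsigned char value,
                 unsigned long count)
{
  while (count--)
    *dst++ = value;
  return dst;   /* final destination pointer, i.e. destptr + count */
}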
17567 emit_strmov (rtx destmem, rtx srcmem,
17568 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
17570 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
17571 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
17572 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17575 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
17577 expand_movmem_epilogue (rtx destmem, rtx srcmem,
17578 rtx destptr, rtx srcptr, rtx count, int max_size)
17581 if (CONST_INT_P (count))
17583 HOST_WIDE_INT countval = INTVAL (count);
17586 if ((countval & 0x10) && max_size > 16)
17590 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17591 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
17594 gcc_unreachable ();
17597 if ((countval & 0x08) && max_size > 8)
17600 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17603 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17604 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
17608 if ((countval & 0x04) && max_size > 4)
17610 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17613 if ((countval & 0x02) && max_size > 2)
17615 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
17618 if ((countval & 0x01) && max_size > 1)
17620 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
17627 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
17628 count, 1, OPTAB_DIRECT);
17629 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
17630 count, QImode, 1, 4);
17634 /* When there are stringops, we can cheaply increase dest and src pointers.
17635 Otherwise we save code size by maintaining an offset (zero is readily
17636 available from the preceding rep operation) and using x86 addressing modes.
17638 if (TARGET_SINGLE_STRINGOP)
17642 rtx label = ix86_expand_aligntest (count, 4, true);
17643 src = change_address (srcmem, SImode, srcptr);
17644 dest = change_address (destmem, SImode, destptr);
17645 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17646 emit_label (label);
17647 LABEL_NUSES (label) = 1;
17651 rtx label = ix86_expand_aligntest (count, 2, true);
17652 src = change_address (srcmem, HImode, srcptr);
17653 dest = change_address (destmem, HImode, destptr);
17654 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17655 emit_label (label);
17656 LABEL_NUSES (label) = 1;
17660 rtx label = ix86_expand_aligntest (count, 1, true);
17661 src = change_address (srcmem, QImode, srcptr);
17662 dest = change_address (destmem, QImode, destptr);
17663 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17664 emit_label (label);
17665 LABEL_NUSES (label) = 1;
17670 rtx offset = force_reg (Pmode, const0_rtx);
17675 rtx label = ix86_expand_aligntest (count, 4, true);
17676 src = change_address (srcmem, SImode, srcptr);
17677 dest = change_address (destmem, SImode, destptr);
17678 emit_move_insn (dest, src);
17679 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
17680 true, OPTAB_LIB_WIDEN);
17682 emit_move_insn (offset, tmp);
17683 emit_label (label);
17684 LABEL_NUSES (label) = 1;
17688 rtx label = ix86_expand_aligntest (count, 2, true);
17689 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17690 src = change_address (srcmem, HImode, tmp);
17691 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17692 dest = change_address (destmem, HImode, tmp);
17693 emit_move_insn (dest, src);
17694 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
17695 true, OPTAB_LIB_WIDEN);
17697 emit_move_insn (offset, tmp);
17698 emit_label (label);
17699 LABEL_NUSES (label) = 1;
17703 rtx label = ix86_expand_aligntest (count, 1, true);
17704 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17705 src = change_address (srcmem, QImode, tmp);
17706 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17707 dest = change_address (destmem, QImode, tmp);
17708 emit_move_insn (dest, src);
17709 emit_label (label);
17710 LABEL_NUSES (label) = 1;
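/* Illustrative sketch (hypothetical helper): the constant-count
   epilogue above, modeled in C.  Each set bit of the residual count
   selects at most one move of the corresponding power-of-two size.  */
static void
copy_tail_sketch (unsigned char *dst, const unsigned char *src,
                  unsigned long n /* residual count, n < 16 */)
{
  unsigned long off = 0;
  if (n & 8) { __builtin_memcpy (dst + off, src + off, 8); off += 8; }
  if (n & 4) { __builtin_memcpy (dst + off, src + off, 4); off += 4; }
  if (n & 2) { __builtin_memcpy (dst + off, src + off, 2); off += 2; }
  if (n & 1) dst[off] = src[off];
}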
17715 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17717 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
17718 rtx count, int max_size)
17721 expand_simple_binop (counter_mode (count), AND, count,
17722 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
17723 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
17724 gen_lowpart (QImode, value), count, QImode,
17728 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17730 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
17734 if (CONST_INT_P (count))
17736 HOST_WIDE_INT countval = INTVAL (count);
17739 if ((countval & 0x10) && max_size > 16)
17743 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17744 emit_insn (gen_strset (destptr, dest, value));
17745 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
17746 emit_insn (gen_strset (destptr, dest, value));
17749 gcc_unreachable ();
17752 if ((countval & 0x08) && max_size > 8)
17756 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17757 emit_insn (gen_strset (destptr, dest, value));
17761 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17762 emit_insn (gen_strset (destptr, dest, value));
17763 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
17764 emit_insn (gen_strset (destptr, dest, value));
17768 if ((countval & 0x04) && max_size > 4)
17770 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17771 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17774 if ((countval & 0x02) && max_size > 2)
17776 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
17777 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17780 if ((countval & 0x01) && max_size > 1)
17782 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
17783 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17790 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
17795 rtx label = ix86_expand_aligntest (count, 16, true);
17798 dest = change_address (destmem, DImode, destptr);
17799 emit_insn (gen_strset (destptr, dest, value));
17800 emit_insn (gen_strset (destptr, dest, value));
17804 dest = change_address (destmem, SImode, destptr);
17805 emit_insn (gen_strset (destptr, dest, value));
17806 emit_insn (gen_strset (destptr, dest, value));
17807 emit_insn (gen_strset (destptr, dest, value));
17808 emit_insn (gen_strset (destptr, dest, value));
17810 emit_label (label);
17811 LABEL_NUSES (label) = 1;
17815 rtx label = ix86_expand_aligntest (count, 8, true);
17818 dest = change_address (destmem, DImode, destptr);
17819 emit_insn (gen_strset (destptr, dest, value));
17823 dest = change_address (destmem, SImode, destptr);
17824 emit_insn (gen_strset (destptr, dest, value));
17825 emit_insn (gen_strset (destptr, dest, value));
17827 emit_label (label);
17828 LABEL_NUSES (label) = 1;
17832 rtx label = ix86_expand_aligntest (count, 4, true);
17833 dest = change_address (destmem, SImode, destptr);
17834 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17835 emit_label (label);
17836 LABEL_NUSES (label) = 1;
17840 rtx label = ix86_expand_aligntest (count, 2, true);
17841 dest = change_address (destmem, HImode, destptr);
17842 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17843 emit_label (label);
17844 LABEL_NUSES (label) = 1;
17848 rtx label = ix86_expand_aligntest (count, 1, true);
17849 dest = change_address (destmem, QImode, destptr);
17850 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17851 emit_label (label);
17852 LABEL_NUSES (label) = 1;
17856 /* Copy enough from SRC to DEST to align DEST, known to be aligned to ALIGN,
17857 up to DESIRED_ALIGNMENT. */
17859 expand_movmem_prologue (rtx destmem, rtx srcmem,
17860 rtx destptr, rtx srcptr, rtx count,
17861 int align, int desired_alignment)
17863 if (align <= 1 && desired_alignment > 1)
17865 rtx label = ix86_expand_aligntest (destptr, 1, false);
17866 srcmem = change_address (srcmem, QImode, srcptr);
17867 destmem = change_address (destmem, QImode, destptr);
17868 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17869 ix86_adjust_counter (count, 1);
17870 emit_label (label);
17871 LABEL_NUSES (label) = 1;
17873 if (align <= 2 && desired_alignment > 2)
17875 rtx label = ix86_expand_aligntest (destptr, 2, false);
17876 srcmem = change_address (srcmem, HImode, srcptr);
17877 destmem = change_address (destmem, HImode, destptr);
17878 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17879 ix86_adjust_counter (count, 2);
17880 emit_label (label);
17881 LABEL_NUSES (label) = 1;
17883 if (align <= 4 && desired_alignment > 4)
17885 rtx label = ix86_expand_aligntest (destptr, 4, false);
17886 srcmem = change_address (srcmem, SImode, srcptr);
17887 destmem = change_address (destmem, SImode, destptr);
17888 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17889 ix86_adjust_counter (count, 4);
17890 emit_label (label);
17891 LABEL_NUSES (label) = 1;
17893 gcc_assert (desired_alignment <= 8);
17896 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
17897 ALIGN_BYTES is how many bytes need to be copied. */
17899 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
17900 int desired_align, int align_bytes)
17903 rtx src_size, dst_size;
17905 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
17906 if (src_align_bytes >= 0)
17907 src_align_bytes = desired_align - src_align_bytes;
17908 src_size = MEM_SIZE (src);
17909 dst_size = MEM_SIZE (dst);
17910 if (align_bytes & 1)
17912 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
17913 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
17915 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17917 if (align_bytes & 2)
17919 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
17920 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
17921 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
17922 set_mem_align (dst, 2 * BITS_PER_UNIT);
17923 if (src_align_bytes >= 0
17924 && (src_align_bytes & 1) == (align_bytes & 1)
17925 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
17926 set_mem_align (src, 2 * BITS_PER_UNIT);
17928 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17930 if (align_bytes & 4)
17932 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
17933 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
17934 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
17935 set_mem_align (dst, 4 * BITS_PER_UNIT);
17936 if (src_align_bytes >= 0)
17938 unsigned int src_align = 0;
17939 if ((src_align_bytes & 3) == (align_bytes & 3))
17941 else if ((src_align_bytes & 1) == (align_bytes & 1))
17943 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17944 set_mem_align (src, src_align * BITS_PER_UNIT);
17947 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17949 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
17950 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
17951 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
17952 set_mem_align (dst, desired_align * BITS_PER_UNIT);
17953 if (src_align_bytes >= 0)
17955 unsigned int src_align = 0;
17956 if ((src_align_bytes & 7) == (align_bytes & 7))
17958 else if ((src_align_bytes & 3) == (align_bytes & 3))
17960 else if ((src_align_bytes & 1) == (align_bytes & 1))
17962 if (src_align > (unsigned int) desired_align)
17963 src_align = desired_align;
17964 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17965 set_mem_align (src, src_align * BITS_PER_UNIT);
17968 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
17970 set_mem_size (dst, GEN_INT (INTVAL (src_size) - align_bytes));
17975 /* Set enough of DEST to align DEST, known to be aligned by ALIGN, to
17976 DESIRED_ALIGNMENT. */
17978 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
17979 int align, int desired_alignment)
17981 if (align <= 1 && desired_alignment > 1)
17983 rtx label = ix86_expand_aligntest (destptr, 1, false);
17984 destmem = change_address (destmem, QImode, destptr);
17985 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
17986 ix86_adjust_counter (count, 1);
17987 emit_label (label);
17988 LABEL_NUSES (label) = 1;
17990 if (align <= 2 && desired_alignment > 2)
17992 rtx label = ix86_expand_aligntest (destptr, 2, false);
17993 destmem = change_address (destmem, HImode, destptr);
17994 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
17995 ix86_adjust_counter (count, 2);
17996 emit_label (label);
17997 LABEL_NUSES (label) = 1;
17999 if (align <= 4 && desired_alignment > 4)
18001 rtx label = ix86_expand_aligntest (destptr, 4, false);
18002 destmem = change_address (destmem, SImode, destptr);
18003 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
18004 ix86_adjust_counter (count, 4);
18005 emit_label (label);
18006 LABEL_NUSES (label) = 1;
18008 gcc_assert (desired_alignment <= 8);
18011 /* Set enough of DST to align DST, known to be aligned by ALIGN, to
18012 DESIRED_ALIGN. ALIGN_BYTES is how many bytes need to be stored. */
18014 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
18015 int desired_align, int align_bytes)
18018 rtx dst_size = MEM_SIZE (dst);
18019 if (align_bytes & 1)
18021 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
18023 emit_insn (gen_strset (destreg, dst,
18024 gen_lowpart (QImode, value)));
18026 if (align_bytes & 2)
18028 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
18029 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
18030 set_mem_align (dst, 2 * BITS_PER_UNIT);
18032 emit_insn (gen_strset (destreg, dst,
18033 gen_lowpart (HImode, value)));
18035 if (align_bytes & 4)
18037 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
18038 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
18039 set_mem_align (dst, 4 * BITS_PER_UNIT);
18041 emit_insn (gen_strset (destreg, dst,
18042 gen_lowpart (SImode, value)));
18044 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
18045 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
18046 set_mem_align (dst, desired_align * BITS_PER_UNIT);
18048 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18052 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
18053 static enum stringop_alg
18054 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
18055 int *dynamic_check)
18057 const struct stringop_algs * algs;
18058 bool optimize_for_speed;
18059 /* Algorithms using the rep prefix want at least edi and ecx;
18060 additionally, memset wants eax and memcpy wants esi. Don't
18061 consider such algorithms if the user has appropriated those
18062 registers for their own purposes. */
18063 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
18065 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
18067 #define ALG_USABLE_P(alg) (rep_prefix_usable \
18068 || (alg != rep_prefix_1_byte \
18069 && alg != rep_prefix_4_byte \
18070 && alg != rep_prefix_8_byte))
18071 const struct processor_costs *cost;
18073 /* Even if the string operation call is cold, we still might spend a lot
18074 of time processing large blocks. */
18075 if (optimize_function_for_size_p (cfun)
18076 || (optimize_insn_for_size_p ()
18077 && expected_size != -1 && expected_size < 256))
18078 optimize_for_speed = false;
18080 optimize_for_speed = true;
18082 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
18084 *dynamic_check = -1;
18086 algs = &cost->memset[TARGET_64BIT != 0];
18088 algs = &cost->memcpy[TARGET_64BIT != 0];
18089 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
18090 return stringop_alg;
18091 /* rep; movq or rep; movl is the smallest variant. */
18092 else if (!optimize_for_speed)
18094 if (!count || (count & 3))
18095 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
18097 return rep_prefix_usable ? rep_prefix_4_byte : loop;
18099 /* Very tiny blocks are best handled via the loop; REP is expensive to set up. */
18101 else if (expected_size != -1 && expected_size < 4)
18102 return loop_1_byte;
18103 else if (expected_size != -1)
18106 enum stringop_alg alg = libcall;
18107 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18109 /* We get here if the algorithms that were not libcall-based
18110 were rep-prefix based and we are unable to use rep prefixes
18111 based on global register usage. Break out of the loop and
18112 use the heuristic below. */
18113 if (algs->size[i].max == 0)
18115 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
18117 enum stringop_alg candidate = algs->size[i].alg;
18119 if (candidate != libcall && ALG_USABLE_P (candidate))
18121 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
18122 last non-libcall inline algorithm. */
18123 if (TARGET_INLINE_ALL_STRINGOPS)
18125 /* When the current size is best to be copied by a libcall,
18126 but we are still forced to inline, run the heuristic below
18127 that will pick code for medium sized blocks. */
18128 if (alg != libcall)
18132 else if (ALG_USABLE_P (candidate))
18136 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
18138 /* When asked to inline the call anyway, try to pick a meaningful choice.
18139 We look for the maximal size of block that is faster to copy by hand and
18140 take blocks of at most that size, guessing that the average size will
18141 be roughly half of the block.
18143 If this turns out to be bad, we might simply specify the preferred
18144 choice in ix86_costs. */
18145 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18146 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
18149 enum stringop_alg alg;
18151 bool any_alg_usable_p = true;
18153 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18155 enum stringop_alg candidate = algs->size[i].alg;
18156 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
18158 if (candidate != libcall && candidate
18159 && ALG_USABLE_P (candidate))
18160 max = algs->size[i].max;
18162 /* If there aren't any usable algorithms, then recursing on
18163 smaller sizes isn't going to find anything. Just return the
18164 simple byte-at-a-time copy loop. */
18165 if (!any_alg_usable_p)
18167 /* Pick something reasonable. */
18168 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18169 *dynamic_check = 128;
18170 return loop_1_byte;
18174 alg = decide_alg (count, max / 2, memset, dynamic_check);
18175 gcc_assert (*dynamic_check == -1);
18176 gcc_assert (alg != libcall);
18177 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18178 *dynamic_check = max;
18181 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
18182 #undef ALG_USABLE_P
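/* Illustrative sketch, not GCC source: the core of the size-table walk
   above.  Each entry gives the largest block size its algorithm should
   handle; max == -1 marks the catch-all entry for unbounded sizes.  */
struct alg_entry_sketch { long max; int alg; };

static int
pick_alg_sketch (const struct alg_entry_sketch *table, int n,
                 long expected_size, int libcall_alg)
{
  int i;
  for (i = 0; i < n; i++)
    if (table[i].max == -1 || table[i].max >= expected_size)
      return table[i].alg;
  return libcall_alg;  /* nothing inline fits: fall back to a libcall */
}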
18185 /* Decide on alignment. We know that the operand is already aligned to ALIGN
18186 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
18188 decide_alignment (int align,
18189 enum stringop_alg alg,
18192 int desired_align = 0;
18196 gcc_unreachable ();
18198 case unrolled_loop:
18199 desired_align = GET_MODE_SIZE (Pmode);
18201 case rep_prefix_8_byte:
18204 case rep_prefix_4_byte:
18205 /* PentiumPro has special logic triggering for 8-byte aligned blocks,
18206 copying a whole cache line at once. */
18207 if (TARGET_PENTIUMPRO)
18212 case rep_prefix_1_byte:
18213 /* PentiumPro has special logic triggering for 8-byte aligned blocks,
18214 copying a whole cache line at once. */
18215 if (TARGET_PENTIUMPRO)
18229 if (desired_align < align)
18230 desired_align = align;
18231 if (expected_size != -1 && expected_size < 4)
18232 desired_align = align;
18233 return desired_align;
18236 /* Return the smallest power of 2 greater than VAL. */
18238 smallest_pow2_greater_than (int val)
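/* A minimal illustrative sketch of the computation named above, assuming
   the usual doubling loop and a non-negative VAL whose result does not
   overflow int:  */
static int
smallest_pow2_greater_than_sketch (int val)
{
  int ret = 1;
  while (ret <= val)
    ret <<= 1;
  return ret;  /* e.g. 4 -> 8, 5 -> 8, 0 -> 1 */
}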
18246 /* Expand string move (memcpy) operation. Use i386 string operations when
18247 profitable. expand_setmem contains similar code. The code depends upon
18248 architecture, block size and alignment, but always has the same overall structure:
18251 1) Prologue guard: Conditional that jumps up to epilogues for small
18252 blocks that can be handled by epilogue alone. This is faster but
18253 also needed for correctness, since the prologue assumes the block is larger
18254 than the desired alignment.
18256 Optional dynamic check for size and libcall for large
18257 blocks is emitted here too, with -minline-stringops-dynamically.
18259 2) Prologue: copy first few bytes in order to get destination aligned
18260 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
18261 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
18262 We emit either a jump tree on power of two sized blocks, or a byte loop.
18264 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
18265 with specified algorithm.
18267 4) Epilogue: code copying tail of the block that is too small to be
18268 handled by main body (or up to size guarded by prologue guard). */
18271 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
18272 rtx expected_align_exp, rtx expected_size_exp)
18278 rtx jump_around_label = NULL;
18279 HOST_WIDE_INT align = 1;
18280 unsigned HOST_WIDE_INT count = 0;
18281 HOST_WIDE_INT expected_size = -1;
18282 int size_needed = 0, epilogue_size_needed;
18283 int desired_align = 0, align_bytes = 0;
18284 enum stringop_alg alg;
18286 bool need_zero_guard = false;
18288 if (CONST_INT_P (align_exp))
18289 align = INTVAL (align_exp);
18290 /* i386 can do misaligned access at a reasonably increased cost. */
18291 if (CONST_INT_P (expected_align_exp)
18292 && INTVAL (expected_align_exp) > align)
18293 align = INTVAL (expected_align_exp);
18294 /* ALIGN is the minimum of destination and source alignment, but we care here
18295 just about destination alignment. */
18296 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
18297 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
18299 if (CONST_INT_P (count_exp))
18300 count = expected_size = INTVAL (count_exp);
18301 if (CONST_INT_P (expected_size_exp) && count == 0)
18302 expected_size = INTVAL (expected_size_exp);
18304 /* Make sure we don't need to care about overflow later on. */
18305 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18308 /* Step 0: Decide on preferred algorithm, desired alignment and
18309 size of chunks to be copied by main loop. */
18311 alg = decide_alg (count, expected_size, false, &dynamic_check);
18312 desired_align = decide_alignment (align, alg, expected_size);
18314 if (!TARGET_ALIGN_STRINGOPS)
18315 align = desired_align;
18317 if (alg == libcall)
18319 gcc_assert (alg != no_stringop);
18321 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
18322 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18323 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
18328 gcc_unreachable ();
18330 need_zero_guard = true;
18331 size_needed = GET_MODE_SIZE (Pmode);
18333 case unrolled_loop:
18334 need_zero_guard = true;
18335 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
18337 case rep_prefix_8_byte:
18340 case rep_prefix_4_byte:
18343 case rep_prefix_1_byte:
18347 need_zero_guard = true;
18352 epilogue_size_needed = size_needed;
18354 /* Step 1: Prologue guard. */
18356 /* Alignment code needs count to be in register. */
18357 if (CONST_INT_P (count_exp) && desired_align > align)
18359 if (INTVAL (count_exp) > desired_align
18360 && INTVAL (count_exp) > size_needed)
18363 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18364 if (align_bytes <= 0)
18367 align_bytes = desired_align - align_bytes;
18369 if (align_bytes == 0)
18370 count_exp = force_reg (counter_mode (count_exp), count_exp);
18372 gcc_assert (desired_align >= 1 && align >= 1);
18374 /* Ensure that alignment prologue won't copy past end of block. */
18375 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18377 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18378 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18379 Make sure it is a power of 2. */
18380 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18384 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18386 /* If main algorithm works on QImode, no epilogue is needed.
18387 For small sizes just don't align anything. */
18388 if (size_needed == 1)
18389 desired_align = align;
18396 label = gen_label_rtx ();
18397 emit_cmp_and_jump_insns (count_exp,
18398 GEN_INT (epilogue_size_needed),
18399 LTU, 0, counter_mode (count_exp), 1, label);
18400 if (expected_size == -1 || expected_size < epilogue_size_needed)
18401 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18403 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18407 /* Emit code to decide at runtime whether a library call or inline code should be used. */
18409 if (dynamic_check != -1)
18411 if (CONST_INT_P (count_exp))
18413 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
18415 emit_block_move_via_libcall (dst, src, count_exp, false);
18416 count_exp = const0_rtx;
18422 rtx hot_label = gen_label_rtx ();
18423 jump_around_label = gen_label_rtx ();
18424 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18425 LEU, 0, GET_MODE (count_exp), 1, hot_label);
18426 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18427 emit_block_move_via_libcall (dst, src, count_exp, false);
18428 emit_jump (jump_around_label);
18429 emit_label (hot_label);
18433 /* Step 2: Alignment prologue. */
18435 if (desired_align > align)
18437 if (align_bytes == 0)
18439 /* Except for the first move in epilogue, we no longer know
18440 constant offset in aliasing info. It doesn't seem worth
18441 the pain to maintain it for the first move, so throw away the info early. */
18443 src = change_address (src, BLKmode, srcreg);
18444 dst = change_address (dst, BLKmode, destreg);
18445 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
18450 /* If we know how many bytes need to be stored before dst is
18451 sufficiently aligned, maintain aliasing info accurately. */
18452 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
18453 desired_align, align_bytes);
18454 count_exp = plus_constant (count_exp, -align_bytes);
18455 count -= align_bytes;
18457 if (need_zero_guard
18458 && (count < (unsigned HOST_WIDE_INT) size_needed
18459 || (align_bytes == 0
18460 && count < ((unsigned HOST_WIDE_INT) size_needed
18461 + desired_align - align))))
18463 /* It is possible that we copied enough so the main loop will not run at all. */
18465 gcc_assert (size_needed > 1);
18466 if (label == NULL_RTX)
18467 label = gen_label_rtx ();
18468 emit_cmp_and_jump_insns (count_exp,
18469 GEN_INT (size_needed),
18470 LTU, 0, counter_mode (count_exp), 1, label);
18471 if (expected_size == -1
18472 || expected_size < (desired_align - align) / 2 + size_needed)
18473 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18475 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18478 if (label && size_needed == 1)
18480 emit_label (label);
18481 LABEL_NUSES (label) = 1;
18483 epilogue_size_needed = 1;
18485 else if (label == NULL_RTX)
18486 epilogue_size_needed = size_needed;
18488 /* Step 3: Main loop. */
18494 gcc_unreachable ();
18496 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18497 count_exp, QImode, 1, expected_size);
18500 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18501 count_exp, Pmode, 1, expected_size);
18503 case unrolled_loop:
18504 /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
18505 registers for 4 temporaries anyway. */
18506 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18507 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
18510 case rep_prefix_8_byte:
18511 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18514 case rep_prefix_4_byte:
18515 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18518 case rep_prefix_1_byte:
18519 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18523 /* Properly adjust the offsets of the src and dest memory for aliasing. */
18524 if (CONST_INT_P (count_exp))
18526 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
18527 (count / size_needed) * size_needed);
18528 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18529 (count / size_needed) * size_needed);
18533 src = change_address (src, BLKmode, srcreg);
18534 dst = change_address (dst, BLKmode, destreg);
18537 /* Step 4: Epilogue to copy the remaining bytes. */
18541 /* When the main loop is done, COUNT_EXP might hold the original count,
18542 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18543 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18544 bytes. Compensate if needed. */
18546 if (size_needed < epilogue_size_needed)
18549 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18550 GEN_INT (size_needed - 1), count_exp, 1,
18552 if (tmp != count_exp)
18553 emit_move_insn (count_exp, tmp);
18555 emit_label (label);
18556 LABEL_NUSES (label) = 1;
18559 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18560 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
18561 epilogue_size_needed);
18562 if (jump_around_label)
18563 emit_label (jump_around_label);
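/* Illustrative sketch, not GCC source: why the epilogue bound must be a
   power of two.  After the main loop consumes SIZE_NEEDED-byte chunks, the
   leftover byte count is exactly the low bits of the counter, so a single
   AND (as emitted above) recovers it.  */
static unsigned long
epilogue_bytes_sketch (unsigned long count, unsigned long size_needed)
{
  /* size_needed is a power of two, so this equals count % size_needed.  */
  return count & (size_needed - 1);
}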
18567 /* Helper function for memset. For QImode value 0xXY produce
18568 0xXYXYXYXY of the width specified by MODE. This is essentially
18569 a multiplication by 0x01010101, but we can do slightly better than
18570 synth_mult by unwinding the sequence by hand on CPUs with slow multiply. */
18573 promote_duplicated_reg (enum machine_mode mode, rtx val)
18575 enum machine_mode valmode = GET_MODE (val);
18577 int nops = mode == DImode ? 3 : 2;
18579 gcc_assert (mode == SImode || mode == DImode);
18580 if (val == const0_rtx)
18581 return copy_to_mode_reg (mode, const0_rtx);
18582 if (CONST_INT_P (val))
18584 HOST_WIDE_INT v = INTVAL (val) & 255;
18588 if (mode == DImode)
18589 v |= (v << 16) << 16;
18590 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
18593 if (valmode == VOIDmode)
18595 if (valmode != QImode)
18596 val = gen_lowpart (QImode, val);
18597 if (mode == QImode)
18599 if (!TARGET_PARTIAL_REG_STALL)
18601 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
18602 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
18603 <= (ix86_cost->shift_const + ix86_cost->add) * nops
18604 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
18606 rtx reg = convert_modes (mode, QImode, val, true);
18607 tmp = promote_duplicated_reg (mode, const1_rtx);
18608 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
18613 rtx reg = convert_modes (mode, QImode, val, true);
18615 if (!TARGET_PARTIAL_REG_STALL)
18616 if (mode == SImode)
18617 emit_insn (gen_movsi_insv_1 (reg, reg));
18619 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
18622 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
18623 NULL, 1, OPTAB_DIRECT);
18625 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18627 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
18628 NULL, 1, OPTAB_DIRECT);
18629 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18630 if (mode == SImode)
18632 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
18633 NULL, 1, OPTAB_DIRECT);
18634 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
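/* Illustrative sketch, not GCC source: the shift-and-IOR ladder above is
   the unwound form of multiplying the byte by 0x01010101 (or its DImode
   counterpart), doubling the replicated width at each step.  */
static unsigned long long
broadcast_byte_sketch (unsigned char byte, int sixty_four_bit)
{
  unsigned long long v = byte;
  v |= v << 8;    /* XY       -> XYXY */
  v |= v << 16;   /* XYXY     -> XYXYXYXY */
  if (sixty_four_bit)
    v |= v << 32; /* XYXYXYXY -> XYXYXYXYXYXYXYXY */
  return v;
}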
18639 /* Duplicate value VAL using promote_duplicated_reg into maximal size that will
18640 be needed by main loop copying SIZE_NEEDED chunks and prologue getting
18641 alignment from ALIGN to DESIRED_ALIGN. */
18643 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
18648 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
18649 promoted_val = promote_duplicated_reg (DImode, val);
18650 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
18651 promoted_val = promote_duplicated_reg (SImode, val);
18652 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
18653 promoted_val = promote_duplicated_reg (HImode, val);
18655 promoted_val = val;
18657 return promoted_val;
18660 /* Expand string set operation (memset). Use i386 string operations when
18661 profitable. See expand_movmem comment for explanation of individual
18662 steps performed. */
18664 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
18665 rtx expected_align_exp, rtx expected_size_exp)
18670 rtx jump_around_label = NULL;
18671 HOST_WIDE_INT align = 1;
18672 unsigned HOST_WIDE_INT count = 0;
18673 HOST_WIDE_INT expected_size = -1;
18674 int size_needed = 0, epilogue_size_needed;
18675 int desired_align = 0, align_bytes = 0;
18676 enum stringop_alg alg;
18677 rtx promoted_val = NULL;
18678 bool force_loopy_epilogue = false;
18680 bool need_zero_guard = false;
18682 if (CONST_INT_P (align_exp))
18683 align = INTVAL (align_exp);
18684 /* i386 can do misaligned access at a reasonably increased cost. */
18685 if (CONST_INT_P (expected_align_exp)
18686 && INTVAL (expected_align_exp) > align)
18687 align = INTVAL (expected_align_exp);
18688 if (CONST_INT_P (count_exp))
18689 count = expected_size = INTVAL (count_exp);
18690 if (CONST_INT_P (expected_size_exp) && count == 0)
18691 expected_size = INTVAL (expected_size_exp);
18693 /* Make sure we don't need to care about overflow later on. */
18694 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18697 /* Step 0: Decide on preferred algorithm, desired alignment and
18698 size of chunks to be copied by main loop. */
18700 alg = decide_alg (count, expected_size, true, &dynamic_check);
18701 desired_align = decide_alignment (align, alg, expected_size);
18703 if (!TARGET_ALIGN_STRINGOPS)
18704 align = desired_align;
18706 if (alg == libcall)
18708 gcc_assert (alg != no_stringop);
18710 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
18711 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18716 gcc_unreachable ();
18718 need_zero_guard = true;
18719 size_needed = GET_MODE_SIZE (Pmode);
18721 case unrolled_loop:
18722 need_zero_guard = true;
18723 size_needed = GET_MODE_SIZE (Pmode) * 4;
18725 case rep_prefix_8_byte:
18728 case rep_prefix_4_byte:
18731 case rep_prefix_1_byte:
18735 need_zero_guard = true;
18739 epilogue_size_needed = size_needed;
18741 /* Step 1: Prologue guard. */
18743 /* Alignment code needs count to be in register. */
18744 if (CONST_INT_P (count_exp) && desired_align > align)
18746 if (INTVAL (count_exp) > desired_align
18747 && INTVAL (count_exp) > size_needed)
18750 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18751 if (align_bytes <= 0)
18754 align_bytes = desired_align - align_bytes;
18756 if (align_bytes == 0)
18758 enum machine_mode mode = SImode;
18759 if (TARGET_64BIT && (count & ~0xffffffff))
18761 count_exp = force_reg (mode, count_exp);
18764 /* Do the cheap promotion to allow better CSE across the
18765 main loop and epilogue (i.e. one load of the big constant in
18766 front of all code). */
18767 if (CONST_INT_P (val_exp))
18768 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18769 desired_align, align);
18770 /* Ensure that alignment prologue won't copy past end of block. */
18771 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18773 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18774 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18775 Make sure it is power of 2. */
18776 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18778 /* To improve performance of small blocks, we jump around the VAL
18779 promoting code. This means that if the promoted VAL is not constant,
18780 we might not use it in the epilogue and have to use the byte loop variant. */
18782 if (epilogue_size_needed > 2 && !promoted_val)
18783 force_loopy_epilogue = true;
18786 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18788 /* If main algorithm works on QImode, no epilogue is needed.
18789 For small sizes just don't align anything. */
18790 if (size_needed == 1)
18791 desired_align = align;
18798 label = gen_label_rtx ();
18799 emit_cmp_and_jump_insns (count_exp,
18800 GEN_INT (epilogue_size_needed),
18801 LTU, 0, counter_mode (count_exp), 1, label);
18802 if (expected_size == -1 || expected_size <= epilogue_size_needed)
18803 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18805 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18808 if (dynamic_check != -1)
18810 rtx hot_label = gen_label_rtx ();
18811 jump_around_label = gen_label_rtx ();
18812 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18813 LEU, 0, counter_mode (count_exp), 1, hot_label);
18814 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18815 set_storage_via_libcall (dst, count_exp, val_exp, false);
18816 emit_jump (jump_around_label);
18817 emit_label (hot_label);
18820 /* Step 2: Alignment prologue. */
18822 /* Do the expensive promotion once we branched off the small blocks. */
18824 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18825 desired_align, align);
18826 gcc_assert (desired_align >= 1 && align >= 1);
18828 if (desired_align > align)
18830 if (align_bytes == 0)
18832 /* Except for the first move in epilogue, we no longer know
18833 constant offset in aliasing info. It doesn't seem worth
18834 the pain to maintain it for the first move, so throw away the info early. */
18836 dst = change_address (dst, BLKmode, destreg);
18837 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
18842 /* If we know how many bytes need to be stored before dst is
18843 sufficiently aligned, maintain aliasing info accurately. */
18844 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
18845 desired_align, align_bytes);
18846 count_exp = plus_constant (count_exp, -align_bytes);
18847 count -= align_bytes;
18849 if (need_zero_guard
18850 && (count < (unsigned HOST_WIDE_INT) size_needed
18851 || (align_bytes == 0
18852 && count < ((unsigned HOST_WIDE_INT) size_needed
18853 + desired_align - align))))
18855 /* It is possible that we copied enough so the main loop will not run at all. */
18857 gcc_assert (size_needed > 1);
18858 if (label == NULL_RTX)
18859 label = gen_label_rtx ();
18860 emit_cmp_and_jump_insns (count_exp,
18861 GEN_INT (size_needed),
18862 LTU, 0, counter_mode (count_exp), 1, label);
18863 if (expected_size == -1
18864 || expected_size < (desired_align - align) / 2 + size_needed)
18865 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18867 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18870 if (label && size_needed == 1)
18872 emit_label (label);
18873 LABEL_NUSES (label) = 1;
18875 promoted_val = val_exp;
18876 epilogue_size_needed = 1;
18878 else if (label == NULL_RTX)
18879 epilogue_size_needed = size_needed;
18881 /* Step 3: Main loop. */
18887 gcc_unreachable ();
18889 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18890 count_exp, QImode, 1, expected_size);
18893 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18894 count_exp, Pmode, 1, expected_size);
18896 case unrolled_loop:
18897 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18898 count_exp, Pmode, 4, expected_size);
18900 case rep_prefix_8_byte:
18901 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18904 case rep_prefix_4_byte:
18905 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18908 case rep_prefix_1_byte:
18909 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18913 /* Properly adjust the offset of the dest memory for aliasing. */
18914 if (CONST_INT_P (count_exp))
18915 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18916 (count / size_needed) * size_needed);
18918 dst = change_address (dst, BLKmode, destreg);
18920 /* Step 4: Epilogue to copy the remaining bytes. */
18924 /* When the main loop is done, COUNT_EXP might hold the original count,
18925 while we want to set only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18926 Epilogue code will actually set COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18927 bytes. Compensate if needed. */
18929 if (size_needed < epilogue_size_needed)
18932 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18933 GEN_INT (size_needed - 1), count_exp, 1,
18935 if (tmp != count_exp)
18936 emit_move_insn (count_exp, tmp);
18938 emit_label (label);
18939 LABEL_NUSES (label) = 1;
18942 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18944 if (force_loopy_epilogue)
18945 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
18946 epilogue_size_needed);
18948 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
18949 epilogue_size_needed);
18951 if (jump_around_label)
18952 emit_label (jump_around_label);
18956 /* Expand the appropriate insns for doing strlen if not just doing repnz; scasb.
18959 out = result, initialized with the start address
18960 align_rtx = alignment of the address.
18961 scratch = scratch register, initialized with the start address when
18962 not aligned, otherwise undefined
18964 This is just the body. It needs the initializations mentioned above and
18965 some address computing at the end. These things are done in i386.md. */
18968 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
18972 rtx align_2_label = NULL_RTX;
18973 rtx align_3_label = NULL_RTX;
18974 rtx align_4_label = gen_label_rtx ();
18975 rtx end_0_label = gen_label_rtx ();
18977 rtx tmpreg = gen_reg_rtx (SImode);
18978 rtx scratch = gen_reg_rtx (SImode);
18982 if (CONST_INT_P (align_rtx))
18983 align = INTVAL (align_rtx);
18985 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
18987 /* Is there a known alignment and is it less than 4? */
18990 rtx scratch1 = gen_reg_rtx (Pmode);
18991 emit_move_insn (scratch1, out);
18992 /* Is there a known alignment and is it not 2? */
18995 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
18996 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
18998 /* Leave just the 3 lower bits. */
18999 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
19000 NULL_RTX, 0, OPTAB_WIDEN);
19002 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19003 Pmode, 1, align_4_label);
19004 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
19005 Pmode, 1, align_2_label);
19006 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
19007 Pmode, 1, align_3_label);
19011 /* Since the alignment is 2, we have to check 2 or 0 bytes;
19012 check if it is aligned to a 4-byte boundary. */
19014 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
19015 NULL_RTX, 0, OPTAB_WIDEN);
19017 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19018 Pmode, 1, align_4_label);
19021 mem = change_address (src, QImode, out);
19023 /* Now compare the bytes. */
19025 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
19026 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
19027 QImode, 1, end_0_label);
19029 /* Increment the address. */
19030 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19032 /* Not needed with an alignment of 2 */
19035 emit_label (align_2_label);
19037 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19040 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19042 emit_label (align_3_label);
19045 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19048 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19051 /* Generate loop to check 4 bytes at a time. It is not a good idea to
19052 align this loop; it only enlarges the program and does not speed it up. */
19054 emit_label (align_4_label);
19056 mem = change_address (src, SImode, out);
19057 emit_move_insn (scratch, mem);
19058 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
19060 /* This formula yields a nonzero result iff one of the bytes is zero.
19061 This saves three branches inside the loop and many cycles. */
19063 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
19064 emit_insn (gen_one_cmplsi2 (scratch, scratch));
19065 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
19066 emit_insn (gen_andsi3 (tmpreg, tmpreg,
19067 gen_int_mode (0x80808080, SImode)));
19068 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
19073 rtx reg = gen_reg_rtx (SImode);
19074 rtx reg2 = gen_reg_rtx (Pmode);
19075 emit_move_insn (reg, tmpreg);
19076 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
19078 /* If zero is not in the first two bytes, move two bytes forward. */
19079 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19080 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19081 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19082 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
19083 gen_rtx_IF_THEN_ELSE (SImode, tmp,
19086 /* Emit lea manually to avoid clobbering of flags. */
19087 emit_insn (gen_rtx_SET (SImode, reg2,
19088 gen_rtx_PLUS (Pmode, out, const2_rtx)));
19090 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19091 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19092 emit_insn (gen_rtx_SET (VOIDmode, out,
19093 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
19099 rtx end_2_label = gen_label_rtx ();
19100 /* Is zero in the first two bytes? */
19102 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19103 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19104 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
19105 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
19106 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
19108 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
19109 JUMP_LABEL (tmp) = end_2_label;
19111 /* Not in the first two. Move two bytes forward. */
19112 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
19113 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
19115 emit_label (end_2_label);
19119 /* Avoid branch in fixing the byte. */
19120 tmpreg = gen_lowpart (QImode, tmpreg);
19121 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
19122 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
19123 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
19124 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), tmp, cmp));
19126 emit_label (end_0_label);
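/* Illustrative sketch, not GCC source: the add/complement/and sequence
   emitted above is the classic branch-free zero-byte test used by the
   4-bytes-at-a-time loop.  */
static int
word_has_zero_byte_sketch (unsigned int x)
{
  /* x - 0x01010101 turns on the top bit of every byte that borrowed
     through zero; & ~x discards bytes whose top bit was already set.  */
  return ((x - 0x01010101u) & ~x & 0x80808080u) != 0;
}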
19129 /* Expand strlen. */
19132 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
19134 rtx addr, scratch1, scratch2, scratch3, scratch4;
19136 /* The generic case of the strlen expander is long. Avoid expanding it
19137 unless TARGET_INLINE_ALL_STRINGOPS. */
19139 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19140 && !TARGET_INLINE_ALL_STRINGOPS
19141 && !optimize_insn_for_size_p ()
19142 && (!CONST_INT_P (align) || INTVAL (align) < 4))
19145 addr = force_reg (Pmode, XEXP (src, 0));
19146 scratch1 = gen_reg_rtx (Pmode);
19148 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19149 && !optimize_insn_for_size_p ())
19151 /* Well, it seems that some optimizer does not combine a call like
19152 foo(strlen(bar), strlen(bar));
19153 when the move and the subtraction are done here. It calculates
19154 the length just once when these instructions are done inside of
19155 output_strlen_unroll(). But since &bar[strlen(bar)] is
19156 often used and this uses one fewer register for the lifetime of
19157 output_strlen_unroll(), this is better. */
19159 emit_move_insn (out, addr);
19161 ix86_expand_strlensi_unroll_1 (out, src, align);
19163 /* strlensi_unroll_1 returns the address of the zero at the end of
19164 the string, like memchr(), so compute the length by subtracting
19165 the start address. */
19166 emit_insn ((*ix86_gen_sub3) (out, out, addr));
19172 /* Can't use this if the user has appropriated eax, ecx, or edi. */
19173 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
19176 scratch2 = gen_reg_rtx (Pmode);
19177 scratch3 = gen_reg_rtx (Pmode);
19178 scratch4 = force_reg (Pmode, constm1_rtx);
19180 emit_move_insn (scratch3, addr);
19181 eoschar = force_reg (QImode, eoschar);
19183 src = replace_equiv_address_nv (src, scratch3);
19185 /* If .md starts supporting :P, this can be done in .md. */
19186 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
19187 scratch4), UNSPEC_SCAS);
19188 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
19189 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
19190 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
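/* Illustrative sketch, not GCC source: recovering the length from the
   scasb counter.  scratch4 above seeds the count register with -1; the
   scan decrements it once per byte examined, including the terminator, so
   the complement-and-decrement emitted above yields strlen.  */
static unsigned long
strlen_from_scas_sketch (unsigned long count_after_scan)
{
  /* count_after_scan == -2 - len, hence ~count_after_scan - 1 == len.  */
  return ~count_after_scan - 1;
}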
19195 /* For a given symbol (function), construct code to compute the address of its
19196 PLT entry in the large x86-64 PIC model. */
19198 construct_plt_address (rtx symbol)
19200 rtx tmp = gen_reg_rtx (Pmode);
19201 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
19203 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
19204 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
19206 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
19207 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
19212 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
19214 rtx pop, int sibcall)
19216 rtx use = NULL, call;
19218 if (pop == const0_rtx)
19220 gcc_assert (!TARGET_64BIT || !pop);
19222 if (TARGET_MACHO && !TARGET_64BIT)
19225 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
19226 fnaddr = machopic_indirect_call_target (fnaddr);
19231 /* Static functions and indirect calls don't need the pic register. */
19232 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
19233 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19234 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
19235 use_reg (&use, pic_offset_table_rtx);
19238 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
19240 rtx al = gen_rtx_REG (QImode, AX_REG);
19241 emit_move_insn (al, callarg2);
19242 use_reg (&use, al);
19245 if (ix86_cmodel == CM_LARGE_PIC
19247 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19248 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
19249 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
19251 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
19252 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
19254 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
19255 fnaddr = gen_rtx_MEM (QImode, fnaddr);
19258 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
19260 call = gen_rtx_SET (VOIDmode, retval, call);
19263 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
19264 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
19265 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
19268 && ix86_cfun_abi () == MS_ABI
19269 && (!callarg2 || INTVAL (callarg2) != -2))
19271 /* We need to represent that SI and DI registers are clobbered by the call. */
19273 static int clobbered_registers[] = {
19274 XMM6_REG, XMM7_REG, XMM8_REG,
19275 XMM9_REG, XMM10_REG, XMM11_REG,
19276 XMM12_REG, XMM13_REG, XMM14_REG,
19277 XMM15_REG, SI_REG, DI_REG
19280 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
19281 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
19282 UNSPEC_MS_TO_SYSV_CALL);
19286 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
19287 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
19290 (SSE_REGNO_P (clobbered_registers[i])
19292 clobbered_registers[i]));
19294 call = gen_rtx_PARALLEL (VOIDmode,
19295 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
19299 call = emit_call_insn (call);
19301 CALL_INSN_FUNCTION_USAGE (call) = use;
19305 /* Clear stack slot assignments remembered from previous functions.
19306 This is called from INIT_EXPANDERS once before RTL is emitted for each function. */
19309 static struct machine_function *
19310 ix86_init_machine_status (void)
19312 struct machine_function *f;
19314 f = GGC_CNEW (struct machine_function);
19315 f->use_fast_prologue_epilogue_nregs = -1;
19316 f->tls_descriptor_call_expanded_p = 0;
19317 f->call_abi = ix86_abi;
19322 /* Return a MEM corresponding to a stack slot with mode MODE.
19323 Allocate a new slot if necessary.
19325 The RTL for a function can have several slots available: N is
19326 which slot to use. */
19329 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
19331 struct stack_local_entry *s;
19333 gcc_assert (n < MAX_386_STACK_LOCALS);
19335 /* Virtual slot is valid only before vregs are instantiated. */
19336 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
19338 for (s = ix86_stack_locals; s; s = s->next)
19339 if (s->mode == mode && s->n == n)
19340 return copy_rtx (s->rtl);
19342 s = (struct stack_local_entry *)
19343 ggc_alloc (sizeof (struct stack_local_entry));
19346 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
19348 s->next = ix86_stack_locals;
19349 ix86_stack_locals = s;
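/* Illustrative sketch, not GCC source: the slot reuse above is plain
   memoization over a singly linked list keyed on (mode, slot number).  */
struct stack_slot_sketch
{
  int mode, n;
  void *rtl;
  struct stack_slot_sketch *next;
};

static void *
lookup_slot_sketch (struct stack_slot_sketch *head, int mode, int n)
{
  struct stack_slot_sketch *s;
  for (s = head; s; s = s->next)
    if (s->mode == mode && s->n == n)
      return s->rtl;
  return 0;  /* caller allocates a new entry and pushes it on the list */
}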
19353 /* Construct the SYMBOL_REF for the tls_get_addr function. */
19355 static GTY(()) rtx ix86_tls_symbol;
19357 ix86_tls_get_addr (void)
19360 if (!ix86_tls_symbol)
19362 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
19363 (TARGET_ANY_GNU_TLS
19365 ? "___tls_get_addr"
19366 : "__tls_get_addr");
19369 return ix86_tls_symbol;
19372 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
19374 static GTY(()) rtx ix86_tls_module_base_symbol;
19376 ix86_tls_module_base (void)
19379 if (!ix86_tls_module_base_symbol)
19381 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
19382 "_TLS_MODULE_BASE_");
19383 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
19384 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
19387 return ix86_tls_module_base_symbol;
19390 /* Calculate the length of the memory address in the instruction
19391 encoding. Does not include the one-byte modrm, opcode, or prefix. */
19394 memory_address_length (rtx addr)
19396 struct ix86_address parts;
19397 rtx base, index, disp;
19401 if (GET_CODE (addr) == PRE_DEC
19402 || GET_CODE (addr) == POST_INC
19403 || GET_CODE (addr) == PRE_MODIFY
19404 || GET_CODE (addr) == POST_MODIFY)
19407 ok = ix86_decompose_address (addr, &parts);
19410 if (parts.base && GET_CODE (parts.base) == SUBREG)
19411 parts.base = SUBREG_REG (parts.base);
19412 if (parts.index && GET_CODE (parts.index) == SUBREG)
19413 parts.index = SUBREG_REG (parts.index);
19416 index = parts.index;
19421 - esp as the base always wants an index,
19422 - ebp as the base always wants a displacement,
19423 - r12 as the base always wants an index,
19424 - r13 as the base always wants a displacement. */
19426 /* Register Indirect. */
19427 if (base && !index && !disp)
19429 /* esp (for its index) and ebp (for its displacement) need
19430 the two-byte modrm form. Similarly for r12 and r13 in 64-bit mode. */
19433 && (addr == arg_pointer_rtx
19434 || addr == frame_pointer_rtx
19435 || REGNO (addr) == SP_REG
19436 || REGNO (addr) == BP_REG
19437 || REGNO (addr) == R12_REG
19438 || REGNO (addr) == R13_REG))
19442 /* Direct Addressing. In 64-bit mode mod 00 r/m 5
19443 is not disp32, but disp32(%rip), so for disp32
19444 SIB byte is needed, unless print_operand_address
19445 optimizes it into disp32(%rip) or (%rip) is implied by UNSPEC. */
19447 else if (disp && !base && !index)
19454 if (GET_CODE (disp) == CONST)
19455 symbol = XEXP (disp, 0);
19456 if (GET_CODE (symbol) == PLUS
19457 && CONST_INT_P (XEXP (symbol, 1)))
19458 symbol = XEXP (symbol, 0);
19460 if (GET_CODE (symbol) != LABEL_REF
19461 && (GET_CODE (symbol) != SYMBOL_REF
19462 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
19463 && (GET_CODE (symbol) != UNSPEC
19464 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
19465 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
19472 /* Find the length of the displacement constant. */
19475 if (base && satisfies_constraint_K (disp))
19480 /* ebp always wants a displacement. Similarly r13. */
19481 else if (base && REG_P (base)
19482 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
19485 /* An index requires the two-byte modrm form.... */
19487 /* ...like esp (or r12), which always wants an index. */
19488 || base == arg_pointer_rtx
19489 || base == frame_pointer_rtx
19490 || (base && REG_P (base)
19491 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
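/* Illustrative sketch, not GCC source: the displacement sizing decided
   above, stripped of the RTL bookkeeping.  The constraint-K test used
   above is the signed 8-bit range check.  */
static int
disp_length_sketch (int have_base, int have_disp, long disp)
{
  if (!have_disp)
    return 0;                        /* pure register indirect */
  if (have_base && disp >= -128 && disp <= 127)
    return 1;                        /* disp8 short form */
  return 4;                          /* full disp32 */
}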
19508 /* Compute default value for "length_immediate" attribute. When SHORTFORM
19509 is set, expect that the insn has an 8-bit immediate alternative. */
19511 ix86_attr_length_immediate_default (rtx insn, int shortform)
19515 extract_insn_cached (insn);
19516 for (i = recog_data.n_operands - 1; i >= 0; --i)
19517 if (CONSTANT_P (recog_data.operand[i]))
19519 enum attr_mode mode = get_attr_mode (insn);
19522 if (shortform && CONST_INT_P (recog_data.operand[i]))
19524 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
19531 ival = trunc_int_for_mode (ival, HImode);
19534 ival = trunc_int_for_mode (ival, SImode);
19539 if (IN_RANGE (ival, -128, 127))
19556 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
19561 fatal_insn ("unknown insn mode", insn);
19566 /* Compute default value for "length_address" attribute. */
19568 ix86_attr_length_address_default (rtx insn)
19572 if (get_attr_type (insn) == TYPE_LEA)
19574 rtx set = PATTERN (insn), addr;
19576 if (GET_CODE (set) == PARALLEL)
19577 set = XVECEXP (set, 0, 0);
19579 gcc_assert (GET_CODE (set) == SET);
19581 addr = SET_SRC (set);
19582 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
19584 if (GET_CODE (addr) == ZERO_EXTEND)
19585 addr = XEXP (addr, 0);
19586 if (GET_CODE (addr) == SUBREG)
19587 addr = SUBREG_REG (addr);
19590 return memory_address_length (addr);
19593 extract_insn_cached (insn);
19594 for (i = recog_data.n_operands - 1; i >= 0; --i)
19595 if (MEM_P (recog_data.operand[i]))
19597 constrain_operands_cached (reload_completed);
19598 if (which_alternative != -1)
19600 const char *constraints = recog_data.constraints[i];
19601 int alt = which_alternative;
19603 while (*constraints == '=' || *constraints == '+')
19606 while (*constraints++ != ',')
19608 /* Skip ignored operands. */
19609 if (*constraints == 'X')
19612 return memory_address_length (XEXP (recog_data.operand[i], 0));
19617 /* Compute default value for "length_vex" attribute. It includes
19618 2 or 3 byte VEX prefix and 1 opcode byte. */
19621 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
19626 /* Only the 0f opcode can use the 2-byte VEX prefix; setting the VEX.W bit
19627 requires the 3-byte VEX prefix. */
19628 if (!has_0f_opcode || has_vex_w)
19631 /* We can always use the 2-byte VEX prefix in 32-bit mode. */
19635 extract_insn_cached (insn);
19637 for (i = recog_data.n_operands - 1; i >= 0; --i)
19638 if (REG_P (recog_data.operand[i]))
19640 /* REX.W bit uses 3 byte VEX prefix. */
19641 if (GET_MODE (recog_data.operand[i]) == DImode
19642 && GENERAL_REG_P (recog_data.operand[i]))
19647 /* REX.X or REX.B bits use 3 byte VEX prefix. */
19648 if (MEM_P (recog_data.operand[i])
19649 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
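/* Illustrative sketch, not GCC source: the VEX length rule checked above.
   The 2-byte form can only encode 0f-map instructions with the W, X and B
   extension bits clear; everything else needs the 3-byte form.  Totals
   include the opcode byte, matching the attribute's definition.  */
static int
vex_length_sketch (int is_0f_map, int needs_w, int needs_x_or_b)
{
  if (!is_0f_map || needs_w || needs_x_or_b)
    return 3 + 1;  /* 3-byte VEX prefix + opcode */
  return 2 + 1;    /* 2-byte VEX prefix + opcode */
}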
19656 /* Return the maximum number of instructions a cpu can issue. */
19659 ix86_issue_rate (void)
19663 case PROCESSOR_PENTIUM:
19664 case PROCESSOR_ATOM:
19668 case PROCESSOR_PENTIUMPRO:
19669 case PROCESSOR_PENTIUM4:
19670 case PROCESSOR_ATHLON:
19672 case PROCESSOR_AMDFAM10:
19673 case PROCESSOR_NOCONA:
19674 case PROCESSOR_GENERIC32:
19675 case PROCESSOR_GENERIC64:
19678 case PROCESSOR_CORE2:
19686 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads the flags set
19687 by DEP_INSN and nothing else set by DEP_INSN. */
19690 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
19694 /* Simplify the test for uninteresting insns. */
19695 if (insn_type != TYPE_SETCC
19696 && insn_type != TYPE_ICMOV
19697 && insn_type != TYPE_FCMOV
19698 && insn_type != TYPE_IBR)
19701 if ((set = single_set (dep_insn)) != 0)
19703 set = SET_DEST (set);
19706 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
19707 && XVECLEN (PATTERN (dep_insn), 0) == 2
19708 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
19709 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
19711 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
19712 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
19717 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
19720 /* This test is true if the dependent insn reads the flags but
19721 not any other potentially set register. */
19722 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
19725 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
19731 /* Return true iff USE_INSN has a memory address with operands set by SET_INSN. */
19735 ix86_agi_dependent (rtx set_insn, rtx use_insn)
19738 extract_insn_cached (use_insn);
19739 for (i = recog_data.n_operands - 1; i >= 0; --i)
19740 if (MEM_P (recog_data.operand[i]))
19742 rtx addr = XEXP (recog_data.operand[i], 0);
19743 return modified_in_p (addr, set_insn) != 0;
19749 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
19751 enum attr_type insn_type, dep_insn_type;
19752 enum attr_memory memory;
19754 int dep_insn_code_number;
19756 /* Anti and output dependencies have zero cost on all CPUs. */
19757 if (REG_NOTE_KIND (link) != 0)
19760 dep_insn_code_number = recog_memoized (dep_insn);
19762 /* If we can't recognize the insns, we can't really do anything. */
19763 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
19766 insn_type = get_attr_type (insn);
19767 dep_insn_type = get_attr_type (dep_insn);
19771 case PROCESSOR_PENTIUM:
19772 /* Address Generation Interlock adds a cycle of latency. */
19773 if (insn_type == TYPE_LEA)
19775 rtx addr = PATTERN (insn);
19777 if (GET_CODE (addr) == PARALLEL)
19778 addr = XVECEXP (addr, 0, 0);
19780 gcc_assert (GET_CODE (addr) == SET);
19782 addr = SET_SRC (addr);
19783 if (modified_in_p (addr, dep_insn))
19786 else if (ix86_agi_dependent (dep_insn, insn))
19789 /* ??? Compares pair with jump/setcc. */
19790 if (ix86_flags_dependent (insn, dep_insn, insn_type))
19793 /* Floating point stores require value to be ready one cycle earlier. */
19794 if (insn_type == TYPE_FMOV
19795 && get_attr_memory (insn) == MEMORY_STORE
19796 && !ix86_agi_dependent (dep_insn, insn))
19800 case PROCESSOR_PENTIUMPRO:
19801 memory = get_attr_memory (insn);
19803 /* INT->FP conversion is expensive. */
19804 if (get_attr_fp_int_src (dep_insn))
19807 /* There is one cycle extra latency between an FP op and a store. */
19808 if (insn_type == TYPE_FMOV
19809 && (set = single_set (dep_insn)) != NULL_RTX
19810 && (set2 = single_set (insn)) != NULL_RTX
19811 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
19812 && MEM_P (SET_DEST (set2)))
19815 /* Show ability of reorder buffer to hide latency of load by executing
19816 in parallel with previous instruction in case
19817 previous instruction is not needed to compute the address. */
19818 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19819 && !ix86_agi_dependent (dep_insn, insn))
19821 /* Claim moves to take one cycle, as the core can issue one load
19822 at a time and the next load can start a cycle later. */
19823 if (dep_insn_type == TYPE_IMOV
19824 || dep_insn_type == TYPE_FMOV)
19832 memory = get_attr_memory (insn);
19834 /* The esp dependency is resolved before the instruction is really finished. */
19836 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
19837 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
19840 /* INT->FP conversion is expensive. */
19841 if (get_attr_fp_int_src (dep_insn))
19844 /* Show ability of reorder buffer to hide latency of load by executing
19845 in parallel with previous instruction in case
19846 previous instruction is not needed to compute the address. */
19847 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19848 && !ix86_agi_dependent (dep_insn, insn))
19850 /* Claim moves to take one cycle, as the core can issue one load
19851 at a time and the next load can start a cycle later. */
19852 if (dep_insn_type == TYPE_IMOV
19853 || dep_insn_type == TYPE_FMOV)
19862 case PROCESSOR_ATHLON:
19864 case PROCESSOR_AMDFAM10:
19865 case PROCESSOR_ATOM:
19866 case PROCESSOR_GENERIC32:
19867 case PROCESSOR_GENERIC64:
19868 memory = get_attr_memory (insn);
19870 /* Show ability of reorder buffer to hide latency of load by executing
19871 in parallel with previous instruction in case
19872 previous instruction is not needed to compute the address. */
19873 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19874 && !ix86_agi_dependent (dep_insn, insn))
19876 enum attr_unit unit = get_attr_unit (insn);
19879 /* Because of the difference between the length of integer and
19880 floating unit pipeline preparation stages, the memory operands
19881 for floating point are cheaper.
19883 ??? For Athlon the difference is most probably 2. */
19884 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
19887 loadcost = TARGET_ATHLON ? 2 : 0;
19889 if (cost >= loadcost)
19902 /* How many alternative schedules to try. This should be as wide as the
19903 scheduling freedom in the DFA, but no wider. Making this value too
19904 large results in extra work for the scheduler. */
19907 ia32_multipass_dfa_lookahead (void)
19911 case PROCESSOR_PENTIUM:
19914 case PROCESSOR_PENTIUMPRO:
19924 /* Compute the alignment given to a constant that is being placed in memory.
19925 EXP is the constant and ALIGN is the alignment that the object would ordinarily have.
19927 The value of this function is used instead of that alignment to align the object. */
19931 ix86_constant_alignment (tree exp, int align)
19933 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
19934 || TREE_CODE (exp) == INTEGER_CST)
19936 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
19938 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
19941 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
19942 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
19943 return BITS_PER_WORD;
19948 /* Compute the alignment for a static variable.
19949 TYPE is the data type, and ALIGN is the alignment that
19950 the object would ordinarily have. The value of this function is used
19951 instead of that alignment to align the object. */
19954 ix86_data_alignment (tree type, int align)
19956 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
19958 if (AGGREGATE_TYPE_P (type)
19959 && TYPE_SIZE (type)
19960 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19961 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
19962 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
19963 && align < max_align)
19966 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
19967 to a 16-byte boundary. */
19970 if (AGGREGATE_TYPE_P (type)
19971 && TYPE_SIZE (type)
19972 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19973 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
19974 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
19978 if (TREE_CODE (type) == ARRAY_TYPE)
19980 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
19982 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
19985 else if (TREE_CODE (type) == COMPLEX_TYPE)
19988 if (TYPE_MODE (type) == DCmode && align < 64)
19990 if ((TYPE_MODE (type) == XCmode
19991 || TYPE_MODE (type) == TCmode) && align < 128)
19994 else if ((TREE_CODE (type) == RECORD_TYPE
19995 || TREE_CODE (type) == UNION_TYPE
19996 || TREE_CODE (type) == QUAL_UNION_TYPE)
19997 && TYPE_FIELDS (type))
19999 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20001 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20004 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20005 || TREE_CODE (type) == INTEGER_TYPE)
20007 if (TYPE_MODE (type) == DFmode && align < 64)
20009 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20016 /* Compute the alignment for a local variable or a stack slot. EXP is
20017 the data type or decl itself, MODE is the widest mode available and
20018 ALIGN is the alignment that the object would ordinarily have. The
20019 value of this macro is used instead of that alignment to align the object. */
20023 ix86_local_alignment (tree exp, enum machine_mode mode,
20024 unsigned int align)
20028 if (exp && DECL_P (exp))
20030 type = TREE_TYPE (exp);
20039 /* Don't do dynamic stack realignment for long long objects with
20040 -mpreferred-stack-boundary=2. */
20043 && ix86_preferred_stack_boundary < 64
20044 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
20045 && (!type || !TYPE_USER_ALIGN (type))
20046 && (!decl || !DECL_USER_ALIGN (decl)))
20049 /* If TYPE is NULL, we are allocating a stack slot for caller-save
20050 register in MODE. We will return the largest alignment of XF and DF. */
20054 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
20055 align = GET_MODE_ALIGNMENT (DFmode);
20059 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
20060 to a 16-byte boundary. */
20063 if (AGGREGATE_TYPE_P (type)
20064 && TYPE_SIZE (type)
20065 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20066 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
20067 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20070 if (TREE_CODE (type) == ARRAY_TYPE)
20072 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20074 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20077 else if (TREE_CODE (type) == COMPLEX_TYPE)
20079 if (TYPE_MODE (type) == DCmode && align < 64)
20081 if ((TYPE_MODE (type) == XCmode
20082 || TYPE_MODE (type) == TCmode) && align < 128)
20085 else if ((TREE_CODE (type) == RECORD_TYPE
20086 || TREE_CODE (type) == UNION_TYPE
20087 || TREE_CODE (type) == QUAL_UNION_TYPE)
20088 && TYPE_FIELDS (type))
20090 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20092 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20095 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20096 || TREE_CODE (type) == INTEGER_TYPE)
20099 if (TYPE_MODE (type) == DFmode && align < 64)
20101 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20107 /* Compute the minimum required alignment for dynamic stack realignment
20108 purposes for a local variable, parameter or a stack slot. EXP is
20109 the data type or decl itself, MODE is its mode and ALIGN is the
20110 alignment that the object would ordinarily have. */
20113 ix86_minimum_alignment (tree exp, enum machine_mode mode,
20114 unsigned int align)
20118 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
20121 if (exp && DECL_P (exp))
20123 type = TREE_TYPE (exp);
20132 /* Don't do dynamic stack realignment for long long objects with
20133 -mpreferred-stack-boundary=2. */
20134 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
20135 && (!type || !TYPE_USER_ALIGN (type))
20136 && (!decl || !DECL_USER_ALIGN (decl)))
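/* Editor's note (informal restatement of the case above): a plain
   long long local compiled with -mpreferred-stack-boundary=2 reports
   only a word-sized minimum alignment here, so such an object by
   itself does not force the prologue to dynamically realign the
   stack.  */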
20142 /* Find a location for the static chain incoming to a nested function.
20143 This is a register, unless all free registers are used by arguments. */
20146 ix86_static_chain (const_tree fndecl, bool incoming_p)
20150 if (!DECL_STATIC_CHAIN (fndecl))
20155 /* We always use R10 in 64-bit mode. */
20161 /* By default in 32-bit mode we use ECX to pass the static chain. */
20164 fntype = TREE_TYPE (fndecl);
20165 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
20167 /* Fastcall functions use ECX/EDX for arguments, which leaves
20168 us with EAX for the static chain. */
20171 else if (ix86_function_regparm (fntype, fndecl) == 3)
20173 /* For regparm 3, we have no free call-clobbered registers in
20174 which to store the static chain. In order to implement this,
20175 we have the trampoline push the static chain to the stack.
20176 However, we can't push a value below the return address when
20177 we call the nested function directly, so we have to use an
20178 alternate entry point. For this we use ESI, and have the
20179 alternate entry point push ESI, so that things appear the
20180 same once we're executing the nested function. */
20183 if (fndecl == current_function_decl)
20184 ix86_static_chain_on_stack = true;
20185 return gen_frame_mem (SImode,
20186 plus_constant (arg_pointer_rtx, -8));
20192 return gen_rtx_REG (Pmode, regno);
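/* Editor's summary of the register choice above (no additional rule
   is introduced here):

     64-bit               -> R10
     32-bit, default      -> ECX
     32-bit, fastcall     -> EAX  (ECX/EDX carry arguments)
     32-bit, regparm (3)  -> no free call-clobbered register; the
                             chain lives in a stack slot at
                             arg_pointer - 8, filled by the
                             trampoline or by the alternate entry
                             point.  */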
20195 /* Emit RTL insns to initialize the variable parts of a trampoline.
20196 FNDECL is the decl of the target address; M_TRAMP is a MEM for
20197 the trampoline, and CHAIN_VALUE is an RTX for the static chain
20198 to be passed to the target function. */
20201 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
20205 fnaddr = XEXP (DECL_RTL (fndecl), 0);
20212 /* Depending on the static chain location, either load a register
20213 with a constant, or push the constant to the stack. All of the
20214 instructions are the same size. */
20215 chain = ix86_static_chain (fndecl, true);
20218 if (REGNO (chain) == CX_REG)
20220 else if (REGNO (chain) == AX_REG)
20223 gcc_unreachable ();
20228 mem = adjust_address (m_tramp, QImode, 0);
20229 emit_move_insn (mem, gen_int_mode (opcode, QImode));
20231 mem = adjust_address (m_tramp, SImode, 1);
20232 emit_move_insn (mem, chain_value);
20234 /* Compute the offset from the end of the jmp to the target function.
20235 When the trampoline stores the static chain on the stack, we
20236 need to skip the first insn, which pushes the (call-saved)
20237 static chain register; this push is 1 byte.  */
20238 disp = expand_binop (SImode, sub_optab, fnaddr,
20239 plus_constant (XEXP (m_tramp, 0),
20240 MEM_P (chain) ? 9 : 10),
20241 NULL_RTX, 1, OPTAB_DIRECT);
20243 mem = adjust_address (m_tramp, QImode, 5);
20244 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
20246 mem = adjust_address (m_tramp, SImode, 6);
20247 emit_move_insn (mem, disp);
20253 /* Load the function address into r11.  Try to load the address
20254 using the shorter movl instead of movabs.  We may want to support
20255 movq for kernel mode, but the kernel does not use trampolines at
20256 the moment.  */
20257 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
20259 fnaddr = copy_to_mode_reg (DImode, fnaddr);
20261 mem = adjust_address (m_tramp, HImode, offset);
20262 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
20264 mem = adjust_address (m_tramp, SImode, offset + 2);
20265 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
20270 mem = adjust_address (m_tramp, HImode, offset);
20271 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
20273 mem = adjust_address (m_tramp, DImode, offset + 2);
20274 emit_move_insn (mem, fnaddr);
20278 /* Load the static chain into r10 using movabs.  */
20279 mem = adjust_address (m_tramp, HImode, offset);
20280 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
20282 mem = adjust_address (m_tramp, DImode, offset + 2);
20283 emit_move_insn (mem, chain_value);
20286 /* Jump to r11; the last (unused) byte is a nop, only there to
20287 pad the write out to a single 32-bit store. */
20288 mem = adjust_address (m_tramp, SImode, offset);
20289 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
20292 gcc_assert (offset <= TRAMPOLINE_SIZE);
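/* Editor's sketch of the bytes emitted above (derived from the
   emit_move_insn calls; little-endian byte order):

   32-bit, 10 bytes:
     b9/b8/68 imm32     mov ecx/eax, chain  -or-  push chain
     e9 rel32           jmp <disp to fnaddr>

   64-bit, 20 or 24 bytes:
     41 bb imm32        mov r11d, fnaddr      (zero-extending form)
       or 49 bb imm64   movabs r11, fnaddr
     49 ba imm64        movabs r10, chain
     49 ff e3 90        jmp r11; nop          (nop pads the final SImode store)  */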
20295 #ifdef ENABLE_EXECUTE_STACK
20296 #ifdef CHECK_EXECUTE_STACK_ENABLED
20297 if (CHECK_EXECUTE_STACK_ENABLED)
20299 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
20300 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
20304 /* The following file contains several enumerations and data structures
20305 built from the definitions in i386-builtin-types.def. */
20307 #include "i386-builtin-types.inc"
20309 /* Table for the ix86 builtin non-function types. */
20310 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
20312 /* Retrieve an element from the above table, building some of
20313 the types lazily. */
20316 ix86_get_builtin_type (enum ix86_builtin_type tcode)
20318 unsigned int index;
20321 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
20323 type = ix86_builtin_type_tab[(int) tcode];
20327 gcc_assert (tcode > IX86_BT_LAST_PRIM);
20328 if (tcode <= IX86_BT_LAST_VECT)
20330 enum machine_mode mode;
20332 index = tcode - IX86_BT_LAST_PRIM - 1;
20333 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
20334 mode = ix86_builtin_type_vect_mode[index];
20336 type = build_vector_type_for_mode (itype, mode);
20342 index = tcode - IX86_BT_LAST_VECT - 1;
20343 if (tcode <= IX86_BT_LAST_PTR)
20344 quals = TYPE_UNQUALIFIED;
20346 quals = TYPE_QUAL_CONST;
20348 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
20349 if (quals != TYPE_UNQUALIFIED)
20350 itype = build_qualified_type (itype, quals);
20352 type = build_pointer_type (itype);
20355 ix86_builtin_type_tab[(int) tcode] = type;
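/* Editor's walk-through (illustrative): the first request for a
   vector code such as IX86_BT_V4SF looks up its element type and
   machine mode in the generated ix86_builtin_type_vect_* tables and
   calls build_vector_type_for_mode; a pointer code past
   IX86_BT_LAST_PTR additionally const-qualifies the pointed-to type.
   The result is memoized in ix86_builtin_type_tab, so each type tree
   is built at most once.  */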
20359 /* Table for the ix86 builtin function types. */
20360 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
20362 /* Retrieve an element from the above table, building some of
20363 the types lazily. */
20366 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
20370 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
20372 type = ix86_builtin_func_type_tab[(int) tcode];
20376 if (tcode <= IX86_BT_LAST_FUNC)
20378 unsigned start = ix86_builtin_func_start[(int) tcode];
20379 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
20380 tree rtype, atype, args = void_list_node;
20383 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
20384 for (i = after - 1; i > start; --i)
20386 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
20387 args = tree_cons (NULL, atype, args);
20390 type = build_function_type (rtype, args);
20394 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
20395 enum ix86_builtin_func_type icode;
20397 icode = ix86_builtin_func_alias_base[index];
20398 type = ix86_get_builtin_func_type (icode);
20401 ix86_builtin_func_type_tab[(int) tcode] = type;
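/* Editor's sketch of the construction above for a hypothetical
   two-argument signature whose argument slots hold { V4SF, V4SF,
   V4SF }: the first slot becomes the return type and the remaining
   slots are consed onto void_list_node back to front, yielding the
   function type V4SF (V4SF, V4SF).  Alias codes beyond
   IX86_BT_LAST_FUNC simply reuse the type of their base code.  */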
20406 /* Codes for all the SSE/MMX builtins. */
20409 IX86_BUILTIN_ADDPS,
20410 IX86_BUILTIN_ADDSS,
20411 IX86_BUILTIN_DIVPS,
20412 IX86_BUILTIN_DIVSS,
20413 IX86_BUILTIN_MULPS,
20414 IX86_BUILTIN_MULSS,
20415 IX86_BUILTIN_SUBPS,
20416 IX86_BUILTIN_SUBSS,
20418 IX86_BUILTIN_CMPEQPS,
20419 IX86_BUILTIN_CMPLTPS,
20420 IX86_BUILTIN_CMPLEPS,
20421 IX86_BUILTIN_CMPGTPS,
20422 IX86_BUILTIN_CMPGEPS,
20423 IX86_BUILTIN_CMPNEQPS,
20424 IX86_BUILTIN_CMPNLTPS,
20425 IX86_BUILTIN_CMPNLEPS,
20426 IX86_BUILTIN_CMPNGTPS,
20427 IX86_BUILTIN_CMPNGEPS,
20428 IX86_BUILTIN_CMPORDPS,
20429 IX86_BUILTIN_CMPUNORDPS,
20430 IX86_BUILTIN_CMPEQSS,
20431 IX86_BUILTIN_CMPLTSS,
20432 IX86_BUILTIN_CMPLESS,
20433 IX86_BUILTIN_CMPNEQSS,
20434 IX86_BUILTIN_CMPNLTSS,
20435 IX86_BUILTIN_CMPNLESS,
20436 IX86_BUILTIN_CMPNGTSS,
20437 IX86_BUILTIN_CMPNGESS,
20438 IX86_BUILTIN_CMPORDSS,
20439 IX86_BUILTIN_CMPUNORDSS,
20441 IX86_BUILTIN_COMIEQSS,
20442 IX86_BUILTIN_COMILTSS,
20443 IX86_BUILTIN_COMILESS,
20444 IX86_BUILTIN_COMIGTSS,
20445 IX86_BUILTIN_COMIGESS,
20446 IX86_BUILTIN_COMINEQSS,
20447 IX86_BUILTIN_UCOMIEQSS,
20448 IX86_BUILTIN_UCOMILTSS,
20449 IX86_BUILTIN_UCOMILESS,
20450 IX86_BUILTIN_UCOMIGTSS,
20451 IX86_BUILTIN_UCOMIGESS,
20452 IX86_BUILTIN_UCOMINEQSS,
20454 IX86_BUILTIN_CVTPI2PS,
20455 IX86_BUILTIN_CVTPS2PI,
20456 IX86_BUILTIN_CVTSI2SS,
20457 IX86_BUILTIN_CVTSI642SS,
20458 IX86_BUILTIN_CVTSS2SI,
20459 IX86_BUILTIN_CVTSS2SI64,
20460 IX86_BUILTIN_CVTTPS2PI,
20461 IX86_BUILTIN_CVTTSS2SI,
20462 IX86_BUILTIN_CVTTSS2SI64,
20464 IX86_BUILTIN_MAXPS,
20465 IX86_BUILTIN_MAXSS,
20466 IX86_BUILTIN_MINPS,
20467 IX86_BUILTIN_MINSS,
20469 IX86_BUILTIN_LOADUPS,
20470 IX86_BUILTIN_STOREUPS,
20471 IX86_BUILTIN_MOVSS,
20473 IX86_BUILTIN_MOVHLPS,
20474 IX86_BUILTIN_MOVLHPS,
20475 IX86_BUILTIN_LOADHPS,
20476 IX86_BUILTIN_LOADLPS,
20477 IX86_BUILTIN_STOREHPS,
20478 IX86_BUILTIN_STORELPS,
20480 IX86_BUILTIN_MASKMOVQ,
20481 IX86_BUILTIN_MOVMSKPS,
20482 IX86_BUILTIN_PMOVMSKB,
20484 IX86_BUILTIN_MOVNTPS,
20485 IX86_BUILTIN_MOVNTQ,
20487 IX86_BUILTIN_LOADDQU,
20488 IX86_BUILTIN_STOREDQU,
20490 IX86_BUILTIN_PACKSSWB,
20491 IX86_BUILTIN_PACKSSDW,
20492 IX86_BUILTIN_PACKUSWB,
20494 IX86_BUILTIN_PADDB,
20495 IX86_BUILTIN_PADDW,
20496 IX86_BUILTIN_PADDD,
20497 IX86_BUILTIN_PADDQ,
20498 IX86_BUILTIN_PADDSB,
20499 IX86_BUILTIN_PADDSW,
20500 IX86_BUILTIN_PADDUSB,
20501 IX86_BUILTIN_PADDUSW,
20502 IX86_BUILTIN_PSUBB,
20503 IX86_BUILTIN_PSUBW,
20504 IX86_BUILTIN_PSUBD,
20505 IX86_BUILTIN_PSUBQ,
20506 IX86_BUILTIN_PSUBSB,
20507 IX86_BUILTIN_PSUBSW,
20508 IX86_BUILTIN_PSUBUSB,
20509 IX86_BUILTIN_PSUBUSW,
20512 IX86_BUILTIN_PANDN,
20516 IX86_BUILTIN_PAVGB,
20517 IX86_BUILTIN_PAVGW,
20519 IX86_BUILTIN_PCMPEQB,
20520 IX86_BUILTIN_PCMPEQW,
20521 IX86_BUILTIN_PCMPEQD,
20522 IX86_BUILTIN_PCMPGTB,
20523 IX86_BUILTIN_PCMPGTW,
20524 IX86_BUILTIN_PCMPGTD,
20526 IX86_BUILTIN_PMADDWD,
20528 IX86_BUILTIN_PMAXSW,
20529 IX86_BUILTIN_PMAXUB,
20530 IX86_BUILTIN_PMINSW,
20531 IX86_BUILTIN_PMINUB,
20533 IX86_BUILTIN_PMULHUW,
20534 IX86_BUILTIN_PMULHW,
20535 IX86_BUILTIN_PMULLW,
20537 IX86_BUILTIN_PSADBW,
20538 IX86_BUILTIN_PSHUFW,
20540 IX86_BUILTIN_PSLLW,
20541 IX86_BUILTIN_PSLLD,
20542 IX86_BUILTIN_PSLLQ,
20543 IX86_BUILTIN_PSRAW,
20544 IX86_BUILTIN_PSRAD,
20545 IX86_BUILTIN_PSRLW,
20546 IX86_BUILTIN_PSRLD,
20547 IX86_BUILTIN_PSRLQ,
20548 IX86_BUILTIN_PSLLWI,
20549 IX86_BUILTIN_PSLLDI,
20550 IX86_BUILTIN_PSLLQI,
20551 IX86_BUILTIN_PSRAWI,
20552 IX86_BUILTIN_PSRADI,
20553 IX86_BUILTIN_PSRLWI,
20554 IX86_BUILTIN_PSRLDI,
20555 IX86_BUILTIN_PSRLQI,
20557 IX86_BUILTIN_PUNPCKHBW,
20558 IX86_BUILTIN_PUNPCKHWD,
20559 IX86_BUILTIN_PUNPCKHDQ,
20560 IX86_BUILTIN_PUNPCKLBW,
20561 IX86_BUILTIN_PUNPCKLWD,
20562 IX86_BUILTIN_PUNPCKLDQ,
20564 IX86_BUILTIN_SHUFPS,
20566 IX86_BUILTIN_RCPPS,
20567 IX86_BUILTIN_RCPSS,
20568 IX86_BUILTIN_RSQRTPS,
20569 IX86_BUILTIN_RSQRTPS_NR,
20570 IX86_BUILTIN_RSQRTSS,
20571 IX86_BUILTIN_RSQRTF,
20572 IX86_BUILTIN_SQRTPS,
20573 IX86_BUILTIN_SQRTPS_NR,
20574 IX86_BUILTIN_SQRTSS,
20576 IX86_BUILTIN_UNPCKHPS,
20577 IX86_BUILTIN_UNPCKLPS,
20579 IX86_BUILTIN_ANDPS,
20580 IX86_BUILTIN_ANDNPS,
20582 IX86_BUILTIN_XORPS,
20585 IX86_BUILTIN_LDMXCSR,
20586 IX86_BUILTIN_STMXCSR,
20587 IX86_BUILTIN_SFENCE,
20589 /* 3DNow! Original */
20590 IX86_BUILTIN_FEMMS,
20591 IX86_BUILTIN_PAVGUSB,
20592 IX86_BUILTIN_PF2ID,
20593 IX86_BUILTIN_PFACC,
20594 IX86_BUILTIN_PFADD,
20595 IX86_BUILTIN_PFCMPEQ,
20596 IX86_BUILTIN_PFCMPGE,
20597 IX86_BUILTIN_PFCMPGT,
20598 IX86_BUILTIN_PFMAX,
20599 IX86_BUILTIN_PFMIN,
20600 IX86_BUILTIN_PFMUL,
20601 IX86_BUILTIN_PFRCP,
20602 IX86_BUILTIN_PFRCPIT1,
20603 IX86_BUILTIN_PFRCPIT2,
20604 IX86_BUILTIN_PFRSQIT1,
20605 IX86_BUILTIN_PFRSQRT,
20606 IX86_BUILTIN_PFSUB,
20607 IX86_BUILTIN_PFSUBR,
20608 IX86_BUILTIN_PI2FD,
20609 IX86_BUILTIN_PMULHRW,
20611 /* 3DNow! Athlon Extensions */
20612 IX86_BUILTIN_PF2IW,
20613 IX86_BUILTIN_PFNACC,
20614 IX86_BUILTIN_PFPNACC,
20615 IX86_BUILTIN_PI2FW,
20616 IX86_BUILTIN_PSWAPDSI,
20617 IX86_BUILTIN_PSWAPDSF,
20620 IX86_BUILTIN_ADDPD,
20621 IX86_BUILTIN_ADDSD,
20622 IX86_BUILTIN_DIVPD,
20623 IX86_BUILTIN_DIVSD,
20624 IX86_BUILTIN_MULPD,
20625 IX86_BUILTIN_MULSD,
20626 IX86_BUILTIN_SUBPD,
20627 IX86_BUILTIN_SUBSD,
20629 IX86_BUILTIN_CMPEQPD,
20630 IX86_BUILTIN_CMPLTPD,
20631 IX86_BUILTIN_CMPLEPD,
20632 IX86_BUILTIN_CMPGTPD,
20633 IX86_BUILTIN_CMPGEPD,
20634 IX86_BUILTIN_CMPNEQPD,
20635 IX86_BUILTIN_CMPNLTPD,
20636 IX86_BUILTIN_CMPNLEPD,
20637 IX86_BUILTIN_CMPNGTPD,
20638 IX86_BUILTIN_CMPNGEPD,
20639 IX86_BUILTIN_CMPORDPD,
20640 IX86_BUILTIN_CMPUNORDPD,
20641 IX86_BUILTIN_CMPEQSD,
20642 IX86_BUILTIN_CMPLTSD,
20643 IX86_BUILTIN_CMPLESD,
20644 IX86_BUILTIN_CMPNEQSD,
20645 IX86_BUILTIN_CMPNLTSD,
20646 IX86_BUILTIN_CMPNLESD,
20647 IX86_BUILTIN_CMPORDSD,
20648 IX86_BUILTIN_CMPUNORDSD,
20650 IX86_BUILTIN_COMIEQSD,
20651 IX86_BUILTIN_COMILTSD,
20652 IX86_BUILTIN_COMILESD,
20653 IX86_BUILTIN_COMIGTSD,
20654 IX86_BUILTIN_COMIGESD,
20655 IX86_BUILTIN_COMINEQSD,
20656 IX86_BUILTIN_UCOMIEQSD,
20657 IX86_BUILTIN_UCOMILTSD,
20658 IX86_BUILTIN_UCOMILESD,
20659 IX86_BUILTIN_UCOMIGTSD,
20660 IX86_BUILTIN_UCOMIGESD,
20661 IX86_BUILTIN_UCOMINEQSD,
20663 IX86_BUILTIN_MAXPD,
20664 IX86_BUILTIN_MAXSD,
20665 IX86_BUILTIN_MINPD,
20666 IX86_BUILTIN_MINSD,
20668 IX86_BUILTIN_ANDPD,
20669 IX86_BUILTIN_ANDNPD,
20671 IX86_BUILTIN_XORPD,
20673 IX86_BUILTIN_SQRTPD,
20674 IX86_BUILTIN_SQRTSD,
20676 IX86_BUILTIN_UNPCKHPD,
20677 IX86_BUILTIN_UNPCKLPD,
20679 IX86_BUILTIN_SHUFPD,
20681 IX86_BUILTIN_LOADUPD,
20682 IX86_BUILTIN_STOREUPD,
20683 IX86_BUILTIN_MOVSD,
20685 IX86_BUILTIN_LOADHPD,
20686 IX86_BUILTIN_LOADLPD,
20688 IX86_BUILTIN_CVTDQ2PD,
20689 IX86_BUILTIN_CVTDQ2PS,
20691 IX86_BUILTIN_CVTPD2DQ,
20692 IX86_BUILTIN_CVTPD2PI,
20693 IX86_BUILTIN_CVTPD2PS,
20694 IX86_BUILTIN_CVTTPD2DQ,
20695 IX86_BUILTIN_CVTTPD2PI,
20697 IX86_BUILTIN_CVTPI2PD,
20698 IX86_BUILTIN_CVTSI2SD,
20699 IX86_BUILTIN_CVTSI642SD,
20701 IX86_BUILTIN_CVTSD2SI,
20702 IX86_BUILTIN_CVTSD2SI64,
20703 IX86_BUILTIN_CVTSD2SS,
20704 IX86_BUILTIN_CVTSS2SD,
20705 IX86_BUILTIN_CVTTSD2SI,
20706 IX86_BUILTIN_CVTTSD2SI64,
20708 IX86_BUILTIN_CVTPS2DQ,
20709 IX86_BUILTIN_CVTPS2PD,
20710 IX86_BUILTIN_CVTTPS2DQ,
20712 IX86_BUILTIN_MOVNTI,
20713 IX86_BUILTIN_MOVNTPD,
20714 IX86_BUILTIN_MOVNTDQ,
20716 IX86_BUILTIN_MOVQ128,
20719 IX86_BUILTIN_MASKMOVDQU,
20720 IX86_BUILTIN_MOVMSKPD,
20721 IX86_BUILTIN_PMOVMSKB128,
20723 IX86_BUILTIN_PACKSSWB128,
20724 IX86_BUILTIN_PACKSSDW128,
20725 IX86_BUILTIN_PACKUSWB128,
20727 IX86_BUILTIN_PADDB128,
20728 IX86_BUILTIN_PADDW128,
20729 IX86_BUILTIN_PADDD128,
20730 IX86_BUILTIN_PADDQ128,
20731 IX86_BUILTIN_PADDSB128,
20732 IX86_BUILTIN_PADDSW128,
20733 IX86_BUILTIN_PADDUSB128,
20734 IX86_BUILTIN_PADDUSW128,
20735 IX86_BUILTIN_PSUBB128,
20736 IX86_BUILTIN_PSUBW128,
20737 IX86_BUILTIN_PSUBD128,
20738 IX86_BUILTIN_PSUBQ128,
20739 IX86_BUILTIN_PSUBSB128,
20740 IX86_BUILTIN_PSUBSW128,
20741 IX86_BUILTIN_PSUBUSB128,
20742 IX86_BUILTIN_PSUBUSW128,
20744 IX86_BUILTIN_PAND128,
20745 IX86_BUILTIN_PANDN128,
20746 IX86_BUILTIN_POR128,
20747 IX86_BUILTIN_PXOR128,
20749 IX86_BUILTIN_PAVGB128,
20750 IX86_BUILTIN_PAVGW128,
20752 IX86_BUILTIN_PCMPEQB128,
20753 IX86_BUILTIN_PCMPEQW128,
20754 IX86_BUILTIN_PCMPEQD128,
20755 IX86_BUILTIN_PCMPGTB128,
20756 IX86_BUILTIN_PCMPGTW128,
20757 IX86_BUILTIN_PCMPGTD128,
20759 IX86_BUILTIN_PMADDWD128,
20761 IX86_BUILTIN_PMAXSW128,
20762 IX86_BUILTIN_PMAXUB128,
20763 IX86_BUILTIN_PMINSW128,
20764 IX86_BUILTIN_PMINUB128,
20766 IX86_BUILTIN_PMULUDQ,
20767 IX86_BUILTIN_PMULUDQ128,
20768 IX86_BUILTIN_PMULHUW128,
20769 IX86_BUILTIN_PMULHW128,
20770 IX86_BUILTIN_PMULLW128,
20772 IX86_BUILTIN_PSADBW128,
20773 IX86_BUILTIN_PSHUFHW,
20774 IX86_BUILTIN_PSHUFLW,
20775 IX86_BUILTIN_PSHUFD,
20777 IX86_BUILTIN_PSLLDQI128,
20778 IX86_BUILTIN_PSLLWI128,
20779 IX86_BUILTIN_PSLLDI128,
20780 IX86_BUILTIN_PSLLQI128,
20781 IX86_BUILTIN_PSRAWI128,
20782 IX86_BUILTIN_PSRADI128,
20783 IX86_BUILTIN_PSRLDQI128,
20784 IX86_BUILTIN_PSRLWI128,
20785 IX86_BUILTIN_PSRLDI128,
20786 IX86_BUILTIN_PSRLQI128,
20788 IX86_BUILTIN_PSLLDQ128,
20789 IX86_BUILTIN_PSLLW128,
20790 IX86_BUILTIN_PSLLD128,
20791 IX86_BUILTIN_PSLLQ128,
20792 IX86_BUILTIN_PSRAW128,
20793 IX86_BUILTIN_PSRAD128,
20794 IX86_BUILTIN_PSRLW128,
20795 IX86_BUILTIN_PSRLD128,
20796 IX86_BUILTIN_PSRLQ128,
20798 IX86_BUILTIN_PUNPCKHBW128,
20799 IX86_BUILTIN_PUNPCKHWD128,
20800 IX86_BUILTIN_PUNPCKHDQ128,
20801 IX86_BUILTIN_PUNPCKHQDQ128,
20802 IX86_BUILTIN_PUNPCKLBW128,
20803 IX86_BUILTIN_PUNPCKLWD128,
20804 IX86_BUILTIN_PUNPCKLDQ128,
20805 IX86_BUILTIN_PUNPCKLQDQ128,
20807 IX86_BUILTIN_CLFLUSH,
20808 IX86_BUILTIN_MFENCE,
20809 IX86_BUILTIN_LFENCE,
20811 IX86_BUILTIN_BSRSI,
20812 IX86_BUILTIN_BSRDI,
20813 IX86_BUILTIN_RDPMC,
20814 IX86_BUILTIN_RDTSC,
20815 IX86_BUILTIN_RDTSCP,
20816 IX86_BUILTIN_ROLQI,
20817 IX86_BUILTIN_ROLHI,
20818 IX86_BUILTIN_RORQI,
20819 IX86_BUILTIN_RORHI,
20822 IX86_BUILTIN_ADDSUBPS,
20823 IX86_BUILTIN_HADDPS,
20824 IX86_BUILTIN_HSUBPS,
20825 IX86_BUILTIN_MOVSHDUP,
20826 IX86_BUILTIN_MOVSLDUP,
20827 IX86_BUILTIN_ADDSUBPD,
20828 IX86_BUILTIN_HADDPD,
20829 IX86_BUILTIN_HSUBPD,
20830 IX86_BUILTIN_LDDQU,
20832 IX86_BUILTIN_MONITOR,
20833 IX86_BUILTIN_MWAIT,
20836 IX86_BUILTIN_PHADDW,
20837 IX86_BUILTIN_PHADDD,
20838 IX86_BUILTIN_PHADDSW,
20839 IX86_BUILTIN_PHSUBW,
20840 IX86_BUILTIN_PHSUBD,
20841 IX86_BUILTIN_PHSUBSW,
20842 IX86_BUILTIN_PMADDUBSW,
20843 IX86_BUILTIN_PMULHRSW,
20844 IX86_BUILTIN_PSHUFB,
20845 IX86_BUILTIN_PSIGNB,
20846 IX86_BUILTIN_PSIGNW,
20847 IX86_BUILTIN_PSIGND,
20848 IX86_BUILTIN_PALIGNR,
20849 IX86_BUILTIN_PABSB,
20850 IX86_BUILTIN_PABSW,
20851 IX86_BUILTIN_PABSD,
20853 IX86_BUILTIN_PHADDW128,
20854 IX86_BUILTIN_PHADDD128,
20855 IX86_BUILTIN_PHADDSW128,
20856 IX86_BUILTIN_PHSUBW128,
20857 IX86_BUILTIN_PHSUBD128,
20858 IX86_BUILTIN_PHSUBSW128,
20859 IX86_BUILTIN_PMADDUBSW128,
20860 IX86_BUILTIN_PMULHRSW128,
20861 IX86_BUILTIN_PSHUFB128,
20862 IX86_BUILTIN_PSIGNB128,
20863 IX86_BUILTIN_PSIGNW128,
20864 IX86_BUILTIN_PSIGND128,
20865 IX86_BUILTIN_PALIGNR128,
20866 IX86_BUILTIN_PABSB128,
20867 IX86_BUILTIN_PABSW128,
20868 IX86_BUILTIN_PABSD128,
20870 /* AMDFAM10 - SSE4A New Instructions. */
20871 IX86_BUILTIN_MOVNTSD,
20872 IX86_BUILTIN_MOVNTSS,
20873 IX86_BUILTIN_EXTRQI,
20874 IX86_BUILTIN_EXTRQ,
20875 IX86_BUILTIN_INSERTQI,
20876 IX86_BUILTIN_INSERTQ,
20879 IX86_BUILTIN_BLENDPD,
20880 IX86_BUILTIN_BLENDPS,
20881 IX86_BUILTIN_BLENDVPD,
20882 IX86_BUILTIN_BLENDVPS,
20883 IX86_BUILTIN_PBLENDVB128,
20884 IX86_BUILTIN_PBLENDW128,
20889 IX86_BUILTIN_INSERTPS128,
20891 IX86_BUILTIN_MOVNTDQA,
20892 IX86_BUILTIN_MPSADBW128,
20893 IX86_BUILTIN_PACKUSDW128,
20894 IX86_BUILTIN_PCMPEQQ,
20895 IX86_BUILTIN_PHMINPOSUW128,
20897 IX86_BUILTIN_PMAXSB128,
20898 IX86_BUILTIN_PMAXSD128,
20899 IX86_BUILTIN_PMAXUD128,
20900 IX86_BUILTIN_PMAXUW128,
20902 IX86_BUILTIN_PMINSB128,
20903 IX86_BUILTIN_PMINSD128,
20904 IX86_BUILTIN_PMINUD128,
20905 IX86_BUILTIN_PMINUW128,
20907 IX86_BUILTIN_PMOVSXBW128,
20908 IX86_BUILTIN_PMOVSXBD128,
20909 IX86_BUILTIN_PMOVSXBQ128,
20910 IX86_BUILTIN_PMOVSXWD128,
20911 IX86_BUILTIN_PMOVSXWQ128,
20912 IX86_BUILTIN_PMOVSXDQ128,
20914 IX86_BUILTIN_PMOVZXBW128,
20915 IX86_BUILTIN_PMOVZXBD128,
20916 IX86_BUILTIN_PMOVZXBQ128,
20917 IX86_BUILTIN_PMOVZXWD128,
20918 IX86_BUILTIN_PMOVZXWQ128,
20919 IX86_BUILTIN_PMOVZXDQ128,
20921 IX86_BUILTIN_PMULDQ128,
20922 IX86_BUILTIN_PMULLD128,
20924 IX86_BUILTIN_ROUNDPD,
20925 IX86_BUILTIN_ROUNDPS,
20926 IX86_BUILTIN_ROUNDSD,
20927 IX86_BUILTIN_ROUNDSS,
20929 IX86_BUILTIN_PTESTZ,
20930 IX86_BUILTIN_PTESTC,
20931 IX86_BUILTIN_PTESTNZC,
20933 IX86_BUILTIN_VEC_INIT_V2SI,
20934 IX86_BUILTIN_VEC_INIT_V4HI,
20935 IX86_BUILTIN_VEC_INIT_V8QI,
20936 IX86_BUILTIN_VEC_EXT_V2DF,
20937 IX86_BUILTIN_VEC_EXT_V2DI,
20938 IX86_BUILTIN_VEC_EXT_V4SF,
20939 IX86_BUILTIN_VEC_EXT_V4SI,
20940 IX86_BUILTIN_VEC_EXT_V8HI,
20941 IX86_BUILTIN_VEC_EXT_V2SI,
20942 IX86_BUILTIN_VEC_EXT_V4HI,
20943 IX86_BUILTIN_VEC_EXT_V16QI,
20944 IX86_BUILTIN_VEC_SET_V2DI,
20945 IX86_BUILTIN_VEC_SET_V4SF,
20946 IX86_BUILTIN_VEC_SET_V4SI,
20947 IX86_BUILTIN_VEC_SET_V8HI,
20948 IX86_BUILTIN_VEC_SET_V4HI,
20949 IX86_BUILTIN_VEC_SET_V16QI,
20951 IX86_BUILTIN_VEC_PACK_SFIX,
20954 IX86_BUILTIN_CRC32QI,
20955 IX86_BUILTIN_CRC32HI,
20956 IX86_BUILTIN_CRC32SI,
20957 IX86_BUILTIN_CRC32DI,
20959 IX86_BUILTIN_PCMPESTRI128,
20960 IX86_BUILTIN_PCMPESTRM128,
20961 IX86_BUILTIN_PCMPESTRA128,
20962 IX86_BUILTIN_PCMPESTRC128,
20963 IX86_BUILTIN_PCMPESTRO128,
20964 IX86_BUILTIN_PCMPESTRS128,
20965 IX86_BUILTIN_PCMPESTRZ128,
20966 IX86_BUILTIN_PCMPISTRI128,
20967 IX86_BUILTIN_PCMPISTRM128,
20968 IX86_BUILTIN_PCMPISTRA128,
20969 IX86_BUILTIN_PCMPISTRC128,
20970 IX86_BUILTIN_PCMPISTRO128,
20971 IX86_BUILTIN_PCMPISTRS128,
20972 IX86_BUILTIN_PCMPISTRZ128,
20974 IX86_BUILTIN_PCMPGTQ,
20976 /* AES instructions */
20977 IX86_BUILTIN_AESENC128,
20978 IX86_BUILTIN_AESENCLAST128,
20979 IX86_BUILTIN_AESDEC128,
20980 IX86_BUILTIN_AESDECLAST128,
20981 IX86_BUILTIN_AESIMC128,
20982 IX86_BUILTIN_AESKEYGENASSIST128,
20984 /* PCLMUL instruction */
20985 IX86_BUILTIN_PCLMULQDQ128,
20988 IX86_BUILTIN_ADDPD256,
20989 IX86_BUILTIN_ADDPS256,
20990 IX86_BUILTIN_ADDSUBPD256,
20991 IX86_BUILTIN_ADDSUBPS256,
20992 IX86_BUILTIN_ANDPD256,
20993 IX86_BUILTIN_ANDPS256,
20994 IX86_BUILTIN_ANDNPD256,
20995 IX86_BUILTIN_ANDNPS256,
20996 IX86_BUILTIN_BLENDPD256,
20997 IX86_BUILTIN_BLENDPS256,
20998 IX86_BUILTIN_BLENDVPD256,
20999 IX86_BUILTIN_BLENDVPS256,
21000 IX86_BUILTIN_DIVPD256,
21001 IX86_BUILTIN_DIVPS256,
21002 IX86_BUILTIN_DPPS256,
21003 IX86_BUILTIN_HADDPD256,
21004 IX86_BUILTIN_HADDPS256,
21005 IX86_BUILTIN_HSUBPD256,
21006 IX86_BUILTIN_HSUBPS256,
21007 IX86_BUILTIN_MAXPD256,
21008 IX86_BUILTIN_MAXPS256,
21009 IX86_BUILTIN_MINPD256,
21010 IX86_BUILTIN_MINPS256,
21011 IX86_BUILTIN_MULPD256,
21012 IX86_BUILTIN_MULPS256,
21013 IX86_BUILTIN_ORPD256,
21014 IX86_BUILTIN_ORPS256,
21015 IX86_BUILTIN_SHUFPD256,
21016 IX86_BUILTIN_SHUFPS256,
21017 IX86_BUILTIN_SUBPD256,
21018 IX86_BUILTIN_SUBPS256,
21019 IX86_BUILTIN_XORPD256,
21020 IX86_BUILTIN_XORPS256,
21021 IX86_BUILTIN_CMPSD,
21022 IX86_BUILTIN_CMPSS,
21023 IX86_BUILTIN_CMPPD,
21024 IX86_BUILTIN_CMPPS,
21025 IX86_BUILTIN_CMPPD256,
21026 IX86_BUILTIN_CMPPS256,
21027 IX86_BUILTIN_CVTDQ2PD256,
21028 IX86_BUILTIN_CVTDQ2PS256,
21029 IX86_BUILTIN_CVTPD2PS256,
21030 IX86_BUILTIN_CVTPS2DQ256,
21031 IX86_BUILTIN_CVTPS2PD256,
21032 IX86_BUILTIN_CVTTPD2DQ256,
21033 IX86_BUILTIN_CVTPD2DQ256,
21034 IX86_BUILTIN_CVTTPS2DQ256,
21035 IX86_BUILTIN_EXTRACTF128PD256,
21036 IX86_BUILTIN_EXTRACTF128PS256,
21037 IX86_BUILTIN_EXTRACTF128SI256,
21038 IX86_BUILTIN_VZEROALL,
21039 IX86_BUILTIN_VZEROUPPER,
21040 IX86_BUILTIN_VPERMILVARPD,
21041 IX86_BUILTIN_VPERMILVARPS,
21042 IX86_BUILTIN_VPERMILVARPD256,
21043 IX86_BUILTIN_VPERMILVARPS256,
21044 IX86_BUILTIN_VPERMILPD,
21045 IX86_BUILTIN_VPERMILPS,
21046 IX86_BUILTIN_VPERMILPD256,
21047 IX86_BUILTIN_VPERMILPS256,
21048 IX86_BUILTIN_VPERMIL2PD,
21049 IX86_BUILTIN_VPERMIL2PS,
21050 IX86_BUILTIN_VPERMIL2PD256,
21051 IX86_BUILTIN_VPERMIL2PS256,
21052 IX86_BUILTIN_VPERM2F128PD256,
21053 IX86_BUILTIN_VPERM2F128PS256,
21054 IX86_BUILTIN_VPERM2F128SI256,
21055 IX86_BUILTIN_VBROADCASTSS,
21056 IX86_BUILTIN_VBROADCASTSD256,
21057 IX86_BUILTIN_VBROADCASTSS256,
21058 IX86_BUILTIN_VBROADCASTPD256,
21059 IX86_BUILTIN_VBROADCASTPS256,
21060 IX86_BUILTIN_VINSERTF128PD256,
21061 IX86_BUILTIN_VINSERTF128PS256,
21062 IX86_BUILTIN_VINSERTF128SI256,
21063 IX86_BUILTIN_LOADUPD256,
21064 IX86_BUILTIN_LOADUPS256,
21065 IX86_BUILTIN_STOREUPD256,
21066 IX86_BUILTIN_STOREUPS256,
21067 IX86_BUILTIN_LDDQU256,
21068 IX86_BUILTIN_MOVNTDQ256,
21069 IX86_BUILTIN_MOVNTPD256,
21070 IX86_BUILTIN_MOVNTPS256,
21071 IX86_BUILTIN_LOADDQU256,
21072 IX86_BUILTIN_STOREDQU256,
21073 IX86_BUILTIN_MASKLOADPD,
21074 IX86_BUILTIN_MASKLOADPS,
21075 IX86_BUILTIN_MASKSTOREPD,
21076 IX86_BUILTIN_MASKSTOREPS,
21077 IX86_BUILTIN_MASKLOADPD256,
21078 IX86_BUILTIN_MASKLOADPS256,
21079 IX86_BUILTIN_MASKSTOREPD256,
21080 IX86_BUILTIN_MASKSTOREPS256,
21081 IX86_BUILTIN_MOVSHDUP256,
21082 IX86_BUILTIN_MOVSLDUP256,
21083 IX86_BUILTIN_MOVDDUP256,
21085 IX86_BUILTIN_SQRTPD256,
21086 IX86_BUILTIN_SQRTPS256,
21087 IX86_BUILTIN_SQRTPS_NR256,
21088 IX86_BUILTIN_RSQRTPS256,
21089 IX86_BUILTIN_RSQRTPS_NR256,
21091 IX86_BUILTIN_RCPPS256,
21093 IX86_BUILTIN_ROUNDPD256,
21094 IX86_BUILTIN_ROUNDPS256,
21096 IX86_BUILTIN_UNPCKHPD256,
21097 IX86_BUILTIN_UNPCKLPD256,
21098 IX86_BUILTIN_UNPCKHPS256,
21099 IX86_BUILTIN_UNPCKLPS256,
21101 IX86_BUILTIN_SI256_SI,
21102 IX86_BUILTIN_PS256_PS,
21103 IX86_BUILTIN_PD256_PD,
21104 IX86_BUILTIN_SI_SI256,
21105 IX86_BUILTIN_PS_PS256,
21106 IX86_BUILTIN_PD_PD256,
21108 IX86_BUILTIN_VTESTZPD,
21109 IX86_BUILTIN_VTESTCPD,
21110 IX86_BUILTIN_VTESTNZCPD,
21111 IX86_BUILTIN_VTESTZPS,
21112 IX86_BUILTIN_VTESTCPS,
21113 IX86_BUILTIN_VTESTNZCPS,
21114 IX86_BUILTIN_VTESTZPD256,
21115 IX86_BUILTIN_VTESTCPD256,
21116 IX86_BUILTIN_VTESTNZCPD256,
21117 IX86_BUILTIN_VTESTZPS256,
21118 IX86_BUILTIN_VTESTCPS256,
21119 IX86_BUILTIN_VTESTNZCPS256,
21120 IX86_BUILTIN_PTESTZ256,
21121 IX86_BUILTIN_PTESTC256,
21122 IX86_BUILTIN_PTESTNZC256,
21124 IX86_BUILTIN_MOVMSKPD256,
21125 IX86_BUILTIN_MOVMSKPS256,
21127 /* TFmode support builtins. */
21129 IX86_BUILTIN_HUGE_VALQ,
21130 IX86_BUILTIN_FABSQ,
21131 IX86_BUILTIN_COPYSIGNQ,
21133 /* Vectorizer support builtins. */
21134 IX86_BUILTIN_CPYSGNPS,
21135 IX86_BUILTIN_CPYSGNPD,
21137 IX86_BUILTIN_CVTUDQ2PS,
21139 IX86_BUILTIN_VEC_PERM_V2DF,
21140 IX86_BUILTIN_VEC_PERM_V4SF,
21141 IX86_BUILTIN_VEC_PERM_V2DI,
21142 IX86_BUILTIN_VEC_PERM_V4SI,
21143 IX86_BUILTIN_VEC_PERM_V8HI,
21144 IX86_BUILTIN_VEC_PERM_V16QI,
21145 IX86_BUILTIN_VEC_PERM_V2DI_U,
21146 IX86_BUILTIN_VEC_PERM_V4SI_U,
21147 IX86_BUILTIN_VEC_PERM_V8HI_U,
21148 IX86_BUILTIN_VEC_PERM_V16QI_U,
21149 IX86_BUILTIN_VEC_PERM_V4DF,
21150 IX86_BUILTIN_VEC_PERM_V8SF,
21152 /* FMA4 and XOP instructions. */
21153 IX86_BUILTIN_VFMADDSS,
21154 IX86_BUILTIN_VFMADDSD,
21155 IX86_BUILTIN_VFMADDPS,
21156 IX86_BUILTIN_VFMADDPD,
21157 IX86_BUILTIN_VFMSUBSS,
21158 IX86_BUILTIN_VFMSUBSD,
21159 IX86_BUILTIN_VFMSUBPS,
21160 IX86_BUILTIN_VFMSUBPD,
21161 IX86_BUILTIN_VFMADDSUBPS,
21162 IX86_BUILTIN_VFMADDSUBPD,
21163 IX86_BUILTIN_VFMSUBADDPS,
21164 IX86_BUILTIN_VFMSUBADDPD,
21165 IX86_BUILTIN_VFNMADDSS,
21166 IX86_BUILTIN_VFNMADDSD,
21167 IX86_BUILTIN_VFNMADDPS,
21168 IX86_BUILTIN_VFNMADDPD,
21169 IX86_BUILTIN_VFNMSUBSS,
21170 IX86_BUILTIN_VFNMSUBSD,
21171 IX86_BUILTIN_VFNMSUBPS,
21172 IX86_BUILTIN_VFNMSUBPD,
21173 IX86_BUILTIN_VFMADDPS256,
21174 IX86_BUILTIN_VFMADDPD256,
21175 IX86_BUILTIN_VFMSUBPS256,
21176 IX86_BUILTIN_VFMSUBPD256,
21177 IX86_BUILTIN_VFMADDSUBPS256,
21178 IX86_BUILTIN_VFMADDSUBPD256,
21179 IX86_BUILTIN_VFMSUBADDPS256,
21180 IX86_BUILTIN_VFMSUBADDPD256,
21181 IX86_BUILTIN_VFNMADDPS256,
21182 IX86_BUILTIN_VFNMADDPD256,
21183 IX86_BUILTIN_VFNMSUBPS256,
21184 IX86_BUILTIN_VFNMSUBPD256,
21186 IX86_BUILTIN_VPCMOV,
21187 IX86_BUILTIN_VPCMOV_V2DI,
21188 IX86_BUILTIN_VPCMOV_V4SI,
21189 IX86_BUILTIN_VPCMOV_V8HI,
21190 IX86_BUILTIN_VPCMOV_V16QI,
21191 IX86_BUILTIN_VPCMOV_V4SF,
21192 IX86_BUILTIN_VPCMOV_V2DF,
21193 IX86_BUILTIN_VPCMOV256,
21194 IX86_BUILTIN_VPCMOV_V4DI256,
21195 IX86_BUILTIN_VPCMOV_V8SI256,
21196 IX86_BUILTIN_VPCMOV_V16HI256,
21197 IX86_BUILTIN_VPCMOV_V32QI256,
21198 IX86_BUILTIN_VPCMOV_V8SF256,
21199 IX86_BUILTIN_VPCMOV_V4DF256,
21201 IX86_BUILTIN_VPPERM,
21203 IX86_BUILTIN_VPMACSSWW,
21204 IX86_BUILTIN_VPMACSWW,
21205 IX86_BUILTIN_VPMACSSWD,
21206 IX86_BUILTIN_VPMACSWD,
21207 IX86_BUILTIN_VPMACSSDD,
21208 IX86_BUILTIN_VPMACSDD,
21209 IX86_BUILTIN_VPMACSSDQL,
21210 IX86_BUILTIN_VPMACSSDQH,
21211 IX86_BUILTIN_VPMACSDQL,
21212 IX86_BUILTIN_VPMACSDQH,
21213 IX86_BUILTIN_VPMADCSSWD,
21214 IX86_BUILTIN_VPMADCSWD,
21216 IX86_BUILTIN_VPHADDBW,
21217 IX86_BUILTIN_VPHADDBD,
21218 IX86_BUILTIN_VPHADDBQ,
21219 IX86_BUILTIN_VPHADDWD,
21220 IX86_BUILTIN_VPHADDWQ,
21221 IX86_BUILTIN_VPHADDDQ,
21222 IX86_BUILTIN_VPHADDUBW,
21223 IX86_BUILTIN_VPHADDUBD,
21224 IX86_BUILTIN_VPHADDUBQ,
21225 IX86_BUILTIN_VPHADDUWD,
21226 IX86_BUILTIN_VPHADDUWQ,
21227 IX86_BUILTIN_VPHADDUDQ,
21228 IX86_BUILTIN_VPHSUBBW,
21229 IX86_BUILTIN_VPHSUBWD,
21230 IX86_BUILTIN_VPHSUBDQ,
21232 IX86_BUILTIN_VPROTB,
21233 IX86_BUILTIN_VPROTW,
21234 IX86_BUILTIN_VPROTD,
21235 IX86_BUILTIN_VPROTQ,
21236 IX86_BUILTIN_VPROTB_IMM,
21237 IX86_BUILTIN_VPROTW_IMM,
21238 IX86_BUILTIN_VPROTD_IMM,
21239 IX86_BUILTIN_VPROTQ_IMM,
21241 IX86_BUILTIN_VPSHLB,
21242 IX86_BUILTIN_VPSHLW,
21243 IX86_BUILTIN_VPSHLD,
21244 IX86_BUILTIN_VPSHLQ,
21245 IX86_BUILTIN_VPSHAB,
21246 IX86_BUILTIN_VPSHAW,
21247 IX86_BUILTIN_VPSHAD,
21248 IX86_BUILTIN_VPSHAQ,
21250 IX86_BUILTIN_VFRCZSS,
21251 IX86_BUILTIN_VFRCZSD,
21252 IX86_BUILTIN_VFRCZPS,
21253 IX86_BUILTIN_VFRCZPD,
21254 IX86_BUILTIN_VFRCZPS256,
21255 IX86_BUILTIN_VFRCZPD256,
21257 IX86_BUILTIN_VPCOMEQUB,
21258 IX86_BUILTIN_VPCOMNEUB,
21259 IX86_BUILTIN_VPCOMLTUB,
21260 IX86_BUILTIN_VPCOMLEUB,
21261 IX86_BUILTIN_VPCOMGTUB,
21262 IX86_BUILTIN_VPCOMGEUB,
21263 IX86_BUILTIN_VPCOMFALSEUB,
21264 IX86_BUILTIN_VPCOMTRUEUB,
21266 IX86_BUILTIN_VPCOMEQUW,
21267 IX86_BUILTIN_VPCOMNEUW,
21268 IX86_BUILTIN_VPCOMLTUW,
21269 IX86_BUILTIN_VPCOMLEUW,
21270 IX86_BUILTIN_VPCOMGTUW,
21271 IX86_BUILTIN_VPCOMGEUW,
21272 IX86_BUILTIN_VPCOMFALSEUW,
21273 IX86_BUILTIN_VPCOMTRUEUW,
21275 IX86_BUILTIN_VPCOMEQUD,
21276 IX86_BUILTIN_VPCOMNEUD,
21277 IX86_BUILTIN_VPCOMLTUD,
21278 IX86_BUILTIN_VPCOMLEUD,
21279 IX86_BUILTIN_VPCOMGTUD,
21280 IX86_BUILTIN_VPCOMGEUD,
21281 IX86_BUILTIN_VPCOMFALSEUD,
21282 IX86_BUILTIN_VPCOMTRUEUD,
21284 IX86_BUILTIN_VPCOMEQUQ,
21285 IX86_BUILTIN_VPCOMNEUQ,
21286 IX86_BUILTIN_VPCOMLTUQ,
21287 IX86_BUILTIN_VPCOMLEUQ,
21288 IX86_BUILTIN_VPCOMGTUQ,
21289 IX86_BUILTIN_VPCOMGEUQ,
21290 IX86_BUILTIN_VPCOMFALSEUQ,
21291 IX86_BUILTIN_VPCOMTRUEUQ,
21293 IX86_BUILTIN_VPCOMEQB,
21294 IX86_BUILTIN_VPCOMNEB,
21295 IX86_BUILTIN_VPCOMLTB,
21296 IX86_BUILTIN_VPCOMLEB,
21297 IX86_BUILTIN_VPCOMGTB,
21298 IX86_BUILTIN_VPCOMGEB,
21299 IX86_BUILTIN_VPCOMFALSEB,
21300 IX86_BUILTIN_VPCOMTRUEB,
21302 IX86_BUILTIN_VPCOMEQW,
21303 IX86_BUILTIN_VPCOMNEW,
21304 IX86_BUILTIN_VPCOMLTW,
21305 IX86_BUILTIN_VPCOMLEW,
21306 IX86_BUILTIN_VPCOMGTW,
21307 IX86_BUILTIN_VPCOMGEW,
21308 IX86_BUILTIN_VPCOMFALSEW,
21309 IX86_BUILTIN_VPCOMTRUEW,
21311 IX86_BUILTIN_VPCOMEQD,
21312 IX86_BUILTIN_VPCOMNED,
21313 IX86_BUILTIN_VPCOMLTD,
21314 IX86_BUILTIN_VPCOMLED,
21315 IX86_BUILTIN_VPCOMGTD,
21316 IX86_BUILTIN_VPCOMGED,
21317 IX86_BUILTIN_VPCOMFALSED,
21318 IX86_BUILTIN_VPCOMTRUED,
21320 IX86_BUILTIN_VPCOMEQQ,
21321 IX86_BUILTIN_VPCOMNEQ,
21322 IX86_BUILTIN_VPCOMLTQ,
21323 IX86_BUILTIN_VPCOMLEQ,
21324 IX86_BUILTIN_VPCOMGTQ,
21325 IX86_BUILTIN_VPCOMGEQ,
21326 IX86_BUILTIN_VPCOMFALSEQ,
21327 IX86_BUILTIN_VPCOMTRUEQ,
21329 /* LWP instructions. */
21330 IX86_BUILTIN_LLWPCB,
21331 IX86_BUILTIN_SLWPCB,
21332 IX86_BUILTIN_LWPVAL32,
21333 IX86_BUILTIN_LWPVAL64,
21334 IX86_BUILTIN_LWPINS32,
21335 IX86_BUILTIN_LWPINS64,
21342 /* Table for the ix86 builtin decls. */
21343 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
21345 /* Table of all of the builtin functions that are possible with different ISAs,
21346 but are waiting to be built until a function is declared to use that
21347 ISA.  */
21348 struct builtin_isa {
21349 const char *name; /* function name */
21350 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
21351 int isa; /* isa_flags this builtin is defined for */
21352 bool const_p; /* true if the declaration is constant */
21353 bool set_and_not_built_p; /* true if the decl was deferred and has not been built yet */
21356 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
21359 /* Add an ix86 target builtin function with CODE, NAME and TYPE.  Save the
21360 MASK of which isa_flags to use in the ix86_builtins_isa array.  Store the
21361 function decl in the ix86_builtins array.  Return the function decl, or
21362 NULL_TREE if the builtin was not added.
21364 If the front end has a special hook for builtin functions, delay adding
21365 builtin functions that aren't in the current ISA until the ISA is changed
21366 with function specific optimization.  Doing so can save about 300K for the
21367 default compiler.  When the builtin is expanded, check at that time whether
21368 it is valid.
21370 If the front end doesn't have a special hook, record all builtins, even if
21371 they aren't in the current ISA, in case the user uses
21372 function specific options for a different ISA, so that we don't get scope
21373 errors if a builtin is added in the middle of a function scope. */
21376 def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
21377 enum ix86_builtins code)
21379 tree decl = NULL_TREE;
21381 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
21383 ix86_builtins_isa[(int) code].isa = mask;
21386 || (mask & ix86_isa_flags) != 0
21387 || (lang_hooks.builtin_function
21388 == lang_hooks.builtin_function_ext_scope))
21391 tree type = ix86_get_builtin_func_type (tcode);
21392 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
21394 ix86_builtins[(int) code] = decl;
21395 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
21399 ix86_builtins[(int) code] = NULL_TREE;
21400 ix86_builtins_isa[(int) code].tcode = tcode;
21401 ix86_builtins_isa[(int) code].name = name;
21402 ix86_builtins_isa[(int) code].const_p = false;
21403 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
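/* Editor's usage sketch (a hypothetical call mirroring the tables
   further below):

     def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_loadupd",
                  V2DF_FTYPE_PCDOUBLE, IX86_BUILTIN_LOADUPD);

   returns the built decl when SSE2 is already enabled (or when the
   front end uses the ext_scope hook), and NULL_TREE when the
   declaration is deferred into ix86_builtins_isa.  */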
21410 /* Like def_builtin, but also marks the function decl "const". */
21413 def_builtin_const (int mask, const char *name,
21414 enum ix86_builtin_func_type tcode, enum ix86_builtins code)
21416 tree decl = def_builtin (mask, name, tcode, code);
21418 TREE_READONLY (decl) = 1;
21420 ix86_builtins_isa[(int) code].const_p = true;
21425 /* Add any new builtin functions for a given ISA that may not have been
21426 declared.  This saves a bit of space compared to adding all of the
21427 declarations to the tree up front, whether or not they are used.  */
21430 ix86_add_new_builtins (int isa)
21434 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
21436 if ((ix86_builtins_isa[i].isa & isa) != 0
21437 && ix86_builtins_isa[i].set_and_not_built_p)
21441 /* Don't define the builtin again. */
21442 ix86_builtins_isa[i].set_and_not_built_p = false;
21444 type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
21445 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
21446 type, i, BUILT_IN_MD, NULL,
21449 ix86_builtins[i] = decl;
21450 if (ix86_builtins_isa[i].const_p)
21451 TREE_READONLY (decl) = 1;
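/* Editor's note: this scan covers the whole IX86_BUILTIN_MAX range
   each time the enabled ISA set grows (for example via a target
   attribute or pragma).  Clearing set_and_not_built_p before the
   decl is added guarantees each deferred builtin is materialized at
   most once.  */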
21456 /* Bits for builtin_description.flag. */
21458 /* Set when we don't support the comparison natively, and should
21459 swap the comparison operands in order to support it.  */
21460 #define BUILTIN_DESC_SWAP_OPERANDS 1
21462 struct builtin_description
21464 const unsigned int mask;
21465 const enum insn_code icode;
21466 const char *const name;
21467 const enum ix86_builtins code;
21468 const enum rtx_code comparison;
21472 static const struct builtin_description bdesc_comi[] =
21474 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
21475 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
21476 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
21477 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
21478 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
21479 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
21480 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
21481 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
21482 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
21483 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
21484 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
21485 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
21486 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
21487 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
21488 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
21489 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
21490 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
21491 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
21492 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
21493 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
21494 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
21495 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
21496 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
21497 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
21500 static const struct builtin_description bdesc_pcmpestr[] =
21503 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
21504 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
21505 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
21506 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
21507 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
21508 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
21509 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
21512 static const struct builtin_description bdesc_pcmpistr[] =
21515 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
21516 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
21517 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
21518 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
21519 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
21520 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
21521 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
21524 /* Special builtins with variable number of arguments. */
21525 static const struct builtin_description bdesc_special_args[] =
21527 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
21528 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
21531 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21534 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21537 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21538 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21539 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21541 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21542 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21543 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21544 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21546 /* SSE or 3DNow!A */
21547 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21548 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
21551 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21552 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21553 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21554 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
21555 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21556 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
21557 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
21558 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
21559 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21561 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21562 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21565 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21568 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
21571 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21572 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21575 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
21576 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
21578 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21579 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21580 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21581 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
21582 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
21584 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21585 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21586 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21587 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21588 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21589 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
21590 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21592 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
21593 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21594 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21596 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
21597 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
21598 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
21599 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
21600 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
21601 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
21602 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
21603 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
21605 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
21606 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
21607 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
21608 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
21609 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
21610 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
21614 /* Builtins with variable number of arguments. */
21615 static const struct builtin_description bdesc_args[] =
21617 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
21618 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
21619 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
21620 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21621 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21622 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21623 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21626 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21627 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21628 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21629 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21630 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21631 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21633 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21634 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21635 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21636 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21637 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21638 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21639 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21640 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21642 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21643 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21645 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21646 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
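  /* Note: the _COUNT suffix in the shift signatures above marks the
     two flavours of shift count: ..._SI_COUNT takes the count as an
     ordinary scalar integer, while ..._V4HI_V4HI_COUNT (and friends)
     take it in the low part of a second vector operand.  The
     argument-expansion code later in this file is what reconciles the
     count operand with the mode the insn pattern expects.  */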

  /* 3DNow! */
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },

  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  /* 3DNow!A */
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },

  /* SSE */
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
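  /* There are no separate insn patterns for the "greater" compares:
     as the _SWAP signatures above show, cmpgt/cmpge (and their
     scalar and negated variants) reuse the lt/le/unge/ungt
     comparisons with the two input operands exchanged by the
     expander.  */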

  { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },

  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
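  /* _VEC_MERGE: sqrtss, rsqrtss and rcpss compute on element 0 only
     and merge the remaining elements from an existing vector.  The
     builtin exposes a single operand, so the expander presumably
     supplies that same operand for both uses of the vec_merge
     pattern.  */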

  /* SSE MMX or 3Dnow!A */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },

  /* SSE2 */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },
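  /* CODE_FOR_nothing above means there is no single named insn for
     these variable permutations; they are expected to be open-coded
     by the expander rather than emitted through this table's icode
     field.  */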

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },
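  /* A zero in the name slot, as for FABSQ and COPYSIGNQ above (and
     the AES and PCLMUL entries below), means the entry contributes
     no "__builtin_ia32_*" declaration of its own; such builtins are
     presumably declared separately, under other names or extra ISA
     guards.  */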

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

  /* SSE2 MMX */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },

  /* SSE3 */
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  /* SSSE3 */
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
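  /* _INT_CONVERT: the builtin's vector type does not match the insn
     pattern's mode (palignr128 is declared on V2DI but
     CODE_FOR_ssse3_palignrti works on TImode, and likewise for the
     DImode variant), so the expander has to insert the mode
     conversions around the call.  */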

  /* SSE4.1 */
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
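  /* All three ptest builtins share CODE_FOR_sse4_1_ptest; the
     comparison field (EQ, LTU, GTU) selects which condition of the
     PTEST flag result is returned: ZF set, CF set, or both clear,
     respectively.  */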

  /* SSE4.2 */
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
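  /* Usage sketch for the crc32 builtins: each call folds one chunk
     of input into a running CRC-32C value, e.g.

	unsigned int crc = 0;
	crc = __builtin_ia32_crc32qi (crc, byte);

     processes a single byte; the hi/si/di variants consume 16, 32
     and 64 bits at a time.  */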

  /* SSE4A */
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  /* AES */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  /* PCLMUL */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },

  /* AVX */
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22185 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22186 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22187 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
22188 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
22189 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },
22191 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22192 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22193 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22195 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22196 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22197 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22198 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22199 { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22201 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22203 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22204 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22206 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22207 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22208 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22209 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22211 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
22212 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
22213 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
22214 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si_si256, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
22215 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps_ps256, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
22216 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd_pd256, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
22218 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22219 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22220 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22221 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22222 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22223 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22224 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22225 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22226 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22227 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22228 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22229 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22230 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22231 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22232 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22234 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
22235 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
22237 { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
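/* Illustrative note (not part of the tables): the vtest/ptest entries above
   register one insn pattern under three names, distinguished only by the
   comparison code; the expander turns EQ into a read of ZF (testz), LTU
   into CF (testc) and GTU into "neither flag set" (testnzc).  A
   hypothetical use, with m a __v4df mask:

     if (__builtin_ia32_vtestzpd256 (m, m))
       ...all sign bits of m are clear...  */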
22240 /* FMA4 and XOP. */
22241 #define MULTI_ARG_4_DF2_DI_I V2DF_FTYPE_V2DF_V2DF_V2DI_INT
22242 #define MULTI_ARG_4_DF2_DI_I1 V4DF_FTYPE_V4DF_V4DF_V4DI_INT
22243 #define MULTI_ARG_4_SF2_SI_I V4SF_FTYPE_V4SF_V4SF_V4SI_INT
22244 #define MULTI_ARG_4_SF2_SI_I1 V8SF_FTYPE_V8SF_V8SF_V8SI_INT
22245 #define MULTI_ARG_3_SF V4SF_FTYPE_V4SF_V4SF_V4SF
22246 #define MULTI_ARG_3_DF V2DF_FTYPE_V2DF_V2DF_V2DF
22247 #define MULTI_ARG_3_SF2 V8SF_FTYPE_V8SF_V8SF_V8SF
22248 #define MULTI_ARG_3_DF2 V4DF_FTYPE_V4DF_V4DF_V4DF
22249 #define MULTI_ARG_3_DI V2DI_FTYPE_V2DI_V2DI_V2DI
22250 #define MULTI_ARG_3_SI V4SI_FTYPE_V4SI_V4SI_V4SI
22251 #define MULTI_ARG_3_SI_DI V4SI_FTYPE_V4SI_V4SI_V2DI
22252 #define MULTI_ARG_3_HI V8HI_FTYPE_V8HI_V8HI_V8HI
22253 #define MULTI_ARG_3_HI_SI V8HI_FTYPE_V8HI_V8HI_V4SI
22254 #define MULTI_ARG_3_QI V16QI_FTYPE_V16QI_V16QI_V16QI
22255 #define MULTI_ARG_3_DI2 V4DI_FTYPE_V4DI_V4DI_V4DI
22256 #define MULTI_ARG_3_SI2 V8SI_FTYPE_V8SI_V8SI_V8SI
22257 #define MULTI_ARG_3_HI2 V16HI_FTYPE_V16HI_V16HI_V16HI
22258 #define MULTI_ARG_3_QI2 V32QI_FTYPE_V32QI_V32QI_V32QI
22259 #define MULTI_ARG_2_SF V4SF_FTYPE_V4SF_V4SF
22260 #define MULTI_ARG_2_DF V2DF_FTYPE_V2DF_V2DF
22261 #define MULTI_ARG_2_DI V2DI_FTYPE_V2DI_V2DI
22262 #define MULTI_ARG_2_SI V4SI_FTYPE_V4SI_V4SI
22263 #define MULTI_ARG_2_HI V8HI_FTYPE_V8HI_V8HI
22264 #define MULTI_ARG_2_QI V16QI_FTYPE_V16QI_V16QI
22265 #define MULTI_ARG_2_DI_IMM V2DI_FTYPE_V2DI_SI
22266 #define MULTI_ARG_2_SI_IMM V4SI_FTYPE_V4SI_SI
22267 #define MULTI_ARG_2_HI_IMM V8HI_FTYPE_V8HI_SI
22268 #define MULTI_ARG_2_QI_IMM V16QI_FTYPE_V16QI_SI
22269 #define MULTI_ARG_2_DI_CMP V2DI_FTYPE_V2DI_V2DI_CMP
22270 #define MULTI_ARG_2_SI_CMP V4SI_FTYPE_V4SI_V4SI_CMP
22271 #define MULTI_ARG_2_HI_CMP V8HI_FTYPE_V8HI_V8HI_CMP
22272 #define MULTI_ARG_2_QI_CMP V16QI_FTYPE_V16QI_V16QI_CMP
22273 #define MULTI_ARG_2_SF_TF V4SF_FTYPE_V4SF_V4SF_TF
22274 #define MULTI_ARG_2_DF_TF V2DF_FTYPE_V2DF_V2DF_TF
22275 #define MULTI_ARG_2_DI_TF V2DI_FTYPE_V2DI_V2DI_TF
22276 #define MULTI_ARG_2_SI_TF V4SI_FTYPE_V4SI_V4SI_TF
22277 #define MULTI_ARG_2_HI_TF V8HI_FTYPE_V8HI_V8HI_TF
22278 #define MULTI_ARG_2_QI_TF V16QI_FTYPE_V16QI_V16QI_TF
22279 #define MULTI_ARG_1_SF V4SF_FTYPE_V4SF
22280 #define MULTI_ARG_1_DF V2DF_FTYPE_V2DF
22281 #define MULTI_ARG_1_SF2 V8SF_FTYPE_V8SF
22282 #define MULTI_ARG_1_DF2 V4DF_FTYPE_V4DF
22283 #define MULTI_ARG_1_DI V2DI_FTYPE_V2DI
22284 #define MULTI_ARG_1_SI V4SI_FTYPE_V4SI
22285 #define MULTI_ARG_1_HI V8HI_FTYPE_V8HI
22286 #define MULTI_ARG_1_QI V16QI_FTYPE_V16QI
22287 #define MULTI_ARG_1_SI_DI V2DI_FTYPE_V4SI
22288 #define MULTI_ARG_1_HI_DI V2DI_FTYPE_V8HI
22289 #define MULTI_ARG_1_HI_SI V4SI_FTYPE_V8HI
22290 #define MULTI_ARG_1_QI_DI V2DI_FTYPE_V16QI
22291 #define MULTI_ARG_1_QI_SI V4SI_FTYPE_V16QI
22292 #define MULTI_ARG_1_QI_HI V8HI_FTYPE_V16QI
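/* Worked example of the encoding above (comment only): MULTI_ARG_3_SF is
   V4SF_FTYPE_V4SF_V4SF_V4SF, so a builtin tagged with it is registered
   with the prototype

     __v4sf f (__v4sf, __v4sf, __v4sf);

   The _CMP and _TF variants take only two vector arguments at the C level;
   the comparison is carried in the table's rtx_code field and folded into
   the insn by ix86_expand_multi_arg_builtin.  */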
22294 static const struct builtin_description bdesc_multi_arg[] =
22296 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv4sf4, "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22297 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv2df4, "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22298 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4sf4, "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22299 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv2df4, "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22300 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv4sf4, "__builtin_ia32_vfmsubss", IX86_BUILTIN_VFMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22301 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv2df4, "__builtin_ia32_vfmsubsd", IX86_BUILTIN_VFMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22302 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4sf4, "__builtin_ia32_vfmsubps", IX86_BUILTIN_VFMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22303 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv2df4, "__builtin_ia32_vfmsubpd", IX86_BUILTIN_VFMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22305 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv4sf4, "__builtin_ia32_vfnmaddss", IX86_BUILTIN_VFNMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22306 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv2df4, "__builtin_ia32_vfnmaddsd", IX86_BUILTIN_VFNMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22307 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4sf4, "__builtin_ia32_vfnmaddps", IX86_BUILTIN_VFNMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22308 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv2df4, "__builtin_ia32_vfnmaddpd", IX86_BUILTIN_VFNMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22309 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv4sf4, "__builtin_ia32_vfnmsubss", IX86_BUILTIN_VFNMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22310 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv2df4, "__builtin_ia32_vfnmsubsd", IX86_BUILTIN_VFNMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22311 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4sf4, "__builtin_ia32_vfnmsubps", IX86_BUILTIN_VFNMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22312 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv2df4, "__builtin_ia32_vfnmsubpd", IX86_BUILTIN_VFNMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22314 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4sf4, "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22315 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv2df4, "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22316 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4sf4, "__builtin_ia32_vfmsubaddps", IX86_BUILTIN_VFMSUBADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22317 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv2df4, "__builtin_ia32_vfmsubaddpd", IX86_BUILTIN_VFMSUBADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22319 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv8sf4256, "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22320 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4df4256, "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22321 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv8sf4256, "__builtin_ia32_vfmsubps256", IX86_BUILTIN_VFMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22322 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4df4256, "__builtin_ia32_vfmsubpd256", IX86_BUILTIN_VFMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22324 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv8sf4256, "__builtin_ia32_vfnmaddps256", IX86_BUILTIN_VFNMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22325 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4df4256, "__builtin_ia32_vfnmaddpd256", IX86_BUILTIN_VFNMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22326 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv8sf4256, "__builtin_ia32_vfnmsubps256", IX86_BUILTIN_VFNMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22327 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4df4256, "__builtin_ia32_vfnmsubpd256", IX86_BUILTIN_VFNMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22329 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv8sf4, "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22330 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4df4, "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22331 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv8sf4, "__builtin_ia32_vfmsubaddps256", IX86_BUILTIN_VFMSUBADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22332 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4df4, "__builtin_ia32_vfmsubaddpd256", IX86_BUILTIN_VFMSUBADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22334 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
22335 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
22336 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
22337 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
22338 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi", IX86_BUILTIN_VPCMOV_V16QI, UNKNOWN, (int)MULTI_ARG_3_QI },
22339 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
22340 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },
22342 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22343 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22344 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
22345 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
22346 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
22347 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22348 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22350 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },
22352 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22353 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22354 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22355 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22356 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22357 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22358 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22359 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22360 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22361 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22362 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22363 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22365 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22366 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
22367 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
22368 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
22369 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
22370 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
22371 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
22372 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
22373 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22374 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
22375 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
22376 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
22377 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22378 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
22379 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
22380 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },
22382 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
22383 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
22384 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
22385 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
22386 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2256, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
22387 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2256, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },
22389 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22390 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22391 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22392 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22393 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22394 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22395 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22396 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22397 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22398 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22399 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22400 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22401 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22402 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22403 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22405 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
22406 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22407 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22408 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
22409 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
22410 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
22411 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
22413 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
22414 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22415 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22416 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
22417 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
22418 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
22419 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
22421 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
22422 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22423 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22424 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
22425 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
22426 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
22427 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
22429 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22430 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22431 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22432 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
22433 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
22434 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
22435 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
22437 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
22438 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22439 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22440 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
22441 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
22442 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
22443 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
22445 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
22446 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22447 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22448 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
22449 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
22450 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
22451 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
22453 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
22454 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22455 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22456 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
22457 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
22458 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
22459 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
22461 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22462 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22463 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22464 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
22465 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
22466 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
22467 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
22469 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22470 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22471 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22472 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22473 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub", IX86_BUILTIN_VPCOMFALSEUB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22474 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw", IX86_BUILTIN_VPCOMFALSEUW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22475 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud", IX86_BUILTIN_VPCOMFALSEUD, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22476 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq", IX86_BUILTIN_VPCOMFALSEUQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22478 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22479 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22480 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22481 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22482 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22483 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22484 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22485 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22487 { OPTION_MASK_ISA_AVX, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
22488 { OPTION_MASK_ISA_AVX, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
22489 { OPTION_MASK_ISA_AVX, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
22490 { OPTION_MASK_ISA_AVX, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
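/* Usage sketch for the FMA4 entries above (illustrative; a, b and c are
   hypothetical __v4sf values):

     __v4sf r = __builtin_ia32_vfmaddps (a, b, c);   computes a * b + c

   The XOP vpcom entries follow the same scheme as the other tables: the
   comparison is encoded in the rtx_code field, not in a user-visible
   immediate.  */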
22494 /* Set up all the MMX/SSE builtins, even builtins for instructions that are not
22495 in the current target ISA to allow the user to compile particular modules
22496 with different target specific options that differ from the command line
22497 options.  */
22498 static void
22499 ix86_init_mmx_sse_builtins (void)
22501 const struct builtin_description * d;
22502 enum ix86_builtin_func_type ftype;
22503 size_t i;
22505 /* Add all special builtins with variable number of operands. */
22506 for (i = 0, d = bdesc_special_args;
22507 i < ARRAY_SIZE (bdesc_special_args);
22508 i++, d++)
22513 ftype = (enum ix86_builtin_func_type) d->flag;
22514 def_builtin (d->mask, d->name, ftype, d->code);
22517 /* Add all builtins with variable number of operands. */
22518 for (i = 0, d = bdesc_args;
22519 i < ARRAY_SIZE (bdesc_args);
22520 i++, d++)
22525 ftype = (enum ix86_builtin_func_type) d->flag;
22526 def_builtin_const (d->mask, d->name, ftype, d->code);
22529 /* pcmpestr[im] insns. */
22530 for (i = 0, d = bdesc_pcmpestr;
22531 i < ARRAY_SIZE (bdesc_pcmpestr);
22532 i++, d++)
22534 if (d->code == IX86_BUILTIN_PCMPESTRM128)
22535 ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
22536 else
22537 ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
22538 def_builtin_const (d->mask, d->name, ftype, d->code);
22541 /* pcmpistr[im] insns. */
22542 for (i = 0, d = bdesc_pcmpistr;
22543 i < ARRAY_SIZE (bdesc_pcmpistr);
22544 i++, d++)
22546 if (d->code == IX86_BUILTIN_PCMPISTRM128)
22547 ftype = V16QI_FTYPE_V16QI_V16QI_INT;
22548 else
22549 ftype = INT_FTYPE_V16QI_V16QI_INT;
22550 def_builtin_const (d->mask, d->name, ftype, d->code);
22553 /* comi/ucomi insns. */
22554 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
22556 if (d->mask == OPTION_MASK_ISA_SSE2)
22557 ftype = INT_FTYPE_V2DF_V2DF;
22558 else
22559 ftype = INT_FTYPE_V4SF_V4SF;
22560 def_builtin_const (d->mask, d->name, ftype, d->code);
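/* Illustrative example (hypothetical __v2df operands x and y): the SSE2
   mask selects the double-precision prototype, so

     int eq = __builtin_ia32_comisdeq (x, y);

   while the plain SSE comi builtins take __v4sf arguments.  */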
22564 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
22565 VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
22566 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
22567 UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);
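/* Minimal usage sketch for the MXCSR builtins just defined (user code,
   not compiler code; the 0x8040 mask, FTZ plus DAZ, is only an example):

     unsigned int csr = __builtin_ia32_stmxcsr ();
     __builtin_ia32_ldmxcsr (csr | 0x8040);  */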
22569 /* SSE or 3DNow!A */
22570 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22571 "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
22572 IX86_BUILTIN_MASKMOVQ);
22575 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
22576 VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);
22578 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
22579 VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
22580 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
22581 VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);
22584 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
22585 VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
22586 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
22587 VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);
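/* Usage sketch (illustrative; addr is a hypothetical pointer): these are
   the primitives behind _mm_monitor and _mm_mwait:

     __builtin_ia32_monitor (addr, 0, 0);
     __builtin_ia32_mwait (0, 0);  */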
22590 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
22591 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
22592 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
22593 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
22594 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
22595 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
22596 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
22597 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
22598 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
22599 V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
22600 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
22601 V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);
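/* Illustrative AES round sketch (state and rk are hypothetical __v2di
   values holding 128-bit blocks):

     state = __builtin_ia32_aesenc128 (state, rk);
     state = __builtin_ia32_aesenclast128 (state, rk);  */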
22604 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
22605 V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);
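/* Illustrative use of the carry-less multiply (hypothetical __v2di a, b;
   the immediate selects which 64-bit halves are multiplied):

     __v2di prod = __builtin_ia32_pclmulqdq128 (a, b, 0x00);  */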
22607 /* MMX access to the vec_init patterns. */
22608 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
22609 V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);
22611 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
22612 V4HI_FTYPE_HI_HI_HI_HI,
22613 IX86_BUILTIN_VEC_INIT_V4HI);
22615 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
22616 V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
22617 IX86_BUILTIN_VEC_INIT_V8QI);
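/* Usage sketch for the vec_init builtins (hypothetical user code): each
   scalar argument becomes one element of the MMX vector.

     __v2si v = __builtin_ia32_vec_init_v2si (1, 2);  */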
22619 /* Access to the vec_extract patterns. */
22620 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
22621 DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
22622 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
22623 DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
22624 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
22625 FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
22626 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
22627 SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
22628 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
22629 HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);
22631 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22632 "__builtin_ia32_vec_ext_v4hi",
22633 HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);
22635 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
22636 SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);
22638 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
22639 QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);
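/* Usage sketch for the vec_extract builtins (v is a hypothetical __v4sf;
   the lane selector is expected to be a compile-time constant):

     float lane0 = __builtin_ia32_vec_ext_v4sf (v, 0);  */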
22641 /* Access to the vec_set patterns. */
22642 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
22643 "__builtin_ia32_vec_set_v2di",
22644 V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);
22646 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
22647 V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);
22649 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
22650 V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);
22652 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
22653 V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);
22655 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22656 "__builtin_ia32_vec_set_v4hi",
22657 V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);
22659 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
22660 V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
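/* Usage sketch for the vec_set builtins (hypothetical operands; writes
   element 2 of a __v4sf):

     v = __builtin_ia32_vec_set_v4sf (v, 1.0f, 2);  */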
22662 /* Add FMA4 and XOP multi-arg instructions.  */
22663 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
22668 ftype = (enum ix86_builtin_func_type) d->flag;
22669 def_builtin_const (d->mask, d->name, ftype, d->code);
22673 /* Internal method for ix86_init_builtins. */
22675 static void
22676 ix86_init_builtins_va_builtins_abi (void)
22678 tree ms_va_ref, sysv_va_ref;
22679 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
22680 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
22681 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
22682 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
22686 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
22687 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
22688 ms_va_ref = build_reference_type (ms_va_list_type_node);
22689 sysv_va_ref =
22690 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
22692 fnvoid_va_end_ms =
22693 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22694 fnvoid_va_start_ms =
22695 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22696 fnvoid_va_end_sysv =
22697 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
22698 fnvoid_va_start_sysv =
22699 build_varargs_function_type_list (void_type_node, sysv_va_ref,
22700 NULL_TREE);
22701 fnvoid_va_copy_ms =
22702 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
22703 NULL_TREE);
22704 fnvoid_va_copy_sysv =
22705 build_function_type_list (void_type_node, sysv_va_ref,
22706 sysv_va_ref, NULL_TREE);
22708 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
22709 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
22710 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
22711 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
22712 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
22713 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
22714 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
22715 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22716 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
22717 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22718 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
22719 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
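/* Illustrative use of the ABI-tagged varargs builtins registered above
   (hypothetical user code; the enclosing function must be ms_abi):

     void __attribute__ ((ms_abi)) f (int n, ...)
     {
       __builtin_ms_va_list ap;
       __builtin_ms_va_start (ap, n);
       ...
       __builtin_ms_va_end (ap);
     }  */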
22722 static void
22723 ix86_init_builtin_types (void)
22725 tree float128_type_node, float80_type_node;
22727 /* The __float80 type. */
22728 float80_type_node = long_double_type_node;
22729 if (TYPE_MODE (float80_type_node) != XFmode)
22731 /* long double is not XFmode here, so make __float80 a distinct 80-bit type.  */
22732 float80_type_node = make_node (REAL_TYPE);
22734 TYPE_PRECISION (float80_type_node) = 80;
22735 layout_type (float80_type_node);
22737 (*lang_hooks.types.register_builtin_type) (float80_type_node, "__float80");
22739 /* The __float128 type. */
22740 float128_type_node = make_node (REAL_TYPE);
22741 TYPE_PRECISION (float128_type_node) = 128;
22742 layout_type (float128_type_node);
22743 (*lang_hooks.types.register_builtin_type) (float128_type_node, "__float128");
22745 /* This macro is built by i386-builtin-types.awk. */
22746 DEFINE_BUILTIN_PRIMITIVE_TYPES;
22749 static void
22750 ix86_init_builtins (void)
22752 tree t;
22754 ix86_init_builtin_types ();
22756 /* TFmode support builtins. */
22757 def_builtin_const (0, "__builtin_infq",
22758 FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
22759 def_builtin_const (0, "__builtin_huge_valq",
22760 FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);
22762 /* We will expand them to a normal call if SSE2 isn't available since
22763 they are used by libgcc. */
22764 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
22765 t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
22766 BUILT_IN_MD, "__fabstf2", NULL_TREE);
22767 TREE_READONLY (t) = 1;
22768 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;
22770 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
22771 t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
22772 BUILT_IN_MD, "__copysigntf3", NULL_TREE);
22773 TREE_READONLY (t) = 1;
22774 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
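/* Usage sketch for the TFmode helpers just registered (x hypothetical):

     __float128 q = __builtin_fabsq (x);
     q = __builtin_copysignq (q, -1.0);

   Without SSE2 these become calls to __fabstf2 and __copysigntf3, the
   names given above.  */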
22776 ix86_init_mmx_sse_builtins ();
22779 ix86_init_builtins_va_builtins_abi ();
22782 /* Return the ix86 builtin for CODE. */
22784 static tree
22785 ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
22787 if (code >= IX86_BUILTIN_MAX)
22788 return error_mark_node;
22790 return ix86_builtins[code];
22793 /* Errors in the source file can cause expand_expr to return const0_rtx
22794 where we expect a vector. To avoid crashing, use one of the vector
22795 clear instructions. */
22796 static rtx
22797 safe_vector_operand (rtx x, enum machine_mode mode)
22799 if (x == const0_rtx)
22800 x = CONST0_RTX (mode);
22801 return x;
22804 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
22806 static rtx
22807 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
22809 rtx pat;
22810 tree arg0 = CALL_EXPR_ARG (exp, 0);
22811 tree arg1 = CALL_EXPR_ARG (exp, 1);
22812 rtx op0 = expand_normal (arg0);
22813 rtx op1 = expand_normal (arg1);
22814 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22815 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
22816 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
22818 if (VECTOR_MODE_P (mode0))
22819 op0 = safe_vector_operand (op0, mode0);
22820 if (VECTOR_MODE_P (mode1))
22821 op1 = safe_vector_operand (op1, mode1);
22823 if (optimize || !target
22824 || GET_MODE (target) != tmode
22825 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
22826 target = gen_reg_rtx (tmode);
22828 if (GET_MODE (op1) == SImode && mode1 == TImode)
22830 rtx x = gen_reg_rtx (V4SImode);
22831 emit_insn (gen_sse2_loadd (x, op1));
22832 op1 = gen_lowpart (TImode, x);
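/* Note: shift-style patterns take their count as the low 64 bits of a
   TImode operand; loading the SImode count into a V4SI register and
   taking the TImode lowpart gets there with a single movd.  */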
22835 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
22836 op0 = copy_to_mode_reg (mode0, op0);
22837 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
22838 op1 = copy_to_mode_reg (mode1, op1);
22840 pat = GEN_FCN (icode) (target, op0, op1);
22841 if (! pat)
22842 return 0;
22843 emit_insn (pat);
22845 return target;
22849 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
22851 static rtx
22852 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
22853 enum ix86_builtin_func_type m_type,
22854 enum rtx_code sub_code)
22856 rtx pat;
22857 unsigned int i, nargs;
22859 bool comparison_p = false;
22860 bool tf_p = false;
22861 bool last_arg_constant = false;
22862 int num_memory = 0;
22863 struct {
22864 rtx op;
22865 enum machine_mode mode;
22866 } args[4];
22868 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22872 case MULTI_ARG_4_DF2_DI_I:
22873 case MULTI_ARG_4_DF2_DI_I1:
22874 case MULTI_ARG_4_SF2_SI_I:
22875 case MULTI_ARG_4_SF2_SI_I1:
22876 nargs = 4;
22877 last_arg_constant = true;
22878 break;
22880 case MULTI_ARG_3_SF:
22881 case MULTI_ARG_3_DF:
22882 case MULTI_ARG_3_SF2:
22883 case MULTI_ARG_3_DF2:
22884 case MULTI_ARG_3_DI:
22885 case MULTI_ARG_3_SI:
22886 case MULTI_ARG_3_SI_DI:
22887 case MULTI_ARG_3_HI:
22888 case MULTI_ARG_3_HI_SI:
22889 case MULTI_ARG_3_QI:
22890 case MULTI_ARG_3_DI2:
22891 case MULTI_ARG_3_SI2:
22892 case MULTI_ARG_3_HI2:
22893 case MULTI_ARG_3_QI2:
22894 nargs = 3;
22895 break;
22897 case MULTI_ARG_2_SF:
22898 case MULTI_ARG_2_DF:
22899 case MULTI_ARG_2_DI:
22900 case MULTI_ARG_2_SI:
22901 case MULTI_ARG_2_HI:
22902 case MULTI_ARG_2_QI:
22903 nargs = 2;
22904 break;
22906 case MULTI_ARG_2_DI_IMM:
22907 case MULTI_ARG_2_SI_IMM:
22908 case MULTI_ARG_2_HI_IMM:
22909 case MULTI_ARG_2_QI_IMM:
22910 nargs = 2;
22911 last_arg_constant = true;
22912 break;
22914 case MULTI_ARG_1_SF:
22915 case MULTI_ARG_1_DF:
22916 case MULTI_ARG_1_SF2:
22917 case MULTI_ARG_1_DF2:
22918 case MULTI_ARG_1_DI:
22919 case MULTI_ARG_1_SI:
22920 case MULTI_ARG_1_HI:
22921 case MULTI_ARG_1_QI:
22922 case MULTI_ARG_1_SI_DI:
22923 case MULTI_ARG_1_HI_DI:
22924 case MULTI_ARG_1_HI_SI:
22925 case MULTI_ARG_1_QI_DI:
22926 case MULTI_ARG_1_QI_SI:
22927 case MULTI_ARG_1_QI_HI:
22928 nargs = 1;
22929 break;
22931 case MULTI_ARG_2_DI_CMP:
22932 case MULTI_ARG_2_SI_CMP:
22933 case MULTI_ARG_2_HI_CMP:
22934 case MULTI_ARG_2_QI_CMP:
22935 nargs = 2;
22936 comparison_p = true;
22937 break;
22939 case MULTI_ARG_2_SF_TF:
22940 case MULTI_ARG_2_DF_TF:
22941 case MULTI_ARG_2_DI_TF:
22942 case MULTI_ARG_2_SI_TF:
22943 case MULTI_ARG_2_HI_TF:
22944 case MULTI_ARG_2_QI_TF:
22945 nargs = 2;
22946 tf_p = true;
22947 break;
22949 default:
22950 gcc_unreachable ();
22953 if (optimize || !target
22954 || GET_MODE (target) != tmode
22955 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
22956 target = gen_reg_rtx (tmode);
22958 gcc_assert (nargs <= 4);
22960 for (i = 0; i < nargs; i++)
22962 tree arg = CALL_EXPR_ARG (exp, i);
22963 rtx op = expand_normal (arg);
22964 int adjust = (comparison_p) ? 1 : 0;
22965 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
22967 if (last_arg_constant && i == nargs-1)
22969 if (!CONST_INT_P (op))
22971 error ("last argument must be an immediate");
22972 return gen_reg_rtx (tmode);
22977 if (VECTOR_MODE_P (mode))
22978 op = safe_vector_operand (op, mode);
22980 /* If we aren't optimizing, only allow one memory operand to be
22981 generated.  */
22982 if (memory_operand (op, mode))
22983 num_memory++;
22985 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
22987 if (optimize
22988 || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
22989 || num_memory > 1)
22990 op = force_reg (mode, op);
22993 args[i].op = op;
22994 args[i].mode = mode;
23000 pat = GEN_FCN (icode) (target, args[0].op);
23005 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
23006 GEN_INT ((int)sub_code));
23007 else if (! comparison_p)
23008 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
23011 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
23015 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
23020 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
23024 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op, args[3].op);
23028 gcc_unreachable ();
23029 }
23031 if (! pat)
23032 return 0;
23034 emit_insn (pat);
23035 return target;
23038 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
23039 insns with vec_merge. */
23041 static rtx
23042 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
23043 rtx target)
23045 rtx pat;
23046 tree arg0 = CALL_EXPR_ARG (exp, 0);
23047 rtx op1, op0 = expand_normal (arg0);
23048 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23049 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23051 if (optimize || !target
23052 || GET_MODE (target) != tmode
23053 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23054 target = gen_reg_rtx (tmode);
23056 if (VECTOR_MODE_P (mode0))
23057 op0 = safe_vector_operand (op0, mode0);
23059 if ((optimize && !register_operand (op0, mode0))
23060 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
23061 op0 = copy_to_mode_reg (mode0, op0);
23063 op1 = op0;
23064 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
23065 op1 = copy_to_mode_reg (mode0, op1);
23067 pat = GEN_FCN (icode) (target, op0, op1);
23068 if (! pat)
23069 return 0;
23070 emit_insn (pat);
23072 return target;
23074 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
23076 static rtx
23077 ix86_expand_sse_compare (const struct builtin_description *d,
23078 tree exp, rtx target, bool swap)
23080 rtx pat;
23081 tree arg0 = CALL_EXPR_ARG (exp, 0);
23082 tree arg1 = CALL_EXPR_ARG (exp, 1);
23083 rtx op0 = expand_normal (arg0);
23084 rtx op1 = expand_normal (arg1);
23085 rtx op2;
23086 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
23087 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
23088 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
23089 enum rtx_code comparison = d->comparison;
23091 if (VECTOR_MODE_P (mode0))
23092 op0 = safe_vector_operand (op0, mode0);
23093 if (VECTOR_MODE_P (mode1))
23094 op1 = safe_vector_operand (op1, mode1);
23096 /* Swap operands if we have a comparison that isn't available in
23097 SSE.  */
23098 if (swap)
23099 {
23100 rtx tmp = gen_reg_rtx (mode1);
23101 emit_move_insn (tmp, op1);
23102 op1 = op0;
23103 op0 = tmp;
23104 }
23106 if (optimize || !target
23107 || GET_MODE (target) != tmode
23108 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
23109 target = gen_reg_rtx (tmode);
23111 if ((optimize && !register_operand (op0, mode0))
23112 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
23113 op0 = copy_to_mode_reg (mode0, op0);
23114 if ((optimize && !register_operand (op1, mode1))
23115 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
23116 op1 = copy_to_mode_reg (mode1, op1);
23118 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
23119 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
23120 if (! pat)
23121 return 0;
23122 emit_insn (pat);
23124 return target;
23126 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
23128 static rtx
23129 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
23131 rtx pat;
23133 tree arg0 = CALL_EXPR_ARG (exp, 0);
23134 tree arg1 = CALL_EXPR_ARG (exp, 1);
23135 rtx op0 = expand_normal (arg0);
23136 rtx op1 = expand_normal (arg1);
23137 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23138 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23139 enum rtx_code comparison = d->comparison;
23141 if (VECTOR_MODE_P (mode0))
23142 op0 = safe_vector_operand (op0, mode0);
23143 if (VECTOR_MODE_P (mode1))
23144 op1 = safe_vector_operand (op1, mode1);
23146 /* Swap operands if we have a comparison that isn't available in
23147 SSE.  */
23148 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23149 {
23150 rtx tmp = op1;
23151 op1 = op0;
23152 op0 = tmp;
23153 }
23155 target = gen_reg_rtx (SImode);
23156 emit_move_insn (target, const0_rtx);
23157 target = gen_rtx_SUBREG (QImode, target, 0);
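/* The SImode pseudo was zeroed first so that setting only its QImode low
   part through STRICT_LOW_PART below still leaves a clean 0/1 value in
   the full register.  */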
23159 if ((optimize && !register_operand (op0, mode0))
23160 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23161 op0 = copy_to_mode_reg (mode0, op0);
23162 if ((optimize && !register_operand (op1, mode1))
23163 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23164 op1 = copy_to_mode_reg (mode1, op1);
23166 pat = GEN_FCN (d->icode) (op0, op1);
23170 emit_insn (gen_rtx_SET (VOIDmode,
23171 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23172 gen_rtx_fmt_ee (comparison, QImode,
23176 return SUBREG_REG (target);
23179 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
23182 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
23186 tree arg0 = CALL_EXPR_ARG (exp, 0);
23187 tree arg1 = CALL_EXPR_ARG (exp, 1);
23188 rtx op0 = expand_normal (arg0);
23189 rtx op1 = expand_normal (arg1);
23190 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23191 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23192 enum rtx_code comparison = d->comparison;
23194 if (VECTOR_MODE_P (mode0))
23195 op0 = safe_vector_operand (op0, mode0);
23196 if (VECTOR_MODE_P (mode1))
23197 op1 = safe_vector_operand (op1, mode1);
23199 target = gen_reg_rtx (SImode);
23200 emit_move_insn (target, const0_rtx);
23201 target = gen_rtx_SUBREG (QImode, target, 0);
23203 if ((optimize && !register_operand (op0, mode0))
23204 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23205 op0 = copy_to_mode_reg (mode0, op0);
23206 if ((optimize && !register_operand (op1, mode1))
23207 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23208 op1 = copy_to_mode_reg (mode1, op1);
23210 pat = GEN_FCN (d->icode) (op0, op1);
23214 emit_insn (gen_rtx_SET (VOIDmode,
23215 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23216 gen_rtx_fmt_ee (comparison, QImode,
23220 return SUBREG_REG (target);
23223 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
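/* (Illustrative sketch of the three result flavors handled below:
   __builtin_ia32_pcmpestri128 wants the index result in tmode0,
   __builtin_ia32_pcmpestrm128 wants the mask result in tmode1, and the
   flag-testing variants only read a bit of the FLAGS register.) */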
23226 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23227 tree exp, rtx target)
23230 tree arg0 = CALL_EXPR_ARG (exp, 0);
23231 tree arg1 = CALL_EXPR_ARG (exp, 1);
23232 tree arg2 = CALL_EXPR_ARG (exp, 2);
23233 tree arg3 = CALL_EXPR_ARG (exp, 3);
23234 tree arg4 = CALL_EXPR_ARG (exp, 4);
23235 rtx scratch0, scratch1;
23236 rtx op0 = expand_normal (arg0);
23237 rtx op1 = expand_normal (arg1);
23238 rtx op2 = expand_normal (arg2);
23239 rtx op3 = expand_normal (arg3);
23240 rtx op4 = expand_normal (arg4);
23241 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23243 tmode0 = insn_data[d->icode].operand[0].mode;
23244 tmode1 = insn_data[d->icode].operand[1].mode;
23245 modev2 = insn_data[d->icode].operand[2].mode;
23246 modei3 = insn_data[d->icode].operand[3].mode;
23247 modev4 = insn_data[d->icode].operand[4].mode;
23248 modei5 = insn_data[d->icode].operand[5].mode;
23249 modeimm = insn_data[d->icode].operand[6].mode;
23251 if (VECTOR_MODE_P (modev2))
23252 op0 = safe_vector_operand (op0, modev2);
23253 if (VECTOR_MODE_P (modev4))
23254 op2 = safe_vector_operand (op2, modev4);
23256 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23257 op0 = copy_to_mode_reg (modev2, op0);
23258 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
23259 op1 = copy_to_mode_reg (modei3, op1);
23260 if ((optimize && !register_operand (op2, modev4))
23261 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
23262 op2 = copy_to_mode_reg (modev4, op2);
23263 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
23264 op3 = copy_to_mode_reg (modei5, op3);
23266 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
23268 error ("the fifth argument must be an 8-bit immediate");
23272 if (d->code == IX86_BUILTIN_PCMPESTRI128)
23274 if (optimize || !target
23275 || GET_MODE (target) != tmode0
23276 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23277 target = gen_reg_rtx (tmode0);
23279 scratch1 = gen_reg_rtx (tmode1);
23281 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
23283 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
23285 if (optimize || !target
23286 || GET_MODE (target) != tmode1
23287 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23288 target = gen_reg_rtx (tmode1);
23290 scratch0 = gen_reg_rtx (tmode0);
23292 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
23296 gcc_assert (d->flag);
23298 scratch0 = gen_reg_rtx (tmode0);
23299 scratch1 = gen_reg_rtx (tmode1);
23301 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
23311 target = gen_reg_rtx (SImode);
23312 emit_move_insn (target, const0_rtx);
23313 target = gen_rtx_SUBREG (QImode, target, 0);
23316 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23317 gen_rtx_fmt_ee (EQ, QImode,
23318 gen_rtx_REG ((enum machine_mode) d->flag,
23321 return SUBREG_REG (target);
23328 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
23331 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
23332 tree exp, rtx target)
23335 tree arg0 = CALL_EXPR_ARG (exp, 0);
23336 tree arg1 = CALL_EXPR_ARG (exp, 1);
23337 tree arg2 = CALL_EXPR_ARG (exp, 2);
23338 rtx scratch0, scratch1;
23339 rtx op0 = expand_normal (arg0);
23340 rtx op1 = expand_normal (arg1);
23341 rtx op2 = expand_normal (arg2);
23342 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
23344 tmode0 = insn_data[d->icode].operand[0].mode;
23345 tmode1 = insn_data[d->icode].operand[1].mode;
23346 modev2 = insn_data[d->icode].operand[2].mode;
23347 modev3 = insn_data[d->icode].operand[3].mode;
23348 modeimm = insn_data[d->icode].operand[4].mode;
23350 if (VECTOR_MODE_P (modev2))
23351 op0 = safe_vector_operand (op0, modev2);
23352 if (VECTOR_MODE_P (modev3))
23353 op1 = safe_vector_operand (op1, modev3);
23355 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23356 op0 = copy_to_mode_reg (modev2, op0);
23357 if ((optimize && !register_operand (op1, modev3))
23358 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
23359 op1 = copy_to_mode_reg (modev3, op1);
23361 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
23363 error ("the third argument must be an 8-bit immediate");
23367 if (d->code == IX86_BUILTIN_PCMPISTRI128)
23369 if (optimize || !target
23370 || GET_MODE (target) != tmode0
23371 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23372 target = gen_reg_rtx (tmode0);
23374 scratch1 = gen_reg_rtx (tmode1);
23376 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
23378 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
23380 if (optimize || !target
23381 || GET_MODE (target) != tmode1
23382 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23383 target = gen_reg_rtx (tmode1);
23385 scratch0 = gen_reg_rtx (tmode0);
23387 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
23391 gcc_assert (d->flag);
23393 scratch0 = gen_reg_rtx (tmode0);
23394 scratch1 = gen_reg_rtx (tmode1);
23396 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
23406 target = gen_reg_rtx (SImode);
23407 emit_move_insn (target, const0_rtx);
23408 target = gen_rtx_SUBREG (QImode, target, 0);
23411 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23412 gen_rtx_fmt_ee (EQ, QImode,
23413 gen_rtx_REG ((enum machine_mode) d->flag,
23416 return SUBREG_REG (target);
23422 /* Subroutine of ix86_expand_builtin to take care of insns with
23423 a variable number of operands. */
23426 ix86_expand_args_builtin (const struct builtin_description *d,
23427 tree exp, rtx target)
23429 rtx pat, real_target;
23430 unsigned int i, nargs;
23431 unsigned int nargs_constant = 0;
23432 int num_memory = 0;
23436 enum machine_mode mode;
23438 bool last_arg_count = false;
23439 enum insn_code icode = d->icode;
23440 const struct insn_data *insn_p = &insn_data[icode];
23441 enum machine_mode tmode = insn_p->operand[0].mode;
23442 enum machine_mode rmode = VOIDmode;
23444 enum rtx_code comparison = d->comparison;
23446 switch ((enum ix86_builtin_func_type) d->flag)
23448 case INT_FTYPE_V8SF_V8SF_PTEST:
23449 case INT_FTYPE_V4DI_V4DI_PTEST:
23450 case INT_FTYPE_V4DF_V4DF_PTEST:
23451 case INT_FTYPE_V4SF_V4SF_PTEST:
23452 case INT_FTYPE_V2DI_V2DI_PTEST:
23453 case INT_FTYPE_V2DF_V2DF_PTEST:
23454 return ix86_expand_sse_ptest (d, exp, target);
23455 case FLOAT128_FTYPE_FLOAT128:
23456 case FLOAT_FTYPE_FLOAT:
23457 case INT_FTYPE_INT:
23458 case UINT64_FTYPE_INT:
23459 case UINT16_FTYPE_UINT16:
23460 case INT64_FTYPE_INT64:
23461 case INT64_FTYPE_V4SF:
23462 case INT64_FTYPE_V2DF:
23463 case INT_FTYPE_V16QI:
23464 case INT_FTYPE_V8QI:
23465 case INT_FTYPE_V8SF:
23466 case INT_FTYPE_V4DF:
23467 case INT_FTYPE_V4SF:
23468 case INT_FTYPE_V2DF:
23469 case V16QI_FTYPE_V16QI:
23470 case V8SI_FTYPE_V8SF:
23471 case V8SI_FTYPE_V4SI:
23472 case V8HI_FTYPE_V8HI:
23473 case V8HI_FTYPE_V16QI:
23474 case V8QI_FTYPE_V8QI:
23475 case V8SF_FTYPE_V8SF:
23476 case V8SF_FTYPE_V8SI:
23477 case V8SF_FTYPE_V4SF:
23478 case V4SI_FTYPE_V4SI:
23479 case V4SI_FTYPE_V16QI:
23480 case V4SI_FTYPE_V4SF:
23481 case V4SI_FTYPE_V8SI:
23482 case V4SI_FTYPE_V8HI:
23483 case V4SI_FTYPE_V4DF:
23484 case V4SI_FTYPE_V2DF:
23485 case V4HI_FTYPE_V4HI:
23486 case V4DF_FTYPE_V4DF:
23487 case V4DF_FTYPE_V4SI:
23488 case V4DF_FTYPE_V4SF:
23489 case V4DF_FTYPE_V2DF:
23490 case V4SF_FTYPE_V4SF:
23491 case V4SF_FTYPE_V4SI:
23492 case V4SF_FTYPE_V8SF:
23493 case V4SF_FTYPE_V4DF:
23494 case V4SF_FTYPE_V2DF:
23495 case V2DI_FTYPE_V2DI:
23496 case V2DI_FTYPE_V16QI:
23497 case V2DI_FTYPE_V8HI:
23498 case V2DI_FTYPE_V4SI:
23499 case V2DF_FTYPE_V2DF:
23500 case V2DF_FTYPE_V4SI:
23501 case V2DF_FTYPE_V4DF:
23502 case V2DF_FTYPE_V4SF:
23503 case V2DF_FTYPE_V2SI:
23504 case V2SI_FTYPE_V2SI:
23505 case V2SI_FTYPE_V4SF:
23506 case V2SI_FTYPE_V2SF:
23507 case V2SI_FTYPE_V2DF:
23508 case V2SF_FTYPE_V2SF:
23509 case V2SF_FTYPE_V2SI:
23512 case V4SF_FTYPE_V4SF_VEC_MERGE:
23513 case V2DF_FTYPE_V2DF_VEC_MERGE:
23514 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
23515 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
23516 case V16QI_FTYPE_V16QI_V16QI:
23517 case V16QI_FTYPE_V8HI_V8HI:
23518 case V8QI_FTYPE_V8QI_V8QI:
23519 case V8QI_FTYPE_V4HI_V4HI:
23520 case V8HI_FTYPE_V8HI_V8HI:
23521 case V8HI_FTYPE_V16QI_V16QI:
23522 case V8HI_FTYPE_V4SI_V4SI:
23523 case V8SF_FTYPE_V8SF_V8SF:
23524 case V8SF_FTYPE_V8SF_V8SI:
23525 case V4SI_FTYPE_V4SI_V4SI:
23526 case V4SI_FTYPE_V8HI_V8HI:
23527 case V4SI_FTYPE_V4SF_V4SF:
23528 case V4SI_FTYPE_V2DF_V2DF:
23529 case V4HI_FTYPE_V4HI_V4HI:
23530 case V4HI_FTYPE_V8QI_V8QI:
23531 case V4HI_FTYPE_V2SI_V2SI:
23532 case V4DF_FTYPE_V4DF_V4DF:
23533 case V4DF_FTYPE_V4DF_V4DI:
23534 case V4SF_FTYPE_V4SF_V4SF:
23535 case V4SF_FTYPE_V4SF_V4SI:
23536 case V4SF_FTYPE_V4SF_V2SI:
23537 case V4SF_FTYPE_V4SF_V2DF:
23538 case V4SF_FTYPE_V4SF_DI:
23539 case V4SF_FTYPE_V4SF_SI:
23540 case V2DI_FTYPE_V2DI_V2DI:
23541 case V2DI_FTYPE_V16QI_V16QI:
23542 case V2DI_FTYPE_V4SI_V4SI:
23543 case V2DI_FTYPE_V2DI_V16QI:
23544 case V2DI_FTYPE_V2DF_V2DF:
23545 case V2SI_FTYPE_V2SI_V2SI:
23546 case V2SI_FTYPE_V4HI_V4HI:
23547 case V2SI_FTYPE_V2SF_V2SF:
23548 case V2DF_FTYPE_V2DF_V2DF:
23549 case V2DF_FTYPE_V2DF_V4SF:
23550 case V2DF_FTYPE_V2DF_V2DI:
23551 case V2DF_FTYPE_V2DF_DI:
23552 case V2DF_FTYPE_V2DF_SI:
23553 case V2SF_FTYPE_V2SF_V2SF:
23554 case V1DI_FTYPE_V1DI_V1DI:
23555 case V1DI_FTYPE_V8QI_V8QI:
23556 case V1DI_FTYPE_V2SI_V2SI:
23557 if (comparison == UNKNOWN)
23558 return ix86_expand_binop_builtin (icode, exp, target);
23561 case V4SF_FTYPE_V4SF_V4SF_SWAP:
23562 case V2DF_FTYPE_V2DF_V2DF_SWAP:
23563 gcc_assert (comparison != UNKNOWN);
23567 case V8HI_FTYPE_V8HI_V8HI_COUNT:
23568 case V8HI_FTYPE_V8HI_SI_COUNT:
23569 case V4SI_FTYPE_V4SI_V4SI_COUNT:
23570 case V4SI_FTYPE_V4SI_SI_COUNT:
23571 case V4HI_FTYPE_V4HI_V4HI_COUNT:
23572 case V4HI_FTYPE_V4HI_SI_COUNT:
23573 case V2DI_FTYPE_V2DI_V2DI_COUNT:
23574 case V2DI_FTYPE_V2DI_SI_COUNT:
23575 case V2SI_FTYPE_V2SI_V2SI_COUNT:
23576 case V2SI_FTYPE_V2SI_SI_COUNT:
23577 case V1DI_FTYPE_V1DI_V1DI_COUNT:
23578 case V1DI_FTYPE_V1DI_SI_COUNT:
23580 last_arg_count = true;
23582 case UINT64_FTYPE_UINT64_UINT64:
23583 case UINT_FTYPE_UINT_UINT:
23584 case UINT_FTYPE_UINT_USHORT:
23585 case UINT_FTYPE_UINT_UCHAR:
23586 case UINT16_FTYPE_UINT16_INT:
23587 case UINT8_FTYPE_UINT8_INT:
23590 case V2DI_FTYPE_V2DI_INT_CONVERT:
23593 nargs_constant = 1;
23595 case V8HI_FTYPE_V8HI_INT:
23596 case V8SF_FTYPE_V8SF_INT:
23597 case V4SI_FTYPE_V4SI_INT:
23598 case V4SI_FTYPE_V8SI_INT:
23599 case V4HI_FTYPE_V4HI_INT:
23600 case V4DF_FTYPE_V4DF_INT:
23601 case V4SF_FTYPE_V4SF_INT:
23602 case V4SF_FTYPE_V8SF_INT:
23603 case V2DI_FTYPE_V2DI_INT:
23604 case V2DF_FTYPE_V2DF_INT:
23605 case V2DF_FTYPE_V4DF_INT:
23607 nargs_constant = 1;
23609 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23610 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23611 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23612 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23613 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23616 case V16QI_FTYPE_V16QI_V16QI_INT:
23617 case V8HI_FTYPE_V8HI_V8HI_INT:
23618 case V8SI_FTYPE_V8SI_V8SI_INT:
23619 case V8SI_FTYPE_V8SI_V4SI_INT:
23620 case V8SF_FTYPE_V8SF_V8SF_INT:
23621 case V8SF_FTYPE_V8SF_V4SF_INT:
23622 case V4SI_FTYPE_V4SI_V4SI_INT:
23623 case V4DF_FTYPE_V4DF_V4DF_INT:
23624 case V4DF_FTYPE_V4DF_V2DF_INT:
23625 case V4SF_FTYPE_V4SF_V4SF_INT:
23626 case V2DI_FTYPE_V2DI_V2DI_INT:
23627 case V2DF_FTYPE_V2DF_V2DF_INT:
23629 nargs_constant = 1;
23631 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
23634 nargs_constant = 1;
23636 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
23639 nargs_constant = 1;
23641 case V2DI_FTYPE_V2DI_UINT_UINT:
23643 nargs_constant = 2;
23645 case MULTI_ARG_4_DF2_DI_I:
23646 case MULTI_ARG_4_DF2_DI_I1:
23647 case MULTI_ARG_4_SF2_SI_I:
23648 case MULTI_ARG_4_SF2_SI_I1:
23650 nargs_constant = 1;
23652 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23654 nargs_constant = 2;
23657 gcc_unreachable ();
23660 gcc_assert (nargs <= ARRAY_SIZE (args));
23662 if (comparison != UNKNOWN)
23664 gcc_assert (nargs == 2);
23665 return ix86_expand_sse_compare (d, exp, target, swap);
23668 if (rmode == VOIDmode || rmode == tmode)
23672 || GET_MODE (target) != tmode
23673 || ! (*insn_p->operand[0].predicate) (target, tmode))
23674 target = gen_reg_rtx (tmode);
23675 real_target = target;
23679 target = gen_reg_rtx (rmode);
23680 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
23683 for (i = 0; i < nargs; i++)
23685 tree arg = CALL_EXPR_ARG (exp, i);
23686 rtx op = expand_normal (arg);
23687 enum machine_mode mode = insn_p->operand[i + 1].mode;
23688 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
23690 if (last_arg_count && (i + 1) == nargs)
23692 /* SIMD shift insns take either an 8-bit immediate or a
23693 register as the count operand, but the builtin functions
23694 take an int as the count. If the count doesn't match, put it in a register. */
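/* (Illustrative example: __builtin_ia32_psllwi128 arrives here via
   the V8HI_FTYPE_V8HI_SI_COUNT case, so a constant count can be used
   as an immediate while a variable count is forced into a register.) */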
23697 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
23698 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
23699 op = copy_to_reg (op);
23702 else if ((nargs - i) <= nargs_constant)
23707 case CODE_FOR_sse4_1_roundpd:
23708 case CODE_FOR_sse4_1_roundps:
23709 case CODE_FOR_sse4_1_roundsd:
23710 case CODE_FOR_sse4_1_roundss:
23711 case CODE_FOR_sse4_1_blendps:
23712 case CODE_FOR_avx_blendpd256:
23713 case CODE_FOR_avx_vpermilv4df:
23714 case CODE_FOR_avx_roundpd256:
23715 case CODE_FOR_avx_roundps256:
23716 error ("the last argument must be a 4-bit immediate");
23719 case CODE_FOR_sse4_1_blendpd:
23720 case CODE_FOR_avx_vpermilv2df:
23721 case CODE_FOR_xop_vpermil2v2df3:
23722 case CODE_FOR_xop_vpermil2v4sf3:
23723 case CODE_FOR_xop_vpermil2v4df3:
23724 case CODE_FOR_xop_vpermil2v8sf3:
23725 error ("the last argument must be a 2-bit immediate");
23728 case CODE_FOR_avx_vextractf128v4df:
23729 case CODE_FOR_avx_vextractf128v8sf:
23730 case CODE_FOR_avx_vextractf128v8si:
23731 case CODE_FOR_avx_vinsertf128v4df:
23732 case CODE_FOR_avx_vinsertf128v8sf:
23733 case CODE_FOR_avx_vinsertf128v8si:
23734 error ("the last argument must be a 1-bit immediate");
23737 case CODE_FOR_avx_cmpsdv2df3:
23738 case CODE_FOR_avx_cmpssv4sf3:
23739 case CODE_FOR_avx_cmppdv2df3:
23740 case CODE_FOR_avx_cmppsv4sf3:
23741 case CODE_FOR_avx_cmppdv4df3:
23742 case CODE_FOR_avx_cmppsv8sf3:
23743 error ("the last argument must be a 5-bit immediate");
23747 switch (nargs_constant)
23750 if ((nargs - i) == nargs_constant)
23752 error ("the next to last argument must be an 8-bit immediate");
23756 error ("the last argument must be an 8-bit immediate");
23759 gcc_unreachable ();
23766 if (VECTOR_MODE_P (mode))
23767 op = safe_vector_operand (op, mode);
23769 /* If we aren't optimizing, only allow one memory operand to be generated. */
23771 if (memory_operand (op, mode))
23774 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
23776 if (optimize || !match || num_memory > 1)
23777 op = copy_to_mode_reg (mode, op);
23781 op = copy_to_reg (op);
23782 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
23787 args[i].mode = mode;
23793 pat = GEN_FCN (icode) (real_target, args[0].op);
23796 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
23799 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23803 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23804 args[2].op, args[3].op);
23807 gcc_unreachable ();
23817 /* Subroutine of ix86_expand_builtin to take care of special insns
23818 with a variable number of operands. */
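/* (Illustrative example: __builtin_ia32_loadupd is a "load" here, via
   V2DF_FTYPE_PCDOUBLE, while __builtin_ia32_storeupd is a "store", via
   VOID_FTYPE_PDOUBLE_V2DF, whose memory operand becomes the target.) */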
23821 ix86_expand_special_args_builtin (const struct builtin_description *d,
23822 tree exp, rtx target)
23826 unsigned int i, nargs, arg_adjust, memory;
23830 enum machine_mode mode;
23832 enum insn_code icode = d->icode;
23833 bool last_arg_constant = false;
23834 const struct insn_data *insn_p = &insn_data[icode];
23835 enum machine_mode tmode = insn_p->operand[0].mode;
23836 enum { load, store } klass;
23838 switch ((enum ix86_builtin_func_type) d->flag)
23840 case VOID_FTYPE_VOID:
23841 emit_insn (GEN_FCN (icode) (target));
23843 case UINT64_FTYPE_VOID:
23848 case UINT64_FTYPE_PUNSIGNED:
23849 case V2DI_FTYPE_PV2DI:
23850 case V32QI_FTYPE_PCCHAR:
23851 case V16QI_FTYPE_PCCHAR:
23852 case V8SF_FTYPE_PCV4SF:
23853 case V8SF_FTYPE_PCFLOAT:
23854 case V4SF_FTYPE_PCFLOAT:
23855 case V4DF_FTYPE_PCV2DF:
23856 case V4DF_FTYPE_PCDOUBLE:
23857 case V2DF_FTYPE_PCDOUBLE:
23858 case VOID_FTYPE_PVOID:
23863 case VOID_FTYPE_PV2SF_V4SF:
23864 case VOID_FTYPE_PV4DI_V4DI:
23865 case VOID_FTYPE_PV2DI_V2DI:
23866 case VOID_FTYPE_PCHAR_V32QI:
23867 case VOID_FTYPE_PCHAR_V16QI:
23868 case VOID_FTYPE_PFLOAT_V8SF:
23869 case VOID_FTYPE_PFLOAT_V4SF:
23870 case VOID_FTYPE_PDOUBLE_V4DF:
23871 case VOID_FTYPE_PDOUBLE_V2DF:
23872 case VOID_FTYPE_PULONGLONG_ULONGLONG:
23873 case VOID_FTYPE_PINT_INT:
23876 /* Reserve memory operand for target. */
23877 memory = ARRAY_SIZE (args);
23879 case V4SF_FTYPE_V4SF_PCV2SF:
23880 case V2DF_FTYPE_V2DF_PCDOUBLE:
23885 case V8SF_FTYPE_PCV8SF_V8SF:
23886 case V4DF_FTYPE_PCV4DF_V4DF:
23887 case V4SF_FTYPE_PCV4SF_V4SF:
23888 case V2DF_FTYPE_PCV2DF_V2DF:
23893 case VOID_FTYPE_PV8SF_V8SF_V8SF:
23894 case VOID_FTYPE_PV4DF_V4DF_V4DF:
23895 case VOID_FTYPE_PV4SF_V4SF_V4SF:
23896 case VOID_FTYPE_PV2DF_V2DF_V2DF:
23899 /* Reserve memory operand for target. */
23900 memory = ARRAY_SIZE (args);
23902 case VOID_FTYPE_UINT_UINT_UINT:
23903 case VOID_FTYPE_UINT64_UINT_UINT:
23904 case UCHAR_FTYPE_UINT_UINT_UINT:
23905 case UCHAR_FTYPE_UINT64_UINT_UINT:
23908 memory = ARRAY_SIZE (args);
23909 last_arg_constant = true;
23912 gcc_unreachable ();
23915 gcc_assert (nargs <= ARRAY_SIZE (args));
23917 if (klass == store)
23919 arg = CALL_EXPR_ARG (exp, 0);
23920 op = expand_normal (arg);
23921 gcc_assert (target == 0);
23922 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
23930 || GET_MODE (target) != tmode
23931 || ! (*insn_p->operand[0].predicate) (target, tmode))
23932 target = gen_reg_rtx (tmode);
23935 for (i = 0; i < nargs; i++)
23937 enum machine_mode mode = insn_p->operand[i + 1].mode;
23940 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
23941 op = expand_normal (arg);
23942 match = (*insn_p->operand[i + 1].predicate) (op, mode);
23944 if (last_arg_constant && (i + 1) == nargs)
23948 if (icode == CODE_FOR_lwp_lwpvalsi3
23949 || icode == CODE_FOR_lwp_lwpinssi3
23950 || icode == CODE_FOR_lwp_lwpvaldi3
23951 || icode == CODE_FOR_lwp_lwpinsdi3)
23952 error ("the last argument must be a 32-bit immediate");
23954 error ("the last argument must be an 8-bit immediate");
23962 /* This must be the memory operand. */
23963 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
23964 gcc_assert (GET_MODE (op) == mode
23965 || GET_MODE (op) == VOIDmode);
23969 /* This must be a register. */
23970 if (VECTOR_MODE_P (mode))
23971 op = safe_vector_operand (op, mode);
23973 gcc_assert (GET_MODE (op) == mode
23974 || GET_MODE (op) == VOIDmode);
23975 op = copy_to_mode_reg (mode, op);
23980 args[i].mode = mode;
23986 pat = GEN_FCN (icode) (target);
23989 pat = GEN_FCN (icode) (target, args[0].op);
23992 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
23995 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
23998 gcc_unreachable ();
24004 return klass == store ? 0 : target;
24007 /* Return the integer constant in ARG. Constrain it to be in the range
24008 of the subparts of VEC_TYPE; issue an error if not. */
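/* (Illustrative example: __builtin_ia32_vec_ext_v4sf (v, 5) is rejected
   here, since for a V4SF argument TYPE_VECTOR_SUBPARTS is 4 and the
   valid selectors are 0..3.) */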
24011 get_element_number (tree vec_type, tree arg)
24013 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
24015 if (!host_integerp (arg, 1)
24016 || (elt = tree_low_cst (arg, 1), elt > max))
24018 error ("selector must be an integer constant in the range 0..%wi", max);
24025 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24026 ix86_expand_vector_init. We DO have language-level syntax for this, in
24027 the form of (type){ init-list }. Except that since we can't place emms
24028 instructions from inside the compiler, we can't allow the use of MMX
24029 registers unless the user explicitly asks for it. So we do *not* define
24030 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
24031 we have builtins invoked by mmintrin.h that give us license to emit
24032 these sorts of instructions. */
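/* (Illustrative example: mmintrin.h implements _mm_set_pi16 on top of
   __builtin_ia32_vec_init_v4hi, which is expanded by the function
   below.) */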
24035 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
24037 enum machine_mode tmode = TYPE_MODE (type);
24038 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
24039 int i, n_elt = GET_MODE_NUNITS (tmode);
24040 rtvec v = rtvec_alloc (n_elt);
24042 gcc_assert (VECTOR_MODE_P (tmode));
24043 gcc_assert (call_expr_nargs (exp) == n_elt);
24045 for (i = 0; i < n_elt; ++i)
24047 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
24048 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
24051 if (!target || !register_operand (target, tmode))
24052 target = gen_reg_rtx (tmode);
24054 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
24058 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24059 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
24060 had a language-level syntax for referencing vector elements. */
24063 ix86_expand_vec_ext_builtin (tree exp, rtx target)
24065 enum machine_mode tmode, mode0;
24070 arg0 = CALL_EXPR_ARG (exp, 0);
24071 arg1 = CALL_EXPR_ARG (exp, 1);
24073 op0 = expand_normal (arg0);
24074 elt = get_element_number (TREE_TYPE (arg0), arg1);
24076 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24077 mode0 = TYPE_MODE (TREE_TYPE (arg0));
24078 gcc_assert (VECTOR_MODE_P (mode0));
24080 op0 = force_reg (mode0, op0);
24082 if (optimize || !target || !register_operand (target, tmode))
24083 target = gen_reg_rtx (tmode);
24085 ix86_expand_vector_extract (true, target, op0, elt);
24090 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24091 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
24092 a language-level syntax for referencing vector elements. */
24095 ix86_expand_vec_set_builtin (tree exp)
24097 enum machine_mode tmode, mode1;
24098 tree arg0, arg1, arg2;
24100 rtx op0, op1, target;
24102 arg0 = CALL_EXPR_ARG (exp, 0);
24103 arg1 = CALL_EXPR_ARG (exp, 1);
24104 arg2 = CALL_EXPR_ARG (exp, 2);
24106 tmode = TYPE_MODE (TREE_TYPE (arg0));
24107 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24108 gcc_assert (VECTOR_MODE_P (tmode));
24110 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24111 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24112 elt = get_element_number (TREE_TYPE (arg0), arg2);
24114 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24115 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24117 op0 = force_reg (tmode, op0);
24118 op1 = force_reg (mode1, op1);
24120 /* OP0 is the source of these builtin functions and shouldn't be
24121 modified. Create a copy, use it and return it as target. */
24122 target = gen_reg_rtx (tmode);
24123 emit_move_insn (target, op0);
24124 ix86_expand_vector_set (true, target, op1, elt);
24129 /* Expand an expression EXP that calls a built-in function,
24130 with result going to TARGET if that's convenient
24131 (and in mode MODE if that's convenient).
24132 SUBTARGET may be used as the target for computing one of EXP's operands.
24133 IGNORE is nonzero if the value is to be ignored. */
24136 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24137 enum machine_mode mode ATTRIBUTE_UNUSED,
24138 int ignore ATTRIBUTE_UNUSED)
24140 const struct builtin_description *d;
24142 enum insn_code icode;
24143 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24144 tree arg0, arg1, arg2;
24145 rtx op0, op1, op2, pat;
24146 enum machine_mode mode0, mode1, mode2;
24147 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
24149 /* Determine whether the builtin function is available under the current ISA.
24150 Originally the builtin was not created if it wasn't applicable to the
24151 current ISA based on the command line switches. With function specific
24152 options, we need to check in the context of the function making the call
24153 whether it is supported. */
24154 if (ix86_builtins_isa[fcode].isa
24155 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
24157 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24158 NULL, NULL, false);
24161 error ("%qE needs unknown isa option", fndecl);
24164 gcc_assert (opts != NULL);
24165 error ("%qE needs isa option %s", fndecl, opts);
24173 case IX86_BUILTIN_MASKMOVQ:
24174 case IX86_BUILTIN_MASKMOVDQU:
24175 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24176 ? CODE_FOR_mmx_maskmovq
24177 : CODE_FOR_sse2_maskmovdqu);
24178 /* Note the arg order is different from the operand order. */
24179 arg1 = CALL_EXPR_ARG (exp, 0);
24180 arg2 = CALL_EXPR_ARG (exp, 1);
24181 arg0 = CALL_EXPR_ARG (exp, 2);
24182 op0 = expand_normal (arg0);
24183 op1 = expand_normal (arg1);
24184 op2 = expand_normal (arg2);
24185 mode0 = insn_data[icode].operand[0].mode;
24186 mode1 = insn_data[icode].operand[1].mode;
24187 mode2 = insn_data[icode].operand[2].mode;
24189 op0 = force_reg (Pmode, op0);
24190 op0 = gen_rtx_MEM (mode1, op0);
24192 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24193 op0 = copy_to_mode_reg (mode0, op0);
24194 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24195 op1 = copy_to_mode_reg (mode1, op1);
24196 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24197 op2 = copy_to_mode_reg (mode2, op2);
24198 pat = GEN_FCN (icode) (op0, op1, op2);
24204 case IX86_BUILTIN_LDMXCSR:
24205 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24206 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24207 emit_move_insn (target, op0);
24208 emit_insn (gen_sse_ldmxcsr (target));
24211 case IX86_BUILTIN_STMXCSR:
24212 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24213 emit_insn (gen_sse_stmxcsr (target));
24214 return copy_to_mode_reg (SImode, target);
24216 case IX86_BUILTIN_CLFLUSH:
24217 arg0 = CALL_EXPR_ARG (exp, 0);
24218 op0 = expand_normal (arg0);
24219 icode = CODE_FOR_sse2_clflush;
24220 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24221 op0 = copy_to_mode_reg (Pmode, op0);
24223 emit_insn (gen_sse2_clflush (op0));
24226 case IX86_BUILTIN_MONITOR:
24227 arg0 = CALL_EXPR_ARG (exp, 0);
24228 arg1 = CALL_EXPR_ARG (exp, 1);
24229 arg2 = CALL_EXPR_ARG (exp, 2);
24230 op0 = expand_normal (arg0);
24231 op1 = expand_normal (arg1);
24232 op2 = expand_normal (arg2);
24234 op0 = copy_to_mode_reg (Pmode, op0);
24236 op1 = copy_to_mode_reg (SImode, op1);
24238 op2 = copy_to_mode_reg (SImode, op2);
24239 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
24242 case IX86_BUILTIN_MWAIT:
24243 arg0 = CALL_EXPR_ARG (exp, 0);
24244 arg1 = CALL_EXPR_ARG (exp, 1);
24245 op0 = expand_normal (arg0);
24246 op1 = expand_normal (arg1);
24248 op0 = copy_to_mode_reg (SImode, op0);
24250 op1 = copy_to_mode_reg (SImode, op1);
24251 emit_insn (gen_sse3_mwait (op0, op1));
24254 case IX86_BUILTIN_VEC_INIT_V2SI:
24255 case IX86_BUILTIN_VEC_INIT_V4HI:
24256 case IX86_BUILTIN_VEC_INIT_V8QI:
24257 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24259 case IX86_BUILTIN_VEC_EXT_V2DF:
24260 case IX86_BUILTIN_VEC_EXT_V2DI:
24261 case IX86_BUILTIN_VEC_EXT_V4SF:
24262 case IX86_BUILTIN_VEC_EXT_V4SI:
24263 case IX86_BUILTIN_VEC_EXT_V8HI:
24264 case IX86_BUILTIN_VEC_EXT_V2SI:
24265 case IX86_BUILTIN_VEC_EXT_V4HI:
24266 case IX86_BUILTIN_VEC_EXT_V16QI:
24267 return ix86_expand_vec_ext_builtin (exp, target);
24269 case IX86_BUILTIN_VEC_SET_V2DI:
24270 case IX86_BUILTIN_VEC_SET_V4SF:
24271 case IX86_BUILTIN_VEC_SET_V4SI:
24272 case IX86_BUILTIN_VEC_SET_V8HI:
24273 case IX86_BUILTIN_VEC_SET_V4HI:
24274 case IX86_BUILTIN_VEC_SET_V16QI:
24275 return ix86_expand_vec_set_builtin (exp);
24277 case IX86_BUILTIN_VEC_PERM_V2DF:
24278 case IX86_BUILTIN_VEC_PERM_V4SF:
24279 case IX86_BUILTIN_VEC_PERM_V2DI:
24280 case IX86_BUILTIN_VEC_PERM_V4SI:
24281 case IX86_BUILTIN_VEC_PERM_V8HI:
24282 case IX86_BUILTIN_VEC_PERM_V16QI:
24283 case IX86_BUILTIN_VEC_PERM_V2DI_U:
24284 case IX86_BUILTIN_VEC_PERM_V4SI_U:
24285 case IX86_BUILTIN_VEC_PERM_V8HI_U:
24286 case IX86_BUILTIN_VEC_PERM_V16QI_U:
24287 case IX86_BUILTIN_VEC_PERM_V4DF:
24288 case IX86_BUILTIN_VEC_PERM_V8SF:
24289 return ix86_expand_vec_perm_builtin (exp);
24291 case IX86_BUILTIN_INFQ:
24292 case IX86_BUILTIN_HUGE_VALQ:
24294 REAL_VALUE_TYPE inf;
24298 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24300 tmp = validize_mem (force_const_mem (mode, tmp));
24303 target = gen_reg_rtx (mode);
24305 emit_move_insn (target, tmp);
24309 case IX86_BUILTIN_LLWPCB:
24310 arg0 = CALL_EXPR_ARG (exp, 0);
24311 op0 = expand_normal (arg0);
24312 icode = CODE_FOR_lwp_llwpcb;
24313 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24314 op0 = copy_to_mode_reg (Pmode, op0);
24315 emit_insn (gen_lwp_llwpcb (op0));
24318 case IX86_BUILTIN_SLWPCB:
24319 icode = CODE_FOR_lwp_slwpcb;
24321 || ! (*insn_data[icode].operand[0].predicate) (target, Pmode))
24322 target = gen_reg_rtx (Pmode);
24323 emit_insn (gen_lwp_slwpcb (target));
24330 for (i = 0, d = bdesc_special_args;
24331 i < ARRAY_SIZE (bdesc_special_args);
24333 if (d->code == fcode)
24334 return ix86_expand_special_args_builtin (d, exp, target);
24336 for (i = 0, d = bdesc_args;
24337 i < ARRAY_SIZE (bdesc_args);
24339 if (d->code == fcode)
24342 case IX86_BUILTIN_FABSQ:
24343 case IX86_BUILTIN_COPYSIGNQ:
24345 /* Emit a normal call if SSE2 isn't available. */
24346 return expand_call (exp, target, ignore);
24348 return ix86_expand_args_builtin (d, exp, target);
24351 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24352 if (d->code == fcode)
24353 return ix86_expand_sse_comi (d, exp, target);
24355 for (i = 0, d = bdesc_pcmpestr;
24356 i < ARRAY_SIZE (bdesc_pcmpestr);
24358 if (d->code == fcode)
24359 return ix86_expand_sse_pcmpestr (d, exp, target);
24361 for (i = 0, d = bdesc_pcmpistr;
24362 i < ARRAY_SIZE (bdesc_pcmpistr);
24364 if (d->code == fcode)
24365 return ix86_expand_sse_pcmpistr (d, exp, target);
24367 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
24368 if (d->code == fcode)
24369 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
24370 (enum ix86_builtin_func_type)
24371 d->flag, d->comparison);
24373 gcc_unreachable ();
24376 /* Returns a function decl for a vectorized version of the builtin function
24377 with builtin function code FN and the result vector type TYPE, or NULL_TREE
24378 if it is not available. */
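/* (Illustrative example, from the switch below: a request for
   BUILT_IN_SQRT with a V2DF result and a V2DF input is answered with
   IX86_BUILTIN_SQRTPD, i.e. sqrtpd.) */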
24381 ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
24384 enum machine_mode in_mode, out_mode;
24387 if (TREE_CODE (type_out) != VECTOR_TYPE
24388 || TREE_CODE (type_in) != VECTOR_TYPE)
24391 out_mode = TYPE_MODE (TREE_TYPE (type_out));
24392 out_n = TYPE_VECTOR_SUBPARTS (type_out);
24393 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24394 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24398 case BUILT_IN_SQRT:
24399 if (out_mode == DFmode && out_n == 2
24400 && in_mode == DFmode && in_n == 2)
24401 return ix86_builtins[IX86_BUILTIN_SQRTPD];
24404 case BUILT_IN_SQRTF:
24405 if (out_mode == SFmode && out_n == 4
24406 && in_mode == SFmode && in_n == 4)
24407 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
24410 case BUILT_IN_LRINT:
24411 if (out_mode == SImode && out_n == 4
24412 && in_mode == DFmode && in_n == 2)
24413 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
24416 case BUILT_IN_LRINTF:
24417 if (out_mode == SImode && out_n == 4
24418 && in_mode == SFmode && in_n == 4)
24419 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
24422 case BUILT_IN_COPYSIGN:
24423 if (out_mode == DFmode && out_n == 2
24424 && in_mode == DFmode && in_n == 2)
24425 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
24428 case BUILT_IN_COPYSIGNF:
24429 if (out_mode == SFmode && out_n == 4
24430 && in_mode == SFmode && in_n == 4)
24431 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
24438 /* Dispatch to a handler for a vectorization library. */
24439 if (ix86_veclib_handler)
24440 return (*ix86_veclib_handler) ((enum built_in_function) fn, type_out,
24446 /* Handler for an SVML-style interface to
24447 a library with vectorized intrinsics. */
24450 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
24453 tree fntype, new_fndecl, args;
24456 enum machine_mode el_mode, in_mode;
24459 /* SVML is suitable for unsafe math only. */
24460 if (!flag_unsafe_math_optimizations)
24463 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24464 n = TYPE_VECTOR_SUBPARTS (type_out);
24465 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24466 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24467 if (el_mode != in_mode
24475 case BUILT_IN_LOG10:
24477 case BUILT_IN_TANH:
24479 case BUILT_IN_ATAN:
24480 case BUILT_IN_ATAN2:
24481 case BUILT_IN_ATANH:
24482 case BUILT_IN_CBRT:
24483 case BUILT_IN_SINH:
24485 case BUILT_IN_ASINH:
24486 case BUILT_IN_ASIN:
24487 case BUILT_IN_COSH:
24489 case BUILT_IN_ACOSH:
24490 case BUILT_IN_ACOS:
24491 if (el_mode != DFmode || n != 2)
24495 case BUILT_IN_EXPF:
24496 case BUILT_IN_LOGF:
24497 case BUILT_IN_LOG10F:
24498 case BUILT_IN_POWF:
24499 case BUILT_IN_TANHF:
24500 case BUILT_IN_TANF:
24501 case BUILT_IN_ATANF:
24502 case BUILT_IN_ATAN2F:
24503 case BUILT_IN_ATANHF:
24504 case BUILT_IN_CBRTF:
24505 case BUILT_IN_SINHF:
24506 case BUILT_IN_SINF:
24507 case BUILT_IN_ASINHF:
24508 case BUILT_IN_ASINF:
24509 case BUILT_IN_COSHF:
24510 case BUILT_IN_COSF:
24511 case BUILT_IN_ACOSHF:
24512 case BUILT_IN_ACOSF:
24513 if (el_mode != SFmode || n != 4)
24521 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24523 if (fn == BUILT_IN_LOGF)
24524 strcpy (name, "vmlsLn4");
24525 else if (fn == BUILT_IN_LOG)
24526 strcpy (name, "vmldLn2");
24529 sprintf (name, "vmls%s", bname+10);
24530 name[strlen (name)-1] = '4';
24533 sprintf (name, "vmld%s2", bname+10);
24535 /* Convert to uppercase. */
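/* (Illustrative examples of the resulting names: BUILT_IN_SINF
   becomes "vmlsSin4" and BUILT_IN_SIN becomes "vmldSin2"; the exact
   entry points depend on the SVML library being linked against.) */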
24539 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24540 args = TREE_CHAIN (args))
24544 fntype = build_function_type_list (type_out, type_in, NULL);
24546 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24548 /* Build a function declaration for the vectorized function. */
24549 new_fndecl = build_decl (BUILTINS_LOCATION,
24550 FUNCTION_DECL, get_identifier (name), fntype);
24551 TREE_PUBLIC (new_fndecl) = 1;
24552 DECL_EXTERNAL (new_fndecl) = 1;
24553 DECL_IS_NOVOPS (new_fndecl) = 1;
24554 TREE_READONLY (new_fndecl) = 1;
24559 /* Handler for an ACML-style interface to
24560 a library with vectorized intrinsics. */
24563 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
24565 char name[20] = "__vr.._";
24566 tree fntype, new_fndecl, args;
24569 enum machine_mode el_mode, in_mode;
24572 /* ACML is 64-bit only and suitable for unsafe math only, as
24573 it does not correctly support parts of IEEE arithmetic with the
24574 required precision, such as denormals. */
24576 || !flag_unsafe_math_optimizations)
24579 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24580 n = TYPE_VECTOR_SUBPARTS (type_out);
24581 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24582 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24583 if (el_mode != in_mode
24593 case BUILT_IN_LOG2:
24594 case BUILT_IN_LOG10:
24597 if (el_mode != DFmode
24602 case BUILT_IN_SINF:
24603 case BUILT_IN_COSF:
24604 case BUILT_IN_EXPF:
24605 case BUILT_IN_POWF:
24606 case BUILT_IN_LOGF:
24607 case BUILT_IN_LOG2F:
24608 case BUILT_IN_LOG10F:
24611 if (el_mode != SFmode
24620 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24621 sprintf (name + 7, "%s", bname+10);
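/* (Illustrative examples of the resulting names: BUILT_IN_SINF
   resolves to "__vrs4_sinf" and BUILT_IN_SIN to "__vrd2_sin".) */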
24624 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24625 args = TREE_CHAIN (args))
24629 fntype = build_function_type_list (type_out, type_in, NULL);
24631 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24633 /* Build a function declaration for the vectorized function. */
24634 new_fndecl = build_decl (BUILTINS_LOCATION,
24635 FUNCTION_DECL, get_identifier (name), fntype);
24636 TREE_PUBLIC (new_fndecl) = 1;
24637 DECL_EXTERNAL (new_fndecl) = 1;
24638 DECL_IS_NOVOPS (new_fndecl) = 1;
24639 TREE_READONLY (new_fndecl) = 1;
24645 /* Returns a decl of a function that implements conversion of an integer vector
24646 into a floating-point vector, or vice-versa. TYPE is the type of the integer
24647 side of the conversion.
24648 Return NULL_TREE if it is not available. */
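/* (Illustrative example, per the switch below: a FLOAT_EXPR from
   V4SI to V4SF is implemented with IX86_BUILTIN_CVTDQ2PS, i.e.
   cvtdq2ps, or IX86_BUILTIN_CVTUDQ2PS for the unsigned case.) */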
24651 ix86_vectorize_builtin_conversion (unsigned int code, tree type)
24653 if (! (TARGET_SSE2 && TREE_CODE (type) == VECTOR_TYPE))
24659 switch (TYPE_MODE (type))
24662 return TYPE_UNSIGNED (type)
24663 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
24664 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS];
24669 case FIX_TRUNC_EXPR:
24670 switch (TYPE_MODE (type))
24673 return TYPE_UNSIGNED (type)
24675 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ];
24685 /* Returns a decl of a target-specific builtin that implements the
24686 reciprocal of the function, or NULL_TREE if not available. */
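/* (Illustrative, assuming -ffast-math: for
     float f (float x) { return 1.0f / sqrtf (x); }
   this hook lets the expander use rsqrtss plus a Newton-Raphson step
   instead of sqrtss followed by divss.) */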
24689 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
24690 bool sqrt ATTRIBUTE_UNUSED)
24692 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
24693 && flag_finite_math_only && !flag_trapping_math
24694 && flag_unsafe_math_optimizations))
24698 /* Machine dependent builtins. */
24701 /* Vectorized version of sqrt to rsqrt conversion. */
24702 case IX86_BUILTIN_SQRTPS_NR:
24703 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
24709 /* Normal builtins. */
24712 /* Sqrt to rsqrt conversion. */
24713 case BUILT_IN_SQRTF:
24714 return ix86_builtins[IX86_BUILTIN_RSQRTF];
24721 /* Helper for avx_vpermilps256_operand et al. This is also used by
24722 the expansion functions to turn the parallel back into a mask.
24723 The return value is 0 for no match and the imm8+1 for a match. */
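/* (Illustrative example for the V4SF case below: the parallel
   [1 0 3 2] reconstructs imm8 0xb1 -- two bits per element -- so the
   return value is 0xb2.) */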
24726 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
24728 unsigned i, nelt = GET_MODE_NUNITS (mode);
24730 unsigned char ipar[8];
24732 if (XVECLEN (par, 0) != (int) nelt)
24735 /* Validate that all of the elements are constants, and not totally
24736 out of range. Copy the data into an integral array to make the
24737 subsequent checks easier. */
24738 for (i = 0; i < nelt; ++i)
24740 rtx er = XVECEXP (par, 0, i);
24741 unsigned HOST_WIDE_INT ei;
24743 if (!CONST_INT_P (er))
24754 /* In the 256-bit DFmode case, we can only move elements within a 128-bit lane. */
24756 for (i = 0; i < 2; ++i)
24760 mask |= ipar[i] << i;
24762 for (i = 2; i < 4; ++i)
24766 mask |= (ipar[i] - 2) << i;
24771 /* In the 256-bit SFmode case, we have full freedom of movement
24772 within the low 128-bit lane, but the high 128-bit lane must
24773 mirror the exact same pattern. */
24774 for (i = 0; i < 4; ++i)
24775 if (ipar[i] + 4 != ipar[i + 4])
24782 /* In the 128-bit case, we have full freedom in the placement of
24783 the elements from the source operand. */
24784 for (i = 0; i < nelt; ++i)
24785 mask |= ipar[i] << (i * (nelt / 2));
24789 gcc_unreachable ();
24792 /* Make sure success has a non-zero value by adding one. */
24796 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
24797 the expansion functions to turn the parallel back into a mask.
24798 The return value is 0 for no match and the imm8+1 for a match. */
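/* (Illustrative example: for V4DF the parallel [2 3 4 5] selects the
   high half of operand 1 and the low half of operand 2, i.e. imm8
   0x21, so 0x22 is returned.) */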
24801 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
24803 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
24805 unsigned char ipar[8];
24807 if (XVECLEN (par, 0) != (int) nelt)
24810 /* Validate that all of the elements are constants, and not totally
24811 out of range. Copy the data into an integral array to make the
24812 subsequent checks easier. */
24813 for (i = 0; i < nelt; ++i)
24815 rtx er = XVECEXP (par, 0, i);
24816 unsigned HOST_WIDE_INT ei;
24818 if (!CONST_INT_P (er))
24821 if (ei >= 2 * nelt)
24826 /* Validate that each half of the permute selects a contiguous half of a source operand. */
24827 for (i = 0; i < nelt2 - 1; ++i)
24828 if (ipar[i] + 1 != ipar[i + 1])
24830 for (i = nelt2; i < nelt - 1; ++i)
24831 if (ipar[i] + 1 != ipar[i + 1])
24834 /* Reconstruct the mask. */
24835 for (i = 0; i < 2; ++i)
24837 unsigned e = ipar[i * nelt2];
24841 mask |= e << (i * 4);
24844 /* Make sure success has a non-zero value by adding one. */
24849 /* Store OPERAND to memory after reload has completed. This means
24850 that we can't easily use assign_stack_local. */
24852 ix86_force_to_memory (enum machine_mode mode, rtx operand)
24856 gcc_assert (reload_completed);
24857 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
24859 result = gen_rtx_MEM (mode,
24860 gen_rtx_PLUS (Pmode,
24862 GEN_INT (-RED_ZONE_SIZE)));
24863 emit_move_insn (result, operand);
24865 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
24871 operand = gen_lowpart (DImode, operand);
24875 gen_rtx_SET (VOIDmode,
24876 gen_rtx_MEM (DImode,
24877 gen_rtx_PRE_DEC (DImode,
24878 stack_pointer_rtx)),
24882 gcc_unreachable ();
24884 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24893 split_di (&operand, 1, operands, operands + 1);
24895 gen_rtx_SET (VOIDmode,
24896 gen_rtx_MEM (SImode,
24897 gen_rtx_PRE_DEC (Pmode,
24898 stack_pointer_rtx)),
24901 gen_rtx_SET (VOIDmode,
24902 gen_rtx_MEM (SImode,
24903 gen_rtx_PRE_DEC (Pmode,
24904 stack_pointer_rtx)),
24909 /* Store HImodes as SImodes. */
24910 operand = gen_lowpart (SImode, operand);
24914 gen_rtx_SET (VOIDmode,
24915 gen_rtx_MEM (GET_MODE (operand),
24916 gen_rtx_PRE_DEC (SImode,
24917 stack_pointer_rtx)),
24921 gcc_unreachable ();
24923 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24928 /* Free the operand from memory. */
24930 ix86_free_from_memory (enum machine_mode mode)
24932 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
24936 if (mode == DImode || TARGET_64BIT)
24940 /* Use LEA to deallocate stack space. In peephole2 it will be converted
24941 to a pop or add instruction if registers are available. */
24942 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
24943 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
24948 /* Implement TARGET_IRA_COVER_CLASSES. If -mfpmath=sse, we prefer
24949 SSE_REGS to FLOAT_REGS if their costs for a pseudo are the same. */
24951 static const enum reg_class *
24952 i386_ira_cover_classes (void)
24954 static const enum reg_class sse_fpmath_classes[] = {
24955 GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
24957 static const enum reg_class no_sse_fpmath_classes[] = {
24958 GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
24961 return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
24964 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
24965 QImode must go into class Q_REGS.
24966 Narrow ALL_REGS to GENERAL_REGS. This allows movsf and
24967 movdf to do mem-to-mem moves through integer regs. */
24969 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
24971 enum machine_mode mode = GET_MODE (x);
24973 /* We're only allowed to return a subclass of CLASS. Many of the
24974 following checks fail for NO_REGS, so eliminate that early. */
24975 if (regclass == NO_REGS)
24978 /* All classes can load zeros. */
24979 if (x == CONST0_RTX (mode))
24982 /* Force constants into memory if we are loading a (nonzero) constant into
24983 an MMX or SSE register. This is because there are no MMX/SSE instructions
24984 to load from a constant. */
24986 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
24989 /* Prefer SSE regs only, if we can use them for math. */
24990 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
24991 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
24993 /* Floating-point constants need more complex checks. */
24994 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
24996 /* General regs can load everything. */
24997 if (reg_class_subset_p (regclass, GENERAL_REGS))
25000 /* Floats can load 0 and 1 plus some others. Note that we eliminated
25001 zero above. We only want to wind up preferring 80387 registers if
25002 we plan on doing computation with them. */
25004 && standard_80387_constant_p (x))
25006 /* Limit the class to non-SSE. */
25007 if (regclass == FLOAT_SSE_REGS)
25009 if (regclass == FP_TOP_SSE_REGS)
25011 if (regclass == FP_SECOND_SSE_REGS)
25012 return FP_SECOND_REG;
25013 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
25020 /* Generally when we see PLUS here, it's the function invariant
25021 (plus soft-fp const_int), which can only be computed into general regs. */
25023 if (GET_CODE (x) == PLUS)
25024 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
25026 /* QImode constants are easy to load, but non-constant QImode data
25027 must go into Q_REGS. */
25028 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
25030 if (reg_class_subset_p (regclass, Q_REGS))
25032 if (reg_class_subset_p (Q_REGS, regclass))
25040 /* Discourage putting floating-point values in SSE registers unless
25041 SSE math is being used, and likewise for the 387 registers. */
25043 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
25045 enum machine_mode mode = GET_MODE (x);
25047 /* Restrict the output reload class to the register bank that we are doing
25048 math on. If we would like not to return a subset of CLASS, reject this
25049 alternative: if reload cannot do this, it will still use its choice. */
25050 mode = GET_MODE (x);
25051 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
25052 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
25054 if (X87_FLOAT_MODE_P (mode))
25056 if (regclass == FP_TOP_SSE_REGS)
25058 else if (regclass == FP_SECOND_SSE_REGS)
25059 return FP_SECOND_REG;
25061 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
25067 static enum reg_class
25068 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
25069 enum machine_mode mode,
25070 secondary_reload_info *sri ATTRIBUTE_UNUSED)
25072 /* QImode spills from non-QI registers require
25073 an intermediate register on 32-bit targets. */
25074 if (!in_p && mode == QImode && !TARGET_64BIT
25075 && (rclass == GENERAL_REGS
25076 || rclass == LEGACY_REGS
25077 || rclass == INDEX_REGS))
25086 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
25087 regno = true_regnum (x);
25089 /* Return Q_REGS if the operand is in memory. */
25097 /* If we are copying between general and FP registers, we need a memory
25098 location. The same is true for SSE and MMX registers.
25100 To optimize register_move_cost performance, allow inline variant.
25102 The macro can't work reliably when one of the CLASSES is a class containing
25103 registers from multiple units (SSE, MMX, integer). We avoid this by never
25104 combining those units in single alternative in the machine description.
25105 Ensure that this constraint holds to avoid unexpected surprises.
25107 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
25108 enforce these sanity checks. */
25111 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25112 enum machine_mode mode, int strict)
25114 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
25115 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
25116 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
25117 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
25118 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
25119 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
25121 gcc_assert (!strict);
25125 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
25128 /* ??? This is a lie. We do have moves between mmx/general, and
25129 between mmx/sse2. But by saying we need secondary memory we discourage the
25130 register allocator from using the mmx registers unless needed. */
25131 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
25134 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25136 /* SSE1 doesn't have any direct moves from other classes. */
25140 /* If the target says that inter-unit moves are more expensive
25141 than moving through memory, then don't generate them. */
25142 if (!TARGET_INTER_UNIT_MOVES)
25145 /* Between SSE and general, we have moves no larger than word size. */
25146 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
25154 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25155 enum machine_mode mode, int strict)
25157 return inline_secondary_memory_needed (class1, class2, mode, strict);
25160 /* Return true if the registers in CLASS cannot represent the change from
25161 modes FROM to TO. */
25164 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
25165 enum reg_class regclass)
25170 /* x87 registers can't do subreg at all, as all values are reformatted
25171 to extended precision. */
25172 if (MAYBE_FLOAT_CLASS_P (regclass))
25175 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25177 /* Vector registers do not support QI or HImode loads. If we don't
25178 disallow a change to these modes, reload will assume it's ok to
25179 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25180 the vec_dupv4hi pattern. */
25181 if (GET_MODE_SIZE (from) < 4)
25184 /* Vector registers do not support subreg with nonzero offsets, which
25185 are otherwise valid for integer registers. Since we can't see
25186 whether we have a nonzero offset from here, prohibit all
25187 nonparadoxical subregs changing size. */
25188 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
25195 /* Return the cost of moving data of mode M between a
25196 register and memory. A value of 2 is the default; this cost is
25197 relative to those in `REGISTER_MOVE_COST'.
25199 This function is used extensively by register_move_cost, which is used
25200 to build tables at startup. Make it inline in this case.
25201 When IN is 2, return the maximum of the in and out move costs.
25203 If moving between registers and memory is more expensive than
25204 between two registers, you should define this macro to express the relative cost.
25207 Also model the increased moving costs of QImode registers in non-Q_REGS classes. */
25211 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25215 if (FLOAT_CLASS_P (regclass))
25233 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25234 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25236 if (SSE_CLASS_P (regclass))
25239 switch (GET_MODE_SIZE (mode))
25254 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25255 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25257 if (MMX_CLASS_P (regclass))
25260 switch (GET_MODE_SIZE (mode))
25272 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25273 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25275 switch (GET_MODE_SIZE (mode))
25278 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25281 return ix86_cost->int_store[0];
25282 if (TARGET_PARTIAL_REG_DEPENDENCY
25283 && optimize_function_for_speed_p (cfun))
25284 cost = ix86_cost->movzbl_load;
25286 cost = ix86_cost->int_load[0];
25288 return MAX (cost, ix86_cost->int_store[0]);
25294 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25296 return ix86_cost->movzbl_load;
25298 return ix86_cost->int_store[0] + 4;
25303 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25304 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25306 /* Compute the number of 32-bit moves needed. TFmode is moved as XFmode. */
25307 if (mode == TFmode)
25310 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25312 cost = ix86_cost->int_load[2];
25314 cost = ix86_cost->int_store[2];
25315 return (cost * (((int) GET_MODE_SIZE (mode)
25316 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
25321 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
25323 return inline_memory_move_cost (mode, regclass, in);
25327 /* Return the cost of moving data from a register in class CLASS1 to
25328 one in class CLASS2.
25330 It is not required that the cost always equal 2 when FROM is the same as TO;
25331 on some machines it is expensive to move between registers if they are not
25332 general registers. */
25335 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25336 enum reg_class class2)
25338 /* In case we require secondary memory, compute the cost of the store
25339 followed by the load. To avoid bad register allocation choices, this
25340 needs to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
25342 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25346 cost += inline_memory_move_cost (mode, class1, 2);
25347 cost += inline_memory_move_cost (mode, class2, 2);
25349 /* When copying from a general purpose register, we may emit multiple
25350 stores followed by a single load, causing a memory size mismatch stall.
25351 Count this as an arbitrarily high cost of 20. */
25352 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25355 /* In the case of FP/MMX moves, the registers actually overlap, and we
25356 have to switch modes in order to treat them differently. */
25357 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25358 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25364 /* Moves between the SSE/MMX units and the integer unit are expensive. */
25365 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25366 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25368 /* ??? By keeping the returned value relatively high, we limit the number
25369 of moves between integer and MMX/SSE registers for all targets.
25370 Additionally, the high value prevents a problem with x86_modes_tieable_p (),
25371 where integer modes in MMX/SSE registers are not tieable
25372 because of missing QImode and HImode moves to, from or between
25373 MMX/SSE registers. */
25374 return MAX (8, ix86_cost->mmxsse_to_integer);
25376 if (MAYBE_FLOAT_CLASS_P (class1))
25377 return ix86_cost->fp_move;
25378 if (MAYBE_SSE_CLASS_P (class1))
25379 return ix86_cost->sse_move;
25380 if (MAYBE_MMX_CLASS_P (class1))
25381 return ix86_cost->mmx_move;
25385 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25388 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25390 /* Flags, and only flags, can hold CCmode values. */
25391 if (CC_REGNO_P (regno))
25392 return GET_MODE_CLASS (mode) == MODE_CC;
25393 if (GET_MODE_CLASS (mode) == MODE_CC
25394 || GET_MODE_CLASS (mode) == MODE_RANDOM
25395 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25397 if (FP_REGNO_P (regno))
25398 return VALID_FP_MODE_P (mode);
25399 if (SSE_REGNO_P (regno))
25401 /* We implement the move patterns for all vector modes into and
25402 out of SSE registers, even when no operation instructions
25403 are available. OImode move is available only when AVX is enabled. */
25405 return ((TARGET_AVX && mode == OImode)
25406 || VALID_AVX256_REG_MODE (mode)
25407 || VALID_SSE_REG_MODE (mode)
25408 || VALID_SSE2_REG_MODE (mode)
25409 || VALID_MMX_REG_MODE (mode)
25410 || VALID_MMX_REG_MODE_3DNOW (mode));
25412 if (MMX_REGNO_P (regno))
25414 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25415 so if the register is available at all, then we can move data of
25416 the given mode into or out of it. */
25417 return (VALID_MMX_REG_MODE (mode)
25418 || VALID_MMX_REG_MODE_3DNOW (mode));
25421 if (mode == QImode)
/* Take care with QImode values - they can live in non-QI regs,
   but then they cause partial register stalls.  */
25425 if (regno <= BX_REG || TARGET_64BIT)
25427 if (!TARGET_PARTIAL_REG_STALL)
25429 return reload_in_progress || reload_completed;
/* We handle both integers and floats in the general purpose registers.  */
25432 else if (VALID_INT_MODE_P (mode))
25434 else if (VALID_FP_MODE_P (mode))
25436 else if (VALID_DFP_MODE_P (mode))
25438 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25439 on to use that value in smaller contexts, this can easily force a
25440 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25441 supporting DImode, allow it. */
25442 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25448 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25449 tieable integer mode. */
25452 ix86_tieable_integer_mode_p (enum machine_mode mode)
25461 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25464 return TARGET_64BIT;
25471 /* Return true if MODE1 is accessible in a register that can hold MODE2
25472 without copying. That is, all register classes that can hold MODE2
25473 can also hold MODE1. */
25476 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25478 if (mode1 == mode2)
25481 if (ix86_tieable_integer_mode_p (mode1)
25482 && ix86_tieable_integer_mode_p (mode2))
25485 /* MODE2 being XFmode implies fp stack or general regs, which means we
25486 can tie any smaller floating point modes to it. Note that we do not
25487 tie this with TFmode. */
25488 if (mode2 == XFmode)
25489 return mode1 == SFmode || mode1 == DFmode;
25491 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
25492 that we can tie it with SFmode. */
25493 if (mode2 == DFmode)
25494 return mode1 == SFmode;
25496 /* If MODE2 is only appropriate for an SSE register, then tie with
25497 any other mode acceptable to SSE registers. */
25498 if (GET_MODE_SIZE (mode2) == 16
25499 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
25500 return (GET_MODE_SIZE (mode1) == 16
25501 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
25503 /* If MODE2 is appropriate for an MMX register, then tie
25504 with any other mode acceptable to MMX registers. */
25505 if (GET_MODE_SIZE (mode2) == 8
25506 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
25507 return (GET_MODE_SIZE (mode1) == 8
25508 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
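/* Worked examples of the tieing rules in ix86_modes_tieable_p: with
   MODE2 == XFmode, SFmode and DFmode tie (anything holding XFmode can
   also hold the smaller FP modes); with MODE2 == DFmode only SFmode
   ties; a 16-byte SSE-only MODE2 such as V4SFmode ties only other
   16-byte modes acceptable to SSE registers, and an 8-byte MODE2 such
   as V2SImode ties only other 8-byte MMX modes.  */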
25513 /* Compute a (partial) cost for rtx X. Return true if the complete
25514 cost has been computed, and false if subexpressions should be
25515 scanned. In either case, *TOTAL contains the cost result. */
25518 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
25520 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
25521 enum machine_mode mode = GET_MODE (x);
25522 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
25530 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
25532 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
25534 else if (flag_pic && SYMBOLIC_CONST (x)
|| (GET_CODE (x) != LABEL_REF
25537 && (GET_CODE (x) != SYMBOL_REF
25538 || !SYMBOL_REF_LOCAL_P (x)))))
25545 if (mode == VOIDmode)
25548 switch (standard_80387_constant_p (x))
25553 default: /* Other constants */
25558 /* Start with (MEM (SYMBOL_REF)), since that's where
25559 it'll probably end up. Add a penalty for size. */
25560 *total = (COSTS_N_INSNS (1)
25561 + (flag_pic != 0 && !TARGET_64BIT)
25562 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
/* The zero extension is often completely free on x86_64, so make
25569 it as cheap as possible. */
25570 if (TARGET_64BIT && mode == DImode
25571 && GET_MODE (XEXP (x, 0)) == SImode)
25573 else if (TARGET_ZERO_EXTEND_WITH_AND)
25574 *total = cost->add;
25576 *total = cost->movzx;
25580 *total = cost->movsx;
25584 if (CONST_INT_P (XEXP (x, 1))
25585 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
25587 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25590 *total = cost->add;
25593 if ((value == 2 || value == 3)
25594 && cost->lea <= cost->shift_const)
25596 *total = cost->lea;
25606 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
25608 if (CONST_INT_P (XEXP (x, 1)))
25610 if (INTVAL (XEXP (x, 1)) > 32)
25611 *total = cost->shift_const + COSTS_N_INSNS (2);
25613 *total = cost->shift_const * 2;
25617 if (GET_CODE (XEXP (x, 1)) == AND)
25618 *total = cost->shift_var * 2;
25620 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
25625 if (CONST_INT_P (XEXP (x, 1)))
25626 *total = cost->shift_const;
25628 *total = cost->shift_var;
25633 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25635 /* ??? SSE scalar cost should be used here. */
25636 *total = cost->fmul;
25639 else if (X87_FLOAT_MODE_P (mode))
25641 *total = cost->fmul;
25644 else if (FLOAT_MODE_P (mode))
25646 /* ??? SSE vector cost should be used here. */
25647 *total = cost->fmul;
25652 rtx op0 = XEXP (x, 0);
25653 rtx op1 = XEXP (x, 1);
25655 if (CONST_INT_P (XEXP (x, 1)))
25657 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25658 for (nbits = 0; value != 0; value &= value - 1)
25662 /* This is arbitrary. */
25665 /* Compute costs correctly for widening multiplication. */
25666 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
25667 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
25668 == GET_MODE_SIZE (mode))
25670 int is_mulwiden = 0;
25671 enum machine_mode inner_mode = GET_MODE (op0);
25673 if (GET_CODE (op0) == GET_CODE (op1))
25674 is_mulwiden = 1, op1 = XEXP (op1, 0);
25675 else if (CONST_INT_P (op1))
25677 if (GET_CODE (op0) == SIGN_EXTEND)
25678 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
25681 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
25685 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
25688 *total = (cost->mult_init[MODE_INDEX (mode)]
25689 + nbits * cost->mult_bit
25690 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
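/* Illustration of the constant-multiplier costing above: for x * 5 the
   multiplier is 0b101, so nbits == 2 and the estimate is
   mult_init[mode] + 2 * mult_bit plus the operand costs.  A power of
   two (one set bit) is cheapest; a dense constant costs more, and a
   non-constant multiplier uses the arbitrary nbits of 7.  */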
25699 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25700 /* ??? SSE cost should be used here. */
25701 *total = cost->fdiv;
25702 else if (X87_FLOAT_MODE_P (mode))
25703 *total = cost->fdiv;
25704 else if (FLOAT_MODE_P (mode))
25705 /* ??? SSE vector cost should be used here. */
25706 *total = cost->fdiv;
25708 *total = cost->divide[MODE_INDEX (mode)];
25712 if (GET_MODE_CLASS (mode) == MODE_INT
25713 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
25715 if (GET_CODE (XEXP (x, 0)) == PLUS
25716 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
25717 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
25718 && CONSTANT_P (XEXP (x, 1)))
25720 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
25721 if (val == 2 || val == 4 || val == 8)
25723 *total = cost->lea;
25724 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25725 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
25726 outer_code, speed);
25727 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25731 else if (GET_CODE (XEXP (x, 0)) == MULT
25732 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
25734 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
25735 if (val == 2 || val == 4 || val == 8)
25737 *total = cost->lea;
25738 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25739 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25743 else if (GET_CODE (XEXP (x, 0)) == PLUS)
25745 *total = cost->lea;
25746 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25747 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25748 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
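/* All three branches above model a single lea: the addressing unit can
   fold base + index * {2,4,8} + displacement into one instruction,
   e.g. something like "leal 12(%ebx,%ecx,4), %eax", so only the lea
   cost plus the costs of the sub-operands is charged.  */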
25755 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25757 /* ??? SSE cost should be used here. */
25758 *total = cost->fadd;
25761 else if (X87_FLOAT_MODE_P (mode))
25763 *total = cost->fadd;
25766 else if (FLOAT_MODE_P (mode))
25768 /* ??? SSE vector cost should be used here. */
25769 *total = cost->fadd;
25777 if (!TARGET_64BIT && mode == DImode)
25779 *total = (cost->add * 2
25780 + (rtx_cost (XEXP (x, 0), outer_code, speed)
25781 << (GET_MODE (XEXP (x, 0)) != DImode))
25782 + (rtx_cost (XEXP (x, 1), outer_code, speed)
25783 << (GET_MODE (XEXP (x, 1)) != DImode)));
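/* A double-word add on a 32-bit target is an add/adc pair, hence
   cost->add * 2.  Operands that are not already DImode (e.g. a
   constant, whose mode is VOIDmode) have their cost doubled by the
   shifts above, since both halves must be materialized separately.  */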
25789 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25791 /* ??? SSE cost should be used here. */
25792 *total = cost->fchs;
25795 else if (X87_FLOAT_MODE_P (mode))
25797 *total = cost->fchs;
25800 else if (FLOAT_MODE_P (mode))
25802 /* ??? SSE vector cost should be used here. */
25803 *total = cost->fchs;
25809 if (!TARGET_64BIT && mode == DImode)
25810 *total = cost->add * 2;
25812 *total = cost->add;
25816 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
25817 && XEXP (XEXP (x, 0), 1) == const1_rtx
25818 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
25819 && XEXP (x, 1) == const0_rtx)
25821 /* This kind of construct is implemented using test[bwl].
25822 Treat it as if we had an AND. */
25823 *total = (cost->add
25824 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
25825 + rtx_cost (const1_rtx, outer_code, speed));
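/* For example, (compare (zero_extract x 1 pos) 0) with constant POS is
   a single-bit test such as "testl $0x100, %eax", so it is costed like
   an AND with a one-bit immediate rather than a shift/mask sequence.  */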
25831 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
25836 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25837 /* ??? SSE cost should be used here. */
25838 *total = cost->fabs;
25839 else if (X87_FLOAT_MODE_P (mode))
25840 *total = cost->fabs;
25841 else if (FLOAT_MODE_P (mode))
25842 /* ??? SSE vector cost should be used here. */
25843 *total = cost->fabs;
25847 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25848 /* ??? SSE cost should be used here. */
25849 *total = cost->fsqrt;
25850 else if (X87_FLOAT_MODE_P (mode))
25851 *total = cost->fsqrt;
25852 else if (FLOAT_MODE_P (mode))
25853 /* ??? SSE vector cost should be used here. */
25854 *total = cost->fsqrt;
25858 if (XINT (x, 1) == UNSPEC_TP)
25865 case VEC_DUPLICATE:
25866 /* ??? Assume all of these vector manipulation patterns are
recognizable, in which case they all have pretty much the same cost.  */
25869 *total = COSTS_N_INSNS (1);
25879 static int current_machopic_label_num;
25881 /* Given a symbol name and its associated stub, write out the
25882 definition of the stub. */
25885 machopic_output_stub (FILE *file, const char *symb, const char *stub)
25887 unsigned int length;
25888 char *binder_name, *symbol_name, lazy_ptr_name[32];
25889 int label = ++current_machopic_label_num;
25891 /* For 64-bit we shouldn't get here. */
25892 gcc_assert (!TARGET_64BIT);
25894 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
25895 symb = (*targetm.strip_name_encoding) (symb);
25897 length = strlen (stub);
25898 binder_name = XALLOCAVEC (char, length + 32);
25899 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
25901 length = strlen (symb);
25902 symbol_name = XALLOCAVEC (char, length + 32);
25903 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
25905 sprintf (lazy_ptr_name, "L%d$lz", label);
25908 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
25910 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
25912 fprintf (file, "%s:\n", stub);
25913 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25917 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
25918 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
25919 fprintf (file, "\tjmp\t*%%edx\n");
25922 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
25924 fprintf (file, "%s:\n", binder_name);
25928 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
25929 fputs ("\tpushl\t%eax\n", file);
25932 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
25934 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
25936 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
25937 fprintf (file, "%s:\n", lazy_ptr_name);
25938 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25939 fprintf (file, ASM_LONG "%s\n", binder_name);
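/* For illustration (a sketch with placeholder names): in the PIC case
   the code above emits roughly

	Lfoo$stub:
		.indirect_symbol _foo
		call	LPC$1
	LPC$1:	popl	%eax
		movl	L1$lz-LPC$1(%eax),%edx
		jmp	*%edx
	Lfoo$stub$binder:
		lea	L1$lz-LPC$1(%eax),%eax
		pushl	%eax
		jmp	dyld_stub_binding_helper
	L1$lz:
		.indirect_symbol _foo
		.long	Lfoo$stub$binder

   i.e. the first call through the stub reaches the binder, which
   resolves the symbol and patches the lazy pointer L1$lz so later
   calls jump straight to the target.  */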
25941 #endif /* TARGET_MACHO */
25943 /* Order the registers for register allocator. */
25946 x86_order_regs_for_local_alloc (void)
25951 /* First allocate the local general purpose registers. */
25952 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
25953 if (GENERAL_REGNO_P (i) && call_used_regs[i])
25954 reg_alloc_order [pos++] = i;
25956 /* Global general purpose registers. */
25957 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
25958 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
25959 reg_alloc_order [pos++] = i;
/* x87 registers come first in case we are doing FP math using them.  */
25963 if (!TARGET_SSE_MATH)
25964 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
25965 reg_alloc_order [pos++] = i;
25967 /* SSE registers. */
25968 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
25969 reg_alloc_order [pos++] = i;
25970 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
25971 reg_alloc_order [pos++] = i;
25973 /* x87 registers. */
25974 if (TARGET_SSE_MATH)
25975 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
25976 reg_alloc_order [pos++] = i;
25978 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
25979 reg_alloc_order [pos++] = i;
/* Initialize the rest of the array, as we do not allocate some registers at all.  */
25983 while (pos < FIRST_PSEUDO_REGISTER)
25984 reg_alloc_order [pos++] = 0;
25987 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
25988 struct attribute_spec.handler. */
25990 ix86_handle_abi_attribute (tree *node, tree name,
25991 tree args ATTRIBUTE_UNUSED,
25992 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
25994 if (TREE_CODE (*node) != FUNCTION_TYPE
25995 && TREE_CODE (*node) != METHOD_TYPE
25996 && TREE_CODE (*node) != FIELD_DECL
25997 && TREE_CODE (*node) != TYPE_DECL)
25999 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26001 *no_add_attrs = true;
26006 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
26008 *no_add_attrs = true;
26012 /* Can combine regparm with all attributes but fastcall. */
26013 if (is_attribute_p ("ms_abi", name))
26015 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
26017 error ("ms_abi and sysv_abi attributes are not compatible");
26022 else if (is_attribute_p ("sysv_abi", name))
26024 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
26026 error ("ms_abi and sysv_abi attributes are not compatible");
26035 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
26036 struct attribute_spec.handler. */
26038 ix86_handle_struct_attribute (tree *node, tree name,
26039 tree args ATTRIBUTE_UNUSED,
26040 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26043 if (DECL_P (*node))
26045 if (TREE_CODE (*node) == TYPE_DECL)
26046 type = &TREE_TYPE (*node);
26051 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
26052 || TREE_CODE (*type) == UNION_TYPE)))
26054 warning (OPT_Wattributes, "%qE attribute ignored",
26056 *no_add_attrs = true;
26059 else if ((is_attribute_p ("ms_struct", name)
26060 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
26061 || ((is_attribute_p ("gcc_struct", name)
26062 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
26064 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
26066 *no_add_attrs = true;
26073 ix86_handle_fndecl_attribute (tree *node, tree name,
26074 tree args ATTRIBUTE_UNUSED,
26075 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26077 if (TREE_CODE (*node) != FUNCTION_DECL)
26079 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26081 *no_add_attrs = true;
26087 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
26092 #ifndef HAVE_AS_IX86_SWAP
26093 sorry ("ms_hook_prologue attribute needs assembler swap suffix support");
26100 ix86_ms_bitfield_layout_p (const_tree record_type)
return ((TARGET_MS_BITFIELD_LAYOUT
         && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
        || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
26107 /* Returns an expression indicating where the this parameter is
26108 located on entry to the FUNCTION. */
26111 x86_this_parameter (tree function)
26113 tree type = TREE_TYPE (function);
26114 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
26119 const int *parm_regs;
26121 if (ix86_function_type_abi (type) == MS_ABI)
26122 parm_regs = x86_64_ms_abi_int_parameter_registers;
26124 parm_regs = x86_64_int_parameter_registers;
26125 return gen_rtx_REG (DImode, parm_regs[aggr]);
26128 nregs = ix86_function_regparm (type, function);
26130 if (nregs > 0 && !stdarg_p (type))
26134 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
26135 regno = aggr ? DX_REG : CX_REG;
26143 return gen_rtx_MEM (SImode,
26144 plus_constant (stack_pointer_rtx, 4));
26147 return gen_rtx_REG (SImode, regno);
26150 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
26153 /* Determine whether x86_output_mi_thunk can succeed. */
26156 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
26157 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
26158 HOST_WIDE_INT vcall_offset, const_tree function)
26160 /* 64-bit can handle anything. */
26164 /* For 32-bit, everything's fine if we have one free register. */
26165 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
26168 /* Need a free register for vcall_offset. */
26172 /* Need a free register for GOT references. */
26173 if (flag_pic && !(*targetm.binds_local_p) (function))
26176 /* Otherwise ok. */
26180 /* Output the assembler code for a thunk function. THUNK_DECL is the
26181 declaration for the thunk function itself, FUNCTION is the decl for
26182 the target function. DELTA is an immediate constant offset to be
26183 added to THIS. If VCALL_OFFSET is nonzero, the word at
26184 *(*this + vcall_offset) should be added to THIS. */
26187 x86_output_mi_thunk (FILE *file,
26188 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
26189 HOST_WIDE_INT vcall_offset, tree function)
26192 rtx this_param = x86_this_parameter (function);
26195 /* Make sure unwind info is emitted for the thunk if needed. */
26196 final_start_function (emit_barrier (), file, 1);
26198 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
26199 pull it in now and let DELTA benefit. */
26200 if (REG_P (this_param))
26201 this_reg = this_param;
26202 else if (vcall_offset)
26204 /* Put the this parameter into %eax. */
26205 xops[0] = this_param;
26206 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
26207 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26210 this_reg = NULL_RTX;
26212 /* Adjust the this parameter by a fixed constant. */
26215 /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
Exception: -128 encodes smaller than 128, so swap sign and operation.  */
26217 bool sub = delta < 0 || delta == 128;
26218 xops[0] = GEN_INT (sub ? -delta : delta);
26219 xops[1] = this_reg ? this_reg : this_param;
26222 if (!x86_64_general_operand (xops[0], DImode))
26224 tmp = gen_rtx_REG (DImode, R10_REG);
26226 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
26228 xops[1] = this_param;
26231 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
26233 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
26236 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
26238 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
26241 /* Adjust the this parameter by a value stored in the vtable. */
26245 tmp = gen_rtx_REG (DImode, R10_REG);
26248 int tmp_regno = CX_REG;
26249 if (lookup_attribute ("fastcall",
26250 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26251 tmp_regno = AX_REG;
26252 tmp = gen_rtx_REG (SImode, tmp_regno);
26255 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26257 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26259 /* Adjust the this parameter. */
26260 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26261 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26263 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26264 xops[0] = GEN_INT (vcall_offset);
26266 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26267 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26269 xops[1] = this_reg;
26270 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26273 /* If necessary, drop THIS back to its stack slot. */
26274 if (this_reg && this_reg != this_param)
26276 xops[0] = this_reg;
26277 xops[1] = this_param;
26278 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26281 xops[0] = XEXP (DECL_RTL (function), 0);
26284 if (!flag_pic || (*targetm.binds_local_p) (function))
26285 output_asm_insn ("jmp\t%P0", xops);
26286 /* All thunks should be in the same object as their target,
26287 and thus binds_local_p should be true. */
26288 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26289 gcc_unreachable ();
26292 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26293 tmp = gen_rtx_CONST (Pmode, tmp);
26294 tmp = gen_rtx_MEM (QImode, tmp);
26296 output_asm_insn ("jmp\t%A0", xops);
26301 if (!flag_pic || (*targetm.binds_local_p) (function))
26302 output_asm_insn ("jmp\t%P0", xops);
26307 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26308 tmp = (gen_rtx_SYMBOL_REF
26310 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26311 tmp = gen_rtx_MEM (QImode, tmp);
26313 output_asm_insn ("jmp\t%0", xops);
26316 #endif /* TARGET_MACHO */
26318 tmp = gen_rtx_REG (SImode, CX_REG);
26319 output_set_got (tmp, NULL_RTX);
26322 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26323 output_asm_insn ("jmp\t{*}%1", xops);
26326 final_end_function ();
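/* For illustration, a minimal 32-bit, non-PIC thunk with THIS on the
   stack, DELTA == 16 and no vcall offset comes out as roughly

	addl	$16, 4(%esp)
	jmp	target

   (a sketch; register-parameter ABIs adjust THIS in %ecx or %eax
   instead, and PIC targets go through the GOT sequence above).  */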
26330 x86_file_start (void)
26332 default_file_start ();
26334 darwin_file_start ();
26336 if (X86_FILE_START_VERSION_DIRECTIVE)
26337 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26338 if (X86_FILE_START_FLTUSED)
26339 fputs ("\t.global\t__fltused\n", asm_out_file);
26340 if (ix86_asm_dialect == ASM_INTEL)
26341 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26345 x86_field_alignment (tree field, int computed)
26347 enum machine_mode mode;
26348 tree type = TREE_TYPE (field);
26350 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26352 mode = TYPE_MODE (strip_array_types (type));
26353 if (mode == DFmode || mode == DCmode
26354 || GET_MODE_CLASS (mode) == MODE_INT
26355 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26356 return MIN (32, computed);
26360 /* Output assembler code to FILE to increment profiler label # LABELNO
26361 for profiling a function entry. */
26363 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26367 #ifndef NO_PROFILE_COUNTERS
26368 fprintf (file, "\tleaq\t" LPREFIX "P%d(%%rip),%%r11\n", labelno);
26371 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26372 fputs ("\tcall\t*" MCOUNT_NAME "@GOTPCREL(%rip)\n", file);
26374 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26378 #ifndef NO_PROFILE_COUNTERS
26379 fprintf (file, "\tleal\t" LPREFIX "P%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
26382 fputs ("\tcall\t*" MCOUNT_NAME "@GOT(%ebx)\n", file);
26386 #ifndef NO_PROFILE_COUNTERS
26387 fprintf (file, "\tmovl\t$" LPREFIX "P%d,%%" PROFILE_COUNT_REGISTER "\n",
26390 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26394 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26395 /* We don't have exact information about the insn sizes, but we may assume
26396 quite safely that we are informed about all 1 byte insns and memory
address sizes.  This is enough to eliminate unnecessary padding in the vast majority of cases.  */
26401 min_insn_size (rtx insn)
26405 if (!INSN_P (insn) || !active_insn_p (insn))
/* Discard the alignments we've emitted, and jump tables.  */
26409 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26410 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26412 if (JUMP_TABLE_DATA_P (insn))
26415 /* Important case - calls are always 5 bytes.
It is common to have many calls in a row.  */
26418 && symbolic_reference_mentioned_p (PATTERN (insn))
26419 && !SIBLING_CALL_P (insn))
26421 len = get_attr_length (insn);
26425 /* For normal instructions we rely on get_attr_length being exact,
26426 with a few exceptions. */
26427 if (!JUMP_P (insn))
26429 enum attr_type type = get_attr_type (insn);
26434 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
26435 || asm_noperands (PATTERN (insn)) >= 0)
26442 /* Otherwise trust get_attr_length. */
26446 l = get_attr_length_address (insn);
26447 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
/* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16-byte window.  */
26460 ix86_avoid_jump_mispredicts (void)
26462 rtx insn, start = get_insns ();
26463 int nbytes = 0, njumps = 0;
/* Look for all minimal intervals of instructions containing 4 jumps.
   The intervals are bounded by START and INSN.  NBYTES is the total
   size of the instructions in the interval including INSN and not
   including START.  When NBYTES is smaller than 16, it is possible
   that the end of START and INSN end up in the same 16-byte window.

   The smallest offset in the window at which INSN can start is the
   case where START ends at offset 0.  The offset of INSN is then
   NBYTES - sizeof (INSN).  We add a p2align to the 16-byte window
   with maxskip of 15 - NBYTES + sizeof (INSN).
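   A worked instance of the padding rule: if the pass finds 4 jumps
   that could share a window and NBYTES == 12 while INSN is 2 bytes,
   then padsize == 15 - 12 + 2 == 5; after the pad the span exceeds
   16 bytes, so the first and last jump can no longer share a single
   16-byte fetch window.  */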
26476 for (insn = start; insn; insn = NEXT_INSN (insn))
26480 if (LABEL_P (insn))
26482 int align = label_to_alignment (insn);
26483 int max_skip = label_to_max_skip (insn);
/* If align > 3, only up to 16 - max_skip - 1 bytes can be
   already in the current 16-byte window, because otherwise
   ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
   bytes to reach a 16-byte boundary.  */
26492 || (align <= 3 && max_skip != (1 << align) - 1))
26495 fprintf (dump_file, "Label %i with max_skip %i\n",
26496 INSN_UID (insn), max_skip);
26499 while (nbytes + max_skip >= 16)
26501 start = NEXT_INSN (start);
26502 if ((JUMP_P (start)
26503 && GET_CODE (PATTERN (start)) != ADDR_VEC
26504 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26506 njumps--, isjump = 1;
26509 nbytes -= min_insn_size (start);
26515 min_size = min_insn_size (insn);
26516 nbytes += min_size;
26518 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
26519 INSN_UID (insn), min_size);
26521 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26522 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26530 start = NEXT_INSN (start);
26531 if ((JUMP_P (start)
26532 && GET_CODE (PATTERN (start)) != ADDR_VEC
26533 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26535 njumps--, isjump = 1;
26538 nbytes -= min_insn_size (start);
26540 gcc_assert (njumps >= 0);
26542 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26543 INSN_UID (start), INSN_UID (insn), nbytes);
26545 if (njumps == 3 && isjump && nbytes < 16)
26547 int padsize = 15 - nbytes + min_insn_size (insn);
26550 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26551 INSN_UID (insn), padsize);
26552 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
/* The AMD Athlon works faster when RET is not the destination of a
   conditional jump or directly preceded by another jump instruction.
   We avoid the penalty by inserting a NOP just before the RET
   instructions in such cases.  */
26563 ix86_pad_returns (void)
26568 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26570 basic_block bb = e->src;
26571 rtx ret = BB_END (bb);
26573 bool replace = false;
26575 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26576 || optimize_bb_for_size_p (bb))
26578 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26579 if (active_insn_p (prev) || LABEL_P (prev))
26581 if (prev && LABEL_P (prev))
26586 FOR_EACH_EDGE (e, ei, bb->preds)
26587 if (EDGE_FREQUENCY (e) && e->src->index >= 0
26588 && !(e->flags & EDGE_FALLTHRU))
26593 prev = prev_active_insn (ret);
26595 && ((JUMP_P (prev) && any_condjump_p (prev))
/* Empty functions get a branch mispredict even when the jump destination
26599 is not visible to us. */
26600 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
26605 emit_jump_insn_before (gen_return_internal_long (), ret);
26611 /* Implement machine specific optimizations. We implement padding of returns
26612 for K8 CPUs and pass to avoid 4 jumps in the single 16 byte window. */
26616 if (optimize && optimize_function_for_speed_p (cfun))
26618 if (TARGET_PAD_RETURNS)
26619 ix86_pad_returns ();
26620 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26621 if (TARGET_FOUR_JUMP_LIMIT)
26622 ix86_avoid_jump_mispredicts ();
/* Return nonzero when a QImode register that must be represented via a REX prefix is used.  */
26630 x86_extended_QIreg_mentioned_p (rtx insn)
26633 extract_insn_cached (insn);
26634 for (i = 0; i < recog_data.n_operands; i++)
26635 if (REG_P (recog_data.operand[i])
26636 && REGNO (recog_data.operand[i]) > BX_REG)
/* Return nonzero when P points to a register encoded via a REX prefix.
26642 Called via for_each_rtx. */
26644 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
26646 unsigned int regno;
26649 regno = REGNO (*p);
26650 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
/* Return true when INSN mentions a register that must be encoded using a REX prefix.  */
26656 x86_extended_reg_mentioned_p (rtx insn)
26658 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
26659 extended_reg_mentioned_1, NULL);
26662 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
26663 optabs would emit if we didn't have TFmode patterns. */
26666 x86_emit_floatuns (rtx operands[2])
26668 rtx neglab, donelab, i0, i1, f0, in, out;
26669 enum machine_mode mode, inmode;
26671 inmode = GET_MODE (operands[1]);
26672 gcc_assert (inmode == SImode || inmode == DImode);
26675 in = force_reg (inmode, operands[1]);
26676 mode = GET_MODE (out);
26677 neglab = gen_label_rtx ();
26678 donelab = gen_label_rtx ();
26679 f0 = gen_reg_rtx (mode);
26681 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
26683 expand_float (out, in, 0);
26685 emit_jump_insn (gen_jump (donelab));
26688 emit_label (neglab);
26690 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
26692 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
26694 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
26696 expand_float (f0, i0, 0);
26698 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
26700 emit_label (donelab);
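/* The negative path above implements the usual unsigned-to-FP trick:
   when the value has its top bit set, compute
   i0 = (in >> 1) | (in & 1), convert that now-nonnegative value, and
   double the result.  Folding the low bit back in keeps the final
   rounding correct, since it acts as a sticky bit for the halved
   value.  */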
26703 /* AVX does not support 32-byte integer vector operations,
26704 thus the longest vector we are faced with is V16QImode. */
26705 #define MAX_VECT_LEN 16
26707 struct expand_vec_perm_d
26709 rtx target, op0, op1;
26710 unsigned char perm[MAX_VECT_LEN];
26711 enum machine_mode vmode;
26712 unsigned char nelt;
26716 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
26717 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
26719 /* Get a vector mode of the same size as the original but with elements
26720 twice as wide. This is only guaranteed to apply to integral vectors. */
26722 static inline enum machine_mode
26723 get_mode_wider_vector (enum machine_mode o)
26725 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
26726 enum machine_mode n = GET_MODE_WIDER_MODE (o);
26727 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
26728 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
26732 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26733 with all elements equal to VAR. Return true if successful. */
26736 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
26737 rtx target, rtx val)
26760 /* First attempt to recognize VAL as-is. */
26761 dup = gen_rtx_VEC_DUPLICATE (mode, val);
26762 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
26763 if (recog_memoized (insn) < 0)
26766 /* If that fails, force VAL into a register. */
26769 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
26770 seq = get_insns ();
26773 emit_insn_before (seq, insn);
26775 ok = recog_memoized (insn) >= 0;
26784 if (TARGET_SSE || TARGET_3DNOW_A)
26788 val = gen_lowpart (SImode, val);
26789 x = gen_rtx_TRUNCATE (HImode, val);
26790 x = gen_rtx_VEC_DUPLICATE (mode, x);
26791 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26804 struct expand_vec_perm_d dperm;
26808 memset (&dperm, 0, sizeof (dperm));
26809 dperm.target = target;
26810 dperm.vmode = mode;
26811 dperm.nelt = GET_MODE_NUNITS (mode);
26812 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
26814 /* Extend to SImode using a paradoxical SUBREG. */
26815 tmp1 = gen_reg_rtx (SImode);
26816 emit_move_insn (tmp1, gen_lowpart (SImode, val));
26818 /* Insert the SImode value as low element of a V4SImode vector. */
26819 tmp2 = gen_lowpart (V4SImode, dperm.op0);
26820 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
26822 ok = (expand_vec_perm_1 (&dperm)
26823 || expand_vec_perm_broadcast_1 (&dperm));
26835 /* Replicate the value once into the next wider mode and recurse. */
26837 enum machine_mode smode, wsmode, wvmode;
26840 smode = GET_MODE_INNER (mode);
26841 wvmode = get_mode_wider_vector (mode);
26842 wsmode = GET_MODE_INNER (wvmode);
26844 val = convert_modes (wsmode, smode, val, true);
26845 x = expand_simple_binop (wsmode, ASHIFT, val,
26846 GEN_INT (GET_MODE_BITSIZE (smode)),
26847 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26848 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
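/* E.g. broadcasting the QImode byte 0xAB first forms the HImode value
   0xABAB via the shift/IOR above, then recurses: a V8HImode splat of
   0xABAB has the same bit pattern as a V16QImode splat of 0xAB.  */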
26850 x = gen_lowpart (wvmode, target);
26851 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
26859 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
26860 rtx x = gen_reg_rtx (hvmode);
26862 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
26865 x = gen_rtx_VEC_CONCAT (mode, x, x);
26866 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26875 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
whose ONE_VAR element is VAR, and whose other elements are zero.  Return true if successful.  */
26880 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
26881 rtx target, rtx var, int one_var)
26883 enum machine_mode vsimode;
26886 bool use_vector_set = false;
26891 /* For SSE4.1, we normally use vector set. But if the second
element is zero and inter-unit moves are OK, we use movq instead.  */
26894 use_vector_set = (TARGET_64BIT
26896 && !(TARGET_INTER_UNIT_MOVES
26902 use_vector_set = TARGET_SSE4_1;
26905 use_vector_set = TARGET_SSE2;
26908 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
26915 use_vector_set = TARGET_AVX;
/* Use ix86_expand_vector_set in 64-bit mode only.  */
26919 use_vector_set = TARGET_AVX && TARGET_64BIT;
26925 if (use_vector_set)
26927 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
26928 var = force_reg (GET_MODE_INNER (mode), var);
26929 ix86_expand_vector_set (mmx_ok, target, var, one_var);
26945 var = force_reg (GET_MODE_INNER (mode), var);
26946 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
26947 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26952 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
26953 new_target = gen_reg_rtx (mode);
26955 new_target = target;
26956 var = force_reg (GET_MODE_INNER (mode), var);
26957 x = gen_rtx_VEC_DUPLICATE (mode, var);
26958 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
26959 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
26962 /* We need to shuffle the value to the correct position, so
26963 create a new pseudo to store the intermediate result. */
26965 /* With SSE2, we can use the integer shuffle insns. */
26966 if (mode != V4SFmode && TARGET_SSE2)
26968 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
26970 GEN_INT (one_var == 1 ? 0 : 1),
26971 GEN_INT (one_var == 2 ? 0 : 1),
26972 GEN_INT (one_var == 3 ? 0 : 1)));
26973 if (target != new_target)
26974 emit_move_insn (target, new_target);
26978 /* Otherwise convert the intermediate result to V4SFmode and
26979 use the SSE1 shuffle instructions. */
26980 if (mode != V4SFmode)
26982 tmp = gen_reg_rtx (V4SFmode);
26983 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
26988 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
26990 GEN_INT (one_var == 1 ? 0 : 1),
26991 GEN_INT (one_var == 2 ? 0+4 : 1+4),
26992 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
26994 if (mode != V4SFmode)
26995 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
26996 else if (tmp != target)
26997 emit_move_insn (target, tmp);
26999 else if (target != new_target)
27000 emit_move_insn (target, new_target);
27005 vsimode = V4SImode;
27011 vsimode = V2SImode;
27017 /* Zero extend the variable element to SImode and recurse. */
27018 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
27020 x = gen_reg_rtx (vsimode);
27021 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
27023 gcc_unreachable ();
27025 emit_move_insn (target, gen_lowpart (mode, x));
27033 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27034 consisting of the values in VALS. It is known that all elements
27035 except ONE_VAR are constants. Return true if successful. */
27038 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
27039 rtx target, rtx vals, int one_var)
27041 rtx var = XVECEXP (vals, 0, one_var);
27042 enum machine_mode wmode;
27045 const_vec = copy_rtx (vals);
27046 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
27047 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
27055 /* For the two element vectors, it's just as easy to use
27056 the general case. */
/* Use ix86_expand_vector_set in 64-bit mode only.  */
27083 /* There's no way to set one QImode entry easily. Combine
27084 the variable value with its adjacent constant value, and
27085 promote to an HImode set. */
27086 x = XVECEXP (vals, 0, one_var ^ 1);
27089 var = convert_modes (HImode, QImode, var, true);
27090 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
27091 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27092 x = GEN_INT (INTVAL (x) & 0xff);
27096 var = convert_modes (HImode, QImode, var, true);
27097 x = gen_int_mode (INTVAL (x) << 8, HImode);
27099 if (x != const0_rtx)
27100 var = expand_simple_binop (HImode, IOR, var, x, var,
27101 1, OPTAB_LIB_WIDEN);
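/* E.g. to set byte 3 of a V16QImode vector: bytes 2 and 3 are fused
   into one HImode value (the constant byte 2 in the low half, the
   variable byte shifted into the high half), and the vector is then
   updated with a single HImode element set at position 3 >> 1 == 1.  */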
27103 x = gen_reg_rtx (wmode);
27104 emit_move_insn (x, gen_lowpart (wmode, const_vec));
27105 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
27107 emit_move_insn (target, gen_lowpart (mode, x));
27114 emit_move_insn (target, const_vec);
27115 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27119 /* A subroutine of ix86_expand_vector_init_general. Use vector
27120 concatenate to handle the most general case: all values variable,
27121 and none identical. */
27124 ix86_expand_vector_init_concat (enum machine_mode mode,
27125 rtx target, rtx *ops, int n)
27127 enum machine_mode cmode, hmode = VOIDmode;
27128 rtx first[8], second[4];
27168 gcc_unreachable ();
27171 if (!register_operand (ops[1], cmode))
27172 ops[1] = force_reg (cmode, ops[1]);
27173 if (!register_operand (ops[0], cmode))
27174 ops[0] = force_reg (cmode, ops[0]);
27175 emit_insn (gen_rtx_SET (VOIDmode, target,
27176 gen_rtx_VEC_CONCAT (mode, ops[0],
27196 gcc_unreachable ();
27212 gcc_unreachable ();
27217 /* FIXME: We process inputs backward to help RA. PR 36222. */
27220 for (; i > 0; i -= 2, j--)
27222 first[j] = gen_reg_rtx (cmode);
27223 v = gen_rtvec (2, ops[i - 1], ops[i]);
27224 ix86_expand_vector_init (false, first[j],
27225 gen_rtx_PARALLEL (cmode, v));
27231 gcc_assert (hmode != VOIDmode);
27232 for (i = j = 0; i < n; i += 2, j++)
27234 second[j] = gen_reg_rtx (hmode);
27235 ix86_expand_vector_init_concat (hmode, second [j],
27239 ix86_expand_vector_init_concat (mode, target, second, n);
27242 ix86_expand_vector_init_concat (mode, target, first, n);
27246 gcc_unreachable ();
27250 /* A subroutine of ix86_expand_vector_init_general. Use vector
27251 interleave to handle the most general case: all values variable,
27252 and none identical. */
27255 ix86_expand_vector_init_interleave (enum machine_mode mode,
27256 rtx target, rtx *ops, int n)
27258 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
27261 rtx (*gen_load_even) (rtx, rtx, rtx);
27262 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
27263 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
27268 gen_load_even = gen_vec_setv8hi;
27269 gen_interleave_first_low = gen_vec_interleave_lowv4si;
27270 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27271 inner_mode = HImode;
27272 first_imode = V4SImode;
27273 second_imode = V2DImode;
27274 third_imode = VOIDmode;
27277 gen_load_even = gen_vec_setv16qi;
27278 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
27279 gen_interleave_second_low = gen_vec_interleave_lowv4si;
27280 inner_mode = QImode;
27281 first_imode = V8HImode;
27282 second_imode = V4SImode;
27283 third_imode = V2DImode;
27286 gcc_unreachable ();
27289 for (i = 0; i < n; i++)
/* Extend the odd element to SImode using a paradoxical SUBREG.  */
27292 op0 = gen_reg_rtx (SImode);
27293 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
27295 /* Insert the SImode value as low element of V4SImode vector. */
27296 op1 = gen_reg_rtx (V4SImode);
27297 op0 = gen_rtx_VEC_MERGE (V4SImode,
27298 gen_rtx_VEC_DUPLICATE (V4SImode,
27300 CONST0_RTX (V4SImode),
27302 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
/* Cast the V4SImode vector back to a vector in the original mode.  */
27305 op0 = gen_reg_rtx (mode);
27306 emit_move_insn (op0, gen_lowpart (mode, op1));
/* Load even elements into the second position.  */
27309 emit_insn ((*gen_load_even) (op0,
27310 force_reg (inner_mode,
27314 /* Cast vector to FIRST_IMODE vector. */
27315 ops[i] = gen_reg_rtx (first_imode);
27316 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27319 /* Interleave low FIRST_IMODE vectors. */
27320 for (i = j = 0; i < n; i += 2, j++)
27322 op0 = gen_reg_rtx (first_imode);
27323 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27325 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27326 ops[j] = gen_reg_rtx (second_imode);
27327 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27330 /* Interleave low SECOND_IMODE vectors. */
27331 switch (second_imode)
27334 for (i = j = 0; i < n / 2; i += 2, j++)
27336 op0 = gen_reg_rtx (second_imode);
27337 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
/* Cast the SECOND_IMODE vector to the THIRD_IMODE vector.  */
27342 ops[j] = gen_reg_rtx (third_imode);
27343 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27345 second_imode = V2DImode;
27346 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27350 op0 = gen_reg_rtx (second_imode);
27351 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
/* Cast the SECOND_IMODE vector back to a vector in the original mode.  */
27356 emit_insn (gen_rtx_SET (VOIDmode, target,
27357 gen_lowpart (mode, op0)));
27361 gcc_unreachable ();
27365 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27366 all values variable, and none identical. */
27369 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27370 rtx target, rtx vals)
27372 rtx ops[32], op0, op1;
27373 enum machine_mode half_mode = VOIDmode;
27380 if (!mmx_ok && !TARGET_SSE)
27392 n = GET_MODE_NUNITS (mode);
27393 for (i = 0; i < n; i++)
27394 ops[i] = XVECEXP (vals, 0, i);
27395 ix86_expand_vector_init_concat (mode, target, ops, n);
27399 half_mode = V16QImode;
27403 half_mode = V8HImode;
27407 n = GET_MODE_NUNITS (mode);
27408 for (i = 0; i < n; i++)
27409 ops[i] = XVECEXP (vals, 0, i);
27410 op0 = gen_reg_rtx (half_mode);
27411 op1 = gen_reg_rtx (half_mode);
27412 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27414 ix86_expand_vector_init_interleave (half_mode, op1,
27415 &ops [n >> 1], n >> 2);
27416 emit_insn (gen_rtx_SET (VOIDmode, target,
27417 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27421 if (!TARGET_SSE4_1)
27429 /* Don't use ix86_expand_vector_init_interleave if we can't
27430 move from GPR to SSE register directly. */
27431 if (!TARGET_INTER_UNIT_MOVES)
27434 n = GET_MODE_NUNITS (mode);
27435 for (i = 0; i < n; i++)
27436 ops[i] = XVECEXP (vals, 0, i);
27437 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27445 gcc_unreachable ();
27449 int i, j, n_elts, n_words, n_elt_per_word;
27450 enum machine_mode inner_mode;
27451 rtx words[4], shift;
27453 inner_mode = GET_MODE_INNER (mode);
27454 n_elts = GET_MODE_NUNITS (mode);
27455 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27456 n_elt_per_word = n_elts / n_words;
27457 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
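/* The loop below packs the vector elements into word_mode registers,
   most significant element first, via shift-and-IOR.  E.g. for
   V4HImode on a 32-bit target, each SImode word ends up as
   (elt_high << 16) | elt_low.  */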
27459 for (i = 0; i < n_words; ++i)
27461 rtx word = NULL_RTX;
27463 for (j = 0; j < n_elt_per_word; ++j)
27465 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27466 elt = convert_modes (word_mode, inner_mode, elt, true);
27472 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27473 word, 1, OPTAB_LIB_WIDEN);
27474 word = expand_simple_binop (word_mode, IOR, word, elt,
27475 word, 1, OPTAB_LIB_WIDEN);
27483 emit_move_insn (target, gen_lowpart (mode, words[0]));
27484 else if (n_words == 2)
27486 rtx tmp = gen_reg_rtx (mode);
27487 emit_clobber (tmp);
27488 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27489 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27490 emit_move_insn (target, tmp);
27492 else if (n_words == 4)
27494 rtx tmp = gen_reg_rtx (V4SImode);
27495 gcc_assert (word_mode == SImode);
27496 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27497 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27498 emit_move_insn (target, gen_lowpart (mode, tmp));
27501 gcc_unreachable ();
27505 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27506 instructions unless MMX_OK is true. */
27509 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27511 enum machine_mode mode = GET_MODE (target);
27512 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27513 int n_elts = GET_MODE_NUNITS (mode);
27514 int n_var = 0, one_var = -1;
27515 bool all_same = true, all_const_zero = true;
27519 for (i = 0; i < n_elts; ++i)
27521 x = XVECEXP (vals, 0, i);
27522 if (!(CONST_INT_P (x)
27523 || GET_CODE (x) == CONST_DOUBLE
27524 || GET_CODE (x) == CONST_FIXED))
27525 n_var++, one_var = i;
27526 else if (x != CONST0_RTX (inner_mode))
27527 all_const_zero = false;
27528 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27532 /* Constants are best loaded from the constant pool. */
27535 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27539 /* If all values are identical, broadcast the value. */
27541 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27542 XVECEXP (vals, 0, 0)))
27545 /* Values where only one field is non-constant are best loaded from
27546 the pool and overwritten via move later. */
27550 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27551 XVECEXP (vals, 0, one_var),
27555 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27559 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
27563 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27565 enum machine_mode mode = GET_MODE (target);
27566 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27567 enum machine_mode half_mode;
27568 bool use_vec_merge = false;
27570 static rtx (*gen_extract[6][2]) (rtx, rtx)
27572 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27573 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27574 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27575 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27576 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27577 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27579 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27581 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27582 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27583 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27584 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27585 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27586 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27596 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
27597 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
27599 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
27601 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
27602 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27608 use_vec_merge = TARGET_SSE4_1;
27616 /* For the two element vectors, we implement a VEC_CONCAT with
27617 the extraction of the other element. */
27619 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
27620 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
27623 op0 = val, op1 = tmp;
27625 op0 = tmp, op1 = val;
27627 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
27628 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27633 use_vec_merge = TARGET_SSE4_1;
27640 use_vec_merge = true;
27644 /* tmp = target = A B C D */
27645 tmp = copy_to_reg (target);
27646 /* target = A A B B */
27647 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
27648 /* target = X A B B */
27649 ix86_expand_vector_set (false, target, val, 0);
27650 /* target = A X C D */
27651 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27652 const1_rtx, const0_rtx,
27653 GEN_INT (2+4), GEN_INT (3+4)));
27657 /* tmp = target = A B C D */
27658 tmp = copy_to_reg (target);
27659 /* tmp = X B C D */
27660 ix86_expand_vector_set (false, tmp, val, 0);
27661 /* target = A B X D */
27662 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27663 const0_rtx, const1_rtx,
27664 GEN_INT (0+4), GEN_INT (3+4)));
27668 /* tmp = target = A B C D */
27669 tmp = copy_to_reg (target);
27670 /* tmp = X B C D */
27671 ix86_expand_vector_set (false, tmp, val, 0);
27672 /* target = A B X D */
27673 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27674 const0_rtx, const1_rtx,
27675 GEN_INT (2+4), GEN_INT (0+4)));
27679 gcc_unreachable ();
27684 use_vec_merge = TARGET_SSE4_1;
27688 /* Element 0 handled by vec_merge below. */
27691 use_vec_merge = true;
27697 /* With SSE2, use integer shuffles to swap element 0 and ELT,
27698 store into element 0, then shuffle them back. */
27702 order[0] = GEN_INT (elt);
27703 order[1] = const1_rtx;
27704 order[2] = const2_rtx;
27705 order[3] = GEN_INT (3);
27706 order[elt] = const0_rtx;
27708 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27709 order[1], order[2], order[3]));
27711 ix86_expand_vector_set (false, target, val, 0);
27713 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27714 order[1], order[2], order[3]));
27718 /* For SSE1, we have to reuse the V4SF code. */
27719 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
27720 gen_lowpart (SFmode, val), elt);
27725 use_vec_merge = TARGET_SSE2;
27728 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27732 use_vec_merge = TARGET_SSE4_1;
27739 half_mode = V16QImode;
27745 half_mode = V8HImode;
27751 half_mode = V4SImode;
27757 half_mode = V2DImode;
27763 half_mode = V4SFmode;
27769 half_mode = V2DFmode;
27775 /* Compute offset. */
27779 gcc_assert (i <= 1);
27781 /* Extract the half. */
27782 tmp = gen_reg_rtx (half_mode);
27783 emit_insn ((*gen_extract[j][i]) (tmp, target));
27785 /* Put val in tmp at elt. */
27786 ix86_expand_vector_set (false, tmp, val, elt);
27789 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
27798 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
27799 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
27800 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27804 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27806 emit_move_insn (mem, target);
27808 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27809 emit_move_insn (tmp, val);
27811 emit_move_insn (target, mem);
27816 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
27818 enum machine_mode mode = GET_MODE (vec);
27819 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27820 bool use_vec_extr = false;
27833 use_vec_extr = true;
27837 use_vec_extr = TARGET_SSE4_1;
27849 tmp = gen_reg_rtx (mode);
27850 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
27851 GEN_INT (elt), GEN_INT (elt),
27852 GEN_INT (elt+4), GEN_INT (elt+4)));
27856 tmp = gen_reg_rtx (mode);
27857 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
27861 gcc_unreachable ();
27864 use_vec_extr = true;
27869 use_vec_extr = TARGET_SSE4_1;
27883 tmp = gen_reg_rtx (mode);
27884 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
27885 GEN_INT (elt), GEN_INT (elt),
27886 GEN_INT (elt), GEN_INT (elt)));
27890 tmp = gen_reg_rtx (mode);
27891 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
27895 gcc_unreachable ();
27898 use_vec_extr = true;
27903 /* For SSE1, we have to reuse the V4SF code. */
27904 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
27905 gen_lowpart (V4SFmode, vec), elt);
27911 use_vec_extr = TARGET_SSE2;
27914 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27918 use_vec_extr = TARGET_SSE4_1;
27922 /* ??? Could extract the appropriate HImode element and shift. */
27929 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
27930 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
27932 /* Let the rtl optimizers know about the zero extension performed. */
27933 if (inner_mode == QImode || inner_mode == HImode)
27935 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
27936 target = gen_lowpart (SImode, target);
27939 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27943 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27945 emit_move_insn (mem, vec);
27947 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27948 emit_move_insn (target, tmp);
27952 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
27953 pattern to reduce; DEST is the destination; IN is the input vector. */
27956 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
27958 rtx tmp1, tmp2, tmp3;
27960 tmp1 = gen_reg_rtx (V4SFmode);
27961 tmp2 = gen_reg_rtx (V4SFmode);
27962 tmp3 = gen_reg_rtx (V4SFmode);
27964 emit_insn (gen_sse_movhlps (tmp1, in, in));
27965 emit_insn (fn (tmp2, tmp1, in));
27967 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
27968 const1_rtx, const1_rtx,
27969 GEN_INT (1+4), GEN_INT (1+4)));
27970 emit_insn (fn (dest, tmp2, tmp3));
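/* Dataflow sketch for IN = [a b c d] (element 0 first): movhlps gives
   tmp1 = [c d c d]; tmp2 = fn (tmp1, in) then combines {a,c} and
   {b,d} in elements 0 and 1; the shufps broadcasts element 1 of tmp2;
   and the final fn leaves fn (fn (a, c), fn (b, d)) in element 0 of
   DEST.  */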
27973 /* Target hook for scalar_mode_supported_p. */
27975 ix86_scalar_mode_supported_p (enum machine_mode mode)
27977 if (DECIMAL_FLOAT_MODE_P (mode))
27978 return default_decimal_float_supported_p ();
27979 else if (mode == TFmode)
27982 return default_scalar_mode_supported_p (mode);
27985 /* Implements target hook vector_mode_supported_p. */
27987 ix86_vector_mode_supported_p (enum machine_mode mode)
27989 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
27991 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
27993 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
27995 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
27997 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
28002 /* Target hook for c_mode_for_suffix. */
28003 static enum machine_mode
28004 ix86_c_mode_for_suffix (char suffix)
28014 /* Worker function for TARGET_MD_ASM_CLOBBERS.
28016 We do this in the new i386 backend to maintain source compatibility
28017 with the old cc0-based compiler. */
28020 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
28021 tree inputs ATTRIBUTE_UNUSED,
28024 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
28026 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
28031 /* Implements target vector targetm.asm.encode_section_info. This
is not used by NetWare.  */
28034 static void ATTRIBUTE_UNUSED
28035 ix86_encode_section_info (tree decl, rtx rtl, int first)
28037 default_encode_section_info (decl, rtl, first);
28039 if (TREE_CODE (decl) == VAR_DECL
28040 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
28041 && ix86_in_large_data_p (decl))
28042 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
28045 /* Worker function for REVERSE_CONDITION. */
28048 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
28050 return (mode != CCFPmode && mode != CCFPUmode
28051 ? reverse_condition (code)
28052 : reverse_condition_maybe_unordered (code));
28055 /* Output code to perform an x87 FP register move, from OPERANDS[1]
28059 output_387_reg_move (rtx insn, rtx *operands)
28061 if (REG_P (operands[0]))
28063 if (REG_P (operands[1])
28064 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28066 if (REGNO (operands[0]) == FIRST_STACK_REG)
28067 return output_387_ffreep (operands, 0);
28068 return "fstp\t%y0";
28070 if (STACK_TOP_P (operands[0]))
28071 return "fld%Z1\t%y1";
28074 else if (MEM_P (operands[0]))
28076 gcc_assert (REG_P (operands[1]));
28077 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28078 return "fstp%Z0\t%y0";
28081 /* There is no non-popping store to memory for XFmode.
28082 So if we need one, follow the store with a load. */
28083 if (GET_MODE (operands[0]) == XFmode)
28084 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
28086 return "fst%Z0\t%y0";
/* Output code to perform a conditional jump to LABEL, if the C2 flag in
   the FP status register is set.  */
28097 ix86_emit_fp_unordered_jump (rtx label)
28099 rtx reg = gen_reg_rtx (HImode);
28102 emit_insn (gen_x86_fnstsw_1 (reg));
28104 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
28106 emit_insn (gen_x86_sahf_1 (reg));
28108 temp = gen_rtx_REG (CCmode, FLAGS_REG);
28109 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
28113 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
28115 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
28116 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
28119 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
28120 gen_rtx_LABEL_REF (VOIDmode, label),
28122 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
28124 emit_jump_insn (temp);
28125 predict_jump (REG_BR_PROB_BASE * 10 / 100);
28128 /* Output code to perform a log1p XFmode calculation. */
28130 void ix86_emit_i387_log1p (rtx op0, rtx op1)
28132 rtx label1 = gen_label_rtx ();
28133 rtx label2 = gen_label_rtx ();
28135 rtx tmp = gen_reg_rtx (XFmode);
28136 rtx tmp2 = gen_reg_rtx (XFmode);
28139 emit_insn (gen_absxf2 (tmp, op1));
28140 test = gen_rtx_GE (VOIDmode, tmp,
28141 CONST_DOUBLE_FROM_REAL_VALUE (
28142 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
28144 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
28146 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28147 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
28148 emit_jump (label2);
28150 emit_label (label1);
28151 emit_move_insn (tmp, CONST1_RTX (XFmode));
28152 emit_insn (gen_addxf3 (tmp, op1, tmp));
28153 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28154 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
28156 emit_label (label2);
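/* Illustrative C model of the log1p expansion above (an editor's
   sketch).  fyl2xp1 computes Y * log2 (X + 1.0) but is only accurate
   for |X| < 1 - sqrt(2)/2, hence the branch on |op1| against that
   constant:

     if (fabs (op1) < 0.29289321881345247561810596348408353)
       op0 = ln2 * log2 (op1 + 1.0);	(fyl2xp1, with Y == ln(2))
     else
       op0 = ln2 * log2 (1.0 + op1);	(explicit add, then fyl2x)  */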
/* Output code to perform a Newton-Raphson approximation of a single precision
   floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm].  */
28162 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
28164 rtx x0, x1, e0, e1, two;
28166 x0 = gen_reg_rtx (mode);
28167 e0 = gen_reg_rtx (mode);
28168 e1 = gen_reg_rtx (mode);
28169 x1 = gen_reg_rtx (mode);
28171 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
28173 if (VECTOR_MODE_P (mode))
28174 two = ix86_build_const_vector (SFmode, true, two);
28176 two = force_reg (mode, two);
28178 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
28180 /* x0 = rcp(b) estimate */
28181 emit_insn (gen_rtx_SET (VOIDmode, x0,
28182 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
28185 emit_insn (gen_rtx_SET (VOIDmode, e0,
28186 gen_rtx_MULT (mode, x0, a)));
28188 emit_insn (gen_rtx_SET (VOIDmode, e1,
28189 gen_rtx_MULT (mode, x0, b)));
28191 emit_insn (gen_rtx_SET (VOIDmode, x1,
28192 gen_rtx_MINUS (mode, two, e1)));
28193 /* res = e0 * x1 */
28194 emit_insn (gen_rtx_SET (VOIDmode, res,
28195 gen_rtx_MULT (mode, e0, x1)));
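/* Illustrative scalar model of the sequence above (an editor's
   sketch).  rcpss/rcpps return an approximation of 1/b accurate to
   about 12 bits; the single Newton-Raphson step roughly doubles that:

     x0 = rcp (b);		(~12-bit estimate of 1/b)
     e0 = x0 * a;
     e1 = x0 * b;		(close to 1.0)
     x1 = 2.0 - e1;
     res = e0 * x1;		(== a * rcp(b) * (2.0 - b * rcp(b)))  */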
/* Output code to perform a Newton-Raphson approximation of a
   single precision floating point [reciprocal] square root.  */
28201 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
28204 rtx x0, e0, e1, e2, e3, mthree, mhalf;
28207 x0 = gen_reg_rtx (mode);
28208 e0 = gen_reg_rtx (mode);
28209 e1 = gen_reg_rtx (mode);
28210 e2 = gen_reg_rtx (mode);
28211 e3 = gen_reg_rtx (mode);
28213 real_from_integer (&r, VOIDmode, -3, -1, 0);
28214 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28216 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
28217 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28219 if (VECTOR_MODE_P (mode))
28221 mthree = ix86_build_const_vector (SFmode, true, mthree);
28222 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
28225 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
28226 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
28228 /* x0 = rsqrt(a) estimate */
28229 emit_insn (gen_rtx_SET (VOIDmode, x0,
28230 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
/* If a == 0.0, filter out the infinite rsqrt estimate to prevent
   NaN for sqrt (0.0).  */
28238 zero = gen_reg_rtx (mode);
28239 mask = gen_reg_rtx (mode);
28241 zero = force_reg (mode, CONST0_RTX(mode));
28242 emit_insn (gen_rtx_SET (VOIDmode, mask,
28243 gen_rtx_NE (mode, zero, a)));
28245 emit_insn (gen_rtx_SET (VOIDmode, x0,
28246 gen_rtx_AND (mode, x0, mask)));
28250 emit_insn (gen_rtx_SET (VOIDmode, e0,
28251 gen_rtx_MULT (mode, x0, a)));
28253 emit_insn (gen_rtx_SET (VOIDmode, e1,
28254 gen_rtx_MULT (mode, e0, x0)));
28257 mthree = force_reg (mode, mthree);
28258 emit_insn (gen_rtx_SET (VOIDmode, e2,
28259 gen_rtx_PLUS (mode, e1, mthree)));
28261 mhalf = force_reg (mode, mhalf);
28263 /* e3 = -.5 * x0 */
28264 emit_insn (gen_rtx_SET (VOIDmode, e3,
28265 gen_rtx_MULT (mode, x0, mhalf)));
28267 /* e3 = -.5 * e0 */
28268 emit_insn (gen_rtx_SET (VOIDmode, e3,
28269 gen_rtx_MULT (mode, e0, mhalf)));
28270 /* ret = e2 * e3 */
28271 emit_insn (gen_rtx_SET (VOIDmode, res,
28272 gen_rtx_MULT (mode, e2, e3)));
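/* Illustrative scalar model of the sequence above (an editor's
   sketch); RECIP selects rsqrt (a) rather than sqrt (a):

     x0 = rsqrt (a);		(~12-bit estimate of 1/sqrt(a))
     e0 = x0 * a;
     e1 = e0 * x0;		(a * x0 * x0, close to 1.0)
     e2 = e1 - 3.0;
     e3 = (recip ? x0 : e0) * -0.5;
     res = e2 * e3;  */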
28275 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
28277 static void ATTRIBUTE_UNUSED
28278 i386_solaris_elf_named_section (const char *name, unsigned int flags,
28281 /* With Binutils 2.15, the "@unwind" marker must be specified on
28282 every occurrence of the ".eh_frame" section, not just the first
28285 && strcmp (name, ".eh_frame") == 0)
28287 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28288 flags & SECTION_WRITE ? "aw" : "a");
28291 default_elf_asm_named_section (name, flags, decl);
28294 /* Return the mangling of TYPE if it is an extended fundamental type. */
28296 static const char *
28297 ix86_mangle_type (const_tree type)
28299 type = TYPE_MAIN_VARIANT (type);
28301 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28302 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28305 switch (TYPE_MODE (type))
28308 /* __float128 is "g". */
28311 /* "long double" or __float80 is "e". */
28318 /* For 32-bit code we can save PIC register setup by using
28319 __stack_chk_fail_local hidden function instead of calling
__stack_chk_fail directly.  64-bit code doesn't need to set up any PIC
28321 register, so it is better to call __stack_chk_fail directly. */
28324 ix86_stack_protect_fail (void)
28326 return TARGET_64BIT
28327 ? default_external_stack_protect_fail ()
28328 : default_hidden_stack_protect_fail ();
28331 /* Select a format to encode pointers in exception handling data. CODE
28332 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28333 true if the symbol may be affected by dynamic relocations.
28335 ??? All x86 object file formats are capable of representing this.
28336 After all, the relocation needed is the same as for the call insn.
28337 Whether or not a particular assembler allows us to enter such, I
28338 guess we'll have to see. */
28340 asm_preferred_eh_data_format (int code, int global)
28344 int type = DW_EH_PE_sdata8;
28346 || ix86_cmodel == CM_SMALL_PIC
28347 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28348 type = DW_EH_PE_sdata4;
28349 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28351 if (ix86_cmodel == CM_SMALL
28352 || (ix86_cmodel == CM_MEDIUM && code))
28353 return DW_EH_PE_udata4;
28354 return DW_EH_PE_absptr;
/* Expand copysign from SIGN to the positive value ABS_VALUE
   storing in RESULT.  If MASK is non-null, it shall be a mask to mask out
   the sign bit.  */
28361 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28363 enum machine_mode mode = GET_MODE (sign);
28364 rtx sgn = gen_reg_rtx (mode);
28365 if (mask == NULL_RTX)
28367 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28368 if (!VECTOR_MODE_P (mode))
28370 /* We need to generate a scalar mode mask in this case. */
28371 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28372 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28373 mask = gen_reg_rtx (mode);
28374 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28378 mask = gen_rtx_NOT (mode, mask);
28379 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28380 gen_rtx_AND (mode, mask, sign)));
28381 emit_insn (gen_rtx_SET (VOIDmode, result,
28382 gen_rtx_IOR (mode, abs_value, sgn)));
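/* Bitwise model of the above (an editor's note): with MASK covering
   just the sign bit,

     result = abs_value | (sign & sign_bit_mask);

   i.e. the magnitude comes from ABS_VALUE and the sign from SIGN.
   A caller-supplied MASK covers everything except the sign bit (it is
   the fabs mask), so it is complemented first to the same effect.  */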
/* Expand fabs (OP0) and return a new rtx that holds the result.  The
   mask for masking out the sign bit is stored in *SMASK, if that is
   non-null.  */
28389 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28391 enum machine_mode mode = GET_MODE (op0);
28394 xa = gen_reg_rtx (mode);
28395 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28396 if (!VECTOR_MODE_P (mode))
28398 /* We need to generate a scalar mode mask in this case. */
28399 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28400 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28401 mask = gen_reg_rtx (mode);
28402 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28404 emit_insn (gen_rtx_SET (VOIDmode, xa,
28405 gen_rtx_AND (mode, op0, mask)));
28413 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28414 swapping the operands if SWAP_OPERANDS is true. The expanded
28415 code is a forward jump to a newly created label in case the
28416 comparison is true. The generated label rtx is returned. */
28418 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28419 bool swap_operands)
28430 label = gen_label_rtx ();
28431 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28432 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28433 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28434 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28435 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28436 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28437 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28438 JUMP_LABEL (tmp) = label;
28443 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28444 using comparison code CODE. Operands are swapped for the comparison if
28445 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
28447 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28448 bool swap_operands)
28450 enum machine_mode mode = GET_MODE (op0);
28451 rtx mask = gen_reg_rtx (mode);
28460 if (mode == DFmode)
28461 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28462 gen_rtx_fmt_ee (code, mode, op0, op1)));
28464 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28465 gen_rtx_fmt_ee (code, mode, op0, op1)));
28470 /* Generate and return a rtx of mode MODE for 2**n where n is the number
28471 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
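/* Why the 2**52 trick used by the callers works (an editor's note):
   once 0 <= xa < 2**52, the ulp of xa + 2**52 is exactly 1.0, so the
   addition rounds xa to an integer in the current rounding mode, and
   subtracting 2**52 again is exact, leaving that integer.  The same
   holds for SFmode with 2**23.  */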
28473 ix86_gen_TWO52 (enum machine_mode mode)
28475 REAL_VALUE_TYPE TWO52r;
28478 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28479 TWO52 = const_double_from_real_value (TWO52r, mode);
28480 TWO52 = force_reg (mode, TWO52);
28485 /* Expand SSE sequence for computing lround from OP1 storing
28488 ix86_expand_lround (rtx op0, rtx op1)
28490 /* C code for the stuff we're doing below:
28491 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28494 enum machine_mode mode = GET_MODE (op1);
28495 const struct real_format *fmt;
28496 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28499 /* load nextafter (0.5, 0.0) */
28500 fmt = REAL_MODE_FORMAT (mode);
28501 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28502 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
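/* Note added for clarity: pred_half is nextafter (0.5, 0.0), the
   largest representable value below 0.5.  Adding it instead of 0.5
   keeps inputs just below 0.5 (for which x + 0.5 would round up to
   1.0) from being rounded away from zero, while exact halfway cases
   still round away from zero as lround requires.  */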
28504 /* adj = copysign (0.5, op1) */
28505 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28506 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28508 /* adj = op1 + adj */
28509 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28511 /* op0 = (imode)adj */
28512 expand_fix (op0, adj, 0);
/* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing
28518 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28520 /* C code for the stuff we're doing below (for do_floor):
28522 xi -= (double)xi > op1 ? 1 : 0;
28525 enum machine_mode fmode = GET_MODE (op1);
28526 enum machine_mode imode = GET_MODE (op0);
28527 rtx ireg, freg, label, tmp;
28529 /* reg = (long)op1 */
28530 ireg = gen_reg_rtx (imode);
28531 expand_fix (ireg, op1, 0);
28533 /* freg = (double)reg */
28534 freg = gen_reg_rtx (fmode);
28535 expand_float (freg, ireg, 0);
28537 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28538 label = ix86_expand_sse_compare_and_jump (UNLE,
28539 freg, op1, !do_floor);
28540 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28541 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28542 emit_move_insn (ireg, tmp);
28544 emit_label (label);
28545 LABEL_NUSES (label) = 1;
28547 emit_move_insn (op0, ireg);
28550 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28551 result in OPERAND0. */
28553 ix86_expand_rint (rtx operand0, rtx operand1)
28555 /* C code for the stuff we're doing below:
28556 xa = fabs (operand1);
28557 if (!isless (xa, 2**52))
28559 xa = xa + 2**52 - 2**52;
28560 return copysign (xa, operand1);
28562 enum machine_mode mode = GET_MODE (operand0);
28563 rtx res, xa, label, TWO52, mask;
28565 res = gen_reg_rtx (mode);
28566 emit_move_insn (res, operand1);
28568 /* xa = abs (operand1) */
28569 xa = ix86_expand_sse_fabs (res, &mask);
28571 /* if (!isless (xa, TWO52)) goto label; */
28572 TWO52 = ix86_gen_TWO52 (mode);
28573 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28575 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28576 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28578 ix86_sse_copysign_to_positive (res, xa, res, mask);
28580 emit_label (label);
28581 LABEL_NUSES (label) = 1;
28583 emit_move_insn (operand0, res);
28586 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28589 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28591 /* C code for the stuff we expand below.
28592 double xa = fabs (x), x2;
28593 if (!isless (xa, TWO52))
28595 xa = xa + TWO52 - TWO52;
28596 x2 = copysign (xa, x);
28605 enum machine_mode mode = GET_MODE (operand0);
28606 rtx xa, TWO52, tmp, label, one, res, mask;
28608 TWO52 = ix86_gen_TWO52 (mode);
28610 /* Temporary for holding the result, initialized to the input
28611 operand to ease control flow. */
28612 res = gen_reg_rtx (mode);
28613 emit_move_insn (res, operand1);
28615 /* xa = abs (operand1) */
28616 xa = ix86_expand_sse_fabs (res, &mask);
28618 /* if (!isless (xa, TWO52)) goto label; */
28619 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28621 /* xa = xa + TWO52 - TWO52; */
28622 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28623 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28625 /* xa = copysign (xa, operand1) */
28626 ix86_sse_copysign_to_positive (xa, xa, res, mask);
28628 /* generate 1.0 or -1.0 */
28629 one = force_reg (mode,
28630 const_double_from_real_value (do_floor
28631 ? dconst1 : dconstm1, mode));
28633 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28634 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28635 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28636 gen_rtx_AND (mode, one, tmp)));
28637 /* We always need to subtract here to preserve signed zero. */
28638 tmp = expand_simple_binop (mode, MINUS,
28639 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28640 emit_move_insn (res, tmp);
28642 emit_label (label);
28643 LABEL_NUSES (label) = 1;
28645 emit_move_insn (operand0, res);
28648 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28651 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
28653 /* C code for the stuff we expand below.
28654 double xa = fabs (x), x2;
28655 if (!isless (xa, TWO52))
28657 x2 = (double)(long)x;
28664 if (HONOR_SIGNED_ZEROS (mode))
28665 return copysign (x2, x);
28668 enum machine_mode mode = GET_MODE (operand0);
28669 rtx xa, xi, TWO52, tmp, label, one, res, mask;
28671 TWO52 = ix86_gen_TWO52 (mode);
28673 /* Temporary for holding the result, initialized to the input
28674 operand to ease control flow. */
28675 res = gen_reg_rtx (mode);
28676 emit_move_insn (res, operand1);
28678 /* xa = abs (operand1) */
28679 xa = ix86_expand_sse_fabs (res, &mask);
28681 /* if (!isless (xa, TWO52)) goto label; */
28682 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28684 /* xa = (double)(long)x */
28685 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28686 expand_fix (xi, res, 0);
28687 expand_float (xa, xi, 0);
28690 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28692 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28693 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28694 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28695 gen_rtx_AND (mode, one, tmp)));
28696 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
28697 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28698 emit_move_insn (res, tmp);
28700 if (HONOR_SIGNED_ZEROS (mode))
28701 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28703 emit_label (label);
28704 LABEL_NUSES (label) = 1;
28706 emit_move_insn (operand0, res);
28709 /* Expand SSE sequence for computing round from OPERAND1 storing
into OPERAND0.  The sequence works without relying on DImode truncation
via cvttsd2siq, which is only available on 64-bit targets.  */
28713 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
28715 /* C code for the stuff we expand below.
28716 double xa = fabs (x), xa2, x2;
28717 if (!isless (xa, TWO52))
28719 Using the absolute value and copying back sign makes
28720 -0.0 -> -0.0 correct.
28721 xa2 = xa + TWO52 - TWO52;
28726 else if (dxa > 0.5)
28728 x2 = copysign (xa2, x);
28731 enum machine_mode mode = GET_MODE (operand0);
28732 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
28734 TWO52 = ix86_gen_TWO52 (mode);
28736 /* Temporary for holding the result, initialized to the input
28737 operand to ease control flow. */
28738 res = gen_reg_rtx (mode);
28739 emit_move_insn (res, operand1);
28741 /* xa = abs (operand1) */
28742 xa = ix86_expand_sse_fabs (res, &mask);
28744 /* if (!isless (xa, TWO52)) goto label; */
28745 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28747 /* xa2 = xa + TWO52 - TWO52; */
28748 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28749 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
28751 /* dxa = xa2 - xa; */
28752 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
28754 /* generate 0.5, 1.0 and -0.5 */
28755 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
28756 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
28757 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
28761 tmp = gen_reg_rtx (mode);
28762 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
28763 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
28764 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28765 gen_rtx_AND (mode, one, tmp)));
28766 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28767 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
28768 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
28769 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28770 gen_rtx_AND (mode, one, tmp)));
28771 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28773 /* res = copysign (xa2, operand1) */
28774 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
28776 emit_label (label);
28777 LABEL_NUSES (label) = 1;
28779 emit_move_insn (operand0, res);
28782 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28785 ix86_expand_trunc (rtx operand0, rtx operand1)
/* C code for the SSE variant we expand below.
28788 double xa = fabs (x), x2;
28789 if (!isless (xa, TWO52))
28791 x2 = (double)(long)x;
28792 if (HONOR_SIGNED_ZEROS (mode))
28793 return copysign (x2, x);
28796 enum machine_mode mode = GET_MODE (operand0);
28797 rtx xa, xi, TWO52, label, res, mask;
28799 TWO52 = ix86_gen_TWO52 (mode);
28801 /* Temporary for holding the result, initialized to the input
28802 operand to ease control flow. */
28803 res = gen_reg_rtx (mode);
28804 emit_move_insn (res, operand1);
28806 /* xa = abs (operand1) */
28807 xa = ix86_expand_sse_fabs (res, &mask);
28809 /* if (!isless (xa, TWO52)) goto label; */
28810 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28812 /* x = (double)(long)x */
28813 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28814 expand_fix (xi, res, 0);
28815 expand_float (res, xi, 0);
28817 if (HONOR_SIGNED_ZEROS (mode))
28818 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28820 emit_label (label);
28821 LABEL_NUSES (label) = 1;
28823 emit_move_insn (operand0, res);
28826 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28829 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
28831 enum machine_mode mode = GET_MODE (operand0);
28832 rtx xa, mask, TWO52, label, one, res, smask, tmp;
/* C code for the SSE variant we expand below.
28835 double xa = fabs (x), x2;
28836 if (!isless (xa, TWO52))
28838 xa2 = xa + TWO52 - TWO52;
28842 x2 = copysign (xa2, x);
28846 TWO52 = ix86_gen_TWO52 (mode);
28848 /* Temporary for holding the result, initialized to the input
28849 operand to ease control flow. */
28850 res = gen_reg_rtx (mode);
28851 emit_move_insn (res, operand1);
28853 /* xa = abs (operand1) */
28854 xa = ix86_expand_sse_fabs (res, &smask);
28856 /* if (!isless (xa, TWO52)) goto label; */
28857 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28859 /* res = xa + TWO52 - TWO52; */
28860 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28861 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
28862 emit_move_insn (res, tmp);
28865 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28867 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
28868 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
28869 emit_insn (gen_rtx_SET (VOIDmode, mask,
28870 gen_rtx_AND (mode, mask, one)));
28871 tmp = expand_simple_binop (mode, MINUS,
28872 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
28873 emit_move_insn (res, tmp);
28875 /* res = copysign (res, operand1) */
28876 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
28878 emit_label (label);
28879 LABEL_NUSES (label) = 1;
28881 emit_move_insn (operand0, res);
28884 /* Expand SSE sequence for computing round from OPERAND1 storing
28887 ix86_expand_round (rtx operand0, rtx operand1)
28889 /* C code for the stuff we're doing below:
28890 double xa = fabs (x);
28891 if (!isless (xa, TWO52))
28893 xa = (double)(long)(xa + nextafter (0.5, 0.0));
28894 return copysign (xa, x);
28896 enum machine_mode mode = GET_MODE (operand0);
28897 rtx res, TWO52, xa, label, xi, half, mask;
28898 const struct real_format *fmt;
28899 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28901 /* Temporary for holding the result, initialized to the input
28902 operand to ease control flow. */
28903 res = gen_reg_rtx (mode);
28904 emit_move_insn (res, operand1);
28906 TWO52 = ix86_gen_TWO52 (mode);
28907 xa = ix86_expand_sse_fabs (res, &mask);
28908 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28910 /* load nextafter (0.5, 0.0) */
28911 fmt = REAL_MODE_FORMAT (mode);
28912 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28913 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28915 /* xa = xa + 0.5 */
28916 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
28917 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
28919 /* xa = (double)(int64_t)xa */
28920 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28921 expand_fix (xi, xa, 0);
28922 expand_float (xa, xi, 0);
28924 /* res = copysign (xa, operand1) */
28925 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
28927 emit_label (label);
28928 LABEL_NUSES (label) = 1;
28930 emit_move_insn (operand0, res);
28934 /* Table of valid machine attributes. */
28935 static const struct attribute_spec ix86_attribute_table[] =
28937 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
28938 /* Stdcall attribute says callee is responsible for popping arguments
28939 if they are not variable. */
28940 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28941 /* Fastcall attribute says callee is responsible for popping arguments
28942 if they are not variable. */
28943 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
/* Cdecl attribute says the callee is a normal C declaration.  */
28945 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28946 /* Regparm attribute specifies how many integer arguments are to be
28947 passed in registers. */
28948 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
28949 /* Sseregparm attribute says we are using x86_64 calling conventions
28950 for FP arguments. */
28951 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28952 /* force_align_arg_pointer says this function realigns the stack at entry. */
28953 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
28954 false, true, true, ix86_handle_cconv_attribute },
28955 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
28956 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
28957 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
28958 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
28960 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
28961 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
28962 #ifdef SUBTARGET_ATTRIBUTE_TABLE
28963 SUBTARGET_ATTRIBUTE_TABLE,
28965 /* ms_abi and sysv_abi calling convention function attributes. */
28966 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
28967 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
28968 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute },
28970 { NULL, 0, 0, false, false, false, NULL }
28973 /* Implement targetm.vectorize.builtin_vectorization_cost. */
28975 ix86_builtin_vectorization_cost (bool runtime_test)
/* If the branch of the runtime test is taken (i.e. the vectorized
   version is skipped), this incurs a misprediction cost (because the
   vectorized version is expected to be the fall-through).  So we subtract
   the latency of a mispredicted branch from the costs that are incurred
   when the vectorized version is executed.

   TODO: The values in individual target tables have to be tuned or new
   fields may be needed.  E.g. on K8, the default branch path is the
   not-taken path.  If the taken path is predicted correctly, the minimum
   penalty of going down the taken-path is 1 cycle.  If the taken-path is
   not predicted correctly, then the minimum penalty is 10 cycles.  */
28991 return (-(ix86_cost->cond_taken_branch_cost));
28997 /* Implement targetm.vectorize.builtin_vec_perm. */
29000 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
29002 tree itype = TREE_TYPE (vec_type);
29003 bool u = TYPE_UNSIGNED (itype);
29004 enum machine_mode vmode = TYPE_MODE (vec_type);
29005 enum ix86_builtins fcode = fcode; /* Silence bogus warning. */
29006 bool ok = TARGET_SSE2;
29012 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
29015 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
29017 itype = ix86_get_builtin_type (IX86_BT_DI);
29022 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
29026 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
29028 itype = ix86_get_builtin_type (IX86_BT_SI);
29032 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
29035 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
29038 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
29041 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
29051 *mask_type = itype;
29052 return ix86_builtins[(int) fcode];
29055 /* Return a vector mode with twice as many elements as VMODE. */
29056 /* ??? Consider moving this to a table generated by genmodes.c. */
29058 static enum machine_mode
29059 doublesize_vector_mode (enum machine_mode vmode)
29063 case V2SFmode: return V4SFmode;
29064 case V1DImode: return V2DImode;
29065 case V2SImode: return V4SImode;
29066 case V4HImode: return V8HImode;
29067 case V8QImode: return V16QImode;
29069 case V2DFmode: return V4DFmode;
29070 case V4SFmode: return V8SFmode;
29071 case V2DImode: return V4DImode;
29072 case V4SImode: return V8SImode;
29073 case V8HImode: return V16HImode;
29074 case V16QImode: return V32QImode;
29076 case V4DFmode: return V8DFmode;
29077 case V8SFmode: return V16SFmode;
29078 case V4DImode: return V8DImode;
29079 case V8SImode: return V16SImode;
29080 case V16HImode: return V32HImode;
29081 case V32QImode: return V64QImode;
29084 gcc_unreachable ();
29088 /* Construct (set target (vec_select op0 (parallel perm))) and
29089 return true if that's a valid instruction in the active ISA. */
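/* Example (an editor's illustration): a broadcast of element 0 of a
   V4SF register would be requested as

     unsigned char perm[4] = { 0, 0, 0, 0 };
     expand_vselect (target, op0, perm, 4);

   which succeeds only if a pattern such as shufps matches the
   resulting vec_select in the active ISA.  */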
29092 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
29094 rtx rperm[MAX_VECT_LEN], x;
29097 for (i = 0; i < nelt; ++i)
29098 rperm[i] = GEN_INT (perm[i]);
29100 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
29101 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
29102 x = gen_rtx_SET (VOIDmode, target, x);
29105 if (recog_memoized (x) < 0)
29113 /* Similar, but generate a vec_concat from op0 and op1 as well. */
29116 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
29117 const unsigned char *perm, unsigned nelt)
29119 enum machine_mode v2mode;
29122 v2mode = doublesize_vector_mode (GET_MODE (op0));
29123 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
29124 return expand_vselect (target, x, perm, nelt);
29127 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29128 in terms of blendp[sd] / pblendw / pblendvb. */
29131 expand_vec_perm_blend (struct expand_vec_perm_d *d)
29133 enum machine_mode vmode = d->vmode;
29134 unsigned i, mask, nelt = d->nelt;
29135 rtx target, op0, op1, x;
29137 if (!TARGET_SSE4_1 || d->op0 == d->op1)
29139 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
29142 /* This is a blend, not a permute. Elements must stay in their
29143 respective lanes. */
29144 for (i = 0; i < nelt; ++i)
29146 unsigned e = d->perm[i];
29147 if (!(e == i || e == i + nelt))
29154 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
29155 decision should be extracted elsewhere, so that we only try that
29156 sequence once all budget==3 options have been tried. */
29158 /* For bytes, see if bytes move in pairs so we can use pblendw with
29159 an immediate argument, rather than pblendvb with a vector argument. */
29160 if (vmode == V16QImode)
29162 bool pblendw_ok = true;
29163 for (i = 0; i < 16 && pblendw_ok; i += 2)
29164 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
29168 rtx rperm[16], vperm;
29170 for (i = 0; i < nelt; ++i)
29171 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
29173 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29174 vperm = force_reg (V16QImode, vperm);
29176 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
29181 target = d->target;
29193 for (i = 0; i < nelt; ++i)
29194 mask |= (d->perm[i] >= nelt) << i;
29198 for (i = 0; i < 2; ++i)
29199 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
29203 for (i = 0; i < 4; ++i)
29204 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
29208 for (i = 0; i < 8; ++i)
29209 mask |= (d->perm[i * 2] >= 16) << i;
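/* Example (an editor's illustration): for V8HImode the per-element
   loop above computes the immediate directly; e.g. for
   perm = { 0 9 2 11 4 13 6 15 } it yields mask == 0xaa, so
   pblendw $0xaa selects the odd words from op1.  */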
29213 target = gen_lowpart (vmode, target);
29214 op0 = gen_lowpart (vmode, op0);
29215 op1 = gen_lowpart (vmode, op1);
29219 gcc_unreachable ();
29222 /* This matches five different patterns with the different modes. */
29223 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
29224 x = gen_rtx_SET (VOIDmode, target, x);
29230 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29231 in terms of the variable form of vpermilps.
29233 Note that we will have already failed the immediate input vpermilps,
29234 which requires that the high and low part shuffle be identical; the
29235 variable form doesn't require that. */
29238 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
29240 rtx rperm[8], vperm;
29243 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
29246 /* We can only permute within the 128-bit lane. */
29247 for (i = 0; i < 8; ++i)
29249 unsigned e = d->perm[i];
29250 if (i < 4 ? e >= 4 : e < 4)
29257 for (i = 0; i < 8; ++i)
29259 unsigned e = d->perm[i];
29261 /* Within each 128-bit lane, the elements of op0 are numbered
29262 from 0 and the elements of op1 are numbered from 4. */
29268 rperm[i] = GEN_INT (e);
29271 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
29272 vperm = force_reg (V8SImode, vperm);
29273 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
29278 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29279 in terms of pshufb or vpperm. */
29282 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
29284 unsigned i, nelt, eltsz;
29285 rtx rperm[16], vperm, target, op0, op1;
29287 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
29289 if (GET_MODE_SIZE (d->vmode) != 16)
29296 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29298 for (i = 0; i < nelt; ++i)
29300 unsigned j, e = d->perm[i];
29301 for (j = 0; j < eltsz; ++j)
29302 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
29305 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29306 vperm = force_reg (V16QImode, vperm);
29308 target = gen_lowpart (V16QImode, d->target);
29309 op0 = gen_lowpart (V16QImode, d->op0);
29310 if (d->op0 == d->op1)
29311 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
29314 op1 = gen_lowpart (V16QImode, d->op1);
29315 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
29321 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
29322 in a single instruction. */
29325 expand_vec_perm_1 (struct expand_vec_perm_d *d)
29327 unsigned i, nelt = d->nelt;
29328 unsigned char perm2[MAX_VECT_LEN];
29330 /* Check plain VEC_SELECT first, because AVX has instructions that could
29331 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
29332 input where SEL+CONCAT may not. */
29333 if (d->op0 == d->op1)
29335 int mask = nelt - 1;
29337 for (i = 0; i < nelt; i++)
29338 perm2[i] = d->perm[i] & mask;
29340 if (expand_vselect (d->target, d->op0, perm2, nelt))
29343 /* There are plenty of patterns in sse.md that are written for
29344 SEL+CONCAT and are not replicated for a single op. Perhaps
29345 that should be changed, to avoid the nastiness here. */
29347 /* Recognize interleave style patterns, which means incrementing
29348 every other permutation operand. */
29349 for (i = 0; i < nelt; i += 2)
29351 perm2[i] = d->perm[i] & mask;
29352 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
29354 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29357 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
29360 for (i = 0; i < nelt; i += 4)
29362 perm2[i + 0] = d->perm[i + 0] & mask;
29363 perm2[i + 1] = d->perm[i + 1] & mask;
29364 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
29365 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
29368 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29373 /* Finally, try the fully general two operand permute. */
29374 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
29377 /* Recognize interleave style patterns with reversed operands. */
29378 if (d->op0 != d->op1)
29380 for (i = 0; i < nelt; ++i)
29382 unsigned e = d->perm[i];
29390 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
29394 /* Try the SSE4.1 blend variable merge instructions. */
29395 if (expand_vec_perm_blend (d))
29398 /* Try one of the AVX vpermil variable permutations. */
29399 if (expand_vec_perm_vpermil (d))
29402 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
29403 if (expand_vec_perm_pshufb (d))
29409 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29410 in terms of a pair of pshuflw + pshufhw instructions. */
29413 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
29415 unsigned char perm2[MAX_VECT_LEN];
29419 if (d->vmode != V8HImode || d->op0 != d->op1)
29422 /* The two permutations only operate in 64-bit lanes. */
29423 for (i = 0; i < 4; ++i)
29424 if (d->perm[i] >= 4)
29426 for (i = 4; i < 8; ++i)
29427 if (d->perm[i] < 4)
29433 /* Emit the pshuflw. */
29434 memcpy (perm2, d->perm, 4);
29435 for (i = 4; i < 8; ++i)
29437 ok = expand_vselect (d->target, d->op0, perm2, 8);
29440 /* Emit the pshufhw. */
29441 memcpy (perm2 + 4, d->perm + 4, 4);
29442 for (i = 0; i < 4; ++i)
29444 ok = expand_vselect (d->target, d->target, perm2, 8);
29450 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29451 the permutation using the SSSE3 palignr instruction. This succeeds
29452 when all of the elements in PERM fit within one vector and we merely
29453 need to shift them down so that a single vector permutation has a
29454 chance to succeed. */
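/* Example (an editor's illustration): for V8HImode with
   perm = { 2 3 4 5 6 7 8 9 }, min == 2, so the palignr below shifts
   the op1:op0 pair right by two elements; the residual permutation is
   then the identity and the degenerate case succeeds immediately.  */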
29457 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
29459 unsigned i, nelt = d->nelt;
29464 /* Even with AVX, palignr only operates on 128-bit vectors. */
29465 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29468 min = nelt, max = 0;
29469 for (i = 0; i < nelt; ++i)
29471 unsigned e = d->perm[i];
29477 if (min == 0 || max - min >= nelt)
29480 /* Given that we have SSSE3, we know we'll be able to implement the
29481 single operand permutation after the palignr with pshufb. */
29485 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
29486 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
29487 gen_lowpart (TImode, d->op1),
29488 gen_lowpart (TImode, d->op0), shift));
29490 d->op0 = d->op1 = d->target;
29493 for (i = 0; i < nelt; ++i)
29495 unsigned e = d->perm[i] - min;
29501 /* Test for the degenerate case where the alignment by itself
29502 produces the desired permutation. */
29506 ok = expand_vec_perm_1 (d);
29512 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29513 a two vector permutation into a single vector permutation by using
29514 an interleave operation to merge the vectors. */
29517 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
29519 struct expand_vec_perm_d dremap, dfinal;
29520 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
29521 unsigned contents, h1, h2, h3, h4;
29522 unsigned char remap[2 * MAX_VECT_LEN];
29526 if (d->op0 == d->op1)
29529 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
29530 lanes. We can use similar techniques with the vperm2f128 instruction,
29531 but it requires slightly different logic. */
29532 if (GET_MODE_SIZE (d->vmode) != 16)
29535 /* Examine from whence the elements come. */
29537 for (i = 0; i < nelt; ++i)
29538 contents |= 1u << d->perm[i];
29540 /* Split the two input vectors into 4 halves. */
29541 h1 = (1u << nelt2) - 1;
29546 memset (remap, 0xff, sizeof (remap));
/* If the elements are all from the low halves, use interleave low;
   similarly, use interleave high if they are all from the high halves.
   If the elements are from mismatched halves, we can use shufps for
   V4SF/V4SI or do a DImode shuffle.  */
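/* Example (an editor's illustration): for V4SImode with
   perm = { 0 4 1 5 }, contents == h1 | h3, so DREMAP becomes the
   interleave-low permutation { 0 4 1 5 } (a single punpckldq) and
   the final remap is the identity.  */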
29552 if ((contents & (h1 | h3)) == contents)
29554 for (i = 0; i < nelt2; ++i)
29557 remap[i + nelt] = i * 2 + 1;
29558 dremap.perm[i * 2] = i;
29559 dremap.perm[i * 2 + 1] = i + nelt;
29562 else if ((contents & (h2 | h4)) == contents)
29564 for (i = 0; i < nelt2; ++i)
29566 remap[i + nelt2] = i * 2;
29567 remap[i + nelt + nelt2] = i * 2 + 1;
29568 dremap.perm[i * 2] = i + nelt2;
29569 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
29572 else if ((contents & (h1 | h4)) == contents)
29574 for (i = 0; i < nelt2; ++i)
29577 remap[i + nelt + nelt2] = i + nelt2;
29578 dremap.perm[i] = i;
29579 dremap.perm[i + nelt2] = i + nelt + nelt2;
29583 dremap.vmode = V2DImode;
29585 dremap.perm[0] = 0;
29586 dremap.perm[1] = 3;
29589 else if ((contents & (h2 | h3)) == contents)
29591 for (i = 0; i < nelt2; ++i)
29593 remap[i + nelt2] = i;
29594 remap[i + nelt] = i + nelt2;
29595 dremap.perm[i] = i + nelt2;
29596 dremap.perm[i + nelt2] = i + nelt;
29600 dremap.vmode = V2DImode;
29602 dremap.perm[0] = 1;
29603 dremap.perm[1] = 2;
29609 /* Use the remapping array set up above to move the elements from their
29610 swizzled locations into their final destinations. */
29612 for (i = 0; i < nelt; ++i)
29614 unsigned e = remap[d->perm[i]];
29615 gcc_assert (e < nelt);
29616 dfinal.perm[i] = e;
29618 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
29619 dfinal.op1 = dfinal.op0;
29620 dremap.target = dfinal.op0;
29622 /* Test if the final remap can be done with a single insn. For V4SFmode or
29623 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
29625 ok = expand_vec_perm_1 (&dfinal);
29626 seq = get_insns ();
29632 if (dremap.vmode != dfinal.vmode)
29634 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
29635 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
29636 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
29639 ok = expand_vec_perm_1 (&dremap);
29646 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
29647 permutation with two pshufb insns and an ior. We should have already
failed all two-instruction sequences.  */
29651 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
29653 rtx rperm[2][16], vperm, l, h, op, m128;
29654 unsigned int i, nelt, eltsz;
29656 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29658 gcc_assert (d->op0 != d->op1);
29661 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29663 /* Generate two permutation masks. If the required element is within
29664 the given vector it is shuffled into the proper lane. If the required
29665 element is in the other vector, force a zero into the lane by setting
29666 bit 7 in the permutation mask. */
29667 m128 = GEN_INT (-128);
29668 for (i = 0; i < nelt; ++i)
29670 unsigned j, e = d->perm[i];
29671 unsigned which = (e >= nelt);
29675 for (j = 0; j < eltsz; ++j)
29677 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
29678 rperm[1-which][i*eltsz + j] = m128;
29682 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
29683 vperm = force_reg (V16QImode, vperm);
29685 l = gen_reg_rtx (V16QImode);
29686 op = gen_lowpart (V16QImode, d->op0);
29687 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
29689 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
29690 vperm = force_reg (V16QImode, vperm);
29692 h = gen_reg_rtx (V16QImode);
29693 op = gen_lowpart (V16QImode, d->op1);
29694 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
29696 op = gen_lowpart (V16QImode, d->target);
29697 emit_insn (gen_iorv16qi3 (op, l, h));
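/* Net effect (an editor's note): each result byte is produced by
   exactly one of the two pshufb results; the other contributes 0x00
   because bit 7 was set in its control byte, so the ior merges the
   two halves losslessly.  */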
29702 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
29703 and extract-odd permutations. */
29706 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
29708 rtx t1, t2, t3, t4;
29713 t1 = gen_reg_rtx (V4DFmode);
29714 t2 = gen_reg_rtx (V4DFmode);
29716 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
29717 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
29718 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
29720 /* Now an unpck[lh]pd will produce the result required. */
29722 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
29724 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
29730 static const unsigned char perm1[8] = { 0, 2, 1, 3, 5, 6, 5, 7 };
29731 static const unsigned char perme[8] = { 0, 1, 8, 9, 4, 5, 12, 13 };
29732 static const unsigned char permo[8] = { 2, 3, 10, 11, 6, 7, 14, 15 };
29734 t1 = gen_reg_rtx (V8SFmode);
29735 t2 = gen_reg_rtx (V8SFmode);
29736 t3 = gen_reg_rtx (V8SFmode);
29737 t4 = gen_reg_rtx (V8SFmode);
29739 /* Shuffle within the 128-bit lanes to produce:
29740 { 0 2 1 3 4 6 5 7 } and { 8 a 9 b c e d f }. */
29741 expand_vselect (t1, d->op0, perm1, 8);
29742 expand_vselect (t2, d->op1, perm1, 8);
29744 /* Shuffle the lanes around to produce:
29745 { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }. */
29746 emit_insn (gen_avx_vperm2f128v8sf3 (t3, t1, t2, GEN_INT (0x20)));
29747 emit_insn (gen_avx_vperm2f128v8sf3 (t4, t1, t2, GEN_INT (0x31)));
29749 /* Now a vpermil2p will produce the result required. */
29750 /* ??? The vpermil2p requires a vector constant. Another option
is an unpck[lh]ps to merge the two vectors to produce
29752 { 0 4 2 6 8 c a e } or { 1 5 3 7 9 d b f }. Then use another
29753 vpermilps to get the elements into the final order. */
29756 memcpy (d->perm, odd ? permo: perme, 8);
29757 expand_vec_perm_vpermil (d);
29765 /* These are always directly implementable by expand_vec_perm_1. */
29766 gcc_unreachable ();
29770 return expand_vec_perm_pshufb2 (d);
29773 /* We need 2*log2(N)-1 operations to achieve odd/even
29774 with interleave. */
29775 t1 = gen_reg_rtx (V8HImode);
29776 t2 = gen_reg_rtx (V8HImode);
29777 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
29778 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
29779 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
29780 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
29782 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
29784 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
29791 return expand_vec_perm_pshufb2 (d);
29794 t1 = gen_reg_rtx (V16QImode);
29795 t2 = gen_reg_rtx (V16QImode);
29796 t3 = gen_reg_rtx (V16QImode);
29797 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
29798 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
29799 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
29800 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
29801 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
29802 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
29804 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
29806 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
29812 gcc_unreachable ();
29818 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
29819 extract-even and extract-odd permutations. */
29822 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
29824 unsigned i, odd, nelt = d->nelt;
29827 if (odd != 0 && odd != 1)
29830 for (i = 1; i < nelt; ++i)
29831 if (d->perm[i] != 2 * i + odd)
29834 return expand_vec_perm_even_odd_1 (d, odd);
29837 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
29838 permutations. We assume that expand_vec_perm_1 has already failed. */
29841 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
29843 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
29844 enum machine_mode vmode = d->vmode;
29845 unsigned char perm2[4];
29853 /* These are special-cased in sse.md so that we can optionally
29854 use the vbroadcast instruction. They expand to two insns
29855 if the input happens to be in a register. */
29856 gcc_unreachable ();
29862 /* These are always implementable using standard shuffle patterns. */
29863 gcc_unreachable ();
29867 /* These can be implemented via interleave. We save one insn by
29868 stopping once we have promoted to V4SImode and then use pshufd. */
29871 optab otab = vec_interleave_low_optab;
29875 otab = vec_interleave_high_optab;
29880 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
29881 vmode = get_mode_wider_vector (vmode);
29882 op0 = gen_lowpart (vmode, op0);
29884 while (vmode != V4SImode);
29886 memset (perm2, elt, 4);
29887 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
29892 gcc_unreachable ();
29896 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
29897 broadcast permutations. */
29900 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
29902 unsigned i, elt, nelt = d->nelt;
29904 if (d->op0 != d->op1)
29908 for (i = 1; i < nelt; ++i)
29909 if (d->perm[i] != elt)
29912 return expand_vec_perm_broadcast_1 (d);
29915 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
29916 With all of the interface bits taken care of, perform the expansion
29917 in D and return true on success. */
29920 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
29922 /* Try a single instruction expansion. */
29923 if (expand_vec_perm_1 (d))
29926 /* Try sequences of two instructions. */
29928 if (expand_vec_perm_pshuflw_pshufhw (d))
29931 if (expand_vec_perm_palignr (d))
29934 if (expand_vec_perm_interleave2 (d))
29937 if (expand_vec_perm_broadcast (d))
29940 /* Try sequences of three instructions. */
29942 if (expand_vec_perm_pshufb2 (d))
29945 /* ??? Look for narrow permutations whose element orderings would
29946 allow the promotion to a wider mode. */
29948 /* ??? Look for sequences of interleave or a wider permute that place
29949 the data into the correct lanes for a half-vector shuffle like
29950 pshuf[lh]w or vpermilps. */
29952 /* ??? Look for sequences of interleave that produce the desired results.
29953 The combinatorics of punpck[lh] get pretty ugly... */
29955 if (expand_vec_perm_even_odd (d))
29961 /* Extract the values from the vector CST into the permutation array in D.
29962 Return 0 on error, 1 if all values from the permutation come from the
29963 first vector, 2 if all values from the second vector, and 3 otherwise. */
29966 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
29968 tree list = TREE_VECTOR_CST_ELTS (cst);
29969 unsigned i, nelt = d->nelt;
29972 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
29974 unsigned HOST_WIDE_INT e;
29976 if (!host_integerp (TREE_VALUE (list), 1))
29978 e = tree_low_cst (TREE_VALUE (list), 1);
29982 ret |= (e < nelt ? 1 : 2);
29985 gcc_assert (list == NULL);
/* If all elements are from the second vector, fold them to the first.  */
29989 for (i = 0; i < nelt; ++i)
29990 d->perm[i] -= nelt;
29996 ix86_expand_vec_perm_builtin (tree exp)
29998 struct expand_vec_perm_d d;
29999 tree arg0, arg1, arg2;
30001 arg0 = CALL_EXPR_ARG (exp, 0);
30002 arg1 = CALL_EXPR_ARG (exp, 1);
30003 arg2 = CALL_EXPR_ARG (exp, 2);
30005 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
30006 d.nelt = GET_MODE_NUNITS (d.vmode);
30007 d.testing_p = false;
30008 gcc_assert (VECTOR_MODE_P (d.vmode));
30010 if (TREE_CODE (arg2) != VECTOR_CST)
30012 error_at (EXPR_LOCATION (exp),
30013 "vector permutation requires vector constant");
30017 switch (extract_vec_perm_cst (&d, arg2))
30023 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
30027 if (!operand_equal_p (arg0, arg1, 0))
30029 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30030 d.op0 = force_reg (d.vmode, d.op0);
30031 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30032 d.op1 = force_reg (d.vmode, d.op1);
/* The elements of PERM do not suggest that only the first operand
   is used, but both operands are identical.  Allow easier matching
   of the permutation by folding the permutation into the single
   input vector.  */
30041 unsigned i, nelt = d.nelt;
30042 for (i = 0; i < nelt; ++i)
30043 if (d.perm[i] >= nelt)
30049 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30050 d.op0 = force_reg (d.vmode, d.op0);
30055 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30056 d.op0 = force_reg (d.vmode, d.op0);
30061 d.target = gen_reg_rtx (d.vmode);
30062 if (ix86_expand_vec_perm_builtin_1 (&d))
/* For compiler-generated permutations, we should never get here, because
   the compiler should also be checking the ok hook.  But since this is a
   builtin the user has access to, don't abort.  */
30071 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
30074 sorry ("vector permutation (%d %d %d %d)",
30075 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
30078 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
30079 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30080 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
30083 sorry ("vector permutation "
30084 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
30085 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30086 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
30087 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
30088 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
30091 gcc_unreachable ();
30094 return CONST0_RTX (d.vmode);
30097 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
30100 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
30102 struct expand_vec_perm_d d;
30106 d.vmode = TYPE_MODE (vec_type);
30107 d.nelt = GET_MODE_NUNITS (d.vmode);
30108 d.testing_p = true;
30110 /* Given sufficient ISA support we can just return true here
30111 for selected vector modes. */
30112 if (GET_MODE_SIZE (d.vmode) == 16)
30114 /* All implementable with a single vpperm insn. */
30117 /* All implementable with 2 pshufb + 1 ior. */
30120 /* All implementable with shufpd or unpck[lh]pd. */
30125 vec_mask = extract_vec_perm_cst (&d, mask);
/* This hook cannot be called in response to something that the
   user does (unlike the builtin expander), so we shouldn't ever see
   an error generated from the extract.  */
30130 gcc_assert (vec_mask > 0 && vec_mask <= 3);
30131 one_vec = (vec_mask != 3);
30133 /* Implementable with shufps or pshufd. */
30134 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
30137 /* Otherwise we have to go through the motions and see if we can
30138 figure out how to generate the requested permutation. */
30139 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
30140 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
30142 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
30145 ret = ix86_expand_vec_perm_builtin_1 (&d);
30152 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
30154 struct expand_vec_perm_d d;
30160 d.vmode = GET_MODE (targ);
30161 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
30162 d.testing_p = false;
30164 for (i = 0; i < nelt; ++i)
30165 d.perm[i] = i * 2 + odd;
30167 /* We'll either be able to implement the permutation directly... */
30168 if (expand_vec_perm_1 (&d))
30171 /* ... or we use the special-case patterns. */
30172 expand_vec_perm_even_odd_1 (&d, odd);
/* This function returns the calling-ABI-specific va_list type node,
   i.e. the va_list type appropriate for FNDECL.  */
30179 ix86_fn_abi_va_list (tree fndecl)
30182 return va_list_type_node;
30183 gcc_assert (fndecl != NULL_TREE);
30185 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
30186 return ms_va_list_type_node;
30188 return sysv_va_list_type_node;
/* Returns the canonical va_list type specified by TYPE.  If there
   is no valid TYPE provided, it returns NULL_TREE.  */
30195 ix86_canonical_va_list_type (tree type)
30199 /* Resolve references and pointers to va_list type. */
30200 if (INDIRECT_REF_P (type))
30201 type = TREE_TYPE (type);
30202 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE(type)))
30203 type = TREE_TYPE (type);
30207 wtype = va_list_type_node;
30208 gcc_assert (wtype != NULL_TREE);
30210 if (TREE_CODE (wtype) == ARRAY_TYPE)
30212 /* If va_list is an array type, the argument may have decayed
30213 to a pointer type, e.g. by being passed to another function.
30214 In that case, unwrap both types so that we can compare the
30215 underlying records. */
30216 if (TREE_CODE (htype) == ARRAY_TYPE
30217 || POINTER_TYPE_P (htype))
30219 wtype = TREE_TYPE (wtype);
30220 htype = TREE_TYPE (htype);
30223 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30224 return va_list_type_node;
30225 wtype = sysv_va_list_type_node;
30226 gcc_assert (wtype != NULL_TREE);
30228 if (TREE_CODE (wtype) == ARRAY_TYPE)
30230 /* If va_list is an array type, the argument may have decayed
30231 to a pointer type, e.g. by being passed to another function.
30232 In that case, unwrap both types so that we can compare the
30233 underlying records. */
30234 if (TREE_CODE (htype) == ARRAY_TYPE
30235 || POINTER_TYPE_P (htype))
30237 wtype = TREE_TYPE (wtype);
30238 htype = TREE_TYPE (htype);
30241 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30242 return sysv_va_list_type_node;
30243 wtype = ms_va_list_type_node;
30244 gcc_assert (wtype != NULL_TREE);
30246 if (TREE_CODE (wtype) == ARRAY_TYPE)
30248 /* If va_list is an array type, the argument may have decayed
30249 to a pointer type, e.g. by being passed to another function.
30250 In that case, unwrap both types so that we can compare the
30251 underlying records. */
30252 if (TREE_CODE (htype) == ARRAY_TYPE
30253 || POINTER_TYPE_P (htype))
30255 wtype = TREE_TYPE (wtype);
30256 htype = TREE_TYPE (htype);
30259 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30260 return ms_va_list_type_node;
30263 return std_canonical_va_list_type (type);
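/* The unwrapping above matters because a va_list may be an array of
   one struct (as the 64-bit SysV va_list is); in hypothetical code
   such as

       void g (va_list ap) { h (ap); }

   AP decays to a pointer to the element struct when passed on, so
   both the builtin type and the argument type must be unwrapped
   before their main variants can be compared.  */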
30266 /* Iterate through the target-specific builtin types for va_list.
30267 IDX denotes the iterator, *PTREE is set to the type node of the
30268 considered va_list builtin, and *PNAME to its internal name.
30269 Returns zero if there is no element for this index, otherwise
30270 IDX should be increased upon the next call.
30271 Note, we do not iterate over the base builtin name __builtin_va_list.
30272 Used from c_common_nodes_and_builtins. */
30274 int
30275 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
30281 *ptree = ms_va_list_type_node;
30282 *pname = "__builtin_ms_va_list";
30285 *ptree = sysv_va_list_type_node;
30286 *pname = "__builtin_sysv_va_list";
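/* Caller-side sketch of the protocol documented above, assuming only
   the stated return convention:

       const char *name;
       tree type;
       int i;
       for (i = 0; ix86_enum_va_list (i, &name, &type) != 0; i++)
         ;  (register NAME and TYPE with the front end here)

   c_common_nodes_and_builtins walks the indices this way, stopping
   at the first zero return.  */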
30294 /* Initialize the GCC target structure. */
30295 #undef TARGET_RETURN_IN_MEMORY
30296 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
30298 #undef TARGET_LEGITIMIZE_ADDRESS
30299 #define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address
30301 #undef TARGET_ATTRIBUTE_TABLE
30302 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
30303 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30304 # undef TARGET_MERGE_DECL_ATTRIBUTES
30305 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
30306 #endif
30308 #undef TARGET_COMP_TYPE_ATTRIBUTES
30309 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
30311 #undef TARGET_INIT_BUILTINS
30312 #define TARGET_INIT_BUILTINS ix86_init_builtins
30313 #undef TARGET_BUILTIN_DECL
30314 #define TARGET_BUILTIN_DECL ix86_builtin_decl
30315 #undef TARGET_EXPAND_BUILTIN
30316 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
30318 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
30319 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
30320 ix86_builtin_vectorized_function
30322 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
30323 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
30325 #undef TARGET_BUILTIN_RECIPROCAL
30326 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
30328 #undef TARGET_ASM_FUNCTION_EPILOGUE
30329 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
30331 #undef TARGET_ENCODE_SECTION_INFO
30332 #ifndef SUBTARGET_ENCODE_SECTION_INFO
30333 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
30334 #else
30335 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
30336 #endif
30338 #undef TARGET_ASM_OPEN_PAREN
30339 #define TARGET_ASM_OPEN_PAREN ""
30340 #undef TARGET_ASM_CLOSE_PAREN
30341 #define TARGET_ASM_CLOSE_PAREN ""
30343 #undef TARGET_ASM_BYTE_OP
30344 #define TARGET_ASM_BYTE_OP ASM_BYTE
30346 #undef TARGET_ASM_ALIGNED_HI_OP
30347 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
30348 #undef TARGET_ASM_ALIGNED_SI_OP
30349 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
30350 #ifdef ASM_QUAD
30351 #undef TARGET_ASM_ALIGNED_DI_OP
30352 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
30353 #endif
30355 #undef TARGET_ASM_UNALIGNED_HI_OP
30356 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
30357 #undef TARGET_ASM_UNALIGNED_SI_OP
30358 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
30359 #undef TARGET_ASM_UNALIGNED_DI_OP
30360 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
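/* The unaligned hooks can simply reuse the aligned ones: on x86 the
   data directives above (ASM_SHORT, ASM_LONG, ASM_QUAD) impose no
   alignment of their own, so the same directive serves both cases.  */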
30362 #undef TARGET_SCHED_ADJUST_COST
30363 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
30364 #undef TARGET_SCHED_ISSUE_RATE
30365 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
30366 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
30367 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
30368 ia32_multipass_dfa_lookahead
30370 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
30371 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
30373 #if TARGET_HAVE_TLS
30374 #undef TARGET_HAVE_TLS
30375 #define TARGET_HAVE_TLS true
30376 #endif
30377 #undef TARGET_CANNOT_FORCE_CONST_MEM
30378 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
30379 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
30380 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
30382 #undef TARGET_DELEGITIMIZE_ADDRESS
30383 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
30385 #undef TARGET_MS_BITFIELD_LAYOUT_P
30386 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
30388 #if TARGET_MACHO
30389 #undef TARGET_BINDS_LOCAL_P
30390 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
30391 #endif
30392 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30393 #undef TARGET_BINDS_LOCAL_P
30394 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
30395 #endif
30397 #undef TARGET_ASM_OUTPUT_MI_THUNK
30398 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
30399 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
30400 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
30402 #undef TARGET_ASM_FILE_START
30403 #define TARGET_ASM_FILE_START x86_file_start
30405 #undef TARGET_DEFAULT_TARGET_FLAGS
30406 #define TARGET_DEFAULT_TARGET_FLAGS \
30407 (TARGET_DEFAULT \
30408 | TARGET_SUBTARGET_DEFAULT \
30409 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT \
30410 | MASK_FUSED_MADD)
30412 #undef TARGET_HANDLE_OPTION
30413 #define TARGET_HANDLE_OPTION ix86_handle_option
30415 #undef TARGET_RTX_COSTS
30416 #define TARGET_RTX_COSTS ix86_rtx_costs
30417 #undef TARGET_ADDRESS_COST
30418 #define TARGET_ADDRESS_COST ix86_address_cost
30420 #undef TARGET_FIXED_CONDITION_CODE_REGS
30421 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
30422 #undef TARGET_CC_MODES_COMPATIBLE
30423 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
30425 #undef TARGET_MACHINE_DEPENDENT_REORG
30426 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
30428 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
30429 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value
30431 #undef TARGET_BUILD_BUILTIN_VA_LIST
30432 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
30434 #undef TARGET_FN_ABI_VA_LIST
30435 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
30437 #undef TARGET_CANONICAL_VA_LIST_TYPE
30438 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
30440 #undef TARGET_EXPAND_BUILTIN_VA_START
30441 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
30443 #undef TARGET_MD_ASM_CLOBBERS
30444 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
30446 #undef TARGET_PROMOTE_PROTOTYPES
30447 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
30448 #undef TARGET_STRUCT_VALUE_RTX
30449 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
30450 #undef TARGET_SETUP_INCOMING_VARARGS
30451 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
30452 #undef TARGET_MUST_PASS_IN_STACK
30453 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
30454 #undef TARGET_PASS_BY_REFERENCE
30455 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
30456 #undef TARGET_INTERNAL_ARG_POINTER
30457 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
30458 #undef TARGET_UPDATE_STACK_BOUNDARY
30459 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
30460 #undef TARGET_GET_DRAP_RTX
30461 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
30462 #undef TARGET_STRICT_ARGUMENT_NAMING
30463 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
30464 #undef TARGET_STATIC_CHAIN
30465 #define TARGET_STATIC_CHAIN ix86_static_chain
30466 #undef TARGET_TRAMPOLINE_INIT
30467 #define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
30469 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
30470 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
30472 #undef TARGET_SCALAR_MODE_SUPPORTED_P
30473 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
30475 #undef TARGET_VECTOR_MODE_SUPPORTED_P
30476 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
30478 #undef TARGET_C_MODE_FOR_SUFFIX
30479 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
30481 #ifdef HAVE_AS_TLS
30482 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
30483 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
30484 #endif
30486 #ifdef SUBTARGET_INSERT_ATTRIBUTES
30487 #undef TARGET_INSERT_ATTRIBUTES
30488 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
30489 #endif
30491 #undef TARGET_MANGLE_TYPE
30492 #define TARGET_MANGLE_TYPE ix86_mangle_type
30494 #undef TARGET_STACK_PROTECT_FAIL
30495 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
30497 #undef TARGET_FUNCTION_VALUE
30498 #define TARGET_FUNCTION_VALUE ix86_function_value
30500 #undef TARGET_SECONDARY_RELOAD
30501 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
30503 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
30504 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
30505 ix86_builtin_vectorization_cost
30506 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
30507 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
30508 ix86_vectorize_builtin_vec_perm
30509 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
30510 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
30511 ix86_vectorize_builtin_vec_perm_ok
30513 #undef TARGET_SET_CURRENT_FUNCTION
30514 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
30516 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
30517 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
30519 #undef TARGET_OPTION_SAVE
30520 #define TARGET_OPTION_SAVE ix86_function_specific_save
30522 #undef TARGET_OPTION_RESTORE
30523 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
30525 #undef TARGET_OPTION_PRINT
30526 #define TARGET_OPTION_PRINT ix86_function_specific_print
30528 #undef TARGET_CAN_INLINE_P
30529 #define TARGET_CAN_INLINE_P ix86_can_inline_p
30531 #undef TARGET_EXPAND_TO_RTL_HOOK
30532 #define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi
30534 #undef TARGET_LEGITIMATE_ADDRESS_P
30535 #define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p
30537 #undef TARGET_IRA_COVER_CLASSES
30538 #define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes
30540 #undef TARGET_FRAME_POINTER_REQUIRED
30541 #define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required
30543 #undef TARGET_CAN_ELIMINATE
30544 #define TARGET_CAN_ELIMINATE ix86_can_eliminate
30546 #undef TARGET_ASM_CODE_END
30547 #define TARGET_ASM_CODE_END ix86_code_end
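/* Each #undef/#define pair above replaces one default hook from
   target-def.h; TARGET_INITIALIZER then expands to an initializer
   with every hook filled in, giving the one and only instance of
   struct gcc_target.  */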
30549 struct gcc_target targetm = TARGET_INITIALIZER;
30551 #include "gt-i386.h"