/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "basic-block.h"
#include "target-def.h"
#include "langhooks.h"
#include "tm-constrs.h"
#include "dwarf2out.h"
static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif

/* Return the index of the given mode in the multiply and divide cost
   tables.  */
#define MODE_INDEX(mode) \
  ((mode) == QImode ? 0 \
   : (mode) == HImode ? 1 \
   : (mode) == SImode ? 2 \
   : (mode) == DImode ? 3 \
   : 4)

/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)

#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
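/* Illustrative sketch (not part of the original source): how the two
   macros above are meant to be used together.  MODE_INDEX selects a row
   of the per-mode cost arrays in the tables below, while COSTS_N_BYTES
   expresses size-tuning costs on the same scale that COSTS_N_INSNS uses
   for speed tuning.  The function name here is hypothetical; mult_init
   is assumed to be the per-mode multiply-cost field of struct
   processor_costs.  */
#if 0
static int
example_mult_init_cost (enum machine_mode mode)
{
  /* Rows 0..3 correspond to QImode..DImode; row 4 is "other".  */
  return ix86_cost->mult_init[MODE_INDEX (mode)];
}
#endif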
struct processor_costs ix86_size_cost = {  /* costs for tuning for size */
  COSTS_N_BYTES (2),  /* cost of an add instruction */
  COSTS_N_BYTES (3),  /* cost of a lea instruction */
  COSTS_N_BYTES (2),  /* variable shift costs */
  COSTS_N_BYTES (3),  /* constant shift costs */
  {COSTS_N_BYTES (3),  /* cost of starting multiply for QI */
   COSTS_N_BYTES (3),  /* HI */
   COSTS_N_BYTES (3),  /* SI */
   COSTS_N_BYTES (3),  /* DI */
   COSTS_N_BYTES (5)},  /* other */
  0,  /* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),  /* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),  /* HI */
   COSTS_N_BYTES (3),  /* SI */
   COSTS_N_BYTES (3),  /* DI */
   COSTS_N_BYTES (5)},  /* other */
  COSTS_N_BYTES (3),  /* cost of movsx */
  COSTS_N_BYTES (3),  /* cost of movzx */
  2,  /* cost for loading QImode using movzbl */
  {2, 2, 2},  /* cost of loading integer registers in QImode,
		 HImode and SImode.  Relative to reg-reg move (2).  */
  {2, 2, 2},  /* cost of storing integer registers */
  2,  /* cost of reg,reg fld/fst */
  {2, 2, 2},  /* cost of loading fp registers in SFmode, DFmode and XFmode */
  {2, 2, 2},  /* cost of storing fp registers in SFmode, DFmode and XFmode */
  3,  /* cost of moving MMX register */
  {3, 3},  /* cost of loading MMX registers in SImode and DImode */
  {3, 3},  /* cost of storing MMX registers in SImode and DImode */
  3,  /* cost of moving SSE register */
  {3, 3, 3},  /* cost of loading SSE registers in SImode, DImode and TImode */
  {3, 3, 3},  /* cost of storing SSE registers in SImode, DImode and TImode */
  3,  /* MMX or SSE register to integer */
  0,  /* size of l1 cache */
  0,  /* size of l2 cache */
  0,  /* size of prefetch block */
  0,  /* number of parallel prefetches */
  COSTS_N_BYTES (2),  /* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),  /* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),  /* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),  /* cost of FABS instruction.  */
  COSTS_N_BYTES (2),  /* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),  /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1,  /* scalar_stmt_cost.  */
  1,  /* scalar_load_cost.  */
  1,  /* scalar_store_cost.  */
  1,  /* vec_stmt_cost.  */
  1,  /* vec_to_scalar_cost.  */
  1,  /* scalar_to_vec_cost.  */
  1,  /* vec_align_load_cost.  */
  1,  /* vec_unalign_load_cost.  */
  1,  /* vec_store_cost.  */
  1,  /* cond_taken_branch_cost.  */
  1,  /* cond_not_taken_branch_cost.  */
};
/* Processor costs (relative to an add) */
struct processor_costs i386_cost = {  /* 386 specific costs */
  COSTS_N_INSNS (1),  /* cost of an add instruction */
  COSTS_N_INSNS (1),  /* cost of a lea instruction */
  COSTS_N_INSNS (3),  /* variable shift costs */
  COSTS_N_INSNS (2),  /* constant shift costs */
  {COSTS_N_INSNS (6),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (6),  /* HI */
   COSTS_N_INSNS (6),  /* SI */
   COSTS_N_INSNS (6),  /* DI */
   COSTS_N_INSNS (6)},  /* other */
  COSTS_N_INSNS (1),  /* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),  /* HI */
   COSTS_N_INSNS (23),  /* SI */
   COSTS_N_INSNS (23),  /* DI */
   COSTS_N_INSNS (23)},  /* other */
  COSTS_N_INSNS (3),  /* cost of movsx */
  COSTS_N_INSNS (2),  /* cost of movzx */
  15,  /* "large" insn */
  4,  /* cost for loading QImode using movzbl */
  {2, 4, 2},  /* cost of loading integer registers in QImode,
		 HImode and SImode.  Relative to reg-reg move (2).  */
  {2, 4, 2},  /* cost of storing integer registers */
  2,  /* cost of reg,reg fld/fst */
  {8, 8, 8},  /* cost of loading fp registers in SFmode, DFmode and XFmode */
  {8, 8, 8},  /* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,  /* cost of moving MMX register */
  {4, 8},  /* cost of loading MMX registers in SImode and DImode */
  {4, 8},  /* cost of storing MMX registers in SImode and DImode */
  2,  /* cost of moving SSE register */
  {4, 8, 16},  /* cost of loading SSE registers in SImode, DImode and TImode */
  {4, 8, 16},  /* cost of storing SSE registers in SImode, DImode and TImode */
  3,  /* MMX or SSE register to integer */
  0,  /* size of l1 cache */
  0,  /* size of l2 cache */
  0,  /* size of prefetch block */
  0,  /* number of parallel prefetches */
  COSTS_N_INSNS (23),  /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),  /* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),  /* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),  /* cost of FABS instruction.  */
  COSTS_N_INSNS (24),  /* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),  /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,  /* scalar_stmt_cost.  */
  1,  /* scalar_load_cost.  */
  1,  /* scalar_store_cost.  */
  1,  /* vec_stmt_cost.  */
  1,  /* vec_to_scalar_cost.  */
  1,  /* scalar_to_vec_cost.  */
  1,  /* vec_align_load_cost.  */
  2,  /* vec_unalign_load_cost.  */
  1,  /* vec_store_cost.  */
  3,  /* cond_taken_branch_cost.  */
  1,  /* cond_not_taken_branch_cost.  */
};
struct processor_costs i486_cost = {  /* 486 specific costs */
  COSTS_N_INSNS (1),  /* cost of an add instruction */
  COSTS_N_INSNS (1),  /* cost of a lea instruction */
  COSTS_N_INSNS (3),  /* variable shift costs */
  COSTS_N_INSNS (2),  /* constant shift costs */
  {COSTS_N_INSNS (12),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (12),  /* HI */
   COSTS_N_INSNS (12),  /* SI */
   COSTS_N_INSNS (12),  /* DI */
   COSTS_N_INSNS (12)},  /* other */
  1,  /* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),  /* HI */
   COSTS_N_INSNS (40),  /* SI */
   COSTS_N_INSNS (40),  /* DI */
   COSTS_N_INSNS (40)},  /* other */
  COSTS_N_INSNS (3),  /* cost of movsx */
  COSTS_N_INSNS (2),  /* cost of movzx */
  15,  /* "large" insn */
  4,  /* cost for loading QImode using movzbl */
  {2, 4, 2},  /* cost of loading integer registers in QImode,
		 HImode and SImode.  Relative to reg-reg move (2).  */
  {2, 4, 2},  /* cost of storing integer registers */
  2,  /* cost of reg,reg fld/fst */
  {8, 8, 8},  /* cost of loading fp registers in SFmode, DFmode and XFmode */
  {8, 8, 8},  /* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,  /* cost of moving MMX register */
  {4, 8},  /* cost of loading MMX registers in SImode and DImode */
  {4, 8},  /* cost of storing MMX registers in SImode and DImode */
  2,  /* cost of moving SSE register */
  {4, 8, 16},  /* cost of loading SSE registers in SImode, DImode and TImode */
  {4, 8, 16},  /* cost of storing SSE registers in SImode, DImode and TImode */
  3,  /* MMX or SSE register to integer */
  4,  /* size of l1 cache.  486 has 8kB cache shared for code and
	 data, so 4kB is not really precise.  */
  4,  /* size of l2 cache */
  0,  /* size of prefetch block */
  0,  /* number of parallel prefetches */
  COSTS_N_INSNS (8),  /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),  /* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),  /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),  /* cost of FABS instruction.  */
  COSTS_N_INSNS (3),  /* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),  /* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,  /* scalar_stmt_cost.  */
  1,  /* scalar_load_cost.  */
  1,  /* scalar_store_cost.  */
  1,  /* vec_stmt_cost.  */
  1,  /* vec_to_scalar_cost.  */
  1,  /* scalar_to_vec_cost.  */
  1,  /* vec_align_load_cost.  */
  2,  /* vec_unalign_load_cost.  */
  1,  /* vec_store_cost.  */
  3,  /* cond_taken_branch_cost.  */
  1,  /* cond_not_taken_branch_cost.  */
};
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),  /* cost of an add instruction */
  COSTS_N_INSNS (1),  /* cost of a lea instruction */
  COSTS_N_INSNS (4),  /* variable shift costs */
  COSTS_N_INSNS (1),  /* constant shift costs */
  {COSTS_N_INSNS (11),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (11),  /* HI */
   COSTS_N_INSNS (11),  /* SI */
   COSTS_N_INSNS (11),  /* DI */
   COSTS_N_INSNS (11)},  /* other */
  0,  /* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),  /* HI */
   COSTS_N_INSNS (25),  /* SI */
   COSTS_N_INSNS (25),  /* DI */
   COSTS_N_INSNS (25)},  /* other */
  COSTS_N_INSNS (3),  /* cost of movsx */
  COSTS_N_INSNS (2),  /* cost of movzx */
  8,  /* "large" insn */
  6,  /* cost for loading QImode using movzbl */
  {2, 4, 2},  /* cost of loading integer registers in QImode,
		 HImode and SImode.  Relative to reg-reg move (2).  */
  {2, 4, 2},  /* cost of storing integer registers */
  2,  /* cost of reg,reg fld/fst */
  {2, 2, 6},  /* cost of loading fp registers in SFmode, DFmode and XFmode */
  {4, 4, 6},  /* cost of storing fp registers in SFmode, DFmode and XFmode */
  8,  /* cost of moving MMX register */
  {8, 8},  /* cost of loading MMX registers in SImode and DImode */
  {8, 8},  /* cost of storing MMX registers in SImode and DImode */
  2,  /* cost of moving SSE register */
  {4, 8, 16},  /* cost of loading SSE registers in SImode, DImode and TImode */
  {4, 8, 16},  /* cost of storing SSE registers in SImode, DImode and TImode */
  3,  /* MMX or SSE register to integer */
  8,  /* size of l1 cache.  */
  8,  /* size of l2 cache */
  0,  /* size of prefetch block */
  0,  /* number of parallel prefetches */
  COSTS_N_INSNS (3),  /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),  /* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),  /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),  /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),  /* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),  /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,  /* scalar_stmt_cost.  */
  1,  /* scalar_load_cost.  */
  1,  /* scalar_store_cost.  */
  1,  /* vec_stmt_cost.  */
  1,  /* vec_to_scalar_cost.  */
  1,  /* scalar_to_vec_cost.  */
  1,  /* vec_align_load_cost.  */
  2,  /* vec_unalign_load_cost.  */
  1,  /* vec_store_cost.  */
  3,  /* cond_taken_branch_cost.  */
  1,  /* cond_not_taken_branch_cost.  */
};
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),  /* cost of an add instruction */
  COSTS_N_INSNS (1),  /* cost of a lea instruction */
  COSTS_N_INSNS (1),  /* variable shift costs */
  COSTS_N_INSNS (1),  /* constant shift costs */
  {COSTS_N_INSNS (4),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),  /* HI */
   COSTS_N_INSNS (4),  /* SI */
   COSTS_N_INSNS (4),  /* DI */
   COSTS_N_INSNS (4)},  /* other */
  0,  /* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),  /* HI */
   COSTS_N_INSNS (17),  /* SI */
   COSTS_N_INSNS (17),  /* DI */
   COSTS_N_INSNS (17)},  /* other */
  COSTS_N_INSNS (1),  /* cost of movsx */
  COSTS_N_INSNS (1),  /* cost of movzx */
  8,  /* "large" insn */
  2,  /* cost for loading QImode using movzbl */
  {4, 4, 4},  /* cost of loading integer registers in QImode,
		 HImode and SImode.  Relative to reg-reg move (2).  */
  {2, 2, 2},  /* cost of storing integer registers */
  2,  /* cost of reg,reg fld/fst */
  {2, 2, 6},  /* cost of loading fp registers in SFmode, DFmode and XFmode */
  {4, 4, 6},  /* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,  /* cost of moving MMX register */
  {2, 2},  /* cost of loading MMX registers in SImode and DImode */
  {2, 2},  /* cost of storing MMX registers in SImode and DImode */
  2,  /* cost of moving SSE register */
  {2, 2, 8},  /* cost of loading SSE registers in SImode, DImode and TImode */
  {2, 2, 8},  /* cost of storing SSE registers in SImode, DImode and TImode */
  3,  /* MMX or SSE register to integer */
  8,  /* size of l1 cache.  */
  256,  /* size of l2 cache */
  32,  /* size of prefetch block */
  6,  /* number of parallel prefetches */
  COSTS_N_INSNS (3),  /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),  /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),  /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),  /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),  /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),  /* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks the inline loop is still a
     noticeable win; for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb apparently has a more expensive startup time on
     the CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,  /* scalar_stmt_cost.  */
  1,  /* scalar_load_cost.  */
  1,  /* scalar_store_cost.  */
  1,  /* vec_stmt_cost.  */
  1,  /* vec_to_scalar_cost.  */
  1,  /* scalar_to_vec_cost.  */
  1,  /* vec_align_load_cost.  */
  2,  /* vec_unalign_load_cost.  */
  1,  /* vec_store_cost.  */
  3,  /* cond_taken_branch_cost.  */
  1,  /* cond_not_taken_branch_cost.  */
};
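/* Illustrative sketch (not part of the original source): how a stringop
   size table such as the one in pentiumpro_cost above is consulted.  Each
   {max, alg} pair covers block sizes up to MAX bytes, with -1 as the
   catch-all entry; the first stringop_algs of each pair is used for 32-bit
   code and the second (often DUMMY_STRINGOP_ALGS) for 64-bit code.  The
   function below is hypothetical; the real selection logic lives in the
   string-operation expanders later in this file.  */
#if 0
static enum stringop_alg
example_pick_stringop_alg (const struct stringop_algs *algs,
			   HOST_WIDE_INT size, int n_entries)
{
  int i;
  for (i = 0; i < n_entries; i++)
    if (algs->size[i].max == -1 || size <= algs->size[i].max)
      return algs->size[i].alg;
  return libcall;	/* Fall back to the library call.  */
}
#endif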
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),  /* cost of an add instruction */
  COSTS_N_INSNS (1),  /* cost of a lea instruction */
  COSTS_N_INSNS (2),  /* variable shift costs */
  COSTS_N_INSNS (1),  /* constant shift costs */
  {COSTS_N_INSNS (3),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),  /* HI */
   COSTS_N_INSNS (7),  /* SI */
   COSTS_N_INSNS (7),  /* DI */
   COSTS_N_INSNS (7)},  /* other */
  0,  /* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),  /* HI */
   COSTS_N_INSNS (39),  /* SI */
   COSTS_N_INSNS (39),  /* DI */
   COSTS_N_INSNS (39)},  /* other */
  COSTS_N_INSNS (1),  /* cost of movsx */
  COSTS_N_INSNS (1),  /* cost of movzx */
  8,  /* "large" insn */
  1,  /* cost for loading QImode using movzbl */
  {1, 1, 1},  /* cost of loading integer registers in QImode,
		 HImode and SImode.  Relative to reg-reg move (2).  */
  {1, 1, 1},  /* cost of storing integer registers */
  1,  /* cost of reg,reg fld/fst */
  {1, 1, 1},  /* cost of loading fp registers in SFmode, DFmode and XFmode */
  {4, 6, 6},  /* cost of storing fp registers in SFmode, DFmode and XFmode */
  1,  /* cost of moving MMX register */
  {1, 1},  /* cost of loading MMX registers in SImode and DImode */
  {1, 1},  /* cost of storing MMX registers in SImode and DImode */
  1,  /* cost of moving SSE register */
  {1, 1, 1},  /* cost of loading SSE registers in SImode, DImode and TImode */
  {1, 1, 1},  /* cost of storing SSE registers in SImode, DImode and TImode */
  1,  /* MMX or SSE register to integer */
  64,  /* size of l1 cache.  */
  128,  /* size of l2 cache.  */
  32,  /* size of prefetch block */
  1,  /* number of parallel prefetches */
  COSTS_N_INSNS (6),  /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),  /* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),  /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),  /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),  /* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),  /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,  /* scalar_stmt_cost.  */
  1,  /* scalar_load_cost.  */
  1,  /* scalar_store_cost.  */
  1,  /* vec_stmt_cost.  */
  1,  /* vec_to_scalar_cost.  */
  1,  /* scalar_to_vec_cost.  */
  1,  /* vec_align_load_cost.  */
  2,  /* vec_unalign_load_cost.  */
  1,  /* vec_store_cost.  */
  3,  /* cond_taken_branch_cost.  */
  1,  /* cond_not_taken_branch_cost.  */
};
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),  /* cost of an add instruction */
  COSTS_N_INSNS (2),  /* cost of a lea instruction */
  COSTS_N_INSNS (1),  /* variable shift costs */
  COSTS_N_INSNS (1),  /* constant shift costs */
  {COSTS_N_INSNS (3),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (3),  /* HI */
   COSTS_N_INSNS (3),  /* SI */
   COSTS_N_INSNS (3),  /* DI */
   COSTS_N_INSNS (3)},  /* other */
  0,  /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),  /* HI */
   COSTS_N_INSNS (18),  /* SI */
   COSTS_N_INSNS (18),  /* DI */
   COSTS_N_INSNS (18)},  /* other */
  COSTS_N_INSNS (2),  /* cost of movsx */
  COSTS_N_INSNS (2),  /* cost of movzx */
  8,  /* "large" insn */
  3,  /* cost for loading QImode using movzbl */
  {4, 5, 4},  /* cost of loading integer registers in QImode,
		 HImode and SImode.  Relative to reg-reg move (2).  */
  {2, 3, 2},  /* cost of storing integer registers */
  4,  /* cost of reg,reg fld/fst */
  {6, 6, 6},  /* cost of loading fp registers in SFmode, DFmode and XFmode */
  {4, 4, 4},  /* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,  /* cost of moving MMX register */
  {2, 2},  /* cost of loading MMX registers in SImode and DImode */
  {2, 2},  /* cost of storing MMX registers in SImode and DImode */
  2,  /* cost of moving SSE register */
  {2, 2, 8},  /* cost of loading SSE registers in SImode, DImode and TImode */
  {2, 2, 8},  /* cost of storing SSE registers in SImode, DImode and TImode */
  6,  /* MMX or SSE register to integer */
  32,  /* size of l1 cache.  */
  32,  /* size of l2 cache.  Some models have integrated l2 cache, but
	  optimizing for k6 is not important enough to worry about that.  */
  32,  /* size of prefetch block */
  1,  /* number of parallel prefetches */
  COSTS_N_INSNS (2),  /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),  /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),  /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),  /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),  /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),  /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,  /* scalar_stmt_cost.  */
  1,  /* scalar_load_cost.  */
  1,  /* scalar_store_cost.  */
  1,  /* vec_stmt_cost.  */
  1,  /* vec_to_scalar_cost.  */
  1,  /* scalar_to_vec_cost.  */
  1,  /* vec_align_load_cost.  */
  2,  /* vec_unalign_load_cost.  */
  1,  /* vec_store_cost.  */
  3,  /* cond_taken_branch_cost.  */
  1,  /* cond_not_taken_branch_cost.  */
};
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),  /* cost of an add instruction */
  COSTS_N_INSNS (2),  /* cost of a lea instruction */
  COSTS_N_INSNS (1),  /* variable shift costs */
  COSTS_N_INSNS (1),  /* constant shift costs */
  {COSTS_N_INSNS (5),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (5),  /* HI */
   COSTS_N_INSNS (5),  /* SI */
   COSTS_N_INSNS (5),  /* DI */
   COSTS_N_INSNS (5)},  /* other */
  0,  /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),  /* HI */
   COSTS_N_INSNS (42),  /* SI */
   COSTS_N_INSNS (74),  /* DI */
   COSTS_N_INSNS (74)},  /* other */
  COSTS_N_INSNS (1),  /* cost of movsx */
  COSTS_N_INSNS (1),  /* cost of movzx */
  8,  /* "large" insn */
  4,  /* cost for loading QImode using movzbl */
  {3, 4, 3},  /* cost of loading integer registers in QImode,
		 HImode and SImode.  Relative to reg-reg move (2).  */
  {3, 4, 3},  /* cost of storing integer registers */
  4,  /* cost of reg,reg fld/fst */
  {4, 4, 12},  /* cost of loading fp registers in SFmode, DFmode and XFmode */
  {6, 6, 8},  /* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,  /* cost of moving MMX register */
  {4, 4},  /* cost of loading MMX registers in SImode and DImode */
  {4, 4},  /* cost of storing MMX registers in SImode and DImode */
  2,  /* cost of moving SSE register */
  {4, 4, 6},  /* cost of loading SSE registers in SImode, DImode and TImode */
  {4, 4, 5},  /* cost of storing SSE registers in SImode, DImode and TImode */
  5,  /* MMX or SSE register to integer */
  64,  /* size of l1 cache.  */
  256,  /* size of l2 cache.  */
  64,  /* size of prefetch block */
  6,  /* number of parallel prefetches */
  COSTS_N_INSNS (4),  /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),  /* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),  /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),  /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),  /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),  /* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with the REP prefix (relative to
     loops) compared to K8.  Alignment becomes important after 8 bytes for
     memcpy and 128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,  /* scalar_stmt_cost.  */
  1,  /* scalar_load_cost.  */
  1,  /* scalar_store_cost.  */
  1,  /* vec_stmt_cost.  */
  1,  /* vec_to_scalar_cost.  */
  1,  /* scalar_to_vec_cost.  */
  1,  /* vec_align_load_cost.  */
  2,  /* vec_unalign_load_cost.  */
  1,  /* vec_store_cost.  */
  3,  /* cond_taken_branch_cost.  */
  1,  /* cond_not_taken_branch_cost.  */
};
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),  /* cost of an add instruction */
  COSTS_N_INSNS (2),  /* cost of a lea instruction */
  COSTS_N_INSNS (1),  /* variable shift costs */
  COSTS_N_INSNS (1),  /* constant shift costs */
  {COSTS_N_INSNS (3),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),  /* HI */
   COSTS_N_INSNS (3),  /* SI */
   COSTS_N_INSNS (4),  /* DI */
   COSTS_N_INSNS (5)},  /* other */
  0,  /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),  /* HI */
   COSTS_N_INSNS (42),  /* SI */
   COSTS_N_INSNS (74),  /* DI */
   COSTS_N_INSNS (74)},  /* other */
  COSTS_N_INSNS (1),  /* cost of movsx */
  COSTS_N_INSNS (1),  /* cost of movzx */
  8,  /* "large" insn */
  4,  /* cost for loading QImode using movzbl */
  {3, 4, 3},  /* cost of loading integer registers in QImode,
		 HImode and SImode.  Relative to reg-reg move (2).  */
  {3, 4, 3},  /* cost of storing integer registers */
  4,  /* cost of reg,reg fld/fst */
  {4, 4, 12},  /* cost of loading fp registers in SFmode, DFmode and XFmode */
  {6, 6, 8},  /* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,  /* cost of moving MMX register */
  {3, 3},  /* cost of loading MMX registers in SImode and DImode */
  {4, 4},  /* cost of storing MMX registers in SImode and DImode */
  2,  /* cost of moving SSE register */
  {4, 3, 6},  /* cost of loading SSE registers in SImode, DImode and TImode */
  {4, 4, 5},  /* cost of storing SSE registers in SImode, DImode and TImode */
  5,  /* MMX or SSE register to integer */
  64,  /* size of l1 cache.  */
  512,  /* size of l2 cache.  */
  64,  /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it probably is not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,  /* number of parallel prefetches */
  COSTS_N_INSNS (4),  /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),  /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),  /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),  /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),  /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),  /* cost of FSQRT instruction.  */
  /* K8 has optimized REP instructions for medium sized blocks, but for very
     small blocks it is better to use a loop.  For large blocks, a libcall
     can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,  /* scalar_stmt_cost.  */
  2,  /* scalar_load_cost.  */
  2,  /* scalar_store_cost.  */
  5,  /* vec_stmt_cost.  */
  0,  /* vec_to_scalar_cost.  */
  2,  /* scalar_to_vec_cost.  */
  2,  /* vec_align_load_cost.  */
  3,  /* vec_unalign_load_cost.  */
  3,  /* vec_store_cost.  */
  3,  /* cond_taken_branch_cost.  */
  2,  /* cond_not_taken_branch_cost.  */
};
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),  /* cost of an add instruction */
  COSTS_N_INSNS (2),  /* cost of a lea instruction */
  COSTS_N_INSNS (1),  /* variable shift costs */
  COSTS_N_INSNS (1),  /* constant shift costs */
  {COSTS_N_INSNS (3),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),  /* HI */
   COSTS_N_INSNS (3),  /* SI */
   COSTS_N_INSNS (4),  /* DI */
   COSTS_N_INSNS (5)},  /* other */
  0,  /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),  /* HI */
   COSTS_N_INSNS (51),  /* SI */
   COSTS_N_INSNS (83),  /* DI */
   COSTS_N_INSNS (83)},  /* other */
  COSTS_N_INSNS (1),  /* cost of movsx */
  COSTS_N_INSNS (1),  /* cost of movzx */
  8,  /* "large" insn */
  4,  /* cost for loading QImode using movzbl */
  {3, 4, 3},  /* cost of loading integer registers in QImode,
		 HImode and SImode.  Relative to reg-reg move (2).  */
  {3, 4, 3},  /* cost of storing integer registers */
  4,  /* cost of reg,reg fld/fst */
  {4, 4, 12},  /* cost of loading fp registers in SFmode, DFmode and XFmode */
  {6, 6, 8},  /* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,  /* cost of moving MMX register */
  {3, 3},  /* cost of loading MMX registers in SImode and DImode */
  {4, 4},  /* cost of storing MMX registers in SImode and DImode */
  2,  /* cost of moving SSE register */
  {4, 4, 3},  /* cost of loading SSE registers in SImode, DImode and TImode */
  {4, 4, 5},  /* cost of storing SSE registers in SImode, DImode and TImode */
  3,  /* MMX or SSE register to integer */
  /* On K8:
      MOVD reg64, xmmreg	Double	FSTORE 4
      MOVD reg32, xmmreg	Double	FSTORE 4
     On AMDFAM10:
      MOVD reg64, xmmreg	Double	FADD 3
      MOVD reg32, xmmreg	Double	FADD 3  */
  64,  /* size of l1 cache.  */
  512,  /* size of l2 cache.  */
  64,  /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it probably is not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,  /* number of parallel prefetches */
  COSTS_N_INSNS (4),  /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),  /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),  /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),  /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),  /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),  /* cost of FSQRT instruction.  */
  /* AMDFAM10 has optimized REP instructions for medium sized blocks, but
     for very small blocks it is better to use a loop.  For large blocks, a
     libcall can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,  /* scalar_stmt_cost.  */
  2,  /* scalar_load_cost.  */
  2,  /* scalar_store_cost.  */
  6,  /* vec_stmt_cost.  */
  0,  /* vec_to_scalar_cost.  */
  2,  /* scalar_to_vec_cost.  */
  2,  /* vec_align_load_cost.  */
  2,  /* vec_unalign_load_cost.  */
  2,  /* vec_store_cost.  */
  2,  /* cond_taken_branch_cost.  */
  1,  /* cond_not_taken_branch_cost.  */
};
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),  /* cost of an add instruction */
  COSTS_N_INSNS (3),  /* cost of a lea instruction */
  COSTS_N_INSNS (4),  /* variable shift costs */
  COSTS_N_INSNS (4),  /* constant shift costs */
  {COSTS_N_INSNS (15),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (15),  /* HI */
   COSTS_N_INSNS (15),  /* SI */
   COSTS_N_INSNS (15),  /* DI */
   COSTS_N_INSNS (15)},  /* other */
  0,  /* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),  /* HI */
   COSTS_N_INSNS (56),  /* SI */
   COSTS_N_INSNS (56),  /* DI */
   COSTS_N_INSNS (56)},  /* other */
  COSTS_N_INSNS (1),  /* cost of movsx */
  COSTS_N_INSNS (1),  /* cost of movzx */
  16,  /* "large" insn */
  2,  /* cost for loading QImode using movzbl */
  {4, 5, 4},  /* cost of loading integer registers in QImode,
		 HImode and SImode.  Relative to reg-reg move (2).  */
  {2, 3, 2},  /* cost of storing integer registers */
  2,  /* cost of reg,reg fld/fst */
  {2, 2, 6},  /* cost of loading fp registers in SFmode, DFmode and XFmode */
  {4, 4, 6},  /* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,  /* cost of moving MMX register */
  {2, 2},  /* cost of loading MMX registers in SImode and DImode */
  {2, 2},  /* cost of storing MMX registers in SImode and DImode */
  12,  /* cost of moving SSE register */
  {12, 12, 12},  /* cost of loading SSE registers in SImode, DImode and TImode */
  {2, 2, 8},  /* cost of storing SSE registers in SImode, DImode and TImode */
  10,  /* MMX or SSE register to integer */
  8,  /* size of l1 cache.  */
  256,  /* size of l2 cache.  */
  64,  /* size of prefetch block */
  6,  /* number of parallel prefetches */
  COSTS_N_INSNS (5),  /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),  /* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),  /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),  /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),  /* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),  /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,  /* scalar_stmt_cost.  */
  1,  /* scalar_load_cost.  */
  1,  /* scalar_store_cost.  */
  1,  /* vec_stmt_cost.  */
  1,  /* vec_to_scalar_cost.  */
  1,  /* scalar_to_vec_cost.  */
  1,  /* vec_align_load_cost.  */
  2,  /* vec_unalign_load_cost.  */
  1,  /* vec_store_cost.  */
  3,  /* cond_taken_branch_cost.  */
  1,  /* cond_not_taken_branch_cost.  */
};
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),  /* cost of an add instruction */
  COSTS_N_INSNS (1),  /* cost of a lea instruction */
  COSTS_N_INSNS (1),  /* variable shift costs */
  COSTS_N_INSNS (1),  /* constant shift costs */
  {COSTS_N_INSNS (10),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (10),  /* HI */
   COSTS_N_INSNS (10),  /* SI */
   COSTS_N_INSNS (10),  /* DI */
   COSTS_N_INSNS (10)},  /* other */
  0,  /* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),  /* HI */
   COSTS_N_INSNS (66),  /* SI */
   COSTS_N_INSNS (66),  /* DI */
   COSTS_N_INSNS (66)},  /* other */
  COSTS_N_INSNS (1),  /* cost of movsx */
  COSTS_N_INSNS (1),  /* cost of movzx */
  16,  /* "large" insn */
  4,  /* cost for loading QImode using movzbl */
  {4, 4, 4},  /* cost of loading integer registers in QImode,
		 HImode and SImode.  Relative to reg-reg move (2).  */
  {4, 4, 4},  /* cost of storing integer registers */
  3,  /* cost of reg,reg fld/fst */
  {12, 12, 12},  /* cost of loading fp registers in SFmode, DFmode and XFmode */
  {4, 4, 4},  /* cost of storing fp registers in SFmode, DFmode and XFmode */
  6,  /* cost of moving MMX register */
  {12, 12},  /* cost of loading MMX registers in SImode and DImode */
  {12, 12},  /* cost of storing MMX registers in SImode and DImode */
  6,  /* cost of moving SSE register */
  {12, 12, 12},  /* cost of loading SSE registers in SImode, DImode and TImode */
  {12, 12, 12},  /* cost of storing SSE registers in SImode, DImode and TImode */
  8,  /* MMX or SSE register to integer */
  8,  /* size of l1 cache.  */
  1024,  /* size of l2 cache.  */
  128,  /* size of prefetch block */
  8,  /* number of parallel prefetches */
  COSTS_N_INSNS (6),  /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),  /* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),  /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),  /* cost of FABS instruction.  */
  COSTS_N_INSNS (3),  /* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),  /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
	      {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,  /* scalar_stmt_cost.  */
  1,  /* scalar_load_cost.  */
  1,  /* scalar_store_cost.  */
  1,  /* vec_stmt_cost.  */
  1,  /* vec_to_scalar_cost.  */
  1,  /* scalar_to_vec_cost.  */
  1,  /* vec_align_load_cost.  */
  2,  /* vec_unalign_load_cost.  */
  1,  /* vec_store_cost.  */
  3,  /* cond_taken_branch_cost.  */
  1,  /* cond_not_taken_branch_cost.  */
};
struct processor_costs core2_cost = {
  COSTS_N_INSNS (1),  /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,  /* cost of a lea instruction */
  COSTS_N_INSNS (1),  /* variable shift costs */
  COSTS_N_INSNS (1),  /* constant shift costs */
  {COSTS_N_INSNS (3),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (3),  /* HI */
   COSTS_N_INSNS (3),  /* SI */
   COSTS_N_INSNS (3),  /* DI */
   COSTS_N_INSNS (3)},  /* other */
  0,  /* cost of multiply per each bit set */
  {COSTS_N_INSNS (22),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (22),  /* HI */
   COSTS_N_INSNS (22),  /* SI */
   COSTS_N_INSNS (22),  /* DI */
   COSTS_N_INSNS (22)},  /* other */
  COSTS_N_INSNS (1),  /* cost of movsx */
  COSTS_N_INSNS (1),  /* cost of movzx */
  8,  /* "large" insn */
  2,  /* cost for loading QImode using movzbl */
  {6, 6, 6},  /* cost of loading integer registers in QImode,
		 HImode and SImode.  Relative to reg-reg move (2).  */
  {4, 4, 4},  /* cost of storing integer registers */
  2,  /* cost of reg,reg fld/fst */
  {6, 6, 6},  /* cost of loading fp registers in SFmode, DFmode and XFmode */
  {4, 4, 4},  /* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,  /* cost of moving MMX register */
  {6, 6},  /* cost of loading MMX registers in SImode and DImode */
  {4, 4},  /* cost of storing MMX registers in SImode and DImode */
  2,  /* cost of moving SSE register */
  {6, 6, 6},  /* cost of loading SSE registers in SImode, DImode and TImode */
  {4, 4, 4},  /* cost of storing SSE registers in SImode, DImode and TImode */
  2,  /* MMX or SSE register to integer */
  32,  /* size of l1 cache.  */
  2048,  /* size of l2 cache.  */
  128,  /* size of prefetch block */
  8,  /* number of parallel prefetches */
  3,  /* Branch cost */
  COSTS_N_INSNS (3),  /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),  /* cost of FMUL instruction.  */
  COSTS_N_INSNS (32),  /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),  /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),  /* cost of FCHS instruction.  */
  COSTS_N_INSNS (58),  /* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,  /* scalar_stmt_cost.  */
  1,  /* scalar_load_cost.  */
  1,  /* scalar_store_cost.  */
  1,  /* vec_stmt_cost.  */
  1,  /* vec_to_scalar_cost.  */
  1,  /* scalar_to_vec_cost.  */
  1,  /* vec_align_load_cost.  */
  2,  /* vec_unalign_load_cost.  */
  1,  /* vec_store_cost.  */
  3,  /* cond_taken_branch_cost.  */
  1,  /* cond_not_taken_branch_cost.  */
};
struct processor_costs atom_cost = {
  COSTS_N_INSNS (1),  /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,  /* cost of a lea instruction */
  COSTS_N_INSNS (1),  /* variable shift costs */
  COSTS_N_INSNS (1),  /* constant shift costs */
  {COSTS_N_INSNS (3),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),  /* HI */
   COSTS_N_INSNS (3),  /* SI */
   COSTS_N_INSNS (4),  /* DI */
   COSTS_N_INSNS (2)},  /* other */
  0,  /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),  /* HI */
   COSTS_N_INSNS (42),  /* SI */
   COSTS_N_INSNS (74),  /* DI */
   COSTS_N_INSNS (74)},  /* other */
  COSTS_N_INSNS (1),  /* cost of movsx */
  COSTS_N_INSNS (1),  /* cost of movzx */
  8,  /* "large" insn */
  17,  /* MOVE_RATIO */
  2,  /* cost for loading QImode using movzbl */
  {4, 4, 4},  /* cost of loading integer registers in QImode,
		 HImode and SImode.  Relative to reg-reg move (2).  */
  {4, 4, 4},  /* cost of storing integer registers */
  4,  /* cost of reg,reg fld/fst */
  {12, 12, 12},  /* cost of loading fp registers in SFmode, DFmode and XFmode */
  {6, 6, 8},  /* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,  /* cost of moving MMX register */
  {8, 8},  /* cost of loading MMX registers in SImode and DImode */
  {8, 8},  /* cost of storing MMX registers in SImode and DImode */
  2,  /* cost of moving SSE register */
  {8, 8, 8},  /* cost of loading SSE registers in SImode, DImode and TImode */
  {8, 8, 8},  /* cost of storing SSE registers in SImode, DImode and TImode */
  5,  /* MMX or SSE register to integer */
  32,  /* size of l1 cache.  */
  256,  /* size of l2 cache.  */
  64,  /* size of prefetch block */
  6,  /* number of parallel prefetches */
  3,  /* Branch cost */
  COSTS_N_INSNS (8),  /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),  /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),  /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),  /* cost of FABS instruction.  */
  COSTS_N_INSNS (8),  /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),  /* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,  /* scalar_stmt_cost.  */
  1,  /* scalar_load_cost.  */
  1,  /* scalar_store_cost.  */
  1,  /* vec_stmt_cost.  */
  1,  /* vec_to_scalar_cost.  */
  1,  /* scalar_to_vec_cost.  */
  1,  /* vec_align_load_cost.  */
  2,  /* vec_unalign_load_cost.  */
  1,  /* vec_store_cost.  */
  3,  /* cond_taken_branch_cost.  */
  1,  /* cond_not_taken_branch_cost.  */
};
/* Generic64 should produce code tuned for Nocona and K8.  */
struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1),  /* cost of an add instruction */
  /* On all chips taken into consideration, lea is 2 cycles or more.  With
     this cost, however, our current implementation of synth_mult results in
     the use of unnecessary temporary registers, causing regressions on
     several SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1,  /* cost of a lea instruction */
  COSTS_N_INSNS (1),  /* variable shift costs */
  COSTS_N_INSNS (1),  /* constant shift costs */
  {COSTS_N_INSNS (3),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),  /* HI */
   COSTS_N_INSNS (3),  /* SI */
   COSTS_N_INSNS (4),  /* DI */
   COSTS_N_INSNS (2)},  /* other */
  0,  /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),  /* HI */
   COSTS_N_INSNS (42),  /* SI */
   COSTS_N_INSNS (74),  /* DI */
   COSTS_N_INSNS (74)},  /* other */
  COSTS_N_INSNS (1),  /* cost of movsx */
  COSTS_N_INSNS (1),  /* cost of movzx */
  8,  /* "large" insn */
  17,  /* MOVE_RATIO */
  4,  /* cost for loading QImode using movzbl */
  {4, 4, 4},  /* cost of loading integer registers in QImode,
		 HImode and SImode.  Relative to reg-reg move (2).  */
  {4, 4, 4},  /* cost of storing integer registers */
  4,  /* cost of reg,reg fld/fst */
  {12, 12, 12},  /* cost of loading fp registers in SFmode, DFmode and XFmode */
  {6, 6, 8},  /* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,  /* cost of moving MMX register */
  {8, 8},  /* cost of loading MMX registers in SImode and DImode */
  {8, 8},  /* cost of storing MMX registers in SImode and DImode */
  2,  /* cost of moving SSE register */
  {8, 8, 8},  /* cost of loading SSE registers in SImode, DImode and TImode */
  {8, 8, 8},  /* cost of storing SSE registers in SImode, DImode and TImode */
  5,  /* MMX or SSE register to integer */
  32,  /* size of l1 cache.  */
  512,  /* size of l2 cache.  */
  64,  /* size of prefetch block */
  6,  /* number of parallel prefetches */
  /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
     value is increased to the perhaps more appropriate value of 5.  */
  3,  /* Branch cost */
  COSTS_N_INSNS (8),  /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),  /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),  /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),  /* cost of FABS instruction.  */
  COSTS_N_INSNS (8),  /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),  /* cost of FSQRT instruction.  */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,  /* scalar_stmt_cost.  */
  1,  /* scalar_load_cost.  */
  1,  /* scalar_store_cost.  */
  1,  /* vec_stmt_cost.  */
  1,  /* vec_to_scalar_cost.  */
  1,  /* scalar_to_vec_cost.  */
  1,  /* vec_align_load_cost.  */
  2,  /* vec_unalign_load_cost.  */
  1,  /* vec_store_cost.  */
  3,  /* cond_taken_branch_cost.  */
  1,  /* cond_not_taken_branch_cost.  */
};
/* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona
   and K8.  */
struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1),  /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,  /* cost of a lea instruction */
  COSTS_N_INSNS (1),  /* variable shift costs */
  COSTS_N_INSNS (1),  /* constant shift costs */
  {COSTS_N_INSNS (3),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),  /* HI */
   COSTS_N_INSNS (3),  /* SI */
   COSTS_N_INSNS (4),  /* DI */
   COSTS_N_INSNS (2)},  /* other */
  0,  /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),  /* HI */
   COSTS_N_INSNS (42),  /* SI */
   COSTS_N_INSNS (74),  /* DI */
   COSTS_N_INSNS (74)},  /* other */
  COSTS_N_INSNS (1),  /* cost of movsx */
  COSTS_N_INSNS (1),  /* cost of movzx */
  8,  /* "large" insn */
  17,  /* MOVE_RATIO */
  4,  /* cost for loading QImode using movzbl */
  {4, 4, 4},  /* cost of loading integer registers in QImode,
		 HImode and SImode.  Relative to reg-reg move (2).  */
  {4, 4, 4},  /* cost of storing integer registers */
  4,  /* cost of reg,reg fld/fst */
  {12, 12, 12},  /* cost of loading fp registers in SFmode, DFmode and XFmode */
  {6, 6, 8},  /* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,  /* cost of moving MMX register */
  {8, 8},  /* cost of loading MMX registers in SImode and DImode */
  {8, 8},  /* cost of storing MMX registers in SImode and DImode */
  2,  /* cost of moving SSE register */
  {8, 8, 8},  /* cost of loading SSE registers in SImode, DImode and TImode */
  {8, 8, 8},  /* cost of storing SSE registers in SImode, DImode and TImode */
  5,  /* MMX or SSE register to integer */
  32,  /* size of l1 cache.  */
  256,  /* size of l2 cache.  */
  64,  /* size of prefetch block */
  6,  /* number of parallel prefetches */
  3,  /* Branch cost */
  COSTS_N_INSNS (8),  /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),  /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),  /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),  /* cost of FABS instruction.  */
  COSTS_N_INSNS (8),  /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),  /* cost of FSQRT instruction.  */
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,  /* scalar_stmt_cost.  */
  1,  /* scalar_load_cost.  */
  1,  /* scalar_store_cost.  */
  1,  /* vec_stmt_cost.  */
  1,  /* vec_to_scalar_cost.  */
  1,  /* scalar_to_vec_cost.  */
  1,  /* vec_align_load_cost.  */
  2,  /* vec_unalign_load_cost.  */
  1,  /* vec_store_cost.  */
  3,  /* cond_taken_branch_cost.  */
  1,  /* cond_not_taken_branch_cost.  */
};
const struct processor_costs *ix86_cost = &pentium_cost;
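/* Illustrative sketch (not part of the original source): ix86_cost does
   not stay pointed at pentium_cost; option processing later in this file
   repoints it at the table of the processor selected by -mtune (or at
   ix86_size_cost when optimizing for size), roughly as below.  The
   processor_target_table name and its cost field are assumptions about
   code that appears further down in this file.  */
#if 0
  if (optimize_size)
    ix86_cost = &ix86_size_cost;
  else
    ix86_cost = processor_target_table[ix86_tune].cost;
#endif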
/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
#define m_CORE2 (1<<PROCESSOR_CORE2)
#define m_ATOM (1<<PROCESSOR_ATOM)

#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
#define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10)

#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)

/* Generic instruction choice should be a common subset of the supported
   CPUs (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
1288 /* Feature tests against the various tunings. */
1289 unsigned char ix86_tune_features[X86_TUNE_LAST];
1291 /* Feature tests against the various tunings used to create ix86_tune_features
1292 based on the processor mask. */
1293 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1294 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1295 negatively, so enabling for Generic64 seems like good code size
1296 tradeoff. We can't enable it for 32bit generic because it does not
1297 work well with PPro base chips. */
1298 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,
1300 /* X86_TUNE_PUSH_MEMORY */
1301 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1302 | m_NOCONA | m_CORE2 | m_GENERIC,
1304 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1307 /* X86_TUNE_UNROLL_STRLEN */
1308 m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
1309 | m_CORE2 | m_GENERIC,
1311 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1312 m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
1314 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1315 on simulation result. But after P4 was made, no performance benefit
1316 was observed with branch hints. It also increases the code size.
1317 As a result, icc never generates branch hints. */
1320 /* X86_TUNE_DOUBLE_WITH_ADD */
1323 /* X86_TUNE_USE_SAHF */
1324 m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
1325 | m_NOCONA | m_CORE2 | m_GENERIC,
1327 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1328 partial dependencies. */
1329 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
1330 | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1332 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1333 register stalls on Generic32 compilation setting as well. However
1334 in current implementation the partial register stalls are not eliminated
1335 very well - they can be introduced via subregs synthesized by combine
1336 and can happen in caller/callee saving sequences. Because this option
1337 pays back little on PPro based chips and is in conflict with partial reg
1338 dependencies used by Athlon/P4 based chips, it is better to leave it off
1339 for generic32 for now. */
1342 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1343 m_CORE2 | m_GENERIC,
1345 /* X86_TUNE_USE_HIMODE_FIOP */
1346 m_386 | m_486 | m_K6_GEODE,
1348 /* X86_TUNE_USE_SIMODE_FIOP */
1349 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),
1351 /* X86_TUNE_USE_MOV0 */
1354 /* X86_TUNE_USE_CLTD */
1355 ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),
1357 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1360 /* X86_TUNE_SPLIT_LONG_MOVES */
1363 /* X86_TUNE_READ_MODIFY_WRITE */
1366 /* X86_TUNE_READ_MODIFY */
1369 /* X86_TUNE_PROMOTE_QIMODE */
1370 m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
1371 | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,
1373 /* X86_TUNE_FAST_PREFIX */
1374 ~(m_PENT | m_486 | m_386),
1376 /* X86_TUNE_SINGLE_STRINGOP */
1377 m_386 | m_PENT4 | m_NOCONA,
1379 /* X86_TUNE_QIMODE_MATH */
1382 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1383 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL this option
1384 might be considered for Generic32 if our scheme for avoiding partial
1385 stalls was more effective. */
1388 /* X86_TUNE_PROMOTE_QI_REGS */
1391 /* X86_TUNE_PROMOTE_HI_REGS */
1394 /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop. */
1395 m_ATOM | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA
1396 | m_CORE2 | m_GENERIC,
1398 /* X86_TUNE_ADD_ESP_8 */
1399 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_K6_GEODE | m_386
1400 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1402 /* X86_TUNE_SUB_ESP_4 */
1403 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2
1406 /* X86_TUNE_SUB_ESP_8 */
1407 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_386 | m_486
1408 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1410 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1411 for DFmode copies */
1412 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1413 | m_GENERIC | m_GEODE),
1415 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1416 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1418 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1419 conflict here in between PPro/Pentium4 based chips that thread 128bit
1420 SSE registers as single units versus K8 based chips that divide SSE
1421 registers to two 64bit halves. This knob promotes all store destinations
1422 to be 128bit to allow register renaming on 128bit SSE units, but usually
1423 results in one extra microop on 64bit SSE units. Experimental results
1424 shows that disabling this option on P4 brings over 20% SPECfp regression,
1425 while enabling it on K8 brings roughly 2.4% regression that can be partly
1426 masked by careful scheduling of moves. */
1427 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
1430 /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
1433 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1434 are resolved on SSE register parts instead of whole registers, so we may
1435 maintain just lower part of scalar values in proper format leaving the
1436 upper part undefined. */
1439 /* X86_TUNE_SSE_TYPELESS_STORES */
1442 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1443 m_PPRO | m_PENT4 | m_NOCONA,
1445 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1446 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1448 /* X86_TUNE_PROLOGUE_USING_MOVE */
1449 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1451 /* X86_TUNE_EPILOGUE_USING_MOVE */
1452 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1454 /* X86_TUNE_SHIFT1 */
1457 /* X86_TUNE_USE_FFREEP */
1460 /* X86_TUNE_INTER_UNIT_MOVES */
1461 ~(m_AMD_MULTIPLE | m_GENERIC),
1463 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
1466 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
1467 than 4 branch instructions in the 16 byte window. */
1468 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
1471 /* X86_TUNE_SCHEDULE */
1472 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
1475 /* X86_TUNE_USE_BT */
1476 m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,
1478 /* X86_TUNE_USE_INCDEC */
1479 ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),
1481 /* X86_TUNE_PAD_RETURNS */
1482 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
1484 /* X86_TUNE_EXT_80387_CONSTANTS */
1485 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
1486 | m_CORE2 | m_GENERIC,
1488 /* X86_TUNE_SHORTEN_X87_SSE */
1491 /* X86_TUNE_AVOID_VECTOR_DECODE */
1494 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have same latency for HImode
1495 and SImode multiply, but 386 and 486 do HImode multiply faster. */
1498 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
1499 vector path on AMD machines. */
1500 m_K8 | m_GENERIC64 | m_AMDFAM10,
1502 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
1504 m_K8 | m_GENERIC64 | m_AMDFAM10,
/* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
   than via MOV.  */
1510 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1511 but one byte longer. */
1514 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
1515 operand that cannot be represented using a modRM byte. The XOR
1516 replacement is long decoded, so this split helps here as well. */
/* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
   from FP to FP.  */
1521 m_AMDFAM10 | m_GENERIC,
1523 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1524 from integer to FP. */
1527 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1528 with a subsequent conditional jump instruction into a single
1529 compare-and-branch uop. */
1532 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
1533 will impact LEA instruction selection. */
1537 /* Feature tests against the various architecture variations. */
1538 unsigned char ix86_arch_features[X86_ARCH_LAST];
1540 /* Feature tests against the various architecture variations, used to create
1541 ix86_arch_features based on the processor mask. */
1542 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1543 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1544 ~(m_386 | m_486 | m_PENT | m_K6),
1546 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1549 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1552 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1555 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1559 static const unsigned int x86_accumulate_outgoing_args
1560 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1563 static const unsigned int x86_arch_always_fancy_math_387
1564 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1565 | m_NOCONA | m_CORE2 | m_GENERIC;
1567 static enum stringop_alg stringop_alg = no_stringop;
/* In case the average insn count for a single function invocation is
   lower than this constant, emit a fast (but longer) prologue and
   epilogue.  */
#define FAST_PROLOGUE_INSN_COUNT 20
1574 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
1575 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1576 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1577 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1579 /* Array of the smallest class containing reg number REGNO, indexed by
1580 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1582 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1584 /* ax, dx, cx, bx */
1585 AREG, DREG, CREG, BREG,
1586 /* si, di, bp, sp */
1587 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1589 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1590 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1593 /* flags, fpsr, fpcr, frame */
1594 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1596 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1599 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1602 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1603 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1604 /* SSE REX registers */
1605 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1609 /* The "default" register map used in 32bit mode. */
1611 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1613 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1614 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1615 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1616 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1617 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1618 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1619 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1622 /* The "default" register map used in 64bit mode. */
1624 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1626 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1627 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1628 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1629 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1630 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1631 8,9,10,11,12,13,14,15, /* extended integer registers */
1632 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1635 /* Define the register numbers to be used in Dwarf debugging information.
1636 The SVR4 reference port C compiler uses the following register numbers
1637 in its Dwarf output code:
1638 0 for %eax (gcc regno = 0)
1639 1 for %ecx (gcc regno = 2)
1640 2 for %edx (gcc regno = 1)
1641 3 for %ebx (gcc regno = 3)
1642 4 for %esp (gcc regno = 7)
1643 5 for %ebp (gcc regno = 6)
1644 6 for %esi (gcc regno = 4)
1645 7 for %edi (gcc regno = 5)
1646 The following three DWARF register numbers are never generated by
1647 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1648 believes these numbers have these meanings.
1649 8 for %eip (no gcc equivalent)
1650 9 for %eflags (gcc regno = 17)
1651 10 for %trapno (no gcc equivalent)
1652 It is not at all clear how we should number the FP stack registers
1653 for the x86 architecture. If the version of SDB on x86/svr4 were
1654 a bit less brain dead with respect to floating-point then we would
1655 have a precedent to follow with respect to DWARF register numbers
1656 for x86 FP registers, but the SDB on x86/svr4 is so completely
1657 broken with respect to FP registers that it is hardly worth thinking
1658 of it as something to strive for compatibility with.
1659 The version of x86/svr4 SDB I have at the moment does (partially)
1660 seem to believe that DWARF register number 11 is associated with
1661 the x86 register %st(0), but that's about all. Higher DWARF
1662 register numbers don't seem to be associated with anything in
particular, and even for DWARF regno 11, SDB only seems to
understand that it should say that a variable lives in %st(0) (when
1665 asked via an `=' command) if we said it was in DWARF regno 11,
1666 but SDB still prints garbage when asked for the value of the
1667 variable in question (via a `/' command).
1668 (Also note that the labels SDB prints for various FP stack regs
1669 when doing an `x' command are all wrong.)
1670 Note that these problems generally don't affect the native SVR4
1671 C compiler because it doesn't allow the use of -O with -g and
1672 because when it is *not* optimizing, it allocates a memory
1673 location for each floating-point variable, and the memory
1674 location is what gets described in the DWARF AT_location
1675 attribute for the variable in question.
1676 Regardless of the severe mental illness of the x86/svr4 SDB, we
1677 do something sensible here and we use the following DWARF
register numbers.  Note that these are all stack-top-relative
numbers:
1680 11 for %st(0) (gcc regno = 8)
1681 12 for %st(1) (gcc regno = 9)
1682 13 for %st(2) (gcc regno = 10)
1683 14 for %st(3) (gcc regno = 11)
1684 15 for %st(4) (gcc regno = 12)
1685 16 for %st(5) (gcc regno = 13)
1686 17 for %st(6) (gcc regno = 14)
1687 18 for %st(7) (gcc regno = 15)
1689 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1691 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1692 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1693 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1694 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1695 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1696 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1697 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
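/* Reading the map above: the index is the gcc register number and the
   value is the DWARF number, so e.g. %ebp (gcc regno 6) is emitted as
   DWARF register 5, and %st(2) (gcc regno 10) as DWARF register 13.  */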
1700 /* Test and compare insns in i386.md store the information needed to
1701 generate branch and scc insns here. */
1703 rtx ix86_compare_op0 = NULL_RTX;
1704 rtx ix86_compare_op1 = NULL_RTX;
1706 /* Define parameter passing and return registers. */
1708 static int const x86_64_int_parameter_registers[6] =
1710 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
1713 static int const x86_64_ms_abi_int_parameter_registers[4] =
1715 CX_REG, DX_REG, R8_REG, R9_REG
1718 static int const x86_64_int_return_registers[4] =
1720 AX_REG, DX_REG, DI_REG, SI_REG
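/* For illustration: for "long f (long a, long b)" the SysV ABI above
   passes A in %rdi and B in %rsi and returns the result in %rax, while
   the MS ABI passes A in %rcx and B in %rdx.  */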
1723 /* Define the structure for the machine field in struct function. */
1725 struct GTY(()) stack_local_entry {
1726 unsigned short mode;
1729 struct stack_local_entry *next;
/* Structure describing stack frame layout.
   Stack grows downward:

   [arguments]
                                              <- ARG_POINTER
   saved pc

   saved frame pointer if frame_pointer_needed
                                              <- HARD_FRAME_POINTER
   [saved regs]

   [padding0]

   [saved SSE regs]

   [padding1]          \
                        )
   [va_arg registers]  (
                        > to_allocate         <- FRAME_POINTER
   [frame]             (
                        )
   [padding2]          /
  */
1762 HOST_WIDE_INT frame;
1764 int outgoing_arguments_size;
1767 HOST_WIDE_INT to_allocate;
1768 /* The offsets relative to ARG_POINTER. */
1769 HOST_WIDE_INT frame_pointer_offset;
1770 HOST_WIDE_INT hard_frame_pointer_offset;
1771 HOST_WIDE_INT stack_pointer_offset;
1773 /* When save_regs_using_mov is set, emit prologue using
1774 move instead of push instructions. */
1775 bool save_regs_using_mov;
1778 /* Code model option. */
1779 enum cmodel ix86_cmodel;
1781 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1783 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1785 /* Which unit we are generating floating point math for. */
1786 enum fpmath_unit ix86_fpmath;
/* Which CPU we are scheduling for.  */
1789 enum attr_cpu ix86_schedule;
/* Which CPU we are optimizing for.  */
1792 enum processor_type ix86_tune;
1794 /* Which instruction set architecture to use. */
1795 enum processor_type ix86_arch;
/* True if the SSE prefetch instruction is not a NOP.  */
1798 int x86_prefetch_sse;
1800 /* ix86_regparm_string as a number */
1801 static int ix86_regparm;
1803 /* -mstackrealign option */
1804 extern int ix86_force_align_arg_pointer;
1805 static const char ix86_force_align_arg_pointer_string[]
1806 = "force_align_arg_pointer";
1808 static rtx (*ix86_gen_leave) (void);
1809 static rtx (*ix86_gen_pop1) (rtx);
1810 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1811 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1812 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
1813 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1814 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1815 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1817 /* Preferred alignment for stack boundary in bits. */
1818 unsigned int ix86_preferred_stack_boundary;
/* Alignment for incoming stack boundary in bits, as specified on the
   command line.  */
1822 static unsigned int ix86_user_incoming_stack_boundary;
1824 /* Default alignment for incoming stack boundary in bits. */
1825 static unsigned int ix86_default_incoming_stack_boundary;
1827 /* Alignment for incoming stack boundary in bits. */
1828 unsigned int ix86_incoming_stack_boundary;
/* The ABI used by the target.  */
1831 enum calling_abi ix86_abi;
1833 /* Values 1-5: see jump.c */
1834 int ix86_branch_cost;
/* Calling-ABI-specific va_list type nodes.  */
1837 static GTY(()) tree sysv_va_list_type_node;
1838 static GTY(()) tree ms_va_list_type_node;
1840 /* Variables which are this size or smaller are put in the data/bss
1841 or ldata/lbss sections. */
1843 int ix86_section_threshold = 65536;
1845 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1846 char internal_label_prefix[16];
1847 int internal_label_prefix_len;
1849 /* Fence to use after loop using movnt. */
/* Register class used for passing a given 64bit part of the argument.
   These represent classes as documented by the PS ABI, with the
   exception of the SSESF and SSEDF classes, which are basically the
   SSE class: gcc just uses an SF or DFmode move instead of a DImode
   move to avoid reformatting penalties.

   Similarly we play games with INTEGERSI_CLASS to use cheaper SImode
   moves whenever possible (the upper half then contains only
   padding).  */
1859 enum x86_64_reg_class
1862 X86_64_INTEGER_CLASS,
1863 X86_64_INTEGERSI_CLASS,
1870 X86_64_COMPLEX_X87_CLASS,
1874 #define MAX_CLASSES 4
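/* A worked example of the classification (per the psABI rules): a
   "struct { double x; double y; }" argument occupies two eightbytes,
   each classified as X86_64_SSEDF_CLASS, and is passed in two SSE
   registers, while an __int128 is classified as two
   X86_64_INTEGER_CLASS eightbytes and travels in a pair of
   general-purpose registers.  */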
/* Table of constants used by fldpi, fldln2, etc.  */
1877 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1878 static bool ext_80387_constants_init = 0;
1881 static struct machine_function * ix86_init_machine_status (void);
1882 static rtx ix86_function_value (const_tree, const_tree, bool);
1883 static bool ix86_function_value_regno_p (const unsigned int);
1884 static rtx ix86_static_chain (const_tree, bool);
1885 static int ix86_function_regparm (const_tree, const_tree);
1886 static void ix86_compute_frame_layout (struct ix86_frame *);
1887 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1889 static void ix86_add_new_builtins (int);
1890 static rtx ix86_expand_vec_perm_builtin (tree);
1892 enum ix86_function_specific_strings
1894 IX86_FUNCTION_SPECIFIC_ARCH,
1895 IX86_FUNCTION_SPECIFIC_TUNE,
1896 IX86_FUNCTION_SPECIFIC_FPMATH,
1897 IX86_FUNCTION_SPECIFIC_MAX
1900 static char *ix86_target_string (int, int, const char *, const char *,
1901 const char *, bool);
1902 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1903 static void ix86_function_specific_save (struct cl_target_option *);
1904 static void ix86_function_specific_restore (struct cl_target_option *);
1905 static void ix86_function_specific_print (FILE *, int,
1906 struct cl_target_option *);
1907 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
1908 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
1909 static bool ix86_can_inline_p (tree, tree);
1910 static void ix86_set_current_function (tree);
1911 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
1913 static enum calling_abi ix86_function_abi (const_tree);
1916 #ifndef SUBTARGET32_DEFAULT_CPU
1917 #define SUBTARGET32_DEFAULT_CPU "i386"
/* The svr4 ABI for the i386 says that records and unions are returned
   in memory.  */
1922 #ifndef DEFAULT_PCC_STRUCT_RETURN
1923 #define DEFAULT_PCC_STRUCT_RETURN 1
1926 /* Whether -mtune= or -march= were specified */
1927 static int ix86_tune_defaulted;
1928 static int ix86_arch_specified;
1930 /* Bit flags that specify the ISA we are compiling for. */
1931 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
1933 /* A mask of ix86_isa_flags that includes bit X if X
1934 was set or cleared on the command line. */
1935 static int ix86_isa_flags_explicit;
1937 /* Define a set of ISAs which are available when a given ISA is
1938 enabled. MMX and SSE ISAs are handled separately. */
1940 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
1941 #define OPTION_MASK_ISA_3DNOW_SET \
1942 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
1944 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
1945 #define OPTION_MASK_ISA_SSE2_SET \
1946 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
1947 #define OPTION_MASK_ISA_SSE3_SET \
1948 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
1949 #define OPTION_MASK_ISA_SSSE3_SET \
1950 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
1951 #define OPTION_MASK_ISA_SSE4_1_SET \
1952 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
1953 #define OPTION_MASK_ISA_SSE4_2_SET \
1954 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
1955 #define OPTION_MASK_ISA_AVX_SET \
1956 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
1957 #define OPTION_MASK_ISA_FMA_SET \
1958 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
/* SSE4 includes both SSE4.1 and SSE4.2.  -msse4 should be the same
   as -msse4.2.  */
1962 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
1964 #define OPTION_MASK_ISA_SSE4A_SET \
1965 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
1966 #define OPTION_MASK_ISA_FMA4_SET \
1967 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
1968 | OPTION_MASK_ISA_AVX_SET)
1969 #define OPTION_MASK_ISA_XOP_SET \
1970 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
1971 #define OPTION_MASK_ISA_LWP_SET \
1974 /* AES and PCLMUL need SSE2 because they use xmm registers */
1975 #define OPTION_MASK_ISA_AES_SET \
1976 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
1977 #define OPTION_MASK_ISA_PCLMUL_SET \
1978 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
1980 #define OPTION_MASK_ISA_ABM_SET \
1981 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
1983 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
1984 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
1985 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
1986 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
1987 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
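/* The cascade means a single option pulls in everything it depends on.
   For instance OPTION_MASK_ISA_SSE4_1_SET expands to

     OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3
     | OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE

   so -msse4.1 implicitly enables SSSE3, SSE3, SSE2 and SSE as well.  */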
1989 /* Define a set of ISAs which aren't available when a given ISA is
1990 disabled. MMX and SSE ISAs are handled separately. */
1992 #define OPTION_MASK_ISA_MMX_UNSET \
1993 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
1994 #define OPTION_MASK_ISA_3DNOW_UNSET \
1995 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
1996 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
1998 #define OPTION_MASK_ISA_SSE_UNSET \
1999 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
2000 #define OPTION_MASK_ISA_SSE2_UNSET \
2001 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
2002 #define OPTION_MASK_ISA_SSE3_UNSET \
2003 (OPTION_MASK_ISA_SSE3 \
2004 | OPTION_MASK_ISA_SSSE3_UNSET \
2005 | OPTION_MASK_ISA_SSE4A_UNSET )
2006 #define OPTION_MASK_ISA_SSSE3_UNSET \
2007 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2008 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2009 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2010 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2011 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2012 #define OPTION_MASK_ISA_AVX_UNSET \
2013 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2014 | OPTION_MASK_ISA_FMA4_UNSET)
2015 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
/* SSE4 includes both SSE4.1 and SSE4.2.  -mno-sse4 should be the same
   as -mno-sse4.1.  */
2019 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2021 #define OPTION_MASK_ISA_SSE4A_UNSET \
2022 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2024 #define OPTION_MASK_ISA_FMA4_UNSET \
2025 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2026 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2027 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2029 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2030 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2031 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2032 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2033 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2034 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2035 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2036 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
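/* The UNSET masks run the dependencies in the other direction:
   disabling a base ISA also disables everything built on top of it.
   For example -mno-sse3 clears, via OPTION_MASK_ISA_SSE3_UNSET, not
   just SSE3 but also SSSE3, SSE4.1, SSE4.2, SSE4A, AVX, FMA, FMA4 and
   XOP.  */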
2038 /* Vectorization library interface and handlers. */
2039 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
2040 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2041 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2043 /* Processor target table, indexed by processor number */
2046 const struct processor_costs *cost; /* Processor costs */
2047 const int align_loop; /* Default alignments. */
2048 const int align_loop_max_skip;
2049 const int align_jump;
2050 const int align_jump_max_skip;
2051 const int align_func;
2054 static const struct ptt processor_target_table[PROCESSOR_max] =
2056 {&i386_cost, 4, 3, 4, 3, 4},
2057 {&i486_cost, 16, 15, 16, 15, 16},
2058 {&pentium_cost, 16, 7, 16, 7, 16},
2059 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2060 {&geode_cost, 0, 0, 0, 0, 0},
2061 {&k6_cost, 32, 7, 32, 7, 32},
2062 {&athlon_cost, 16, 7, 16, 7, 16},
2063 {&pentium4_cost, 0, 0, 0, 0, 0},
2064 {&k8_cost, 16, 7, 16, 7, 16},
2065 {&nocona_cost, 0, 0, 0, 0, 0},
2066 {&core2_cost, 16, 10, 16, 10, 16},
2067 {&generic32_cost, 16, 7, 16, 7, 16},
2068 {&generic64_cost, 16, 10, 16, 10, 16},
2069 {&amdfam10_cost, 32, 24, 32, 7, 32},
2070 {&atom_cost, 16, 7, 16, 7, 16}
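/* For example, with -mtune=core2 loops and jumps are aligned to 16
   bytes with at most 10 bytes of padding skipped; a 0 entry (pentium4,
   nocona, geode) leaves the alignment decision to the generic
   defaults.  */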
2073 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2099 /* Implement TARGET_HANDLE_OPTION. */
2102 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
2109 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2110 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2114 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2115 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2122 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2123 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2127 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2128 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2138 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2139 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2143 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2144 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2151 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2152 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2156 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2157 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2164 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2165 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2169 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2170 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2177 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2178 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2182 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2183 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2190 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2191 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2195 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2196 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2203 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2204 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2208 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2209 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2216 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2217 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2221 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2222 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2229 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2230 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2234 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2235 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2240 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2241 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2245 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2246 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2252 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2253 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2257 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2258 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2265 ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2266 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2270 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2271 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2278 ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2279 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2283 ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2284 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2291 ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2292 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2296 ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2297 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2304 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2305 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2309 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2310 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2317 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2318 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2322 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2323 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2330 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2331 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2335 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2336 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2343 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2344 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2348 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2349 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2356 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2357 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2361 ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2362 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2369 ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2370 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2374 ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2375 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2382 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2383 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2387 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2388 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2395 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2396 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2400 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2401 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
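/* Every case above follows the same two-step pattern: adjust
   ix86_isa_flags through the SET/UNSET cascades, then record the same
   bits in ix86_isa_flags_explicit so that a later -march= default
   cannot silently override an option the user spelled out.  */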
2410 /* Return a string that documents the current -m options. The caller is
2411 responsible for freeing the string. */
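/* For example (illustrative), for -march=core2 -mfpmath=sse on a
   64-bit target the result resembles
   "-march=core2 -mtune=core2 -m64 -mssse3 -msse3 -msse2 -msse -mmmx
   -mfpmath=sse".  */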
2414 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2415 const char *fpmath, bool add_nl_p)
2417 struct ix86_target_opts
2419 const char *option; /* option string */
2420 int mask; /* isa mask options */
/* This table is ordered so that options like -msse4.2 that imply
   preceding options will match those first.  */
2425 static struct ix86_target_opts isa_opts[] =
2427 { "-m64", OPTION_MASK_ISA_64BIT },
2428 { "-mfma4", OPTION_MASK_ISA_FMA4 },
2429 { "-mfma", OPTION_MASK_ISA_FMA },
2430 { "-mxop", OPTION_MASK_ISA_XOP },
2431 { "-mlwp", OPTION_MASK_ISA_LWP },
2432 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2433 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2434 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2435 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2436 { "-msse3", OPTION_MASK_ISA_SSE3 },
2437 { "-msse2", OPTION_MASK_ISA_SSE2 },
2438 { "-msse", OPTION_MASK_ISA_SSE },
2439 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2440 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2441 { "-mmmx", OPTION_MASK_ISA_MMX },
2442 { "-mabm", OPTION_MASK_ISA_ABM },
2443 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2444 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2445 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2446 { "-maes", OPTION_MASK_ISA_AES },
2447 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2451 static struct ix86_target_opts flag_opts[] =
2453 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2454 { "-m80387", MASK_80387 },
2455 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2456 { "-malign-double", MASK_ALIGN_DOUBLE },
2457 { "-mcld", MASK_CLD },
2458 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2459 { "-mieee-fp", MASK_IEEE_FP },
2460 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2461 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2462 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2463 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2464 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2465 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2466 { "-mno-red-zone", MASK_NO_RED_ZONE },
2467 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2468 { "-mrecip", MASK_RECIP },
2469 { "-mrtd", MASK_RTD },
2470 { "-msseregparm", MASK_SSEREGPARM },
2471 { "-mstack-arg-probe", MASK_STACK_PROBE },
2472 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2475 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2478 char target_other[40];
2487 memset (opts, '\0', sizeof (opts));
2489 /* Add -march= option. */
2492 opts[num][0] = "-march=";
2493 opts[num++][1] = arch;
2496 /* Add -mtune= option. */
2499 opts[num][0] = "-mtune=";
2500 opts[num++][1] = tune;
/* Pick out the options in isa_opts.  */
2504 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2506 if ((isa & isa_opts[i].mask) != 0)
2508 opts[num++][0] = isa_opts[i].option;
2509 isa &= ~ isa_opts[i].mask;
2513 if (isa && add_nl_p)
2515 opts[num++][0] = isa_other;
2516 sprintf (isa_other, "(other isa: %#x)", isa);
2519 /* Add flag options. */
2520 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2522 if ((flags & flag_opts[i].mask) != 0)
2524 opts[num++][0] = flag_opts[i].option;
2525 flags &= ~ flag_opts[i].mask;
2529 if (flags && add_nl_p)
2531 opts[num++][0] = target_other;
2532 sprintf (target_other, "(other flags: %#x)", flags);
2535 /* Add -fpmath= option. */
2538 opts[num][0] = "-mfpmath=";
2539 opts[num++][1] = fpmath;
2546 gcc_assert (num < ARRAY_SIZE (opts));
2548 /* Size the string. */
2550 sep_len = (add_nl_p) ? 3 : 1;
2551 for (i = 0; i < num; i++)
2554 for (j = 0; j < 2; j++)
2556 len += strlen (opts[i][j]);
2559 /* Build the string. */
2560 ret = ptr = (char *) xmalloc (len);
2563 for (i = 0; i < num; i++)
2567 for (j = 0; j < 2; j++)
2568 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2575 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2583 for (j = 0; j < 2; j++)
2586 memcpy (ptr, opts[i][j], len2[j]);
2588 line_len += len2[j];
2593 gcc_assert (ret + len >= ptr);
/* Function that is callable from the debugger to print the current
   options.  */
2601 ix86_debug_options (void)
2603 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2604 ix86_arch_string, ix86_tune_string,
2605 ix86_fpmath_string, true);
2609 fprintf (stderr, "%s\n\n", opts);
2613 fputs ("<no options>\n\n", stderr);
2618 /* Sometimes certain combinations of command options do not make
2619 sense on a particular target machine. You can define a macro
2620 `OVERRIDE_OPTIONS' to take account of this. This macro, if
defined, is executed once just after all the command options have
been parsed.
2624 Don't use this macro to turn on various extra optimizations for
2625 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2628 override_options (bool main_args_p)
2631 unsigned int ix86_arch_mask, ix86_tune_mask;
2632 const bool ix86_tune_specified = (ix86_tune_string != NULL);
2637 /* Comes from final.c -- no real reason to change it. */
2638 #define MAX_CODE_ALIGN 16
2646 PTA_PREFETCH_SSE = 1 << 4,
2648 PTA_3DNOW_A = 1 << 6,
2652 PTA_POPCNT = 1 << 10,
2654 PTA_SSE4A = 1 << 12,
2655 PTA_NO_SAHF = 1 << 13,
2656 PTA_SSE4_1 = 1 << 14,
2657 PTA_SSE4_2 = 1 << 15,
2659 PTA_PCLMUL = 1 << 17,
2662 PTA_MOVBE = 1 << 20,
2670 const char *const name; /* processor name or nickname. */
2671 const enum processor_type processor;
2672 const enum attr_cpu schedule;
2673 const unsigned /*enum pta_flags*/ flags;
2675 const processor_alias_table[] =
2677 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2678 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2679 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2680 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2681 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2682 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2683 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2684 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2685 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2686 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2687 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2688 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2689 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2691 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2693 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2694 PTA_MMX | PTA_SSE | PTA_SSE2},
2695 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2696 PTA_MMX |PTA_SSE | PTA_SSE2},
2697 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2698 PTA_MMX | PTA_SSE | PTA_SSE2},
2699 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2700 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2701 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2702 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2703 | PTA_CX16 | PTA_NO_SAHF},
2704 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2705 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2706 | PTA_SSSE3 | PTA_CX16},
2707 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2708 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2709 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2710 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2711 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A |PTA_PREFETCH_SSE},
2712 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2713 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2714 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2715 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2716 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2717 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2718 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2719 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2720 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2721 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2722 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2723 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2724 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2725 {"x86-64", PROCESSOR_K8, CPU_K8,
2726 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2727 {"k8", PROCESSOR_K8, CPU_K8,
2728 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2729 | PTA_SSE2 | PTA_NO_SAHF},
2730 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2731 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2732 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2733 {"opteron", PROCESSOR_K8, CPU_K8,
2734 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2735 | PTA_SSE2 | PTA_NO_SAHF},
2736 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2737 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2738 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2739 {"athlon64", PROCESSOR_K8, CPU_K8,
2740 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2741 | PTA_SSE2 | PTA_NO_SAHF},
2742 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2743 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2744 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2745 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2746 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2747 | PTA_SSE2 | PTA_NO_SAHF},
2748 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2749 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2750 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2751 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2752 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2753 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2754 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2755 0 /* flags are only used for -march switch. */ },
2756 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2757 PTA_64BIT /* flags are only used for -march switch. */ },
2760 int const pta_size = ARRAY_SIZE (processor_alias_table);
2762 /* Set up prefix/suffix so the error messages refer to either the command
2763 line argument, or the attribute(target). */
2772 prefix = "option(\"";
2777 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2778 SUBTARGET_OVERRIDE_OPTIONS;
2781 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2782 SUBSUBTARGET_OVERRIDE_OPTIONS;
2785 /* -fPIC is the default for x86_64. */
2786 if (TARGET_MACHO && TARGET_64BIT)
2789 /* Set the default values for switches whose default depends on TARGET_64BIT
2790 in case they weren't overwritten by command line options. */
2793 /* Mach-O doesn't support omitting the frame pointer for now. */
2794 if (flag_omit_frame_pointer == 2)
2795 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2796 if (flag_asynchronous_unwind_tables == 2)
2797 flag_asynchronous_unwind_tables = 1;
2798 if (flag_pcc_struct_return == 2)
2799 flag_pcc_struct_return = 0;
2803 if (flag_omit_frame_pointer == 2)
2804 flag_omit_frame_pointer = 0;
2805 if (flag_asynchronous_unwind_tables == 2)
2806 flag_asynchronous_unwind_tables = 0;
2807 if (flag_pcc_struct_return == 2)
2808 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2811 /* Need to check -mtune=generic first. */
2812 if (ix86_tune_string)
2814 if (!strcmp (ix86_tune_string, "generic")
2815 || !strcmp (ix86_tune_string, "i686")
2816 /* As special support for cross compilers we read -mtune=native
as -mtune=generic.  With native compilers we won't see
-mtune=native, as it will have been replaced by the driver.  */
2819 || !strcmp (ix86_tune_string, "native"))
2822 ix86_tune_string = "generic64";
2824 ix86_tune_string = "generic32";
2826 /* If this call is for setting the option attribute, allow the
2827 generic32/generic64 that was previously set. */
2828 else if (!main_args_p
2829 && (!strcmp (ix86_tune_string, "generic32")
2830 || !strcmp (ix86_tune_string, "generic64")))
2832 else if (!strncmp (ix86_tune_string, "generic", 7))
2833 error ("bad value (%s) for %stune=%s %s",
2834 ix86_tune_string, prefix, suffix, sw);
2835 else if (!strcmp (ix86_tune_string, "x86-64"))
2836 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2837 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2838 prefix, suffix, prefix, suffix, prefix, suffix);
2842 if (ix86_arch_string)
2843 ix86_tune_string = ix86_arch_string;
2844 if (!ix86_tune_string)
2846 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2847 ix86_tune_defaulted = 1;
2850 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2851 need to use a sensible tune option. */
2852 if (!strcmp (ix86_tune_string, "generic")
2853 || !strcmp (ix86_tune_string, "x86-64")
2854 || !strcmp (ix86_tune_string, "i686"))
2857 ix86_tune_string = "generic64";
2859 ix86_tune_string = "generic32";
2863 if (ix86_stringop_string)
2865 if (!strcmp (ix86_stringop_string, "rep_byte"))
2866 stringop_alg = rep_prefix_1_byte;
2867 else if (!strcmp (ix86_stringop_string, "libcall"))
2868 stringop_alg = libcall;
2869 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2870 stringop_alg = rep_prefix_4_byte;
2871 else if (!strcmp (ix86_stringop_string, "rep_8byte")
2873 /* rep; movq isn't available in 32-bit code. */
2874 stringop_alg = rep_prefix_8_byte;
2875 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2876 stringop_alg = loop_1_byte;
2877 else if (!strcmp (ix86_stringop_string, "loop"))
2878 stringop_alg = loop;
2879 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2880 stringop_alg = unrolled_loop;
2882 error ("bad value (%s) for %sstringop-strategy=%s %s",
2883 ix86_stringop_string, prefix, suffix, sw);
2886 if (!ix86_arch_string)
2887 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
2889 ix86_arch_specified = 1;
2891 /* Validate -mabi= value. */
2892 if (ix86_abi_string)
2894 if (strcmp (ix86_abi_string, "sysv") == 0)
2895 ix86_abi = SYSV_ABI;
2896 else if (strcmp (ix86_abi_string, "ms") == 0)
2899 error ("unknown ABI (%s) for %sabi=%s %s",
2900 ix86_abi_string, prefix, suffix, sw);
2903 ix86_abi = DEFAULT_ABI;
2905 if (ix86_cmodel_string != 0)
2907 if (!strcmp (ix86_cmodel_string, "small"))
2908 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2909 else if (!strcmp (ix86_cmodel_string, "medium"))
2910 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
2911 else if (!strcmp (ix86_cmodel_string, "large"))
2912 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
2914 error ("code model %s does not support PIC mode", ix86_cmodel_string);
2915 else if (!strcmp (ix86_cmodel_string, "32"))
2916 ix86_cmodel = CM_32;
2917 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
2918 ix86_cmodel = CM_KERNEL;
2920 error ("bad value (%s) for %scmodel=%s %s",
2921 ix86_cmodel_string, prefix, suffix, sw);
2925 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
2926 use of rip-relative addressing. This eliminates fixups that
2927 would otherwise be needed if this object is to be placed in a
2928 DLL, and is essentially just as efficient as direct addressing. */
2929 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
2930 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
2931 else if (TARGET_64BIT)
2932 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2934 ix86_cmodel = CM_32;
2936 if (ix86_asm_string != 0)
2939 && !strcmp (ix86_asm_string, "intel"))
2940 ix86_asm_dialect = ASM_INTEL;
2941 else if (!strcmp (ix86_asm_string, "att"))
2942 ix86_asm_dialect = ASM_ATT;
2944 error ("bad value (%s) for %sasm=%s %s",
2945 ix86_asm_string, prefix, suffix, sw);
2947 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
2948 error ("code model %qs not supported in the %s bit mode",
2949 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
2950 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
2951 sorry ("%i-bit mode not compiled in",
2952 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
2954 for (i = 0; i < pta_size; i++)
2955 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
2957 ix86_schedule = processor_alias_table[i].schedule;
2958 ix86_arch = processor_alias_table[i].processor;
2959 /* Default cpu tuning to the architecture. */
2960 ix86_tune = ix86_arch;
2962 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2963 error ("CPU you selected does not support x86-64 "
2966 if (processor_alias_table[i].flags & PTA_MMX
2967 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
2968 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
2969 if (processor_alias_table[i].flags & PTA_3DNOW
2970 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
2971 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
2972 if (processor_alias_table[i].flags & PTA_3DNOW_A
2973 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
2974 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
2975 if (processor_alias_table[i].flags & PTA_SSE
2976 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
2977 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
2978 if (processor_alias_table[i].flags & PTA_SSE2
2979 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2980 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
2981 if (processor_alias_table[i].flags & PTA_SSE3
2982 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
2983 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
2984 if (processor_alias_table[i].flags & PTA_SSSE3
2985 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
2986 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
2987 if (processor_alias_table[i].flags & PTA_SSE4_1
2988 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
2989 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
2990 if (processor_alias_table[i].flags & PTA_SSE4_2
2991 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
2992 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
2993 if (processor_alias_table[i].flags & PTA_AVX
2994 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
2995 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
2996 if (processor_alias_table[i].flags & PTA_FMA
2997 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
2998 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
2999 if (processor_alias_table[i].flags & PTA_SSE4A
3000 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3001 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3002 if (processor_alias_table[i].flags & PTA_FMA4
3003 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3004 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3005 if (processor_alias_table[i].flags & PTA_XOP
3006 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3007 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3008 if (processor_alias_table[i].flags & PTA_LWP
3009 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3010 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3011 if (processor_alias_table[i].flags & PTA_ABM
3012 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3013 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3014 if (processor_alias_table[i].flags & PTA_CX16
3015 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3016 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3017 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3018 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3019 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3020 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3021 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3022 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3023 if (processor_alias_table[i].flags & PTA_MOVBE
3024 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3025 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3026 if (processor_alias_table[i].flags & PTA_AES
3027 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3028 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3029 if (processor_alias_table[i].flags & PTA_PCLMUL
3030 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3031 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3032 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3033 x86_prefetch_sse = true;
3038 if (!strcmp (ix86_arch_string, "generic"))
3039 error ("generic CPU can be used only for %stune=%s %s",
3040 prefix, suffix, sw);
3041 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3042 error ("bad value (%s) for %sarch=%s %s",
3043 ix86_arch_string, prefix, suffix, sw);
3045 ix86_arch_mask = 1u << ix86_arch;
3046 for (i = 0; i < X86_ARCH_LAST; ++i)
3047 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
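/* With -march=core2, for instance, ix86_arch_mask is
   1u << PROCESSOR_CORE2, so an X86_ARCH_* feature is enabled exactly
   when its initializer above includes m_CORE2; the tuning masks below
   work the same way.  */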
3049 for (i = 0; i < pta_size; i++)
3050 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3052 ix86_schedule = processor_alias_table[i].schedule;
3053 ix86_tune = processor_alias_table[i].processor;
3054 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3056 if (ix86_tune_defaulted)
3058 ix86_tune_string = "x86-64";
3059 for (i = 0; i < pta_size; i++)
3060 if (! strcmp (ix86_tune_string,
3061 processor_alias_table[i].name))
3063 ix86_schedule = processor_alias_table[i].schedule;
3064 ix86_tune = processor_alias_table[i].processor;
3067 error ("CPU you selected does not support x86-64 "
3070 /* Intel CPUs have always interpreted SSE prefetch instructions as
3071 NOPs; so, we can enable SSE prefetch instructions even when
3072 -mtune (rather than -march) points us to a processor that has them.
3073 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3074 higher processors. */
3076 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3077 x86_prefetch_sse = true;
3081 if (ix86_tune_specified && i == pta_size)
3082 error ("bad value (%s) for %stune=%s %s",
3083 ix86_tune_string, prefix, suffix, sw);
3085 ix86_tune_mask = 1u << ix86_tune;
3086 for (i = 0; i < X86_TUNE_LAST; ++i)
3087 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3090 ix86_cost = &ix86_size_cost;
3092 ix86_cost = processor_target_table[ix86_tune].cost;
3094 /* Arrange to set up i386_stack_locals for all functions. */
3095 init_machine_status = ix86_init_machine_status;
3097 /* Validate -mregparm= value. */
3098 if (ix86_regparm_string)
3101 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3102 i = atoi (ix86_regparm_string);
3103 if (i < 0 || i > REGPARM_MAX)
3104 error ("%sregparm=%d%s is not between 0 and %d",
3105 prefix, i, suffix, REGPARM_MAX);
3110 ix86_regparm = REGPARM_MAX;
3112 /* If the user has provided any of the -malign-* options,
3113 warn and use that value only if -falign-* is not set.
3114 Remove this code in GCC 3.2 or later. */
3115 if (ix86_align_loops_string)
3117 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3118 prefix, suffix, suffix);
3119 if (align_loops == 0)
3121 i = atoi (ix86_align_loops_string);
3122 if (i < 0 || i > MAX_CODE_ALIGN)
3123 error ("%salign-loops=%d%s is not between 0 and %d",
3124 prefix, i, suffix, MAX_CODE_ALIGN);
3126 align_loops = 1 << i;
3130 if (ix86_align_jumps_string)
3132 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3133 prefix, suffix, suffix);
3134 if (align_jumps == 0)
3136 i = atoi (ix86_align_jumps_string);
3137 if (i < 0 || i > MAX_CODE_ALIGN)
3138 error ("%salign-loops=%d%s is not between 0 and %d",
3139 prefix, i, suffix, MAX_CODE_ALIGN);
3141 align_jumps = 1 << i;
3145 if (ix86_align_funcs_string)
3147 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3148 prefix, suffix, suffix);
3149 if (align_functions == 0)
3151 i = atoi (ix86_align_funcs_string);
3152 if (i < 0 || i > MAX_CODE_ALIGN)
3153 error ("%salign-loops=%d%s is not between 0 and %d",
3154 prefix, i, suffix, MAX_CODE_ALIGN);
3156 align_functions = 1 << i;
3160 /* Default align_* from the processor table. */
3161 if (align_loops == 0)
3163 align_loops = processor_target_table[ix86_tune].align_loop;
3164 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3166 if (align_jumps == 0)
3168 align_jumps = processor_target_table[ix86_tune].align_jump;
3169 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3171 if (align_functions == 0)
3173 align_functions = processor_target_table[ix86_tune].align_func;
3176 /* Validate -mbranch-cost= value, or provide default. */
3177 ix86_branch_cost = ix86_cost->branch_cost;
3178 if (ix86_branch_cost_string)
3180 i = atoi (ix86_branch_cost_string);
3182 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3184 ix86_branch_cost = i;
3186 if (ix86_section_threshold_string)
3188 i = atoi (ix86_section_threshold_string);
3190 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3192 ix86_section_threshold = i;
3195 if (ix86_tls_dialect_string)
3197 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3198 ix86_tls_dialect = TLS_DIALECT_GNU;
3199 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3200 ix86_tls_dialect = TLS_DIALECT_GNU2;
3202 error ("bad value (%s) for %stls-dialect=%s %s",
3203 ix86_tls_dialect_string, prefix, suffix, sw);
3206 if (ix87_precision_string)
3208 i = atoi (ix87_precision_string);
3209 if (i != 32 && i != 64 && i != 80)
3210 error ("pc%d is not valid precision setting (32, 64 or 80)", i);
3215 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3217 /* Enable by default the SSE and MMX builtins. Do allow the user to
3218 explicitly disable any of these. In particular, disabling SSE and
3219 MMX for kernel code is extremely useful. */
3220 if (!ix86_arch_specified)
3222 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3223 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3226 warning (0, "%srtd%s is ignored in 64bit mode", prefix, suffix);
3230 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3232 if (!ix86_arch_specified)
3234 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
/* The i386 ABI does not specify a red zone.  It still makes sense to
   use it when the programmer takes care to keep the stack from being
   destroyed.  */
3238 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3239 target_flags |= MASK_NO_RED_ZONE;
3242 /* Keep nonleaf frame pointers. */
3243 if (flag_omit_frame_pointer)
3244 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3245 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3246 flag_omit_frame_pointer = 1;
3248 /* If we're doing fast math, we don't care about comparison order
3249 wrt NaNs. This lets us use a shorter comparison sequence. */
3250 if (flag_finite_math_only)
3251 target_flags &= ~MASK_IEEE_FP;
3253 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3254 since the insns won't need emulation. */
3255 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3256 target_flags &= ~MASK_NO_FANCY_MATH_387;
3258 /* Likewise, if the target doesn't have a 387, or we've specified
3259 software floating point, don't use 387 inline intrinsics. */
3261 target_flags |= MASK_NO_FANCY_MATH_387;
3263 /* Turn on MMX builtins for -msse. */
3266 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3267 x86_prefetch_sse = true;
3270 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3271 if (TARGET_SSE4_2 || TARGET_ABM)
3272 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3274 /* Validate -mpreferred-stack-boundary= value or default it to
3275 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3276 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3277 if (ix86_preferred_stack_boundary_string)
3279 i = atoi (ix86_preferred_stack_boundary_string);
3280 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3281 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3282 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3284 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
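/* The option value is the log2 of a byte count, so e.g.
   -mpreferred-stack-boundary=4 yields (1 << 4) * BITS_PER_UNIT == 128,
   i.e. a 16-byte boundary.  */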
3287 /* Set the default value for -mstackrealign. */
3288 if (ix86_force_align_arg_pointer == -1)
3289 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3291 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3293 /* Validate -mincoming-stack-boundary= value or default it to
3294 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3295 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3296 if (ix86_incoming_stack_boundary_string)
3298 i = atoi (ix86_incoming_stack_boundary_string);
3299 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3300 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3301 i, TARGET_64BIT ? 4 : 2);
3304 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3305 ix86_incoming_stack_boundary
3306 = ix86_user_incoming_stack_boundary;
3310 /* Accept -msseregparm only if at least SSE support is enabled. */
3311 if (TARGET_SSEREGPARM
3313 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3315 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3316 if (ix86_fpmath_string != 0)
3318 if (! strcmp (ix86_fpmath_string, "387"))
3319 ix86_fpmath = FPMATH_387;
3320 else if (! strcmp (ix86_fpmath_string, "sse"))
3324 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3325 ix86_fpmath = FPMATH_387;
3328 ix86_fpmath = FPMATH_SSE;
3330 else if (! strcmp (ix86_fpmath_string, "387,sse")
3331 || ! strcmp (ix86_fpmath_string, "387+sse")
3332 || ! strcmp (ix86_fpmath_string, "sse,387")
3333 || ! strcmp (ix86_fpmath_string, "sse+387")
3334 || ! strcmp (ix86_fpmath_string, "both"))
3338 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3339 ix86_fpmath = FPMATH_387;
3341 else if (!TARGET_80387)
3343 warning (0, "387 instruction set disabled, using SSE arithmetics");
3344 ix86_fpmath = FPMATH_SSE;
3347 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3350 error ("bad value (%s) for %sfpmath=%s %s",
3351 ix86_fpmath_string, prefix, suffix, sw);
3354 /* If the i387 is disabled, then do not return values in it. */
3356 target_flags &= ~MASK_FLOAT_RETURNS;
/* Use an external vectorized library for vectorizing intrinsics.  */
3359 if (ix86_veclibabi_string)
3361 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3362 ix86_veclib_handler = ix86_veclibabi_svml;
3363 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3364 ix86_veclib_handler = ix86_veclibabi_acml;
3366 error ("unknown vectorization library ABI type (%s) for "
3367 "%sveclibabi=%s %s", ix86_veclibabi_string,
3368 prefix, suffix, sw);
3371 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3372 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3374 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3376 /* ??? Unwind info is not correct around the CFG unless either a frame
3377 pointer is present or M_A_O_A is set. Fixing this requires rewriting
unwind info generation to be aware of the CFG and propagating states
around edges.  */
3380 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3381 || flag_exceptions || flag_non_call_exceptions)
3382 && flag_omit_frame_pointer
3383 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3385 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3386 warning (0, "unwind tables currently require either a frame pointer "
3387 "or %saccumulate-outgoing-args%s for correctness",
3389 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3392 /* If stack probes are required, the space used for large function
3393 arguments on the stack must also be probed, so enable
3394 -maccumulate-outgoing-args so this happens in the prologue. */
3395 if (TARGET_STACK_PROBE
3396 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3398 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3399 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3400 "for correctness", prefix, suffix);
3401 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
/* For sane SSE instruction set generation we need the fcomi
   instruction.  It is safe to enable all CMOVE instructions.  */
3409 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3412 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3413 p = strchr (internal_label_prefix, 'X');
3414 internal_label_prefix_len = p - internal_label_prefix;
/* When a scheduling description is not available, disable the
   scheduler pass so it won't slow down compilation and make x87 code
   slower.  */
3420 if (!TARGET_SCHEDULE)
3421 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3423 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3424 set_param_value ("simultaneous-prefetches",
3425 ix86_cost->simultaneous_prefetches);
3426 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3427 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3428 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3429 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3430 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3431 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
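/* Hedged example (assuming the usual --param driver syntax): the
   PARAM_SET_P guards above mean an explicit command-line value wins
   over the per-CPU cost-table defaults, e.g.

       gcc -O2 --param l2-cache-size=512 --param l1-cache-line-size=64 x.c

   while without such options the values come from ix86_cost.  */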
3433 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3434 can be optimized to ap = __builtin_next_arg (0). */
3436 targetm.expand_builtin_va_start = NULL;
3440 ix86_gen_leave = gen_leave_rex64;
3441 ix86_gen_pop1 = gen_popdi1;
3442 ix86_gen_add3 = gen_adddi3;
3443 ix86_gen_sub3 = gen_subdi3;
3444 ix86_gen_sub3_carry = gen_subdi3_carry;
3445 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3446 ix86_gen_monitor = gen_sse3_monitor64;
3447 ix86_gen_andsp = gen_anddi3;
3451 ix86_gen_leave = gen_leave;
3452 ix86_gen_pop1 = gen_popsi1;
3453 ix86_gen_add3 = gen_addsi3;
3454 ix86_gen_sub3 = gen_subsi3;
3455 ix86_gen_sub3_carry = gen_subsi3_carry;
3456 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3457 ix86_gen_monitor = gen_sse3_monitor;
3458 ix86_gen_andsp = gen_andsi3;
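/* Minimal sketch of why this indirection exists (the call site below
   is hypothetical): expanders can emit word-size-agnostic RTL, e.g.

       emit_insn (ix86_gen_add3 (stack_pointer_rtx, stack_pointer_rtx, off));

   and get adddi3 on 64-bit targets or addsi3 on 32-bit targets without
   repeating the TARGET_64BIT test at every call site.  */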
3462 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3464 target_flags |= MASK_CLD & ~target_flags_explicit;
3467 /* Save the initial options in case the user uses function-specific options. */
3469 target_option_default_node = target_option_current_node
3470 = build_target_option_node ();
3473 /* Update register usage after having seen the compiler flags. */
3476 ix86_conditional_register_usage (void)
3481 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3483 if (fixed_regs[i] > 1)
3484 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3485 if (call_used_regs[i] > 1)
3486 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3489 /* The PIC register, if it exists, is fixed. */
3490 j = PIC_OFFSET_TABLE_REGNUM;
3491 if (j != INVALID_REGNUM)
3492 fixed_regs[j] = call_used_regs[j] = 1;
3494 /* The MS_ABI changes the set of call-used registers. */
3495 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
3497 call_used_regs[SI_REG] = 0;
3498 call_used_regs[DI_REG] = 0;
3499 call_used_regs[XMM6_REG] = 0;
3500 call_used_regs[XMM7_REG] = 0;
3501 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3502 call_used_regs[i] = 0;
3505 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3506 other call-clobbered regs for 64-bit. */
3509 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3511 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3512 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3513 && call_used_regs[i])
3514 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3517 /* If MMX is disabled, squash the registers. */
3519 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3520 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3521 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3523 /* If SSE is disabled, squash the registers. */
3525 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3526 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3527 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3529 /* If the FPU is disabled, squash the registers. */
3530 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3531 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3532 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3533 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3535 /* If 32-bit, squash the 64-bit registers. */
3538 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3540 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3546 /* Save the current options */
3549 ix86_function_specific_save (struct cl_target_option *ptr)
3551 ptr->arch = ix86_arch;
3552 ptr->schedule = ix86_schedule;
3553 ptr->tune = ix86_tune;
3554 ptr->fpmath = ix86_fpmath;
3555 ptr->branch_cost = ix86_branch_cost;
3556 ptr->tune_defaulted = ix86_tune_defaulted;
3557 ptr->arch_specified = ix86_arch_specified;
3558 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3559 ptr->target_flags_explicit = target_flags_explicit;
3561 /* The fields are char but the variables are not; make sure the
3562 values fit in the fields. */
3563 gcc_assert (ptr->arch == ix86_arch);
3564 gcc_assert (ptr->schedule == ix86_schedule);
3565 gcc_assert (ptr->tune == ix86_tune);
3566 gcc_assert (ptr->fpmath == ix86_fpmath);
3567 gcc_assert (ptr->branch_cost == ix86_branch_cost);
3570 /* Restore the current options */
3573 ix86_function_specific_restore (struct cl_target_option *ptr)
3575 enum processor_type old_tune = ix86_tune;
3576 enum processor_type old_arch = ix86_arch;
3577 unsigned int ix86_arch_mask, ix86_tune_mask;
3580 ix86_arch = (enum processor_type) ptr->arch;
3581 ix86_schedule = (enum attr_cpu) ptr->schedule;
3582 ix86_tune = (enum processor_type) ptr->tune;
3583 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
3584 ix86_branch_cost = ptr->branch_cost;
3585 ix86_tune_defaulted = ptr->tune_defaulted;
3586 ix86_arch_specified = ptr->arch_specified;
3587 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3588 target_flags_explicit = ptr->target_flags_explicit;
3590 /* Recreate the arch feature tests if the arch changed */
3591 if (old_arch != ix86_arch)
3593 ix86_arch_mask = 1u << ix86_arch;
3594 for (i = 0; i < X86_ARCH_LAST; ++i)
3595 ix86_arch_features[i]
3596 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3599 /* Recreate the tune optimization tests */
3600 if (old_tune != ix86_tune)
3602 ix86_tune_mask = 1u << ix86_tune;
3603 for (i = 0; i < X86_TUNE_LAST; ++i)
3604 ix86_tune_features[i]
3605 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3609 /* Print the current options */
3612 ix86_function_specific_print (FILE *file, int indent,
3613 struct cl_target_option *ptr)
3616 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3617 NULL, NULL, NULL, false);
3619 fprintf (file, "%*sarch = %d (%s)\n",
3622 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3623 ? cpu_names[ptr->arch]
3626 fprintf (file, "%*stune = %d (%s)\n",
3629 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3630 ? cpu_names[ptr->tune]
3633 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3634 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3635 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3636 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3640 fprintf (file, "%*s%s\n", indent, "", target_string);
3641 free (target_string);
3646 /* Inner function to process attribute((target(...))); take an argument and
3647 set the current options from the argument. If we have a list, recursively go over the list. */
3651 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3656 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3657 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3658 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3659 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3674 enum ix86_opt_type type;
3679 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3680 IX86_ATTR_ISA ("abm", OPT_mabm),
3681 IX86_ATTR_ISA ("aes", OPT_maes),
3682 IX86_ATTR_ISA ("avx", OPT_mavx),
3683 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3684 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3685 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3686 IX86_ATTR_ISA ("sse", OPT_msse),
3687 IX86_ATTR_ISA ("sse2", OPT_msse2),
3688 IX86_ATTR_ISA ("sse3", OPT_msse3),
3689 IX86_ATTR_ISA ("sse4", OPT_msse4),
3690 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3691 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3692 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3693 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3694 IX86_ATTR_ISA ("fma4", OPT_mfma4),
3695 IX86_ATTR_ISA ("xop", OPT_mxop),
3696 IX86_ATTR_ISA ("lwp", OPT_mlwp),
3698 /* string options */
3699 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3700 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3701 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3704 IX86_ATTR_YES ("cld",
3708 IX86_ATTR_NO ("fancy-math-387",
3709 OPT_mfancy_math_387,
3710 MASK_NO_FANCY_MATH_387),
3712 IX86_ATTR_YES ("ieee-fp",
3716 IX86_ATTR_YES ("inline-all-stringops",
3717 OPT_minline_all_stringops,
3718 MASK_INLINE_ALL_STRINGOPS),
3720 IX86_ATTR_YES ("inline-stringops-dynamically",
3721 OPT_minline_stringops_dynamically,
3722 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3724 IX86_ATTR_NO ("align-stringops",
3725 OPT_mno_align_stringops,
3726 MASK_NO_ALIGN_STRINGOPS),
3728 IX86_ATTR_YES ("recip",
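/* Illustrative user-level example (not part of the original file):
   the table above makes declarations such as

       int popcnt_fast (unsigned x) __attribute__((target("popcnt")));
       int crypto (void) __attribute__((target("aes,pclmul")));
       int tuned (void) __attribute__((target("arch=core2,fpmath=sse")));

   valid; each comma-separated string is matched against this table by
   the loop below, and a "no-" prefix (e.g. "no-sse4.2") negates an
   ISA option.  */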
3734 /* If this is a list, recurse to get the options. */
3735 if (TREE_CODE (args) == TREE_LIST)
3739 for (; args; args = TREE_CHAIN (args))
3740 if (TREE_VALUE (args)
3741 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3747 else if (TREE_CODE (args) != STRING_CST)
3750 /* Handle multiple arguments separated by commas. */
3751 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3753 while (next_optstr && *next_optstr != '\0')
3755 char *p = next_optstr;
3757 char *comma = strchr (next_optstr, ',');
3758 const char *opt_string;
3759 size_t len, opt_len;
3764 enum ix86_opt_type type = ix86_opt_unknown;
3770 len = comma - next_optstr;
3771 next_optstr = comma + 1;
3779 /* Recognize no-xxx. */
3780 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3789 /* Find the option. */
3792 for (i = 0; i < ARRAY_SIZE (attrs); i++)
3794 type = attrs[i].type;
3795 opt_len = attrs[i].len;
3796 if (ch == attrs[i].string[0]
3797 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3798 && memcmp (p, attrs[i].string, opt_len) == 0)
3801 mask = attrs[i].mask;
3802 opt_string = attrs[i].string;
3807 /* Process the option. */
3810 error ("attribute(target(\"%s\")) is unknown", orig_p);
3814 else if (type == ix86_opt_isa)
3815 ix86_handle_option (opt, p, opt_set_p);
3817 else if (type == ix86_opt_yes || type == ix86_opt_no)
3819 if (type == ix86_opt_no)
3820 opt_set_p = !opt_set_p;
3823 target_flags |= mask;
3825 target_flags &= ~mask;
3828 else if (type == ix86_opt_str)
3832 error ("option(\"%s\") was already specified", opt_string);
3836 p_strings[opt] = xstrdup (p + opt_len);
3846 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3849 ix86_valid_target_attribute_tree (tree args)
3851 const char *orig_arch_string = ix86_arch_string;
3852 const char *orig_tune_string = ix86_tune_string;
3853 const char *orig_fpmath_string = ix86_fpmath_string;
3854 int orig_tune_defaulted = ix86_tune_defaulted;
3855 int orig_arch_specified = ix86_arch_specified;
3856 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3859 struct cl_target_option *def
3860 = TREE_TARGET_OPTION (target_option_default_node);
3862 /* Process each of the options on the chain. */
3863 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3866 /* If the changed options are different from the default, rerun override_options,
3867 and then save the options away. The string options are attribute options,
3868 and will be undone when we copy the save structure. */
3869 if (ix86_isa_flags != def->ix86_isa_flags
3870 || target_flags != def->target_flags
3871 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3872 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3873 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3875 /* If we are using the default tune= or arch=, undo the string assigned,
3876 and use the default. */
3877 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3878 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3879 else if (!orig_arch_specified)
3880 ix86_arch_string = NULL;
3882 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3883 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3884 else if (orig_tune_defaulted)
3885 ix86_tune_string = NULL;
3887 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
3888 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3889 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3890 else if (!TARGET_64BIT && TARGET_SSE)
3891 ix86_fpmath_string = "sse,387";
3893 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3894 override_options (false);
3896 /* Add any builtin functions with the new isa if any. */
3897 ix86_add_new_builtins (ix86_isa_flags);
3899 /* Save the current options unless we are validating options for #pragma. */
3901 t = build_target_option_node ();
3903 ix86_arch_string = orig_arch_string;
3904 ix86_tune_string = orig_tune_string;
3905 ix86_fpmath_string = orig_fpmath_string;
3907 /* Free up memory allocated to hold the strings */
3908 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
3909 if (option_strings[i])
3910 free (option_strings[i]);
3916 /* Hook to validate attribute((target("string"))). */
3919 ix86_valid_target_attribute_p (tree fndecl,
3920 tree ARG_UNUSED (name),
3922 int ARG_UNUSED (flags))
3924 struct cl_target_option cur_target;
3926 tree old_optimize = build_optimization_node ();
3927 tree new_target, new_optimize;
3928 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
3930 /* If the function changed the optimization levels as well as setting target
3931 options, start with the optimizations specified. */
3932 if (func_optimize && func_optimize != old_optimize)
3933 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
3935 /* The target attributes may also change some optimization flags, so update
3936 the optimization options if necessary. */
3937 cl_target_option_save (&cur_target);
3938 new_target = ix86_valid_target_attribute_tree (args);
3939 new_optimize = build_optimization_node ();
3946 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
3948 if (old_optimize != new_optimize)
3949 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
3952 cl_target_option_restore (&cur_target);
3954 if (old_optimize != new_optimize)
3955 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
3961 /* Hook to determine if one function can safely inline another. */
3964 ix86_can_inline_p (tree caller, tree callee)
3967 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
3968 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
3970 /* If callee has no option attributes, then it is ok to inline. */
3974 /* If the caller has no option attributes but the callee does, then it is not ok to inline. */
3976 else if (!caller_tree)
3981 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
3982 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
3984 /* The callee's ISA options should be a subset of the caller's, i.e. an SSE4 function
3985 can inline an SSE2 function, but an SSE2 function can't inline an SSE4 function. */
3987 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
3988 != callee_opts->ix86_isa_flags)
3991 /* See if we have the same non-isa options. */
3992 else if (caller_opts->target_flags != callee_opts->target_flags)
3995 /* See if arch, tune, etc. are the same. */
3996 else if (caller_opts->arch != callee_opts->arch)
3999 else if (caller_opts->tune != callee_opts->tune)
4002 else if (caller_opts->fpmath != callee_opts->fpmath)
4005 else if (caller_opts->branch_cost != callee_opts->branch_cost)
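/* Illustrative example (not part of the original file) of the subset
   rule checked above:

       __attribute__((target("sse2")))   static int callee (int x);
       __attribute__((target("sse4.2"))) static int caller (int x);

   caller may inline callee because callee's ISA flags are a subset of
   caller's; inlining in the opposite direction is rejected.  */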
4016 /* Remember the last target of ix86_set_current_function. */
4017 static GTY(()) tree ix86_previous_fndecl;
4019 /* Establish appropriate back-end context for processing the function
4020 FNDECL. The argument might be NULL to indicate processing at top
4021 level, outside of any function scope. */
4023 ix86_set_current_function (tree fndecl)
4025 /* Only change the context if the function changes. This hook is called
4026 several times in the course of compiling a function, and we don't want to
4027 slow things down too much or call target_reinit when it isn't safe. */
4028 if (fndecl && fndecl != ix86_previous_fndecl)
4030 tree old_tree = (ix86_previous_fndecl
4031 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4034 tree new_tree = (fndecl
4035 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4038 ix86_previous_fndecl = fndecl;
4039 if (old_tree == new_tree)
4044 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
4050 struct cl_target_option *def
4051 = TREE_TARGET_OPTION (target_option_current_node);
4053 cl_target_option_restore (def);
4060 /* Return true if this goes in large data/bss. */
4063 ix86_in_large_data_p (tree exp)
4065 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4068 /* Functions are never large data. */
4069 if (TREE_CODE (exp) == FUNCTION_DECL)
4072 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4074 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4075 if (strcmp (section, ".ldata") == 0
4076 || strcmp (section, ".lbss") == 0)
4082 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4084 /* If this is an incomplete type with size 0, then we can't put it
4085 in data because it might be too big when completed. */
4086 if (!size || size > ix86_section_threshold)
4093 /* Switch to the appropriate section for output of DECL.
4094 DECL is either a `VAR_DECL' node or a constant of some sort.
4095 RELOC indicates whether forming the initial value of DECL requires
4096 link-time relocations. */
4098 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4102 x86_64_elf_select_section (tree decl, int reloc,
4103 unsigned HOST_WIDE_INT align)
4105 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4106 && ix86_in_large_data_p (decl))
4108 const char *sname = NULL;
4109 unsigned int flags = SECTION_WRITE;
4110 switch (categorize_decl_for_section (decl, reloc))
4115 case SECCAT_DATA_REL:
4116 sname = ".ldata.rel";
4118 case SECCAT_DATA_REL_LOCAL:
4119 sname = ".ldata.rel.local";
4121 case SECCAT_DATA_REL_RO:
4122 sname = ".ldata.rel.ro";
4124 case SECCAT_DATA_REL_RO_LOCAL:
4125 sname = ".ldata.rel.ro.local";
4129 flags |= SECTION_BSS;
4132 case SECCAT_RODATA_MERGE_STR:
4133 case SECCAT_RODATA_MERGE_STR_INIT:
4134 case SECCAT_RODATA_MERGE_CONST:
4138 case SECCAT_SRODATA:
4145 /* We don't split these for the medium model. Place them into
4146 default sections and hope for the best. */
4148 case SECCAT_EMUTLS_VAR:
4149 case SECCAT_EMUTLS_TMPL:
4154 /* We might get called with string constants, but get_named_section
4155 doesn't like them as they are not DECLs. Also, we need to set
4156 flags in that case. */
4158 return get_section (sname, flags, NULL);
4159 return get_named_section (decl, sname, reloc);
4162 return default_elf_select_section (decl, reloc, align);
4165 /* Build up a unique section name, expressed as a
4166 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4167 RELOC indicates whether the initial value of EXP requires
4168 link-time relocations. */
4170 static void ATTRIBUTE_UNUSED
4171 x86_64_elf_unique_section (tree decl, int reloc)
4173 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4174 && ix86_in_large_data_p (decl))
4176 const char *prefix = NULL;
4177 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4178 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4180 switch (categorize_decl_for_section (decl, reloc))
4183 case SECCAT_DATA_REL:
4184 case SECCAT_DATA_REL_LOCAL:
4185 case SECCAT_DATA_REL_RO:
4186 case SECCAT_DATA_REL_RO_LOCAL:
4187 prefix = one_only ? ".ld" : ".ldata";
4190 prefix = one_only ? ".lb" : ".lbss";
4193 case SECCAT_RODATA_MERGE_STR:
4194 case SECCAT_RODATA_MERGE_STR_INIT:
4195 case SECCAT_RODATA_MERGE_CONST:
4196 prefix = one_only ? ".lr" : ".lrodata";
4198 case SECCAT_SRODATA:
4205 /* We don't split these for the medium model. Place them into
4206 default sections and hope for the best. */
4208 case SECCAT_EMUTLS_VAR:
4209 prefix = targetm.emutls.var_section;
4211 case SECCAT_EMUTLS_TMPL:
4212 prefix = targetm.emutls.tmpl_section;
4217 const char *name, *linkonce;
4220 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4221 name = targetm.strip_name_encoding (name);
4223 /* If we're using one_only, then there needs to be a .gnu.linkonce
4224 prefix to the section name. */
4225 linkonce = one_only ? ".gnu.linkonce" : "";
4227 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4229 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4233 default_unique_section (decl, reloc);
4236 #ifdef COMMON_ASM_OP
4237 /* This says how to output assembler code to declare an
4238 uninitialized external linkage data object.
4240 For medium model x86-64 we need to use the .largecomm directive for large objects. */
4243 x86_elf_aligned_common (FILE *file,
4244 const char *name, unsigned HOST_WIDE_INT size,
4247 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4248 && size > (unsigned int)ix86_section_threshold)
4249 fputs (".largecomm\t", file);
4251 fputs (COMMON_ASM_OP, file);
4252 assemble_name (file, name);
4253 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4254 size, align / BITS_PER_UNIT);
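/* Sketch of the assembly emitted above (names and sizes illustrative):
   a medium-model object larger than ix86_section_threshold produces

       .largecomm  big_array,1048576,32

   while a small object takes the ordinary COMMON_ASM_OP path,
   typically

       .comm       small_var,4,4  */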
4258 /* Utility function for targets to use in implementing
4259 ASM_OUTPUT_ALIGNED_BSS. */
4262 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4263 const char *name, unsigned HOST_WIDE_INT size,
4266 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4267 && size > (unsigned int)ix86_section_threshold)
4268 switch_to_section (get_named_section (decl, ".lbss", 0));
4270 switch_to_section (bss_section);
4271 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4272 #ifdef ASM_DECLARE_OBJECT_NAME
4273 last_assemble_variable_decl = decl;
4274 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4276 /* The standard thing is to just output a label for the object. */
4277 ASM_OUTPUT_LABEL (file, name);
4278 #endif /* ASM_DECLARE_OBJECT_NAME */
4279 ASM_OUTPUT_SKIP (file, size ? size : 1);
4283 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4285 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4286 make the shortage of registers even worse. */
4287 #ifdef INSN_SCHEDULING
4289 flag_schedule_insns = 0;
4293 /* The Darwin libraries never set errno, so we might as well
4294 avoid calling them when that's the only reason we would. */
4295 flag_errno_math = 0;
4297 /* The default values of these switches depend on TARGET_64BIT,
4298 which is not known at this moment. Mark these values with 2 and
4299 let the user override them. If there is no command line option
4300 specifying them, we will set the defaults in override_options. */
4302 flag_omit_frame_pointer = 2;
4303 flag_pcc_struct_return = 2;
4304 flag_asynchronous_unwind_tables = 2;
4305 flag_vect_cost_model = 1;
4306 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4307 SUBTARGET_OPTIMIZATION_OPTIONS;
4311 /* Decide whether we can make a sibling call to a function. DECL is the
4312 declaration of the function being targeted by the call and EXP is the
4313 CALL_EXPR representing the call. */
4316 ix86_function_ok_for_sibcall (tree decl, tree exp)
4318 tree type, decl_or_type;
4321 /* If we are generating position-independent code, we cannot sibcall
4322 optimize any indirect call, or a direct call to a global function,
4323 as the PLT requires %ebx be live. */
4324 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4327 /* If we need to align the outgoing stack, then sibcalling would
4328 unalign the stack, which may break the called function. */
4329 if (ix86_minimum_incoming_stack_boundary (true)
4330 < PREFERRED_STACK_BOUNDARY)
4335 decl_or_type = decl;
4336 type = TREE_TYPE (decl);
4340 /* We're looking at the CALL_EXPR, we need the type of the function. */
4341 type = CALL_EXPR_FN (exp); /* pointer expression */
4342 type = TREE_TYPE (type); /* pointer type */
4343 type = TREE_TYPE (type); /* function type */
4344 decl_or_type = type;
4347 /* Check that the return value locations are the same. For example,
4348 if we are returning floats on the 80387 register stack, we cannot
4349 make a sibcall from a function that doesn't return a float to a
4350 function that does or, conversely, from a function that does return
4351 a float to a function that doesn't; the necessary stack adjustment
4352 would not be executed. This is also the place we notice
4353 differences in the return value ABI. Note that it is ok for one
4354 of the functions to have void return type as long as the return
4355 value of the other is passed in a register. */
4356 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4357 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4359 if (STACK_REG_P (a) || STACK_REG_P (b))
4361 if (!rtx_equal_p (a, b))
4364 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4366 else if (!rtx_equal_p (a, b))
4371 /* The SYSV ABI has more call-clobbered registers;
4372 disallow sibcalls from MS to SYSV. */
4373 if (cfun->machine->call_abi == MS_ABI
4374 && ix86_function_type_abi (type) == SYSV_ABI)
4379 /* If this call is indirect, we'll need to be able to use a
4380 call-clobbered register for the address of the target function.
4381 Make sure that all such registers are not used for passing
4382 parameters. Note that DLLIMPORT functions are indirect. */
4384 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4386 if (ix86_function_regparm (type, NULL) >= 3)
4388 /* ??? Need to count the actual number of registers to be used,
4389 not the possible number of registers. Fix later. */
4395 /* Otherwise okay. That also includes certain types of indirect calls. */
4399 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
4400 and "sseregparm" calling convention attributes;
4401 arguments as in struct attribute_spec.handler. */
4404 ix86_handle_cconv_attribute (tree *node, tree name,
4406 int flags ATTRIBUTE_UNUSED,
4409 if (TREE_CODE (*node) != FUNCTION_TYPE
4410 && TREE_CODE (*node) != METHOD_TYPE
4411 && TREE_CODE (*node) != FIELD_DECL
4412 && TREE_CODE (*node) != TYPE_DECL)
4414 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4416 *no_add_attrs = true;
4420 /* Can combine regparm with all attributes but fastcall. */
4421 if (is_attribute_p ("regparm", name))
4425 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4427 error ("fastcall and regparm attributes are not compatible");
4430 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4432 error ("regparam and thiscall attributes are not compatible");
4435 cst = TREE_VALUE (args);
4436 if (TREE_CODE (cst) != INTEGER_CST)
4438 warning (OPT_Wattributes,
4439 "%qE attribute requires an integer constant argument",
4441 *no_add_attrs = true;
4443 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4445 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4447 *no_add_attrs = true;
4455 /* Do not warn when emulating the MS ABI. */
4456 if ((TREE_CODE (*node) != FUNCTION_TYPE
4457 && TREE_CODE (*node) != METHOD_TYPE)
4458 || ix86_function_type_abi (*node) != MS_ABI)
4459 warning (OPT_Wattributes, "%qE attribute ignored",
4461 *no_add_attrs = true;
4465 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4466 if (is_attribute_p ("fastcall", name))
4468 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4470 error ("fastcall and cdecl attributes are not compatible");
4472 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4474 error ("fastcall and stdcall attributes are not compatible");
4476 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4478 error ("fastcall and regparm attributes are not compatible");
4480 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4482 error ("fastcall and thiscall attributes are not compatible");
4486 /* Can combine stdcall with fastcall (redundant), regparm and sseregparm. */
4488 else if (is_attribute_p ("stdcall", name))
4490 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4492 error ("stdcall and cdecl attributes are not compatible");
4494 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4496 error ("stdcall and fastcall attributes are not compatible");
4498 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4500 error ("stdcall and thiscall attributes are not compatible");
4504 /* Can combine cdecl with regparm and sseregparm. */
4505 else if (is_attribute_p ("cdecl", name))
4507 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4509 error ("stdcall and cdecl attributes are not compatible");
4511 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4513 error ("fastcall and cdecl attributes are not compatible");
4515 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4517 error ("cdecl and thiscall attributes are not compatible");
4520 else if (is_attribute_p ("thiscall", name))
4522 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
4523 warning (OPT_Wattributes, "%qE attribute is used for a non-class method",
4525 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4527 error ("stdcall and thiscall attributes are not compatible");
4529 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4531 error ("fastcall and thiscall attributes are not compatible");
4533 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4535 error ("cdecl and thiscall attributes are not compatible");
4539 /* Can combine sseregparm with all attributes. */
4544 /* Return 0 if the attributes for two types are incompatible, 1 if they
4545 are compatible, and 2 if they are nearly compatible (which causes a
4546 warning to be generated). */
4549 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4551 /* Check for mismatch of non-default calling convention. */
4552 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4554 if (TREE_CODE (type1) != FUNCTION_TYPE
4555 && TREE_CODE (type1) != METHOD_TYPE)
4558 /* Check for mismatched fastcall/regparm types. */
4559 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4560 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4561 || (ix86_function_regparm (type1, NULL)
4562 != ix86_function_regparm (type2, NULL)))
4565 /* Check for mismatched sseregparm types. */
4566 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4567 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4570 /* Check for mismatched thiscall types. */
4571 if (!lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type1))
4572 != !lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type2)))
4575 /* Check for mismatched return types (cdecl vs stdcall). */
4576 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4577 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4583 /* Return the regparm value for a function with the indicated TYPE and DECL.
4584 DECL may be NULL when calling function indirectly
4585 or considering a libcall. */
4588 ix86_function_regparm (const_tree type, const_tree decl)
4594 return (ix86_function_type_abi (type) == SYSV_ABI
4595 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4597 regparm = ix86_regparm;
4598 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4601 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4605 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4608 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
4611 /* Use register calling convention for local functions when possible. */
4613 && TREE_CODE (decl) == FUNCTION_DECL
4617 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4618 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
4621 int local_regparm, globals = 0, regno;
4623 /* Make sure no regparm register is taken by a
4624 fixed register variable. */
4625 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4626 if (fixed_regs[local_regparm])
4629 /* We don't want to use regparm(3) for nested functions as
4630 these use a static chain pointer in the third argument. */
4631 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
4634 /* Each fixed register usage increases register pressure,
4635 so fewer registers should be used for argument passing.
4636 This functionality can be overridden by an explicit regparm value. */
4638 for (regno = 0; regno <= DI_REG; regno++)
4639 if (fixed_regs[regno])
4643 = globals < local_regparm ? local_regparm - globals : 0;
4645 if (local_regparm > regparm)
4646 regparm = local_regparm;
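/* Illustrative user-level example (not part of the original file):
   on 32-bit,

       __attribute__((regparm(3))) int f (int a, int b, int c);

   passes a, b and c in %eax, %edx and %ecx instead of on the stack.
   The loop above can shrink that number when fixed global register
   variables occupy some of those registers.  */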
4653 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4654 DFmode (2) arguments in SSE registers for a function with the
4655 indicated TYPE and DECL. DECL may be NULL when calling function
4656 indirectly or considering a libcall. Otherwise return 0. */
4659 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4661 gcc_assert (!TARGET_64BIT);
4663 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4664 by the sseregparm attribute. */
4665 if (TARGET_SSEREGPARM
4666 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4673 error ("Calling %qD with attribute sseregparm without "
4674 "SSE/SSE2 enabled", decl);
4676 error ("Calling %qT with attribute sseregparm without "
4677 "SSE/SSE2 enabled", type);
4685 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4686 (and DFmode for SSE2) arguments in SSE registers. */
4687 if (decl && TARGET_SSE_MATH && optimize && !profile_flag)
4689 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4690 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4692 return TARGET_SSE2 ? 2 : 1;
4698 /* Return true if EAX is live at the start of the function. Used by
4699 ix86_expand_prologue to determine if we need special help before
4700 calling allocate_stack_worker. */
4703 ix86_eax_live_at_start_p (void)
4705 /* Cheat. Don't bother working forward from ix86_function_regparm
4706 to the function type to whether an actual argument is located in
4707 eax. Instead just look at cfg info, which is still close enough
4708 to correct at this point. This gives false positives for broken
4709 functions that might use uninitialized data that happens to be
4710 allocated in eax, but who cares? */
4711 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4714 /* Value is the number of bytes of arguments automatically
4715 popped when returning from a subroutine call.
4716 FUNDECL is the declaration node of the function (as a tree),
4717 FUNTYPE is the data type of the function (as a tree),
4718 or for a library call it is an identifier node for the subroutine name.
4719 SIZE is the number of bytes of arguments passed on the stack.
4721 On the 80386, the RTD insn may be used to pop them if the number
4722 of args is fixed, but if the number is variable then the caller
4723 must pop them all. RTD can't be used for library calls now
4724 because the library is compiled with the Unix compiler.
4725 Use of RTD is a selectable option, since it is incompatible with
4726 standard Unix calling sequences. If the option is not selected,
4727 the caller must always pop the args.
4729 The attribute stdcall is equivalent to RTD on a per module basis. */
4732 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4736 /* None of the 64-bit ABIs pop arguments. */
4740 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4742 /* Cdecl functions override -mrtd, and never pop the stack. */
4743 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4745 /* Stdcall and fastcall functions will pop the stack if not variadic. */
4747 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4748 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype))
4749 || lookup_attribute ("thiscall", TYPE_ATTRIBUTES (funtype)))
4752 if (rtd && ! stdarg_p (funtype))
4756 /* Lose any fake structure return argument if it is passed on the stack. */
4757 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4758 && !KEEP_AGGREGATE_RETURN_POINTER)
4760 int nregs = ix86_function_regparm (funtype, fundecl);
4762 return GET_MODE_SIZE (Pmode);
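/* Illustrative example (not part of the original file): for

       __attribute__((stdcall)) int f (int x, int y);

   this function returns 8, so the callee pops its own arguments
   (e.g. with "ret $8"), whereas a cdecl or variadic function yields 0
   and the caller pops.  */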
4768 /* Argument support functions. */
4770 /* Return true when register may be used to pass function parameters. */
4772 ix86_function_arg_regno_p (int regno)
4775 const int *parm_regs;
4780 return (regno < REGPARM_MAX
4781 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4783 return (regno < REGPARM_MAX
4784 || (TARGET_MMX && MMX_REGNO_P (regno)
4785 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4786 || (TARGET_SSE && SSE_REGNO_P (regno)
4787 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4792 if (SSE_REGNO_P (regno) && TARGET_SSE)
4797 if (TARGET_SSE && SSE_REGNO_P (regno)
4798 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4802 /* TODO: The function should depend on current function ABI but
4803 builtins.c would need updating then. Therefore we use the default ABI. */
4806 /* RAX is used as hidden argument to va_arg functions. */
4807 if (ix86_abi == SYSV_ABI && regno == AX_REG)
4810 if (ix86_abi == MS_ABI)
4811 parm_regs = x86_64_ms_abi_int_parameter_registers;
4813 parm_regs = x86_64_int_parameter_registers;
4814 for (i = 0; i < (ix86_abi == MS_ABI
4815 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
4816 if (regno == parm_regs[i])
4821 /* Return true if we do not know how to pass TYPE solely in registers. */
4824 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4826 if (must_pass_in_stack_var_size_or_pad (mode, type))
4829 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4830 The layout_type routine is crafty and tries to trick us into passing
4831 currently unsupported vector types on the stack by using TImode. */
4832 return (!TARGET_64BIT && mode == TImode
4833 && type && TREE_CODE (type) != VECTOR_TYPE);
4836 /* Return the size, in bytes, of the area reserved for arguments passed
4837 in registers for the function represented by FNDECL, depending on the ABI used. */
4840 ix86_reg_parm_stack_space (const_tree fndecl)
4842 enum calling_abi call_abi = SYSV_ABI;
4843 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
4844 call_abi = ix86_function_abi (fndecl);
4846 call_abi = ix86_function_type_abi (fndecl);
4847 if (call_abi == MS_ABI)
4852 /* Return SYSV_ABI or MS_ABI, depending on FNTYPE, specifying the call ABI used. */
4855 ix86_function_type_abi (const_tree fntype)
4857 if (TARGET_64BIT && fntype != NULL)
4859 enum calling_abi abi = ix86_abi;
4860 if (abi == SYSV_ABI)
4862 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
4865 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
4873 ix86_function_ms_hook_prologue (const_tree fntype)
4877 if (lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fntype)))
4879 if (decl_function_context (fntype) != NULL_TREE)
4881 error_at (DECL_SOURCE_LOCATION (fntype),
4882 "ms_hook_prologue is not compatible with nested function");
4891 static enum calling_abi
4892 ix86_function_abi (const_tree fndecl)
4896 return ix86_function_type_abi (TREE_TYPE (fndecl));
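/* Illustrative example (not part of the original file): on 64-bit
   targets the attributes checked in ix86_function_type_abi let one
   translation unit mix calling conventions:

       __attribute__((ms_abi))   void win_callback (void);
       __attribute__((sysv_abi)) void psabi_helper (void);

   With ix86_abi == SYSV_ABI, "ms_abi" switches a function to the
   Microsoft convention, and vice versa.  */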
4899 /* Return SYSV_ABI or MS_ABI, depending on CFUN, specifying the call ABI used. */
4902 ix86_cfun_abi (void)
4904 if (! cfun || ! TARGET_64BIT)
4906 return cfun->machine->call_abi;
4910 extern void init_regs (void);
4912 /* Implementation of call abi switching target hook. Specific to FNDECL
4913 the specific call register sets are set. See also CONDITIONAL_REGISTER_USAGE
4914 for more details. */
4916 ix86_call_abi_override (const_tree fndecl)
4918 if (fndecl == NULL_TREE)
4919 cfun->machine->call_abi = ix86_abi;
4921 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
4924 /* The MS and SYSV ABIs have different sets of call-used registers. Avoid expensive
4925 re-initialization of init_regs each time we switch function context since
4926 this is needed only during RTL expansion. */
4928 ix86_maybe_switch_abi (void)
4931 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
4935 /* Initialize a variable CUM of type CUMULATIVE_ARGS
4936 for a call to a function whose data type is FNTYPE.
4937 For a library call, FNTYPE is 0. */
4940 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
4941 tree fntype, /* tree ptr for function decl */
4942 rtx libname, /* SYMBOL_REF of library name or 0 */
4945 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
4946 memset (cum, 0, sizeof (*cum));
4949 cum->call_abi = ix86_function_abi (fndecl);
4951 cum->call_abi = ix86_function_type_abi (fntype);
4952 /* Set up the number of registers to use for passing arguments. */
4954 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
4955 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
4956 "or subtarget optimization implying it");
4957 cum->nregs = ix86_regparm;
4960 if (cum->call_abi != ix86_abi)
4961 cum->nregs = (ix86_abi != SYSV_ABI
4962 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4966 cum->sse_nregs = SSE_REGPARM_MAX;
4969 if (cum->call_abi != ix86_abi)
4970 cum->sse_nregs = (ix86_abi != SYSV_ABI
4971 ? X86_64_SSE_REGPARM_MAX
4972 : X86_64_MS_SSE_REGPARM_MAX);
4976 cum->mmx_nregs = MMX_REGPARM_MAX;
4977 cum->warn_avx = true;
4978 cum->warn_sse = true;
4979 cum->warn_mmx = true;
4981 /* Because the type might mismatch between caller and callee, we need to
4982 use the actual type of the function for local calls.
4983 FIXME: cgraph_analyze can be told to actually record if a function uses
4984 va_start so for local functions maybe_vaarg can be made more aggressive.
4986 FIXME: once the type system is fixed, we won't need this code anymore. */
4988 fntype = TREE_TYPE (fndecl);
4989 cum->maybe_vaarg = (fntype
4990 ? (!prototype_p (fntype) || stdarg_p (fntype))
4995 /* If there are variable arguments, then we won't pass anything
4996 in registers in 32-bit mode. */
4997 if (stdarg_p (fntype))
5008 /* Use ecx and edx registers if function has fastcall attribute,
5009 else look for regparm information. */
5012 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
5015 cum->fastcall = 1; /* Same first register as in fastcall. */
5017 else if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
5023 cum->nregs = ix86_function_regparm (fntype, fndecl);
5026 /* Set up the number of SSE registers used for passing SFmode
5027 and DFmode arguments. Warn for mismatching ABI. */
5028 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
5032 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
5033 But in the case of vector types, it is some vector mode.
5035 When we have only some of our vector isa extensions enabled, then there
5036 are some modes for which vector_mode_supported_p is false. For these
5037 modes, the generic vector support in gcc will choose some non-vector mode
5038 in order to implement the type. By computing the natural mode, we'll
5039 select the proper ABI location for the operand and not depend on whatever
5040 the middle-end decides to do with these vector types.
5042 The middle-end can't deal with vector types > 16 bytes. In this
5043 case, we return the original mode and warn about the ABI change if CUM isn't NULL. */
5046 static enum machine_mode
5047 type_natural_mode (const_tree type, CUMULATIVE_ARGS *cum)
5049 enum machine_mode mode = TYPE_MODE (type);
5051 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
5053 HOST_WIDE_INT size = int_size_in_bytes (type);
5054 if ((size == 8 || size == 16 || size == 32)
5055 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5056 && TYPE_VECTOR_SUBPARTS (type) > 1)
5058 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5060 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5061 mode = MIN_MODE_VECTOR_FLOAT;
5063 mode = MIN_MODE_VECTOR_INT;
5065 /* Get the mode which has this inner mode and number of units. */
5066 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5067 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5068 && GET_MODE_INNER (mode) == innermode)
5070 if (size == 32 && !TARGET_AVX)
5072 static bool warnedavx;
5079 warning (0, "AVX vector argument without AVX "
5080 "enabled changes the ABI");
5082 return TYPE_MODE (type);
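/* Illustrative example (not part of the original file): compiling

       typedef float v8sf __attribute__((vector_size (32)));
       void take (v8sf v);

   without -mavx triggers the warning above, because the 32-byte
   vector cannot go in a YMM register and the argument falls back to
   TYPE_MODE.  */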
5095 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5096 this may not agree with the mode that the type system has chosen for the
5097 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5098 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5101 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5106 if (orig_mode != BLKmode)
5107 tmp = gen_rtx_REG (orig_mode, regno);
5110 tmp = gen_rtx_REG (mode, regno);
5111 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5112 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
5118 /* x86-64 register passing implementation. See the x86-64 ABI for details. The goal
5119 of this code is to classify each 8 bytes of an incoming argument by register
5120 class and assign registers accordingly. */
5122 /* Return the union class of CLASS1 and CLASS2.
5123 See the x86-64 PS ABI for details. */
5125 static enum x86_64_reg_class
5126 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5128 /* Rule #1: If both classes are equal, this is the resulting class. */
5129 if (class1 == class2)
5132 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
5134 if (class1 == X86_64_NO_CLASS)
5136 if (class2 == X86_64_NO_CLASS)
5139 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5140 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5141 return X86_64_MEMORY_CLASS;
5143 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5144 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5145 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5146 return X86_64_INTEGERSI_CLASS;
5147 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5148 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5149 return X86_64_INTEGER_CLASS;
5151 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
5153 if (class1 == X86_64_X87_CLASS
5154 || class1 == X86_64_X87UP_CLASS
5155 || class1 == X86_64_COMPLEX_X87_CLASS
5156 || class2 == X86_64_X87_CLASS
5157 || class2 == X86_64_X87UP_CLASS
5158 || class2 == X86_64_COMPLEX_X87_CLASS)
5159 return X86_64_MEMORY_CLASS;
5161 /* Rule #6: Otherwise class SSE is used. */
5162 return X86_64_SSE_CLASS;
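/* Worked example (illustrative, not part of the original file): for

       struct s { int i; float f; };   8 bytes, a single eightbyte

   field i classifies as X86_64_INTEGERSI_CLASS and f as
   X86_64_SSESF_CLASS; rule #4 above merges them to an integer class,
   so the whole struct is passed in one general-purpose register.  */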
5165 /* Classify the argument of type TYPE and mode MODE.
5166 CLASSES will be filled by the register class used to pass each word
5167 of the operand. The number of words is returned. In case the parameter
5168 should be passed in memory, 0 is returned. As a special case for zero
5169 sized containers, classes[0] will be NO_CLASS and 1 is returned.
5171 BIT_OFFSET is used internally for handling records and specifies the
5172 offset in bits modulo 256 to avoid overflow cases.
5174 See the x86-64 PS ABI for details.
5178 classify_argument (enum machine_mode mode, const_tree type,
5179 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5181 HOST_WIDE_INT bytes =
5182 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5183 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5185 /* Variable sized entities are always passed/returned in memory. */
5189 if (mode != VOIDmode
5190 && targetm.calls.must_pass_in_stack (mode, type))
5193 if (type && AGGREGATE_TYPE_P (type))
5197 enum x86_64_reg_class subclasses[MAX_CLASSES];
5199 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5203 for (i = 0; i < words; i++)
5204 classes[i] = X86_64_NO_CLASS;
5206 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
5207 signal the memory class, so handle this as a special case. */
5210 classes[0] = X86_64_NO_CLASS;
5214 /* Classify each field of record and merge classes. */
5215 switch (TREE_CODE (type))
5218 /* And now merge the fields of structure. */
5219 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5221 if (TREE_CODE (field) == FIELD_DECL)
5225 if (TREE_TYPE (field) == error_mark_node)
5228 /* Bitfields are always classified as integer. Handle them
5229 early, since later code would consider them to be
5230 misaligned integers. */
5231 if (DECL_BIT_FIELD (field))
5233 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5234 i < ((int_bit_position (field) + (bit_offset % 64))
5235 + tree_low_cst (DECL_SIZE (field), 0)
5238 merge_classes (X86_64_INTEGER_CLASS,
5245 type = TREE_TYPE (field);
5247 /* Flexible array member is ignored. */
5248 if (TYPE_MODE (type) == BLKmode
5249 && TREE_CODE (type) == ARRAY_TYPE
5250 && TYPE_SIZE (type) == NULL_TREE
5251 && TYPE_DOMAIN (type) != NULL_TREE
5252 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5257 if (!warned && warn_psabi)
5260 inform (input_location,
5261 "The ABI of passing struct with"
5262 " a flexible array member has"
5263 " changed in GCC 4.4");
5267 num = classify_argument (TYPE_MODE (type), type,
5269 (int_bit_position (field)
5270 + bit_offset) % 256);
5273 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5274 for (i = 0; i < num && (i + pos) < words; i++)
5276 merge_classes (subclasses[i], classes[i + pos]);
5283 /* Arrays are handled as small records. */
5286 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5287 TREE_TYPE (type), subclasses, bit_offset);
5291 /* The partial classes are now full classes. */
5292 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5293 subclasses[0] = X86_64_SSE_CLASS;
5294 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5295 && !((bit_offset % 64) == 0 && bytes == 4))
5296 subclasses[0] = X86_64_INTEGER_CLASS;
5298 for (i = 0; i < words; i++)
5299 classes[i] = subclasses[i % num];
5304 case QUAL_UNION_TYPE:
5305 /* Unions are similar to RECORD_TYPE but offset is always 0.
5307 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5309 if (TREE_CODE (field) == FIELD_DECL)
5313 if (TREE_TYPE (field) == error_mark_node)
5316 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5317 TREE_TYPE (field), subclasses,
5321 for (i = 0; i < num; i++)
5322 classes[i] = merge_classes (subclasses[i], classes[i]);
5333 /* When size > 16 bytes, if the first one isn't
5334 X86_64_SSE_CLASS or any other ones aren't
5335 X86_64_SSEUP_CLASS, everything should be passed in memory. */
5337 if (classes[0] != X86_64_SSE_CLASS)
5340 for (i = 1; i < words; i++)
5341 if (classes[i] != X86_64_SSEUP_CLASS)
5345 /* Final merger cleanup. */
5346 for (i = 0; i < words; i++)
5348 /* If one class is MEMORY, everything should be passed in memory. */
5350 if (classes[i] == X86_64_MEMORY_CLASS)
5353 /* The X86_64_SSEUP_CLASS should be always preceded by
5354 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5355 if (classes[i] == X86_64_SSEUP_CLASS
5356 && classes[i - 1] != X86_64_SSE_CLASS
5357 && classes[i - 1] != X86_64_SSEUP_CLASS)
5359 /* The first one should never be X86_64_SSEUP_CLASS. */
5360 gcc_assert (i != 0);
5361 classes[i] = X86_64_SSE_CLASS;
5364 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5365 everything should be passed in memory. */
5366 if (classes[i] == X86_64_X87UP_CLASS
5367 && (classes[i - 1] != X86_64_X87_CLASS))
5371 /* The first one should never be X86_64_X87UP_CLASS. */
5372 gcc_assert (i != 0);
5373 if (!warned && warn_psabi)
5376 inform (input_location,
5377 "The ABI of passing union with long double"
5378 " has changed in GCC 4.4");
5386 /* Compute the alignment needed. We align all types to natural boundaries, with
5387 the exception of XFmode, which is aligned to 64 bits. */
5388 if (mode != VOIDmode && mode != BLKmode)
5390 int mode_alignment = GET_MODE_BITSIZE (mode);
5393 mode_alignment = 128;
5394 else if (mode == XCmode)
5395 mode_alignment = 256;
5396 if (COMPLEX_MODE_P (mode))
5397 mode_alignment /= 2;
5398 /* Misaligned fields are always returned in memory. */
5399 if (bit_offset % mode_alignment)
5403 /* For V1xx modes, just use the base mode. */
5404 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
5405 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5406 mode = GET_MODE_INNER (mode);
5408 /* Classification of atomic types. */
5413 classes[0] = X86_64_SSE_CLASS;
5416 classes[0] = X86_64_SSE_CLASS;
5417 classes[1] = X86_64_SSEUP_CLASS;
5427 int size = (bit_offset % 64)+ (int) GET_MODE_BITSIZE (mode);
5431 classes[0] = X86_64_INTEGERSI_CLASS;
5434 else if (size <= 64)
5436 classes[0] = X86_64_INTEGER_CLASS;
5439 else if (size <= 64+32)
5441 classes[0] = X86_64_INTEGER_CLASS;
5442 classes[1] = X86_64_INTEGERSI_CLASS;
5445 else if (size <= 64+64)
5447 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5455 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5459 /* OImode shouldn't be used directly. */
5464 if (!(bit_offset % 64))
5465 classes[0] = X86_64_SSESF_CLASS;
5467 classes[0] = X86_64_SSE_CLASS;
5470 classes[0] = X86_64_SSEDF_CLASS;
5473 classes[0] = X86_64_X87_CLASS;
5474 classes[1] = X86_64_X87UP_CLASS;
5477 classes[0] = X86_64_SSE_CLASS;
5478 classes[1] = X86_64_SSEUP_CLASS;
5481 classes[0] = X86_64_SSE_CLASS;
5482 if (!(bit_offset % 64))
5488 if (!warned && warn_psabi)
5491 inform (input_location,
5492 "The ABI of passing structure with complex float"
5493 " member has changed in GCC 4.4");
5495 classes[1] = X86_64_SSESF_CLASS;
5499 classes[0] = X86_64_SSEDF_CLASS;
5500 classes[1] = X86_64_SSEDF_CLASS;
5503 classes[0] = X86_64_COMPLEX_X87_CLASS;
5506 /* This mode is larger than 16 bytes. */
5514 classes[0] = X86_64_SSE_CLASS;
5515 classes[1] = X86_64_SSEUP_CLASS;
5516 classes[2] = X86_64_SSEUP_CLASS;
5517 classes[3] = X86_64_SSEUP_CLASS;
5525 classes[0] = X86_64_SSE_CLASS;
5526 classes[1] = X86_64_SSEUP_CLASS;
5534 classes[0] = X86_64_SSE_CLASS;
5540 gcc_assert (VECTOR_MODE_P (mode));
5545 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5547 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5548 classes[0] = X86_64_INTEGERSI_CLASS;
5550 classes[0] = X86_64_INTEGER_CLASS;
5551 classes[1] = X86_64_INTEGER_CLASS;
5552 return 1 + (bytes > 8);
5556 /* Examine the argument and return the number of registers required in each
5557 class. Return 0 iff the parameter should be passed in memory. */
5559 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5560 int *int_nregs, int *sse_nregs)
5562 enum x86_64_reg_class regclass[MAX_CLASSES];
5563 int n = classify_argument (mode, type, regclass, 0);
5569 for (n--; n >= 0; n--)
5570 switch (regclass[n])
5572 case X86_64_INTEGER_CLASS:
5573 case X86_64_INTEGERSI_CLASS:
5576 case X86_64_SSE_CLASS:
5577 case X86_64_SSESF_CLASS:
5578 case X86_64_SSEDF_CLASS:
5581 case X86_64_NO_CLASS:
5582 case X86_64_SSEUP_CLASS:
5584 case X86_64_X87_CLASS:
5585 case X86_64_X87UP_CLASS:
5589 case X86_64_COMPLEX_X87_CLASS:
5590 return in_return ? 2 : 0;
5591 case X86_64_MEMORY_CLASS:
5597 /* Construct container for the argument used by GCC interface. See
5598 FUNCTION_ARG for the detailed description. */
5601 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5602 const_tree type, int in_return, int nintregs, int nsseregs,
5603 const int *intreg, int sse_regno)
5605 /* The following variables hold the static issued_error state. */
5606 static bool issued_sse_arg_error;
5607 static bool issued_sse_ret_error;
5608 static bool issued_x87_ret_error;
5610 enum machine_mode tmpmode;
5612 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5613 enum x86_64_reg_class regclass[MAX_CLASSES];
5617 int needed_sseregs, needed_intregs;
5618 rtx exp[MAX_CLASSES];
5621 n = classify_argument (mode, type, regclass, 0);
5624 if (!examine_argument (mode, type, in_return, &needed_intregs,
5627 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5630 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5631 some less clueful developer tries to use floating-point anyway. */
5632 if (needed_sseregs && !TARGET_SSE)
5636 if (!issued_sse_ret_error)
5638 error ("SSE register return with SSE disabled");
5639 issued_sse_ret_error = true;
5642 else if (!issued_sse_arg_error)
5644 error ("SSE register argument with SSE disabled");
5645 issued_sse_arg_error = true;
5650 /* Likewise, error if the ABI requires us to return values in the
5651 x87 registers and the user specified -mno-80387. */
5652 if (!TARGET_80387 && in_return)
5653 for (i = 0; i < n; i++)
5654 if (regclass[i] == X86_64_X87_CLASS
5655 || regclass[i] == X86_64_X87UP_CLASS
5656 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5658 if (!issued_x87_ret_error)
5660 error ("x87 register return with x87 disabled");
5661 issued_x87_ret_error = true;
5666 /* First construct simple cases. Avoid SCmode, since we want to use
5667 a single register to pass this type. */
5668 if (n == 1 && mode != SCmode)
5669 switch (regclass[0])
5671 case X86_64_INTEGER_CLASS:
5672 case X86_64_INTEGERSI_CLASS:
5673 return gen_rtx_REG (mode, intreg[0]);
5674 case X86_64_SSE_CLASS:
5675 case X86_64_SSESF_CLASS:
5676 case X86_64_SSEDF_CLASS:
5677 if (mode != BLKmode)
5678 return gen_reg_or_parallel (mode, orig_mode,
5679 SSE_REGNO (sse_regno));
5681 case X86_64_X87_CLASS:
5682 case X86_64_COMPLEX_X87_CLASS:
5683 return gen_rtx_REG (mode, FIRST_STACK_REG);
5684 case X86_64_NO_CLASS:
5685 /* Zero sized array, struct or class. */
5690 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5691 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5692 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
if (n == 4
    && regclass[0] == X86_64_SSE_CLASS
    && regclass[1] == X86_64_SSEUP_CLASS
    && regclass[2] == X86_64_SSEUP_CLASS
    && regclass[3] == X86_64_SSEUP_CLASS
    && mode != BLKmode)
  return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
if (n == 2
    && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
  return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5704 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5705 && regclass[1] == X86_64_INTEGER_CLASS
5706 && (mode == CDImode || mode == TImode || mode == TFmode)
5707 && intreg[0] + 1 == intreg[1])
5708 return gen_rtx_REG (mode, intreg[0]);
5710 /* Otherwise figure out the entries of the PARALLEL. */
5711 for (i = 0; i < n; i++)
5715 switch (regclass[i])
5717 case X86_64_NO_CLASS:
5719 case X86_64_INTEGER_CLASS:
5720 case X86_64_INTEGERSI_CLASS:
5721 /* Merge TImodes on aligned occasions here too. */
5722 if (i * 8 + 8 > bytes)
5723 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5724 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
/* We've requested 24 bytes for which we don't have a mode.  Use DImode.  */
5729 if (tmpmode == BLKmode)
5731 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5732 gen_rtx_REG (tmpmode, *intreg),
5736 case X86_64_SSESF_CLASS:
5737 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5738 gen_rtx_REG (SFmode,
5739 SSE_REGNO (sse_regno)),
5743 case X86_64_SSEDF_CLASS:
5744 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5745 gen_rtx_REG (DFmode,
5746 SSE_REGNO (sse_regno)),
5750 case X86_64_SSE_CLASS:
5758 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
5768 && regclass[1] == X86_64_SSEUP_CLASS
5769 && regclass[2] == X86_64_SSEUP_CLASS
5770 && regclass[3] == X86_64_SSEUP_CLASS);
5777 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5778 gen_rtx_REG (tmpmode,
5779 SSE_REGNO (sse_regno)),
5788 /* Empty aligned struct, union or class. */
5792 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5793 for (i = 0; i < nexps; i++)
5794 XVECEXP (ret, 0, i) = exp [i];
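/* Editor's illustration of the result: for the struct s example above,
   passed in %xmm0 and %rdi, the PARALLEL built here looks roughly like

     (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                (expr_list (reg:DI di) (const_int 8))])

   where each const_int gives the byte offset of the piece within the
   argument.  */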
5798 /* Update the data in CUM to advance over an argument of mode MODE
5799 and data type TYPE. (TYPE is null for libcalls where that information
5800 may not be available.) */
5803 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5804 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5820 cum->words += words;
5821 cum->nregs -= words;
5822 cum->regno += words;
5824 if (cum->nregs <= 0)
5832 /* OImode shouldn't be used directly. */
5836 if (cum->float_in_sse < 2)
5839 if (cum->float_in_sse < 1)
5856 if (!type || !AGGREGATE_TYPE_P (type))
5858 cum->sse_words += words;
5859 cum->sse_nregs -= 1;
5860 cum->sse_regno += 1;
5861 if (cum->sse_nregs <= 0)
5875 if (!type || !AGGREGATE_TYPE_P (type))
5877 cum->mmx_words += words;
5878 cum->mmx_nregs -= 1;
5879 cum->mmx_regno += 1;
5880 if (cum->mmx_nregs <= 0)
5891 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5892 tree type, HOST_WIDE_INT words, int named)
5894 int int_nregs, sse_nregs;
/* Unnamed 256bit vector mode parameters are passed on the stack.  */
5897 if (!named && VALID_AVX256_REG_MODE (mode))
5900 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
5901 cum->words += words;
5902 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
5904 cum->nregs -= int_nregs;
5905 cum->sse_nregs -= sse_nregs;
5906 cum->regno += int_nregs;
5907 cum->sse_regno += sse_nregs;
5910 cum->words += words;
5914 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
5915 HOST_WIDE_INT words)
/* Otherwise, this should be passed indirectly.  */
5918 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
5920 cum->words += words;
5929 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5930 tree type, int named)
5932 HOST_WIDE_INT bytes, words;
5934 if (mode == BLKmode)
5935 bytes = int_size_in_bytes (type);
5937 bytes = GET_MODE_SIZE (mode);
5938 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5941 mode = type_natural_mode (type, NULL);
5943 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
5944 function_arg_advance_ms_64 (cum, bytes, words);
5945 else if (TARGET_64BIT)
5946 function_arg_advance_64 (cum, mode, type, words, named);
5948 function_arg_advance_32 (cum, mode, type, bytes, words);
5951 /* Define where to put the arguments to a function.
5952 Value is zero to push the argument on the stack,
5953 or a hard register in which to store the argument.
5955 MODE is the argument's machine mode.
5956 TYPE is the data type of the argument (as a tree).
This is null for libcalls where that information may
not be available.
5959 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5960 the preceding args and about the function being called.
5961 NAMED is nonzero if this argument is a named parameter
5962 (otherwise it is an extra parameter matching an ellipsis). */
5965 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5966 enum machine_mode orig_mode, tree type,
5967 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5969 static bool warnedsse, warnedmmx;
5971 /* Avoid the AL settings for the Unix64 ABI. */
5972 if (mode == VOIDmode)
5988 if (words <= cum->nregs)
5990 int regno = cum->regno;
5992 /* Fastcall allocates the first two DWORD (SImode) or
5993 smaller arguments to ECX and EDX if it isn't an
5999 || (type && AGGREGATE_TYPE_P (type)))
/* ECX, not EAX, is the first allocated register.  */
6003 if (regno == AX_REG)
6006 return gen_rtx_REG (mode, regno);
6011 if (cum->float_in_sse < 2)
6014 if (cum->float_in_sse < 1)
6018 /* In 32bit, we pass TImode in xmm registers. */
6025 if (!type || !AGGREGATE_TYPE_P (type))
6027 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
6030 warning (0, "SSE vector argument without SSE enabled "
6034 return gen_reg_or_parallel (mode, orig_mode,
6035 cum->sse_regno + FIRST_SSE_REG);
6040 /* OImode shouldn't be used directly. */
6049 if (!type || !AGGREGATE_TYPE_P (type))
6052 return gen_reg_or_parallel (mode, orig_mode,
6053 cum->sse_regno + FIRST_SSE_REG);
6063 if (!type || !AGGREGATE_TYPE_P (type))
6065 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6068 warning (0, "MMX vector argument without MMX enabled "
6072 return gen_reg_or_parallel (mode, orig_mode,
6073 cum->mmx_regno + FIRST_MMX_REG);
6082 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6083 enum machine_mode orig_mode, tree type, int named)
6085 /* Handle a hidden AL argument containing number of registers
6086 for varargs x86-64 functions. */
6087 if (mode == VOIDmode)
return GEN_INT (cum->maybe_vaarg
                ? (cum->sse_nregs < 0
                   ? (cum->call_abi == ix86_abi
                      ? SSE_REGPARM_MAX
                      : (ix86_abi != SYSV_ABI
                         ? X86_64_SSE_REGPARM_MAX
                         : X86_64_MS_SSE_REGPARM_MAX))
                   : cum->sse_regno)
                : -1);
/* Unnamed 256bit vector mode parameters are passed on the stack.  */
return construct_container (mode, orig_mode, type, 0, cum->nregs,
                            cum->sse_nregs,
                            &x86_64_int_parameter_registers [cum->regno],
                            cum->sse_regno);
6122 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6123 enum machine_mode orig_mode, int named,
6124 HOST_WIDE_INT bytes)
6128 /* We need to add clobber for MS_ABI->SYSV ABI calls in expand_call.
6129 We use value of -2 to specify that current function call is MSABI. */
6130 if (mode == VOIDmode)
6131 return GEN_INT (-2);
6133 /* If we've run out of registers, it goes on the stack. */
6134 if (cum->nregs == 0)
6137 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6139 /* Only floating point modes are passed in anything but integer regs. */
6140 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6143 regno = cum->regno + FIRST_SSE_REG;
6148 /* Unnamed floating parameters are passed in both the
6149 SSE and integer registers. */
6150 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6151 t2 = gen_rtx_REG (mode, regno);
6152 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6153 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6154 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
6157 /* Handle aggregated types passed in register. */
6158 if (orig_mode == BLKmode)
6160 if (bytes > 0 && bytes <= 8)
mode = (bytes > 4 ? DImode : SImode);
if (mode == BLKmode)
  mode = DImode;
6166 return gen_reg_or_parallel (mode, orig_mode, regno);
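/* Editor's note: the Microsoft x64 convention is slot-based, so for
   f (int, double, int, double) the arguments land in %ecx, %xmm1, %r8d
   and %xmm3.  The PARALLEL built above for unnamed floating arguments
   makes the value available in both the SSE and the integer register
   of the same slot, since a varargs callee reads the integer one.  */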
6170 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
6171 tree type, int named)
6173 enum machine_mode mode = omode;
6174 HOST_WIDE_INT bytes, words;
6176 if (mode == BLKmode)
6177 bytes = int_size_in_bytes (type);
6179 bytes = GET_MODE_SIZE (mode);
6180 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6182 /* To simplify the code below, represent vector types with a vector mode
6183 even if MMX/SSE are not active. */
6184 if (type && TREE_CODE (type) == VECTOR_TYPE)
6185 mode = type_natural_mode (type, cum);
6187 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6188 return function_arg_ms_64 (cum, mode, omode, named, bytes);
6189 else if (TARGET_64BIT)
6190 return function_arg_64 (cum, mode, omode, type, named);
6192 return function_arg_32 (cum, mode, omode, type, bytes, words);
6195 /* A C expression that indicates when an argument must be passed by
6196 reference. If nonzero for an argument, a copy of that argument is
6197 made in memory and a pointer to the argument is passed instead of
6198 the argument itself. The pointer is passed in whatever way is
6199 appropriate for passing a pointer to that type. */
6202 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6203 enum machine_mode mode ATTRIBUTE_UNUSED,
6204 const_tree type, bool named ATTRIBUTE_UNUSED)
6206 /* See Windows x64 Software Convention. */
6207 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6209 int msize = (int) GET_MODE_SIZE (mode);
6212 /* Arrays are passed by reference. */
6213 if (TREE_CODE (type) == ARRAY_TYPE)
6216 if (AGGREGATE_TYPE_P (type))
6218 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6219 are passed by reference. */
6220 msize = int_size_in_bytes (type);
6224 /* __m128 is passed by reference. */
6226 case 1: case 2: case 4: case 8:
6232 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
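/* Examples (editor's sketch): under the MS ABI a 3-byte struct or a
   16-byte __m128 argument is passed by reference, while an 8-byte
   struct travels directly in a register; variable-size types are
   passed by reference on 64-bit targets as the check above shows.  */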
6238 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
6241 contains_aligned_value_p (tree type)
6243 enum machine_mode mode = TYPE_MODE (type);
6244 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6248 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6250 if (TYPE_ALIGN (type) < 128)
6253 if (AGGREGATE_TYPE_P (type))
6255 /* Walk the aggregates recursively. */
6256 switch (TREE_CODE (type))
6260 case QUAL_UNION_TYPE:
6264 /* Walk all the structure fields. */
6265 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6267 if (TREE_CODE (field) == FIELD_DECL
6268 && contains_aligned_value_p (TREE_TYPE (field)))
/* Just for use if some languages pass arrays by value.  */
6276 if (contains_aligned_value_p (TREE_TYPE (type)))
6287 /* Gives the alignment boundary, in bits, of an argument with the
6288 specified mode and type. */
6291 ix86_function_arg_boundary (enum machine_mode mode, tree type)
6296 /* Since canonical type is used for call, we convert it to
6297 canonical type if needed. */
6298 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
6299 type = TYPE_CANONICAL (type);
6300 align = TYPE_ALIGN (type);
6303 align = GET_MODE_ALIGNMENT (mode);
6304 if (align < PARM_BOUNDARY)
6305 align = PARM_BOUNDARY;
6306 /* In 32bit, only _Decimal128 and __float128 are aligned to their
6307 natural boundaries. */
6308 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
/* The i386 ABI defines all arguments to be 4 byte aligned.  We have to
   make an exception for SSE modes since these require 128bit
   alignment.

   The handling here differs from field_alignment.  ICC aligns MMX
   arguments to 4 byte boundaries, while structure fields are aligned
   to 8 byte boundaries.  */
6319 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6320 align = PARM_BOUNDARY;
6324 if (!contains_aligned_value_p (type))
6325 align = PARM_BOUNDARY;
6328 if (align > BIGGEST_ALIGNMENT)
6329 align = BIGGEST_ALIGNMENT;
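/* For instance (editor's sketch): on 32-bit targets an int argument
   stays at the 4-byte PARM_BOUNDARY, while an __m128 argument (with
   TARGET_SSE) or a _Decimal128 is placed on a 16-byte boundary.  */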
6333 /* Return true if N is a possible register number of function value. */
6336 ix86_function_value_regno_p (const unsigned int regno)
6343 case FIRST_FLOAT_REG:
/* TODO: The function should depend on the current function ABI, but
   builtins.c would need updating then.  Therefore we use the
   default ABI.  */
6347 if (TARGET_64BIT && ix86_abi == MS_ABI)
6349 return TARGET_FLOAT_RETURNS_IN_80387;
6355 if (TARGET_MACHO || TARGET_64BIT)
6363 /* Define how to find the value returned by a function.
6364 VALTYPE is the data type of the value (as a tree).
6365 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6366 otherwise, FUNC is 0. */
6369 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6370 const_tree fntype, const_tree fn)
6374 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
6375 we normally prevent this case when mmx is not available. However
6376 some ABIs may require the result to be returned like DImode. */
6377 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6378 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
6380 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
6381 we prevent this case when sse is not available. However some ABIs
6382 may require the result to be returned like integer TImode. */
6383 else if (mode == TImode
6384 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6385 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6387 /* 32-byte vector modes in %ymm0. */
6388 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6389 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6391 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6392 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6393 regno = FIRST_FLOAT_REG;
6395 /* Most things go in %eax. */
6398 /* Override FP return register with %xmm0 for local functions when
6399 SSE math is enabled or for functions with sseregparm attribute. */
6400 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6402 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6403 if ((sse_level >= 1 && mode == SFmode)
6404 || (sse_level == 2 && mode == DFmode))
6405 regno = FIRST_SSE_REG;
6408 /* OImode shouldn't be used directly. */
6409 gcc_assert (mode != OImode);
6411 return gen_rtx_REG (orig_mode, regno);
6415 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6420 /* Handle libcalls, which don't provide a type node. */
6421 if (valtype == NULL)
6433 return gen_rtx_REG (mode, FIRST_SSE_REG);
6436 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6440 return gen_rtx_REG (mode, AX_REG);
6444 ret = construct_container (mode, orig_mode, valtype, 1,
6445 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6446 x86_64_int_return_registers, 0);
6448 /* For zero sized structures, construct_container returns NULL, but we
6449 need to keep rest of compiler happy by returning meaningful value. */
6451 ret = gen_rtx_REG (orig_mode, AX_REG);
6457 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6459 unsigned int regno = AX_REG;
6463 switch (GET_MODE_SIZE (mode))
6466 if((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6467 && !COMPLEX_MODE_P (mode))
6468 regno = FIRST_SSE_REG;
6472 if (mode == SFmode || mode == DFmode)
6473 regno = FIRST_SSE_REG;
6479 return gen_rtx_REG (orig_mode, regno);
6483 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6484 enum machine_mode orig_mode, enum machine_mode mode)
6486 const_tree fn, fntype;
6489 if (fntype_or_decl && DECL_P (fntype_or_decl))
6490 fn = fntype_or_decl;
6491 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6493 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6494 return function_value_ms_64 (orig_mode, mode);
6495 else if (TARGET_64BIT)
6496 return function_value_64 (orig_mode, mode, valtype);
6498 return function_value_32 (orig_mode, mode, fntype, fn);
6502 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6503 bool outgoing ATTRIBUTE_UNUSED)
6505 enum machine_mode mode, orig_mode;
6507 orig_mode = TYPE_MODE (valtype);
6508 mode = type_natural_mode (valtype, NULL);
6509 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6513 ix86_libcall_value (enum machine_mode mode)
6515 return ix86_function_value_1 (NULL, NULL, mode, mode);
6518 /* Return true iff type is returned in memory. */
6520 static int ATTRIBUTE_UNUSED
6521 return_in_memory_32 (const_tree type, enum machine_mode mode)
6525 if (mode == BLKmode)
6528 size = int_size_in_bytes (type);
6530 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6533 if (VECTOR_MODE_P (mode) || mode == TImode)
6535 /* User-created vectors small enough to fit in EAX. */
/* MMX/3dNow values are returned in MM0,
   except when it doesn't exist.  */
6542 return (TARGET_MMX ? 0 : 1);
6544 /* SSE values are returned in XMM0, except when it doesn't exist. */
6546 return (TARGET_SSE ? 0 : 1);
6548 /* AVX values are returned in YMM0, except when it doesn't exist. */
6550 return TARGET_AVX ? 0 : 1;
6559 /* OImode shouldn't be used directly. */
6560 gcc_assert (mode != OImode);
6565 static int ATTRIBUTE_UNUSED
6566 return_in_memory_64 (const_tree type, enum machine_mode mode)
6568 int needed_intregs, needed_sseregs;
6569 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6572 static int ATTRIBUTE_UNUSED
6573 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6575 HOST_WIDE_INT size = int_size_in_bytes (type);
6577 /* __m128 is returned in xmm0. */
6578 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6579 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
/* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes.  */
6583 return (size != 1 && size != 2 && size != 4 && size != 8);
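/* E.g. (editor's sketch): on the MS ABI an 8-byte struct comes back in
   %rax and a 16-byte __m128 in %xmm0, while a 12-byte struct is
   returned through a hidden pointer to caller-provided memory.  */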
6587 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6589 #ifdef SUBTARGET_RETURN_IN_MEMORY
6590 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6592 const enum machine_mode mode = type_natural_mode (type, NULL);
6596 if (ix86_function_type_abi (fntype) == MS_ABI)
6597 return return_in_memory_ms_64 (type, mode);
6599 return return_in_memory_64 (type, mode);
6602 return return_in_memory_32 (type, mode);
6606 /* Return false iff TYPE is returned in memory. This version is used
6607 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6608 but differs notably in that when MMX is available, 8-byte vectors
6609 are returned in memory, rather than in MMX registers. */
6612 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6615 enum machine_mode mode = type_natural_mode (type, NULL);
6618 return return_in_memory_64 (type, mode);
6620 if (mode == BLKmode)
6623 size = int_size_in_bytes (type);
6625 if (VECTOR_MODE_P (mode))
6627 /* Return in memory only if MMX registers *are* available. This
6628 seems backwards, but it is consistent with the existing
6635 else if (mode == TImode)
6637 else if (mode == XFmode)
6643 /* When returning SSE vector types, we have a choice of either
6644 (1) being abi incompatible with a -march switch, or
6645 (2) generating an error.
6646 Given no good solution, I think the safest thing is one warning.
6647 The user won't be able to use -Werror, but....
6649 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6650 called in response to actually generating a caller or callee that
6651 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6652 via aggregate_value_p for general type probing from tree-ssa. */
6655 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6657 static bool warnedsse, warnedmmx;
6659 if (!TARGET_64BIT && type)
6661 /* Look at the return type of the function, not the function type. */
6662 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6664 if (!TARGET_SSE && !warnedsse)
6667 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6670 warning (0, "SSE vector return without SSE enabled "
6675 if (!TARGET_MMX && !warnedmmx)
6677 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6680 warning (0, "MMX vector return without MMX enabled "
6690 /* Create the va_list data type. */
/* Returns the calling-convention-specific va_list data type.
   The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI.  */
6696 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6698 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6700 /* For i386 we use plain pointer to argument area. */
6701 if (!TARGET_64BIT || abi == MS_ABI)
6702 return build_pointer_type (char_type_node);
6704 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6705 type_decl = build_decl (BUILTINS_LOCATION,
6706 TYPE_DECL, get_identifier ("__va_list_tag"), record);
6708 f_gpr = build_decl (BUILTINS_LOCATION,
6709 FIELD_DECL, get_identifier ("gp_offset"),
6710 unsigned_type_node);
6711 f_fpr = build_decl (BUILTINS_LOCATION,
6712 FIELD_DECL, get_identifier ("fp_offset"),
6713 unsigned_type_node);
6714 f_ovf = build_decl (BUILTINS_LOCATION,
6715 FIELD_DECL, get_identifier ("overflow_arg_area"),
6717 f_sav = build_decl (BUILTINS_LOCATION,
6718 FIELD_DECL, get_identifier ("reg_save_area"),
6721 va_list_gpr_counter_field = f_gpr;
6722 va_list_fpr_counter_field = f_fpr;
6724 DECL_FIELD_CONTEXT (f_gpr) = record;
6725 DECL_FIELD_CONTEXT (f_fpr) = record;
6726 DECL_FIELD_CONTEXT (f_ovf) = record;
6727 DECL_FIELD_CONTEXT (f_sav) = record;
6729 TREE_CHAIN (record) = type_decl;
6730 TYPE_NAME (record) = type_decl;
6731 TYPE_FIELDS (record) = f_gpr;
6732 TREE_CHAIN (f_gpr) = f_fpr;
6733 TREE_CHAIN (f_fpr) = f_ovf;
6734 TREE_CHAIN (f_ovf) = f_sav;
6736 layout_type (record);
6738 /* The correct type is an array type of one element. */
6739 return build_array_type (record, build_index_type (size_zero_node));
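/* Editor's illustration: the record laid out above corresponds to the
   familiar SysV x86-64 type

     typedef struct __va_list_tag {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __builtin_va_list[1];  */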
6742 /* Setup the builtin va_list data type and for 64-bit the additional
6743 calling convention specific va_list data types. */
6746 ix86_build_builtin_va_list (void)
6748 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
6750 /* Initialize abi specific va_list builtin types. */
6754 if (ix86_abi == MS_ABI)
6756 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6757 if (TREE_CODE (t) != RECORD_TYPE)
6758 t = build_variant_type_copy (t);
6759 sysv_va_list_type_node = t;
6764 if (TREE_CODE (t) != RECORD_TYPE)
6765 t = build_variant_type_copy (t);
6766 sysv_va_list_type_node = t;
6768 if (ix86_abi != MS_ABI)
6770 t = ix86_build_builtin_va_list_abi (MS_ABI);
6771 if (TREE_CODE (t) != RECORD_TYPE)
6772 t = build_variant_type_copy (t);
6773 ms_va_list_type_node = t;
6778 if (TREE_CODE (t) != RECORD_TYPE)
6779 t = build_variant_type_copy (t);
6780 ms_va_list_type_node = t;
6787 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6790 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6798 int regparm = ix86_regparm;
6800 if (cum->call_abi != ix86_abi)
6801 regparm = (ix86_abi != SYSV_ABI
6802 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
6804 /* GPR size of varargs save area. */
6805 if (cfun->va_list_gpr_size)
6806 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6808 ix86_varargs_gpr_size = 0;
6810 /* FPR size of varargs save area. We don't need it if we don't pass
6811 anything in SSE registers. */
6812 if (cum->sse_nregs && cfun->va_list_fpr_size)
6813 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6815 ix86_varargs_fpr_size = 0;
6817 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6820 save_area = frame_pointer_rtx;
6821 set = get_varargs_alias_set ();
6823 for (i = cum->regno;
6825 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6828 mem = gen_rtx_MEM (Pmode,
6829 plus_constant (save_area, i * UNITS_PER_WORD));
6830 MEM_NOTRAP_P (mem) = 1;
6831 set_mem_alias_set (mem, set);
6832 emit_move_insn (mem, gen_rtx_REG (Pmode,
6833 x86_64_int_parameter_registers[i]));
6836 if (ix86_varargs_fpr_size)
/* Now emit code to save SSE registers.  The AX parameter contains the
   number of SSE parameter registers used to call this function.  We use
   the sse_prologue_save insn template, which produces a computed jump
   across the SSE saves.  We need some preparation work to get this
   working.  */
6843 label = gen_label_rtx ();
6845 nsse_reg = gen_reg_rtx (Pmode);
6846 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
/* Compute the address of the memory block we save into.  We always use
   a pointer pointing 127 bytes after the first byte to store - this
   keeps the instruction size limited to 4 bytes (5 bytes for AVX) with
   a one byte displacement.  */
6852 tmp_reg = gen_reg_rtx (Pmode);
6853 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6854 plus_constant (save_area,
6855 ix86_varargs_gpr_size + 127)));
6856 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6857 MEM_NOTRAP_P (mem) = 1;
6858 set_mem_alias_set (mem, set);
6859 set_mem_align (mem, 64);
6861 /* And finally do the dirty job! */
6862 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6863 GEN_INT (cum->sse_regno), label,
6864 gen_reg_rtx (Pmode)));
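/* Editor's note on the save-area shape assumed above: the GPR part
   holds X86_64_REGPARM_MAX (6) words for %rdi, %rsi, %rdx, %rcx, %r8
   and %r9 (48 bytes), followed by X86_64_SSE_REGPARM_MAX (8) 16-byte
   slots for %xmm0-%xmm7, giving the usual 176-byte register save
   area.  */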
6869 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6871 alias_set_type set = get_varargs_alias_set ();
6874 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
6878 mem = gen_rtx_MEM (Pmode,
6879 plus_constant (virtual_incoming_args_rtx,
6880 i * UNITS_PER_WORD));
6881 MEM_NOTRAP_P (mem) = 1;
6882 set_mem_alias_set (mem, set);
6884 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6885 emit_move_insn (mem, reg);
6890 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6891 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6894 CUMULATIVE_ARGS next_cum;
/* This argument doesn't appear to be used anymore, which is good,
   because the old code here didn't suppress rtl generation.  */
6899 gcc_assert (!no_rtl);
6904 fntype = TREE_TYPE (current_function_decl);
6906 /* For varargs, we do not want to skip the dummy va_dcl argument.
6907 For stdargs, we do want to skip the last named argument. */
6909 if (stdarg_p (fntype))
6910 function_arg_advance (&next_cum, mode, type, 1);
6912 if (cum->call_abi == MS_ABI)
6913 setup_incoming_varargs_ms_64 (&next_cum);
6915 setup_incoming_varargs_64 (&next_cum);
/* Checks whether TYPE is a va_list that is a plain char pointer.  */
6921 is_va_list_char_pointer (tree type)
6925 /* For 32-bit it is always true. */
6928 canonic = ix86_canonical_va_list_type (type);
6929 return (canonic == ms_va_list_type_node
6930 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
6933 /* Implement va_start. */
6936 ix86_va_start (tree valist, rtx nextarg)
6938 HOST_WIDE_INT words, n_gpr, n_fpr;
6939 tree f_gpr, f_fpr, f_ovf, f_sav;
6940 tree gpr, fpr, ovf, sav, t;
6943 /* Only 64bit target needs something special. */
6944 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6946 std_expand_builtin_va_start (valist, nextarg);
6950 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6951 f_fpr = TREE_CHAIN (f_gpr);
6952 f_ovf = TREE_CHAIN (f_fpr);
6953 f_sav = TREE_CHAIN (f_ovf);
6955 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
6956 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6957 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6958 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6959 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6961 /* Count number of gp and fp argument registers used. */
6962 words = crtl->args.info.words;
6963 n_gpr = crtl->args.info.regno;
6964 n_fpr = crtl->args.info.sse_regno;
6966 if (cfun->va_list_gpr_size)
6968 type = TREE_TYPE (gpr);
6969 t = build2 (MODIFY_EXPR, type,
6970 gpr, build_int_cst (type, n_gpr * 8));
6971 TREE_SIDE_EFFECTS (t) = 1;
6972 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6975 if (TARGET_SSE && cfun->va_list_fpr_size)
6977 type = TREE_TYPE (fpr);
6978 t = build2 (MODIFY_EXPR, type, fpr,
6979 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
6980 TREE_SIDE_EFFECTS (t) = 1;
6981 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6984 /* Find the overflow area. */
6985 type = TREE_TYPE (ovf);
6986 t = make_tree (type, crtl->args.internal_arg_pointer);
6988 t = build2 (POINTER_PLUS_EXPR, type, t,
6989 size_int (words * UNITS_PER_WORD));
6990 t = build2 (MODIFY_EXPR, type, ovf, t);
6991 TREE_SIDE_EFFECTS (t) = 1;
6992 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6994 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
/* Find the register save area.
   The function prologue saves it right above the stack frame.  */
6998 type = TREE_TYPE (sav);
6999 t = make_tree (type, frame_pointer_rtx);
7000 if (!ix86_varargs_gpr_size)
7001 t = build2 (POINTER_PLUS_EXPR, type, t,
7002 size_int (-8 * X86_64_REGPARM_MAX));
7003 t = build2 (MODIFY_EXPR, type, sav, t);
7004 TREE_SIDE_EFFECTS (t) = 1;
7005 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
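/* Illustration (editor's sketch): for  int f (int a, ...)  with one
   named integer argument, va_start sets gp_offset = 1*8 = 8 and
   fp_offset = 48 + 0*16 = 48, points overflow_arg_area past any named
   stack words, and points reg_save_area at the block the prologue
   saved.  */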
7009 /* Implement va_arg. */
7012 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7015 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
7016 tree f_gpr, f_fpr, f_ovf, f_sav;
7017 tree gpr, fpr, ovf, sav, t;
7019 tree lab_false, lab_over = NULL_TREE;
7024 enum machine_mode nat_mode;
7025 unsigned int arg_boundary;
7027 /* Only 64bit target needs something special. */
7028 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7029 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7031 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7032 f_fpr = TREE_CHAIN (f_gpr);
7033 f_ovf = TREE_CHAIN (f_fpr);
7034 f_sav = TREE_CHAIN (f_ovf);
7036 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7037 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7038 valist = build_va_arg_indirect_ref (valist);
7039 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7040 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7041 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7043 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7045 type = build_pointer_type (type);
7046 size = int_size_in_bytes (type);
7047 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7049 nat_mode = type_natural_mode (type, NULL);
/* Unnamed 256bit vector mode parameters are passed on the stack.  */
7059 if (ix86_cfun_abi () == SYSV_ABI)
7066 container = construct_container (nat_mode, TYPE_MODE (type),
7067 type, 0, X86_64_REGPARM_MAX,
7068 X86_64_SSE_REGPARM_MAX, intreg,
7073 /* Pull the value out of the saved registers. */
7075 addr = create_tmp_var (ptr_type_node, "addr");
7079 int needed_intregs, needed_sseregs;
7081 tree int_addr, sse_addr;
7083 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7084 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7086 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7088 need_temp = (!REG_P (container)
7089 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7090 || TYPE_ALIGN (type) > 128));
/* In case we are passing a structure, verify that it is a consecutive
   block on the register save area.  If not, we need to do moves.  */
7094 if (!need_temp && !REG_P (container))
/* Verify that all registers are strictly consecutive.  */
7097 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7101 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7103 rtx slot = XVECEXP (container, 0, i);
7104 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7105 || INTVAL (XEXP (slot, 1)) != i * 16)
7113 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7115 rtx slot = XVECEXP (container, 0, i);
7116 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7117 || INTVAL (XEXP (slot, 1)) != i * 8)
7129 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7130 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7133 /* First ensure that we fit completely in registers. */
7136 t = build_int_cst (TREE_TYPE (gpr),
7137 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7138 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7139 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7140 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7141 gimplify_and_add (t, pre_p);
7145 t = build_int_cst (TREE_TYPE (fpr),
7146 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7147 + X86_64_REGPARM_MAX * 8);
7148 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7149 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7150 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7151 gimplify_and_add (t, pre_p);
7154 /* Compute index to start of area used for integer regs. */
7157 /* int_addr = gpr + sav; */
7158 t = fold_convert (sizetype, gpr);
7159 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7160 gimplify_assign (int_addr, t, pre_p);
7164 /* sse_addr = fpr + sav; */
7165 t = fold_convert (sizetype, fpr);
7166 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7167 gimplify_assign (sse_addr, t, pre_p);
7172 tree temp = create_tmp_var (type, "va_arg_tmp");
7175 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
7176 gimplify_assign (addr, t, pre_p);
7178 for (i = 0; i < XVECLEN (container, 0); i++)
7180 rtx slot = XVECEXP (container, 0, i);
7181 rtx reg = XEXP (slot, 0);
7182 enum machine_mode mode = GET_MODE (reg);
7183 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
7184 tree addr_type = build_pointer_type (piece_type);
7185 tree daddr_type = build_pointer_type_for_mode (piece_type,
7189 tree dest_addr, dest;
7191 if (SSE_REGNO_P (REGNO (reg)))
7193 src_addr = sse_addr;
7194 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
7198 src_addr = int_addr;
7199 src_offset = REGNO (reg) * 8;
7201 src_addr = fold_convert (addr_type, src_addr);
7202 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
7203 size_int (src_offset));
7204 src = build_va_arg_indirect_ref (src_addr);
7206 dest_addr = fold_convert (daddr_type, addr);
7207 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
7208 size_int (INTVAL (XEXP (slot, 1))));
7209 dest = build_va_arg_indirect_ref (dest_addr);
7211 gimplify_assign (dest, src, pre_p);
7217 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
7218 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7219 gimplify_assign (gpr, t, pre_p);
7224 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7225 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7226 gimplify_assign (fpr, t, pre_p);
7229 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7231 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7234 /* ... otherwise out of the overflow area. */
/* When the caller aligns a parameter on the stack, a parameter whose
   alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT will only be
   aligned to MAX_SUPPORTED_STACK_ALIGNMENT.  Match the callee with
   the caller here.  */
7240 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
7241 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7242 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7244 /* Care for on-stack alignment if needed. */
7245 if (arg_boundary <= 64
7246 || integer_zerop (TYPE_SIZE (type)))
7250 HOST_WIDE_INT align = arg_boundary / 8;
7251 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
7252 size_int (align - 1));
7253 t = fold_convert (sizetype, t);
7254 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7256 t = fold_convert (TREE_TYPE (ovf), t);
7257 if (crtl->stack_alignment_needed < arg_boundary)
7258 crtl->stack_alignment_needed = arg_boundary;
7260 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7261 gimplify_assign (addr, t, pre_p);
7263 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
7264 size_int (rsize * UNITS_PER_WORD));
7265 gimplify_assign (unshare_expr (ovf), t, pre_p);
7268 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7270 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
7271 addr = fold_convert (ptrtype, addr);
7274 addr = build_va_arg_indirect_ref (addr);
7275 return build_va_arg_indirect_ref (addr);
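/* The gimple emitted above for a single integer va_arg is roughly
   (editor's sketch):

     if (gp_offset >= 48) goto lab_false;
     addr = reg_save_area + gp_offset;
     gp_offset += 8;
     goto lab_over;
   lab_false:
     addr = overflow_arg_area;
     overflow_arg_area += 8;
   lab_over:
     result = *(type *) addr;  */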
7278 /* Return nonzero if OPNUM's MEM should be matched
7279 in movabs* patterns. */
7282 ix86_check_movabs (rtx insn, int opnum)
7286 set = PATTERN (insn);
7287 if (GET_CODE (set) == PARALLEL)
7288 set = XVECEXP (set, 0, 0);
7289 gcc_assert (GET_CODE (set) == SET);
7290 mem = XEXP (set, opnum);
7291 while (GET_CODE (mem) == SUBREG)
7292 mem = SUBREG_REG (mem);
7293 gcc_assert (MEM_P (mem));
7294 return (volatile_ok || !MEM_VOLATILE_P (mem));
7297 /* Initialize the table of extra 80387 mathematical constants. */
7300 init_ext_80387_constants (void)
7302 static const char * cst[5] =
7304 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7305 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7306 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7307 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7308 "3.1415926535897932385128089594061862044", /* 4: fldpi */
7312 for (i = 0; i < 5; i++)
7314 real_from_string (&ext_80387_constants_table[i], cst[i]);
7315 /* Ensure each constant is rounded to XFmode precision. */
7316 real_convert (&ext_80387_constants_table[i],
7317 XFmode, &ext_80387_constants_table[i]);
7320 ext_80387_constants_init = 1;
7323 /* Return true if the constant is something that can be loaded with
7324 a special instruction. */
7327 standard_80387_constant_p (rtx x)
7329 enum machine_mode mode = GET_MODE (x);
7333 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7336 if (x == CONST0_RTX (mode))
7338 if (x == CONST1_RTX (mode))
7341 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7343 /* For XFmode constants, try to find a special 80387 instruction when
7344 optimizing for size or on those CPUs that benefit from them. */
7346 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7350 if (! ext_80387_constants_init)
7351 init_ext_80387_constants ();
7353 for (i = 0; i < 5; i++)
7354 if (real_identical (&r, &ext_80387_constants_table[i]))
/* A load of the constant -0.0 or -1.0 will be split into a
   fldz;fchs or fld1;fchs sequence.  */
7360 if (real_isnegzero (&r))
7362 if (real_identical (&r, &dconstm1))
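/* Summary of the return values above (editor's note): 1 means 0.0
   (fldz), 2 means 1.0 (fld1), 3..7 select fldlg2, fldln2, fldl2e,
   fldl2t and fldpi from the table, and the -0.0 and -1.0 cases are
   loaded as fldz;fchs and fld1;fchs respectively.  */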
/* Return the opcode of the special instruction to be used to load
   the constant X.  */
7372 standard_80387_constant_opcode (rtx x)
7374 switch (standard_80387_constant_p (x))
7398 /* Return the CONST_DOUBLE representing the 80387 constant that is
7399 loaded by the specified special instruction. The argument IDX
7400 matches the return value from standard_80387_constant_p. */
7403 standard_80387_constant_rtx (int idx)
7407 if (! ext_80387_constants_init)
7408 init_ext_80387_constants ();
7424 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
/* Return 1 if X is all 0s and 2 if X is all 1s
   in a supported SSE vector mode.  */
7432 standard_sse_constant_p (rtx x)
7434 enum machine_mode mode = GET_MODE (x);
7436 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7438 if (vector_all_ones_operand (x, mode))
/* Return the opcode of the special instruction to be used to load
   the SSE constant X.  */
7458 standard_sse_constant_opcode (rtx insn, rtx x)
7460 switch (standard_sse_constant_p (x))
7463 switch (get_attr_mode (insn))
7466 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7468 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7470 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7472 return "vxorps\t%x0, %x0, %x0";
7474 return "vxorpd\t%x0, %x0, %x0";
7476 return "vpxor\t%x0, %x0, %x0";
7481 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
/* Returns 1 if OP contains a symbol reference.  */
7491 symbolic_reference_mentioned_p (rtx op)
7496 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7499 fmt = GET_RTX_FORMAT (GET_CODE (op));
7500 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7506 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7507 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7511 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7518 /* Return 1 if it is appropriate to emit `ret' instructions in the
7519 body of a function. Do this only if the epilogue is simple, needing a
7520 couple of insns. Prior to reloading, we can't tell how many registers
7521 must be saved, so return 0 then. Return 0 if there is no frame
7522 marker to de-allocate. */
7525 ix86_can_use_return_insn_p (void)
7527 struct ix86_frame frame;
7529 if (! reload_completed || frame_pointer_needed)
/* Don't allow popping more than 32K bytes of arguments, since that's
   all we can do with one instruction.  */
7534 if (crtl->args.pops_args
7535 && crtl->args.size >= 32768)
7538 ix86_compute_frame_layout (&frame);
7539 return frame.to_allocate == 0 && frame.padding0 == 0
7540 && (frame.nregs + frame.nsseregs) == 0;
7543 /* Value should be nonzero if functions must have frame pointers.
7544 Zero means the frame pointer need not be set up (and parms may
7545 be accessed via the stack pointer) in functions that seem suitable. */
7548 ix86_frame_pointer_required (void)
7550 /* If we accessed previous frames, then the generated code expects
7551 to be able to access the saved ebp value in our frame. */
7552 if (cfun->machine->accesses_prev_frame)
/* Several x86 OSes need a frame pointer for other reasons,
   usually pertaining to setjmp.  */
7557 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7560 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7561 the frame pointer by default. Turn it back on now if we've not
7562 got a leaf function. */
7563 if (TARGET_OMIT_LEAF_FRAME_POINTER
7564 && (!current_function_is_leaf
7565 || ix86_current_function_calls_tls_descriptor))
7574 /* Record that the current function accesses previous call frames. */
7577 ix86_setup_frame_addresses (void)
7579 cfun->machine->accesses_prev_frame = 1;
7582 #ifndef USE_HIDDEN_LINKONCE
7583 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7584 # define USE_HIDDEN_LINKONCE 1
7586 # define USE_HIDDEN_LINKONCE 0
7590 static int pic_labels_used;
7592 /* Fills in the label name that should be used for a pc thunk for
7593 the given register. */
7596 get_pc_thunk_name (char name[32], unsigned int regno)
7598 gcc_assert (!TARGET_64BIT);
7600 if (USE_HIDDEN_LINKONCE)
7601 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7603 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
7607 /* This function generates code for -fpic that loads %ebx with
7608 the return address of the caller and then returns. */
7611 ix86_code_end (void)
7616 for (regno = 0; regno < 8; ++regno)
7621 if (! ((pic_labels_used >> regno) & 1))
7624 get_pc_thunk_name (name, regno);
7626 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
7627 get_identifier (name),
7628 build_function_type (void_type_node, void_list_node));
7629 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
7630 NULL_TREE, void_type_node);
7631 TREE_PUBLIC (decl) = 1;
7632 TREE_STATIC (decl) = 1;
7637 switch_to_section (darwin_sections[text_coal_section]);
7638 fputs ("\t.weak_definition\t", asm_out_file);
7639 assemble_name (asm_out_file, name);
7640 fputs ("\n\t.private_extern\t", asm_out_file);
7641 assemble_name (asm_out_file, name);
7642 fputs ("\n", asm_out_file);
7643 ASM_OUTPUT_LABEL (asm_out_file, name);
7644 DECL_WEAK (decl) = 1;
7648 if (USE_HIDDEN_LINKONCE)
7650 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
7652 (*targetm.asm_out.unique_section) (decl, 0);
7653 switch_to_section (get_named_section (decl, NULL, 0));
7655 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7656 fputs ("\t.hidden\t", asm_out_file);
7657 assemble_name (asm_out_file, name);
7658 putc ('\n', asm_out_file);
7659 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7663 switch_to_section (text_section);
7664 ASM_OUTPUT_LABEL (asm_out_file, name);
7667 DECL_INITIAL (decl) = make_node (BLOCK);
7668 current_function_decl = decl;
7669 init_function_start (decl);
7670 first_function_block_is_cold = false;
7671 /* Make sure unwind info is emitted for the thunk if needed. */
7672 final_start_function (emit_barrier (), asm_out_file, 1);
7674 xops[0] = gen_rtx_REG (Pmode, regno);
7675 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7676 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7677 output_asm_insn ("ret", xops);
7678 final_end_function ();
7679 init_insn_lengths ();
7680 free_after_compilation (cfun);
7682 current_function_decl = NULL;
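/* The emitted thunk is tiny (editor's sketch, for the %ebx case):

     __i686.get_pc_thunk.bx:
             movl    (%esp), %ebx
             ret                                                    */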
7686 /* Emit code for the SET_GOT patterns. */
7689 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7695 if (TARGET_VXWORKS_RTP && flag_pic)
7697 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7698 xops[2] = gen_rtx_MEM (Pmode,
7699 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7700 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7702 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7703 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7704 an unadorned address. */
7705 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7706 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7707 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7711 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7713 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7715 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7718 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7721 output_asm_insn ("call\t%a2", xops);
7722 #ifdef DWARF2_UNWIND_INFO
/* The call to the next label acts as a push.  */
7724 if (dwarf2out_do_frame ())
7728 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7729 gen_rtx_PLUS (Pmode,
7732 RTX_FRAME_RELATED_P (insn) = 1;
7733 dwarf2out_frame_debug (insn, true);
7740 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7741 is what will be referenced by the Mach-O PIC subsystem. */
7743 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7746 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7747 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7751 output_asm_insn ("pop%z0\t%0", xops);
7752 #ifdef DWARF2_UNWIND_INFO
7753 /* The pop is a pop and clobbers dest, but doesn't restore it
7754 for unwind info purposes. */
7755 if (dwarf2out_do_frame ())
7759 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
7760 dwarf2out_frame_debug (insn, true);
7761 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7762 gen_rtx_PLUS (Pmode,
7765 RTX_FRAME_RELATED_P (insn) = 1;
7766 dwarf2out_frame_debug (insn, true);
7775 get_pc_thunk_name (name, REGNO (dest));
7776 pic_labels_used |= 1 << REGNO (dest);
7778 #ifdef DWARF2_UNWIND_INFO
/* Ensure all queued register saves are flushed before the
   call.  */
if (dwarf2out_do_frame ())
7785 insn = emit_barrier ();
7787 dwarf2out_frame_debug (insn, false);
7790 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7791 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7792 output_asm_insn ("call\t%X2", xops);
7793 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7794 is what will be referenced by the Mach-O PIC subsystem. */
7797 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7799 targetm.asm_out.internal_label (asm_out_file, "L",
7800 CODE_LABEL_NUMBER (label));
7807 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7808 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7810 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
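/* Without the thunk, the sequence printed above is the classic inline
   form (editor's sketch, for %ebx):

     call    .L2
   .L2:
     popl    %ebx
     addl    $_GLOBAL_OFFSET_TABLE_+[.-.L2], %ebx                   */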
7815 /* Generate an "push" pattern for input ARG. */
7820 if (ix86_cfa_state->reg == stack_pointer_rtx)
7821 ix86_cfa_state->offset += UNITS_PER_WORD;
7823 return gen_rtx_SET (VOIDmode,
7825 gen_rtx_PRE_DEC (Pmode,
7826 stack_pointer_rtx)),
7830 /* Return >= 0 if there is an unused call-clobbered register available
7831 for the entire function. */
7834 ix86_select_alt_pic_regnum (void)
7836 if (current_function_is_leaf && !crtl->profile
7837 && !ix86_current_function_calls_tls_descriptor)
7840 /* Can't use the same register for both PIC and DRAP. */
7842 drap = REGNO (crtl->drap_reg);
7845 for (i = 2; i >= 0; --i)
7846 if (i != drap && !df_regs_ever_live_p (i))
7850 return INVALID_REGNUM;
7853 /* Return 1 if we need to save REGNO. */
7855 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7857 if (pic_offset_table_rtx
7858 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7859 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7861 || crtl->calls_eh_return
7862 || crtl->uses_const_pool))
7864 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7869 if (crtl->calls_eh_return && maybe_eh_return)
7874 unsigned test = EH_RETURN_DATA_REGNO (i);
7875 if (test == INVALID_REGNUM)
7882 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
7885 return (df_regs_ever_live_p (regno)
7886 && !call_used_regs[regno]
7887 && !fixed_regs[regno]
7888 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
/* Return the number of saved general purpose registers.  */
7894 ix86_nsaved_regs (void)
7899 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7900 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
/* Return the number of saved SSE registers.  */
7908 ix86_nsaved_sseregs (void)
7913 if (ix86_cfun_abi () != MS_ABI)
7915 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7916 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7921 /* Given FROM and TO register numbers, say whether this elimination is
7922 allowed. If stack alignment is needed, we can only replace argument
7923 pointer with hard frame pointer, or replace frame pointer with stack
7924 pointer. Otherwise, frame pointer elimination is automatically
7925 handled and all other eliminations are valid. */
7928 ix86_can_eliminate (const int from, const int to)
7930 if (stack_realign_fp)
7931 return ((from == ARG_POINTER_REGNUM
7932 && to == HARD_FRAME_POINTER_REGNUM)
7933 || (from == FRAME_POINTER_REGNUM
7934 && to == STACK_POINTER_REGNUM));
7936 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
7939 /* Return the offset between two registers, one to be eliminated, and the other
7940 its replacement, at the start of a routine. */
7943 ix86_initial_elimination_offset (int from, int to)
7945 struct ix86_frame frame;
7946 ix86_compute_frame_layout (&frame);
7948 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7949 return frame.hard_frame_pointer_offset;
7950 else if (from == FRAME_POINTER_REGNUM
7951 && to == HARD_FRAME_POINTER_REGNUM)
7952 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
7955 gcc_assert (to == STACK_POINTER_REGNUM);
7957 if (from == ARG_POINTER_REGNUM)
7958 return frame.stack_pointer_offset;
7960 gcc_assert (from == FRAME_POINTER_REGNUM);
7961 return frame.stack_pointer_offset - frame.frame_pointer_offset;
7965 /* In a dynamically-aligned function, we can't know the offset from
7966 stack pointer to frame pointer, so we must ensure that setjmp
7967 eliminates fp against the hard fp (%ebp) rather than trying to
7968 index from %esp up to the top of the frame across a gap that is
7969 of unknown (at compile-time) size. */
7971 ix86_builtin_setjmp_frame_value (void)
7973 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
/* Fill in the ix86_frame structure describing the frame of the
   currently compiled function.  */
7979 ix86_compute_frame_layout (struct ix86_frame *frame)
7981 unsigned int stack_alignment_needed;
7982 HOST_WIDE_INT offset;
7983 unsigned int preferred_alignment;
7984 HOST_WIDE_INT size = get_frame_size ();
7986 frame->nregs = ix86_nsaved_regs ();
7987 frame->nsseregs = ix86_nsaved_sseregs ();
7989 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
7990 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
/* The MS ABI seems to require the stack alignment to always be 16,
   except for function prologues and leaf functions.  */
7994 if (ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
7996 preferred_alignment = 16;
7997 stack_alignment_needed = 16;
7998 crtl->preferred_stack_boundary = 128;
7999 crtl->stack_alignment_needed = 128;
8002 gcc_assert (!size || stack_alignment_needed);
8003 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
8004 gcc_assert (preferred_alignment <= stack_alignment_needed);
/* During the reload iteration the number of registers saved can change.
   Recompute the value as needed.  Do not recompute when the number of
   registers didn't change, as reload does multiple calls to the
   function and does not expect the decision to change within a single
   iteration.  */
8010 if (!optimize_function_for_size_p (cfun)
8011 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
8013 int count = frame->nregs;
8014 struct cgraph_node *node = cgraph_node (current_function_decl);
8016 cfun->machine->use_fast_prologue_epilogue_nregs = count;
/* The fast prologue uses move instead of push to save registers.  This
   is significantly longer, but also executes faster as modern hardware
   can execute the moves in parallel, but can't do that for push/pop.

   Be careful about choosing what prologue to emit: when the function
   takes many instructions to execute, we may as well use the slow
   version, likewise when the function is known to be outside a hot
   spot (this is known with feedback only).  Weight the size of the
   function by the number of registers to save, as it is cheap to use
   one or two push instructions but very slow to use many of them.  */
8028 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
8029 if (node->frequency < NODE_FREQUENCY_NORMAL
8030 || (flag_branch_probabilities
8031 && node->frequency < NODE_FREQUENCY_HOT))
8032 cfun->machine->use_fast_prologue_epilogue = false;
8034 cfun->machine->use_fast_prologue_epilogue
8035 = !expensive_function_p (count);
8037 if (TARGET_PROLOGUE_USING_MOVE
8038 && cfun->machine->use_fast_prologue_epilogue)
8039 frame->save_regs_using_mov = true;
8041 frame->save_regs_using_mov = false;
8043 /* Skip return address. */
8044 offset = UNITS_PER_WORD;
8046 /* Skip pushed static chain. */
8047 if (ix86_static_chain_on_stack)
8048 offset += UNITS_PER_WORD;
8050 /* Skip saved base pointer. */
8051 if (frame_pointer_needed)
8052 offset += UNITS_PER_WORD;
8054 frame->hard_frame_pointer_offset = offset;
/* Set offset to aligned because the realigned frame starts from
   here.  */
8058 if (stack_realign_fp)
8059 offset = (offset + stack_alignment_needed -1) & -stack_alignment_needed;
8061 /* Register save area */
8062 offset += frame->nregs * UNITS_PER_WORD;
8064 /* Align SSE reg save area. */
8065 if (frame->nsseregs)
8066 frame->padding0 = ((offset + 16 - 1) & -16) - offset;
8068 frame->padding0 = 0;
8070 /* SSE register save area. */
8071 offset += frame->padding0 + frame->nsseregs * 16;
8074 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
8075 offset += frame->va_arg_size;
8077 /* Align start of frame for local function. */
8078 frame->padding1 = ((offset + stack_alignment_needed - 1)
8079 & -stack_alignment_needed) - offset;
8081 offset += frame->padding1;
8083 /* Frame pointer points here. */
8084 frame->frame_pointer_offset = offset;
/* Add the outgoing arguments area.  It can be skipped if we eliminated
   all the function calls as dead code.
   Skipping is however impossible when the function calls alloca.  The
   alloca expander assumes that the last crtl->outgoing_args_size bytes
   of the stack frame are unused.  */
8093 if (ACCUMULATE_OUTGOING_ARGS
8094 && (!current_function_is_leaf || cfun->calls_alloca
8095 || ix86_current_function_calls_tls_descriptor))
8097 offset += crtl->outgoing_args_size;
8098 frame->outgoing_arguments_size = crtl->outgoing_args_size;
8101 frame->outgoing_arguments_size = 0;
/* Align the stack boundary.  Only needed if we're calling another
   function or using alloca.  */
8105 if (!current_function_is_leaf || cfun->calls_alloca
8106 || ix86_current_function_calls_tls_descriptor)
8107 frame->padding2 = ((offset + preferred_alignment - 1)
8108 & -preferred_alignment) - offset;
8110 frame->padding2 = 0;
8112 offset += frame->padding2;
8114 /* We've reached end of stack frame. */
8115 frame->stack_pointer_offset = offset;
8117 /* Size prologue needs to allocate. */
8118 frame->to_allocate =
8119 (size + frame->padding1 + frame->padding2
8120 + frame->outgoing_arguments_size + frame->va_arg_size);
8122 if ((!frame->to_allocate && frame->nregs <= 1)
8123 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
8124 frame->save_regs_using_mov = false;
  if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
      && current_function_sp_is_unchanging
      && current_function_is_leaf
      && !ix86_current_function_calls_tls_descriptor)
    {
      frame->red_zone_size = frame->to_allocate;
      if (frame->save_regs_using_mov)
	frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
      if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
	frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
    }
  else
    frame->red_zone_size = 0;
  frame->to_allocate -= frame->red_zone_size;
  frame->stack_pointer_offset -= frame->red_zone_size;
}

/* Emit code to save registers in the prologue.  */

static void
ix86_emit_save_regs (void)
{
  unsigned int regno;
  rtx insn;

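  /* Walk from the highest hard regno down so that the pushes come out
     in the reverse of the increasing-regno order used by the pop loop
     in ix86_emit_restore_regs_using_pop below.  */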
  for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
      {
	insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
	RTX_FRAME_RELATED_P (insn) = 1;
      }
}

/* Emit code to save registers using MOV insns.  First register
   is saved at POINTER + OFFSET.  */

static void
ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
{
  unsigned int regno;
  rtx insn;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
      {
	insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
					       Pmode, offset),
			       gen_rtx_REG (Pmode, regno));
	RTX_FRAME_RELATED_P (insn) = 1;
	offset += UNITS_PER_WORD;
      }
}

/* Emit code to save SSE registers using MOV insns.  First register
   is saved at POINTER + OFFSET.  */

static void
ix86_emit_save_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
{
  unsigned int regno;
  rtx insn;
  rtx mem;

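  /* Each register is saved in a 16-byte TImode slot.  The slots start
     16-byte aligned (see the padding0 computation in
     ix86_compute_frame_layout), so the 128-bit alignment recorded on
     the MEM below permits aligned vector moves.  */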
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
      {
	mem = adjust_address (gen_rtx_MEM (TImode, pointer), TImode, offset);
	set_mem_align (mem, 128);
	insn = emit_move_insn (mem, gen_rtx_REG (TImode, regno));
	RTX_FRAME_RELATED_P (insn) = 1;
	offset += 16;
      }
}
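
/* Chain of queued REG_CFA_RESTORE notes, waiting to be attached to the
   next stack-manipulation insn; see ix86_add_queued_cfa_restore_notes.  */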
static GTY(()) rtx queued_cfa_restores;

/* Add a REG_CFA_RESTORE REG note to INSN, or queue it until the next
   stack manipulation insn.  Don't add the note if the previously
   saved value will be left untouched within the stack red zone till
   return, as unwinders can find the same value in the register and
   on the stack.  */

static void
ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT red_offset)
{
  if (TARGET_RED_ZONE
      && !TARGET_64BIT_MS_ABI
      && red_offset + RED_ZONE_SIZE >= 0
      && crtl->args.pops_args < 65536)
    return;

  if (insn)
    {
      add_reg_note (insn, REG_CFA_RESTORE, reg);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    queued_cfa_restores
      = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
}

/* Add queued REG_CFA_RESTORE notes to INSN, if any.  */

static void
ix86_add_queued_cfa_restore_notes (rtx insn)
{
  rtx last;

  if (!queued_cfa_restores)
    return;
  for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
    ;
  XEXP (last, 1) = REG_NOTES (insn);
  REG_NOTES (insn) = queued_cfa_restores;
  queued_cfa_restores = NULL_RTX;
  RTX_FRAME_RELATED_P (insn) = 1;
}

/* Expand prologue or epilogue stack adjustment.
   The pattern exists to put a dependency on all ebp-based memory
   accesses.  STYLE should be negative if instructions should be marked
   as frame related, zero if the %r11 register is live and cannot be
   freely used, and positive otherwise.  */

static void
pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
			   int style, bool set_cfa)
{
  rtx insn;

  if (! TARGET_64BIT)
    insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
  else if (x86_64_immediate_operand (offset, DImode))
    insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
  else
    {
      rtx r11;
      /* r11 is used by indirect sibcall return as well, set before the
	 epilogue and used after the epilogue.  ATM indirect sibcall
	 shouldn't be used together with huge frame sizes in one
	 function because of the frame_size check in sibcall.c.  */
      gcc_assert (style);
      r11 = gen_rtx_REG (DImode, R11_REG);
      insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
      if (style < 0)
	RTX_FRAME_RELATED_P (insn) = 1;
      insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
							       offset));
    }

  if (style >= 0)
    ix86_add_queued_cfa_restore_notes (insn);

  if (set_cfa)
    {
      rtx r;

      gcc_assert (ix86_cfa_state->reg == src);
      ix86_cfa_state->offset += INTVAL (offset);
      ix86_cfa_state->reg = dest;

      r = gen_rtx_PLUS (Pmode, src, offset);
      r = gen_rtx_SET (VOIDmode, dest, r);
      add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else if (style < 0)
    RTX_FRAME_RELATED_P (insn) = 1;
}

/* Find an available register to be used as a dynamic realign argument
   pointer register.  Such a register will be written in the prologue
   and used at the beginning of the body, so it must not be
	1. a parameter passing register.
	2. the GOT pointer.
   We reuse the static-chain register if it is available.  Otherwise, we
   use DI for i386 and R13 for x86-64.  We chose R13 since it has
   a longer encoding.

   Return: the regno of the chosen register.  */

static unsigned int
find_drap_reg (void)
{
  tree decl = cfun->decl;

  if (TARGET_64BIT)
    {
      /* Use R13 for a nested function or a function that needs a
	 static chain.  Since a function with a tail call may use any
	 caller-saved register in the epilogue, the DRAP must not use a
	 caller-saved register in such a case.  */
      if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
	return R13_REG;

      return R10_REG;
    }
  else
    {
      /* Use DI for a nested function or a function that needs a
	 static chain.  Since a function with a tail call may use any
	 caller-saved register in the epilogue, the DRAP must not use a
	 caller-saved register in such a case.  */
      if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
	return DI_REG;

      /* Reuse the static chain register if it isn't used for parameter
	 passing.  */
      if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
	  && !lookup_attribute ("fastcall",
				TYPE_ATTRIBUTES (TREE_TYPE (decl)))
	  && !lookup_attribute ("thiscall",
				TYPE_ATTRIBUTES (TREE_TYPE (decl))))
	return CX_REG;
      else
	return DI_REG;
    }
}

/* Return minimum incoming stack alignment.  */

static unsigned int
ix86_minimum_incoming_stack_boundary (bool sibcall)
{
  unsigned int incoming_stack_boundary;

  /* Prefer the one specified at command line.  */
  if (ix86_user_incoming_stack_boundary)
    incoming_stack_boundary = ix86_user_incoming_stack_boundary;
  /* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack
     boundary when -mstackrealign is in effect, this is not a sibcall
     check, and the estimated stack alignment is 128 bits.  */
  else if (!sibcall
	   && !TARGET_64BIT
	   && ix86_force_align_arg_pointer
	   && crtl->stack_alignment_estimated == 128)
    incoming_stack_boundary = MIN_STACK_BOUNDARY;
  else
    incoming_stack_boundary = ix86_default_incoming_stack_boundary;

  /* Incoming stack alignment can be changed on individual functions
     via the force_align_arg_pointer attribute.  We use the smallest
     incoming stack boundary.  */
  if (incoming_stack_boundary > MIN_STACK_BOUNDARY
      && lookup_attribute (ix86_force_align_arg_pointer_string,
			   TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
    incoming_stack_boundary = MIN_STACK_BOUNDARY;

  /* The incoming stack frame has to be aligned at least at
     parm_stack_boundary.  */
  if (incoming_stack_boundary < crtl->parm_stack_boundary)
    incoming_stack_boundary = crtl->parm_stack_boundary;

  /* The stack at the entrance of main is aligned by the runtime.  We
     use the smallest incoming stack boundary.  */
  if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
      && DECL_NAME (current_function_decl)
      && MAIN_NAME_P (DECL_NAME (current_function_decl))
      && DECL_FILE_SCOPE_P (current_function_decl))
    incoming_stack_boundary = MAIN_STACK_BOUNDARY;

  return incoming_stack_boundary;
}
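
/* Note the precedence encoded above: an explicit user setting wins;
   otherwise the -mstackrealign special case or the target default
   applies.  The result is then raised to parm_stack_boundary if needed
   and, for main, lowered to MAIN_STACK_BOUNDARY, since that is all the
   runtime guarantees.  */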

/* Update incoming stack boundary and estimated stack alignment.  */

static void
ix86_update_stack_boundary (void)
{
  ix86_incoming_stack_boundary
    = ix86_minimum_incoming_stack_boundary (false);

  /* x86_64 varargs need 16-byte stack alignment for the register save
     area.  */
  if (TARGET_64BIT
      && cfun->stdarg
      && crtl->stack_alignment_estimated < 128)
    crtl->stack_alignment_estimated = 128;
}

/* Handle the TARGET_GET_DRAP_RTX hook.  Return NULL if no DRAP is
   needed or an rtx for the DRAP otherwise.  */

static rtx
ix86_get_drap_rtx (void)
{
  if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
    crtl->need_drap = true;

  if (stack_realign_drap)
    {
      /* Assign DRAP to vDRAP and return vDRAP.  */
      unsigned int regno = find_drap_reg ();
      rtx drap_vreg;
      rtx arg_ptr;
      rtx seq, insn;

      arg_ptr = gen_rtx_REG (Pmode, regno);
      crtl->drap_reg = arg_ptr;

      start_sequence ();
      drap_vreg = copy_to_reg (arg_ptr);
      seq = get_insns ();
      end_sequence ();

      insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
      if (!optimize)
	{
	  add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      return drap_vreg;
    }
  else
    return NULL;
}

/* Handle the TARGET_INTERNAL_ARG_POINTER hook.  */

static rtx
ix86_internal_arg_pointer (void)
{
  return virtual_incoming_args_rtx;
}

/* Finalize the stack_realign_needed flag, which guides the
   prologue/epilogue to be generated in the correct form.  */

static void
ix86_finalize_stack_realign_flags (void)
{
  /* Check whether stack realignment is really needed after reload, and
     store the result in cfun.  */
  unsigned int incoming_stack_boundary
    = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
       ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
  unsigned int stack_realign = (incoming_stack_boundary
				< (current_function_is_leaf
				   ? crtl->max_used_stack_slot_alignment
				   : crtl->stack_alignment_needed));

  if (crtl->stack_realign_finalized)
    {
      /* After stack_realign_needed is finalized, we can no longer
	 change it.  */
      gcc_assert (crtl->stack_realign_needed == stack_realign);
    }
  else
    {
      crtl->stack_realign_needed = stack_realign;
      crtl->stack_realign_finalized = true;
    }
}

/* Expand the prologue into a bunch of separate insns.  */

void
ix86_expand_prologue (void)
{
  rtx insn;
  bool pic_reg_used;
  struct ix86_frame frame;
  HOST_WIDE_INT allocate;
  int gen_frame_pointer = frame_pointer_needed;

  ix86_finalize_stack_realign_flags ();

  /* DRAP should not coexist with stack_realign_fp.  */
  gcc_assert (!(crtl->drap_reg && stack_realign_fp));

  /* Initialize CFA state for before the prologue.  */
  ix86_cfa_state->reg = stack_pointer_rtx;
  ix86_cfa_state->offset = INCOMING_FRAME_SP_OFFSET;
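
  /* From this point on, ix86_cfa_state tracks the register and offset
     that the unwinder's CFA is based on; each insn below that moves
     the CFA carries a matching REG_CFA_* note so the emitted DWARF CFI
     stays in sync.  */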

  ix86_compute_frame_layout (&frame);

  if (ix86_function_ms_hook_prologue (current_function_decl))
    {
      rtx push, mov;

      /* Make sure the function starts with
	 8b ff     movl.s %edi,%edi
	 55        push   %ebp
	 8b ec     movl.s %esp,%ebp

	 This matches the hookable function prologue in Win32 API
	 functions in Microsoft Windows XP Service Pack 2 and newer.
	 Wine uses this to enable Windows apps to hook the Win32 API
	 functions provided by Wine.  */
      insn = emit_insn (gen_vswapmov (gen_rtx_REG (SImode, DI_REG),
				      gen_rtx_REG (SImode, DI_REG)));
      push = emit_insn (gen_push (hard_frame_pointer_rtx));
      mov = emit_insn (gen_vswapmov (hard_frame_pointer_rtx,
				     stack_pointer_rtx));

      if (frame_pointer_needed && !(crtl->drap_reg
				    && crtl->stack_realign_needed))
	{
	  /* The push %ebp and movl.s %esp, %ebp already set up
	     the frame pointer.  No need to do this again.  */
	  gen_frame_pointer = 0;
	  RTX_FRAME_RELATED_P (push) = 1;
	  RTX_FRAME_RELATED_P (mov) = 1;
	  if (ix86_cfa_state->reg == stack_pointer_rtx)
	    ix86_cfa_state->reg = hard_frame_pointer_rtx;
	}
      else
	/* If the frame pointer is not needed, pop %ebp again.  This
	   could be optimized for cases where ebp needs to be backed up
	   for some other reason.  If stack realignment is needed, pop
	   the base pointer again, align the stack, and later regenerate
	   the frame pointer setup.  The frame pointer generated by the
	   hook prologue is not aligned, so it can't be used.  */
	insn = emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
    }

  /* The first insn of a function that accepts its static chain on the
     stack is to push the register that would be filled in by a direct
     call.  This insn will be skipped by the trampoline.  */
  if (ix86_static_chain_on_stack)
    {
      rtx t;

      insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
      emit_insn (gen_blockage ());

      /* We don't want to interpret this push insn as a register save;
	 only as a stack adjustment.  The real copy of the register as
	 a save will be done later, if needed.  */
      t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
      t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
      add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Emit prologue code to adjust stack alignment and set up the DRAP,
     in case the DRAP is needed and stack realignment is really needed
     after reload.  */
  if (crtl->drap_reg && crtl->stack_realign_needed)
    {
      rtx x, y;
      int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
      int param_ptr_offset = UNITS_PER_WORD;

      if (ix86_static_chain_on_stack)
	param_ptr_offset += UNITS_PER_WORD;
      if (!call_used_regs[REGNO (crtl->drap_reg)])
	param_ptr_offset += UNITS_PER_WORD;

      gcc_assert (stack_realign_drap);

      /* Grab the argument pointer.  */
      x = plus_constant (stack_pointer_rtx, param_ptr_offset);
      y = crtl->drap_reg;

      /* Only need to push the parameter pointer reg if it is a
	 caller-saved reg.  */
      if (!call_used_regs[REGNO (crtl->drap_reg)])
	{
	  /* Push the arg pointer reg.  */
	  insn = emit_insn (gen_push (y));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
      RTX_FRAME_RELATED_P (insn) = 1;
      ix86_cfa_state->reg = crtl->drap_reg;

      /* Align the stack.  */
      insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
					   stack_pointer_rtx,
					   GEN_INT (-align_bytes)));
      RTX_FRAME_RELATED_P (insn) = 1;
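
      /* With, for example, a 32-byte alignment requirement, the insn
	 just emitted is simply `andl $-32, %esp' (`andq' with the
	 64-bit pattern).  */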

      /* Replicate the return address on the stack so that the return
	 address can be reached via the (argp - 1) slot.  This is needed
	 to implement the macro RETURN_ADDR_RTX and the intrinsic
	 function expand_builtin_return_addr etc.  */
      x = crtl->drap_reg;
      x = gen_frame_mem (Pmode,
			 plus_constant (x, -UNITS_PER_WORD));
      insn = emit_insn (gen_push (x));
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Note: AT&T enter does NOT have reversed args.  Enter is probably
     slower on all targets.  Also sdb doesn't like it.  */

  if (gen_frame_pointer)
    {
      insn = emit_insn (gen_push (hard_frame_pointer_rtx));
      RTX_FRAME_RELATED_P (insn) = 1;

      insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;

      if (ix86_cfa_state->reg == stack_pointer_rtx)
	ix86_cfa_state->reg = hard_frame_pointer_rtx;
    }

  if (stack_realign_fp)
    {
      int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
      gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);

      /* Align the stack.  */
      insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
					   stack_pointer_rtx,
					   GEN_INT (-align_bytes)));
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  allocate = frame.to_allocate + frame.nsseregs * 16 + frame.padding0;

  if (!frame.save_regs_using_mov)
    ix86_emit_save_regs ();
  else
    allocate += frame.nregs * UNITS_PER_WORD;

  /* When using the red zone we may start saving registers before
     allocating the stack frame, saving one cycle of the prologue.
     However, avoid doing this if we will have to probe the stack,
     since at least on x86_64 the stack probe can turn into a call that
     clobbers a red zone location.  */
  if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
      && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
    ix86_emit_save_regs_using_mov ((frame_pointer_needed
				    && !crtl->stack_realign_needed)
				   ? hard_frame_pointer_rtx
				   : stack_pointer_rtx,
				   -frame.nregs * UNITS_PER_WORD);

  if (allocate == 0)
    ;
  else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
    pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
			       GEN_INT (-allocate), -1,
			       ix86_cfa_state->reg == stack_pointer_rtx);
  else
    {
      rtx eax = gen_rtx_REG (Pmode, AX_REG);
      bool eax_live;
      rtx t;

      if (cfun->machine->call_abi == MS_ABI)
	eax_live = false;
      else
	eax_live = ix86_eax_live_at_start_p ();

      if (eax_live)
	{
	  emit_insn (gen_push (eax));
	  allocate -= UNITS_PER_WORD;
	}

      emit_move_insn (eax, GEN_INT (allocate));

      if (TARGET_64BIT)
	insn = gen_allocate_stack_worker_64 (eax, eax);
      else
	insn = gen_allocate_stack_worker_32 (eax, eax);
      insn = emit_insn (insn);

      if (ix86_cfa_state->reg == stack_pointer_rtx)
	{
	  ix86_cfa_state->offset += allocate;
	  t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
	  t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
	  add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      if (eax_live)
	{
	  if (frame_pointer_needed)
	    t = plus_constant (hard_frame_pointer_rtx,
			       allocate
			       - frame.to_allocate
			       - frame.nregs * UNITS_PER_WORD);
	  else
	    t = plus_constant (stack_pointer_rtx, allocate);
	  emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
	}
    }

  if (frame.save_regs_using_mov
      && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
	   && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
    {
      if (!frame_pointer_needed
	  || !(frame.to_allocate + frame.padding0)
	  || crtl->stack_realign_needed)
	ix86_emit_save_regs_using_mov (stack_pointer_rtx,
				       frame.to_allocate
				       + frame.nsseregs * 16 + frame.padding0);
      else
	ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
				       -frame.nregs * UNITS_PER_WORD);
    }
  if (!frame_pointer_needed
      || !(frame.to_allocate + frame.padding0)
      || crtl->stack_realign_needed)
    ix86_emit_save_sse_regs_using_mov (stack_pointer_rtx,
				       frame.to_allocate);
  else
    ix86_emit_save_sse_regs_using_mov (hard_frame_pointer_rtx,
				       - frame.nregs * UNITS_PER_WORD
				       - frame.nsseregs * 16
				       - frame.padding0);

  pic_reg_used = false;
  if (pic_offset_table_rtx
      && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
	  || crtl->profile))
    {
      unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();

      if (alt_pic_reg_used != INVALID_REGNUM)
	SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);

      pic_reg_used = true;
    }

  if (pic_reg_used)
    {
      if (TARGET_64BIT)
	{
	  if (ix86_cmodel == CM_LARGE_PIC)
	    {
	      rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
	      rtx label = gen_label_rtx ();
	      emit_label (label);
	      LABEL_PRESERVE_P (label) = 1;
	      gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
	      insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
	      insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
	      insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
					    pic_offset_table_rtx, tmp_reg));
	    }
	  else
	    insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
	}
      else
	insn = emit_insn (gen_set_got (pic_offset_table_rtx));
    }

  /* In the pic_reg_used case, make sure that the GOT load isn't deleted
     when mcount needs it.  The blockage to avoid call movement across
     the mcount call is emitted in generic code after the
     NOTE_INSN_PROLOGUE_END note.  */
  if (crtl->profile && pic_reg_used)
    emit_insn (gen_prologue_use (pic_offset_table_rtx));

  if (crtl->drap_reg && !crtl->stack_realign_needed)
    {
      /* The vDRAP was set up, but after reload it turns out that stack
	 realignment isn't necessary; emit prologue code to set up the
	 DRAP without the stack realignment adjustment.  */
      rtx x;
      int drap_bp_offset = UNITS_PER_WORD * 2;

      if (ix86_static_chain_on_stack)
	drap_bp_offset += UNITS_PER_WORD;
      x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
      insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
    }

  /* Prevent instructions from being scheduled into the register save
     push sequence when access to the red zone area is done through the
     frame pointer.  The offset between the frame pointer and the stack
     pointer is calculated relative to the value of the stack pointer
     at the end of the function prologue, and moving instructions that
     access the red zone area via the frame pointer inside the push
     sequence violates this assumption.  */
  if (frame_pointer_needed && frame.red_zone_size)
    emit_insn (gen_memory_blockage ());

  /* Emit a cld instruction if stringops are used in the function.  */
  if (TARGET_CLD && ix86_current_function_needs_cld)
    emit_insn (gen_cld ());
}

/* Emit code to restore REG using a POP insn.  */

static void
ix86_emit_restore_reg_using_pop (rtx reg, HOST_WIDE_INT red_offset)
{
  rtx insn = emit_insn (ix86_gen_pop1 (reg));

  if (ix86_cfa_state->reg == crtl->drap_reg
      && REGNO (reg) == REGNO (crtl->drap_reg))
    {
      /* Previously we'd represented the CFA as an expression
	 like *(%ebp - 8).  We've just popped that value from
	 the stack, which means we need to reset the CFA to
	 the drap register.  This will remain until we restore
	 the stack pointer.  */
      add_reg_note (insn, REG_CFA_DEF_CFA, reg);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    {
      if (ix86_cfa_state->reg == stack_pointer_rtx)
	{
	  ix86_cfa_state->offset -= UNITS_PER_WORD;
	  add_reg_note (insn, REG_CFA_ADJUST_CFA,
			copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      /* When the frame pointer is the CFA, and we pop it, we are
	 swapping back to the stack pointer as the CFA.  This happens
	 for stack frames that don't allocate other data, so we assume
	 the stack pointer is now pointing at the return address, i.e.
	 the function entry state, which makes the offset be 1 word.  */
      else if (ix86_cfa_state->reg == hard_frame_pointer_rtx
	       && reg == hard_frame_pointer_rtx)
	{
	  ix86_cfa_state->reg = stack_pointer_rtx;
	  ix86_cfa_state->offset -= UNITS_PER_WORD;

	  add_reg_note (insn, REG_CFA_DEF_CFA,
			gen_rtx_PLUS (Pmode, stack_pointer_rtx,
				      GEN_INT (ix86_cfa_state->offset)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }

  ix86_add_cfa_restore_note (insn, reg, red_offset);
}

/* Emit code to restore saved registers using POP insns.  */

static void
ix86_emit_restore_regs_using_pop (HOST_WIDE_INT red_offset)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
      {
	ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno),
					 red_offset);
	red_offset += UNITS_PER_WORD;
      }
}

/* Emit code and notes for the LEAVE instruction.  */
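/* (LEAVE is equivalent to `movl %ebp, %esp' followed by `popl %ebp',
   which is why the CFA is moved back to the stack pointer below.)  */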

static void
ix86_emit_leave (HOST_WIDE_INT red_offset)
{
  rtx insn = emit_insn (ix86_gen_leave ());

  ix86_add_queued_cfa_restore_notes (insn);

  if (ix86_cfa_state->reg == hard_frame_pointer_rtx)
    {
      ix86_cfa_state->reg = stack_pointer_rtx;
      ix86_cfa_state->offset -= UNITS_PER_WORD;

      add_reg_note (insn, REG_CFA_ADJUST_CFA,
		    copy_rtx (XVECEXP (PATTERN (insn), 0, 0)));
      RTX_FRAME_RELATED_P (insn) = 1;
      ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx, red_offset);
    }
}

/* Emit code to restore saved registers using MOV insns.  First register
   is restored from POINTER + OFFSET.  */

static void
ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
				  HOST_WIDE_INT red_offset,
				  int maybe_eh_return)
{
  unsigned int regno;
  rtx base_address = gen_rtx_MEM (Pmode, pointer);
  rtx insn;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
      {
	rtx reg = gen_rtx_REG (Pmode, regno);

	/* Ensure that adjust_address won't be forced to produce a
	   pointer out of the range allowed by the x86-64 instruction
	   set.  */
	if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
	  {
	    rtx r11;

	    r11 = gen_rtx_REG (DImode, R11_REG);
	    emit_move_insn (r11, GEN_INT (offset));
	    emit_insn (gen_adddi3 (r11, r11, pointer));
	    base_address = gen_rtx_MEM (Pmode, r11);
	    offset = 0;
	  }
	insn = emit_move_insn (reg,
			       adjust_address (base_address, Pmode, offset));
	offset += UNITS_PER_WORD;

	if (ix86_cfa_state->reg == crtl->drap_reg
	    && regno == REGNO (crtl->drap_reg))
	  {
	    /* Previously we'd represented the CFA as an expression
	       like *(%ebp - 8).  We've just restored that value from
	       the stack, which means we need to reset the CFA to
	       the drap register.  This will remain until we restore
	       the stack pointer.  */
	    add_reg_note (insn, REG_CFA_DEF_CFA, reg);
	    RTX_FRAME_RELATED_P (insn) = 1;
	  }
	else
	  ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);

	red_offset += UNITS_PER_WORD;
      }
}

/* Emit code to restore saved SSE registers using MOV insns.  First
   register is restored from POINTER + OFFSET.  */

static void
ix86_emit_restore_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
				      HOST_WIDE_INT red_offset,
				      int maybe_eh_return)
{
  unsigned int regno;
  rtx base_address = gen_rtx_MEM (TImode, pointer);
  rtx mem;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
      {
	rtx reg = gen_rtx_REG (TImode, regno);

	/* Ensure that adjust_address won't be forced to produce a
	   pointer out of the range allowed by the x86-64 instruction
	   set.  */
	if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
	  {
	    rtx r11;

	    r11 = gen_rtx_REG (DImode, R11_REG);
	    emit_move_insn (r11, GEN_INT (offset));
	    emit_insn (gen_adddi3 (r11, r11, pointer));
	    base_address = gen_rtx_MEM (TImode, r11);
	    offset = 0;
	  }
	mem = adjust_address (base_address, TImode, offset);
	set_mem_align (mem, 128);
	emit_move_insn (reg, mem);
	offset += 16;

	ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);

	red_offset += 16;
      }
}

/* Restore function stack, frame, and registers.  */

void
ix86_expand_epilogue (int style)
{
  int sp_valid;
  struct ix86_frame frame;
  HOST_WIDE_INT offset, red_offset;
  struct machine_cfa_state cfa_state_save = *ix86_cfa_state;
  bool using_drap;

  ix86_finalize_stack_realign_flags ();

  /* When the stack is realigned, SP must be valid.  */
  sp_valid = (!frame_pointer_needed
	      || current_function_sp_is_unchanging
	      || stack_realign_fp);
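
  /* I.e. %esp is only unusable for addressing the save area when the
     function needs a frame pointer, %esp may have moved by an unknown
     amount (e.g. alloca), and the frame is not realigned via %esp.  */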

  ix86_compute_frame_layout (&frame);

  /* See the comment about red zone and frame
     pointer usage in ix86_expand_prologue.  */
  if (frame_pointer_needed && frame.red_zone_size)
    emit_insn (gen_memory_blockage ());

  using_drap = crtl->drap_reg && crtl->stack_realign_needed;
  gcc_assert (!using_drap || ix86_cfa_state->reg == crtl->drap_reg);

  /* Calculate the start of the saved registers relative to ebp.
     Special care must be taken for the normal return case of a
     function using eh_return: the eax and edx registers are marked as
     saved, but not restored along this path.  */
  offset = frame.nregs;
  if (crtl->calls_eh_return && style != 2)
    offset -= 2;
  offset *= -UNITS_PER_WORD;
  offset -= frame.nsseregs * 16 + frame.padding0;

  /* Calculate the start of the saved registers relative to esp on
     entry of the function.  When realigning the stack, this needs to
     be the most negative value possible at runtime.  */
  red_offset = offset;
  if (using_drap)
    red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
		  + UNITS_PER_WORD;
  else if (stack_realign_fp)
    red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
		  - UNITS_PER_WORD;
  if (ix86_static_chain_on_stack)
    red_offset -= UNITS_PER_WORD;
  if (frame_pointer_needed)
    red_offset -= UNITS_PER_WORD;

  /* If we're only restoring one register and sp is not valid then
     use a move instruction to restore the register, since that is
     less work than reloading sp and popping the register.

     The default code results in a stack adjustment using an add/lea
     instruction, while this code results in a LEAVE instruction (or
     discrete equivalent), so it is profitable in some other cases as
     well.  Especially when there are no registers to restore.  We also
     use this code when TARGET_USE_LEAVE and there is exactly one
     register to pop.  This heuristic may need some tuning in future.  */
  if ((!sp_valid && (frame.nregs + frame.nsseregs) <= 1)
      || (TARGET_EPILOGUE_USING_MOVE
	  && cfun->machine->use_fast_prologue_epilogue
	  && ((frame.nregs + frame.nsseregs) > 1
	      || (frame.to_allocate + frame.padding0) != 0))
      || (frame_pointer_needed && !(frame.nregs + frame.nsseregs)
	  && (frame.to_allocate + frame.padding0) != 0)
      || (frame_pointer_needed && TARGET_USE_LEAVE
	  && cfun->machine->use_fast_prologue_epilogue
	  && (frame.nregs + frame.nsseregs) == 1)
      || crtl->calls_eh_return)
    {
      /* Restore registers.  We can use ebp or esp to address the
	 memory locations.  If both are available, default to ebp,
	 since offsets are known to be small.  The only exception is
	 esp pointing directly to the end of the block of saved
	 registers, where we may simplify the addressing mode.

	 If we are realigning the stack with bp and sp, the regs
	 restore can't be addressed by bp; sp must be used instead.  */

      if (!frame_pointer_needed
	  || (sp_valid && !(frame.to_allocate + frame.padding0))
	  || stack_realign_fp)
	{
	  ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
						frame.to_allocate, red_offset,
						style == 2);
	  ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
					    frame.to_allocate
					    + frame.nsseregs * 16
					    + frame.padding0,
					    red_offset
					    + frame.nsseregs * 16
					    + frame.padding0, style == 2);
	}
      else
	{
	  ix86_emit_restore_sse_regs_using_mov (hard_frame_pointer_rtx,
						offset, red_offset,
						style == 2);
	  ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
					    offset
					    + frame.nsseregs * 16
					    + frame.padding0,
					    red_offset
					    + frame.nsseregs * 16
					    + frame.padding0, style == 2);
	}

      red_offset -= offset;

      /* eh_return epilogues need %ecx added to the stack pointer.  */
      if (style == 2)
	{
	  rtx tmp, sa = EH_RETURN_STACKADJ_RTX;

	  /* Stack realignment doesn't work with eh_return.  */
	  gcc_assert (!crtl->stack_realign_needed);
	  /* Neither do regparm nested functions.  */
	  gcc_assert (!ix86_static_chain_on_stack);

	  if (frame_pointer_needed)
	    {
	      tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
	      tmp = plus_constant (tmp, UNITS_PER_WORD);
	      tmp = emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));

	      tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
	      tmp = emit_move_insn (hard_frame_pointer_rtx, tmp);

	      /* Note that we use SA as a temporary CFA, as the return
		 address is at the proper place relative to it.  We
		 pretend this happens at the FP restore insn because
		 prior to this insn the FP would be stored at the wrong
		 offset relative to SA, and after this insn we have no
		 other reasonable register to use for the CFA.  We don't
		 bother resetting the CFA to the SP for the duration of
		 the return insn.  */
	      add_reg_note (tmp, REG_CFA_DEF_CFA,
			    plus_constant (sa, UNITS_PER_WORD));
	      ix86_add_queued_cfa_restore_notes (tmp);
	      add_reg_note (tmp, REG_CFA_RESTORE, hard_frame_pointer_rtx);
	      RTX_FRAME_RELATED_P (tmp) = 1;
	      ix86_cfa_state->reg = sa;
	      ix86_cfa_state->offset = UNITS_PER_WORD;

	      pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
					 const0_rtx, style, false);
	    }
	  else
	    {
	      tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
	      tmp = plus_constant (tmp, (frame.to_allocate
					 + frame.nregs * UNITS_PER_WORD
					 + frame.nsseregs * 16
					 + frame.padding0));
	      tmp = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
	      ix86_add_queued_cfa_restore_notes (tmp);

	      gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
	      if (ix86_cfa_state->offset != UNITS_PER_WORD)
		{
		  ix86_cfa_state->offset = UNITS_PER_WORD;
		  add_reg_note (tmp, REG_CFA_DEF_CFA,
				plus_constant (stack_pointer_rtx,
					       UNITS_PER_WORD));
		  RTX_FRAME_RELATED_P (tmp) = 1;
		}
	    }
	}
      else if (!frame_pointer_needed)
	pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				   GEN_INT (frame.to_allocate
					    + frame.nregs * UNITS_PER_WORD
					    + frame.nsseregs * 16
					    + frame.padding0),
				   style, !using_drap);
      /* If not an i386, mov & pop is faster than "leave".  */
      else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
	       || !cfun->machine->use_fast_prologue_epilogue)
	ix86_emit_leave (red_offset);
      else
	{
	  pro_epilogue_adjust_stack (stack_pointer_rtx,
				     hard_frame_pointer_rtx,
				     const0_rtx, style, !using_drap);

	  ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx, red_offset);
	}
    }
  else
    {
      /* First step is to deallocate the stack frame so that we can
	 pop the registers.

	 If we realign the stack with the frame pointer, then the stack
	 pointer won't be able to recover via lea $offset(%ebp), %esp,
	 because there is a padding area between bp and sp for realign.
	 "add $to_allocate, %sp" must be used instead.  */
      if (!sp_valid)
	{
	  gcc_assert (frame_pointer_needed);
	  gcc_assert (!stack_realign_fp);
	  pro_epilogue_adjust_stack (stack_pointer_rtx,
				     hard_frame_pointer_rtx,
				     GEN_INT (offset), style, false);
	  ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
						0, red_offset,
						style == 2);
	  pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				     GEN_INT (frame.nsseregs * 16
					      + frame.padding0),
				     style, false);
	}
      else if (frame.to_allocate || frame.padding0 || frame.nsseregs)
	{
	  ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
						frame.to_allocate, red_offset,
						style == 2);
	  pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				     GEN_INT (frame.to_allocate
					      + frame.nsseregs * 16
					      + frame.padding0), style,
				     !using_drap && !frame_pointer_needed);
	}

      ix86_emit_restore_regs_using_pop (red_offset + frame.nsseregs * 16
					+ frame.padding0);
      red_offset -= offset;

      if (frame_pointer_needed)
	{
	  /* Leave results in shorter dependency chains on CPUs that are
	     able to grok it fast.  */
	  if (TARGET_USE_LEAVE)
	    ix86_emit_leave (red_offset);
	  else
	    {
	      /* When stack realignment really happens, recovering the
		 stack pointer to the hard frame pointer is a must, if
		 not using leave.  */
	      if (stack_realign_fp)
		pro_epilogue_adjust_stack (stack_pointer_rtx,
					   hard_frame_pointer_rtx,
					   const0_rtx, style, !using_drap);
	      ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx,
					       red_offset);
	    }
	}
    }

  if (using_drap)
    {
      int param_ptr_offset = UNITS_PER_WORD;
      rtx insn;

      gcc_assert (stack_realign_drap);

      if (ix86_static_chain_on_stack)
	param_ptr_offset += UNITS_PER_WORD;
      if (!call_used_regs[REGNO (crtl->drap_reg)])
	param_ptr_offset += UNITS_PER_WORD;

      insn = emit_insn ((*ix86_gen_add3) (stack_pointer_rtx,
					  crtl->drap_reg,
					  GEN_INT (-param_ptr_offset)));

      ix86_cfa_state->reg = stack_pointer_rtx;
      ix86_cfa_state->offset = param_ptr_offset;

      add_reg_note (insn, REG_CFA_DEF_CFA,
		    gen_rtx_PLUS (Pmode, ix86_cfa_state->reg,
				  GEN_INT (ix86_cfa_state->offset)));
      RTX_FRAME_RELATED_P (insn) = 1;

      if (!call_used_regs[REGNO (crtl->drap_reg)])
	ix86_emit_restore_reg_using_pop (crtl->drap_reg, -UNITS_PER_WORD);
    }

  /* Remove the saved static chain from the stack.  The use of ECX is
     merely as a scratch register, not as the actual static chain.  */
  if (ix86_static_chain_on_stack)
    {
      rtx r, insn;

      gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
      ix86_cfa_state->offset += UNITS_PER_WORD;

      r = gen_rtx_REG (Pmode, CX_REG);
      insn = emit_insn (ix86_gen_pop1 (r));

      r = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
      r = gen_rtx_SET (VOIDmode, stack_pointer_rtx, r);
      add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Sibcall epilogues don't want a return instruction.  */
  if (style == 0)
    {
      *ix86_cfa_state = cfa_state_save;
      return;
    }

  if (crtl->args.pops_args && crtl->args.size)
    {
      rtx popc = GEN_INT (crtl->args.pops_args);

      /* i386 can only pop 64K bytes.  If asked to pop more, pop the
	 return address, do an explicit add, and jump indirectly to the
	 caller.  */
      if (crtl->args.pops_args >= 65536)
	{
	  rtx ecx = gen_rtx_REG (SImode, CX_REG);
	  rtx insn;

	  /* There is no "pascal" calling convention in any 64bit ABI.  */
	  gcc_assert (!TARGET_64BIT);

	  insn = emit_insn (gen_popsi1 (ecx));
	  ix86_cfa_state->offset -= UNITS_PER_WORD;

	  add_reg_note (insn, REG_CFA_ADJUST_CFA,
			copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
	  add_reg_note (insn, REG_CFA_REGISTER,
			gen_rtx_SET (VOIDmode, ecx, pc_rtx));
	  RTX_FRAME_RELATED_P (insn) = 1;

	  pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				     popc, -1, true);
	  emit_jump_insn (gen_return_indirect_internal (ecx));
	}
      else
	emit_jump_insn (gen_return_pop_internal (popc));
    }
  else
    emit_jump_insn (gen_return_internal ());

  /* Restore the state back to the state from the prologue,
     so that it's correct for the next epilogue.  */
  *ix86_cfa_state = cfa_state_save;
}

/* Reset from the function's potential modifications.  */

static void
ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
			       HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  if (pic_offset_table_rtx)
    SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
#if TARGET_MACHO
  /* Mach-O doesn't support labels at the end of objects, so if
     it looks like we might want one, insert a NOP.  */
  {
    rtx insn = get_last_insn ();
    while (insn
	   && NOTE_P (insn)
	   && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
      insn = PREV_INSN (insn);
    if (insn
	&& (LABEL_P (insn)
	    || (NOTE_P (insn)
		&& NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
      fputs ("\tnop\n", file);
  }
#endif
}

/* Extract the parts of an RTL expression that is a valid memory address
   for an instruction.  Return 0 if the structure of the address is
   grossly off.  Return -1 if the address contains ASHIFT, so it is not
   strictly valid, but still used for computing the length of an lea
   instruction.  */

static int
ix86_decompose_address (rtx addr, struct ix86_address *out)
{
  rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
  rtx base_reg, index_reg;
  HOST_WIDE_INT scale = 1;
  rtx scale_rtx = NULL_RTX;
  rtx tmp;
  int retval = 1;
  enum ix86_address_seg seg = SEG_DEFAULT;

  if (REG_P (addr) || GET_CODE (addr) == SUBREG)
    base = addr;
  else if (GET_CODE (addr) == PLUS)
    {
      rtx addends[4], op;
      int n = 0, i;

      op = addr;
      do
	{
	  if (n >= 4)
	    return 0;
	  addends[n++] = XEXP (op, 1);
	  op = XEXP (op, 0);
	}
      while (GET_CODE (op) == PLUS);
      if (n >= 4)
	return 0;
      addends[n] = op;

      for (i = n; i >= 0; --i)
	{
	  op = addends[i];
	  switch (GET_CODE (op))
	    {
	    case MULT:
	      if (index)
		return 0;
	      index = XEXP (op, 0);
	      scale_rtx = XEXP (op, 1);
	      break;

	    case ASHIFT:
	      if (index)
		return 0;
	      index = XEXP (op, 0);
	      tmp = XEXP (op, 1);
	      if (!CONST_INT_P (tmp))
		return 0;
	      scale = INTVAL (tmp);
	      if ((unsigned HOST_WIDE_INT) scale > 3)
		return 0;
	      scale = 1 << scale;
	      break;

	    case UNSPEC:
	      if (XINT (op, 1) == UNSPEC_TP
		  && TARGET_TLS_DIRECT_SEG_REFS
		  && seg == SEG_DEFAULT)
		seg = TARGET_64BIT ? SEG_FS : SEG_GS;
	      else
		return 0;
	      break;

	    case REG:
	    case SUBREG:
	      if (!base)
		base = op;
	      else if (!index)
		index = op;
	      else
		return 0;
	      break;

	    case CONST:
	    case CONST_INT:
	    case SYMBOL_REF:
	    case LABEL_REF:
	      if (disp)
		return 0;
	      disp = op;
	      break;

	    default:
	      return 0;
	    }
	}
    }
  else if (GET_CODE (addr) == MULT)
    {
      index = XEXP (addr, 0);		/* index*scale */
      scale_rtx = XEXP (addr, 1);
    }
  else if (GET_CODE (addr) == ASHIFT)
    {
      /* We're called for lea too, which implements ashift on occasion.  */
      index = XEXP (addr, 0);
      tmp = XEXP (addr, 1);
      if (!CONST_INT_P (tmp))
	return 0;
      scale = INTVAL (tmp);
      if ((unsigned HOST_WIDE_INT) scale > 3)
	return 0;
      scale = 1 << scale;
      retval = -1;
    }
  else
    disp = addr;			/* displacement */

  /* Extract the integral value of scale.  */
  if (scale_rtx)
    {
      if (!CONST_INT_P (scale_rtx))
	return 0;
      scale = INTVAL (scale_rtx);
    }

  base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
  index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;

  /* Avoid useless 0 displacement.  */
  if (disp == const0_rtx && (base || index))
    disp = NULL_RTX;

  /* Allow arg pointer and stack pointer as index if there is no scaling.  */
  if (base_reg && index_reg && scale == 1
      && (index_reg == arg_pointer_rtx
	  || index_reg == frame_pointer_rtx
	  || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
    {
      rtx tmp;
      tmp = base, base = index, index = tmp;
      tmp = base_reg, base_reg = index_reg, index_reg = tmp;
    }

  /* Special case: %ebp cannot be encoded as a base without a displacement.
     Similarly %r13.  */
  if (!disp
      && base_reg
      && (base_reg == hard_frame_pointer_rtx
	  || base_reg == frame_pointer_rtx
	  || base_reg == arg_pointer_rtx
	  || (REG_P (base_reg)
	      && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
		  || REGNO (base_reg) == R13_REG))))
    disp = const0_rtx;

  /* Special case: on K6, [%esi] makes the instruction vector decoded.
     Avoid this by transforming to [%esi+0].
     Reload calls address legitimization without cfun defined, so we
     need to test cfun for being non-NULL.  */
  if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
      && base_reg && !index_reg && !disp
      && REG_P (base_reg)
      && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
    disp = const0_rtx;

  /* Special case: encode reg+reg instead of reg*2.  */
  if (!base && index && scale == 2)
    base = index, base_reg = index_reg, scale = 1;

  /* Special case: scaling cannot be encoded without base or displacement.  */
  if (!base && !disp && index && scale != 1)
    disp = const0_rtx;
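
  /* As an example of a successful decomposition, the address

	(plus (plus (mult (reg %ebx) (const_int 4)) (reg %esi))
	      (const_int 12))

     yields base %esi, index %ebx, scale 4, disp 12 -- the operand of
     `12(%esi,%ebx,4)'.  */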

  out->base = base;
  out->index = index;
  out->disp = disp;
  out->scale = scale;
  out->seg = seg;

  return retval;
}

/* Return the cost of the memory address X.
   For i386, it is better to use a complex address than let gcc copy
   the address into a register and make a new pseudo.  But not if the
   address requires two registers - that would mean more pseudos with
   longer lifetimes.  */

static int
ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
{
  struct ix86_address parts;
  int cost = 1;
  int ok = ix86_decompose_address (x, &parts);

  gcc_assert (ok);

  if (parts.base && GET_CODE (parts.base) == SUBREG)
    parts.base = SUBREG_REG (parts.base);
  if (parts.index && GET_CODE (parts.index) == SUBREG)
    parts.index = SUBREG_REG (parts.index);

  /* Attempt to minimize number of registers in the address.  */
  if ((parts.base
       && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
      || (parts.index
	  && (!REG_P (parts.index)
	      || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
    cost++;

  if (parts.base
      && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
      && parts.index
      && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
      && parts.base != parts.index)
    cost++;

  /* AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
     since its predecode logic can't detect the length of instructions
     and it degenerates to vector decoded.  Increase the cost of such
     addresses here.  The penalty is minimally 2 cycles.  It may be
     worthwhile to split such addresses or even refuse such addresses
     at all.

     The following addressing modes are affected:
      [base+scale*index]
      [scale*index+disp]
      [base+index]

     The first and last case may be avoidable by explicitly coding the
     zero in the memory address, but I don't have an AMD-K6 machine
     handy to check this theory.  */

  if (TARGET_K6
      && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
	  || (parts.disp && !parts.base && parts.index && parts.scale != 1)
	  || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
    cost += 3;

  return cost;
}

/* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
   this is used to form addresses to local data when -fPIC is in
   effect.  */

static bool
darwin_local_data_pic (rtx disp)
{
  return (GET_CODE (disp) == UNSPEC
	  && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
}

/* Determine if a given RTX is a valid constant.  We already know this
   satisfies CONSTANT_P.  */

static bool
legitimate_constant_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
      x = XEXP (x, 0);

      if (GET_CODE (x) == PLUS)
	{
	  if (!CONST_INT_P (XEXP (x, 1)))
	    return false;
	  x = XEXP (x, 0);
	}

      if (TARGET_MACHO && darwin_local_data_pic (x))
	return true;

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (x) == UNSPEC)
	switch (XINT (x, 1))
	  {
	  case UNSPEC_GOT:
	  case UNSPEC_GOTOFF:
	  case UNSPEC_PLTOFF:
	    return TARGET_64BIT;
	  case UNSPEC_TPOFF:
	  case UNSPEC_NTPOFF:
	    x = XVECEXP (x, 0, 0);
	    return (GET_CODE (x) == SYMBOL_REF
		    && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
	  case UNSPEC_DTPOFF:
	    x = XVECEXP (x, 0, 0);
	    return (GET_CODE (x) == SYMBOL_REF
		    && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
	  default:
	    return false;
	  }

      /* We must have drilled down to a symbol.  */
      if (GET_CODE (x) == LABEL_REF)
	return true;
      if (GET_CODE (x) != SYMBOL_REF)
	return false;
      /* FALLTHRU */

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      if (SYMBOL_REF_TLS_MODEL (x))
	return false;

      /* DLLIMPORT symbols are never valid.  */
      if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
	  && SYMBOL_REF_DLLIMPORT_P (x))
	return false;
      break;

    case CONST_DOUBLE:
      if (GET_MODE (x) == TImode
	  && x != CONST0_RTX (TImode)
	  && !TARGET_64BIT)
	return false;
      break;

    case CONST_VECTOR:
      if (!standard_sse_constant_p (x))
	return false;

    default:
      break;
    }

  /* Otherwise we handle everything else in the move patterns.  */
  return true;
}

/* Determine if it's legal to put X into the constant pool.  This
   is not possible for the address of thread-local symbols, which
   is checked above.  */

static bool
ix86_cannot_force_const_mem (rtx x)
{
  /* We can always put integral constants and vectors in memory.  */
  switch (GET_CODE (x))
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
      return false;

    default:
      break;
    }
  return !legitimate_constant_p (x);
}

/* Nonzero if the constant value X is a legitimate general operand
   when generating PIC code.  It is given that flag_pic is on and
   that X satisfies CONSTANT_P or is a CONST_DOUBLE.  */

bool
legitimate_pic_operand_p (rtx x)
{
  rtx inner;

  switch (GET_CODE (x))
    {
    case CONST:
      inner = XEXP (x, 0);
      if (GET_CODE (inner) == PLUS
	  && CONST_INT_P (XEXP (inner, 1)))
	inner = XEXP (inner, 0);

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (inner) == UNSPEC)
	switch (XINT (inner, 1))
	  {
	  case UNSPEC_GOT:
	  case UNSPEC_GOTOFF:
	  case UNSPEC_PLTOFF:
	    return TARGET_64BIT;
	  case UNSPEC_TPOFF:
	    x = XVECEXP (inner, 0, 0);
	    return (GET_CODE (x) == SYMBOL_REF
		    && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
	  case UNSPEC_MACHOPIC_OFFSET:
	    return legitimate_pic_address_disp_p (x);
	  default:
	    return false;
	  }
      /* FALLTHRU */

    case SYMBOL_REF:
    case LABEL_REF:
      return legitimate_pic_address_disp_p (x);

    default:
      return true;
    }
}

/* Determine if a given CONST RTX is a valid memory displacement
   in PIC mode.  */

int
legitimate_pic_address_disp_p (rtx disp)
{
  bool saw_plus;

  /* In 64bit mode we can allow direct addresses of symbols and labels
     when they are not dynamic symbols.  */
  if (TARGET_64BIT)
    {
      rtx op0 = disp, op1;

      switch (GET_CODE (disp))
	{
	case LABEL_REF:
	  return true;

	case CONST:
	  if (GET_CODE (XEXP (disp, 0)) != PLUS)
	    break;
	  op0 = XEXP (XEXP (disp, 0), 0);
	  op1 = XEXP (XEXP (disp, 0), 1);
	  if (!CONST_INT_P (op1)
	      || INTVAL (op1) >= 16*1024*1024
	      || INTVAL (op1) < -16*1024*1024)
	    break;
	  if (GET_CODE (op0) == LABEL_REF)
	    return true;
	  if (GET_CODE (op0) != SYMBOL_REF)
	    break;
	  /* FALLTHRU */

	case SYMBOL_REF:
	  /* TLS references should always be enclosed in UNSPEC.  */
	  if (SYMBOL_REF_TLS_MODEL (op0))
	    return false;
	  if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
	      && ix86_cmodel != CM_LARGE_PIC)
	    return true;
	  break;

	default:
	  break;
	}
    }
  if (GET_CODE (disp) != CONST)
    return 0;
  disp = XEXP (disp, 0);

  if (TARGET_64BIT)
    {
      /* It is unsafe to allow PLUS expressions.  This limits the
	 allowed distance of GOT tables.  We should not need these
	 anyway.  */
      if (GET_CODE (disp) != UNSPEC
	  || (XINT (disp, 1) != UNSPEC_GOTPCREL
	      && XINT (disp, 1) != UNSPEC_GOTOFF
	      && XINT (disp, 1) != UNSPEC_PLTOFF))
	return 0;

      if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
	  && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
	return 0;
      return 1;
    }

  saw_plus = false;
  if (GET_CODE (disp) == PLUS)
    {
      if (!CONST_INT_P (XEXP (disp, 1)))
	return 0;
      disp = XEXP (disp, 0);
      saw_plus = true;
    }

  if (TARGET_MACHO && darwin_local_data_pic (disp))
    return 1;

  if (GET_CODE (disp) != UNSPEC)
    return 0;

  switch (XINT (disp, 1))
    {
    case UNSPEC_GOT:
      if (saw_plus)
	return false;
      /* We need to check for both symbols and labels because VxWorks loads
	 text labels with @GOT rather than @GOTOFF.  See gotoff_operand for
	 details.  */
      return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
	      || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
    case UNSPEC_GOTOFF:
      /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
	 While the ABI also specifies a 32-bit relocation, we don't
	 produce it in the small PIC model at all.  */
      if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
	   || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
	  && !TARGET_64BIT)
	return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
      return false;
    case UNSPEC_GOTTPOFF:
    case UNSPEC_GOTNTPOFF:
    case UNSPEC_INDNTPOFF:
      if (saw_plus)
	return false;
      disp = XVECEXP (disp, 0, 0);
      return (GET_CODE (disp) == SYMBOL_REF
	      && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
    case UNSPEC_NTPOFF:
      disp = XVECEXP (disp, 0, 0);
      return (GET_CODE (disp) == SYMBOL_REF
	      && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
    case UNSPEC_DTPOFF:
      disp = XVECEXP (disp, 0, 0);
      return (GET_CODE (disp) == SYMBOL_REF
	      && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
    }

  return 0;
}

/* Recognizes RTL expressions that are valid memory addresses for an
   instruction.  The MODE argument is the machine mode for the MEM
   expression that wants to use this address.

   It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS
   should convert common non-canonical forms to canonical form so that
   they will be recognized.  */

static bool
ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
			   rtx addr, bool strict)
{
  struct ix86_address parts;
  rtx base, index, disp;
  HOST_WIDE_INT scale;

  if (ix86_decompose_address (addr, &parts) <= 0)
    /* Decomposition failed.  */
    return false;

  base = parts.base;
  index = parts.index;
  disp = parts.disp;
  scale = parts.scale;

  /* Validate base register.

     Don't allow SUBREGs that span more than a word here.  It can lead
     to spill failures when the base is one word out of a two word
     structure, which is represented internally as a DImode int.  */

  if (base)
    {
      rtx reg;

      if (REG_P (base))
	reg = base;
      else if (GET_CODE (base) == SUBREG
	       && REG_P (SUBREG_REG (base))
	       && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
		  <= UNITS_PER_WORD)
	reg = SUBREG_REG (base);
      else
	/* Base is not a register.  */
	return false;

      if (GET_MODE (base) != Pmode)
	/* Base is not in Pmode.  */
	return false;

      if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
	  || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
	/* Base is not valid.  */
	return false;
    }

  /* Validate index register.

     Don't allow SUBREGs that span more than a word here -- same as
     above.  */

  if (index)
    {
      rtx reg;

      if (REG_P (index))
	reg = index;
      else if (GET_CODE (index) == SUBREG
	       && REG_P (SUBREG_REG (index))
	       && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
		  <= UNITS_PER_WORD)
	reg = SUBREG_REG (index);
      else
	/* Index is not a register.  */
	return false;

      if (GET_MODE (index) != Pmode)
	/* Index is not in Pmode.  */
	return false;

      if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
	  || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
	/* Index is not valid.  */
	return false;
    }

  /* Validate scale factor.  */
  if (scale != 1)
    {
      if (!index)
	/* Scale without index.  */
	return false;

      if (scale != 2 && scale != 4 && scale != 8)
	/* Scale is not a valid multiplier.  */
	return false;
    }

  /* Validate displacement.  */
  if (disp)
    {
      if (GET_CODE (disp) == CONST
	  && GET_CODE (XEXP (disp, 0)) == UNSPEC
	  && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
	switch (XINT (XEXP (disp, 0), 1))
	  {
	  /* Refuse GOTOFF and GOT in 64bit mode since it is always
	     64bit when used.  While the ABI also specifies 32-bit
	     relocations, we don't produce them at all and use
	     IP-relative addressing instead.  */
	  case UNSPEC_GOT:
	  case UNSPEC_GOTOFF:
	    gcc_assert (flag_pic);
	    if (!TARGET_64BIT)
	      goto is_legitimate_pic;
	    /* 64bit address unspec.  */
	    return false;

	  case UNSPEC_GOTPCREL:
	    gcc_assert (flag_pic);
	    goto is_legitimate_pic;

	  case UNSPEC_GOTTPOFF:
	  case UNSPEC_GOTNTPOFF:
	  case UNSPEC_INDNTPOFF:
	  case UNSPEC_NTPOFF:
	  case UNSPEC_DTPOFF:
	    break;

	  default:
	    /* Invalid address unspec.  */
	    return false;
	  }

      else if (SYMBOLIC_CONST (disp)
	       && (flag_pic
		   || (TARGET_MACHO
#if TARGET_MACHO
		       && MACHOPIC_INDIRECT
		       && !machopic_operand_p (disp)
#endif
	       )))
	{
	is_legitimate_pic:
	  if (TARGET_64BIT && (index || base))
	    {
	      /* foo@dtpoff(%rX) is ok.  */
	      if (GET_CODE (disp) != CONST
		  || GET_CODE (XEXP (disp, 0)) != PLUS
		  || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
		  || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
		  || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
		      && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
		/* Non-constant pic memory reference.  */
		return false;
	    }
	  else if (! legitimate_pic_address_disp_p (disp))
	    /* Displacement is an invalid pic construct.  */
	    return false;

	  /* This code used to verify that a symbolic pic displacement
	     includes the pic_offset_table_rtx register.

	     While this is a good idea, unfortunately these constructs
	     may be created by the "adds using lea" optimization for
	     incorrect code like:

	     int a;
	     int foo(int i)
	       {
	         return *(&a+i);
	       }

	     This code is nonsensical, but results in addressing the
	     GOT table with a pic_offset_table_rtx base.  We can't
	     just refuse it easily, since it gets matched by the
	     "addsi3" pattern, which later gets split to lea in case
	     the output register differs from the input.  While this
	     can be handled by a separate addsi pattern for this case
	     that never results in lea, this seems to be the easier and
	     correct fix for the crash, so this test is disabled.  */
	}
      else if (GET_CODE (disp) != LABEL_REF
	       && !CONST_INT_P (disp)
	       && (GET_CODE (disp) != CONST
		   || !legitimate_constant_p (disp))
	       && (GET_CODE (disp) != SYMBOL_REF
		   || !legitimate_constant_p (disp)))
	/* Displacement is not constant.  */
	return false;
      else if (TARGET_64BIT
	       && !x86_64_immediate_operand (disp, VOIDmode))
	/* Displacement is out of range.  */
	return false;
    }

  /* Everything looks valid.  */
  return true;
}

/* Determine if a given RTX is a valid constant address.  */

bool
constant_address_p (rtx x)
{
  return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
}

/* Return a unique alias set for the GOT.  */

static alias_set_type
ix86_GOT_alias_set (void)
{
  static alias_set_type set = -1;
  if (set == -1)
    set = new_alias_set ();
  return set;
}

/* Return a legitimate reference for ORIG (an address) using the
   register REG.  If REG is 0, a new pseudo is generated.

   There are two types of references that must be handled:

   1. Global data references must load the address from the GOT, via
      the PIC reg.  An insn is emitted to do this load, and the reg is
      used.

   2. Static data references, constant pool addresses, and code labels
      compute the address as an offset from the GOT, whose base is in
      the PIC reg.  Static data objects have SYMBOL_FLAG_LOCAL set to
      differentiate them from global data objects.  The returned
      address is the PIC reg + an unspec constant.

   TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
   reg also appears in the address.  */
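
/* As a concrete 32-bit ELF illustration of the two cases: a global
   `var' is typically loaded via `movl var@GOT(%ebx), %eax' and then
   dereferenced, while a file-local `var' is addressed directly with
   `leal var@GOTOFF(%ebx), %eax'.  (Exact sequences vary with target
   and code model.)  */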

static rtx
legitimize_pic_address (rtx orig, rtx reg)
{
  rtx addr = orig;
  rtx new_rtx = orig;
  rtx base;

#if TARGET_MACHO
  if (TARGET_MACHO && !TARGET_64BIT)
    {
      if (reg == 0)
	reg = gen_reg_rtx (Pmode);
      /* Use the generic Mach-O PIC machinery.  */
      return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
    }
#endif

  if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
    new_rtx = addr;
  else if (TARGET_64BIT
	   && ix86_cmodel != CM_SMALL_PIC
	   && gotoff_operand (addr, Pmode))
    {
      rtx tmpreg;
      /* This symbol may be referenced via a displacement from the PIC
	 base address (@GOTOFF).  */

      if (reload_in_progress)
	df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
      if (GET_CODE (addr) == CONST)
	addr = XEXP (addr, 0);
      if (GET_CODE (addr) == PLUS)
	{
	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
				    UNSPEC_GOTOFF);
	  new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
	}
      else
	new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
      new_rtx = gen_rtx_CONST (Pmode, new_rtx);
      if (!reg)
	tmpreg = gen_reg_rtx (Pmode);
      else
	tmpreg = reg;
      emit_move_insn (tmpreg, new_rtx);

      if (reg != 0)
	{
	  new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
					 tmpreg, 1, OPTAB_DIRECT);
	  new_rtx = reg;
	}
      else
	new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
    }
  else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
    {
      /* This symbol may be referenced via a displacement from the PIC
	 base address (@GOTOFF).  */

      if (reload_in_progress)
	df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
      if (GET_CODE (addr) == CONST)
	addr = XEXP (addr, 0);
      if (GET_CODE (addr) == PLUS)
	{
	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
				    UNSPEC_GOTOFF);
	  new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
	}
      else
	new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
      new_rtx = gen_rtx_CONST (Pmode, new_rtx);
      new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);

      if (reg != 0)
	{
	  emit_move_insn (reg, new_rtx);
	  new_rtx = reg;
	}
    }
  else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
	   /* We can't use @GOTOFF for text labels on VxWorks;
	      see gotoff_operand.  */
	   || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
    {
      if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
	{
	  if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
	    return legitimize_dllimport_symbol (addr, true);
	  if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
	      && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
	      && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
	    {
	      rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0),
						   true);
	      return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
	    }
	}

      if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
	{
	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
				    UNSPEC_GOTPCREL);
	  new_rtx = gen_rtx_CONST (Pmode, new_rtx);
	  new_rtx = gen_const_mem (Pmode, new_rtx);
	  set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());

	  if (reg == 0)
	    reg = gen_reg_rtx (Pmode);
	  /* Use gen_movsi directly, otherwise the address is loaded
	     into a register for CSE.  We don't want to CSE these
	     addresses; instead we CSE addresses from the GOT table,
	     so skip this.  */
	  emit_insn (gen_movsi (reg, new_rtx));
	  new_rtx = reg;
	}
      else
	{
	  /* This symbol must be referenced via a load from the
	     Global Offset Table (@GOT).  */

	  if (reload_in_progress)
	    df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
	  new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
	  new_rtx = gen_rtx_CONST (Pmode, new_rtx);
	  if (TARGET_64BIT)
	    new_rtx = force_reg (Pmode, new_rtx);
	  new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
	  new_rtx = gen_const_mem (Pmode, new_rtx);
	  set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());

	  if (reg == 0)
	    reg = gen_reg_rtx (Pmode);
	  emit_move_insn (reg, new_rtx);
	  new_rtx = reg;
	}
    }
10240 if (CONST_INT_P (addr)
10241 && !x86_64_immediate_operand (addr, VOIDmode))
10245 emit_move_insn (reg, addr);
10249 new_rtx = force_reg (Pmode, addr);
10251 else if (GET_CODE (addr) == CONST)
10253 addr = XEXP (addr, 0);
10255 /* We must match stuff we generate before. Assume the only
10256 unspecs that can get here are ours. Not that we could do
10257 anything with them anyway.... */
10258 if (GET_CODE (addr) == UNSPEC
10259 || (GET_CODE (addr) == PLUS
10260 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
10262 gcc_assert (GET_CODE (addr) == PLUS);
10264 if (GET_CODE (addr) == PLUS)
10266 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
10268 /* Check first to see if this is a constant offset from a @GOTOFF
10269 symbol reference. */
10270 if (gotoff_operand (op0, Pmode)
10271 && CONST_INT_P (op1))
10275 if (reload_in_progress)
10276 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10277 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
10279 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
10280 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10281 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10285 emit_move_insn (reg, new_rtx);
10291 if (INTVAL (op1) < -16*1024*1024
10292 || INTVAL (op1) >= 16*1024*1024)
10294 if (!x86_64_immediate_operand (op1, Pmode))
10295 op1 = force_reg (Pmode, op1);
10296 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
10302 base = legitimize_pic_address (XEXP (addr, 0), reg);
10303 new_rtx = legitimize_pic_address (XEXP (addr, 1),
10304 base == reg ? NULL_RTX : reg);
10306 if (CONST_INT_P (new_rtx))
10307 new_rtx = plus_constant (base, INTVAL (new_rtx));
10310 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
10312 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
10313 new_rtx = XEXP (new_rtx, 1);
10315 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
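/* A minimal, hedged illustration of what the machinery above produces
   (assuming a GNU/Linux target; 'extern_var' and 'read_extern' are
   made-up names for the example).  Compiling this with -fPIC routes
   the access through the forms built above: a @GOTPCREL(%rip) load in
   64-bit small-model code, or a @GOT load relative to the PIC
   register on ia32.  */

extern int extern_var;          /* hypothetical global from another DSO */

int
read_extern (void)
{
  return extern_var;            /* becomes a load through the GOT */
}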
10323 /* Load the thread pointer. If TO_REG is true, force it into a register. */
10326 get_thread_pointer (int to_reg)
10330 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
10334 reg = gen_reg_rtx (Pmode);
10335 insn = gen_rtx_SET (VOIDmode, reg, tp);
10336 insn = emit_insn (insn);
10341 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
10342 false if we expect this to be used for a memory address and true if
10343 we expect to load the address into a register. */
10346 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
10348 rtx dest, base, off, pic, tp;
10353 case TLS_MODEL_GLOBAL_DYNAMIC:
10354 dest = gen_reg_rtx (Pmode);
10355 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10357 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10359 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
10362 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
10363 insns = get_insns ();
10366 RTL_CONST_CALL_P (insns) = 1;
10367 emit_libcall_block (insns, dest, rax, x);
10369 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10370 emit_insn (gen_tls_global_dynamic_64 (dest, x));
10372 emit_insn (gen_tls_global_dynamic_32 (dest, x));
10374 if (TARGET_GNU2_TLS)
10376 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
10378 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10382 case TLS_MODEL_LOCAL_DYNAMIC:
10383 base = gen_reg_rtx (Pmode);
10384 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10386 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10388 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
10391 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
10392 insns = get_insns ();
10395 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
10396 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
10397 RTL_CONST_CALL_P (insns) = 1;
10398 emit_libcall_block (insns, base, rax, note);
10400 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10401 emit_insn (gen_tls_local_dynamic_base_64 (base));
10403 emit_insn (gen_tls_local_dynamic_base_32 (base));
10405 if (TARGET_GNU2_TLS)
10407 rtx x = ix86_tls_module_base ();
10409 set_unique_reg_note (get_last_insn (), REG_EQUIV,
10410 gen_rtx_MINUS (Pmode, x, tp));
10413 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
10414 off = gen_rtx_CONST (Pmode, off);
10416 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
10418 if (TARGET_GNU2_TLS)
10420 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
10422 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10427 case TLS_MODEL_INITIAL_EXEC:
10431 type = UNSPEC_GOTNTPOFF;
10435 if (reload_in_progress)
10436 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10437 pic = pic_offset_table_rtx;
10438 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
10440 else if (!TARGET_ANY_GNU_TLS)
10442 pic = gen_reg_rtx (Pmode);
10443 emit_insn (gen_set_got (pic));
10444 type = UNSPEC_GOTTPOFF;
10449 type = UNSPEC_INDNTPOFF;
10452 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
10453 off = gen_rtx_CONST (Pmode, off);
10455 off = gen_rtx_PLUS (Pmode, pic, off);
10456 off = gen_const_mem (Pmode, off);
10457 set_mem_alias_set (off, ix86_GOT_alias_set ());
10459 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10461 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10462 off = force_reg (Pmode, off);
10463 return gen_rtx_PLUS (Pmode, base, off);
10467 base = get_thread_pointer (true);
10468 dest = gen_reg_rtx (Pmode);
10469 emit_insn (gen_subsi3 (dest, base, off));
10473 case TLS_MODEL_LOCAL_EXEC:
10474 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
10475 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10476 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
10477 off = gen_rtx_CONST (Pmode, off);
10479 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10481 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10482 return gen_rtx_PLUS (Pmode, base, off);
10486 base = get_thread_pointer (true);
10487 dest = gen_reg_rtx (Pmode);
10488 emit_insn (gen_subsi3 (dest, base, off));
10493 gcc_unreachable ();
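/* A hedged sketch exercising the four TLS models dispatched above
   (assuming a GNU/Linux toolchain; 'tls_counter' is a made-up name).
   Compiling this with -fPIC and -ftls-model=global-dynamic,
   local-dynamic, initial-exec or local-exec selects the corresponding
   case of the switch above.  */

__thread int tls_counter;       /* hypothetical thread-local variable */

int
bump_tls (void)
{
  return ++tls_counter;
}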
10499 /* Create or return the unique __imp_DECL dllimport symbol corresponding
10502 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
10503 htab_t dllimport_map;
10506 get_dllimport_decl (tree decl)
10508 struct tree_map *h, in;
10511 const char *prefix;
10512 size_t namelen, prefixlen;
10517 if (!dllimport_map)
10518 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
10520 in.hash = htab_hash_pointer (decl);
10521 in.base.from = decl;
10522 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
10523 h = (struct tree_map *) *loc;
10527 *loc = h = GGC_NEW (struct tree_map);
10529 h->base.from = decl;
10530 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
10531 VAR_DECL, NULL, ptr_type_node);
10532 DECL_ARTIFICIAL (to) = 1;
10533 DECL_IGNORED_P (to) = 1;
10534 DECL_EXTERNAL (to) = 1;
10535 TREE_READONLY (to) = 1;
10537 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10538 name = targetm.strip_name_encoding (name);
10539 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
10540 ? "*__imp_" : "*__imp__";
10541 namelen = strlen (name);
10542 prefixlen = strlen (prefix);
10543 imp_name = (char *) alloca (namelen + prefixlen + 1);
10544 memcpy (imp_name, prefix, prefixlen);
10545 memcpy (imp_name + prefixlen, name, namelen + 1);
10547 name = ggc_alloc_string (imp_name, namelen + prefixlen);
10548 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
10549 SET_SYMBOL_REF_DECL (rtl, to);
10550 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
10552 rtl = gen_const_mem (Pmode, rtl);
10553 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
10555 SET_DECL_RTL (to, rtl);
10556 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
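/* A self-contained sketch of the "*__imp_" name construction above
   (the helper name is invented; the leading '*' is the assembler-name
   marker that suppresses the user label prefix).  */

#include <stdlib.h>
#include <string.h>

static char *
make_imp_name (const char *name, int no_underscore_prefix)
{
  const char *prefix = no_underscore_prefix ? "*__imp_" : "*__imp__";
  size_t prefixlen = strlen (prefix), namelen = strlen (name);
  char *buf = (char *) malloc (prefixlen + namelen + 1);

  if (buf)
    {
      memcpy (buf, prefix, prefixlen);
      memcpy (buf + prefixlen, name, namelen + 1);
    }
  return buf;
}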
10561 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
10562 true if we require the result be a register. */
10565 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
10570 gcc_assert (SYMBOL_REF_DECL (symbol));
10571 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
10573 x = DECL_RTL (imp_decl);
10575 x = force_reg (Pmode, x);
10579 /* Try machine-dependent ways of modifying an illegitimate address
10580 to be legitimate. If we find one, return the new, valid address.
10581 This macro is used in only one place: `memory_address' in explow.c.
10583 OLDX is the address as it was before break_out_memory_refs was called.
10584 In some cases it is useful to look at this to decide what needs to be done.
10586 It is always safe for this macro to do nothing. It exists to recognize
10587 opportunities to optimize the output.
10589 For the 80386, we handle X+REG by loading X into a register R and
10590 using R+REG. R will go in a general reg and indexing will be used.
10591 However, if REG is a broken-out memory address or multiplication,
10592 nothing needs to be done because REG can certainly go in a general reg.
10594 When -fpic is used, special handling is needed for symbolic references.
10595 See comments by legitimize_pic_address in i386.c for details. */
10598 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
10599 enum machine_mode mode)
10604 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
10606 return legitimize_tls_address (x, (enum tls_model) log, false);
10607 if (GET_CODE (x) == CONST
10608 && GET_CODE (XEXP (x, 0)) == PLUS
10609 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10610 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
10612 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
10613 (enum tls_model) log, false);
10614 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10617 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10619 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
10620 return legitimize_dllimport_symbol (x, true);
10621 if (GET_CODE (x) == CONST
10622 && GET_CODE (XEXP (x, 0)) == PLUS
10623 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10624 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
10626 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
10627 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10631 if (flag_pic && SYMBOLIC_CONST (x))
10632 return legitimize_pic_address (x, 0);
10634 /* Canonicalize shifts by 0, 1, 2, 3 into multiply.  */
10635 if (GET_CODE (x) == ASHIFT
10636 && CONST_INT_P (XEXP (x, 1))
10637 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
10640 log = INTVAL (XEXP (x, 1));
10641 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
10642 GEN_INT (1 << log));
10645 if (GET_CODE (x) == PLUS)
10647 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
10649 if (GET_CODE (XEXP (x, 0)) == ASHIFT
10650 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10651 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
10654 log = INTVAL (XEXP (XEXP (x, 0), 1));
10655 XEXP (x, 0) = gen_rtx_MULT (Pmode,
10656 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
10657 GEN_INT (1 << log));
10660 if (GET_CODE (XEXP (x, 1)) == ASHIFT
10661 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10662 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
10665 log = INTVAL (XEXP (XEXP (x, 1), 1));
10666 XEXP (x, 1) = gen_rtx_MULT (Pmode,
10667 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
10668 GEN_INT (1 << log));
10671 /* Put multiply first if it isn't already. */
10672 if (GET_CODE (XEXP (x, 1)) == MULT)
10674 rtx tmp = XEXP (x, 0);
10675 XEXP (x, 0) = XEXP (x, 1);
10680 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
10681 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
10682 created by virtual register instantiation, register elimination, and
10683 similar optimizations. */
10684 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
10687 x = gen_rtx_PLUS (Pmode,
10688 gen_rtx_PLUS (Pmode, XEXP (x, 0),
10689 XEXP (XEXP (x, 1), 0)),
10690 XEXP (XEXP (x, 1), 1));
10694 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
10695 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
10696 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
10697 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10698 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
10699 && CONSTANT_P (XEXP (x, 1)))
10702 rtx other = NULL_RTX;
10704 if (CONST_INT_P (XEXP (x, 1)))
10706 constant = XEXP (x, 1);
10707 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
10709 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
10711 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
10712 other = XEXP (x, 1);
10720 x = gen_rtx_PLUS (Pmode,
10721 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
10722 XEXP (XEXP (XEXP (x, 0), 1), 0)),
10723 plus_constant (other, INTVAL (constant)));
10727 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10730 if (GET_CODE (XEXP (x, 0)) == MULT)
10733 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
10736 if (GET_CODE (XEXP (x, 1)) == MULT)
10739 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
10743 && REG_P (XEXP (x, 1))
10744 && REG_P (XEXP (x, 0)))
10747 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
10750 x = legitimize_pic_address (x, 0);
10753 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10756 if (REG_P (XEXP (x, 0)))
10758 rtx temp = gen_reg_rtx (Pmode);
10759 rtx val = force_operand (XEXP (x, 1), temp);
10761 emit_move_insn (temp, val);
10763 XEXP (x, 1) = temp;
10767 else if (REG_P (XEXP (x, 1)))
10769 rtx temp = gen_reg_rtx (Pmode);
10770 rtx val = force_operand (XEXP (x, 0), temp);
10772 emit_move_insn (temp, val);
10774 XEXP (x, 0) = temp;
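/* A hedged model of the canonical x86 effective address that the
   canonicalizations above steer toward: base + index * scale + disp,
   with the scale limited to 1, 2, 4 or 8 (hence the ASHIFT-by-0..3 to
   MULT rewrites).  The struct and helper names are illustrative
   only.  */

struct ea
{
  long base;
  long index;
  int scale;                    /* 1, 2, 4 or 8: 1 << log for log in 0..3 */
  long disp;
};

static long
ea_eval (const struct ea *a)
{
  return a->base + a->index * a->scale + a->disp;
}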
10782 /* Print an integer constant expression in assembler syntax. Addition
10783 and subtraction are the only arithmetic that may appear in these
10784 expressions. FILE is the stdio stream to write to, X is the rtx, and
10785 CODE is the operand print code from the output string. */
10788 output_pic_addr_const (FILE *file, rtx x, int code)
10792 switch (GET_CODE (x))
10795 gcc_assert (flag_pic);
10800 if (! TARGET_MACHO || TARGET_64BIT)
10801 output_addr_const (file, x);
10804 const char *name = XSTR (x, 0);
10806 /* Mark the decl as referenced so that cgraph will
10807 output the function. */
10808 if (SYMBOL_REF_DECL (x))
10809 mark_decl_referenced (SYMBOL_REF_DECL (x));
10812 if (MACHOPIC_INDIRECT
10813 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
10814 name = machopic_indirection_name (x, /*stub_p=*/true);
10816 assemble_name (file, name);
10818 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
10819 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
10820 fputs ("@PLT", file);
10827 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
10828 assemble_name (asm_out_file, buf);
10832 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
10836 /* This used to output parentheses around the expression,
10837 but that does not work on the 386 (either ATT or BSD assembler). */
10838 output_pic_addr_const (file, XEXP (x, 0), code);
10842 if (GET_MODE (x) == VOIDmode)
10844 /* We can use %d if the number is <32 bits and positive. */
10845 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
10846 fprintf (file, "0x%lx%08lx",
10847 (unsigned long) CONST_DOUBLE_HIGH (x),
10848 (unsigned long) CONST_DOUBLE_LOW (x));
10850 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
10853 /* We can't handle floating point constants;
10854 PRINT_OPERAND must handle them. */
10855 output_operand_lossage ("floating constant misused");
10859 /* Some assemblers need integer constants to appear first. */
10860 if (CONST_INT_P (XEXP (x, 0)))
10862 output_pic_addr_const (file, XEXP (x, 0), code);
10864 output_pic_addr_const (file, XEXP (x, 1), code);
10868 gcc_assert (CONST_INT_P (XEXP (x, 1)));
10869 output_pic_addr_const (file, XEXP (x, 1), code);
10871 output_pic_addr_const (file, XEXP (x, 0), code);
10877 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
10878 output_pic_addr_const (file, XEXP (x, 0), code);
10880 output_pic_addr_const (file, XEXP (x, 1), code);
10882 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
10886 gcc_assert (XVECLEN (x, 0) == 1);
10887 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
10888 switch (XINT (x, 1))
10891 fputs ("@GOT", file);
10893 case UNSPEC_GOTOFF:
10894 fputs ("@GOTOFF", file);
10896 case UNSPEC_PLTOFF:
10897 fputs ("@PLTOFF", file);
10899 case UNSPEC_GOTPCREL:
10900 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10901 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
10903 case UNSPEC_GOTTPOFF:
10904 /* FIXME: This might be @TPOFF in Sun ld too. */
10905 fputs ("@gottpoff", file);
10908 fputs ("@tpoff", file);
10910 case UNSPEC_NTPOFF:
10912 fputs ("@tpoff", file);
10914 fputs ("@ntpoff", file);
10916 case UNSPEC_DTPOFF:
10917 fputs ("@dtpoff", file);
10919 case UNSPEC_GOTNTPOFF:
10921 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10922 "@gottpoff(%rip)": "@gottpoff[rip]", file);
10924 fputs ("@gotntpoff", file);
10926 case UNSPEC_INDNTPOFF:
10927 fputs ("@indntpoff", file);
10930 case UNSPEC_MACHOPIC_OFFSET:
10932 machopic_output_function_base_name (file);
10936 output_operand_lossage ("invalid UNSPEC as operand");
10942 output_operand_lossage ("invalid expression as operand");
10946 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10947 We need to emit DTP-relative relocations. */
10949 static void ATTRIBUTE_UNUSED
10950 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
10952 fputs (ASM_LONG, file);
10953 output_addr_const (file, x);
10954 fputs ("@dtpoff", file);
10960 fputs (", 0", file);
10963 gcc_unreachable ();
10967 /* Return true if X is a representation of the PIC register. This copes
10968 with calls from ix86_find_base_term, where the register might have
10969 been replaced by a cselib value. */
10972 ix86_pic_register_p (rtx x)
10974 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
10975 return (pic_offset_table_rtx
10976 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
10978 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
10981 /* In the name of slightly smaller debug output, and to cater to
10982 general assembler lossage, recognize PIC+GOTOFF and turn it back
10983 into a direct symbol reference.
10985 On Darwin, this is necessary to avoid a crash, because Darwin
10986 has a different PIC label for each routine but the DWARF debugging
10987 information is not associated with any particular routine, so it's
10988 necessary to remove references to the PIC label from RTL stored by
10989 the DWARF output code. */
10992 ix86_delegitimize_address (rtx x)
10994 rtx orig_x = delegitimize_mem_from_attrs (x);
10995 /* addend is NULL or some rtx if x is something+GOTOFF where
10996 something doesn't include the PIC register. */
10997 rtx addend = NULL_RTX;
10998 /* reg_addend is NULL or a multiple of some register. */
10999 rtx reg_addend = NULL_RTX;
11000 /* const_addend is NULL or a const_int. */
11001 rtx const_addend = NULL_RTX;
11002 /* This is the result, or NULL. */
11003 rtx result = NULL_RTX;
11012 if (GET_CODE (x) != CONST
11013 || GET_CODE (XEXP (x, 0)) != UNSPEC
11014 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
11015 || !MEM_P (orig_x))
11017 x = XVECEXP (XEXP (x, 0), 0, 0);
11018 if (GET_MODE (orig_x) != Pmode)
11019 return simplify_gen_subreg (GET_MODE (orig_x), x, Pmode, 0);
11023 if (GET_CODE (x) != PLUS
11024 || GET_CODE (XEXP (x, 1)) != CONST)
11027 if (ix86_pic_register_p (XEXP (x, 0)))
11028 /* %ebx + GOT/GOTOFF */
11030 else if (GET_CODE (XEXP (x, 0)) == PLUS)
11032 /* %ebx + %reg * scale + GOT/GOTOFF */
11033 reg_addend = XEXP (x, 0);
11034 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
11035 reg_addend = XEXP (reg_addend, 1);
11036 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
11037 reg_addend = XEXP (reg_addend, 0);
11040 reg_addend = NULL_RTX;
11041 addend = XEXP (x, 0);
11045 addend = XEXP (x, 0);
11047 x = XEXP (XEXP (x, 1), 0);
11048 if (GET_CODE (x) == PLUS
11049 && CONST_INT_P (XEXP (x, 1)))
11051 const_addend = XEXP (x, 1);
11055 if (GET_CODE (x) == UNSPEC
11056 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
11057 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
11058 result = XVECEXP (x, 0, 0);
11060 if (TARGET_MACHO && darwin_local_data_pic (x)
11061 && !MEM_P (orig_x))
11062 result = XVECEXP (x, 0, 0);
11068 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
11070 result = gen_rtx_PLUS (Pmode, reg_addend, result);
11073 /* If the rest of the original X doesn't involve the PIC register, add
11074 addend and subtract pic_offset_table_rtx. This can happen e.g.
11076 leal (%ebx, %ecx, 4), %ecx
11078 movl foo@GOTOFF(%ecx), %edx
11079 in which case we return (%ecx - %ebx) + foo. */
11080 if (pic_offset_table_rtx)
11081 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
11082 pic_offset_table_rtx),
11087 if (GET_MODE (orig_x) != Pmode && MEM_P (orig_x))
11088 return simplify_gen_subreg (GET_MODE (orig_x), result, Pmode, 0);
11092 /* If X is a machine specific address (i.e. a symbol or label being
11093 referenced as a displacement from the GOT implemented using an
11094 UNSPEC), then return the base term. Otherwise return X. */
11097 ix86_find_base_term (rtx x)
11103 if (GET_CODE (x) != CONST)
11105 term = XEXP (x, 0);
11106 if (GET_CODE (term) == PLUS
11107 && (CONST_INT_P (XEXP (term, 1))
11108 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
11109 term = XEXP (term, 0);
11110 if (GET_CODE (term) != UNSPEC
11111 || XINT (term, 1) != UNSPEC_GOTPCREL)
11114 return XVECEXP (term, 0, 0);
11117 return ix86_delegitimize_address (x);
11121 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
11122 int fp, FILE *file)
11124 const char *suffix;
11126 if (mode == CCFPmode || mode == CCFPUmode)
11128 code = ix86_fp_compare_code_to_integer (code);
11132 code = reverse_condition (code);
11183 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
11187 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
11188 Those same assemblers have the same but opposite lossage on cmov. */
11189 if (mode == CCmode)
11190 suffix = fp ? "nbe" : "a";
11191 else if (mode == CCCmode)
11194 gcc_unreachable ();
11210 gcc_unreachable ();
11214 gcc_assert (mode == CCmode || mode == CCCmode);
11231 gcc_unreachable ();
11235 /* ??? As above. */
11236 gcc_assert (mode == CCmode || mode == CCCmode);
11237 suffix = fp ? "nb" : "ae";
11240 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
11244 /* ??? As above. */
11245 if (mode == CCmode)
11247 else if (mode == CCCmode)
11248 suffix = fp ? "nb" : "ae";
11250 gcc_unreachable ();
11253 suffix = fp ? "u" : "p";
11256 suffix = fp ? "nu" : "np";
11259 gcc_unreachable ();
11261 fputs (suffix, file);
11264 /* Print the name of register X to FILE based on its machine mode and number.
11265 If CODE is 'w', pretend the mode is HImode.
11266 If CODE is 'b', pretend the mode is QImode.
11267 If CODE is 'k', pretend the mode is SImode.
11268 If CODE is 'q', pretend the mode is DImode.
11269 If CODE is 'x', pretend the mode is V4SFmode.
11270 If CODE is 't', pretend the mode is V8SFmode.
11271 If CODE is 'h', pretend the reg is the 'high' byte register.
11272 If CODE is 'y', print "st(0)" instead of "st", if the reg is a stack operand.
11273 If CODE is 'd', duplicate the operand for AVX instruction.
11277 print_reg (rtx x, int code, FILE *file)
11280 bool duplicated = code == 'd' && TARGET_AVX;
11282 gcc_assert (x == pc_rtx
11283 || (REGNO (x) != ARG_POINTER_REGNUM
11284 && REGNO (x) != FRAME_POINTER_REGNUM
11285 && REGNO (x) != FLAGS_REG
11286 && REGNO (x) != FPSR_REG
11287 && REGNO (x) != FPCR_REG));
11289 if (ASSEMBLER_DIALECT == ASM_ATT)
11294 gcc_assert (TARGET_64BIT);
11295 fputs ("rip", file);
11299 if (code == 'w' || MMX_REG_P (x))
11301 else if (code == 'b')
11303 else if (code == 'k')
11305 else if (code == 'q')
11307 else if (code == 'y')
11309 else if (code == 'h')
11311 else if (code == 'x')
11313 else if (code == 't')
11316 code = GET_MODE_SIZE (GET_MODE (x));
11318 /* Irritatingly, AMD extended registers use a different naming
11319 convention from the normal registers.  */
11320 if (REX_INT_REG_P (x))
11322 gcc_assert (TARGET_64BIT);
11326 error ("extended registers have no high halves");
11329 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
11332 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
11335 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
11338 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
11341 error ("unsupported operand size for extended register");
11351 if (STACK_TOP_P (x))
11360 if (! ANY_FP_REG_P (x))
11361 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
11366 reg = hi_reg_name[REGNO (x)];
11369 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
11371 reg = qi_reg_name[REGNO (x)];
11374 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
11376 reg = qi_high_reg_name[REGNO (x)];
11381 gcc_assert (!duplicated);
11383 fputs (hi_reg_name[REGNO (x)] + 1, file);
11388 gcc_unreachable ();
11394 if (ASSEMBLER_DIALECT == ASM_ATT)
11395 fprintf (file, ", %%%s", reg);
11397 fprintf (file, ", %s", reg);
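/* A hedged sketch of the extended-register spellings emitted above
   (helper name invented for the example): r8..r15 take b/w/d suffixes
   for the 8-, 16- and 32-bit views, and no suffix for the full 64
   bits.  */

#include <stdio.h>

static void
rex_reg_name (char *buf, size_t len, int n /* 8 .. 15 */, int size_bytes)
{
  const char *suffix = size_bytes == 1 ? "b"
		       : size_bytes == 2 ? "w"
		       : size_bytes == 4 ? "d" : "";
  snprintf (buf, len, "r%d%s", n, suffix);
}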
11401 /* Locate some local-dynamic symbol still in use by this function
11402 so that we can print its name in some tls_local_dynamic_base
11406 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11410 if (GET_CODE (x) == SYMBOL_REF
11411 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11413 cfun->machine->some_ld_name = XSTR (x, 0);
11420 static const char *
11421 get_some_local_dynamic_name (void)
11425 if (cfun->machine->some_ld_name)
11426 return cfun->machine->some_ld_name;
11428 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11429 if (NONDEBUG_INSN_P (insn)
11430 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
11431 return cfun->machine->some_ld_name;
11436 /* Meaning of CODE:
11437 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
11438 C -- print opcode suffix for set/cmov insn.
11439 c -- like C, but print reversed condition
11440 F,f -- likewise, but for floating-point.
11441 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
11443 R -- print the prefix for register names.
11444 z -- print the opcode suffix for the size of the current operand.
11445 Z -- likewise, with special suffixes for x87 instructions.
11446 * -- print a star (in certain assembler syntax)
11447 A -- print an absolute memory reference.
11448 w -- print the operand as if it's a "word" (HImode) even if it isn't.
11449 s -- print a shift double count, followed by the assembler's argument
11451 b -- print the QImode name of the register for the indicated operand.
11452 %b0 would print %al if operands[0] is reg 0.
11453 w -- likewise, print the HImode name of the register.
11454 k -- likewise, print the SImode name of the register.
11455 q -- likewise, print the DImode name of the register.
11456 x -- likewise, print the V4SFmode name of the register.
11457 t -- likewise, print the V8SFmode name of the register.
11458 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
11459 y -- print "st(0)" instead of "st" as a register.
11460 d -- print duplicated register operand for AVX instruction.
11461 D -- print condition for SSE cmp instruction.
11462 P -- if PIC, print an @PLT suffix.
11463 X -- don't print any sort of PIC '@' suffix for a symbol.
11464 & -- print some in-use local-dynamic symbol name.
11465 H -- print a memory address offset by 8; used for sse high-parts
11466 Y -- print condition for XOP pcom* instruction.
11467 + -- print a branch hint as 'cs' or 'ds' prefix
11468 ; -- print a semicolon (after prefixes due to bug in older gas).
11472 print_operand (FILE *file, rtx x, int code)
11479 if (ASSEMBLER_DIALECT == ASM_ATT)
11485 const char *name = get_some_local_dynamic_name ();
11487 output_operand_lossage ("'%%&' used without any "
11488 "local dynamic TLS references");
11490 assemble_name (file, name);
11495 switch (ASSEMBLER_DIALECT)
11502 /* Intel syntax. For absolute addresses, registers should not
11503 be surrounded by braces. */
11507 PRINT_OPERAND (file, x, 0);
11514 gcc_unreachable ();
11517 PRINT_OPERAND (file, x, 0);
11522 if (ASSEMBLER_DIALECT == ASM_ATT)
11527 if (ASSEMBLER_DIALECT == ASM_ATT)
11532 if (ASSEMBLER_DIALECT == ASM_ATT)
11537 if (ASSEMBLER_DIALECT == ASM_ATT)
11542 if (ASSEMBLER_DIALECT == ASM_ATT)
11547 if (ASSEMBLER_DIALECT == ASM_ATT)
11552 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11554 /* Opcodes don't get size suffixes if using Intel opcodes. */
11555 if (ASSEMBLER_DIALECT == ASM_INTEL)
11558 switch (GET_MODE_SIZE (GET_MODE (x)))
11577 output_operand_lossage
11578 ("invalid operand size for operand code '%c'", code);
11583 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11585 (0, "non-integer operand used with operand code '%c'", code);
11589 /* 387 opcodes don't get size suffixes if using Intel opcodes. */
11590 if (ASSEMBLER_DIALECT == ASM_INTEL)
11593 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11595 switch (GET_MODE_SIZE (GET_MODE (x)))
11598 #ifdef HAVE_AS_IX86_FILDS
11608 #ifdef HAVE_AS_IX86_FILDQ
11611 fputs ("ll", file);
11619 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11621 /* 387 opcodes don't get size suffixes
11622 if the operands are registers. */
11623 if (STACK_REG_P (x))
11626 switch (GET_MODE_SIZE (GET_MODE (x)))
11647 output_operand_lossage
11648 ("invalid operand type used with operand code '%c'", code);
11652 output_operand_lossage
11653 ("invalid operand size for operand code '%c'", code);
11670 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
11672 PRINT_OPERAND (file, x, 0);
11673 fputs (", ", file);
11678 /* A little bit of braindamage here.  The SSE compare instructions
11679 use completely different names for the comparisons than the
11680 fp conditional moves do.  */
11683 switch (GET_CODE (x))
11686 fputs ("eq", file);
11689 fputs ("eq_us", file);
11692 fputs ("lt", file);
11695 fputs ("nge", file);
11698 fputs ("le", file);
11701 fputs ("ngt", file);
11704 fputs ("unord", file);
11707 fputs ("neq", file);
11710 fputs ("neq_oq", file);
11713 fputs ("ge", file);
11716 fputs ("nlt", file);
11719 fputs ("gt", file);
11722 fputs ("nle", file);
11725 fputs ("ord", file);
11728 output_operand_lossage ("operand is not a condition code, "
11729 "invalid operand code 'D'");
11735 switch (GET_CODE (x))
11739 fputs ("eq", file);
11743 fputs ("lt", file);
11747 fputs ("le", file);
11750 fputs ("unord", file);
11754 fputs ("neq", file);
11758 fputs ("nlt", file);
11762 fputs ("nle", file);
11765 fputs ("ord", file);
11768 output_operand_lossage ("operand is not a condition code, "
11769 "invalid operand code 'D'");
11775 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11776 if (ASSEMBLER_DIALECT == ASM_ATT)
11778 switch (GET_MODE (x))
11780 case HImode: putc ('w', file); break;
11782 case SFmode: putc ('l', file); break;
11784 case DFmode: putc ('q', file); break;
11785 default: gcc_unreachable ();
11792 if (!COMPARISON_P (x))
11794 output_operand_lossage ("operand is neither a constant nor a "
11795 "condition code, invalid operand code "
11799 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
11802 if (!COMPARISON_P (x))
11804 output_operand_lossage ("operand is neither a constant nor a "
11805 "condition code, invalid operand code "
11809 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11810 if (ASSEMBLER_DIALECT == ASM_ATT)
11813 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
11816 /* Like above, but reverse condition */
11818 /* Check to see if argument to %c is really a constant
11819 and not a condition code which needs to be reversed. */
11820 if (!COMPARISON_P (x))
11822 output_operand_lossage ("operand is neither a constant nor a "
11823 "condition code, invalid operand "
11827 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
11830 if (!COMPARISON_P (x))
11832 output_operand_lossage ("operand is neither a constant nor a "
11833 "condition code, invalid operand "
11837 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11838 if (ASSEMBLER_DIALECT == ASM_ATT)
11841 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
11845 /* It doesn't actually matter what mode we use here, as we're
11846 only going to use this for printing. */
11847 x = adjust_address_nv (x, DImode, 8);
11855 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
11858 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
11861 int pred_val = INTVAL (XEXP (x, 0));
11863 if (pred_val < REG_BR_PROB_BASE * 45 / 100
11864 || pred_val > REG_BR_PROB_BASE * 55 / 100)
11866 int taken = pred_val > REG_BR_PROB_BASE / 2;
11867 int cputaken = final_forward_branch_p (current_output_insn) == 0;
11869 /* Emit hints only where the default branch prediction
11870 heuristics would fail.  */
11871 if (taken != cputaken)
11873 /* We use the 3e (DS) prefix for taken branches and the
11874 2e (CS) prefix for not taken branches.  */
11876 fputs ("ds ; ", file);
11878 fputs ("cs ; ", file);
11886 switch (GET_CODE (x))
11889 fputs ("neq", file);
11892 fputs ("eq", file);
11896 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
11900 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
11904 fputs ("le", file);
11908 fputs ("lt", file);
11911 fputs ("unord", file);
11914 fputs ("ord", file);
11917 fputs ("ueq", file);
11920 fputs ("nlt", file);
11923 fputs ("nle", file);
11926 fputs ("ule", file);
11929 fputs ("ult", file);
11932 fputs ("une", file);
11935 output_operand_lossage ("operand is not a condition code, "
11936 "invalid operand code 'Y'");
11943 fputs (" ; ", file);
11950 output_operand_lossage ("invalid operand code '%c'", code);
11955 print_reg (x, code, file);
11957 else if (MEM_P (x))
11959 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
11960 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
11961 && GET_MODE (x) != BLKmode)
11964 switch (GET_MODE_SIZE (GET_MODE (x)))
11966 case 1: size = "BYTE"; break;
11967 case 2: size = "WORD"; break;
11968 case 4: size = "DWORD"; break;
11969 case 8: size = "QWORD"; break;
11970 case 12: size = "TBYTE"; break;
11972 if (GET_MODE (x) == XFmode)
11977 case 32: size = "YMMWORD"; break;
11979 gcc_unreachable ();
11982 /* Check for explicit size override (codes 'b', 'w' and 'k') */
11985 else if (code == 'w')
11987 else if (code == 'k')
11990 fputs (size, file);
11991 fputs (" PTR ", file);
11995 /* Avoid (%rip) for call operands. */
11996 if (CONSTANT_ADDRESS_P (x) && code == 'P'
11997 && !CONST_INT_P (x))
11998 output_addr_const (file, x);
11999 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
12000 output_operand_lossage ("invalid constraints for operand");
12002 output_address (x);
12005 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
12010 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
12011 REAL_VALUE_TO_TARGET_SINGLE (r, l);
12013 if (ASSEMBLER_DIALECT == ASM_ATT)
12015 fprintf (file, "0x%08lx", (long unsigned int) l);
12018 /* These float cases don't actually occur as immediate operands. */
12019 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
12023 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12024 fputs (dstr, file);
12027 else if (GET_CODE (x) == CONST_DOUBLE
12028 && GET_MODE (x) == XFmode)
12032 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12033 fputs (dstr, file);
12038 /* We have patterns that allow zero sets of memory, for instance.
12039 In 64-bit mode, we should probably support all 8-byte vectors,
12040 since we can in fact encode that into an immediate. */
12041 if (GET_CODE (x) == CONST_VECTOR)
12043 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
12049 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
12051 if (ASSEMBLER_DIALECT == ASM_ATT)
12054 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
12055 || GET_CODE (x) == LABEL_REF)
12057 if (ASSEMBLER_DIALECT == ASM_ATT)
12060 fputs ("OFFSET FLAT:", file);
12063 if (CONST_INT_P (x))
12064 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
12066 output_pic_addr_const (file, x, code);
12068 output_addr_const (file, x);
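/* A hedged restatement of the '+' branch-hint decision above (the
   helper name is invented): a hint (0x3e DS for taken, 0x2e CS for
   not taken) is considered only when the predicted probability leaves
   the 45%-55% dead zone around even odds.  */

static int
outside_hint_dead_zone (int pred_val, int prob_base)
{
  return pred_val < prob_base * 45 / 100
	 || pred_val > prob_base * 55 / 100;
}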
12072 /* Print a memory operand whose address is ADDR. */
12075 print_operand_address (FILE *file, rtx addr)
12077 struct ix86_address parts;
12078 rtx base, index, disp;
12080 int ok = ix86_decompose_address (addr, &parts);
12085 index = parts.index;
12087 scale = parts.scale;
12095 if (ASSEMBLER_DIALECT == ASM_ATT)
12097 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
12100 gcc_unreachable ();
12103 /* Use the one-byte-shorter RIP-relative addressing in 64-bit mode. */
12104 if (TARGET_64BIT && !base && !index)
12108 if (GET_CODE (disp) == CONST
12109 && GET_CODE (XEXP (disp, 0)) == PLUS
12110 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12111 symbol = XEXP (XEXP (disp, 0), 0);
12113 if (GET_CODE (symbol) == LABEL_REF
12114 || (GET_CODE (symbol) == SYMBOL_REF
12115 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
12118 if (!base && !index)
12120 /* A displacement-only address requires special attention. */
12122 if (CONST_INT_P (disp))
12124 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
12125 fputs ("ds:", file);
12126 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
12129 output_pic_addr_const (file, disp, 0);
12131 output_addr_const (file, disp);
12135 if (ASSEMBLER_DIALECT == ASM_ATT)
12140 output_pic_addr_const (file, disp, 0);
12141 else if (GET_CODE (disp) == LABEL_REF)
12142 output_asm_label (disp);
12144 output_addr_const (file, disp);
12149 print_reg (base, 0, file);
12153 print_reg (index, 0, file);
12155 fprintf (file, ",%d", scale);
12161 rtx offset = NULL_RTX;
12165 /* Pull out the offset of a symbol; print any symbol itself. */
12166 if (GET_CODE (disp) == CONST
12167 && GET_CODE (XEXP (disp, 0)) == PLUS
12168 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12170 offset = XEXP (XEXP (disp, 0), 1);
12171 disp = gen_rtx_CONST (VOIDmode,
12172 XEXP (XEXP (disp, 0), 0));
12176 output_pic_addr_const (file, disp, 0);
12177 else if (GET_CODE (disp) == LABEL_REF)
12178 output_asm_label (disp);
12179 else if (CONST_INT_P (disp))
12182 output_addr_const (file, disp);
12188 print_reg (base, 0, file);
12191 if (INTVAL (offset) >= 0)
12193 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12197 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12204 print_reg (index, 0, file);
12206 fprintf (file, "*%d", scale);
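/* An illustrative formatter for the two dialects printed above (the
   function name is invented, and the real output also handles a
   missing base or index).  For base "ebx", index "ecx", scale 4,
   disp 8 this prints "8(%ebx,%ecx,4)" in AT&T syntax and
   "[ebx+ecx*4+8]" in Intel syntax.  */

#include <stdio.h>

static void
print_ea_example (FILE *f, int att_syntax, const char *base,
		  const char *index, int scale, long disp)
{
  if (att_syntax)
    fprintf (f, "%ld(%%%s,%%%s,%d)", disp, base, index, scale);
  else
    fprintf (f, "[%s+%s*%d+%ld]", base, index, scale, disp);
}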
12214 output_addr_const_extra (FILE *file, rtx x)
12218 if (GET_CODE (x) != UNSPEC)
12221 op = XVECEXP (x, 0, 0);
12222 switch (XINT (x, 1))
12224 case UNSPEC_GOTTPOFF:
12225 output_addr_const (file, op);
12226 /* FIXME: This might be @TPOFF in Sun ld. */
12227 fputs ("@gottpoff", file);
12230 output_addr_const (file, op);
12231 fputs ("@tpoff", file);
12233 case UNSPEC_NTPOFF:
12234 output_addr_const (file, op);
12236 fputs ("@tpoff", file);
12238 fputs ("@ntpoff", file);
12240 case UNSPEC_DTPOFF:
12241 output_addr_const (file, op);
12242 fputs ("@dtpoff", file);
12244 case UNSPEC_GOTNTPOFF:
12245 output_addr_const (file, op);
12247 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12248 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
12250 fputs ("@gotntpoff", file);
12252 case UNSPEC_INDNTPOFF:
12253 output_addr_const (file, op);
12254 fputs ("@indntpoff", file);
12257 case UNSPEC_MACHOPIC_OFFSET:
12258 output_addr_const (file, op);
12260 machopic_output_function_base_name (file);
12271 /* Split one or more DImode RTL references into pairs of SImode
12272 references. The RTL can be REG, offsettable MEM, integer constant, or
12273 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
12274 split and "num" is its length. lo_half and hi_half are output arrays
12275 that parallel "operands". */
12278 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12282 rtx op = operands[num];
12284 /* simplify_subreg refuses to split volatile memory addresses,
12285 but we still have to handle them. */
12288 lo_half[num] = adjust_address (op, SImode, 0);
12289 hi_half[num] = adjust_address (op, SImode, 4);
12293 lo_half[num] = simplify_gen_subreg (SImode, op,
12294 GET_MODE (op) == VOIDmode
12295 ? DImode : GET_MODE (op), 0);
12296 hi_half[num] = simplify_gen_subreg (SImode, op,
12297 GET_MODE (op) == VOIDmode
12298 ? DImode : GET_MODE (op), 4);
12302 /* Split one or more TImode RTL references into pairs of DImode
12303 references. The RTL can be REG, offsettable MEM, integer constant, or
12304 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
12305 split and "num" is its length. lo_half and hi_half are output arrays
12306 that parallel "operands". */
12309 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12313 rtx op = operands[num];
12315 /* simplify_subreg refuses to split volatile memory addresses, but we
12316 still have to handle them. */
12319 lo_half[num] = adjust_address (op, DImode, 0);
12320 hi_half[num] = adjust_address (op, DImode, 8);
12324 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
12325 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
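/* A self-contained sketch of the lo/hi split performed above for a
   64-bit value on a little-endian target (the 0- and 4-byte subwords
   of the DImode case; the TImode case is analogous with 8-byte
   halves).  */

#include <stdint.h>

static void
split_u64 (uint64_t v, uint32_t *lo, uint32_t *hi)
{
  *lo = (uint32_t) v;           /* bytes 0..3 */
  *hi = (uint32_t) (v >> 32);   /* bytes 4..7 */
}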
12330 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
12331 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
12332 is the expression of the binary operation. The output may either be
12333 emitted here, or returned to the caller, like all output_* functions.
12335 There is no guarantee that the operands are the same mode, as they
12336 might be within FLOAT or FLOAT_EXTEND expressions. */
12338 #ifndef SYSV386_COMPAT
12339 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
12340 wants to fix the assemblers because that causes incompatibility
12341 with gcc. No-one wants to fix gcc because that causes
12342 incompatibility with assemblers... You can use the option of
12343 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
12344 #define SYSV386_COMPAT 1
12348 output_387_binary_op (rtx insn, rtx *operands)
12350 static char buf[40];
12353 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
12355 #ifdef ENABLE_CHECKING
12356 /* Even if we do not want to check the inputs, this documents the input
12357 constraints, which helps in understanding the following code.  */
12358 if (STACK_REG_P (operands[0])
12359 && ((REG_P (operands[1])
12360 && REGNO (operands[0]) == REGNO (operands[1])
12361 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
12362 || (REG_P (operands[2])
12363 && REGNO (operands[0]) == REGNO (operands[2])
12364 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
12365 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
12368 gcc_assert (is_sse);
12371 switch (GET_CODE (operands[3]))
12374 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12375 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12383 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12384 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12392 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12393 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12401 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12402 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12410 gcc_unreachable ();
12417 strcpy (buf, ssep);
12418 if (GET_MODE (operands[0]) == SFmode)
12419 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
12421 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
12425 strcpy (buf, ssep + 1);
12426 if (GET_MODE (operands[0]) == SFmode)
12427 strcat (buf, "ss\t{%2, %0|%0, %2}");
12429 strcat (buf, "sd\t{%2, %0|%0, %2}");
12435 switch (GET_CODE (operands[3]))
12439 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
12441 rtx temp = operands[2];
12442 operands[2] = operands[1];
12443 operands[1] = temp;
12446 /* We now know operands[0] == operands[1].  */
12448 if (MEM_P (operands[2]))
12454 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12456 if (STACK_TOP_P (operands[0]))
12457 /* How is it that we are storing to a dead operand[2]?
12458 Well, presumably operands[1] is dead too. We can't
12459 store the result to st(0) as st(0) gets popped on this
12460 instruction. Instead store to operands[2] (which I
12461 think has to be st(1)). st(1) will be popped later.
12462 gcc <= 2.8.1 didn't have this check and generated
12463 assembly code that the Unixware assembler rejected. */
12464 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12466 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12470 if (STACK_TOP_P (operands[0]))
12471 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12473 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12478 if (MEM_P (operands[1]))
12484 if (MEM_P (operands[2]))
12490 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12493 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
12494 derived assemblers, confusingly reverse the direction of
12495 the operation for fsub{r} and fdiv{r} when the
12496 destination register is not st(0). The Intel assembler
12497 doesn't have this brain damage. Read !SYSV386_COMPAT to
12498 figure out what the hardware really does. */
12499 if (STACK_TOP_P (operands[0]))
12500 p = "{p\t%0, %2|rp\t%2, %0}";
12502 p = "{rp\t%2, %0|p\t%0, %2}";
12504 if (STACK_TOP_P (operands[0]))
12505 /* As above for fmul/fadd, we can't store to st(0). */
12506 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12508 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12513 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
12516 if (STACK_TOP_P (operands[0]))
12517 p = "{rp\t%0, %1|p\t%1, %0}";
12519 p = "{p\t%1, %0|rp\t%0, %1}";
12521 if (STACK_TOP_P (operands[0]))
12522 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
12524 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
12529 if (STACK_TOP_P (operands[0]))
12531 if (STACK_TOP_P (operands[1]))
12532 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12534 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
12537 else if (STACK_TOP_P (operands[1]))
12540 p = "{\t%1, %0|r\t%0, %1}";
12542 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
12548 p = "{r\t%2, %0|\t%0, %2}";
12550 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12556 gcc_unreachable ();
12563 /* Return needed mode for entity in optimize_mode_switching pass. */
12566 ix86_mode_needed (int entity, rtx insn)
12568 enum attr_i387_cw mode;
12570 /* The mode UNINITIALIZED is used to store the control word after a
12571 function call or ASM pattern.  The mode ANY specifies that the function
12572 has no requirements on the control word and makes no changes in the
12573 bits we are interested in.  */
12576 || (NONJUMP_INSN_P (insn)
12577 && (asm_noperands (PATTERN (insn)) >= 0
12578 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
12579 return I387_CW_UNINITIALIZED;
12581 if (recog_memoized (insn) < 0)
12582 return I387_CW_ANY;
12584 mode = get_attr_i387_cw (insn);
12589 if (mode == I387_CW_TRUNC)
12594 if (mode == I387_CW_FLOOR)
12599 if (mode == I387_CW_CEIL)
12604 if (mode == I387_CW_MASK_PM)
12609 gcc_unreachable ();
12612 return I387_CW_ANY;
12615 /* Output code to initialize the control word copies used by trunc?f?i
12616 and rounding patterns.  CURRENT_MODE is set to the current control word,
12617 while NEW_MODE is set to the new control word.  */
12620 emit_i387_cw_initialization (int mode)
12622 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
12625 enum ix86_stack_slot slot;
12627 rtx reg = gen_reg_rtx (HImode);
12629 emit_insn (gen_x86_fnstcw_1 (stored_mode));
12630 emit_move_insn (reg, copy_rtx (stored_mode));
12632 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
12633 || optimize_function_for_size_p (cfun))
12637 case I387_CW_TRUNC:
12638 /* round toward zero (truncate) */
12639 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
12640 slot = SLOT_CW_TRUNC;
12643 case I387_CW_FLOOR:
12644 /* round down toward -oo */
12645 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12646 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
12647 slot = SLOT_CW_FLOOR;
12651 /* round up toward +oo */
12652 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12653 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
12654 slot = SLOT_CW_CEIL;
12657 case I387_CW_MASK_PM:
12658 /* mask precision exception for nearbyint() */
12659 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12660 slot = SLOT_CW_MASK_PM;
12664 gcc_unreachable ();
12671 case I387_CW_TRUNC:
12672 /* round toward zero (truncate) */
12673 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
12674 slot = SLOT_CW_TRUNC;
12677 case I387_CW_FLOOR:
12678 /* round down toward -oo */
12679 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
12680 slot = SLOT_CW_FLOOR;
12684 /* round up toward +oo */
12685 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
12686 slot = SLOT_CW_CEIL;
12689 case I387_CW_MASK_PM:
12690 /* mask precision exception for nearbyint() */
12691 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12692 slot = SLOT_CW_MASK_PM;
12696 gcc_unreachable ();
12700 gcc_assert (slot < MAX_386_STACK_LOCALS);
12702 new_mode = assign_386_stack_local (HImode, slot);
12703 emit_move_insn (new_mode, reg);
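/* The control-word edits above, restated on a plain uint16_t (the
   helper names are invented).  Bits 10-11 (mask 0x0c00) hold the x87
   rounding control; bit 5 (0x0020) masks the precision exception.  */

#include <stdint.h>

static uint16_t
cw_set_rounding (uint16_t cw, uint16_t rc)
{
  /* rc: 0x0000 nearest, 0x0400 down, 0x0800 up, 0x0c00 truncate.  */
  return (uint16_t) ((cw & ~0x0c00) | rc);
}

static uint16_t
cw_mask_precision (uint16_t cw)
{
  return (uint16_t) (cw | 0x0020);
}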
12706 /* Output code for INSN to convert a float to a signed int. OPERANDS
12707 are the insn operands. The output may be [HSD]Imode and the input
12708 operand may be [SDX]Fmode. */
12711 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
12713 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12714 int dimode_p = GET_MODE (operands[0]) == DImode;
12715 int round_mode = get_attr_i387_cw (insn);
12717 /* Jump through a hoop or two for DImode, since the hardware has no
12718 non-popping instruction. We used to do this a different way, but
12719 that was somewhat fragile and broke with post-reload splitters. */
12720 if ((dimode_p || fisttp) && !stack_top_dies)
12721 output_asm_insn ("fld\t%y1", operands);
12723 gcc_assert (STACK_TOP_P (operands[1]));
12724 gcc_assert (MEM_P (operands[0]));
12725 gcc_assert (GET_MODE (operands[1]) != TFmode);
12728 output_asm_insn ("fisttp%Z0\t%0", operands);
12731 if (round_mode != I387_CW_ANY)
12732 output_asm_insn ("fldcw\t%3", operands);
12733 if (stack_top_dies || dimode_p)
12734 output_asm_insn ("fistp%Z0\t%0", operands);
12736 output_asm_insn ("fist%Z0\t%0", operands);
12737 if (round_mode != I387_CW_ANY)
12738 output_asm_insn ("fldcw\t%2", operands);
12744 /* Output code for x87 ffreep insn. The OPNO argument, which may only
12745 have the values zero or one, indicates the ffreep insn's operand
12746 from the OPERANDS array. */
12748 static const char *
12749 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
12751 if (TARGET_USE_FFREEP)
12752 #ifdef HAVE_AS_IX86_FFREEP
12753 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
12756 static char retval[32];
12757 int regno = REGNO (operands[opno]);
12759 gcc_assert (FP_REGNO_P (regno));
12761 regno -= FIRST_STACK_REG;
12763 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
12768 return opno ? "fstp\t%y1" : "fstp\t%y0";
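/* A hedged decoding of the raw word emitted above: "0xc<i>df" written
   via ASM_SHORT lands in memory little-endian as the bytes df, c0+i,
   i.e. the ffreep %st(i) encoding (the helper name is invented).  */

#include <stdint.h>

static uint16_t
ffreep_word (int i /* stack slot 0 .. 7 */)
{
  return (uint16_t) (((0xc0 + i) << 8) | 0xdf);
}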
12772 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
12773 should be used. UNORDERED_P is true when fucom should be used. */
12776 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
12778 int stack_top_dies;
12779 rtx cmp_op0, cmp_op1;
12780 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
12784 cmp_op0 = operands[0];
12785 cmp_op1 = operands[1];
12789 cmp_op0 = operands[1];
12790 cmp_op1 = operands[2];
12795 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
12796 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
12797 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
12798 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
12800 if (GET_MODE (operands[0]) == SFmode)
12802 return &ucomiss[TARGET_AVX ? 0 : 1];
12804 return &comiss[TARGET_AVX ? 0 : 1];
12807 return &ucomisd[TARGET_AVX ? 0 : 1];
12809 return &comisd[TARGET_AVX ? 0 : 1];
12812 gcc_assert (STACK_TOP_P (cmp_op0));
12814 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12816 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
12818 if (stack_top_dies)
12820 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
12821 return output_387_ffreep (operands, 1);
12824 return "ftst\n\tfnstsw\t%0";
12827 if (STACK_REG_P (cmp_op1)
12829 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
12830 && REGNO (cmp_op1) != FIRST_STACK_REG)
12832 /* If both the top of the 387 stack and the other operand (also a
12833 stack register) die, then this must be
12834 an `fcompp' float compare */
12838 /* There is no double popping fcomi variant. Fortunately,
12839 eflags is immune from the fstp's cc clobbering. */
12841 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
12843 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
12844 return output_387_ffreep (operands, 0);
12849 return "fucompp\n\tfnstsw\t%0";
12851 return "fcompp\n\tfnstsw\t%0";
12856 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
12858 static const char * const alt[16] =
12860 "fcom%Z2\t%y2\n\tfnstsw\t%0",
12861 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
12862 "fucom%Z2\t%y2\n\tfnstsw\t%0",
12863 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
12865 "ficom%Z2\t%y2\n\tfnstsw\t%0",
12866 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
12870 "fcomi\t{%y1, %0|%0, %y1}",
12871 "fcomip\t{%y1, %0|%0, %y1}",
12872 "fucomi\t{%y1, %0|%0, %y1}",
12873 "fucomip\t{%y1, %0|%0, %y1}",
12884 mask = eflags_p << 3;
12885 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
12886 mask |= unordered_p << 1;
12887 mask |= stack_top_dies;
12889 gcc_assert (mask < 16);
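/* The 4-bit table index built above, restated as a standalone helper
   (name invented): eflags_p selects the fcomi forms, the integer-mode
   bit the ficom forms, and the low bits pick the unordered and popping
   variants.  */

static unsigned
fp_compare_alt_index (int eflags_p, int int_mode_p,
		      int unordered_p, int stack_top_dies)
{
  return ((unsigned) eflags_p << 3) | ((unsigned) int_mode_p << 2)
	 | ((unsigned) unordered_p << 1) | (unsigned) stack_top_dies;
}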
12898 ix86_output_addr_vec_elt (FILE *file, int value)
12900 const char *directive = ASM_LONG;
12904 directive = ASM_QUAD;
12906 gcc_assert (!TARGET_64BIT);
12909 fprintf (file, "%s" LPREFIX "%d\n", directive, value);
12913 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
12915 const char *directive = ASM_LONG;
12918 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
12919 directive = ASM_QUAD;
12921 gcc_assert (!TARGET_64BIT);
12923 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
12924 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
12925 fprintf (file, "%s" LPREFIX "%d-" LPREFIX "%d\n",
12926 directive, value, rel);
12927 else if (HAVE_AS_GOTOFF_IN_DATA)
12928 fprintf (file, ASM_LONG LPREFIX "%d@GOTOFF\n", value);
12930 else if (TARGET_MACHO)
12932 fprintf (file, ASM_LONG LPREFIX "%d-", value);
12933 machopic_output_function_base_name (file);
12938 asm_fprintf (file, ASM_LONG "%U%s+[.-" LPREFIX "%d]\n",
12939 GOT_SYMBOL_NAME, value);
12942 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
12946 ix86_expand_clear (rtx dest)
12950 /* We play register width games, which are only valid after reload. */
12951 gcc_assert (reload_completed);
12953 /* Avoid HImode and its attendant prefix byte. */
12954 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
12955 dest = gen_rtx_REG (SImode, REGNO (dest));
12956 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
12958 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
12959 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
12961 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12962 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
12968 /* X is an unchanging MEM. If it is a constant pool reference, return
12969 the constant pool rtx, else NULL. */
12972 maybe_get_pool_constant (rtx x)
12974 x = ix86_delegitimize_address (XEXP (x, 0));
12976 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
12977 return get_pool_constant (x);
12983 ix86_expand_move (enum machine_mode mode, rtx operands[])
12986 enum tls_model model;
12991 if (GET_CODE (op1) == SYMBOL_REF)
12993 model = SYMBOL_REF_TLS_MODEL (op1);
12996 op1 = legitimize_tls_address (op1, model, true);
12997 op1 = force_operand (op1, op0);
13001 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13002 && SYMBOL_REF_DLLIMPORT_P (op1))
13003 op1 = legitimize_dllimport_symbol (op1, false);
13005 else if (GET_CODE (op1) == CONST
13006 && GET_CODE (XEXP (op1, 0)) == PLUS
13007 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
13009 rtx addend = XEXP (XEXP (op1, 0), 1);
13010 rtx symbol = XEXP (XEXP (op1, 0), 0);
13013 model = SYMBOL_REF_TLS_MODEL (symbol);
13015 tmp = legitimize_tls_address (symbol, model, true);
13016 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13017 && SYMBOL_REF_DLLIMPORT_P (symbol))
13018 tmp = legitimize_dllimport_symbol (symbol, true);
13022 tmp = force_operand (tmp, NULL);
13023 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
13024 op0, 1, OPTAB_DIRECT);
13030 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
13032 if (TARGET_MACHO && !TARGET_64BIT)
13037 rtx temp = ((reload_in_progress
13038 || ((op0 && REG_P (op0))
13040 ? op0 : gen_reg_rtx (Pmode));
13041 op1 = machopic_indirect_data_reference (op1, temp);
13042 op1 = machopic_legitimize_pic_address (op1, mode,
13043 temp == op1 ? 0 : temp);
13045 else if (MACHOPIC_INDIRECT)
13046 op1 = machopic_indirect_data_reference (op1, 0);
13054 op1 = force_reg (Pmode, op1);
13055 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
13057 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
13058 op1 = legitimize_pic_address (op1, reg);
13067 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
13068 || !push_operand (op0, mode))
13070 op1 = force_reg (mode, op1);
13072 if (push_operand (op0, mode)
13073 && ! general_no_elim_operand (op1, mode))
13074 op1 = copy_to_mode_reg (mode, op1);
13076 /* Force large constants in 64-bit compilation into a register
13077 to get them CSEed. */
13078 if (can_create_pseudo_p ()
13079 && (mode == DImode) && TARGET_64BIT
13080 && immediate_operand (op1, mode)
13081 && !x86_64_zext_immediate_operand (op1, VOIDmode)
13082 && !register_operand (op0, mode)
13084 op1 = copy_to_mode_reg (mode, op1);
13086 if (can_create_pseudo_p ()
13087 && FLOAT_MODE_P (mode)
13088 && GET_CODE (op1) == CONST_DOUBLE)
13090 /* If we are loading a floating point constant into a register,
13091 force the value to memory now, since we'll get better code
13092 out of the back end. */
13094 op1 = validize_mem (force_const_mem (mode, op1));
13095 if (!register_operand (op0, mode))
13097 rtx temp = gen_reg_rtx (mode);
13098 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
13099 emit_move_insn (op0, temp);
13105 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13109 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
13111 rtx op0 = operands[0], op1 = operands[1];
13112 unsigned int align = GET_MODE_ALIGNMENT (mode);
13114 /* Force constants other than zero into memory.  We do not know how
13115 the instructions used to build constants modify the upper 64 bits
13116 of the register; once we have that information we may be able
13117 to handle some of them more efficiently. */
13118 if (can_create_pseudo_p ()
13119 && register_operand (op0, mode)
13120 && (CONSTANT_P (op1)
13121 || (GET_CODE (op1) == SUBREG
13122 && CONSTANT_P (SUBREG_REG (op1))))
13123 && !standard_sse_constant_p (op1))
13124 op1 = validize_mem (force_const_mem (mode, op1));
  /* We need to check memory alignment for SSE modes since attributes
     can make operands unaligned.  */
13128 if (can_create_pseudo_p ()
13129 && SSE_REG_MODE_P (mode)
13130 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
13131 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
13135 /* ix86_expand_vector_move_misalign() does not like constants ... */
13136 if (CONSTANT_P (op1)
13137 || (GET_CODE (op1) == SUBREG
13138 && CONSTANT_P (SUBREG_REG (op1))))
13139 op1 = validize_mem (force_const_mem (mode, op1));
13141 /* ... nor both arguments in memory. */
13142 if (!register_operand (op0, mode)
13143 && !register_operand (op1, mode))
13144 op1 = force_reg (mode, op1);
13146 tmp[0] = op0; tmp[1] = op1;
13147 ix86_expand_vector_move_misalign (mode, tmp);
13151 /* Make operand1 a register if it isn't already. */
13152 if (can_create_pseudo_p ()
13153 && !register_operand (op0, mode)
13154 && !register_operand (op1, mode))
13156 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
13160 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13163 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
13164 straight to ix86_expand_vector_move. */
/* Code generation for scalar reg-reg moves of single and double precision data:
     if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
       movaps reg, reg
     else
       movss reg, reg
     if (x86_sse_partial_reg_dependency == true)
       movapd reg, reg
     else
       movsd reg, reg

   Code generation for scalar loads of double precision data:
     if (x86_sse_split_regs == true)
       movlpd mem, reg      (gas syntax)
     else
       movsd mem, reg

   Code generation for unaligned packed loads of single precision data
   (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
     if (x86_sse_unaligned_move_optimal)
       movups mem, reg
     else if (x86_sse_partial_reg_dependency == true)
       xorps reg, reg; movlps mem, reg; movhps mem+8, reg
     else
       movlps mem, reg; movhps mem+8, reg

   Code generation for unaligned packed loads of double precision data
   (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
     if (x86_sse_unaligned_move_optimal)
       movupd mem, reg
     else if (x86_sse_split_regs == true)
       movlpd mem, reg; movhpd mem+8, reg
     else
       movsd mem, reg; movhpd mem+8, reg  */
13216 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
13225 switch (GET_MODE_CLASS (mode))
13227 case MODE_VECTOR_INT:
13229 switch (GET_MODE_SIZE (mode))
13232 op0 = gen_lowpart (V16QImode, op0);
13233 op1 = gen_lowpart (V16QImode, op1);
13234 emit_insn (gen_avx_movdqu (op0, op1));
13237 op0 = gen_lowpart (V32QImode, op0);
13238 op1 = gen_lowpart (V32QImode, op1);
13239 emit_insn (gen_avx_movdqu256 (op0, op1));
13242 gcc_unreachable ();
13245 case MODE_VECTOR_FLOAT:
13246 op0 = gen_lowpart (mode, op0);
13247 op1 = gen_lowpart (mode, op1);
13252 emit_insn (gen_avx_movups (op0, op1));
13255 emit_insn (gen_avx_movups256 (op0, op1));
13258 emit_insn (gen_avx_movupd (op0, op1));
13261 emit_insn (gen_avx_movupd256 (op0, op1));
13264 gcc_unreachable ();
13269 gcc_unreachable ();
13277 /* If we're optimizing for size, movups is the smallest. */
13278 if (optimize_insn_for_size_p ())
13280 op0 = gen_lowpart (V4SFmode, op0);
13281 op1 = gen_lowpart (V4SFmode, op1);
13282 emit_insn (gen_sse_movups (op0, op1));
      /* ??? If we have typed data, then it would appear that using
	 movdqu is the only way to get unaligned data loaded with
	 integer registers.  */
13289 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13291 op0 = gen_lowpart (V16QImode, op0);
13292 op1 = gen_lowpart (V16QImode, op1);
13293 emit_insn (gen_sse2_movdqu (op0, op1));
13297 if (TARGET_SSE2 && mode == V2DFmode)
13301 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13303 op0 = gen_lowpart (V2DFmode, op0);
13304 op1 = gen_lowpart (V2DFmode, op1);
13305 emit_insn (gen_sse2_movupd (op0, op1));
13309 /* When SSE registers are split into halves, we can avoid
13310 writing to the top half twice. */
13311 if (TARGET_SSE_SPLIT_REGS)
13313 emit_clobber (op0);
13318 /* ??? Not sure about the best option for the Intel chips.
13319 The following would seem to satisfy; the register is
13320 entirely cleared, breaking the dependency chain. We
13321 then store to the upper half, with a dependency depth
13322 of one. A rumor has it that Intel recommends two movsd
13323 followed by an unpacklpd, but this is unconfirmed. And
13324 given that the dependency depth of the unpacklpd would
13325 still be one, I'm not sure why this would be better. */
13326 zero = CONST0_RTX (V2DFmode);
13329 m = adjust_address (op1, DFmode, 0);
13330 emit_insn (gen_sse2_loadlpd (op0, zero, m));
13331 m = adjust_address (op1, DFmode, 8);
13332 emit_insn (gen_sse2_loadhpd (op0, op0, m));
13336 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13338 op0 = gen_lowpart (V4SFmode, op0);
13339 op1 = gen_lowpart (V4SFmode, op1);
13340 emit_insn (gen_sse_movups (op0, op1));
13344 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
13345 emit_move_insn (op0, CONST0_RTX (mode));
13347 emit_clobber (op0);
13349 if (mode != V4SFmode)
13350 op0 = gen_lowpart (V4SFmode, op0);
13351 m = adjust_address (op1, V2SFmode, 0);
13352 emit_insn (gen_sse_loadlps (op0, op0, m));
13353 m = adjust_address (op1, V2SFmode, 8);
13354 emit_insn (gen_sse_loadhps (op0, op0, m));
13357 else if (MEM_P (op0))
13359 /* If we're optimizing for size, movups is the smallest. */
13360 if (optimize_insn_for_size_p ())
13362 op0 = gen_lowpart (V4SFmode, op0);
13363 op1 = gen_lowpart (V4SFmode, op1);
13364 emit_insn (gen_sse_movups (op0, op1));
13368 /* ??? Similar to above, only less clear because of quote
13369 typeless stores unquote. */
13370 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
13371 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13373 op0 = gen_lowpart (V16QImode, op0);
13374 op1 = gen_lowpart (V16QImode, op1);
13375 emit_insn (gen_sse2_movdqu (op0, op1));
13379 if (TARGET_SSE2 && mode == V2DFmode)
13381 m = adjust_address (op0, DFmode, 0);
13382 emit_insn (gen_sse2_storelpd (m, op1));
13383 m = adjust_address (op0, DFmode, 8);
13384 emit_insn (gen_sse2_storehpd (m, op1));
13388 if (mode != V4SFmode)
13389 op1 = gen_lowpart (V4SFmode, op1);
13390 m = adjust_address (op0, V2SFmode, 0);
13391 emit_insn (gen_sse_storelps (m, op1));
13392 m = adjust_address (op0, V2SFmode, 8);
13393 emit_insn (gen_sse_storehps (m, op1));
13397 gcc_unreachable ();
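/* For illustration (not from the original sources): on a
   TARGET_SSE_SPLIT_REGS machine the unaligned V2DF load handled above
   comes out as

	movlpd	(%eax), %xmm0
	movhpd	8(%eax), %xmm0

   preceded by a clobber of %xmm0, so that the two half-width writes do
   not create a false dependency on the register's previous contents.  */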
13400 /* Expand a push in MODE. This is some mode for which we do not support
13401 proper push instructions, at least from the registers that we expect
13402 the value to live in. */
13405 ix86_expand_push (enum machine_mode mode, rtx x)
13409 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
13410 GEN_INT (-GET_MODE_SIZE (mode)),
13411 stack_pointer_rtx, 1, OPTAB_DIRECT);
13412 if (tmp != stack_pointer_rtx)
13413 emit_move_insn (stack_pointer_rtx, tmp);
13415 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
  /* When we push an operand onto the stack, it has to be aligned at least
     at the function argument boundary.  However since we don't have
     the argument type, we can't determine the actual argument
     boundary.  */
13421 emit_move_insn (tmp, x);
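/* Illustration (not from the original sources): pushing a V4SF value
   this way on 32-bit x86 amounts to

	subl	$16, %esp	; sp += -GET_MODE_SIZE (mode)
	movups	%xmm0, (%esp)	; emit_move_insn (tmp, x)

   i.e. an explicit stack-pointer adjustment followed by an ordinary
   store, since no push instruction covers the mode.  */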
13424 /* Helper function of ix86_fixup_binary_operands to canonicalize
13425 operand order. Returns true if the operands should be swapped. */
13428 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
13431 rtx dst = operands[0];
13432 rtx src1 = operands[1];
13433 rtx src2 = operands[2];
13435 /* If the operation is not commutative, we can't do anything. */
13436 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
13439 /* Highest priority is that src1 should match dst. */
13440 if (rtx_equal_p (dst, src1))
13442 if (rtx_equal_p (dst, src2))
13445 /* Next highest priority is that immediate constants come second. */
13446 if (immediate_operand (src2, mode))
13448 if (immediate_operand (src1, mode))
13451 /* Lowest priority is that memory references should come second. */
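/* Example (illustrative): for dst = B, src1 = a MEM, src2 = B, the
   rules above swap src1 and src2 so that src1 matches dst, turning
   (plus (mem) (reg B)) with dst == B into (plus (reg B) (mem)) --
   exactly the operand shape the matching add insn wants.  */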
13461 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
13462 destination to use for the operation. If different from the true
13463 destination in operands[0], a copy operation will be required. */
13466 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
13469 rtx dst = operands[0];
13470 rtx src1 = operands[1];
13471 rtx src2 = operands[2];
13473 /* Canonicalize operand order. */
13474 if (ix86_swap_binary_operands_p (code, mode, operands))
13478 /* It is invalid to swap operands of different modes. */
13479 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
13486 /* Both source operands cannot be in memory. */
13487 if (MEM_P (src1) && MEM_P (src2))
13489 /* Optimization: Only read from memory once. */
13490 if (rtx_equal_p (src1, src2))
13492 src2 = force_reg (mode, src2);
13496 src2 = force_reg (mode, src2);
13499 /* If the destination is memory, and we do not have matching source
13500 operands, do things in registers. */
13501 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13502 dst = gen_reg_rtx (mode);
13504 /* Source 1 cannot be a constant. */
13505 if (CONSTANT_P (src1))
13506 src1 = force_reg (mode, src1);
13508 /* Source 1 cannot be a non-matching memory. */
13509 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13510 src1 = force_reg (mode, src1);
13512 operands[1] = src1;
13513 operands[2] = src2;
13517 /* Similarly, but assume that the destination has already been
13518 set up properly. */
13521 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
13522 enum machine_mode mode, rtx operands[])
13524 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
13525 gcc_assert (dst == operands[0]);
/* Attempt to expand a binary operator.  Make the expansion closer to the
   actual machine than just general_operand, which would allow 3 separate
   memory references (one output, two inputs) in a single insn.  */
13533 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
13536 rtx src1, src2, dst, op, clob;
13538 dst = ix86_fixup_binary_operands (code, mode, operands);
13539 src1 = operands[1];
13540 src2 = operands[2];
13542 /* Emit the instruction. */
13544 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
13545 if (reload_in_progress)
13547 /* Reload doesn't know about the flags register, and doesn't know that
13548 it doesn't want to clobber it. We can only do this with PLUS. */
13549 gcc_assert (code == PLUS);
13554 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13555 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13558 /* Fix up the destination if needed. */
13559 if (dst != operands[0])
13560 emit_move_insn (operands[0], dst);
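/* For illustration (a sketch of the emitted RTL, not taken verbatim
   from a dump): expanding a register-register PLUS outside of reload
   wraps the set in a parallel with a flags clobber,

     (parallel [(set (reg:SI dst) (plus:SI (reg:SI src1) (reg:SI src2)))
		(clobber (reg:CC FLAGS_REG))])

   which is the shape the arithmetic patterns in i386.md expect.  */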
13563 /* Return TRUE or FALSE depending on whether the binary operator meets the
13564 appropriate constraints. */
13567 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
13570 rtx dst = operands[0];
13571 rtx src1 = operands[1];
13572 rtx src2 = operands[2];
13574 /* Both source operands cannot be in memory. */
13575 if (MEM_P (src1) && MEM_P (src2))
13578 /* Canonicalize operand order for commutative operators. */
13579 if (ix86_swap_binary_operands_p (code, mode, operands))
13586 /* If the destination is memory, we must have a matching source operand. */
13587 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13590 /* Source 1 cannot be a constant. */
13591 if (CONSTANT_P (src1))
13594 /* Source 1 cannot be a non-matching memory. */
13595 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
/* Attempt to expand a unary operator.  Make the expansion closer to the
   actual machine than just general_operand, which would allow 2 separate
   memory references (one output, one input) in a single insn.  */
13606 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
13609 int matching_memory;
13610 rtx src, dst, op, clob;
13615 /* If the destination is memory, and we do not have matching source
13616 operands, do things in registers. */
13617 matching_memory = 0;
13620 if (rtx_equal_p (dst, src))
13621 matching_memory = 1;
13623 dst = gen_reg_rtx (mode);
13626 /* When source operand is memory, destination must match. */
13627 if (MEM_P (src) && !matching_memory)
13628 src = force_reg (mode, src);
13630 /* Emit the instruction. */
13632 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
13633 if (reload_in_progress || code == NOT)
13635 /* Reload doesn't know about the flags register, and doesn't know that
13636 it doesn't want to clobber it. */
13637 gcc_assert (code == NOT);
13642 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13643 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13646 /* Fix up the destination if needed. */
13647 if (dst != operands[0])
13648 emit_move_insn (operands[0], dst);
13651 #define LEA_SEARCH_THRESHOLD 12
/* Search backward for a non-AGU definition of register number REGNO1
   or register number REGNO2 in INSN's basic block until we either
   1. pass LEA_SEARCH_THRESHOLD instructions, or
   2. reach the BB boundary, or
   3. reach an AGU definition.
   Returns the distance between the non-AGU definition point and INSN.
   If there is no definition point, returns -1.  */
13662 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
13665 basic_block bb = BLOCK_FOR_INSN (insn);
13668 enum attr_type insn_type;
13670 if (insn != BB_HEAD (bb))
13672 rtx prev = PREV_INSN (insn);
13673 while (prev && distance < LEA_SEARCH_THRESHOLD)
13675 if (NONDEBUG_INSN_P (prev))
13678 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13679 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13680 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13681 && (regno1 == DF_REF_REGNO (*def_rec)
13682 || regno2 == DF_REF_REGNO (*def_rec)))
13684 insn_type = get_attr_type (prev);
13685 if (insn_type != TYPE_LEA)
13689 if (prev == BB_HEAD (bb))
13691 prev = PREV_INSN (prev);
13695 if (distance < LEA_SEARCH_THRESHOLD)
13699 bool simple_loop = false;
13701 FOR_EACH_EDGE (e, ei, bb->preds)
13704 simple_loop = true;
13710 rtx prev = BB_END (bb);
13713 && distance < LEA_SEARCH_THRESHOLD)
13715 if (NONDEBUG_INSN_P (prev))
13718 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13719 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13720 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13721 && (regno1 == DF_REF_REGNO (*def_rec)
13722 || regno2 == DF_REF_REGNO (*def_rec)))
13724 insn_type = get_attr_type (prev);
13725 if (insn_type != TYPE_LEA)
13729 prev = PREV_INSN (prev);
13737 /* get_attr_type may modify recog data. We want to make sure
13738 that recog data is valid for instruction INSN, on which
13739 distance_non_agu_define is called. INSN is unchanged here. */
13740 extract_insn_cached (insn);
/* Return the distance between INSN and the next insn that uses
   register number REGNO0 in a memory address.  Return -1 if no such
   use is found within LEA_SEARCH_THRESHOLD or if REGNO0 is set.  */
13749 distance_agu_use (unsigned int regno0, rtx insn)
13751 basic_block bb = BLOCK_FOR_INSN (insn);
13756 if (insn != BB_END (bb))
13758 rtx next = NEXT_INSN (insn);
13759 while (next && distance < LEA_SEARCH_THRESHOLD)
13761 if (NONDEBUG_INSN_P (next))
13765 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13766 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13767 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13768 && regno0 == DF_REF_REGNO (*use_rec))
		/* Return DISTANCE if OP0 is used in a memory
		   address in NEXT.  */
13775 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13776 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13777 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13778 && regno0 == DF_REF_REGNO (*def_rec))
13780 /* Return -1 if OP0 is set in NEXT. */
13784 if (next == BB_END (bb))
13786 next = NEXT_INSN (next);
13790 if (distance < LEA_SEARCH_THRESHOLD)
13794 bool simple_loop = false;
13796 FOR_EACH_EDGE (e, ei, bb->succs)
13799 simple_loop = true;
13805 rtx next = BB_HEAD (bb);
13808 && distance < LEA_SEARCH_THRESHOLD)
13810 if (NONDEBUG_INSN_P (next))
13814 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13815 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13816 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13817 && regno0 == DF_REF_REGNO (*use_rec))
		/* Return DISTANCE if OP0 is used in a memory
		   address in NEXT.  */
13824 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13825 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13826 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13827 && regno0 == DF_REF_REGNO (*def_rec))
13829 /* Return -1 if OP0 is set in NEXT. */
13834 next = NEXT_INSN (next);
/* Define this macro to tune LEA priority vs ADD; it takes effect when
   there is a dilemma of choosing LEA or ADD.
   Negative value: ADD is preferred over LEA
   Zero: neutral
   Positive value: LEA is preferred over ADD  */
13847 #define IX86_LEA_PRIORITY 2
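/* Worked example (illustrative): with IX86_LEA_PRIORITY == 2, a
   non-AGU definition 3 insns back (dist_define == 3) and an AGU use
   4 insns ahead (dist_use == 4) keeps the LEA form, since
   dist_define + IX86_LEA_PRIORITY = 5 >= dist_use.  Were the use
   7 insns ahead instead, ADD would be chosen.  */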
/* Return true if it is OK to optimize an ADD operation to an LEA
   operation to avoid flag register consumption.  For processors
   like ATOM, if the destination register of the LEA holds an actual
   address that will be used soon, LEA is better; otherwise ADD is
   better.  */
13856 ix86_lea_for_add_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13857 rtx insn, rtx operands[])
13859 unsigned int regno0 = true_regnum (operands[0]);
13860 unsigned int regno1 = true_regnum (operands[1]);
13861 unsigned int regno2;
13863 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
13864 return regno0 != regno1;
13866 regno2 = true_regnum (operands[2]);
13868 /* If a = b + c, (a!=b && a!=c), must use lea form. */
13869 if (regno0 != regno1 && regno0 != regno2)
13873 int dist_define, dist_use;
13874 dist_define = distance_non_agu_define (regno1, regno2, insn);
13875 if (dist_define <= 0)
      /* If this insn has both a backward non-AGU dependence and a forward
	 AGU dependence, the one with the shorter distance takes effect.  */
13880 dist_use = distance_agu_use (regno0, insn);
13882 || (dist_define + IX86_LEA_PRIORITY) < dist_use)
/* Return true if the destination reg of SET_BODY is the shift count
   of USE_BODY.  */
13893 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
13899 /* Retrieve destination of SET_BODY. */
13900 switch (GET_CODE (set_body))
13903 set_dest = SET_DEST (set_body);
13904 if (!set_dest || !REG_P (set_dest))
13908 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
13909 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
13917 /* Retrieve shift count of USE_BODY. */
13918 switch (GET_CODE (use_body))
13921 shift_rtx = XEXP (use_body, 1);
13924 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
13925 if (ix86_dep_by_shift_count_body (set_body,
13926 XVECEXP (use_body, 0, i)))
13934 && (GET_CODE (shift_rtx) == ASHIFT
13935 || GET_CODE (shift_rtx) == LSHIFTRT
13936 || GET_CODE (shift_rtx) == ASHIFTRT
13937 || GET_CODE (shift_rtx) == ROTATE
13938 || GET_CODE (shift_rtx) == ROTATERT))
13940 rtx shift_count = XEXP (shift_rtx, 1);
13942 /* Return true if shift count is dest of SET_BODY. */
13943 if (REG_P (shift_count)
13944 && true_regnum (set_dest) == true_regnum (shift_count))
/* Return true if the destination reg of SET_INSN is the shift count
   of USE_INSN.  */
13955 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
13957 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
13958 PATTERN (use_insn));
13961 /* Return TRUE or FALSE depending on whether the unary operator meets the
13962 appropriate constraints. */
13965 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13966 enum machine_mode mode ATTRIBUTE_UNUSED,
13967 rtx operands[2] ATTRIBUTE_UNUSED)
  /* If one of the operands is memory, source and destination must match.  */
13970 if ((MEM_P (operands[0])
13971 || MEM_P (operands[1]))
13972 && ! rtx_equal_p (operands[0], operands[1]))
13977 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
13978 are ok, keeping in mind the possible movddup alternative. */
13981 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
13983 if (MEM_P (operands[0]))
13984 return rtx_equal_p (operands[0], operands[1 + high]);
13985 if (MEM_P (operands[1]) && MEM_P (operands[2]))
13986 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
13990 /* Post-reload splitter for converting an SF or DFmode value in an
13991 SSE register into an unsigned SImode. */
13994 ix86_split_convert_uns_si_sse (rtx operands[])
13996 enum machine_mode vecmode;
13997 rtx value, large, zero_or_two31, input, two31, x;
13999 large = operands[1];
14000 zero_or_two31 = operands[2];
14001 input = operands[3];
14002 two31 = operands[4];
14003 vecmode = GET_MODE (large);
14004 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
14006 /* Load up the value into the low element. We must ensure that the other
14007 elements are valid floats -- zero is the easiest such value. */
14010 if (vecmode == V4SFmode)
14011 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
14013 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
14017 input = gen_rtx_REG (vecmode, REGNO (input));
14018 emit_move_insn (value, CONST0_RTX (vecmode));
14019 if (vecmode == V4SFmode)
14020 emit_insn (gen_sse_movss (value, value, input));
14022 emit_insn (gen_sse2_movsd (value, value, input));
14025 emit_move_insn (large, two31);
14026 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
14028 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
14029 emit_insn (gen_rtx_SET (VOIDmode, large, x));
14031 x = gen_rtx_AND (vecmode, zero_or_two31, large);
14032 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
14034 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
14035 emit_insn (gen_rtx_SET (VOIDmode, value, x));
14037 large = gen_rtx_REG (V4SImode, REGNO (large));
14038 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
14040 x = gen_rtx_REG (V4SImode, REGNO (value));
14041 if (vecmode == V4SFmode)
14042 emit_insn (gen_sse2_cvttps2dq (x, value));
14044 emit_insn (gen_sse2_cvttpd2dq (x, value));
14047 emit_insn (gen_xorv4si3 (value, value, large));
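#if 0
/* Reference sketch (author's illustration, never compiled): the scalar
   computation performed above, assuming IEEE arithmetic, two's
   complement, and an input in [0, 2**32).  */
static unsigned int
uns_si_from_double_sketch (double v)
{
  double two31 = 2147483648.0;
  int trunc = (int) (v >= two31 ? v - two31 : v);  /* cvttsd2si */
  unsigned int top = v >= two31 ? 0x80000000u : 0; /* pslld $31 lane */
  return (unsigned int) trunc ^ top;               /* pxor */
}
#endif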
14050 /* Convert an unsigned DImode value into a DFmode, using only SSE.
14051 Expects the 64-bit DImode to be supplied in a pair of integral
14052 registers. Requires SSE2; will use SSE3 if available. For x86_32,
14053 -mfpmath=sse, !optimize_size only. */
14056 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
14058 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
14059 rtx int_xmm, fp_xmm;
14060 rtx biases, exponents;
14063 int_xmm = gen_reg_rtx (V4SImode);
14064 if (TARGET_INTER_UNIT_MOVES)
14065 emit_insn (gen_movdi_to_sse (int_xmm, input));
14066 else if (TARGET_SSE_SPLIT_REGS)
14068 emit_clobber (int_xmm);
14069 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
14073 x = gen_reg_rtx (V2DImode);
14074 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
14075 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
14078 x = gen_rtx_CONST_VECTOR (V4SImode,
14079 gen_rtvec (4, GEN_INT (0x43300000UL),
14080 GEN_INT (0x45300000UL),
14081 const0_rtx, const0_rtx));
14082 exponents = validize_mem (force_const_mem (V4SImode, x));
14084 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
14085 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
14087 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
14088 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
14089 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
14090 (0x1.0p84 + double(fp_value_hi_xmm)).
14091 Note these exponents differ by 32. */
14093 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
14095 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
14096 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
14097 real_ldexp (&bias_lo_rvt, &dconst1, 52);
14098 real_ldexp (&bias_hi_rvt, &dconst1, 84);
14099 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
14100 x = const_double_from_real_value (bias_hi_rvt, DFmode);
14101 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
14102 biases = validize_mem (force_const_mem (V2DFmode, biases));
14103 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
14105 /* Add the upper and lower DFmode values together. */
14107 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
14110 x = copy_to_mode_reg (V2DFmode, fp_xmm);
14111 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
14112 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
14115 ix86_expand_vector_extract (false, target, fp_xmm, 0);
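#if 0
/* Reference sketch (author's illustration, never compiled): the exponent
   trick used above, assuming IEEE doubles and a union pun.  OR-ing
   exponent 0x43300000 onto the low half gives 2**52 + lo, and
   0x45300000 onto the high half gives 2**84 + hi * 2**32; subtracting
   the two biases and adding the parts reconstructs the value with a
   single final rounding.  */
static double
uns_di_to_double_sketch (unsigned long long u)
{
  union { unsigned long long i; double d; } lo, hi;
  lo.i = 0x4330000000000000ULL | (u & 0xffffffffULL);
  hi.i = 0x4530000000000000ULL | (u >> 32);
  return (lo.d - 0x1.0p52) + (hi.d - 0x1.0p84);
}
#endif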
14118 /* Not used, but eases macroization of patterns. */
14120 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
14121 rtx input ATTRIBUTE_UNUSED)
14123 gcc_unreachable ();
14126 /* Convert an unsigned SImode value into a DFmode. Only currently used
14127 for SSE, but applicable anywhere. */
14130 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
14132 REAL_VALUE_TYPE TWO31r;
14135 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
14136 NULL, 1, OPTAB_DIRECT);
14138 fp = gen_reg_rtx (DFmode);
14139 emit_insn (gen_floatsidf2 (fp, x));
14141 real_ldexp (&TWO31r, &dconst1, 31);
14142 x = const_double_from_real_value (TWO31r, DFmode);
14144 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
14146 emit_move_insn (target, x);
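#if 0
/* Reference sketch (author's illustration, never compiled): the expansion
   above in plain C, assuming two's complement and IEEE doubles.  Adding
   -2147483648 wraps the unsigned input into the signed range; the exact
   int -> double conversion plus 0x1.0p31 restores the original value.  */
static double
uns_si_to_double_sketch (unsigned int u)
{
  int biased = (int) (u - 0x80000000u); /* input + (-2147483647 - 1) */
  return (double) biased + 2147483648.0;
}
#endif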
14149 /* Convert a signed DImode value into a DFmode. Only used for SSE in
14150 32-bit mode; otherwise we have a direct convert instruction. */
14153 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
14155 REAL_VALUE_TYPE TWO32r;
14156 rtx fp_lo, fp_hi, x;
14158 fp_lo = gen_reg_rtx (DFmode);
14159 fp_hi = gen_reg_rtx (DFmode);
14161 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
14163 real_ldexp (&TWO32r, &dconst1, 32);
14164 x = const_double_from_real_value (TWO32r, DFmode);
14165 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
14167 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
14169 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
14172 emit_move_insn (target, x);
14175 /* Convert an unsigned SImode value into a SFmode, using only SSE.
14176 For x86_32, -mfpmath=sse, !optimize_size only. */
14178 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
14180 REAL_VALUE_TYPE ONE16r;
14181 rtx fp_hi, fp_lo, int_hi, int_lo, x;
14183 real_ldexp (&ONE16r, &dconst1, 16);
14184 x = const_double_from_real_value (ONE16r, SFmode);
14185 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
14186 NULL, 0, OPTAB_DIRECT);
14187 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
14188 NULL, 0, OPTAB_DIRECT);
14189 fp_hi = gen_reg_rtx (SFmode);
14190 fp_lo = gen_reg_rtx (SFmode);
14191 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
14192 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
14193 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
14195 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
14197 if (!rtx_equal_p (target, fp_hi))
14198 emit_move_insn (target, fp_hi);
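#if 0
/* Reference sketch (author's illustration, never compiled): the 16/16
   split used above.  Each half converts to SFmode exactly (24-bit
   significand), the multiply by 2**16 is exact, and the final add
   rounds once, matching a direct round-to-nearest conversion.  */
static float
uns_si_to_float_sketch (unsigned int u)
{
  float f_lo = (float) (u & 0xffff);
  float f_hi = (float) (u >> 16);
  return f_hi * 65536.0f + f_lo;
}
#endif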
/* A subroutine of ix86_build_signbit_mask.  If VECT is true,
   then replicate the value for all elements of the vector
   register.  */
14206 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
14213 v = gen_rtvec (4, value, value, value, value);
14214 return gen_rtx_CONST_VECTOR (V4SImode, v);
14218 v = gen_rtvec (2, value, value);
14219 return gen_rtx_CONST_VECTOR (V2DImode, v);
14223 v = gen_rtvec (4, value, value, value, value);
14225 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
14226 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
14227 return gen_rtx_CONST_VECTOR (V4SFmode, v);
14231 v = gen_rtvec (2, value, value);
14233 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
14234 return gen_rtx_CONST_VECTOR (V2DFmode, v);
14237 gcc_unreachable ();
14241 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
14242 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
14243 for an SSE register. If VECT is true, then replicate the mask for
14244 all elements of the vector register. If INVERT is true, then create
14245 a mask excluding the sign bit. */
14248 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
14250 enum machine_mode vec_mode, imode;
14251 HOST_WIDE_INT hi, lo;
14256 /* Find the sign bit, sign extended to 2*HWI. */
14262 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
14263 lo = 0x80000000, hi = lo < 0;
14269 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
14270 if (HOST_BITS_PER_WIDE_INT >= 64)
14271 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
14273 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14278 vec_mode = VOIDmode;
14279 if (HOST_BITS_PER_WIDE_INT >= 64)
14282 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
14289 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14293 lo = ~lo, hi = ~hi;
14299 mask = immed_double_const (lo, hi, imode);
14301 vec = gen_rtvec (2, v, mask);
14302 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
14303 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
14310 gcc_unreachable ();
14314 lo = ~lo, hi = ~hi;
14316 /* Force this value into the low part of a fp vector constant. */
14317 mask = immed_double_const (lo, hi, imode);
14318 mask = gen_lowpart (mode, mask);
14320 if (vec_mode == VOIDmode)
14321 return force_reg (mode, mask);
14323 v = ix86_build_const_vector (mode, vect, mask);
14324 return force_reg (vec_mode, v);
14327 /* Generate code for floating point ABS or NEG. */
14330 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
14333 rtx mask, set, use, clob, dst, src;
14334 bool use_sse = false;
14335 bool vector_mode = VECTOR_MODE_P (mode);
14336 enum machine_mode elt_mode = mode;
14340 elt_mode = GET_MODE_INNER (mode);
14343 else if (mode == TFmode)
14345 else if (TARGET_SSE_MATH)
14346 use_sse = SSE_FLOAT_MODE_P (mode);
14348 /* NEG and ABS performed with SSE use bitwise mask operations.
14349 Create the appropriate mask now. */
14351 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
14360 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
14361 set = gen_rtx_SET (VOIDmode, dst, set);
14366 set = gen_rtx_fmt_e (code, mode, src);
14367 set = gen_rtx_SET (VOIDmode, dst, set);
14370 use = gen_rtx_USE (VOIDmode, mask);
14371 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
14372 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14373 gen_rtvec (3, set, use, clob)));
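/* For illustration (not from the original sources): with SSE math,
   NEG and ABS reduce to bitwise operations on the sign bit,

	negsd:	xorpd	.LC_signbit(%rip), %xmm0
	abssd:	andpd	.LC_notsignbit(%rip), %xmm0

   where the constant labels are hypothetical names for the masks built
   by ix86_build_signbit_mask above.  */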
14380 /* Expand a copysign operation. Special case operand 0 being a constant. */
14383 ix86_expand_copysign (rtx operands[])
14385 enum machine_mode mode;
14386 rtx dest, op0, op1, mask, nmask;
14388 dest = operands[0];
14392 mode = GET_MODE (dest);
14394 if (GET_CODE (op0) == CONST_DOUBLE)
14396 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
14398 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
14399 op0 = simplify_unary_operation (ABS, mode, op0, mode);
14401 if (mode == SFmode || mode == DFmode)
14403 enum machine_mode vmode;
14405 vmode = mode == SFmode ? V4SFmode : V2DFmode;
14407 if (op0 == CONST0_RTX (mode))
14408 op0 = CONST0_RTX (vmode);
14411 rtx v = ix86_build_const_vector (mode, false, op0);
14413 op0 = force_reg (vmode, v);
14416 else if (op0 != CONST0_RTX (mode))
14417 op0 = force_reg (mode, op0);
14419 mask = ix86_build_signbit_mask (mode, 0, 0);
14421 if (mode == SFmode)
14422 copysign_insn = gen_copysignsf3_const;
14423 else if (mode == DFmode)
14424 copysign_insn = gen_copysigndf3_const;
14426 copysign_insn = gen_copysigntf3_const;
14428 emit_insn (copysign_insn (dest, op0, op1, mask));
14432 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
14434 nmask = ix86_build_signbit_mask (mode, 0, 1);
14435 mask = ix86_build_signbit_mask (mode, 0, 0);
14437 if (mode == SFmode)
14438 copysign_insn = gen_copysignsf3_var;
14439 else if (mode == DFmode)
14440 copysign_insn = gen_copysigndf3_var;
14442 copysign_insn = gen_copysigntf3_var;
14444 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
14448 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
14449 be a constant, and so has already been expanded into a vector constant. */
14452 ix86_split_copysign_const (rtx operands[])
14454 enum machine_mode mode, vmode;
14455 rtx dest, op0, mask, x;
14457 dest = operands[0];
14459 mask = operands[3];
14461 mode = GET_MODE (dest);
14462 vmode = GET_MODE (mask);
14464 dest = simplify_gen_subreg (vmode, dest, mode, 0);
14465 x = gen_rtx_AND (vmode, dest, mask);
14466 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14468 if (op0 != CONST0_RTX (vmode))
14470 x = gen_rtx_IOR (vmode, dest, op0);
14471 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
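#if 0
/* Reference sketch (author's illustration, never compiled): the bit-level
   identity behind both copysign splitters, shown for DFmode with a
   union pun.  The mask built by ix86_build_signbit_mask selects the
   sign bit; its inverse selects the magnitude.  */
static double
copysign_sketch (double x, double y)
{
  union { double d; unsigned long long i; } ux, uy;
  ux.d = x;
  uy.d = y;
  ux.i = (ux.i & ~0x8000000000000000ULL)   /* magnitude of X */
	 | (uy.i & 0x8000000000000000ULL); /* sign of Y */
  return ux.d;
}
#endif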
14475 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
14476 so we have to do two masks. */
14479 ix86_split_copysign_var (rtx operands[])
14481 enum machine_mode mode, vmode;
14482 rtx dest, scratch, op0, op1, mask, nmask, x;
14484 dest = operands[0];
14485 scratch = operands[1];
14488 nmask = operands[4];
14489 mask = operands[5];
14491 mode = GET_MODE (dest);
14492 vmode = GET_MODE (mask);
14494 if (rtx_equal_p (op0, op1))
14496 /* Shouldn't happen often (it's useless, obviously), but when it does
14497 we'd generate incorrect code if we continue below. */
14498 emit_move_insn (dest, op0);
14502 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
14504 gcc_assert (REGNO (op1) == REGNO (scratch));
14506 x = gen_rtx_AND (vmode, scratch, mask);
14507 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14510 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14511 x = gen_rtx_NOT (vmode, dest);
14512 x = gen_rtx_AND (vmode, x, op0);
14513 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14517 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
14519 x = gen_rtx_AND (vmode, scratch, mask);
14521 else /* alternative 2,4 */
14523 gcc_assert (REGNO (mask) == REGNO (scratch));
14524 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
14525 x = gen_rtx_AND (vmode, scratch, op1);
14527 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14529 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
14531 dest = simplify_gen_subreg (vmode, op0, mode, 0);
14532 x = gen_rtx_AND (vmode, dest, nmask);
14534 else /* alternative 3,4 */
14536 gcc_assert (REGNO (nmask) == REGNO (dest));
14538 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14539 x = gen_rtx_AND (vmode, dest, op0);
14541 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14544 x = gen_rtx_IOR (vmode, dest, scratch);
14545 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
/* Return TRUE or FALSE depending on whether the first SET in INSN
   has source and destination with matching CC modes, and whether the
   CC mode is at least as constrained as REQ_MODE.  */
14553 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
14556 enum machine_mode set_mode;
14558 set = PATTERN (insn);
14559 if (GET_CODE (set) == PARALLEL)
14560 set = XVECEXP (set, 0, 0);
14561 gcc_assert (GET_CODE (set) == SET);
14562 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
14564 set_mode = GET_MODE (SET_DEST (set));
14568 if (req_mode != CCNOmode
14569 && (req_mode != CCmode
14570 || XEXP (SET_SRC (set), 1) != const0_rtx))
14574 if (req_mode == CCGCmode)
14578 if (req_mode == CCGOCmode || req_mode == CCNOmode)
14582 if (req_mode == CCZmode)
14593 gcc_unreachable ();
14596 return (GET_MODE (SET_SRC (set)) == set_mode);
14599 /* Generate insn patterns to do an integer compare of OPERANDS. */
14602 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
14604 enum machine_mode cmpmode;
14607 cmpmode = SELECT_CC_MODE (code, op0, op1);
14608 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
14610 /* This is very simple, but making the interface the same as in the
14611 FP case makes the rest of the code easier. */
14612 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
14613 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
14615 /* Return the test that should be put into the flags user, i.e.
14616 the bcc, scc, or cmov instruction. */
14617 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
14620 /* Figure out whether to use ordered or unordered fp comparisons.
14621 Return the appropriate mode to use. */
14624 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
  /* ??? In order to make all comparisons reversible, we do all comparisons
     non-trapping when compiling for IEEE.  Once gcc is able to distinguish
     between trapping and nontrapping forms of comparisons, we can make
     inequality comparisons trapping again, since it results in better code
     when using FCOM based compares.  */
14631 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
14635 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
14637 enum machine_mode mode = GET_MODE (op0);
14639 if (SCALAR_FLOAT_MODE_P (mode))
14641 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14642 return ix86_fp_compare_mode (code);
14647 /* Only zero flag is needed. */
14648 case EQ: /* ZF=0 */
14649 case NE: /* ZF!=0 */
14651 /* Codes needing carry flag. */
14652 case GEU: /* CF=0 */
14653 case LTU: /* CF=1 */
14654 /* Detect overflow checks. They need just the carry flag. */
14655 if (GET_CODE (op0) == PLUS
14656 && rtx_equal_p (op1, XEXP (op0, 0)))
14660 case GTU: /* CF=0 & ZF=0 */
14661 case LEU: /* CF=1 | ZF=1 */
14662 /* Detect overflow checks. They need just the carry flag. */
14663 if (GET_CODE (op0) == MINUS
14664 && rtx_equal_p (op1, XEXP (op0, 0)))
14668 /* Codes possibly doable only with sign flag when
14669 comparing against zero. */
14670 case GE: /* SF=OF or SF=0 */
14671 case LT: /* SF<>OF or SF=1 */
14672 if (op1 == const0_rtx)
14675 /* For other cases Carry flag is not required. */
      /* Codes doable only with the sign flag when comparing
	 against zero, but for which we miss a jump instruction,
	 so we need to use relational tests against overflow,
	 which thus needs to be zero.  */
14681 case GT: /* ZF=0 & SF=OF */
14682 case LE: /* ZF=1 | SF<>OF */
14683 if (op1 == const0_rtx)
      /* The strcmp pattern does (use flags), and combine may ask us
	 for a proper comparison.  */
14692 gcc_unreachable ();
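/* Example (illustrative): for an overflow check such as

     unsigned int sum = a + b;
     if (sum < a)
       ...

   the comparison is (ltu (plus a b) a); per the PLUS case above only
   the carry flag is needed, so the test can map onto a bare "jc".  */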
14696 /* Return the fixed registers used for condition codes. */
14699 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return VOIDmode.  */
14710 static enum machine_mode
14711 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
14716 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
14719 if ((m1 == CCGCmode && m2 == CCGOCmode)
14720 || (m1 == CCGOCmode && m2 == CCGCmode))
14726 gcc_unreachable ();
      /* These are only compatible with themselves, which we already know.  */
/* Return a comparison we can do that is equivalent to
   swap_condition (code), apart possibly from orderedness.
   But never change orderedness if TARGET_IEEE_FP, returning
   UNKNOWN in that case if necessary.  */
14768 static enum rtx_code
14769 ix86_fp_swap_condition (enum rtx_code code)
14773 case GT: /* GTU - CF=0 & ZF=0 */
14774 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
14775 case GE: /* GEU - CF=0 */
14776 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
14777 case UNLT: /* LTU - CF=1 */
14778 return TARGET_IEEE_FP ? UNKNOWN : GT;
14779 case UNLE: /* LEU - CF=1 | ZF=1 */
14780 return TARGET_IEEE_FP ? UNKNOWN : GE;
14782 return swap_condition (code);
/* Return the cost of comparison CODE using the best strategy for
   performance.  All following functions use the number of instructions
   as a cost metric.  In the future this should be tweaked to compute
   bytes for optimize_size and take into account performance of various
   instructions on various CPUs.  */
14792 ix86_fp_comparison_cost (enum rtx_code code)
14796 /* The cost of code using bit-twiddling on %ah. */
14813 arith_cost = TARGET_IEEE_FP ? 5 : 4;
14817 arith_cost = TARGET_IEEE_FP ? 6 : 4;
14820 gcc_unreachable ();
14823 switch (ix86_fp_comparison_strategy (code))
14825 case IX86_FPCMP_COMI:
14826 return arith_cost > 4 ? 3 : 2;
14827 case IX86_FPCMP_SAHF:
14828 return arith_cost > 4 ? 4 : 3;
/* Return the strategy to use for a floating-point comparison.  We assume
   that fcomi is always preferable where available, since that is also true
   when looking at size (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for
   fnstsw+test).  */
14838 enum ix86_fpcmp_strategy
14839 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
14841 /* Do fcomi/sahf based test when profitable. */
14844 return IX86_FPCMP_COMI;
14846 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
14847 return IX86_FPCMP_SAHF;
14849 return IX86_FPCMP_ARITH;
14852 /* Swap, force into registers, or otherwise massage the two operands
14853 to a fp comparison. The operands are updated in place; the new
14854 comparison code is returned. */
14856 static enum rtx_code
14857 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
14859 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
14860 rtx op0 = *pop0, op1 = *pop1;
14861 enum machine_mode op_mode = GET_MODE (op0);
14862 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
  /* All of the unordered compare instructions only work on registers.
     The same is true of the fcomi compare instructions.  The XFmode
     compare instructions require registers except when comparing
     against zero or when converting operand 1 from fixed point to
     floating point.  */
14871 && (fpcmp_mode == CCFPUmode
14872 || (op_mode == XFmode
14873 && ! (standard_80387_constant_p (op0) == 1
14874 || standard_80387_constant_p (op1) == 1)
14875 && GET_CODE (op1) != FLOAT)
14876 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
14878 op0 = force_reg (op_mode, op0);
14879 op1 = force_reg (op_mode, op1);
14883 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
14884 things around if they appear profitable, otherwise force op0
14885 into a register. */
14887 if (standard_80387_constant_p (op0) == 0
14889 && ! (standard_80387_constant_p (op1) == 0
14892 enum rtx_code new_code = ix86_fp_swap_condition (code);
14893 if (new_code != UNKNOWN)
14896 tmp = op0, op0 = op1, op1 = tmp;
14902 op0 = force_reg (op_mode, op0);
14904 if (CONSTANT_P (op1))
14906 int tmp = standard_80387_constant_p (op1);
14908 op1 = validize_mem (force_const_mem (op_mode, op1));
14912 op1 = force_reg (op_mode, op1);
14915 op1 = force_reg (op_mode, op1);
14919 /* Try to rearrange the comparison to make it cheaper. */
14920 if (ix86_fp_comparison_cost (code)
14921 > ix86_fp_comparison_cost (swap_condition (code))
14922 && (REG_P (op1) || can_create_pseudo_p ()))
14925 tmp = op0, op0 = op1, op1 = tmp;
14926 code = swap_condition (code);
14928 op0 = force_reg (op_mode, op0);
/* Convert the comparison codes we use to represent FP comparisons to
   integer codes that will result in a proper branch.  Return UNKNOWN
   if no such code is available.  */
14970 /* Generate insn patterns to do a floating point compare of OPERANDS. */
14973 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
14975 enum machine_mode fpcmp_mode, intcmp_mode;
14978 fpcmp_mode = ix86_fp_compare_mode (code);
14979 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
14981 /* Do fcomi/sahf based test when profitable. */
14982 switch (ix86_fp_comparison_strategy (code))
14984 case IX86_FPCMP_COMI:
14985 intcmp_mode = fpcmp_mode;
14986 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14987 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14992 case IX86_FPCMP_SAHF:
14993 intcmp_mode = fpcmp_mode;
14994 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14995 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14999 scratch = gen_reg_rtx (HImode);
15000 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
15001 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
15004 case IX86_FPCMP_ARITH:
15005 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
15006 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15007 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
15009 scratch = gen_reg_rtx (HImode);
15010 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
      /* In the unordered case, we have to check C2 for NaNs, which
	 doesn't happen to work out to anything nice combination-wise.
	 So do some bit twiddling on the value we've got in AH to come
	 up with an appropriate set of condition codes.  */
15017 intcmp_mode = CCNOmode;
15022 if (code == GT || !TARGET_IEEE_FP)
15024 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15029 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15030 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15031 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
15032 intcmp_mode = CCmode;
15038 if (code == LT && TARGET_IEEE_FP)
15040 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15041 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
15042 intcmp_mode = CCmode;
15047 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
15053 if (code == GE || !TARGET_IEEE_FP)
15055 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
15060 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15061 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
15067 if (code == LE && TARGET_IEEE_FP)
15069 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15070 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15071 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15072 intcmp_mode = CCmode;
15077 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15083 if (code == EQ && TARGET_IEEE_FP)
15085 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15086 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15087 intcmp_mode = CCmode;
15092 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15098 if (code == NE && TARGET_IEEE_FP)
15100 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15101 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
15107 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15113 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15117 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15122 gcc_unreachable ();
15130 /* Return the test that should be put into the flags user, i.e.
15131 the bcc, scc, or cmov instruction. */
15132 return gen_rtx_fmt_ee (code, VOIDmode,
15133 gen_rtx_REG (intcmp_mode, FLAGS_REG),
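/* Background for the magic constants above (illustrative summary):
   after fnstsw, the x87 condition bits land in %ah as C0 -> 0x01,
   C2 -> 0x04 and C3 -> 0x40, so 0x45 tests all three at once.  For
   GT, e.g., "test $0x45, %ah" followed by a jump-if-zero accepts only
   C0 = C2 = C3 = 0, i.e. op0 > op1 with no NaNs involved.  */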
15138 ix86_expand_compare (enum rtx_code code)
15141 op0 = ix86_compare_op0;
15142 op1 = ix86_compare_op1;
15144 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC)
15145 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_op0, ix86_compare_op1);
15147 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
15149 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
15150 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15153 ret = ix86_expand_int_compare (code, op0, op1);
15159 ix86_expand_branch (enum rtx_code code, rtx label)
15163 switch (GET_MODE (ix86_compare_op0))
15172 tmp = ix86_expand_compare (code);
15173 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
15174 gen_rtx_LABEL_REF (VOIDmode, label),
15176 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
15183 /* Expand DImode branch into multiple compare+branch. */
15185 rtx lo[2], hi[2], label2;
15186 enum rtx_code code1, code2, code3;
15187 enum machine_mode submode;
15189 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
15191 tmp = ix86_compare_op0;
15192 ix86_compare_op0 = ix86_compare_op1;
15193 ix86_compare_op1 = tmp;
15194 code = swap_condition (code);
15196 if (GET_MODE (ix86_compare_op0) == DImode)
15198 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
15199 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
15204 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
15205 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
15209 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
15210 avoid two branches. This costs one extra insn, so disable when
15211 optimizing for size. */
15213 if ((code == EQ || code == NE)
15214 && (!optimize_insn_for_size_p ()
15215 || hi[1] == const0_rtx || lo[1] == const0_rtx))
15220 if (hi[1] != const0_rtx)
15221 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
15222 NULL_RTX, 0, OPTAB_WIDEN);
15225 if (lo[1] != const0_rtx)
15226 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
15227 NULL_RTX, 0, OPTAB_WIDEN);
15229 tmp = expand_binop (submode, ior_optab, xor1, xor0,
15230 NULL_RTX, 0, OPTAB_WIDEN);
15232 ix86_compare_op0 = tmp;
15233 ix86_compare_op1 = const0_rtx;
15234 ix86_expand_branch (code, label);
      /* Otherwise, if we are doing a less-than or greater-or-equal
	 comparison, op1 is a constant and the low word is zero, then we
	 can just examine the high word.  Similarly for a low word of -1
	 and less-or-equal or greater-than comparisons.  */
15243 if (CONST_INT_P (hi[1]))
15246 case LT: case LTU: case GE: case GEU:
15247 if (lo[1] == const0_rtx)
15249 ix86_compare_op0 = hi[0];
15250 ix86_compare_op1 = hi[1];
15251 ix86_expand_branch (code, label);
15255 case LE: case LEU: case GT: case GTU:
15256 if (lo[1] == constm1_rtx)
15258 ix86_compare_op0 = hi[0];
15259 ix86_compare_op1 = hi[1];
15260 ix86_expand_branch (code, label);
15268 /* Otherwise, we need two or three jumps. */
15270 label2 = gen_label_rtx ();
15273 code2 = swap_condition (code);
15274 code3 = unsigned_condition (code);
15278 case LT: case GT: case LTU: case GTU:
15281 case LE: code1 = LT; code2 = GT; break;
15282 case GE: code1 = GT; code2 = LT; break;
15283 case LEU: code1 = LTU; code2 = GTU; break;
15284 case GEU: code1 = GTU; code2 = LTU; break;
15286 case EQ: code1 = UNKNOWN; code2 = NE; break;
15287 case NE: code2 = UNKNOWN; break;
15290 gcc_unreachable ();
15295 * if (hi(a) < hi(b)) goto true;
15296 * if (hi(a) > hi(b)) goto false;
15297 * if (lo(a) < lo(b)) goto true;
15301 ix86_compare_op0 = hi[0];
15302 ix86_compare_op1 = hi[1];
15304 if (code1 != UNKNOWN)
15305 ix86_expand_branch (code1, label);
15306 if (code2 != UNKNOWN)
15307 ix86_expand_branch (code2, label2);
15309 ix86_compare_op0 = lo[0];
15310 ix86_compare_op1 = lo[1];
15311 ix86_expand_branch (code3, label);
15313 if (code2 != UNKNOWN)
15314 emit_label (label2);
      /* If we have already emitted a compare insn, go straight to simple.
	 ix86_expand_compare won't emit anything if ix86_compare_emitted
	 is non-NULL.  */
15327 /* Split branch based on floating point condition. */
15329 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
15330 rtx target1, rtx target2, rtx tmp, rtx pushed)
15335 if (target2 != pc_rtx)
15338 code = reverse_condition_maybe_unordered (code);
15343 condition = ix86_expand_fp_compare (code, op1, op2,
15346 /* Remove pushed operand from stack. */
15348 ix86_free_from_memory (GET_MODE (pushed));
15350 i = emit_jump_insn (gen_rtx_SET
15352 gen_rtx_IF_THEN_ELSE (VOIDmode,
15353 condition, target1, target2)));
15354 if (split_branch_probability >= 0)
15355 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
15359 ix86_expand_setcc (enum rtx_code code, rtx dest)
15363 gcc_assert (GET_MODE (dest) == QImode);
15365 ret = ix86_expand_compare (code);
15366 PUT_MODE (ret, QImode);
15367 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
/* Expand a comparison setting or clearing the carry flag.  Return true
   when successful and set *POP to the comparison for the operation.  */
15373 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
15375 enum machine_mode mode =
15376 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
  /* Do not handle DImode compares that go through the special path.  */
15379 if (mode == (TARGET_64BIT ? TImode : DImode))
15382 if (SCALAR_FLOAT_MODE_P (mode))
15384 rtx compare_op, compare_seq;
15386 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
      /* Shortcut: the following common codes never translate
	 into carry flag compares.  */
15390 if (code == EQ || code == NE || code == UNEQ || code == LTGT
15391 || code == ORDERED || code == UNORDERED)
      /* These comparisons require the zero flag; swap operands so they won't.  */
15395 if ((code == GT || code == UNLE || code == LE || code == UNGT)
15396 && !TARGET_IEEE_FP)
15401 code = swap_condition (code);
      /* Try to expand the comparison and verify that we end up with a
	 carry flag based comparison.  This fails to be true only when
	 we decide to expand the comparison using arithmetic, which is
	 not too common a scenario.  */
15409 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15410 compare_seq = get_insns ();
15413 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15414 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15415 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
15417 code = GET_CODE (compare_op);
15419 if (code != LTU && code != GEU)
15422 emit_insn (compare_seq);
15427 if (!INTEGRAL_MODE_P (mode))
15436 /* Convert a==0 into (unsigned)a<1. */
15439 if (op1 != const0_rtx)
15442 code = (code == EQ ? LTU : GEU);
15445 /* Convert a>b into b<a or a>=b-1. */
15448 if (CONST_INT_P (op1))
15450 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
	  /* Bail out on overflow.  We still can swap operands, but that
	     would force loading of the constant into a register.  */
15453 if (op1 == const0_rtx
15454 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
15456 code = (code == GTU ? GEU : LTU);
15463 code = (code == GTU ? LTU : GEU);
15467 /* Convert a>=0 into (unsigned)a<0x80000000. */
15470 if (mode == DImode || op1 != const0_rtx)
15472 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15473 code = (code == LT ? GEU : LTU);
15477 if (mode == DImode || op1 != constm1_rtx)
15479 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15480 code = (code == LE ? GEU : LTU);
  /* Swapping operands may cause a constant to appear as the first operand.  */
15487 if (!nonimmediate_operand (op0, VOIDmode))
15489 if (!can_create_pseudo_p ())
15491 op0 = force_reg (mode, op0);
15493 ix86_compare_op0 = op0;
15494 ix86_compare_op1 = op1;
15495 *pop = ix86_expand_compare (code);
15496 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
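/* Example (illustrative): a carry-flag compare lets "x < y ? -1 : 0"
   compile to

	cmpl	%esi, %edi
	sbbl	%eax, %eax

   since sbb of a register with itself yields 0 - CF, i.e. all ones
   exactly when the LTU comparison holds.  */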
15501 ix86_expand_int_movcc (rtx operands[])
15503 enum rtx_code code = GET_CODE (operands[1]), compare_code;
15504 rtx compare_seq, compare_op;
15505 enum machine_mode mode = GET_MODE (operands[0]);
15506 bool sign_bit_compare_p = false;
15509 ix86_compare_op0 = XEXP (operands[1], 0);
15510 ix86_compare_op1 = XEXP (operands[1], 1);
15511 compare_op = ix86_expand_compare (code);
15512 compare_seq = get_insns ();
15515 compare_code = GET_CODE (compare_op);
15517 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
15518 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
15519 sign_bit_compare_p = true;
15521 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
15522 HImode insns, we'd be swallowed in word prefix ops. */
15524 if ((mode != HImode || TARGET_FAST_PREFIX)
15525 && (mode != (TARGET_64BIT ? TImode : DImode))
15526 && CONST_INT_P (operands[2])
15527 && CONST_INT_P (operands[3]))
15529 rtx out = operands[0];
15530 HOST_WIDE_INT ct = INTVAL (operands[2]);
15531 HOST_WIDE_INT cf = INTVAL (operands[3]);
15532 HOST_WIDE_INT diff;
      /* Sign bit compares are better done using shifts than by using sbb.  */
15537 if (sign_bit_compare_p
15538 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15539 ix86_compare_op1, &compare_op))
15541 /* Detect overlap between destination and compare sources. */
15544 if (!sign_bit_compare_p)
15547 bool fpcmp = false;
15549 compare_code = GET_CODE (compare_op);
15551 flags = XEXP (compare_op, 0);
15553 if (GET_MODE (flags) == CCFPmode
15554 || GET_MODE (flags) == CCFPUmode)
15558 = ix86_fp_compare_code_to_integer (compare_code);
	  /* To simplify the rest of the code, restrict to the GEU case.  */
15562 if (compare_code == LTU)
15564 HOST_WIDE_INT tmp = ct;
15567 compare_code = reverse_condition (compare_code);
15568 code = reverse_condition (code);
15573 PUT_CODE (compare_op,
15574 reverse_condition_maybe_unordered
15575 (GET_CODE (compare_op)));
15577 PUT_CODE (compare_op,
15578 reverse_condition (GET_CODE (compare_op)));
15582 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
15583 || reg_overlap_mentioned_p (out, ix86_compare_op1))
15584 tmp = gen_reg_rtx (mode);
15586 if (mode == DImode)
15587 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
15589 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
15590 flags, compare_op));
15594 if (code == GT || code == GE)
15595 code = reverse_condition (code);
15598 HOST_WIDE_INT tmp = ct;
15603 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
15604 ix86_compare_op1, VOIDmode, 0, -1);
15617 tmp = expand_simple_binop (mode, PLUS,
15619 copy_rtx (tmp), 1, OPTAB_DIRECT);
15630 tmp = expand_simple_binop (mode, IOR,
15632 copy_rtx (tmp), 1, OPTAB_DIRECT);
15634 else if (diff == -1 && ct)
15644 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15646 tmp = expand_simple_binop (mode, PLUS,
15647 copy_rtx (tmp), GEN_INT (cf),
15648 copy_rtx (tmp), 1, OPTAB_DIRECT);
15656 * andl cf - ct, dest
15666 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15669 tmp = expand_simple_binop (mode, AND,
15671 gen_int_mode (cf - ct, mode),
15672 copy_rtx (tmp), 1, OPTAB_DIRECT);
15674 tmp = expand_simple_binop (mode, PLUS,
15675 copy_rtx (tmp), GEN_INT (ct),
15676 copy_rtx (tmp), 1, OPTAB_DIRECT);
15679 if (!rtx_equal_p (tmp, out))
15680 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
15682 return 1; /* DONE */
15687 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15690 tmp = ct, ct = cf, cf = tmp;
15693 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15695 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15697 /* We may be reversing an unordered compare to a normal compare, which
15698 is not valid in general (we may convert a non-trapping condition
15699 into a trapping one); however, on i386 we currently emit all
15700 comparisons unordered.  */
15701 compare_code = reverse_condition_maybe_unordered (compare_code);
15702 code = reverse_condition_maybe_unordered (code);
15706 compare_code = reverse_condition (compare_code);
15707 code = reverse_condition (code);
15711 compare_code = UNKNOWN;
15712 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
15713 && CONST_INT_P (ix86_compare_op1))
15715 if (ix86_compare_op1 == const0_rtx
15716 && (code == LT || code == GE))
15717 compare_code = code;
15718 else if (ix86_compare_op1 == constm1_rtx)
15722 else if (code == GT)
15727 /* Optimize dest = (op0 < 0) ? -1 : cf. */
15728 if (compare_code != UNKNOWN
15729 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
15730 && (cf == -1 || ct == -1))
15732 /* If lea code below could be used, only optimize
15733 if it results in a 2 insn sequence. */
15735 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
15736 || diff == 3 || diff == 5 || diff == 9)
15737 || (compare_code == LT && ct == -1)
15738 || (compare_code == GE && cf == -1))
15741 * notl op1 (if necessary)
15749 code = reverse_condition (code);
15752 out = emit_store_flag (out, code, ix86_compare_op0,
15753 ix86_compare_op1, VOIDmode, 0, -1);
15755 out = expand_simple_binop (mode, IOR,
15757 out, 1, OPTAB_DIRECT);
15758 if (out != operands[0])
15759 emit_move_insn (operands[0], out);
15761 return 1; /* DONE */
15766 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
15767 || diff == 3 || diff == 5 || diff == 9)
15768 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
15770 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
15776 * lea cf(dest*(ct-cf)),dest
15780 * This also catches the degenerate setcc-only case.
15786 out = emit_store_flag (out, code, ix86_compare_op0,
15787 ix86_compare_op1, VOIDmode, 0, 1);
15790 /* On x86_64 the lea instruction operates on Pmode, so we need
15791 the arithmetic done in the proper mode to match.  */
15793 tmp = copy_rtx (out);
15797 out1 = copy_rtx (out);
15798 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
15802 tmp = gen_rtx_PLUS (mode, tmp, out1);
15808 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
15811 if (!rtx_equal_p (tmp, out))
15814 out = force_operand (tmp, copy_rtx (out));
15816 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
15818 if (!rtx_equal_p (out, operands[0]))
15819 emit_move_insn (operands[0], copy_rtx (out));
15821 return 1; /* DONE */
/*
15825 * General case: Jumpful:
15826 * xorl dest,dest cmpl op1, op2
15827 * cmpl op1, op2 movl ct, dest
15828 * setcc dest jcc 1f
15829 * decl dest movl cf, dest
15830 * andl (cf-ct),dest 1:
* addl ct,dest
15833 * Size 20. Size 14.
15835 * This is reasonably steep, but branch mispredict costs are
15836 * high on modern cpus, so consider failing only if optimizing
* for size.
*/
15840 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15841 && BRANCH_COST (optimize_insn_for_speed_p (),
15846 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15851 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15853 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15855 /* We may be reversing an unordered compare to a normal compare,
15856 which is not valid in general (we may convert a non-trapping
15857 condition into a trapping one); however, on i386 we currently
15858 emit all comparisons unordered.  */
15859 code = reverse_condition_maybe_unordered (code);
15863 code = reverse_condition (code);
15864 if (compare_code != UNKNOWN)
15865 compare_code = reverse_condition (compare_code);
15869 if (compare_code != UNKNOWN)
15871 /* notl op1 (if needed)
15876 For x < 0 (resp. x <= -1) there will be no notl,
15877 so if possible swap the constants to get rid of the
complement.
15879 True/false will be -1/0 while code below (store flag
15880 followed by decrement) is 0/-1, so the constants need
15881 to be exchanged once more. */
15883 if (compare_code == GE || !cf)
15885 code = reverse_condition (code);
15890 HOST_WIDE_INT tmp = cf;
15895 out = emit_store_flag (out, code, ix86_compare_op0,
15896 ix86_compare_op1, VOIDmode, 0, -1);
15900 out = emit_store_flag (out, code, ix86_compare_op0,
15901 ix86_compare_op1, VOIDmode, 0, 1);
15903 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
15904 copy_rtx (out), 1, OPTAB_DIRECT);
15907 out = expand_simple_binop (mode, AND, copy_rtx (out),
15908 gen_int_mode (cf - ct, mode),
15909 copy_rtx (out), 1, OPTAB_DIRECT);
15911 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
15912 copy_rtx (out), 1, OPTAB_DIRECT);
15913 if (!rtx_equal_p (out, operands[0]))
15914 emit_move_insn (operands[0], copy_rtx (out));
15916 return 1; /* DONE */
15920 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15922 /* Try a few things more with specific constants and a variable. */
15925 rtx var, orig_out, out, tmp;
15927 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
15928 return 0; /* FAIL */
15930 /* If one of the two operands is an interesting constant, load a
15931 constant with the above and mask it in with a logical operation. */
15933 if (CONST_INT_P (operands[2]))
15936 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
15937 operands[3] = constm1_rtx, op = and_optab;
15938 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
15939 operands[3] = const0_rtx, op = ior_optab;
15941 return 0; /* FAIL */
15943 else if (CONST_INT_P (operands[3]))
15946 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
15947 operands[2] = constm1_rtx, op = and_optab;
15948 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
15949 operands[2] = const0_rtx, op = ior_optab;
15951 return 0; /* FAIL */
15954 return 0; /* FAIL */
15956 orig_out = operands[0];
15957 tmp = gen_reg_rtx (mode);
15960 /* Recurse to get the constant loaded. */
15961 if (ix86_expand_int_movcc (operands) == 0)
15962 return 0; /* FAIL */
15964 /* Mask in the interesting variable. */
15965 out = expand_binop (mode, op, var, tmp, orig_out, 0,
15967 if (!rtx_equal_p (out, orig_out))
15968 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
15970 return 1; /* DONE */
15974 * For comparison with above,
15984 if (! nonimmediate_operand (operands[2], mode))
15985 operands[2] = force_reg (mode, operands[2]);
15986 if (! nonimmediate_operand (operands[3], mode))
15987 operands[3] = force_reg (mode, operands[3]);
15989 if (! register_operand (operands[2], VOIDmode)
15991 || ! register_operand (operands[3], VOIDmode)))
15992 operands[2] = force_reg (mode, operands[2]);
15995 && ! register_operand (operands[3], VOIDmode))
15996 operands[3] = force_reg (mode, operands[3]);
15998 emit_insn (compare_seq);
15999 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16000 gen_rtx_IF_THEN_ELSE (mode,
16001 compare_op, operands[2],
16004 return 1; /* DONE */
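
/* Illustrative sketch, not part of GCC: a scalar C rendering of the
   branchless xorl/cmpl/setcc/decl/andl/addl "general case" pictured in
   the size comparison above.  The name is hypothetical.  */

static inline int
setcc_select_sketch (int cond, int ct, int cf)
{
  unsigned int flag = cond != 0;	/* setcc: 0 or 1 */
  unsigned int mask = flag - 1;		/* decl: 0 (true) or ~0 (false) */
  /* andl (cf-ct): 0 (true) or cf-ct (false); addl ct completes it,
     giving ct when the condition holds and cf otherwise.  */
  return (int) ((mask & (unsigned int) (cf - ct)) + (unsigned int) ct);
}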
16007 /* Swap, force into registers, or otherwise massage the two operands
16008 to an sse comparison with a mask result. Thus we differ a bit from
16009 ix86_prepare_fp_compare_args which expects to produce a flags result.
16011 The DEST operand exists to help determine whether to commute commutative
16012 operators. The POP0/POP1 operands are updated in place. The new
16013 comparison code is returned, or UNKNOWN if not implementable. */
16015 static enum rtx_code
16016 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
16017 rtx *pop0, rtx *pop1)
16025 /* We have no LTGT as an operator. We could implement it with
16026 NE & ORDERED, but this requires an extra temporary. It's
16027 not clear that it's worth it. */
16034 /* These are supported directly. */
16041 /* For commutative operators, try to canonicalize the destination
16042 operand to be first in the comparison - this helps reload to
16043 avoid extra moves. */
16044 if (!dest || !rtx_equal_p (dest, *pop1))
16052 /* These are not supported directly. Swap the comparison operands
16053 to transform into something that is supported. */
16057 code = swap_condition (code);
16061 gcc_unreachable ();
16067 /* Detect conditional moves that exactly match min/max operational
16068 semantics. Note that this is IEEE safe, as long as we don't
16069 interchange the operands.
16071 Returns FALSE if this conditional move doesn't match a MIN/MAX,
16072 and TRUE if the operation is successful and instructions are emitted. */
16075 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
16076 rtx cmp_op1, rtx if_true, rtx if_false)
16078 enum machine_mode mode;
16084 else if (code == UNGE)
16087 if_true = if_false;
16093 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
16095 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
16100 mode = GET_MODE (dest);
16102 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
16103 but MODE may be a vector mode and thus not appropriate. */
16104 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
16106 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
16109 if_true = force_reg (mode, if_true);
16110 v = gen_rtvec (2, if_true, if_false);
16111 tmp = gen_rtx_UNSPEC (mode, v, u);
16115 code = is_min ? SMIN : SMAX;
16116 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
16119 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
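
/* Illustrative sketch, not part of GCC: SSE min/max are defined as
   dst = dst OP src ? dst : src, so when an operand is a NaN (or for
   -0.0 versus +0.0) the result depends on operand order -- the reason
   the code above refuses to interchange the operands.  The name is
   hypothetical.  */

static inline float
sse_min_semantics_sketch (float a, float b)
{
  /* When the compare is false -- including the unordered NaN case --
     the second operand wins, exactly like minss.  */
  return a < b ? a : b;
}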
16123 /* Expand an SSE vector comparison.  Return the register with the result.  */
16126 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
16127 rtx op_true, rtx op_false)
16129 enum machine_mode mode = GET_MODE (dest);
16132 cmp_op0 = force_reg (mode, cmp_op0);
16133 if (!nonimmediate_operand (cmp_op1, mode))
16134 cmp_op1 = force_reg (mode, cmp_op1);
16137 || reg_overlap_mentioned_p (dest, op_true)
16138 || reg_overlap_mentioned_p (dest, op_false))
16139 dest = gen_reg_rtx (mode);
16141 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
16142 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16147 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
16148 operations. This is used for both scalar and vector conditional moves. */
16151 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
16153 enum machine_mode mode = GET_MODE (dest);
16156 if (op_false == CONST0_RTX (mode))
16158 op_true = force_reg (mode, op_true);
16159 x = gen_rtx_AND (mode, cmp, op_true);
16160 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16162 else if (op_true == CONST0_RTX (mode))
16164 op_false = force_reg (mode, op_false);
16165 x = gen_rtx_NOT (mode, cmp);
16166 x = gen_rtx_AND (mode, x, op_false);
16167 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16169 else if (TARGET_XOP)
16171 rtx pcmov = gen_rtx_SET (mode, dest,
16172 gen_rtx_IF_THEN_ELSE (mode, cmp,
16179 op_true = force_reg (mode, op_true);
16180 op_false = force_reg (mode, op_false);
16182 t2 = gen_reg_rtx (mode);
16184 t3 = gen_reg_rtx (mode);
16188 x = gen_rtx_AND (mode, op_true, cmp);
16189 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
16191 x = gen_rtx_NOT (mode, cmp);
16192 x = gen_rtx_AND (mode, x, op_false);
16193 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
16195 x = gen_rtx_IOR (mode, t3, t2);
16196 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
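
/* Illustrative sketch, not part of GCC: lacking a real vector
   conditional move, the sequence above synthesizes one from the
   all-zeros/all-ones compare mask.  Scalar C analogue; the name is
   hypothetical.  */

static inline unsigned int
mask_select_sketch (unsigned int cmp, unsigned int op_true,
		    unsigned int op_false)
{
  /* CMP must be all-ones or all-zeros, as SSE compares produce.  */
  return (cmp & op_true) | (~cmp & op_false);
}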
16200 /* Expand a floating-point conditional move. Return true if successful. */
16203 ix86_expand_fp_movcc (rtx operands[])
16205 enum machine_mode mode = GET_MODE (operands[0]);
16206 enum rtx_code code = GET_CODE (operands[1]);
16207 rtx tmp, compare_op;
16209 ix86_compare_op0 = XEXP (operands[1], 0);
16210 ix86_compare_op1 = XEXP (operands[1], 1);
16211 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
16213 enum machine_mode cmode;
16215 /* Since we have no cmove for SSE registers, don't force bad register
16216 allocation just to gain access to it.  Deny movcc when the
16217 comparison mode doesn't match the move mode.  */
16218 cmode = GET_MODE (ix86_compare_op0);
16219 if (cmode == VOIDmode)
16220 cmode = GET_MODE (ix86_compare_op1);
16224 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16226 &ix86_compare_op1);
16227 if (code == UNKNOWN)
16230 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
16231 ix86_compare_op1, operands[2],
16235 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
16236 ix86_compare_op1, operands[2], operands[3]);
16237 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
16241 /* The floating point conditional move instructions don't directly
16242 support conditions resulting from a signed integer comparison. */
16244 compare_op = ix86_expand_compare (code);
16245 if (!fcmov_comparison_operator (compare_op, VOIDmode))
16247 tmp = gen_reg_rtx (QImode);
16248 ix86_expand_setcc (code, tmp);
16250 ix86_compare_op0 = tmp;
16251 ix86_compare_op1 = const0_rtx;
16252 compare_op = ix86_expand_compare (code);
16255 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16256 gen_rtx_IF_THEN_ELSE (mode, compare_op,
16257 operands[2], operands[3])));
16262 /* Expand a floating-point vector conditional move; a vcond operation
16263 rather than a movcc operation. */
16266 ix86_expand_fp_vcond (rtx operands[])
16268 enum rtx_code code = GET_CODE (operands[3]);
16271 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16272 &operands[4], &operands[5]);
16273 if (code == UNKNOWN)
16276 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
16277 operands[5], operands[1], operands[2]))
16280 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
16281 operands[1], operands[2]);
16282 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
16286 /* Expand a signed/unsigned integral vector conditional move. */
16289 ix86_expand_int_vcond (rtx operands[])
16291 enum machine_mode mode = GET_MODE (operands[0]);
16292 enum rtx_code code = GET_CODE (operands[3]);
16293 bool negate = false;
16296 cop0 = operands[4];
16297 cop1 = operands[5];
16299 /* XOP supports all of the comparisons on all vector int types. */
16302 /* Canonicalize the comparison to EQ, GT, GTU. */
16313 code = reverse_condition (code);
16319 code = reverse_condition (code);
16325 code = swap_condition (code);
16326 x = cop0, cop0 = cop1, cop1 = x;
16330 gcc_unreachable ();
16333 /* Only SSE4.1/SSE4.2 supports V2DImode. */
16334 if (mode == V2DImode)
16339 /* SSE4.1 supports EQ. */
16340 if (!TARGET_SSE4_1)
16346 /* SSE4.2 supports GT/GTU. */
16347 if (!TARGET_SSE4_2)
16352 gcc_unreachable ();
16356 /* Unsigned parallel compare is not supported by the hardware.
16357 Play some tricks to turn this into a signed comparison
against 0.  */
16361 cop0 = force_reg (mode, cop0);
16369 rtx (*gen_sub3) (rtx, rtx, rtx);
16371 /* Subtract (-(INT MAX) - 1) from both operands to make
them signed.  */
16373 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
16375 gen_sub3 = (mode == V4SImode
16376 ? gen_subv4si3 : gen_subv2di3);
16377 t1 = gen_reg_rtx (mode);
16378 emit_insn (gen_sub3 (t1, cop0, mask));
16380 t2 = gen_reg_rtx (mode);
16381 emit_insn (gen_sub3 (t2, cop1, mask));
16391 /* Perform a parallel unsigned saturating subtraction. */
16392 x = gen_reg_rtx (mode);
16393 emit_insn (gen_rtx_SET (VOIDmode, x,
16394 gen_rtx_US_MINUS (mode, cop0, cop1)));
16397 cop1 = CONST0_RTX (mode);
16403 gcc_unreachable ();
16408 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
16409 operands[1+negate], operands[2-negate]);
16411 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
16412 operands[2-negate]);
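
/* Illustrative sketch, not part of GCC: SSE2 has only signed packed
   compares, so the code above biases both operands by INT_MIN
   (equivalently, flips their sign bits) to map the unsigned order onto
   the signed one.  Scalar C analogue; the name is hypothetical.  */

static inline int
unsigned_gt_via_signed_sketch (unsigned int a, unsigned int b)
{
  return (int) (a ^ 0x80000000u) > (int) (b ^ 0x80000000u);
}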
16416 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
16417 true if we should do zero extension, else sign extension. HIGH_P is
16418 true if we want the N/2 high elements, else the low elements. */
16421 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16423 enum machine_mode imode = GET_MODE (operands[1]);
16424 rtx (*unpack)(rtx, rtx, rtx);
16431 unpack = gen_vec_interleave_highv16qi;
16433 unpack = gen_vec_interleave_lowv16qi;
16437 unpack = gen_vec_interleave_highv8hi;
16439 unpack = gen_vec_interleave_lowv8hi;
16443 unpack = gen_vec_interleave_highv4si;
16445 unpack = gen_vec_interleave_lowv4si;
16448 gcc_unreachable ();
16451 dest = gen_lowpart (imode, operands[0]);
16454 se = force_reg (imode, CONST0_RTX (imode));
16456 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
16457 operands[1], pc_rtx, pc_rtx);
16459 emit_insn (unpack (dest, operands[1], se));
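
/* Illustrative sketch, not part of GCC: without pmovsx, sign extension
   is done by interleaving each element with an all-ones/all-zeros mask
   computed as 0 > element, which supplies the high half of the widened
   value.  Scalar C analogue for one byte; the name is hypothetical.  */

static inline unsigned short
interleave_extend_sketch (unsigned char elt)
{
  unsigned char se = (signed char) elt < 0 ? 0xff : 0x00;  /* pcmpgtb */
  return (unsigned short) ((se << 8) | elt);		   /* punpcklbw */
}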
16462 /* This function performs the same task as ix86_expand_sse_unpack,
16463 but with SSE4.1 instructions. */
16466 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16468 enum machine_mode imode = GET_MODE (operands[1]);
16469 rtx (*unpack)(rtx, rtx);
16476 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
16478 unpack = gen_sse4_1_extendv8qiv8hi2;
16482 unpack = gen_sse4_1_zero_extendv4hiv4si2;
16484 unpack = gen_sse4_1_extendv4hiv4si2;
16488 unpack = gen_sse4_1_zero_extendv2siv2di2;
16490 unpack = gen_sse4_1_extendv2siv2di2;
16493 gcc_unreachable ();
16496 dest = operands[0];
16499 /* Shift the higher 8 bytes into the lower 8 bytes.  */
16500 src = gen_reg_rtx (imode);
16501 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, src),
16502 gen_lowpart (V1TImode, operands[1]),
16508 emit_insn (unpack (dest, src));
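
/* Illustrative sketch, not part of GCC: with SSE4.1 each element is
   extended directly; zero extension keeps the element as-is while sign
   extension replicates its sign bit, and for the high half the vector
   is first shifted down so the same insn applies.  Scalar C analogue;
   the name is hypothetical.  */

static inline int
extend_element_sketch (unsigned char byte, int unsigned_p)
{
  /* pmovzxbw versus pmovsxbw, one element at a time.  */
  return unsigned_p ? (int) byte : (int) (signed char) byte;
}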
16511 /* Expand conditional increment or decrement using adc/sbb instructions.
16512 The default case using setcc followed by the conditional move can be
16513 done by generic code. */
16515 ix86_expand_int_addcc (rtx operands[])
16517 enum rtx_code code = GET_CODE (operands[1]);
16519 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
16521 rtx val = const0_rtx;
16522 bool fpcmp = false;
16523 enum machine_mode mode;
16525 ix86_compare_op0 = XEXP (operands[1], 0);
16526 ix86_compare_op1 = XEXP (operands[1], 1);
16527 if (operands[3] != const1_rtx
16528 && operands[3] != constm1_rtx)
16530 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
16531 ix86_compare_op1, &compare_op))
16533 code = GET_CODE (compare_op);
16535 flags = XEXP (compare_op, 0);
16537 if (GET_MODE (flags) == CCFPmode
16538 || GET_MODE (flags) == CCFPUmode)
16541 code = ix86_fp_compare_code_to_integer (code);
16548 PUT_CODE (compare_op,
16549 reverse_condition_maybe_unordered
16550 (GET_CODE (compare_op)));
16552 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
16555 mode = GET_MODE (operands[0]);
16557 /* Construct either adc or sbb insn. */
16558 if ((code == LTU) == (operands[3] == constm1_rtx))
16563 insn = gen_subqi3_carry;
16566 insn = gen_subhi3_carry;
16569 insn = gen_subsi3_carry;
16572 insn = gen_subdi3_carry;
16575 gcc_unreachable ();
16583 insn = gen_addqi3_carry;
16586 insn = gen_addhi3_carry;
16589 insn = gen_addsi3_carry;
16592 insn = gen_adddi3_carry;
16595 gcc_unreachable ();
16598 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
16600 return 1; /* DONE */
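
/* Illustrative sketch, not part of GCC: the adc/sbb insns chosen above
   add or subtract the carry produced by a compare, giving a branchless
   conditional increment or decrement.  Scalar C analogue; the name is
   hypothetical.  */

static inline unsigned int
conditional_increment_sketch (unsigned int src, unsigned int a,
			      unsigned int b)
{
  /* cmp a, b; adc src, 0 increments SRC exactly when a < b unsigned.  */
  return src + (a < b);
}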
16604 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
16605 works for floating point parameters and non-offsettable memories.
16606 For pushes, it returns just stack offsets; the values will be saved
16607 in the right order.  At most four parts are generated.  */
16610 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
16615 size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
16617 size = (GET_MODE_SIZE (mode) + 4) / 8;
16619 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
16620 gcc_assert (size >= 2 && size <= 4);
16622 /* Optimize constant pool reference to immediates. This is used by fp
16623 moves, which force all constants to memory to allow combining.  */
16624 if (MEM_P (operand) && MEM_READONLY_P (operand))
16626 rtx tmp = maybe_get_pool_constant (operand);
16631 if (MEM_P (operand) && !offsettable_memref_p (operand))
16633 /* The only non-offsettable memories we handle are pushes.  */
16634 int ok = push_operand (operand, VOIDmode);
16638 operand = copy_rtx (operand);
16639 PUT_MODE (operand, Pmode);
16640 parts[0] = parts[1] = parts[2] = parts[3] = operand;
16644 if (GET_CODE (operand) == CONST_VECTOR)
16646 enum machine_mode imode = int_mode_for_mode (mode);
16647 /* Caution: if we looked through a constant pool memory above,
16648 the operand may actually have a different mode now. That's
16649 ok, since we want to pun this all the way back to an integer. */
16650 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
16651 gcc_assert (operand != NULL);
16657 if (mode == DImode)
16658 split_di (&operand, 1, &parts[0], &parts[1]);
16663 if (REG_P (operand))
16665 gcc_assert (reload_completed);
16666 for (i = 0; i < size; i++)
16667 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
16669 else if (offsettable_memref_p (operand))
16671 operand = adjust_address (operand, SImode, 0);
16672 parts[0] = operand;
16673 for (i = 1; i < size; i++)
16674 parts[i] = adjust_address (operand, SImode, 4 * i);
16676 else if (GET_CODE (operand) == CONST_DOUBLE)
16681 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16685 real_to_target (l, &r, mode);
16686 parts[3] = gen_int_mode (l[3], SImode);
16687 parts[2] = gen_int_mode (l[2], SImode);
16690 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
16691 parts[2] = gen_int_mode (l[2], SImode);
16694 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
16697 gcc_unreachable ();
16699 parts[1] = gen_int_mode (l[1], SImode);
16700 parts[0] = gen_int_mode (l[0], SImode);
16703 gcc_unreachable ();
16708 if (mode == TImode)
16709 split_ti (&operand, 1, &parts[0], &parts[1]);
16710 if (mode == XFmode || mode == TFmode)
16712 enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
16713 if (REG_P (operand))
16715 gcc_assert (reload_completed);
16716 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
16717 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
16719 else if (offsettable_memref_p (operand))
16721 operand = adjust_address (operand, DImode, 0);
16722 parts[0] = operand;
16723 parts[1] = adjust_address (operand, upper_mode, 8);
16725 else if (GET_CODE (operand) == CONST_DOUBLE)
16730 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16731 real_to_target (l, &r, mode);
16733 /* Do not use shift by 32 to avoid warning on 32bit systems. */
16734 if (HOST_BITS_PER_WIDE_INT >= 64)
16737 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
16738 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
16741 parts[0] = immed_double_const (l[0], l[1], DImode);
16743 if (upper_mode == SImode)
16744 parts[1] = gen_int_mode (l[2], SImode);
16745 else if (HOST_BITS_PER_WIDE_INT >= 64)
16748 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
16749 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
16752 parts[1] = immed_double_const (l[2], l[3], DImode);
16755 gcc_unreachable ();
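
/* Illustrative sketch, not part of GCC: conceptually the splitting above
   just slices a wide scalar at word boundaries, least significant part
   first, matching the little-endian layout the parts rely on.  The name
   is hypothetical and `unsigned long long' stands in for the wide mode.  */

static inline void
split_into_words_sketch (unsigned long long value, unsigned int parts[2])
{
  parts[0] = (unsigned int) (value & 0xffffffffu);	/* low part */
  parts[1] = (unsigned int) (value >> 32);		/* high part */
}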
16762 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
16763 Return false when normal moves are needed; true when all required
16764 insns have been emitted.  Operands 2-5 contain the input values
16765 in the correct order; operands 6-9 contain the output values.
16768 ix86_split_long_move (rtx operands[])
16773 int collisions = 0;
16774 enum machine_mode mode = GET_MODE (operands[0]);
16775 bool collisionparts[4];
16777 /* The DFmode expanders may ask us to move a double.
16778 For a 64-bit target this is a single move.  By hiding the fact
16779 here we simplify the i386.md splitters.  */
16780 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
16782 /* Optimize constant pool reference to immediates. This is used by
16783 fp moves, which force all constants to memory to allow combining.  */
16785 if (MEM_P (operands[1])
16786 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
16787 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
16788 operands[1] = get_pool_constant (XEXP (operands[1], 0));
16789 if (push_operand (operands[0], VOIDmode))
16791 operands[0] = copy_rtx (operands[0]);
16792 PUT_MODE (operands[0], Pmode);
16795 operands[0] = gen_lowpart (DImode, operands[0]);
16796 operands[1] = gen_lowpart (DImode, operands[1]);
16797 emit_move_insn (operands[0], operands[1]);
16801 /* The only non-offsettable memory we handle is push. */
16802 if (push_operand (operands[0], VOIDmode))
16805 gcc_assert (!MEM_P (operands[0])
16806 || offsettable_memref_p (operands[0]));
16808 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
16809 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
16811 /* When emitting a push, take care of source operands on the stack.  */
16812 if (push && MEM_P (operands[1])
16813 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
16815 rtx src_base = XEXP (part[1][nparts - 1], 0);
16817 /* Compensate for the stack decrement by 4. */
16818 if (!TARGET_64BIT && nparts == 3
16819 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
16820 src_base = plus_constant (src_base, 4);
16822 /* src_base refers to the stack pointer and is
16823 automatically decreased by emitted push. */
16824 for (i = 0; i < nparts; i++)
16825 part[1][i] = change_address (part[1][i],
16826 GET_MODE (part[1][i]), src_base);
16829 /* We need to do the copy in the right order in case an address register
16830 of the source overlaps the destination. */
16831 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
16835 for (i = 0; i < nparts; i++)
16838 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
16839 if (collisionparts[i])
16843 /* Collision in the middle part can be handled by reordering. */
16844 if (collisions == 1 && nparts == 3 && collisionparts [1])
16846 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16847 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16849 else if (collisions == 1
16851 && (collisionparts [1] || collisionparts [2]))
16853 if (collisionparts [1])
16855 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16856 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16860 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
16861 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
16865 /* If there are more collisions, we can't handle it by reordering.
16866 Do an lea to the last part and use only one colliding move. */
16867 else if (collisions > 1)
16873 base = part[0][nparts - 1];
16875 /* Handle the case when the last part isn't valid for lea.
16876 Happens in 64-bit mode storing the 12-byte XFmode. */
16877 if (GET_MODE (base) != Pmode)
16878 base = gen_rtx_REG (Pmode, REGNO (base));
16880 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
16881 part[1][0] = replace_equiv_address (part[1][0], base);
16882 for (i = 1; i < nparts; i++)
16884 tmp = plus_constant (base, UNITS_PER_WORD * i);
16885 part[1][i] = replace_equiv_address (part[1][i], tmp);
16896 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
16897 emit_insn (gen_addsi3 (stack_pointer_rtx,
16898 stack_pointer_rtx, GEN_INT (-4)));
16899 emit_move_insn (part[0][2], part[1][2]);
16901 else if (nparts == 4)
16903 emit_move_insn (part[0][3], part[1][3]);
16904 emit_move_insn (part[0][2], part[1][2]);
16909 /* In 64-bit mode we don't have a 32-bit push available.  If this is a
16910 register, that is OK: we just use the larger counterpart.  We also
16911 retype memories; these come from an attempt to avoid a REX prefix on
16912 moving the second half of a TFmode value.  */
16913 if (GET_MODE (part[1][1]) == SImode)
16915 switch (GET_CODE (part[1][1]))
16918 part[1][1] = adjust_address (part[1][1], DImode, 0);
16922 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
16926 gcc_unreachable ();
16929 if (GET_MODE (part[1][0]) == SImode)
16930 part[1][0] = part[1][1];
16933 emit_move_insn (part[0][1], part[1][1]);
16934 emit_move_insn (part[0][0], part[1][0]);
16938 /* Choose the correct order so we don't overwrite the source before it is copied.  */
16939 if ((REG_P (part[0][0])
16940 && REG_P (part[1][1])
16941 && (REGNO (part[0][0]) == REGNO (part[1][1])
16943 && REGNO (part[0][0]) == REGNO (part[1][2]))
16945 && REGNO (part[0][0]) == REGNO (part[1][3]))))
16947 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
16949 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
16951 operands[2 + i] = part[0][j];
16952 operands[6 + i] = part[1][j];
16957 for (i = 0; i < nparts; i++)
16959 operands[2 + i] = part[0][i];
16960 operands[6 + i] = part[1][i];
16964 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
16965 if (optimize_insn_for_size_p ())
16967 for (j = 0; j < nparts - 1; j++)
16968 if (CONST_INT_P (operands[6 + j])
16969 && operands[6 + j] != const0_rtx
16970 && REG_P (operands[2 + j]))
16971 for (i = j; i < nparts - 1; i++)
16972 if (CONST_INT_P (operands[7 + i])
16973 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
16974 operands[7 + i] = operands[2 + j];
16977 for (i = 0; i < nparts; i++)
16978 emit_move_insn (operands[2 + i], operands[6 + i]);
16983 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
16984 left shift by a constant, either using a single shift or
16985 a sequence of add instructions. */
16988 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
16992 emit_insn ((mode == DImode
16994 : gen_adddi3) (operand, operand, operand));
16996 else if (!optimize_insn_for_size_p ()
16997 && count * ix86_cost->add <= ix86_cost->shift_const)
17000 for (i = 0; i < count; i++)
17002 emit_insn ((mode == DImode
17004 : gen_adddi3) (operand, operand, operand));
17008 emit_insn ((mode == DImode
17010 : gen_ashldi3) (operand, operand, GEN_INT (count)));
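
/* Illustrative sketch, not part of GCC: each self-addition doubles the
   operand, so COUNT additions equal a left shift by COUNT; the routine
   above picks whichever form is cheaper for the target.  The name is
   hypothetical.  */

static inline unsigned int
shift_via_adds_sketch (unsigned int operand, int count)
{
  int i;
  for (i = 0; i < count; i++)
    operand += operand;		/* operand <<= 1 */
  return operand;
}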
17014 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
17016 rtx low[2], high[2];
17018 const int single_width = mode == DImode ? 32 : 64;
17020 if (CONST_INT_P (operands[2]))
17022 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17023 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17025 if (count >= single_width)
17027 emit_move_insn (high[0], low[1]);
17028 emit_move_insn (low[0], const0_rtx);
17030 if (count > single_width)
17031 ix86_expand_ashl_const (high[0], count - single_width, mode);
17035 if (!rtx_equal_p (operands[0], operands[1]))
17036 emit_move_insn (operands[0], operands[1]);
17037 emit_insn ((mode == DImode
17039 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
17040 ix86_expand_ashl_const (low[0], count, mode);
17045 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17047 if (operands[1] == const1_rtx)
17049 /* Assuming we've chosen QImode-capable registers, 1 << N
17050 can be done with two 32/64-bit shifts, no branches, no cmoves. */
17051 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
17053 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
17055 ix86_expand_clear (low[0]);
17056 ix86_expand_clear (high[0]);
17057 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
17059 d = gen_lowpart (QImode, low[0]);
17060 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17061 s = gen_rtx_EQ (QImode, flags, const0_rtx);
17062 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17064 d = gen_lowpart (QImode, high[0]);
17065 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17066 s = gen_rtx_NE (QImode, flags, const0_rtx);
17067 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17070 /* Otherwise, we can get the same results by manually performing
17071 a bit extract operation on bit 5/6, and then performing the two
17072 shifts. The two methods of getting 0/1 into low/high are exactly
17073 the same size. Avoiding the shift in the bit extract case helps
17074 pentium4 a bit; no one else seems to care much either way. */
17079 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
17080 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
17082 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
17083 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
17085 emit_insn ((mode == DImode
17087 : gen_lshrdi3) (high[0], high[0],
17088 GEN_INT (mode == DImode ? 5 : 6)));
17089 emit_insn ((mode == DImode
17091 : gen_anddi3) (high[0], high[0], const1_rtx));
17092 emit_move_insn (low[0], high[0]);
17093 emit_insn ((mode == DImode
17095 : gen_xordi3) (low[0], low[0], const1_rtx));
17098 emit_insn ((mode == DImode
17100 : gen_ashldi3) (low[0], low[0], operands[2]));
17101 emit_insn ((mode == DImode
17103 : gen_ashldi3) (high[0], high[0], operands[2]));
17107 if (operands[1] == constm1_rtx)
17109 /* For -1 << N, we can avoid the shld instruction, because we
17110 know that we're shifting 0...31/63 ones into a -1. */
17111 emit_move_insn (low[0], constm1_rtx);
17112 if (optimize_insn_for_size_p ())
17113 emit_move_insn (high[0], low[0]);
17115 emit_move_insn (high[0], constm1_rtx);
17119 if (!rtx_equal_p (operands[0], operands[1]))
17120 emit_move_insn (operands[0], operands[1]);
17122 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17123 emit_insn ((mode == DImode
17125 : gen_x86_64_shld) (high[0], low[0], operands[2]));
17128 emit_insn ((mode == DImode
17130 : gen_ashldi3) (low[0], low[0], operands[2]));
17132 if (TARGET_CMOVE && scratch)
17134 ix86_expand_clear (scratch);
17135 emit_insn ((mode == DImode
17136 ? gen_x86_shiftsi_adj_1
17137 : gen_x86_shiftdi_adj_1) (high[0], low[0], operands[2],
17141 emit_insn ((mode == DImode
17142 ? gen_x86_shiftsi_adj_2
17143 : gen_x86_shiftdi_adj_2) (high[0], low[0], operands[2]));
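
/* Illustrative sketch, not part of GCC: a 64-bit left shift split into
   32-bit halves, as the code above emits it for constant counts --
   shld feeds high bits of the low word into the high word, while counts
   of 32 or more just move the low word up.  The name is hypothetical.  */

static inline void
double_word_ashl_sketch (unsigned int *low, unsigned int *high, int count)
{
  if (count >= 32)
    {
      *high = *low << (count - 32);
      *low = 0;
    }
  else if (count > 0)
    {
      *high = (*high << count) | (*low >> (32 - count));	/* shld */
      *low <<= count;
    }
}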
17147 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
17149 rtx low[2], high[2];
17151 const int single_width = mode == DImode ? 32 : 64;
17153 if (CONST_INT_P (operands[2]))
17155 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17156 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17158 if (count == single_width * 2 - 1)
17160 emit_move_insn (high[0], high[1]);
17161 emit_insn ((mode == DImode
17163 : gen_ashrdi3) (high[0], high[0],
17164 GEN_INT (single_width - 1)));
17165 emit_move_insn (low[0], high[0]);
17168 else if (count >= single_width)
17170 emit_move_insn (low[0], high[1]);
17171 emit_move_insn (high[0], low[0]);
17172 emit_insn ((mode == DImode
17174 : gen_ashrdi3) (high[0], high[0],
17175 GEN_INT (single_width - 1)));
17176 if (count > single_width)
17177 emit_insn ((mode == DImode
17179 : gen_ashrdi3) (low[0], low[0],
17180 GEN_INT (count - single_width)));
17184 if (!rtx_equal_p (operands[0], operands[1]))
17185 emit_move_insn (operands[0], operands[1]);
17186 emit_insn ((mode == DImode
17188 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17189 emit_insn ((mode == DImode
17191 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
17196 if (!rtx_equal_p (operands[0], operands[1]))
17197 emit_move_insn (operands[0], operands[1]);
17199 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17201 emit_insn ((mode == DImode
17203 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17204 emit_insn ((mode == DImode
17206 : gen_ashrdi3) (high[0], high[0], operands[2]));
17208 if (TARGET_CMOVE && scratch)
17210 emit_move_insn (scratch, high[0]);
17211 emit_insn ((mode == DImode
17213 : gen_ashrdi3) (scratch, scratch,
17214 GEN_INT (single_width - 1)));
17215 emit_insn ((mode == DImode
17216 ? gen_x86_shiftsi_adj_1
17217 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17221 emit_insn ((mode == DImode
17222 ? gen_x86_shiftsi_adj_3
17223 : gen_x86_shiftdi_adj_3) (low[0], high[0], operands[2]));
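
/* Illustrative sketch, not part of GCC: the arithmetic right shift must
   fill with copies of the sign bit, obtained above by shifting the high
   word right by width-1.  Assumes `>>' on a negative int is an
   arithmetic shift, as on the targets this file supports; the name is
   hypothetical.  */

static inline void
double_word_ashr_sketch (unsigned int *low, int *high, int count)
{
  if (count >= 32)
    {
      *low = (unsigned int) (*high >> (count - 32));
      *high >>= 31;			/* 0 or -1: the sign fill */
    }
  else if (count > 0)
    {
      *low = (*low >> count) | ((unsigned int) *high << (32 - count));
      *high >>= count;			/* the shrd pair */
    }
}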
17228 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
17230 rtx low[2], high[2];
17232 const int single_width = mode == DImode ? 32 : 64;
17234 if (CONST_INT_P (operands[2]))
17236 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17237 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17239 if (count >= single_width)
17241 emit_move_insn (low[0], high[1]);
17242 ix86_expand_clear (high[0]);
17244 if (count > single_width)
17245 emit_insn ((mode == DImode
17247 : gen_lshrdi3) (low[0], low[0],
17248 GEN_INT (count - single_width)));
17252 if (!rtx_equal_p (operands[0], operands[1]))
17253 emit_move_insn (operands[0], operands[1]);
17254 emit_insn ((mode == DImode
17256 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17257 emit_insn ((mode == DImode
17259 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
17264 if (!rtx_equal_p (operands[0], operands[1]))
17265 emit_move_insn (operands[0], operands[1]);
17267 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17269 emit_insn ((mode == DImode
17271 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17272 emit_insn ((mode == DImode
17274 : gen_lshrdi3) (high[0], high[0], operands[2]));
17276 /* Heh. By reversing the arguments, we can reuse this pattern. */
17277 if (TARGET_CMOVE && scratch)
17279 ix86_expand_clear (scratch);
17280 emit_insn ((mode == DImode
17281 ? gen_x86_shiftsi_adj_1
17282 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17286 emit_insn ((mode == DImode
17287 ? gen_x86_shiftsi_adj_2
17288 : gen_x86_shiftdi_adj_2) (low[0], high[0], operands[2]));
17292 /* Predict the just-emitted jump instruction to be taken with probability PROB.  */
17294 predict_jump (int prob)
17296 rtx insn = get_last_insn ();
17297 gcc_assert (JUMP_P (insn));
17298 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
17301 /* Helper function for the string operations below.  Test whether the
17302 VALUE bits of VARIABLE are clear; if so, jump to the returned label.  */
17304 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
17306 rtx label = gen_label_rtx ();
17307 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
17308 if (GET_MODE (variable) == DImode)
17309 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
17311 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
17312 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
17315 predict_jump (REG_BR_PROB_BASE * 50 / 100);
17317 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17321 /* Decrease COUNTREG by VALUE.  */
17323 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
17325 if (GET_MODE (countreg) == DImode)
17326 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
17328 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
17331 /* Zero-extend the possibly-SImode EXP to a Pmode register.  */
17333 ix86_zero_extend_to_Pmode (rtx exp)
17336 if (GET_MODE (exp) == VOIDmode)
17337 return force_reg (Pmode, exp);
17338 if (GET_MODE (exp) == Pmode)
17339 return copy_to_mode_reg (Pmode, exp);
17340 r = gen_reg_rtx (Pmode);
17341 emit_insn (gen_zero_extendsidi2 (r, exp));
17345 /* Divide COUNTREG by SCALE. */
17347 scale_counter (rtx countreg, int scale)
17353 if (CONST_INT_P (countreg))
17354 return GEN_INT (INTVAL (countreg) / scale);
17355 gcc_assert (REG_P (countreg));
17357 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
17358 GEN_INT (exact_log2 (scale)),
17359 NULL, 1, OPTAB_DIRECT);
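
/* Illustrative sketch, not part of GCC: rep movs with a wider element
   wants its count in elements rather than bytes, hence the division by
   the element size -- a plain right shift, since the size is a power of
   two; leftover bytes go to the epilogue.  The name is hypothetical.  */

static inline unsigned int
scale_counter_sketch (unsigned int byte_count, unsigned int elt_size)
{
  return byte_count / elt_size;		/* elt_size is 1, 2, 4 or 8 */
}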
17363 /* Return the mode for the memcpy/memset loop counter.  Prefer SImode over
17364 DImode for constant loop counts. */
17366 static enum machine_mode
17367 counter_mode (rtx count_exp)
17369 if (GET_MODE (count_exp) != VOIDmode)
17370 return GET_MODE (count_exp);
17371 if (!CONST_INT_P (count_exp))
17373 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
17378 /* When SRCPTR is non-NULL, output a simple loop to copy memory from
17379 SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times; the overall
17380 size is COUNT, specified in bytes.  When SRCPTR is NULL, output the
17381 equivalent loop to set memory to VALUE (assumed to be in MODE).
17383 The size is rounded down to a whole number of chunks moved at once.
17384 SRCMEM and DESTMEM provide MEM rtxes to feed proper aliasing info.  */
17388 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
17389 rtx destptr, rtx srcptr, rtx value,
17390 rtx count, enum machine_mode mode, int unroll,
17393 rtx out_label, top_label, iter, tmp;
17394 enum machine_mode iter_mode = counter_mode (count);
17395 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
17396 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
17402 top_label = gen_label_rtx ();
17403 out_label = gen_label_rtx ();
17404 iter = gen_reg_rtx (iter_mode);
17406 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
17407 NULL, 1, OPTAB_DIRECT);
17408 /* Those two should combine. */
17409 if (piece_size == const1_rtx)
17411 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
17413 predict_jump (REG_BR_PROB_BASE * 10 / 100);
17415 emit_move_insn (iter, const0_rtx);
17417 emit_label (top_label);
17419 tmp = convert_modes (Pmode, iter_mode, iter, true);
17420 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
17421 destmem = change_address (destmem, mode, x_addr);
17425 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
17426 srcmem = change_address (srcmem, mode, y_addr);
17428 /* When unrolling for chips that reorder memory reads and writes,
17429 we can save registers by using a single temporary.
17430 Also, using 4 temporaries is overkill in 32-bit mode.  */
17431 if (!TARGET_64BIT && 0)
17433 for (i = 0; i < unroll; i++)
17438 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17440 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17442 emit_move_insn (destmem, srcmem);
17448 gcc_assert (unroll <= 4);
17449 for (i = 0; i < unroll; i++)
17451 tmpreg[i] = gen_reg_rtx (mode);
17455 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17457 emit_move_insn (tmpreg[i], srcmem);
17459 for (i = 0; i < unroll; i++)
17464 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17466 emit_move_insn (destmem, tmpreg[i]);
17471 for (i = 0; i < unroll; i++)
17475 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17476 emit_move_insn (destmem, value);
17479 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
17480 true, OPTAB_LIB_WIDEN);
17482 emit_move_insn (iter, tmp);
17484 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
17486 if (expected_size != -1)
17488 expected_size /= GET_MODE_SIZE (mode) * unroll;
17489 if (expected_size == 0)
17491 else if (expected_size > REG_BR_PROB_BASE)
17492 predict_jump (REG_BR_PROB_BASE - 1);
17494 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
17497 predict_jump (REG_BR_PROB_BASE * 80 / 100);
17498 iter = ix86_zero_extend_to_Pmode (iter);
17499 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
17500 true, OPTAB_LIB_WIDEN);
17501 if (tmp != destptr)
17502 emit_move_insn (destptr, tmp);
17505 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
17506 true, OPTAB_LIB_WIDEN);
17508 emit_move_insn (srcptr, tmp);
17510 emit_label (out_label);
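
/* Illustrative sketch, not part of GCC: the loop emitted above first
   masks the byte count down to a multiple of the chunk size, then moves
   UNROLL chunks per iteration, loads before stores, mirroring the
   register temporaries used for reordering chips; the remainder is left
   for the epilogue.  Names are hypothetical.  */

static inline void
unrolled_copy_sketch (unsigned int *dest, const unsigned int *src,
		      unsigned int count_bytes)
{
  /* Round down to a multiple of the 16 bytes moved per iteration.  */
  unsigned int i, n = (count_bytes & ~15u) / sizeof (unsigned int);
  for (i = 0; i < n; i += 4)
    {
      unsigned int t0 = src[i], t1 = src[i + 1];
      unsigned int t2 = src[i + 2], t3 = src[i + 3];
      dest[i] = t0; dest[i + 1] = t1;
      dest[i + 2] = t2; dest[i + 3] = t3;
    }
}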
17513 /* Output a "rep; mov" instruction.
17514 Arguments have the same meaning as for the previous function.  */
17516 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
17517 rtx destptr, rtx srcptr,
17519 enum machine_mode mode)
17525 /* If the size is known, it is shorter to use rep movs. */
17526 if (mode == QImode && CONST_INT_P (count)
17527 && !(INTVAL (count) & 3))
17530 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17531 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17532 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
17533 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
17534 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17535 if (mode != QImode)
17537 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17538 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17539 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17540 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
17541 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17542 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
17546 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17547 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
17549 if (CONST_INT_P (count))
17551 count = GEN_INT (INTVAL (count)
17552 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17553 destmem = shallow_copy_rtx (destmem);
17554 srcmem = shallow_copy_rtx (srcmem);
17555 set_mem_size (destmem, count);
17556 set_mem_size (srcmem, count);
17560 if (MEM_SIZE (destmem))
17561 set_mem_size (destmem, NULL_RTX);
17562 if (MEM_SIZE (srcmem))
17563 set_mem_size (srcmem, NULL_RTX);
17565 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
17569 /* Output a "rep; stos" instruction.
17570 Arguments have the same meaning as for the previous function.  */
17572 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
17573 rtx count, enum machine_mode mode,
17579 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17580 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17581 value = force_reg (mode, gen_lowpart (mode, value));
17582 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17583 if (mode != QImode)
17585 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17586 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17587 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17590 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17591 if (orig_value == const0_rtx && CONST_INT_P (count))
17593 count = GEN_INT (INTVAL (count)
17594 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17595 destmem = shallow_copy_rtx (destmem);
17596 set_mem_size (destmem, count);
17598 else if (MEM_SIZE (destmem))
17599 set_mem_size (destmem, NULL_RTX);
17600 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
17604 emit_strmov (rtx destmem, rtx srcmem,
17605 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
17607 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
17608 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
17609 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17612 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
17614 expand_movmem_epilogue (rtx destmem, rtx srcmem,
17615 rtx destptr, rtx srcptr, rtx count, int max_size)
17618 if (CONST_INT_P (count))
17620 HOST_WIDE_INT countval = INTVAL (count);
17623 if ((countval & 0x10) && max_size > 16)
17627 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17628 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
17631 gcc_unreachable ();
17634 if ((countval & 0x08) && max_size > 8)
17637 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17640 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17641 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
17645 if ((countval & 0x04) && max_size > 4)
17647 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17650 if ((countval & 0x02) && max_size > 2)
17652 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
17655 if ((countval & 0x01) && max_size > 1)
17657 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
17664 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
17665 count, 1, OPTAB_DIRECT);
17666 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
17667 count, QImode, 1, 4);
17671 /* When there are stringops, we can cheaply increase dest and src pointers.
17672 Otherwise we save code size by maintaining an offset (zero is readily
17673 available from the preceding rep operation) and using x86 addressing modes.  */
17675 if (TARGET_SINGLE_STRINGOP)
17679 rtx label = ix86_expand_aligntest (count, 4, true);
17680 src = change_address (srcmem, SImode, srcptr);
17681 dest = change_address (destmem, SImode, destptr);
17682 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17683 emit_label (label);
17684 LABEL_NUSES (label) = 1;
17688 rtx label = ix86_expand_aligntest (count, 2, true);
17689 src = change_address (srcmem, HImode, srcptr);
17690 dest = change_address (destmem, HImode, destptr);
17691 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17692 emit_label (label);
17693 LABEL_NUSES (label) = 1;
17697 rtx label = ix86_expand_aligntest (count, 1, true);
17698 src = change_address (srcmem, QImode, srcptr);
17699 dest = change_address (destmem, QImode, destptr);
17700 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17701 emit_label (label);
17702 LABEL_NUSES (label) = 1;
17707 rtx offset = force_reg (Pmode, const0_rtx);
17712 rtx label = ix86_expand_aligntest (count, 4, true);
17713 src = change_address (srcmem, SImode, srcptr);
17714 dest = change_address (destmem, SImode, destptr);
17715 emit_move_insn (dest, src);
17716 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
17717 true, OPTAB_LIB_WIDEN);
17719 emit_move_insn (offset, tmp);
17720 emit_label (label);
17721 LABEL_NUSES (label) = 1;
17725 rtx label = ix86_expand_aligntest (count, 2, true);
17726 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17727 src = change_address (srcmem, HImode, tmp);
17728 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17729 dest = change_address (destmem, HImode, tmp);
17730 emit_move_insn (dest, src);
17731 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
17732 true, OPTAB_LIB_WIDEN);
17734 emit_move_insn (offset, tmp);
17735 emit_label (label);
17736 LABEL_NUSES (label) = 1;
17740 rtx label = ix86_expand_aligntest (count, 1, true);
17741 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17742 src = change_address (srcmem, QImode, tmp);
17743 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17744 dest = change_address (destmem, QImode, tmp);
17745 emit_move_insn (dest, src);
17746 emit_label (label);
17747 LABEL_NUSES (label) = 1;
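
/* Illustrative sketch, not part of GCC: for a known residual count the
   epilogue above peels one move per set bit -- 8, 4, 2 and then 1 bytes
   -- so at most four moves finish any tail below 16 bytes.  A
   byte-pointer C analogue; names are hypothetical.  */

static inline void
copy_tail_sketch (unsigned char *dest, const unsigned char *src,
		  unsigned int count)
{
  unsigned int i, step;
  for (step = 8; step > 0; step >>= 1)
    if (count & step)
      {
	for (i = 0; i < step; i++)
	  dest[i] = src[i];
	dest += step;
	src += step;
      }
}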
17752 /* Output code to set at most count & (max_size - 1) bytes starting at DEST.  */
17754 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
17755 rtx count, int max_size)
17758 expand_simple_binop (counter_mode (count), AND, count,
17759 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
17760 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
17761 gen_lowpart (QImode, value), count, QImode,
17765 /* Output code to set at most count & (max_size - 1) bytes starting at DEST.  */
17767 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
17771 if (CONST_INT_P (count))
17773 HOST_WIDE_INT countval = INTVAL (count);
17776 if ((countval & 0x10) && max_size > 16)
17780 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17781 emit_insn (gen_strset (destptr, dest, value));
17782 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
17783 emit_insn (gen_strset (destptr, dest, value));
17786 gcc_unreachable ();
17789 if ((countval & 0x08) && max_size > 8)
17793 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17794 emit_insn (gen_strset (destptr, dest, value));
17798 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17799 emit_insn (gen_strset (destptr, dest, value));
17800 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
17801 emit_insn (gen_strset (destptr, dest, value));
17805 if ((countval & 0x04) && max_size > 4)
17807 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17808 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17811 if ((countval & 0x02) && max_size > 2)
17813 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
17814 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17817 if ((countval & 0x01) && max_size > 1)
17819 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
17820 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17827 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
17832 rtx label = ix86_expand_aligntest (count, 16, true);
17835 dest = change_address (destmem, DImode, destptr);
17836 emit_insn (gen_strset (destptr, dest, value));
17837 emit_insn (gen_strset (destptr, dest, value));
17841 dest = change_address (destmem, SImode, destptr);
17842 emit_insn (gen_strset (destptr, dest, value));
17843 emit_insn (gen_strset (destptr, dest, value));
17844 emit_insn (gen_strset (destptr, dest, value));
17845 emit_insn (gen_strset (destptr, dest, value));
17847 emit_label (label);
17848 LABEL_NUSES (label) = 1;
17852 rtx label = ix86_expand_aligntest (count, 8, true);
17855 dest = change_address (destmem, DImode, destptr);
17856 emit_insn (gen_strset (destptr, dest, value));
17860 dest = change_address (destmem, SImode, destptr);
17861 emit_insn (gen_strset (destptr, dest, value));
17862 emit_insn (gen_strset (destptr, dest, value));
17864 emit_label (label);
17865 LABEL_NUSES (label) = 1;
17869 rtx label = ix86_expand_aligntest (count, 4, true);
17870 dest = change_address (destmem, SImode, destptr);
17871 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17872 emit_label (label);
17873 LABEL_NUSES (label) = 1;
17877 rtx label = ix86_expand_aligntest (count, 2, true);
17878 dest = change_address (destmem, HImode, destptr);
17879 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17880 emit_label (label);
17881 LABEL_NUSES (label) = 1;
17885 rtx label = ix86_expand_aligntest (count, 1, true);
17886 dest = change_address (destmem, QImode, destptr);
17887 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17888 emit_label (label);
17889 LABEL_NUSES (label) = 1;
17893 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN,
17894 to DESIRED_ALIGNMENT.  */
17896 expand_movmem_prologue (rtx destmem, rtx srcmem,
17897 rtx destptr, rtx srcptr, rtx count,
17898 int align, int desired_alignment)
17900 if (align <= 1 && desired_alignment > 1)
17902 rtx label = ix86_expand_aligntest (destptr, 1, false);
17903 srcmem = change_address (srcmem, QImode, srcptr);
17904 destmem = change_address (destmem, QImode, destptr);
17905 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17906 ix86_adjust_counter (count, 1);
17907 emit_label (label);
17908 LABEL_NUSES (label) = 1;
17910 if (align <= 2 && desired_alignment > 2)
17912 rtx label = ix86_expand_aligntest (destptr, 2, false);
17913 srcmem = change_address (srcmem, HImode, srcptr);
17914 destmem = change_address (destmem, HImode, destptr);
17915 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17916 ix86_adjust_counter (count, 2);
17917 emit_label (label);
17918 LABEL_NUSES (label) = 1;
17920 if (align <= 4 && desired_alignment > 4)
17922 rtx label = ix86_expand_aligntest (destptr, 4, false);
17923 srcmem = change_address (srcmem, SImode, srcptr);
17924 destmem = change_address (destmem, SImode, destptr);
17925 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17926 ix86_adjust_counter (count, 4);
17927 emit_label (label);
17928 LABEL_NUSES (label) = 1;
17930 gcc_assert (desired_alignment <= 8);
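
/* Illustrative sketch, not part of GCC: the prologue above nudges the
   destination up to the desired alignment by copying 1, 2 and then 4
   bytes, each guarded by a test of the corresponding low address bit.
   A C analogue; names are hypothetical, and the pointer-to-integer
   cast assumes the usual flat address space.  */

static inline void
align_dest_sketch (unsigned char **destp, const unsigned char **srcp,
		   unsigned int *countp, unsigned long desired_align)
{
  unsigned long i, step;
  for (step = 1; step < desired_align; step <<= 1)
    if ((unsigned long) *destp & step)
      {
	for (i = 0; i < step; i++)
	  (*destp)[i] = (*srcp)[i];
	*destp += step;
	*srcp += step;
	*countp -= step;
      }
}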
17933 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
17934 ALIGN_BYTES is how many bytes need to be copied.  */
17936 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
17937 int desired_align, int align_bytes)
17940 rtx src_size, dst_size;
17942 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
17943 if (src_align_bytes >= 0)
17944 src_align_bytes = desired_align - src_align_bytes;
17945 src_size = MEM_SIZE (src);
17946 dst_size = MEM_SIZE (dst);
17947 if (align_bytes & 1)
17949 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
17950 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
17952 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17954 if (align_bytes & 2)
17956 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
17957 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
17958 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
17959 set_mem_align (dst, 2 * BITS_PER_UNIT);
17960 if (src_align_bytes >= 0
17961 && (src_align_bytes & 1) == (align_bytes & 1)
17962 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
17963 set_mem_align (src, 2 * BITS_PER_UNIT);
17965 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17967 if (align_bytes & 4)
17969 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
17970 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
17971 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
17972 set_mem_align (dst, 4 * BITS_PER_UNIT);
17973 if (src_align_bytes >= 0)
17975 unsigned int src_align = 0;
if ((src_align_bytes & 3) == (align_bytes & 3))
src_align = 4;
else if ((src_align_bytes & 1) == (align_bytes & 1))
src_align = 2;
17980 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17981 set_mem_align (src, src_align * BITS_PER_UNIT);
17984 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17986 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
17987 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
17988 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
17989 set_mem_align (dst, desired_align * BITS_PER_UNIT);
17990 if (src_align_bytes >= 0)
17992 unsigned int src_align = 0;
if ((src_align_bytes & 7) == (align_bytes & 7))
src_align = 8;
else if ((src_align_bytes & 3) == (align_bytes & 3))
src_align = 4;
else if ((src_align_bytes & 1) == (align_bytes & 1))
src_align = 2;
17999 if (src_align > (unsigned int) desired_align)
18000 src_align = desired_align;
18001 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
18002 set_mem_align (src, src_align * BITS_PER_UNIT);
18005 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
/* Set enough of DEST to align DEST, known to be aligned by ALIGN,
to DESIRED_ALIGNMENT. */
18015 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
18016 int align, int desired_alignment)
18018 if (align <= 1 && desired_alignment > 1)
18020 rtx label = ix86_expand_aligntest (destptr, 1, false);
18021 destmem = change_address (destmem, QImode, destptr);
18022 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
18023 ix86_adjust_counter (count, 1);
18024 emit_label (label);
18025 LABEL_NUSES (label) = 1;
18027 if (align <= 2 && desired_alignment > 2)
18029 rtx label = ix86_expand_aligntest (destptr, 2, false);
18030 destmem = change_address (destmem, HImode, destptr);
18031 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
18032 ix86_adjust_counter (count, 2);
18033 emit_label (label);
18034 LABEL_NUSES (label) = 1;
18036 if (align <= 4 && desired_alignment > 4)
18038 rtx label = ix86_expand_aligntest (destptr, 4, false);
18039 destmem = change_address (destmem, SImode, destptr);
18040 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
18041 ix86_adjust_counter (count, 4);
18042 emit_label (label);
18043 LABEL_NUSES (label) = 1;
18045 gcc_assert (desired_alignment <= 8);
/* Set enough of DST to align DST, known to be aligned by ALIGN, to
DESIRED_ALIGN. ALIGN_BYTES is how many bytes need to be stored. */
18051 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
18052 int desired_align, int align_bytes)
18055 rtx dst_size = MEM_SIZE (dst);
18056 if (align_bytes & 1)
18058 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
18060 emit_insn (gen_strset (destreg, dst,
18061 gen_lowpart (QImode, value)));
18063 if (align_bytes & 2)
18065 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
18066 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
18067 set_mem_align (dst, 2 * BITS_PER_UNIT);
18069 emit_insn (gen_strset (destreg, dst,
18070 gen_lowpart (HImode, value)));
18072 if (align_bytes & 4)
18074 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
18075 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
18076 set_mem_align (dst, 4 * BITS_PER_UNIT);
18078 emit_insn (gen_strset (destreg, dst,
18079 gen_lowpart (SImode, value)));
18081 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
18082 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
18083 set_mem_align (dst, desired_align * BITS_PER_UNIT);
18085 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18089 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
18090 static enum stringop_alg
18091 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
18092 int *dynamic_check)
18094 const struct stringop_algs * algs;
18095 bool optimize_for_speed;
18096 /* Algorithms using the rep prefix want at least edi and ecx;
18097 additionally, memset wants eax and memcpy wants esi. Don't
18098 consider such algorithms if the user has appropriated those
18099 registers for their own purposes. */
bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
|| (memset
? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
18104 #define ALG_USABLE_P(alg) (rep_prefix_usable \
18105 || (alg != rep_prefix_1_byte \
18106 && alg != rep_prefix_4_byte \
18107 && alg != rep_prefix_8_byte))
18108 const struct processor_costs *cost;
18110 /* Even if the string operation call is cold, we still might spend a lot
18111 of time processing large blocks. */
18112 if (optimize_function_for_size_p (cfun)
18113 || (optimize_insn_for_size_p ()
18114 && expected_size != -1 && expected_size < 256))
18115 optimize_for_speed = false;
18117 optimize_for_speed = true;
18119 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
18121 *dynamic_check = -1;
18123 algs = &cost->memset[TARGET_64BIT != 0];
18125 algs = &cost->memcpy[TARGET_64BIT != 0];
18126 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
18127 return stringop_alg;
18128 /* rep; movq or rep; movl is the smallest variant. */
18129 else if (!optimize_for_speed)
18131 if (!count || (count & 3))
18132 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
18134 return rep_prefix_usable ? rep_prefix_4_byte : loop;
/* Very tiny blocks are best handled via the loop; REP is expensive to set up. */
18138 else if (expected_size != -1 && expected_size < 4)
18139 return loop_1_byte;
18140 else if (expected_size != -1)
18143 enum stringop_alg alg = libcall;
for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18146 /* We get here if the algorithms that were not libcall-based
18147 were rep-prefix based and we are unable to use rep prefixes
18148 based on global register usage. Break out of the loop and
18149 use the heuristic below. */
18150 if (algs->size[i].max == 0)
18152 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
18154 enum stringop_alg candidate = algs->size[i].alg;
18156 if (candidate != libcall && ALG_USABLE_P (candidate))
18158 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
18159 last non-libcall inline algorithm. */
18160 if (TARGET_INLINE_ALL_STRINGOPS)
/* When the current size is best copied by a libcall, but we
are still forced to inline, run the heuristic below
that will pick code for medium-sized blocks. */
18165 if (alg != libcall)
18169 else if (ALG_USABLE_P (candidate))
18173 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
/* When asked to inline the call anyway, try to pick a meaningful choice.
We look for the maximal size of a block that is faster to copy by hand,
and take blocks of at most that size, guessing that the average size
will be roughly half of the maximum.
18180 If this turns out to be bad, we might simply specify the preferred
18181 choice in ix86_costs. */
18182 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18183 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
18186 enum stringop_alg alg;
18188 bool any_alg_usable_p = true;
for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18192 enum stringop_alg candidate = algs->size[i].alg;
18193 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
18195 if (candidate != libcall && candidate
18196 && ALG_USABLE_P (candidate))
18197 max = algs->size[i].max;
18199 /* If there aren't any usable algorithms, then recursing on
18200 smaller sizes isn't going to find anything. Just return the
18201 simple byte-at-a-time copy loop. */
18202 if (!any_alg_usable_p)
18204 /* Pick something reasonable. */
18205 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18206 *dynamic_check = 128;
18207 return loop_1_byte;
18211 alg = decide_alg (count, max / 2, memset, dynamic_check);
18212 gcc_assert (*dynamic_check == -1);
18213 gcc_assert (alg != libcall);
18214 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18215 *dynamic_check = max;
18218 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
18219 #undef ALG_USABLE_P
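/* For instance, when optimizing for size with an unknown or unaligned
COUNT, the code above returns rep_prefix_1_byte whenever the rep
prefix is usable, while a COUNT known to be divisible by 4 selects
rep_prefix_4_byte instead. */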
18222 /* Decide on alignment. We know that the operand is already aligned to ALIGN
18223 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
18225 decide_alignment (int align,
18226 enum stringop_alg alg,
18229 int desired_align = 0;
18233 gcc_unreachable ();
18235 case unrolled_loop:
18236 desired_align = GET_MODE_SIZE (Pmode);
18238 case rep_prefix_8_byte:
18241 case rep_prefix_4_byte:
/* PentiumPro has special logic triggering for 8-byte aligned blocks,
copying a whole cache line at once. */
18244 if (TARGET_PENTIUMPRO)
18249 case rep_prefix_1_byte:
/* PentiumPro has special logic triggering for 8-byte aligned blocks,
copying a whole cache line at once. */
18252 if (TARGET_PENTIUMPRO)
18266 if (desired_align < align)
18267 desired_align = align;
18268 if (expected_size != -1 && expected_size < 4)
18269 desired_align = align;
18270 return desired_align;
18273 /* Return the smallest power of 2 greater than VAL. */
smallest_pow2_greater_than (int val)
{
int ret = 1;
while (ret <= val)
ret <<= 1;
return ret;
}
/* Expand string move (memcpy) operation. Use i386 string operations when
profitable. expand_setmem contains similar code. The code depends upon
architecture, block size and alignment, but always has the same
overall structure:
1) Prologue guard: a conditional that jumps up to the epilogues for small
blocks that can be handled by the epilogue alone. This is faster, but
also needed for correctness, since the prologue assumes the block is
larger than the desired alignment.

The optional dynamic check for size and the libcall for large blocks
are emitted here too, with -minline-stringops-dynamically.

2) Prologue: copy the first few bytes in order to get the destination
aligned to DESIRED_ALIGN. It is emitted only when ALIGN is less than
DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
We emit either a jump tree, on power-of-two sized blocks, or a byte loop.

3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
with the specified algorithm.

4) Epilogue: code copying the tail of the block that is too small to be
handled by the main body (or up to the size guarded by the prologue
guard). */
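/* As a rough illustration (a hand-written sketch, not code GCC emits or
uses; the helper name is made up), the emitted sequence behaves like
the following C, assuming a 4-byte SIZE_NEEDED and a byte-wise
epilogue: */
#if 0
#include <stddef.h>
#include <stdint.h>

static void
movmem_shape (unsigned char *dst, const unsigned char *src, size_t n)
{
  if (n < 4)				/* 1) prologue guard */
    goto epilogue;
  while ((uintptr_t) dst & 3)		/* 2) alignment prologue */
    {
      *dst++ = *src++;
      n--;
    }
  for (; n >= 4; n -= 4)		/* 3) main body, SIZE_NEEDED chunks */
    {
      dst[0] = src[0]; dst[1] = src[1];
      dst[2] = src[2]; dst[3] = src[3];
      dst += 4; src += 4;
    }
 epilogue:				/* 4) epilogue for the tail */
  while (n--)
    *dst++ = *src++;
}
#endif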
18308 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
18309 rtx expected_align_exp, rtx expected_size_exp)
18315 rtx jump_around_label = NULL;
18316 HOST_WIDE_INT align = 1;
18317 unsigned HOST_WIDE_INT count = 0;
18318 HOST_WIDE_INT expected_size = -1;
18319 int size_needed = 0, epilogue_size_needed;
18320 int desired_align = 0, align_bytes = 0;
18321 enum stringop_alg alg;
18323 bool need_zero_guard = false;
18325 if (CONST_INT_P (align_exp))
18326 align = INTVAL (align_exp);
/* i386 can do misaligned access at a reasonably increased cost. */
18328 if (CONST_INT_P (expected_align_exp)
18329 && INTVAL (expected_align_exp) > align)
18330 align = INTVAL (expected_align_exp);
18331 /* ALIGN is the minimum of destination and source alignment, but we care here
18332 just about destination alignment. */
18333 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
18334 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
18336 if (CONST_INT_P (count_exp))
18337 count = expected_size = INTVAL (count_exp);
18338 if (CONST_INT_P (expected_size_exp) && count == 0)
18339 expected_size = INTVAL (expected_size_exp);
18341 /* Make sure we don't need to care about overflow later on. */
18342 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18345 /* Step 0: Decide on preferred algorithm, desired alignment and
18346 size of chunks to be copied by main loop. */
18348 alg = decide_alg (count, expected_size, false, &dynamic_check);
18349 desired_align = decide_alignment (align, alg, expected_size);
18351 if (!TARGET_ALIGN_STRINGOPS)
18352 align = desired_align;
18354 if (alg == libcall)
18356 gcc_assert (alg != no_stringop);
18358 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
18359 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18360 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
18365 gcc_unreachable ();
18367 need_zero_guard = true;
18368 size_needed = GET_MODE_SIZE (Pmode);
18370 case unrolled_loop:
18371 need_zero_guard = true;
18372 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
18374 case rep_prefix_8_byte:
18377 case rep_prefix_4_byte:
18380 case rep_prefix_1_byte:
18384 need_zero_guard = true;
18389 epilogue_size_needed = size_needed;
18391 /* Step 1: Prologue guard. */
18393 /* Alignment code needs count to be in register. */
18394 if (CONST_INT_P (count_exp) && desired_align > align)
18396 if (INTVAL (count_exp) > desired_align
18397 && INTVAL (count_exp) > size_needed)
18400 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18401 if (align_bytes <= 0)
18404 align_bytes = desired_align - align_bytes;
18406 if (align_bytes == 0)
18407 count_exp = force_reg (counter_mode (count_exp), count_exp);
18409 gcc_assert (desired_align >= 1 && align >= 1);
18411 /* Ensure that alignment prologue won't copy past end of block. */
18412 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18414 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
/* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
Make sure it is a power of 2. */
18417 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
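/* For example, SIZE_NEEDED == 16 with ALIGN == DESIRED_ALIGN gives
MAX (15, 0) == 15, rounded up to 16, so that the epilogue mask
EPILOGUE_SIZE_NEEDED - 1 covers every possible tail length. */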
18421 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18423 /* If main algorithm works on QImode, no epilogue is needed.
18424 For small sizes just don't align anything. */
18425 if (size_needed == 1)
18426 desired_align = align;
18433 label = gen_label_rtx ();
18434 emit_cmp_and_jump_insns (count_exp,
18435 GEN_INT (epilogue_size_needed),
18436 LTU, 0, counter_mode (count_exp), 1, label);
18437 if (expected_size == -1 || expected_size < epilogue_size_needed)
18438 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18440 predict_jump (REG_BR_PROB_BASE * 20 / 100);
/* Emit code to decide at runtime whether a library call or inline code
should be used. */
18446 if (dynamic_check != -1)
18448 if (CONST_INT_P (count_exp))
18450 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
18452 emit_block_move_via_libcall (dst, src, count_exp, false);
18453 count_exp = const0_rtx;
18459 rtx hot_label = gen_label_rtx ();
18460 jump_around_label = gen_label_rtx ();
18461 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18462 LEU, 0, GET_MODE (count_exp), 1, hot_label);
18463 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18464 emit_block_move_via_libcall (dst, src, count_exp, false);
18465 emit_jump (jump_around_label);
18466 emit_label (hot_label);
18470 /* Step 2: Alignment prologue. */
18472 if (desired_align > align)
18474 if (align_bytes == 0)
/* Except for the first move in the epilogue, we no longer know
the constant offset in the aliasing info. It doesn't seem worth
the pain to maintain it for the first move, so throw away
the info early. */
18480 src = change_address (src, BLKmode, srcreg);
18481 dst = change_address (dst, BLKmode, destreg);
18482 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
18487 /* If we know how many bytes need to be stored before dst is
18488 sufficiently aligned, maintain aliasing info accurately. */
18489 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
18490 desired_align, align_bytes);
18491 count_exp = plus_constant (count_exp, -align_bytes);
18492 count -= align_bytes;
18494 if (need_zero_guard
18495 && (count < (unsigned HOST_WIDE_INT) size_needed
18496 || (align_bytes == 0
18497 && count < ((unsigned HOST_WIDE_INT) size_needed
18498 + desired_align - align))))
/* It is possible that we copied enough so the main loop will not
execute. */
18502 gcc_assert (size_needed > 1);
18503 if (label == NULL_RTX)
18504 label = gen_label_rtx ();
18505 emit_cmp_and_jump_insns (count_exp,
18506 GEN_INT (size_needed),
18507 LTU, 0, counter_mode (count_exp), 1, label);
18508 if (expected_size == -1
18509 || expected_size < (desired_align - align) / 2 + size_needed)
18510 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18512 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18515 if (label && size_needed == 1)
18517 emit_label (label);
18518 LABEL_NUSES (label) = 1;
18520 epilogue_size_needed = 1;
18522 else if (label == NULL_RTX)
18523 epilogue_size_needed = size_needed;
18525 /* Step 3: Main loop. */
18531 gcc_unreachable ();
18533 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18534 count_exp, QImode, 1, expected_size);
18537 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18538 count_exp, Pmode, 1, expected_size);
18540 case unrolled_loop:
18541 /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
18542 registers for 4 temporaries anyway. */
18543 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18544 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
18547 case rep_prefix_8_byte:
18548 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18551 case rep_prefix_4_byte:
18552 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18555 case rep_prefix_1_byte:
18556 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
/* Properly adjust the offsets of the source and destination memory
for aliasing. */
18561 if (CONST_INT_P (count_exp))
18563 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
18564 (count / size_needed) * size_needed);
18565 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18566 (count / size_needed) * size_needed);
18570 src = change_address (src, BLKmode, srcreg);
18571 dst = change_address (dst, BLKmode, destreg);
18574 /* Step 4: Epilogue to copy the remaining bytes. */
/* When the main loop is done, COUNT_EXP might hold the original count,
while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
bytes. Compensate if needed. */
18583 if (size_needed < epilogue_size_needed)
18586 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18587 GEN_INT (size_needed - 1), count_exp, 1,
18589 if (tmp != count_exp)
18590 emit_move_insn (count_exp, tmp);
18592 emit_label (label);
18593 LABEL_NUSES (label) = 1;
18596 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18597 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
18598 epilogue_size_needed);
18599 if (jump_around_label)
18600 emit_label (jump_around_label);
/* Helper function for memset. For the QImode value 0xXY produce
0xXYXYXYXY of the width specified by MODE. This is essentially
a multiplication by 0x01010101, but we can do slightly better than
synth_mult by unwinding the sequence by hand on CPUs with
slow multiply. */
18610 promote_duplicated_reg (enum machine_mode mode, rtx val)
18612 enum machine_mode valmode = GET_MODE (val);
18614 int nops = mode == DImode ? 3 : 2;
18616 gcc_assert (mode == SImode || mode == DImode);
18617 if (val == const0_rtx)
18618 return copy_to_mode_reg (mode, const0_rtx);
18619 if (CONST_INT_P (val))
HOST_WIDE_INT v = INTVAL (val) & 255;
v |= v << 8;
v |= v << 16;
18625 if (mode == DImode)
18626 v |= (v << 16) << 16;
18627 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
18630 if (valmode == VOIDmode)
18632 if (valmode != QImode)
18633 val = gen_lowpart (QImode, val);
18634 if (mode == QImode)
18636 if (!TARGET_PARTIAL_REG_STALL)
18638 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
18639 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
18640 <= (ix86_cost->shift_const + ix86_cost->add) * nops
18641 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
18643 rtx reg = convert_modes (mode, QImode, val, true);
18644 tmp = promote_duplicated_reg (mode, const1_rtx);
18645 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
18650 rtx reg = convert_modes (mode, QImode, val, true);
18652 if (!TARGET_PARTIAL_REG_STALL)
18653 if (mode == SImode)
18654 emit_insn (gen_movsi_insv_1 (reg, reg));
18656 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
18659 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
18660 NULL, 1, OPTAB_DIRECT);
18662 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18664 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
18665 NULL, 1, OPTAB_DIRECT);
18666 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18667 if (mode == SImode)
18669 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
18670 NULL, 1, OPTAB_DIRECT);
18671 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
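/* Illustratively (a standalone sketch, not part of the expander; the
helper name is made up), for SImode the shift-and-IOR sequence above
computes: */
#if 0
static unsigned int
duplicate_byte (unsigned char b)
{
  unsigned int v = b;	/* 0x000000XY */
  v |= v << 8;		/* 0x0000XYXY */
  v |= v << 16;		/* 0xXYXYXYXY, i.e. b * 0x01010101 */
  return v;
}
#endif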
/* Duplicate the value VAL using promote_duplicated_reg into the maximal
size that will be needed by the main loop copying SIZE_NEEDED chunks
and by the prologue getting alignment from ALIGN to DESIRED_ALIGN. */
18680 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
18685 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
18686 promoted_val = promote_duplicated_reg (DImode, val);
18687 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
18688 promoted_val = promote_duplicated_reg (SImode, val);
18689 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
18690 promoted_val = promote_duplicated_reg (HImode, val);
18692 promoted_val = val;
18694 return promoted_val;
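/* For example, on x86-64 a SIZE_NEEDED of 8 widens VAL to DImode,
a SIZE_NEEDED of 4 widens it only to SImode, and a pure byte
operation (SIZE_NEEDED == 1 with no extra alignment needed) leaves
VAL unpromoted. */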
18697 /* Expand string clear operation (bzero). Use i386 string operations when
18698 profitable. See expand_movmem comment for explanation of individual
18699 steps performed. */
18701 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
18702 rtx expected_align_exp, rtx expected_size_exp)
18707 rtx jump_around_label = NULL;
18708 HOST_WIDE_INT align = 1;
18709 unsigned HOST_WIDE_INT count = 0;
18710 HOST_WIDE_INT expected_size = -1;
18711 int size_needed = 0, epilogue_size_needed;
18712 int desired_align = 0, align_bytes = 0;
18713 enum stringop_alg alg;
18714 rtx promoted_val = NULL;
18715 bool force_loopy_epilogue = false;
18717 bool need_zero_guard = false;
18719 if (CONST_INT_P (align_exp))
18720 align = INTVAL (align_exp);
/* i386 can do misaligned access at a reasonably increased cost. */
18722 if (CONST_INT_P (expected_align_exp)
18723 && INTVAL (expected_align_exp) > align)
18724 align = INTVAL (expected_align_exp);
18725 if (CONST_INT_P (count_exp))
18726 count = expected_size = INTVAL (count_exp);
18727 if (CONST_INT_P (expected_size_exp) && count == 0)
18728 expected_size = INTVAL (expected_size_exp);
18730 /* Make sure we don't need to care about overflow later on. */
18731 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18734 /* Step 0: Decide on preferred algorithm, desired alignment and
18735 size of chunks to be copied by main loop. */
18737 alg = decide_alg (count, expected_size, true, &dynamic_check);
18738 desired_align = decide_alignment (align, alg, expected_size);
18740 if (!TARGET_ALIGN_STRINGOPS)
18741 align = desired_align;
18743 if (alg == libcall)
18745 gcc_assert (alg != no_stringop);
18747 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
18748 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18753 gcc_unreachable ();
18755 need_zero_guard = true;
18756 size_needed = GET_MODE_SIZE (Pmode);
18758 case unrolled_loop:
18759 need_zero_guard = true;
18760 size_needed = GET_MODE_SIZE (Pmode) * 4;
18762 case rep_prefix_8_byte:
18765 case rep_prefix_4_byte:
18768 case rep_prefix_1_byte:
18772 need_zero_guard = true;
18776 epilogue_size_needed = size_needed;
18778 /* Step 1: Prologue guard. */
18780 /* Alignment code needs count to be in register. */
18781 if (CONST_INT_P (count_exp) && desired_align > align)
18783 if (INTVAL (count_exp) > desired_align
18784 && INTVAL (count_exp) > size_needed)
18787 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18788 if (align_bytes <= 0)
18791 align_bytes = desired_align - align_bytes;
18793 if (align_bytes == 0)
18795 enum machine_mode mode = SImode;
18796 if (TARGET_64BIT && (count & ~0xffffffff))
18798 count_exp = force_reg (mode, count_exp);
/* Do the cheap promotion to allow better CSE across the
main loop and epilogue (i.e. one load of the big constant in
front of all code). */
18804 if (CONST_INT_P (val_exp))
18805 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18806 desired_align, align);
18807 /* Ensure that alignment prologue won't copy past end of block. */
18808 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18810 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18811 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18812 Make sure it is power of 2. */
18813 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
/* To improve performance of small blocks, we jump around the VAL
promoting code. This means that if the promoted VAL is not constant,
we might not use it in the epilogue and have to use a byte
loop instead. */
18819 if (epilogue_size_needed > 2 && !promoted_val)
18820 force_loopy_epilogue = true;
18823 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18825 /* If main algorithm works on QImode, no epilogue is needed.
18826 For small sizes just don't align anything. */
18827 if (size_needed == 1)
18828 desired_align = align;
18835 label = gen_label_rtx ();
18836 emit_cmp_and_jump_insns (count_exp,
18837 GEN_INT (epilogue_size_needed),
18838 LTU, 0, counter_mode (count_exp), 1, label);
18839 if (expected_size == -1 || expected_size <= epilogue_size_needed)
18840 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18842 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18845 if (dynamic_check != -1)
18847 rtx hot_label = gen_label_rtx ();
18848 jump_around_label = gen_label_rtx ();
18849 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18850 LEU, 0, counter_mode (count_exp), 1, hot_label);
18851 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18852 set_storage_via_libcall (dst, count_exp, val_exp, false);
18853 emit_jump (jump_around_label);
18854 emit_label (hot_label);
18857 /* Step 2: Alignment prologue. */
/* Do the expensive promotion once we have branched off the small blocks. */
18861 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18862 desired_align, align);
18863 gcc_assert (desired_align >= 1 && align >= 1);
18865 if (desired_align > align)
18867 if (align_bytes == 0)
/* Except for the first move in the epilogue, we no longer know
the constant offset in the aliasing info. It doesn't seem worth
the pain to maintain it for the first move, so throw away
the info early. */
18873 dst = change_address (dst, BLKmode, destreg);
18874 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
18879 /* If we know how many bytes need to be stored before dst is
18880 sufficiently aligned, maintain aliasing info accurately. */
18881 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
18882 desired_align, align_bytes);
18883 count_exp = plus_constant (count_exp, -align_bytes);
18884 count -= align_bytes;
18886 if (need_zero_guard
18887 && (count < (unsigned HOST_WIDE_INT) size_needed
18888 || (align_bytes == 0
18889 && count < ((unsigned HOST_WIDE_INT) size_needed
18890 + desired_align - align))))
/* It is possible that we copied enough so the main loop will not
execute. */
18894 gcc_assert (size_needed > 1);
18895 if (label == NULL_RTX)
18896 label = gen_label_rtx ();
18897 emit_cmp_and_jump_insns (count_exp,
18898 GEN_INT (size_needed),
18899 LTU, 0, counter_mode (count_exp), 1, label);
18900 if (expected_size == -1
18901 || expected_size < (desired_align - align) / 2 + size_needed)
18902 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18904 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18907 if (label && size_needed == 1)
18909 emit_label (label);
18910 LABEL_NUSES (label) = 1;
18912 promoted_val = val_exp;
18913 epilogue_size_needed = 1;
18915 else if (label == NULL_RTX)
18916 epilogue_size_needed = size_needed;
18918 /* Step 3: Main loop. */
18924 gcc_unreachable ();
18926 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18927 count_exp, QImode, 1, expected_size);
18930 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18931 count_exp, Pmode, 1, expected_size);
18933 case unrolled_loop:
18934 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18935 count_exp, Pmode, 4, expected_size);
18937 case rep_prefix_8_byte:
18938 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18941 case rep_prefix_4_byte:
18942 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18945 case rep_prefix_1_byte:
18946 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
/* Properly adjust the offset of the destination memory for aliasing. */
18951 if (CONST_INT_P (count_exp))
18952 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18953 (count / size_needed) * size_needed);
18955 dst = change_address (dst, BLKmode, destreg);
18957 /* Step 4: Epilogue to copy the remaining bytes. */
/* When the main loop is done, COUNT_EXP might hold the original count,
while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
bytes. Compensate if needed. */
18966 if (size_needed < epilogue_size_needed)
18969 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18970 GEN_INT (size_needed - 1), count_exp, 1,
18972 if (tmp != count_exp)
18973 emit_move_insn (count_exp, tmp);
18975 emit_label (label);
18976 LABEL_NUSES (label) = 1;
18979 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18981 if (force_loopy_epilogue)
18982 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
18983 epilogue_size_needed);
18985 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
18986 epilogue_size_needed);
18988 if (jump_around_label)
18989 emit_label (jump_around_label);
/* Expand the appropriate insns for doing strlen if not just doing
repnz; scasb
18996 out = result, initialized with the start address
18997 align_rtx = alignment of the address.
scratch = scratch register, initialized with the start address when
18999 not aligned, otherwise undefined
19001 This is just the body. It needs the initializations mentioned above and
19002 some address computing at the end. These things are done in i386.md. */
19005 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
19009 rtx align_2_label = NULL_RTX;
19010 rtx align_3_label = NULL_RTX;
19011 rtx align_4_label = gen_label_rtx ();
19012 rtx end_0_label = gen_label_rtx ();
19014 rtx tmpreg = gen_reg_rtx (SImode);
19015 rtx scratch = gen_reg_rtx (SImode);
19019 if (CONST_INT_P (align_rtx))
19020 align = INTVAL (align_rtx);
19022 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
19024 /* Is there a known alignment and is it less than 4? */
19027 rtx scratch1 = gen_reg_rtx (Pmode);
19028 emit_move_insn (scratch1, out);
19029 /* Is there a known alignment and is it not 2? */
19032 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
19033 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
19035 /* Leave just the 3 lower bits. */
19036 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
19037 NULL_RTX, 0, OPTAB_WIDEN);
19039 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19040 Pmode, 1, align_4_label);
19041 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
19042 Pmode, 1, align_2_label);
19043 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
19044 Pmode, 1, align_3_label);
/* Since the alignment is 2, we have to check 2 or 0 bytes;
check whether it is aligned to a 4-byte boundary. */
19051 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
19052 NULL_RTX, 0, OPTAB_WIDEN);
19054 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19055 Pmode, 1, align_4_label);
19058 mem = change_address (src, QImode, out);
19060 /* Now compare the bytes. */
/* Compare the first n unaligned bytes on a byte-by-byte basis. */
19063 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
19064 QImode, 1, end_0_label);
19066 /* Increment the address. */
19067 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19069 /* Not needed with an alignment of 2 */
19072 emit_label (align_2_label);
19074 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19077 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19079 emit_label (align_3_label);
19082 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19085 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
/* Generate a loop to check 4 bytes at a time. It is not a good idea to
align this loop: doing so only makes programs huge and does not help
convergence. */
19091 emit_label (align_4_label);
19093 mem = change_address (src, SImode, out);
19094 emit_move_insn (scratch, mem);
19095 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
/* This formula yields a nonzero result iff one of the bytes is zero.
This saves three branches inside the loop and many cycles. */
19100 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
19101 emit_insn (gen_one_cmplsi2 (scratch, scratch));
19102 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
19103 emit_insn (gen_andsi3 (tmpreg, tmpreg,
19104 gen_int_mode (0x80808080, SImode)));
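/* Concretely, with X in SCRATCH, the insns above leave
TMPREG = (X - 0x01010101) & ~X & 0x80808080,
which is nonzero exactly when some byte of X is zero: each zero byte
of X turns on the 0x80 bit of the corresponding byte of TMPREG.
E.g. X = 0x00414243 yields TMPREG = 0x80000000. */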
19105 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
19110 rtx reg = gen_reg_rtx (SImode);
19111 rtx reg2 = gen_reg_rtx (Pmode);
19112 emit_move_insn (reg, tmpreg);
19113 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
19115 /* If zero is not in the first two bytes, move two bytes forward. */
19116 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19117 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19118 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19119 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
19120 gen_rtx_IF_THEN_ELSE (SImode, tmp,
19123 /* Emit lea manually to avoid clobbering of flags. */
19124 emit_insn (gen_rtx_SET (SImode, reg2,
19125 gen_rtx_PLUS (Pmode, out, const2_rtx)));
19127 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19128 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19129 emit_insn (gen_rtx_SET (VOIDmode, out,
19130 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
19136 rtx end_2_label = gen_label_rtx ();
19137 /* Is zero in the first two bytes? */
19139 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19140 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19141 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
19142 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
19143 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
19145 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
19146 JUMP_LABEL (tmp) = end_2_label;
19148 /* Not in the first two. Move two bytes forward. */
19149 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
19150 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
19152 emit_label (end_2_label);
19156 /* Avoid branch in fixing the byte. */
19157 tmpreg = gen_lowpart (QImode, tmpreg);
19158 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
19159 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
19160 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
19161 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), tmp, cmp));
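/* The doubling above moves bit 7 of TMPREG's low byte -- the zero
marker for the earlier byte of the remaining pair -- into the carry
flag, so the sbb subtracts either 4 or 3 from OUT, leaving OUT
pointing exactly at the terminating zero byte. */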
19163 emit_label (end_0_label);
19166 /* Expand strlen. */
19169 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
19171 rtx addr, scratch1, scratch2, scratch3, scratch4;
/* The generic case of the strlen expander is long. Avoid its
expansion unless TARGET_INLINE_ALL_STRINGOPS. */
19176 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19177 && !TARGET_INLINE_ALL_STRINGOPS
19178 && !optimize_insn_for_size_p ()
19179 && (!CONST_INT_P (align) || INTVAL (align) < 4))
19182 addr = force_reg (Pmode, XEXP (src, 0));
19183 scratch1 = gen_reg_rtx (Pmode);
19185 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19186 && !optimize_insn_for_size_p ())
19188 /* Well it seems that some optimizer does not combine a call like
19189 foo(strlen(bar), strlen(bar));
when the move and the subtraction are done here. It does calculate
19191 the length just once when these instructions are done inside of
19192 output_strlen_unroll(). But I think since &bar[strlen(bar)] is
19193 often used and I use one fewer register for the lifetime of
19194 output_strlen_unroll() this is better. */
19196 emit_move_insn (out, addr);
19198 ix86_expand_strlensi_unroll_1 (out, src, align);
19200 /* strlensi_unroll_1 returns the address of the zero at the end of
19201 the string, like memchr(), so compute the length by subtracting
19202 the start address. */
19203 emit_insn ((*ix86_gen_sub3) (out, out, addr));
19209 /* Can't use this if the user has appropriated eax, ecx, or edi. */
19210 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
19213 scratch2 = gen_reg_rtx (Pmode);
19214 scratch3 = gen_reg_rtx (Pmode);
19215 scratch4 = force_reg (Pmode, constm1_rtx);
19217 emit_move_insn (scratch3, addr);
19218 eoschar = force_reg (QImode, eoschar);
19220 src = replace_equiv_address_nv (src, scratch3);
19222 /* If .md starts supporting :P, this can be done in .md. */
19223 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
19224 scratch4), UNSPEC_SCAS);
19225 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
19226 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
19227 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
/* For a given symbol (function), construct code to compute the address
of its PLT entry in the large x86-64 PIC model. */
19235 construct_plt_address (rtx symbol)
19237 rtx tmp = gen_reg_rtx (Pmode);
19238 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
19240 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
19241 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
19243 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
19244 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
19249 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
19251 rtx pop, int sibcall)
19253 rtx use = NULL, call;
19255 if (pop == const0_rtx)
19257 gcc_assert (!TARGET_64BIT || !pop);
19259 if (TARGET_MACHO && !TARGET_64BIT)
19262 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
19263 fnaddr = machopic_indirect_call_target (fnaddr);
19268 /* Static functions and indirect calls don't need the pic register. */
19269 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
19270 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19271 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
19272 use_reg (&use, pic_offset_table_rtx);
19275 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
19277 rtx al = gen_rtx_REG (QImode, AX_REG);
19278 emit_move_insn (al, callarg2);
19279 use_reg (&use, al);
19282 if (ix86_cmodel == CM_LARGE_PIC
19284 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19285 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
19286 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
19288 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
19289 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
19291 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
19292 fnaddr = gen_rtx_MEM (QImode, fnaddr);
19295 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
19297 call = gen_rtx_SET (VOIDmode, retval, call);
19300 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
19301 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
19302 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
19305 && ix86_cfun_abi () == MS_ABI
19306 && (!callarg2 || INTVAL (callarg2) != -2))
/* We need to represent that SI and DI registers are clobbered
by the call. */
19310 static int clobbered_registers[] = {
19311 XMM6_REG, XMM7_REG, XMM8_REG,
19312 XMM9_REG, XMM10_REG, XMM11_REG,
19313 XMM12_REG, XMM13_REG, XMM14_REG,
19314 XMM15_REG, SI_REG, DI_REG
19317 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
19318 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
19319 UNSPEC_MS_TO_SYSV_CALL);
19323 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
19324 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
19327 (SSE_REGNO_P (clobbered_registers[i])
19329 clobbered_registers[i]));
19331 call = gen_rtx_PARALLEL (VOIDmode,
19332 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
19336 call = emit_call_insn (call);
19338 CALL_INSN_FUNCTION_USAGE (call) = use;
19342 /* Clear stack slot assignments remembered from previous functions.
This is called from INIT_EXPANDERS once before RTL is emitted for each
function. */
19346 static struct machine_function *
19347 ix86_init_machine_status (void)
19349 struct machine_function *f;
19351 f = GGC_CNEW (struct machine_function);
19352 f->use_fast_prologue_epilogue_nregs = -1;
19353 f->tls_descriptor_call_expanded_p = 0;
19354 f->call_abi = ix86_abi;
19359 /* Return a MEM corresponding to a stack slot with mode MODE.
19360 Allocate a new slot if necessary.
19362 The RTL for a function can have several slots available: N is
19363 which slot to use. */
19366 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
19368 struct stack_local_entry *s;
19370 gcc_assert (n < MAX_386_STACK_LOCALS);
19372 /* Virtual slot is valid only before vregs are instantiated. */
19373 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
19375 for (s = ix86_stack_locals; s; s = s->next)
19376 if (s->mode == mode && s->n == n)
19377 return copy_rtx (s->rtl);
19379 s = (struct stack_local_entry *)
19380 ggc_alloc (sizeof (struct stack_local_entry));
19383 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
19385 s->next = ix86_stack_locals;
19386 ix86_stack_locals = s;
19390 /* Construct the SYMBOL_REF for the tls_get_addr function. */
19392 static GTY(()) rtx ix86_tls_symbol;
19394 ix86_tls_get_addr (void)
19397 if (!ix86_tls_symbol)
19399 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
19400 (TARGET_ANY_GNU_TLS
19402 ? "___tls_get_addr"
19403 : "__tls_get_addr");
19406 return ix86_tls_symbol;
19409 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
19411 static GTY(()) rtx ix86_tls_module_base_symbol;
19413 ix86_tls_module_base (void)
19416 if (!ix86_tls_module_base_symbol)
19418 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
19419 "_TLS_MODULE_BASE_");
19420 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
19421 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
19424 return ix86_tls_module_base_symbol;
19427 /* Calculate the length of the memory address in the instruction
19428 encoding. Does not include the one-byte modrm, opcode, or prefix. */
19431 memory_address_length (rtx addr)
19433 struct ix86_address parts;
19434 rtx base, index, disp;
19438 if (GET_CODE (addr) == PRE_DEC
19439 || GET_CODE (addr) == POST_INC
19440 || GET_CODE (addr) == PRE_MODIFY
19441 || GET_CODE (addr) == POST_MODIFY)
19444 ok = ix86_decompose_address (addr, &parts);
19447 if (parts.base && GET_CODE (parts.base) == SUBREG)
19448 parts.base = SUBREG_REG (parts.base);
19449 if (parts.index && GET_CODE (parts.index) == SUBREG)
19450 parts.index = SUBREG_REG (parts.index);
19453 index = parts.index;
19458 - esp as the base always wants an index,
19459 - ebp as the base always wants a displacement,
19460 - r12 as the base always wants an index,
19461 - r13 as the base always wants a displacement. */
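/* For example, "movl (%esp), %eax" must be encoded 8b 04 24 with a SIB
byte, because the modrm base encoding for esp means "SIB byte follows";
"movl (%ebp), %eax" is encoded 8b 45 00 with an explicit zero
displacement, because the no-displacement encoding for ebp means plain
disp32 instead. */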
19463 /* Register Indirect. */
19464 if (base && !index && !disp)
/* esp (for its index) and ebp (for its displacement) need
the two-byte modrm form. Similarly for r12 and r13 in 64-bit
mode. */
19470 && (addr == arg_pointer_rtx
19471 || addr == frame_pointer_rtx
19472 || REGNO (addr) == SP_REG
19473 || REGNO (addr) == BP_REG
19474 || REGNO (addr) == R12_REG
19475 || REGNO (addr) == R13_REG))
/* Direct Addressing. In 64-bit mode mod 00 r/m 5
is not disp32, but disp32(%rip), so for disp32 a
SIB byte is needed, unless print_operand_address
optimizes it into disp32(%rip) or (%rip) is implied
by UNSPEC. */
19484 else if (disp && !base && !index)
19491 if (GET_CODE (disp) == CONST)
19492 symbol = XEXP (disp, 0);
19493 if (GET_CODE (symbol) == PLUS
19494 && CONST_INT_P (XEXP (symbol, 1)))
19495 symbol = XEXP (symbol, 0);
19497 if (GET_CODE (symbol) != LABEL_REF
19498 && (GET_CODE (symbol) != SYMBOL_REF
19499 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
19500 && (GET_CODE (symbol) != UNSPEC
19501 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
19502 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
19509 /* Find the length of the displacement constant. */
19512 if (base && satisfies_constraint_K (disp))
19517 /* ebp always wants a displacement. Similarly r13. */
19518 else if (base && REG_P (base)
19519 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
19522 /* An index requires the two-byte modrm form.... */
19524 /* ...like esp (or r12), which always wants an index. */
19525 || base == arg_pointer_rtx
19526 || base == frame_pointer_rtx
19527 || (base && REG_P (base)
19528 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
/* Compute the default value for the "length_immediate" attribute. When
SHORTFORM is set, expect that the insn has an 8-bit immediate alternative. */
19548 ix86_attr_length_immediate_default (rtx insn, int shortform)
19552 extract_insn_cached (insn);
19553 for (i = recog_data.n_operands - 1; i >= 0; --i)
19554 if (CONSTANT_P (recog_data.operand[i]))
19556 enum attr_mode mode = get_attr_mode (insn);
19559 if (shortform && CONST_INT_P (recog_data.operand[i]))
19561 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
19568 ival = trunc_int_for_mode (ival, HImode);
19571 ival = trunc_int_for_mode (ival, SImode);
19576 if (IN_RANGE (ival, -128, 127))
/* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
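/* E.g. "addq $0x12345678, %rax" carries a 4-byte immediate that the
processor sign-extends to 64 bits; only the mov-immediate (movabs)
form takes a full 8-byte immediate. */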
19598 fatal_insn ("unknown insn mode", insn);
19603 /* Compute default value for "length_address" attribute. */
19605 ix86_attr_length_address_default (rtx insn)
19609 if (get_attr_type (insn) == TYPE_LEA)
19611 rtx set = PATTERN (insn), addr;
19613 if (GET_CODE (set) == PARALLEL)
19614 set = XVECEXP (set, 0, 0);
19616 gcc_assert (GET_CODE (set) == SET);
19618 addr = SET_SRC (set);
19619 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
19621 if (GET_CODE (addr) == ZERO_EXTEND)
19622 addr = XEXP (addr, 0);
19623 if (GET_CODE (addr) == SUBREG)
19624 addr = SUBREG_REG (addr);
19627 return memory_address_length (addr);
19630 extract_insn_cached (insn);
19631 for (i = recog_data.n_operands - 1; i >= 0; --i)
19632 if (MEM_P (recog_data.operand[i]))
19634 constrain_operands_cached (reload_completed);
19635 if (which_alternative != -1)
19637 const char *constraints = recog_data.constraints[i];
19638 int alt = which_alternative;
19640 while (*constraints == '=' || *constraints == '+')
19643 while (*constraints++ != ',')
19645 /* Skip ignored operands. */
19646 if (*constraints == 'X')
19649 return memory_address_length (XEXP (recog_data.operand[i], 0));
/* Compute the default value for the "length_vex" attribute. It includes
the 2- or 3-byte VEX prefix and 1 opcode byte. */
19658 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
/* Only the 0f opcode can use the 2-byte VEX prefix, and the VEX W bit
needs the 3-byte VEX prefix. */
19665 if (!has_0f_opcode || has_vex_w)
/* We can always use the 2-byte VEX prefix in 32-bit mode. */
19672 extract_insn_cached (insn);
19674 for (i = recog_data.n_operands - 1; i >= 0; --i)
19675 if (REG_P (recog_data.operand[i]))
19677 /* REX.W bit uses 3 byte VEX prefix. */
19678 if (GET_MODE (recog_data.operand[i]) == DImode
19679 && GENERAL_REG_P (recog_data.operand[i]))
19684 /* REX.X or REX.B bits use 3 byte VEX prefix. */
19685 if (MEM_P (recog_data.operand[i])
19686 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
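/* E.g. "vaddps %xmm1, %xmm2, %xmm3" fits the 2-byte (C5) prefix, while
an address using %r8-%r15 as base or index (REX.X or REX.B) or a
DImode general-register operand (REX.W) forces the 3-byte (C4) form. */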
19693 /* Return the maximum number of instructions a cpu can issue. */
19696 ix86_issue_rate (void)
19700 case PROCESSOR_PENTIUM:
19701 case PROCESSOR_ATOM:
19705 case PROCESSOR_PENTIUMPRO:
19706 case PROCESSOR_PENTIUM4:
19707 case PROCESSOR_ATHLON:
19709 case PROCESSOR_AMDFAM10:
19710 case PROCESSOR_NOCONA:
19711 case PROCESSOR_GENERIC32:
19712 case PROCESSOR_GENERIC64:
19715 case PROCESSOR_CORE2:
/* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
by DEP_INSN and nothing else set by DEP_INSN. */
19727 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
19731 /* Simplify the test for uninteresting insns. */
19732 if (insn_type != TYPE_SETCC
19733 && insn_type != TYPE_ICMOV
19734 && insn_type != TYPE_FCMOV
19735 && insn_type != TYPE_IBR)
19738 if ((set = single_set (dep_insn)) != 0)
19740 set = SET_DEST (set);
19743 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
19744 && XVECLEN (PATTERN (dep_insn), 0) == 2
19745 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
19746 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
19748 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
19754 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
19757 /* This test is true if the dependent insn reads the flags but
19758 not any other potentially set register. */
19759 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
19762 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
/* Return true iff USE_INSN has a memory address with operands set by
SET_INSN. */
19772 ix86_agi_dependent (rtx set_insn, rtx use_insn)
19775 extract_insn_cached (use_insn);
19776 for (i = recog_data.n_operands - 1; i >= 0; --i)
19777 if (MEM_P (recog_data.operand[i]))
19779 rtx addr = XEXP (recog_data.operand[i], 0);
19780 return modified_in_p (addr, set_insn) != 0;
19786 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
19788 enum attr_type insn_type, dep_insn_type;
19789 enum attr_memory memory;
19791 int dep_insn_code_number;
19793 /* Anti and output dependencies have zero cost on all CPUs. */
19794 if (REG_NOTE_KIND (link) != 0)
19797 dep_insn_code_number = recog_memoized (dep_insn);
19799 /* If we can't recognize the insns, we can't really do anything. */
19800 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
19803 insn_type = get_attr_type (insn);
19804 dep_insn_type = get_attr_type (dep_insn);
19808 case PROCESSOR_PENTIUM:
19809 /* Address Generation Interlock adds a cycle of latency. */
19810 if (insn_type == TYPE_LEA)
19812 rtx addr = PATTERN (insn);
19814 if (GET_CODE (addr) == PARALLEL)
19815 addr = XVECEXP (addr, 0, 0);
19817 gcc_assert (GET_CODE (addr) == SET);
19819 addr = SET_SRC (addr);
19820 if (modified_in_p (addr, dep_insn))
19823 else if (ix86_agi_dependent (dep_insn, insn))
19826 /* ??? Compares pair with jump/setcc. */
19827 if (ix86_flags_dependent (insn, dep_insn, insn_type))
/* Floating point stores require the value to be ready one cycle earlier. */
19831 if (insn_type == TYPE_FMOV
19832 && get_attr_memory (insn) == MEMORY_STORE
19833 && !ix86_agi_dependent (dep_insn, insn))
19837 case PROCESSOR_PENTIUMPRO:
19838 memory = get_attr_memory (insn);
19840 /* INT->FP conversion is expensive. */
19841 if (get_attr_fp_int_src (dep_insn))
19844 /* There is one cycle extra latency between an FP op and a store. */
19845 if (insn_type == TYPE_FMOV
19846 && (set = single_set (dep_insn)) != NULL_RTX
19847 && (set2 = single_set (insn)) != NULL_RTX
19848 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
19849 && MEM_P (SET_DEST (set2)))
/* Show the ability of the reorder buffer to hide the latency of a load
by executing it in parallel with the previous instruction when the
previous instruction is not needed to compute the address. */
19855 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19856 && !ix86_agi_dependent (dep_insn, insn))
/* Claim moves take one cycle, as the core can issue one load
at a time and the next load can start a cycle later. */
19860 if (dep_insn_type == TYPE_IMOV
19861 || dep_insn_type == TYPE_FMOV)
19869 memory = get_attr_memory (insn);
19871 /* The esp dependency is resolved before the instruction is really
19873 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
19874 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
19877 /* INT->FP conversion is expensive. */
19878 if (get_attr_fp_int_src (dep_insn))
/* Show the ability of the reorder buffer to hide the latency of a load
by executing it in parallel with the previous instruction when the
previous instruction is not needed to compute the address. */
19884 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19885 && !ix86_agi_dependent (dep_insn, insn))
/* Claim moves take one cycle, as the core can issue one load
at a time and the next load can start a cycle later. */
19889 if (dep_insn_type == TYPE_IMOV
19890 || dep_insn_type == TYPE_FMOV)
19899 case PROCESSOR_ATHLON:
19901 case PROCESSOR_AMDFAM10:
19902 case PROCESSOR_ATOM:
19903 case PROCESSOR_GENERIC32:
19904 case PROCESSOR_GENERIC64:
19905 memory = get_attr_memory (insn);
/* Show the ability of the reorder buffer to hide the latency of a load
by executing it in parallel with the previous instruction when the
previous instruction is not needed to compute the address. */
19910 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19911 && !ix86_agi_dependent (dep_insn, insn))
19913 enum attr_unit unit = get_attr_unit (insn);
/* Because of the difference between the lengths of the integer and
floating unit pipeline preparation stages, the memory operands
for floating point are cheaper.
??? For Athlon the difference is most probably 2. */
19921 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
19924 loadcost = TARGET_ATHLON ? 2 : 0;
19926 if (cost >= loadcost)
/* How many alternative schedules to try. This should be as wide as the
scheduling freedom in the DFA, but no wider. Making this value too
large results in extra work for the scheduler. */
19944 ia32_multipass_dfa_lookahead (void)
19948 case PROCESSOR_PENTIUM:
19951 case PROCESSOR_PENTIUMPRO:
19961 /* Compute the alignment given to a constant that is being placed in memory.
EXP is the constant and ALIGN is the alignment that the object would
ordinarily have. The value of this function is used instead of that
alignment to align the object. */
19968 ix86_constant_alignment (tree exp, int align)
19970 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
19971 || TREE_CODE (exp) == INTEGER_CST)
19973 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
19975 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
19978 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
19979 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
19980 return BITS_PER_WORD;
19985 /* Compute the alignment for a static variable.
19986 TYPE is the data type, and ALIGN is the alignment that
19987 the object would ordinarily have. The value of this function is used
19988 instead of that alignment to align the object. */
19991 ix86_data_alignment (tree type, int align)
19993 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
19995 if (AGGREGATE_TYPE_P (type)
19996 && TYPE_SIZE (type)
19997 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19998 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
19999 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
20000 && align < max_align)
/* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
to a 16-byte boundary. */
20007 if (AGGREGATE_TYPE_P (type)
20008 && TYPE_SIZE (type)
20009 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20010 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
20011 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20015 if (TREE_CODE (type) == ARRAY_TYPE)
20017 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20019 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20022 else if (TREE_CODE (type) == COMPLEX_TYPE)
20025 if (TYPE_MODE (type) == DCmode && align < 64)
20027 if ((TYPE_MODE (type) == XCmode
20028 || TYPE_MODE (type) == TCmode) && align < 128)
20031 else if ((TREE_CODE (type) == RECORD_TYPE
20032 || TREE_CODE (type) == UNION_TYPE
20033 || TREE_CODE (type) == QUAL_UNION_TYPE)
20034 && TYPE_FIELDS (type))
20036 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20038 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20041 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20042 || TREE_CODE (type) == INTEGER_TYPE)
20044 if (TYPE_MODE (type) == DFmode && align < 64)
20046 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
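/* A minimal illustration of the rules above (hypothetical user code, not
   part of GCC): under -m64,

     static double big[4];     32 bytes >= 16, so the ABI rule yields
			       128-bit alignment and movapd may address it
     static double one;        8 bytes < 16, keeps DFmode's 64-bit alignment

   i.e. ix86_data_alignment returns 128 for BIG and 64 for ONE.  */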
/* Compute the alignment for a local variable or a stack slot.  EXP is
   the data type or decl itself, MODE is the widest mode available and
   ALIGN is the alignment that the object would ordinarily have.  The
   value of this macro is used instead of that alignment to align the
   object.  */

unsigned int
ix86_local_alignment (tree exp, enum machine_mode mode,
		      unsigned int align)
{
  tree type, decl;

  if (exp && DECL_P (exp))
    {
      type = TREE_TYPE (exp);
      decl = exp;
    }
  else
    {
      type = exp;
      decl = NULL;
    }

  /* Don't do dynamic stack realignment for long long objects with
     -mpreferred-stack-boundary=2.  */
  if (!TARGET_64BIT
      && align == 64
      && ix86_preferred_stack_boundary < 64
      && (mode == DImode || (type && TYPE_MODE (type) == DImode))
      && (!type || !TYPE_USER_ALIGN (type))
      && (!decl || !DECL_USER_ALIGN (decl)))
    align = 32;

  /* If TYPE is NULL, we are allocating a stack slot for a caller-save
     register in MODE.  We will return the largest alignment of XF
     and DF.  */
  if (!type)
    {
      if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
	align = GET_MODE_ALIGNMENT (DFmode);
      return align;
    }

  /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to a 16-byte boundary.  Exact wording is:

     An array uses the same alignment as its elements, except that a local or
     global array variable of length at least 16 bytes or
     a C99 variable-length array variable always has alignment of at least 16 bytes.

     This was added to allow use of aligned SSE instructions on arrays.  The
     rule is meant for static storage (where the compiler cannot do the
     analysis by itself).  We follow it for automatic variables only when
     convenient; we fully control everything in the function being compiled,
     and functions from other units cannot rely on the alignment.

     Exclude the va_list type.  It is the common case of a local array where
     we cannot benefit from the alignment.  */
  if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
      && TARGET_SSE)
    {
      if (AGGREGATE_TYPE_P (type)
	  && (TYPE_MAIN_VARIANT (type)
	      != TYPE_MAIN_VARIANT (va_list_type_node))
	  && TYPE_SIZE (type)
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
	      || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
	return 128;
    }
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
	return 128;
    }
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      if (TYPE_MODE (type) == DCmode && align < 64)
	return 64;
      if ((TYPE_MODE (type) == XCmode
	   || TYPE_MODE (type) == TCmode) && align < 128)
	return 128;
    }
  else if ((TREE_CODE (type) == RECORD_TYPE
	    || TREE_CODE (type) == UNION_TYPE
	    || TREE_CODE (type) == QUAL_UNION_TYPE)
	   && TYPE_FIELDS (type))
    {
      if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
	return 128;
    }
  else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
	   || TREE_CODE (type) == INTEGER_TYPE)
    {
      if (TYPE_MODE (type) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
	return 128;
    }
  return align;
}
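/* Illustration (hypothetical user code): compiled with -m64 -O2 and SSE
   enabled, the automatic array below is given 128-bit stack alignment by
   the ABI rule above, while a va_list local would be left alone:

     void f (double *out)
     {
       double buf[4];          32 bytes, so ix86_local_alignment gives 128
       buf[0] = out[0];
       out[1] = buf[0];
     }
*/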
/* Compute the minimum required alignment for dynamic stack realignment
   purposes for a local variable, parameter or a stack slot.  EXP is
   the data type or decl itself, MODE is its mode and ALIGN is the
   alignment that the object would ordinarily have.  */

unsigned int
ix86_minimum_alignment (tree exp, enum machine_mode mode,
			unsigned int align)
{
  tree type, decl;

  if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
    return align;

  if (exp && DECL_P (exp))
    {
      type = TREE_TYPE (exp);
      decl = exp;
    }
  else
    {
      type = exp;
      decl = NULL;
    }

  /* Don't do dynamic stack realignment for long long objects with
     -mpreferred-stack-boundary=2.  */
  if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
      && (!type || !TYPE_USER_ALIGN (type))
      && (!decl || !DECL_USER_ALIGN (decl)))
    return 32;

  return align;
}
/* Find a location for the static chain incoming to a nested function.
   This is a register, unless all free registers are used by arguments.  */

static rtx
ix86_static_chain (const_tree fndecl, bool incoming_p)
{
  unsigned regno;

  if (!DECL_STATIC_CHAIN (fndecl))
    return NULL;

  if (TARGET_64BIT)
    {
      /* We always use R10 in 64-bit mode.  */
      regno = R10_REG;
    }
  else
    {
      tree fntype;

      /* By default in 32-bit mode we use ECX to pass the static chain.  */
      regno = CX_REG;

      fntype = TREE_TYPE (fndecl);
      if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
	{
	  /* Fastcall functions use ecx/edx for arguments, which leaves
	     us with EAX for the static chain.  */
	  regno = AX_REG;
	}
      else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
	{
	  /* Thiscall functions use ecx for arguments, which leaves
	     us with EAX for the static chain.  */
	  regno = AX_REG;
	}
      else if (ix86_function_regparm (fntype, fndecl) == 3)
	{
	  /* For regparm 3, we have no free call-clobbered registers in
	     which to store the static chain.  In order to implement this,
	     we have the trampoline push the static chain to the stack.
	     However, we can't push a value below the return address when
	     we call the nested function directly, so we have to use an
	     alternate entry point.  For this we use ESI, and have the
	     alternate entry point push ESI, so that things appear the
	     same once we're executing the nested function.  */
	  if (incoming_p)
	    {
	      if (fndecl == current_function_decl)
		ix86_static_chain_on_stack = true;
	      return gen_frame_mem (SImode,
				    plus_constant (arg_pointer_rtx, -8));
	    }
	  regno = SI_REG;
	}
    }

  return gen_rtx_REG (Pmode, regno);
}
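/* Illustration (hypothetical user code, not part of GCC; disabled so this
   file still compiles): taking the address of a nested function forces a
   trampoline whose static chain register is the one chosen above --
   R10 on x86-64, normally ECX for ia32.  */
#if 0
extern int printf (const char *, ...);

static void
call_twice (void (*fn) (void))
{
  fn ();
  fn ();
}

int
main (void)
{
  int counter = 0;
  void bump (void) { counter++; }	/* Nested: refers to COUNTER.  */

  call_twice (bump);			/* Address taken => trampoline.  */
  printf ("%d\n", counter);		/* Prints 2.  */
  return 0;
}
#endif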
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNDECL is the decl of the target address; M_TRAMP is a MEM for
   the trampoline, and CHAIN_VALUE is an RTX for the static chain
   to be passed to the target function.  */

static void
ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx mem, fnaddr;

  fnaddr = XEXP (DECL_RTL (fndecl), 0);

  if (!TARGET_64BIT)
    {
      rtx disp, chain;
      int opcode;

      /* Depending on the static chain location, either load a register
	 with a constant, or push the constant to the stack.  All of the
	 instructions are the same size.  */
      chain = ix86_static_chain (fndecl, true);
      if (REG_P (chain))
	{
	  if (REGNO (chain) == CX_REG)
	    opcode = 0xb9;
	  else if (REGNO (chain) == AX_REG)
	    opcode = 0xb8;
	  else
	    gcc_unreachable ();
	}
      else
	opcode = 0x68;

      mem = adjust_address (m_tramp, QImode, 0);
      emit_move_insn (mem, gen_int_mode (opcode, QImode));

      mem = adjust_address (m_tramp, SImode, 1);
      emit_move_insn (mem, chain_value);

      /* Compute offset from the end of the jmp to the target function.
	 In the case in which the trampoline stores the static chain on
	 the stack, we need to skip the first insn which pushes the
	 (call-saved) register static chain; this push is 1 byte.  */
      disp = expand_binop (SImode, sub_optab, fnaddr,
			   plus_constant (XEXP (m_tramp, 0),
					  MEM_P (chain) ? 9 : 10),
			   NULL_RTX, 1, OPTAB_DIRECT);

      mem = adjust_address (m_tramp, QImode, 5);
      emit_move_insn (mem, gen_int_mode (0xe9, QImode));

      mem = adjust_address (m_tramp, SImode, 6);
      emit_move_insn (mem, disp);
    }
  else
    {
      int offset = 0;

      /* Load the function address to r11.  Try to load the address using
	 the shorter movl instead of movabs.  We may want to support
	 movq for kernel mode, but the kernel does not use trampolines
	 at the moment.  */
      if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
	{
	  fnaddr = copy_to_mode_reg (DImode, fnaddr);

	  mem = adjust_address (m_tramp, HImode, offset);
	  emit_move_insn (mem, gen_int_mode (0xbb41, HImode));

	  mem = adjust_address (m_tramp, SImode, offset + 2);
	  emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
	  offset += 6;
	}
      else
	{
	  mem = adjust_address (m_tramp, HImode, offset);
	  emit_move_insn (mem, gen_int_mode (0xbb49, HImode));

	  mem = adjust_address (m_tramp, DImode, offset + 2);
	  emit_move_insn (mem, fnaddr);
	  offset += 10;
	}

      /* Load the static chain using movabs to r10.  */
      mem = adjust_address (m_tramp, HImode, offset);
      emit_move_insn (mem, gen_int_mode (0xba49, HImode));

      mem = adjust_address (m_tramp, DImode, offset + 2);
      emit_move_insn (mem, chain_value);
      offset += 10;

      /* Jump to r11; the last (unused) byte is a nop, only there to
	 pad the write out to a single 32-bit store.  */
      mem = adjust_address (m_tramp, SImode, offset);
      emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
      offset += 4;

      gcc_assert (offset <= TRAMPOLINE_SIZE);
    }

#ifdef ENABLE_EXECUTE_STACK
#ifdef CHECK_EXECUTE_STACK_ENABLED
  if (CHECK_EXECUTE_STACK_ENABLED)
#endif
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
		     LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
}
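/* For reference, the 64-bit trampoline written above decodes (in the
   movabs case) byte for byte as:

     offset  bytes          insn
	  0  49 bb imm64    movabs $FNADDR, %r11
	 10  49 ba imm64    movabs $CHAIN_VALUE, %r10
	 20  49 ff e3 90    rex.WB jmp *%r11; nop

   which is exactly what the little-endian stores of 0xbb49, 0xba49 and
   0x90e3ff49 produce; the trailing nop only pads the final write to a
   full 32-bit store.  */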
/* The following file contains several enumerations and data structures
   built from the definitions in i386-builtin-types.def.  */

#include "i386-builtin-types.inc"

/* Table for the ix86 builtin non-function types.  */
static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];

/* Retrieve an element from the above table, building some of
   the types lazily.  */

static tree
ix86_get_builtin_type (enum ix86_builtin_type tcode)
{
  unsigned int index;
  tree type, itype;

  gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));

  type = ix86_builtin_type_tab[(int) tcode];
  if (type != NULL)
    return type;

  gcc_assert (tcode > IX86_BT_LAST_PRIM);
  if (tcode <= IX86_BT_LAST_VECT)
    {
      enum machine_mode mode;

      index = tcode - IX86_BT_LAST_PRIM - 1;
      itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
      mode = ix86_builtin_type_vect_mode[index];

      type = build_vector_type_for_mode (itype, mode);
    }
  else
    {
      int quals;

      index = tcode - IX86_BT_LAST_VECT - 1;
      if (tcode <= IX86_BT_LAST_PTR)
	quals = TYPE_UNQUALIFIED;
      else
	quals = TYPE_QUAL_CONST;

      itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
      if (quals != TYPE_UNQUALIFIED)
	itype = build_qualified_type (itype, quals);

      type = build_pointer_type (itype);
    }

  ix86_builtin_type_tab[(int) tcode] = type;
  return type;
}
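/* A sketch of a lookup (enum names as generated into
   i386-builtin-types.inc from i386-builtin-types.def; illustrative only):

     tree v4sf = ix86_get_builtin_type (IX86_BT_V4SF);
	 recurses for the FLOAT base type, then caches the result of
	 build_vector_type_for_mode (float_type, V4SFmode)
     tree pcf = ix86_get_builtin_type (IX86_BT_PCFLOAT);
	 builds the pointer type "const float *" lazily

   so later lookups of the same code are a single table load.  */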
/* Table for the ix86 builtin function types.  */
static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];

/* Retrieve an element from the above table, building some of
   the types lazily.  */

static tree
ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
{
  tree type;

  gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));

  type = ix86_builtin_func_type_tab[(int) tcode];
  if (type != NULL)
    return type;

  if (tcode <= IX86_BT_LAST_FUNC)
    {
      unsigned start = ix86_builtin_func_start[(int) tcode];
      unsigned after = ix86_builtin_func_start[(int) tcode + 1];
      tree rtype, atype, args = void_list_node;
      unsigned i;

      rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
      for (i = after - 1; i > start; --i)
	{
	  atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
	  args = tree_cons (NULL, atype, args);
	}

      type = build_function_type (rtype, args);
    }
  else
    {
      unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
      enum ix86_builtin_func_type icode;

      icode = ix86_builtin_func_alias_base[index];
      type = ix86_get_builtin_func_type (icode);
    }

  ix86_builtin_func_type_tab[(int) tcode] = type;
  return type;
}
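/* E.g. the V4SF_FTYPE_V4SF_V4SF code used by many entries in the tables
   below is materialized here on first use as the function type
   "V4SF (V4SF, V4SF)", walking its argument list back to front so the
   tree_cons chain ends in void_list_node; alias codes past
   IX86_BT_LAST_FUNC simply share the type of their base code.  */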
/* Codes for all the SSE/MMX builtins.  */
enum ix86_builtins
{
20468 IX86_BUILTIN_ADDPS,
20469 IX86_BUILTIN_ADDSS,
20470 IX86_BUILTIN_DIVPS,
20471 IX86_BUILTIN_DIVSS,
20472 IX86_BUILTIN_MULPS,
20473 IX86_BUILTIN_MULSS,
20474 IX86_BUILTIN_SUBPS,
20475 IX86_BUILTIN_SUBSS,
20477 IX86_BUILTIN_CMPEQPS,
20478 IX86_BUILTIN_CMPLTPS,
20479 IX86_BUILTIN_CMPLEPS,
20480 IX86_BUILTIN_CMPGTPS,
20481 IX86_BUILTIN_CMPGEPS,
20482 IX86_BUILTIN_CMPNEQPS,
20483 IX86_BUILTIN_CMPNLTPS,
20484 IX86_BUILTIN_CMPNLEPS,
20485 IX86_BUILTIN_CMPNGTPS,
20486 IX86_BUILTIN_CMPNGEPS,
20487 IX86_BUILTIN_CMPORDPS,
20488 IX86_BUILTIN_CMPUNORDPS,
20489 IX86_BUILTIN_CMPEQSS,
20490 IX86_BUILTIN_CMPLTSS,
20491 IX86_BUILTIN_CMPLESS,
20492 IX86_BUILTIN_CMPNEQSS,
20493 IX86_BUILTIN_CMPNLTSS,
20494 IX86_BUILTIN_CMPNLESS,
20495 IX86_BUILTIN_CMPNGTSS,
20496 IX86_BUILTIN_CMPNGESS,
20497 IX86_BUILTIN_CMPORDSS,
20498 IX86_BUILTIN_CMPUNORDSS,
20500 IX86_BUILTIN_COMIEQSS,
20501 IX86_BUILTIN_COMILTSS,
20502 IX86_BUILTIN_COMILESS,
20503 IX86_BUILTIN_COMIGTSS,
20504 IX86_BUILTIN_COMIGESS,
20505 IX86_BUILTIN_COMINEQSS,
20506 IX86_BUILTIN_UCOMIEQSS,
20507 IX86_BUILTIN_UCOMILTSS,
20508 IX86_BUILTIN_UCOMILESS,
20509 IX86_BUILTIN_UCOMIGTSS,
20510 IX86_BUILTIN_UCOMIGESS,
20511 IX86_BUILTIN_UCOMINEQSS,
20513 IX86_BUILTIN_CVTPI2PS,
20514 IX86_BUILTIN_CVTPS2PI,
20515 IX86_BUILTIN_CVTSI2SS,
20516 IX86_BUILTIN_CVTSI642SS,
20517 IX86_BUILTIN_CVTSS2SI,
20518 IX86_BUILTIN_CVTSS2SI64,
20519 IX86_BUILTIN_CVTTPS2PI,
20520 IX86_BUILTIN_CVTTSS2SI,
20521 IX86_BUILTIN_CVTTSS2SI64,
20523 IX86_BUILTIN_MAXPS,
20524 IX86_BUILTIN_MAXSS,
20525 IX86_BUILTIN_MINPS,
20526 IX86_BUILTIN_MINSS,
20528 IX86_BUILTIN_LOADUPS,
20529 IX86_BUILTIN_STOREUPS,
20530 IX86_BUILTIN_MOVSS,
20532 IX86_BUILTIN_MOVHLPS,
20533 IX86_BUILTIN_MOVLHPS,
20534 IX86_BUILTIN_LOADHPS,
20535 IX86_BUILTIN_LOADLPS,
20536 IX86_BUILTIN_STOREHPS,
20537 IX86_BUILTIN_STORELPS,
20539 IX86_BUILTIN_MASKMOVQ,
20540 IX86_BUILTIN_MOVMSKPS,
20541 IX86_BUILTIN_PMOVMSKB,
20543 IX86_BUILTIN_MOVNTPS,
20544 IX86_BUILTIN_MOVNTQ,
20546 IX86_BUILTIN_LOADDQU,
20547 IX86_BUILTIN_STOREDQU,
20549 IX86_BUILTIN_PACKSSWB,
20550 IX86_BUILTIN_PACKSSDW,
20551 IX86_BUILTIN_PACKUSWB,
20553 IX86_BUILTIN_PADDB,
20554 IX86_BUILTIN_PADDW,
20555 IX86_BUILTIN_PADDD,
20556 IX86_BUILTIN_PADDQ,
20557 IX86_BUILTIN_PADDSB,
20558 IX86_BUILTIN_PADDSW,
20559 IX86_BUILTIN_PADDUSB,
20560 IX86_BUILTIN_PADDUSW,
20561 IX86_BUILTIN_PSUBB,
20562 IX86_BUILTIN_PSUBW,
20563 IX86_BUILTIN_PSUBD,
20564 IX86_BUILTIN_PSUBQ,
20565 IX86_BUILTIN_PSUBSB,
20566 IX86_BUILTIN_PSUBSW,
20567 IX86_BUILTIN_PSUBUSB,
20568 IX86_BUILTIN_PSUBUSW,
20571 IX86_BUILTIN_PANDN,
20575 IX86_BUILTIN_PAVGB,
20576 IX86_BUILTIN_PAVGW,
20578 IX86_BUILTIN_PCMPEQB,
20579 IX86_BUILTIN_PCMPEQW,
20580 IX86_BUILTIN_PCMPEQD,
20581 IX86_BUILTIN_PCMPGTB,
20582 IX86_BUILTIN_PCMPGTW,
20583 IX86_BUILTIN_PCMPGTD,
20585 IX86_BUILTIN_PMADDWD,
20587 IX86_BUILTIN_PMAXSW,
20588 IX86_BUILTIN_PMAXUB,
20589 IX86_BUILTIN_PMINSW,
20590 IX86_BUILTIN_PMINUB,
20592 IX86_BUILTIN_PMULHUW,
20593 IX86_BUILTIN_PMULHW,
20594 IX86_BUILTIN_PMULLW,
20596 IX86_BUILTIN_PSADBW,
20597 IX86_BUILTIN_PSHUFW,
20599 IX86_BUILTIN_PSLLW,
20600 IX86_BUILTIN_PSLLD,
20601 IX86_BUILTIN_PSLLQ,
20602 IX86_BUILTIN_PSRAW,
20603 IX86_BUILTIN_PSRAD,
20604 IX86_BUILTIN_PSRLW,
20605 IX86_BUILTIN_PSRLD,
20606 IX86_BUILTIN_PSRLQ,
20607 IX86_BUILTIN_PSLLWI,
20608 IX86_BUILTIN_PSLLDI,
20609 IX86_BUILTIN_PSLLQI,
20610 IX86_BUILTIN_PSRAWI,
20611 IX86_BUILTIN_PSRADI,
20612 IX86_BUILTIN_PSRLWI,
20613 IX86_BUILTIN_PSRLDI,
20614 IX86_BUILTIN_PSRLQI,
20616 IX86_BUILTIN_PUNPCKHBW,
20617 IX86_BUILTIN_PUNPCKHWD,
20618 IX86_BUILTIN_PUNPCKHDQ,
20619 IX86_BUILTIN_PUNPCKLBW,
20620 IX86_BUILTIN_PUNPCKLWD,
20621 IX86_BUILTIN_PUNPCKLDQ,
20623 IX86_BUILTIN_SHUFPS,
20625 IX86_BUILTIN_RCPPS,
20626 IX86_BUILTIN_RCPSS,
20627 IX86_BUILTIN_RSQRTPS,
20628 IX86_BUILTIN_RSQRTPS_NR,
20629 IX86_BUILTIN_RSQRTSS,
20630 IX86_BUILTIN_RSQRTF,
20631 IX86_BUILTIN_SQRTPS,
20632 IX86_BUILTIN_SQRTPS_NR,
20633 IX86_BUILTIN_SQRTSS,
20635 IX86_BUILTIN_UNPCKHPS,
20636 IX86_BUILTIN_UNPCKLPS,
20638 IX86_BUILTIN_ANDPS,
20639 IX86_BUILTIN_ANDNPS,
20641 IX86_BUILTIN_XORPS,
20644 IX86_BUILTIN_LDMXCSR,
20645 IX86_BUILTIN_STMXCSR,
20646 IX86_BUILTIN_SFENCE,
20648 /* 3DNow! Original */
20649 IX86_BUILTIN_FEMMS,
20650 IX86_BUILTIN_PAVGUSB,
20651 IX86_BUILTIN_PF2ID,
20652 IX86_BUILTIN_PFACC,
20653 IX86_BUILTIN_PFADD,
20654 IX86_BUILTIN_PFCMPEQ,
20655 IX86_BUILTIN_PFCMPGE,
20656 IX86_BUILTIN_PFCMPGT,
20657 IX86_BUILTIN_PFMAX,
20658 IX86_BUILTIN_PFMIN,
20659 IX86_BUILTIN_PFMUL,
20660 IX86_BUILTIN_PFRCP,
20661 IX86_BUILTIN_PFRCPIT1,
20662 IX86_BUILTIN_PFRCPIT2,
20663 IX86_BUILTIN_PFRSQIT1,
20664 IX86_BUILTIN_PFRSQRT,
20665 IX86_BUILTIN_PFSUB,
20666 IX86_BUILTIN_PFSUBR,
20667 IX86_BUILTIN_PI2FD,
20668 IX86_BUILTIN_PMULHRW,
20670 /* 3DNow! Athlon Extensions */
20671 IX86_BUILTIN_PF2IW,
20672 IX86_BUILTIN_PFNACC,
20673 IX86_BUILTIN_PFPNACC,
20674 IX86_BUILTIN_PI2FW,
20675 IX86_BUILTIN_PSWAPDSI,
20676 IX86_BUILTIN_PSWAPDSF,
20679 IX86_BUILTIN_ADDPD,
20680 IX86_BUILTIN_ADDSD,
20681 IX86_BUILTIN_DIVPD,
20682 IX86_BUILTIN_DIVSD,
20683 IX86_BUILTIN_MULPD,
20684 IX86_BUILTIN_MULSD,
20685 IX86_BUILTIN_SUBPD,
20686 IX86_BUILTIN_SUBSD,
20688 IX86_BUILTIN_CMPEQPD,
20689 IX86_BUILTIN_CMPLTPD,
20690 IX86_BUILTIN_CMPLEPD,
20691 IX86_BUILTIN_CMPGTPD,
20692 IX86_BUILTIN_CMPGEPD,
20693 IX86_BUILTIN_CMPNEQPD,
20694 IX86_BUILTIN_CMPNLTPD,
20695 IX86_BUILTIN_CMPNLEPD,
20696 IX86_BUILTIN_CMPNGTPD,
20697 IX86_BUILTIN_CMPNGEPD,
20698 IX86_BUILTIN_CMPORDPD,
20699 IX86_BUILTIN_CMPUNORDPD,
20700 IX86_BUILTIN_CMPEQSD,
20701 IX86_BUILTIN_CMPLTSD,
20702 IX86_BUILTIN_CMPLESD,
20703 IX86_BUILTIN_CMPNEQSD,
20704 IX86_BUILTIN_CMPNLTSD,
20705 IX86_BUILTIN_CMPNLESD,
20706 IX86_BUILTIN_CMPORDSD,
20707 IX86_BUILTIN_CMPUNORDSD,
20709 IX86_BUILTIN_COMIEQSD,
20710 IX86_BUILTIN_COMILTSD,
20711 IX86_BUILTIN_COMILESD,
20712 IX86_BUILTIN_COMIGTSD,
20713 IX86_BUILTIN_COMIGESD,
20714 IX86_BUILTIN_COMINEQSD,
20715 IX86_BUILTIN_UCOMIEQSD,
20716 IX86_BUILTIN_UCOMILTSD,
20717 IX86_BUILTIN_UCOMILESD,
20718 IX86_BUILTIN_UCOMIGTSD,
20719 IX86_BUILTIN_UCOMIGESD,
20720 IX86_BUILTIN_UCOMINEQSD,
20722 IX86_BUILTIN_MAXPD,
20723 IX86_BUILTIN_MAXSD,
20724 IX86_BUILTIN_MINPD,
20725 IX86_BUILTIN_MINSD,
20727 IX86_BUILTIN_ANDPD,
20728 IX86_BUILTIN_ANDNPD,
20730 IX86_BUILTIN_XORPD,
20732 IX86_BUILTIN_SQRTPD,
20733 IX86_BUILTIN_SQRTSD,
20735 IX86_BUILTIN_UNPCKHPD,
20736 IX86_BUILTIN_UNPCKLPD,
20738 IX86_BUILTIN_SHUFPD,
20740 IX86_BUILTIN_LOADUPD,
20741 IX86_BUILTIN_STOREUPD,
20742 IX86_BUILTIN_MOVSD,
20744 IX86_BUILTIN_LOADHPD,
20745 IX86_BUILTIN_LOADLPD,
20747 IX86_BUILTIN_CVTDQ2PD,
20748 IX86_BUILTIN_CVTDQ2PS,
20750 IX86_BUILTIN_CVTPD2DQ,
20751 IX86_BUILTIN_CVTPD2PI,
20752 IX86_BUILTIN_CVTPD2PS,
20753 IX86_BUILTIN_CVTTPD2DQ,
20754 IX86_BUILTIN_CVTTPD2PI,
20756 IX86_BUILTIN_CVTPI2PD,
20757 IX86_BUILTIN_CVTSI2SD,
20758 IX86_BUILTIN_CVTSI642SD,
20760 IX86_BUILTIN_CVTSD2SI,
20761 IX86_BUILTIN_CVTSD2SI64,
20762 IX86_BUILTIN_CVTSD2SS,
20763 IX86_BUILTIN_CVTSS2SD,
20764 IX86_BUILTIN_CVTTSD2SI,
20765 IX86_BUILTIN_CVTTSD2SI64,
20767 IX86_BUILTIN_CVTPS2DQ,
20768 IX86_BUILTIN_CVTPS2PD,
20769 IX86_BUILTIN_CVTTPS2DQ,
20771 IX86_BUILTIN_MOVNTI,
20772 IX86_BUILTIN_MOVNTPD,
20773 IX86_BUILTIN_MOVNTDQ,
20775 IX86_BUILTIN_MOVQ128,
20778 IX86_BUILTIN_MASKMOVDQU,
20779 IX86_BUILTIN_MOVMSKPD,
20780 IX86_BUILTIN_PMOVMSKB128,
20782 IX86_BUILTIN_PACKSSWB128,
20783 IX86_BUILTIN_PACKSSDW128,
20784 IX86_BUILTIN_PACKUSWB128,
20786 IX86_BUILTIN_PADDB128,
20787 IX86_BUILTIN_PADDW128,
20788 IX86_BUILTIN_PADDD128,
20789 IX86_BUILTIN_PADDQ128,
20790 IX86_BUILTIN_PADDSB128,
20791 IX86_BUILTIN_PADDSW128,
20792 IX86_BUILTIN_PADDUSB128,
20793 IX86_BUILTIN_PADDUSW128,
20794 IX86_BUILTIN_PSUBB128,
20795 IX86_BUILTIN_PSUBW128,
20796 IX86_BUILTIN_PSUBD128,
20797 IX86_BUILTIN_PSUBQ128,
20798 IX86_BUILTIN_PSUBSB128,
20799 IX86_BUILTIN_PSUBSW128,
20800 IX86_BUILTIN_PSUBUSB128,
20801 IX86_BUILTIN_PSUBUSW128,
20803 IX86_BUILTIN_PAND128,
20804 IX86_BUILTIN_PANDN128,
20805 IX86_BUILTIN_POR128,
20806 IX86_BUILTIN_PXOR128,
20808 IX86_BUILTIN_PAVGB128,
20809 IX86_BUILTIN_PAVGW128,
20811 IX86_BUILTIN_PCMPEQB128,
20812 IX86_BUILTIN_PCMPEQW128,
20813 IX86_BUILTIN_PCMPEQD128,
20814 IX86_BUILTIN_PCMPGTB128,
20815 IX86_BUILTIN_PCMPGTW128,
20816 IX86_BUILTIN_PCMPGTD128,
20818 IX86_BUILTIN_PMADDWD128,
20820 IX86_BUILTIN_PMAXSW128,
20821 IX86_BUILTIN_PMAXUB128,
20822 IX86_BUILTIN_PMINSW128,
20823 IX86_BUILTIN_PMINUB128,
20825 IX86_BUILTIN_PMULUDQ,
20826 IX86_BUILTIN_PMULUDQ128,
20827 IX86_BUILTIN_PMULHUW128,
20828 IX86_BUILTIN_PMULHW128,
20829 IX86_BUILTIN_PMULLW128,
20831 IX86_BUILTIN_PSADBW128,
20832 IX86_BUILTIN_PSHUFHW,
20833 IX86_BUILTIN_PSHUFLW,
20834 IX86_BUILTIN_PSHUFD,
20836 IX86_BUILTIN_PSLLDQI128,
20837 IX86_BUILTIN_PSLLWI128,
20838 IX86_BUILTIN_PSLLDI128,
20839 IX86_BUILTIN_PSLLQI128,
20840 IX86_BUILTIN_PSRAWI128,
20841 IX86_BUILTIN_PSRADI128,
20842 IX86_BUILTIN_PSRLDQI128,
20843 IX86_BUILTIN_PSRLWI128,
20844 IX86_BUILTIN_PSRLDI128,
20845 IX86_BUILTIN_PSRLQI128,
20847 IX86_BUILTIN_PSLLDQ128,
20848 IX86_BUILTIN_PSLLW128,
20849 IX86_BUILTIN_PSLLD128,
20850 IX86_BUILTIN_PSLLQ128,
20851 IX86_BUILTIN_PSRAW128,
20852 IX86_BUILTIN_PSRAD128,
20853 IX86_BUILTIN_PSRLW128,
20854 IX86_BUILTIN_PSRLD128,
20855 IX86_BUILTIN_PSRLQ128,
20857 IX86_BUILTIN_PUNPCKHBW128,
20858 IX86_BUILTIN_PUNPCKHWD128,
20859 IX86_BUILTIN_PUNPCKHDQ128,
20860 IX86_BUILTIN_PUNPCKHQDQ128,
20861 IX86_BUILTIN_PUNPCKLBW128,
20862 IX86_BUILTIN_PUNPCKLWD128,
20863 IX86_BUILTIN_PUNPCKLDQ128,
20864 IX86_BUILTIN_PUNPCKLQDQ128,
20866 IX86_BUILTIN_CLFLUSH,
20867 IX86_BUILTIN_MFENCE,
20868 IX86_BUILTIN_LFENCE,
20870 IX86_BUILTIN_BSRSI,
20871 IX86_BUILTIN_BSRDI,
20872 IX86_BUILTIN_RDPMC,
20873 IX86_BUILTIN_RDTSC,
20874 IX86_BUILTIN_RDTSCP,
20875 IX86_BUILTIN_ROLQI,
20876 IX86_BUILTIN_ROLHI,
20877 IX86_BUILTIN_RORQI,
20878 IX86_BUILTIN_RORHI,
20881 IX86_BUILTIN_ADDSUBPS,
20882 IX86_BUILTIN_HADDPS,
20883 IX86_BUILTIN_HSUBPS,
20884 IX86_BUILTIN_MOVSHDUP,
20885 IX86_BUILTIN_MOVSLDUP,
20886 IX86_BUILTIN_ADDSUBPD,
20887 IX86_BUILTIN_HADDPD,
20888 IX86_BUILTIN_HSUBPD,
20889 IX86_BUILTIN_LDDQU,
20891 IX86_BUILTIN_MONITOR,
20892 IX86_BUILTIN_MWAIT,
20895 IX86_BUILTIN_PHADDW,
20896 IX86_BUILTIN_PHADDD,
20897 IX86_BUILTIN_PHADDSW,
20898 IX86_BUILTIN_PHSUBW,
20899 IX86_BUILTIN_PHSUBD,
20900 IX86_BUILTIN_PHSUBSW,
20901 IX86_BUILTIN_PMADDUBSW,
20902 IX86_BUILTIN_PMULHRSW,
20903 IX86_BUILTIN_PSHUFB,
20904 IX86_BUILTIN_PSIGNB,
20905 IX86_BUILTIN_PSIGNW,
20906 IX86_BUILTIN_PSIGND,
20907 IX86_BUILTIN_PALIGNR,
20908 IX86_BUILTIN_PABSB,
20909 IX86_BUILTIN_PABSW,
20910 IX86_BUILTIN_PABSD,
20912 IX86_BUILTIN_PHADDW128,
20913 IX86_BUILTIN_PHADDD128,
20914 IX86_BUILTIN_PHADDSW128,
20915 IX86_BUILTIN_PHSUBW128,
20916 IX86_BUILTIN_PHSUBD128,
20917 IX86_BUILTIN_PHSUBSW128,
20918 IX86_BUILTIN_PMADDUBSW128,
20919 IX86_BUILTIN_PMULHRSW128,
20920 IX86_BUILTIN_PSHUFB128,
20921 IX86_BUILTIN_PSIGNB128,
20922 IX86_BUILTIN_PSIGNW128,
20923 IX86_BUILTIN_PSIGND128,
20924 IX86_BUILTIN_PALIGNR128,
20925 IX86_BUILTIN_PABSB128,
20926 IX86_BUILTIN_PABSW128,
20927 IX86_BUILTIN_PABSD128,
20929 /* AMDFAM10 - SSE4A New Instructions. */
20930 IX86_BUILTIN_MOVNTSD,
20931 IX86_BUILTIN_MOVNTSS,
20932 IX86_BUILTIN_EXTRQI,
20933 IX86_BUILTIN_EXTRQ,
20934 IX86_BUILTIN_INSERTQI,
20935 IX86_BUILTIN_INSERTQ,
20938 IX86_BUILTIN_BLENDPD,
20939 IX86_BUILTIN_BLENDPS,
20940 IX86_BUILTIN_BLENDVPD,
20941 IX86_BUILTIN_BLENDVPS,
20942 IX86_BUILTIN_PBLENDVB128,
20943 IX86_BUILTIN_PBLENDW128,
20948 IX86_BUILTIN_INSERTPS128,
20950 IX86_BUILTIN_MOVNTDQA,
20951 IX86_BUILTIN_MPSADBW128,
20952 IX86_BUILTIN_PACKUSDW128,
20953 IX86_BUILTIN_PCMPEQQ,
20954 IX86_BUILTIN_PHMINPOSUW128,
20956 IX86_BUILTIN_PMAXSB128,
20957 IX86_BUILTIN_PMAXSD128,
20958 IX86_BUILTIN_PMAXUD128,
20959 IX86_BUILTIN_PMAXUW128,
20961 IX86_BUILTIN_PMINSB128,
20962 IX86_BUILTIN_PMINSD128,
20963 IX86_BUILTIN_PMINUD128,
20964 IX86_BUILTIN_PMINUW128,
20966 IX86_BUILTIN_PMOVSXBW128,
20967 IX86_BUILTIN_PMOVSXBD128,
20968 IX86_BUILTIN_PMOVSXBQ128,
20969 IX86_BUILTIN_PMOVSXWD128,
20970 IX86_BUILTIN_PMOVSXWQ128,
20971 IX86_BUILTIN_PMOVSXDQ128,
20973 IX86_BUILTIN_PMOVZXBW128,
20974 IX86_BUILTIN_PMOVZXBD128,
20975 IX86_BUILTIN_PMOVZXBQ128,
20976 IX86_BUILTIN_PMOVZXWD128,
20977 IX86_BUILTIN_PMOVZXWQ128,
20978 IX86_BUILTIN_PMOVZXDQ128,
20980 IX86_BUILTIN_PMULDQ128,
20981 IX86_BUILTIN_PMULLD128,
20983 IX86_BUILTIN_ROUNDPD,
20984 IX86_BUILTIN_ROUNDPS,
20985 IX86_BUILTIN_ROUNDSD,
20986 IX86_BUILTIN_ROUNDSS,
20988 IX86_BUILTIN_PTESTZ,
20989 IX86_BUILTIN_PTESTC,
20990 IX86_BUILTIN_PTESTNZC,
20992 IX86_BUILTIN_VEC_INIT_V2SI,
20993 IX86_BUILTIN_VEC_INIT_V4HI,
20994 IX86_BUILTIN_VEC_INIT_V8QI,
20995 IX86_BUILTIN_VEC_EXT_V2DF,
20996 IX86_BUILTIN_VEC_EXT_V2DI,
20997 IX86_BUILTIN_VEC_EXT_V4SF,
20998 IX86_BUILTIN_VEC_EXT_V4SI,
20999 IX86_BUILTIN_VEC_EXT_V8HI,
21000 IX86_BUILTIN_VEC_EXT_V2SI,
21001 IX86_BUILTIN_VEC_EXT_V4HI,
21002 IX86_BUILTIN_VEC_EXT_V16QI,
21003 IX86_BUILTIN_VEC_SET_V2DI,
21004 IX86_BUILTIN_VEC_SET_V4SF,
21005 IX86_BUILTIN_VEC_SET_V4SI,
21006 IX86_BUILTIN_VEC_SET_V8HI,
21007 IX86_BUILTIN_VEC_SET_V4HI,
21008 IX86_BUILTIN_VEC_SET_V16QI,
21010 IX86_BUILTIN_VEC_PACK_SFIX,
21013 IX86_BUILTIN_CRC32QI,
21014 IX86_BUILTIN_CRC32HI,
21015 IX86_BUILTIN_CRC32SI,
21016 IX86_BUILTIN_CRC32DI,
21018 IX86_BUILTIN_PCMPESTRI128,
21019 IX86_BUILTIN_PCMPESTRM128,
21020 IX86_BUILTIN_PCMPESTRA128,
21021 IX86_BUILTIN_PCMPESTRC128,
21022 IX86_BUILTIN_PCMPESTRO128,
21023 IX86_BUILTIN_PCMPESTRS128,
21024 IX86_BUILTIN_PCMPESTRZ128,
21025 IX86_BUILTIN_PCMPISTRI128,
21026 IX86_BUILTIN_PCMPISTRM128,
21027 IX86_BUILTIN_PCMPISTRA128,
21028 IX86_BUILTIN_PCMPISTRC128,
21029 IX86_BUILTIN_PCMPISTRO128,
21030 IX86_BUILTIN_PCMPISTRS128,
21031 IX86_BUILTIN_PCMPISTRZ128,
21033 IX86_BUILTIN_PCMPGTQ,
21035 /* AES instructions */
21036 IX86_BUILTIN_AESENC128,
21037 IX86_BUILTIN_AESENCLAST128,
21038 IX86_BUILTIN_AESDEC128,
21039 IX86_BUILTIN_AESDECLAST128,
21040 IX86_BUILTIN_AESIMC128,
21041 IX86_BUILTIN_AESKEYGENASSIST128,
21043 /* PCLMUL instruction */
21044 IX86_BUILTIN_PCLMULQDQ128,
21047 IX86_BUILTIN_ADDPD256,
21048 IX86_BUILTIN_ADDPS256,
21049 IX86_BUILTIN_ADDSUBPD256,
21050 IX86_BUILTIN_ADDSUBPS256,
21051 IX86_BUILTIN_ANDPD256,
21052 IX86_BUILTIN_ANDPS256,
21053 IX86_BUILTIN_ANDNPD256,
21054 IX86_BUILTIN_ANDNPS256,
21055 IX86_BUILTIN_BLENDPD256,
21056 IX86_BUILTIN_BLENDPS256,
21057 IX86_BUILTIN_BLENDVPD256,
21058 IX86_BUILTIN_BLENDVPS256,
21059 IX86_BUILTIN_DIVPD256,
21060 IX86_BUILTIN_DIVPS256,
21061 IX86_BUILTIN_DPPS256,
21062 IX86_BUILTIN_HADDPD256,
21063 IX86_BUILTIN_HADDPS256,
21064 IX86_BUILTIN_HSUBPD256,
21065 IX86_BUILTIN_HSUBPS256,
21066 IX86_BUILTIN_MAXPD256,
21067 IX86_BUILTIN_MAXPS256,
21068 IX86_BUILTIN_MINPD256,
21069 IX86_BUILTIN_MINPS256,
21070 IX86_BUILTIN_MULPD256,
21071 IX86_BUILTIN_MULPS256,
21072 IX86_BUILTIN_ORPD256,
21073 IX86_BUILTIN_ORPS256,
21074 IX86_BUILTIN_SHUFPD256,
21075 IX86_BUILTIN_SHUFPS256,
21076 IX86_BUILTIN_SUBPD256,
21077 IX86_BUILTIN_SUBPS256,
21078 IX86_BUILTIN_XORPD256,
21079 IX86_BUILTIN_XORPS256,
21080 IX86_BUILTIN_CMPSD,
21081 IX86_BUILTIN_CMPSS,
21082 IX86_BUILTIN_CMPPD,
21083 IX86_BUILTIN_CMPPS,
21084 IX86_BUILTIN_CMPPD256,
21085 IX86_BUILTIN_CMPPS256,
21086 IX86_BUILTIN_CVTDQ2PD256,
21087 IX86_BUILTIN_CVTDQ2PS256,
21088 IX86_BUILTIN_CVTPD2PS256,
21089 IX86_BUILTIN_CVTPS2DQ256,
21090 IX86_BUILTIN_CVTPS2PD256,
21091 IX86_BUILTIN_CVTTPD2DQ256,
21092 IX86_BUILTIN_CVTPD2DQ256,
21093 IX86_BUILTIN_CVTTPS2DQ256,
21094 IX86_BUILTIN_EXTRACTF128PD256,
21095 IX86_BUILTIN_EXTRACTF128PS256,
21096 IX86_BUILTIN_EXTRACTF128SI256,
21097 IX86_BUILTIN_VZEROALL,
21098 IX86_BUILTIN_VZEROUPPER,
21099 IX86_BUILTIN_VPERMILVARPD,
21100 IX86_BUILTIN_VPERMILVARPS,
21101 IX86_BUILTIN_VPERMILVARPD256,
21102 IX86_BUILTIN_VPERMILVARPS256,
21103 IX86_BUILTIN_VPERMILPD,
21104 IX86_BUILTIN_VPERMILPS,
21105 IX86_BUILTIN_VPERMILPD256,
21106 IX86_BUILTIN_VPERMILPS256,
21107 IX86_BUILTIN_VPERMIL2PD,
21108 IX86_BUILTIN_VPERMIL2PS,
21109 IX86_BUILTIN_VPERMIL2PD256,
21110 IX86_BUILTIN_VPERMIL2PS256,
21111 IX86_BUILTIN_VPERM2F128PD256,
21112 IX86_BUILTIN_VPERM2F128PS256,
21113 IX86_BUILTIN_VPERM2F128SI256,
21114 IX86_BUILTIN_VBROADCASTSS,
21115 IX86_BUILTIN_VBROADCASTSD256,
21116 IX86_BUILTIN_VBROADCASTSS256,
21117 IX86_BUILTIN_VBROADCASTPD256,
21118 IX86_BUILTIN_VBROADCASTPS256,
21119 IX86_BUILTIN_VINSERTF128PD256,
21120 IX86_BUILTIN_VINSERTF128PS256,
21121 IX86_BUILTIN_VINSERTF128SI256,
21122 IX86_BUILTIN_LOADUPD256,
21123 IX86_BUILTIN_LOADUPS256,
21124 IX86_BUILTIN_STOREUPD256,
21125 IX86_BUILTIN_STOREUPS256,
21126 IX86_BUILTIN_LDDQU256,
21127 IX86_BUILTIN_MOVNTDQ256,
21128 IX86_BUILTIN_MOVNTPD256,
21129 IX86_BUILTIN_MOVNTPS256,
21130 IX86_BUILTIN_LOADDQU256,
21131 IX86_BUILTIN_STOREDQU256,
21132 IX86_BUILTIN_MASKLOADPD,
21133 IX86_BUILTIN_MASKLOADPS,
21134 IX86_BUILTIN_MASKSTOREPD,
21135 IX86_BUILTIN_MASKSTOREPS,
21136 IX86_BUILTIN_MASKLOADPD256,
21137 IX86_BUILTIN_MASKLOADPS256,
21138 IX86_BUILTIN_MASKSTOREPD256,
21139 IX86_BUILTIN_MASKSTOREPS256,
21140 IX86_BUILTIN_MOVSHDUP256,
21141 IX86_BUILTIN_MOVSLDUP256,
21142 IX86_BUILTIN_MOVDDUP256,
21144 IX86_BUILTIN_SQRTPD256,
21145 IX86_BUILTIN_SQRTPS256,
21146 IX86_BUILTIN_SQRTPS_NR256,
21147 IX86_BUILTIN_RSQRTPS256,
21148 IX86_BUILTIN_RSQRTPS_NR256,
21150 IX86_BUILTIN_RCPPS256,
21152 IX86_BUILTIN_ROUNDPD256,
21153 IX86_BUILTIN_ROUNDPS256,
21155 IX86_BUILTIN_UNPCKHPD256,
21156 IX86_BUILTIN_UNPCKLPD256,
21157 IX86_BUILTIN_UNPCKHPS256,
21158 IX86_BUILTIN_UNPCKLPS256,
21160 IX86_BUILTIN_SI256_SI,
21161 IX86_BUILTIN_PS256_PS,
21162 IX86_BUILTIN_PD256_PD,
21163 IX86_BUILTIN_SI_SI256,
21164 IX86_BUILTIN_PS_PS256,
21165 IX86_BUILTIN_PD_PD256,
21167 IX86_BUILTIN_VTESTZPD,
21168 IX86_BUILTIN_VTESTCPD,
21169 IX86_BUILTIN_VTESTNZCPD,
21170 IX86_BUILTIN_VTESTZPS,
21171 IX86_BUILTIN_VTESTCPS,
21172 IX86_BUILTIN_VTESTNZCPS,
21173 IX86_BUILTIN_VTESTZPD256,
21174 IX86_BUILTIN_VTESTCPD256,
21175 IX86_BUILTIN_VTESTNZCPD256,
21176 IX86_BUILTIN_VTESTZPS256,
21177 IX86_BUILTIN_VTESTCPS256,
21178 IX86_BUILTIN_VTESTNZCPS256,
21179 IX86_BUILTIN_PTESTZ256,
21180 IX86_BUILTIN_PTESTC256,
21181 IX86_BUILTIN_PTESTNZC256,
21183 IX86_BUILTIN_MOVMSKPD256,
21184 IX86_BUILTIN_MOVMSKPS256,
21186 /* TFmode support builtins. */
21188 IX86_BUILTIN_HUGE_VALQ,
21189 IX86_BUILTIN_FABSQ,
21190 IX86_BUILTIN_COPYSIGNQ,
21192 /* Vectorizer support builtins. */
21193 IX86_BUILTIN_CPYSGNPS,
21194 IX86_BUILTIN_CPYSGNPD,
21196 IX86_BUILTIN_CVTUDQ2PS,
21198 IX86_BUILTIN_VEC_PERM_V2DF,
21199 IX86_BUILTIN_VEC_PERM_V4SF,
21200 IX86_BUILTIN_VEC_PERM_V2DI,
21201 IX86_BUILTIN_VEC_PERM_V4SI,
21202 IX86_BUILTIN_VEC_PERM_V8HI,
21203 IX86_BUILTIN_VEC_PERM_V16QI,
21204 IX86_BUILTIN_VEC_PERM_V2DI_U,
21205 IX86_BUILTIN_VEC_PERM_V4SI_U,
21206 IX86_BUILTIN_VEC_PERM_V8HI_U,
21207 IX86_BUILTIN_VEC_PERM_V16QI_U,
21208 IX86_BUILTIN_VEC_PERM_V4DF,
21209 IX86_BUILTIN_VEC_PERM_V8SF,
21211 /* FMA4 and XOP instructions. */
21212 IX86_BUILTIN_VFMADDSS,
21213 IX86_BUILTIN_VFMADDSD,
21214 IX86_BUILTIN_VFMADDPS,
21215 IX86_BUILTIN_VFMADDPD,
21216 IX86_BUILTIN_VFMSUBSS,
21217 IX86_BUILTIN_VFMSUBSD,
21218 IX86_BUILTIN_VFMSUBPS,
21219 IX86_BUILTIN_VFMSUBPD,
21220 IX86_BUILTIN_VFMADDSUBPS,
21221 IX86_BUILTIN_VFMADDSUBPD,
21222 IX86_BUILTIN_VFMSUBADDPS,
21223 IX86_BUILTIN_VFMSUBADDPD,
21224 IX86_BUILTIN_VFNMADDSS,
21225 IX86_BUILTIN_VFNMADDSD,
21226 IX86_BUILTIN_VFNMADDPS,
21227 IX86_BUILTIN_VFNMADDPD,
21228 IX86_BUILTIN_VFNMSUBSS,
21229 IX86_BUILTIN_VFNMSUBSD,
21230 IX86_BUILTIN_VFNMSUBPS,
21231 IX86_BUILTIN_VFNMSUBPD,
21232 IX86_BUILTIN_VFMADDPS256,
21233 IX86_BUILTIN_VFMADDPD256,
21234 IX86_BUILTIN_VFMSUBPS256,
21235 IX86_BUILTIN_VFMSUBPD256,
21236 IX86_BUILTIN_VFMADDSUBPS256,
21237 IX86_BUILTIN_VFMADDSUBPD256,
21238 IX86_BUILTIN_VFMSUBADDPS256,
21239 IX86_BUILTIN_VFMSUBADDPD256,
21240 IX86_BUILTIN_VFNMADDPS256,
21241 IX86_BUILTIN_VFNMADDPD256,
21242 IX86_BUILTIN_VFNMSUBPS256,
21243 IX86_BUILTIN_VFNMSUBPD256,
21245 IX86_BUILTIN_VPCMOV,
21246 IX86_BUILTIN_VPCMOV_V2DI,
21247 IX86_BUILTIN_VPCMOV_V4SI,
21248 IX86_BUILTIN_VPCMOV_V8HI,
21249 IX86_BUILTIN_VPCMOV_V16QI,
21250 IX86_BUILTIN_VPCMOV_V4SF,
21251 IX86_BUILTIN_VPCMOV_V2DF,
21252 IX86_BUILTIN_VPCMOV256,
21253 IX86_BUILTIN_VPCMOV_V4DI256,
21254 IX86_BUILTIN_VPCMOV_V8SI256,
21255 IX86_BUILTIN_VPCMOV_V16HI256,
21256 IX86_BUILTIN_VPCMOV_V32QI256,
21257 IX86_BUILTIN_VPCMOV_V8SF256,
21258 IX86_BUILTIN_VPCMOV_V4DF256,
21260 IX86_BUILTIN_VPPERM,
21262 IX86_BUILTIN_VPMACSSWW,
21263 IX86_BUILTIN_VPMACSWW,
21264 IX86_BUILTIN_VPMACSSWD,
21265 IX86_BUILTIN_VPMACSWD,
21266 IX86_BUILTIN_VPMACSSDD,
21267 IX86_BUILTIN_VPMACSDD,
21268 IX86_BUILTIN_VPMACSSDQL,
21269 IX86_BUILTIN_VPMACSSDQH,
21270 IX86_BUILTIN_VPMACSDQL,
21271 IX86_BUILTIN_VPMACSDQH,
21272 IX86_BUILTIN_VPMADCSSWD,
21273 IX86_BUILTIN_VPMADCSWD,
21275 IX86_BUILTIN_VPHADDBW,
21276 IX86_BUILTIN_VPHADDBD,
21277 IX86_BUILTIN_VPHADDBQ,
21278 IX86_BUILTIN_VPHADDWD,
21279 IX86_BUILTIN_VPHADDWQ,
21280 IX86_BUILTIN_VPHADDDQ,
21281 IX86_BUILTIN_VPHADDUBW,
21282 IX86_BUILTIN_VPHADDUBD,
21283 IX86_BUILTIN_VPHADDUBQ,
21284 IX86_BUILTIN_VPHADDUWD,
21285 IX86_BUILTIN_VPHADDUWQ,
21286 IX86_BUILTIN_VPHADDUDQ,
21287 IX86_BUILTIN_VPHSUBBW,
21288 IX86_BUILTIN_VPHSUBWD,
21289 IX86_BUILTIN_VPHSUBDQ,
21291 IX86_BUILTIN_VPROTB,
21292 IX86_BUILTIN_VPROTW,
21293 IX86_BUILTIN_VPROTD,
21294 IX86_BUILTIN_VPROTQ,
21295 IX86_BUILTIN_VPROTB_IMM,
21296 IX86_BUILTIN_VPROTW_IMM,
21297 IX86_BUILTIN_VPROTD_IMM,
21298 IX86_BUILTIN_VPROTQ_IMM,
21300 IX86_BUILTIN_VPSHLB,
21301 IX86_BUILTIN_VPSHLW,
21302 IX86_BUILTIN_VPSHLD,
21303 IX86_BUILTIN_VPSHLQ,
21304 IX86_BUILTIN_VPSHAB,
21305 IX86_BUILTIN_VPSHAW,
21306 IX86_BUILTIN_VPSHAD,
21307 IX86_BUILTIN_VPSHAQ,
21309 IX86_BUILTIN_VFRCZSS,
21310 IX86_BUILTIN_VFRCZSD,
21311 IX86_BUILTIN_VFRCZPS,
21312 IX86_BUILTIN_VFRCZPD,
21313 IX86_BUILTIN_VFRCZPS256,
21314 IX86_BUILTIN_VFRCZPD256,
21316 IX86_BUILTIN_VPCOMEQUB,
21317 IX86_BUILTIN_VPCOMNEUB,
21318 IX86_BUILTIN_VPCOMLTUB,
21319 IX86_BUILTIN_VPCOMLEUB,
21320 IX86_BUILTIN_VPCOMGTUB,
21321 IX86_BUILTIN_VPCOMGEUB,
21322 IX86_BUILTIN_VPCOMFALSEUB,
21323 IX86_BUILTIN_VPCOMTRUEUB,
21325 IX86_BUILTIN_VPCOMEQUW,
21326 IX86_BUILTIN_VPCOMNEUW,
21327 IX86_BUILTIN_VPCOMLTUW,
21328 IX86_BUILTIN_VPCOMLEUW,
21329 IX86_BUILTIN_VPCOMGTUW,
21330 IX86_BUILTIN_VPCOMGEUW,
21331 IX86_BUILTIN_VPCOMFALSEUW,
21332 IX86_BUILTIN_VPCOMTRUEUW,
21334 IX86_BUILTIN_VPCOMEQUD,
21335 IX86_BUILTIN_VPCOMNEUD,
21336 IX86_BUILTIN_VPCOMLTUD,
21337 IX86_BUILTIN_VPCOMLEUD,
21338 IX86_BUILTIN_VPCOMGTUD,
21339 IX86_BUILTIN_VPCOMGEUD,
21340 IX86_BUILTIN_VPCOMFALSEUD,
21341 IX86_BUILTIN_VPCOMTRUEUD,
21343 IX86_BUILTIN_VPCOMEQUQ,
21344 IX86_BUILTIN_VPCOMNEUQ,
21345 IX86_BUILTIN_VPCOMLTUQ,
21346 IX86_BUILTIN_VPCOMLEUQ,
21347 IX86_BUILTIN_VPCOMGTUQ,
21348 IX86_BUILTIN_VPCOMGEUQ,
21349 IX86_BUILTIN_VPCOMFALSEUQ,
21350 IX86_BUILTIN_VPCOMTRUEUQ,
21352 IX86_BUILTIN_VPCOMEQB,
21353 IX86_BUILTIN_VPCOMNEB,
21354 IX86_BUILTIN_VPCOMLTB,
21355 IX86_BUILTIN_VPCOMLEB,
21356 IX86_BUILTIN_VPCOMGTB,
21357 IX86_BUILTIN_VPCOMGEB,
21358 IX86_BUILTIN_VPCOMFALSEB,
21359 IX86_BUILTIN_VPCOMTRUEB,
21361 IX86_BUILTIN_VPCOMEQW,
21362 IX86_BUILTIN_VPCOMNEW,
21363 IX86_BUILTIN_VPCOMLTW,
21364 IX86_BUILTIN_VPCOMLEW,
21365 IX86_BUILTIN_VPCOMGTW,
21366 IX86_BUILTIN_VPCOMGEW,
21367 IX86_BUILTIN_VPCOMFALSEW,
21368 IX86_BUILTIN_VPCOMTRUEW,
21370 IX86_BUILTIN_VPCOMEQD,
21371 IX86_BUILTIN_VPCOMNED,
21372 IX86_BUILTIN_VPCOMLTD,
21373 IX86_BUILTIN_VPCOMLED,
21374 IX86_BUILTIN_VPCOMGTD,
21375 IX86_BUILTIN_VPCOMGED,
21376 IX86_BUILTIN_VPCOMFALSED,
21377 IX86_BUILTIN_VPCOMTRUED,
21379 IX86_BUILTIN_VPCOMEQQ,
21380 IX86_BUILTIN_VPCOMNEQ,
21381 IX86_BUILTIN_VPCOMLTQ,
21382 IX86_BUILTIN_VPCOMLEQ,
21383 IX86_BUILTIN_VPCOMGTQ,
21384 IX86_BUILTIN_VPCOMGEQ,
21385 IX86_BUILTIN_VPCOMFALSEQ,
21386 IX86_BUILTIN_VPCOMTRUEQ,
21388 /* LWP instructions. */
21389 IX86_BUILTIN_LLWPCB,
21390 IX86_BUILTIN_SLWPCB,
21391 IX86_BUILTIN_LWPVAL32,
21392 IX86_BUILTIN_LWPVAL64,
21393 IX86_BUILTIN_LWPINS32,
  IX86_BUILTIN_LWPINS64,

  IX86_BUILTIN_MAX
};
/* Table for the ix86 builtin decls.  */
static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];

/* Table of all of the builtin functions that are possible with different ISAs
   but are waiting to be built until a function is declared to use that
   ISA.  */
struct builtin_isa {
  const char *name;		/* function name */
  enum ix86_builtin_func_type tcode; /* type to use in the declaration */
  int isa;			/* isa_flags this builtin is defined for */
  bool const_p;			/* true if the declaration is constant */
  bool set_and_not_built_p;	/* true if the builtin is deferred: recorded
				   here but its decl not yet built */
};

static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
/* Add an ix86 target builtin function with CODE, NAME and TYPE.  Save the
   MASK of which isa_flags to use in the ix86_builtins_isa array.  Stores the
   function decl in the ix86_builtins array.  Returns the function decl or
   NULL_TREE if the builtin was not added.

   If the front end has a special hook for builtin functions, delay adding
   builtin functions that aren't in the current ISA until the ISA is changed
   with function specific optimization.  Doing so can save about 300K for the
   default compiler.  When the builtin is expanded, check at that time whether
   it is valid.

   If the front end doesn't have a special hook, record all builtins, even if
   it isn't an instruction set in the current ISA, in case the user uses
   function specific options for a different ISA, so that we don't get scope
   errors if a builtin is added in the middle of a function scope.  */

static inline tree
def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
	     enum ix86_builtins code)
{
  tree decl = NULL_TREE;

  if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
    {
      ix86_builtins_isa[(int) code].isa = mask;

      mask &= ~OPTION_MASK_ISA_64BIT;
      if (mask == 0
	  || (mask & ix86_isa_flags) != 0
	  || (lang_hooks.builtin_function
	      == lang_hooks.builtin_function_ext_scope))
	{
	  tree type = ix86_get_builtin_func_type (tcode);
	  decl = add_builtin_function (name, type, code, BUILT_IN_MD,
				       NULL, NULL_TREE);
	  ix86_builtins[(int) code] = decl;
	  ix86_builtins_isa[(int) code].set_and_not_built_p = false;
	}
      else
	{
	  ix86_builtins[(int) code] = NULL_TREE;
	  ix86_builtins_isa[(int) code].tcode = tcode;
	  ix86_builtins_isa[(int) code].name = name;
	  ix86_builtins_isa[(int) code].const_p = false;
	  ix86_builtins_isa[(int) code].set_and_not_built_p = true;
	}
    }

  return decl;
}
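/* The ISA initialization code registers builtins through this interface;
   a typical call looks like (shown only to illustrate the parameters):

     def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
		  VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);

   The decl is built immediately when SSE is already in ix86_isa_flags,
   and is otherwise parked in ix86_builtins_isa until ix86_add_new_builtins
   below sees an ISA change that enables it.  */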
/* Like def_builtin, but also marks the function decl "const".  */

static inline tree
def_builtin_const (int mask, const char *name,
		   enum ix86_builtin_func_type tcode, enum ix86_builtins code)
{
  tree decl = def_builtin (mask, name, tcode, code);
  if (decl)
    TREE_READONLY (decl) = 1;
  else
    ix86_builtins_isa[(int) code].const_p = true;

  return decl;
}
/* Add any new builtin functions for a given ISA that may not have been
   declared.  This saves a bit of space compared to adding all of the
   declarations to the tree, even if we didn't use them.  */

static void
ix86_add_new_builtins (int isa)
{
  int i;

  for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
    {
      if ((ix86_builtins_isa[i].isa & isa) != 0
	  && ix86_builtins_isa[i].set_and_not_built_p)
	{
	  tree decl, type;

	  /* Don't define the builtin again.  */
	  ix86_builtins_isa[i].set_and_not_built_p = false;

	  type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
	  decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
						 type, i, BUILT_IN_MD, NULL,
						 NULL_TREE);

	  ix86_builtins[i] = decl;
	  if (ix86_builtins_isa[i].const_p)
	    TREE_READONLY (decl) = 1;
	}
    }
}
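/* Illustration (hypothetical user code): with a default -msse2 compiler,
   the SSE4.2 CRC builtin is initially parked; the target attribute flips
   ix86_isa_flags, and this function then materializes the decl at global
   scope before the body is expanded:

     __attribute__((target ("sse4.2")))
     unsigned crc8 (unsigned c, unsigned char v)
     { return __builtin_ia32_crc32qi (c, v); }
*/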
/* Bits for builtin_description.flag.  */

/* Set when we don't support the comparison natively, and should
   swap_comparison in order to support it.  */
#define BUILTIN_DESC_SWAP_OPERANDS 1

struct builtin_description
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum ix86_builtins code;
  const enum rtx_code comparison;
  const int flag;
};
static const struct builtin_description bdesc_comi[] =
{
21533 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
21534 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
21535 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
21536 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
21537 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
21538 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
21539 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
21540 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
21541 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
21542 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
21543 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
21544 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
21545 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
21546 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
21547 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
21548 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
21549 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
21550 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
21551 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
21552 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
21553 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
21554 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
21555 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
};
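/* Note the rtx codes used above: the "eq" entries map to UNEQ and the
   "neq" entries to LTGT because comiss/comisd set ZF on unordered
   operands as well, so equality observed through these insns is really
   "unordered or equal" and inequality is the ordered "less or greater".  */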
static const struct builtin_description bdesc_pcmpestr[] =
{
21562 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
21563 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
21564 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
21565 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
21566 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
21567 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
};
static const struct builtin_description bdesc_pcmpistr[] =
{
21574 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
21575 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
21576 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
21577 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
21578 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
21579 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
};
/* Special builtins with variable number of arguments.  */
static const struct builtin_description bdesc_special_args[] =
{
21586 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
21587 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
21590 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21593 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21596 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21597 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21598 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21600 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21601 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21602 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21603 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21605 /* SSE or 3DNow!A */
21606 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21607 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
21610 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21611 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21612 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21613 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
21614 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21615 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
21616 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
21617 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
21618 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21620 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21621 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21624 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21627 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
21630 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21631 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21634 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
21635 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
21637 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21638 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21639 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21640 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
21641 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
21643 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21644 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21645 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21646 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21647 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21648 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
21649 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21651 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
21652 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21653 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21655 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
21656 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
21657 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
21658 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
21659 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
21660 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
21661 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
21662 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
21664 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
21665 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
21666 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
21667 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
21668 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
};
/* Builtins with variable number of arguments.  */
static const struct builtin_description bdesc_args[] =
{
21676 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
21677 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
21678 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
21679 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21680 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21681 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21682 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
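
  /* Shifts.  Each shift insn is described twice: once taking the count as
     an integer (the ..._SI_COUNT signatures, builtins ending in "i") and
     once taking the count from a vector operand (the ..._V4HI_COUNT and
     ..._V1DI_COUNT signatures).  Both rows name the same insn pattern;
     only the argument signature differs.  A minimal illustration, assuming
     the usual mapping onto the <mmintrin.h> intrinsics:

	v = __builtin_ia32_psllwi (v, 3);    count as int (_mm_slli_pi16)
	v = __builtin_ia32_psllw (v, c);     count in a vector (_mm_sll_pi16)  */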
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
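
  /* 3DNow! */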
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },

  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
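
  /* 3DNow!A */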
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
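
  /* SSE */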
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
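
  /* Comparisons.  There are no separate GT/GE patterns, so the "gt" and
     "ge" builtins reuse the LT/LE comparison codes with the operands
     swapped (the ..._SWAP signatures), while the negated forms map onto
     the IEEE unordered codes (UNGE for "nlt", UNGT for "nle").  For
     example, __builtin_ia32_cmpgtps (a, b) comes out as the LT comparison
     of (b, a), matching _mm_cmpgt_ps from <xmmintrin.h>.  */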
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },

  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },

  /* SSE MMX or 3DNow!A */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
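
  /* SSE2 */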
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
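
  /* The vec_perm builtins are listed with CODE_FOR_nothing: they do not
     map onto a single insn pattern, and are presumably expanded or folded
     by hand elsewhere in this file rather than through the generic
     one-insn path used by the other entries.  */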
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
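
  /* SSE2 MMX */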
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
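
  /* SSE3 */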
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
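
  /* SSSE3 */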
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
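
  /* SSE4.1 */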
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
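
  /* SSE4.1: rounding and testing.  These entries are keyed off
     OPTION_MASK_ISA_ROUND rather than OPTION_MASK_ISA_SSE4_1 directly.
     The three ptest builtins below share one insn pattern and differ only
     in the comparison code applied to the resulting flags; under the
     usual x86 mapping, EQ tests ZF (ptestz), LTU tests CF (ptestc) and
     GTU requires both flags clear (ptestnzc).  */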
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
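
  /* SSE4.2 */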
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
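
  /* SSE4A */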
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
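
  /* AES */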
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
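
  /* PCLMUL */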
  { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
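
  /* AVX */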
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22226 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22227 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22228 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
22229 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
22230 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
22231 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
22232 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
22233 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
22234 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22235 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
22236 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22237 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22238 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22239 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22240 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22241 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
22242 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
22243 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22244 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22245 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22246 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
22247 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
22248 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },
22250 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22251 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22252 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22254 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22255 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22256 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22257 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22258 { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22260 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22262 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22263 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22265 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22266 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22267 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22268 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22270 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
22271 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
22272 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
22273 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si_si256, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
22274 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps_ps256, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
22275 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd_pd256, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
22277 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22278 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22279 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22280 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22281 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22282 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22283 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22284 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22285 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22286 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22287 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22288 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22289 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22290 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22291 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22293 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
22294 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
22296 { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
22297 };
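/* Illustrative sketch (not part of GCC itself): the ABM row above surfaces
   as a 16-bit leading-zero count at the user level, assuming the module is
   compiled with -mabm.  The result for a zero input is only defined when
   the hardware LZCNT instruction is present.

     unsigned short
     leading_zeros16 (unsigned short x)
     {
       return __builtin_clzs (x);   /* UINT16_FTYPE_UINT16 shape */
     }
*/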
22299 /* FMA4 and XOP. */
22300 #define MULTI_ARG_4_DF2_DI_I V2DF_FTYPE_V2DF_V2DF_V2DI_INT
22301 #define MULTI_ARG_4_DF2_DI_I1 V4DF_FTYPE_V4DF_V4DF_V4DI_INT
22302 #define MULTI_ARG_4_SF2_SI_I V4SF_FTYPE_V4SF_V4SF_V4SI_INT
22303 #define MULTI_ARG_4_SF2_SI_I1 V8SF_FTYPE_V8SF_V8SF_V8SI_INT
22304 #define MULTI_ARG_3_SF V4SF_FTYPE_V4SF_V4SF_V4SF
22305 #define MULTI_ARG_3_DF V2DF_FTYPE_V2DF_V2DF_V2DF
22306 #define MULTI_ARG_3_SF2 V8SF_FTYPE_V8SF_V8SF_V8SF
22307 #define MULTI_ARG_3_DF2 V4DF_FTYPE_V4DF_V4DF_V4DF
22308 #define MULTI_ARG_3_DI V2DI_FTYPE_V2DI_V2DI_V2DI
22309 #define MULTI_ARG_3_SI V4SI_FTYPE_V4SI_V4SI_V4SI
22310 #define MULTI_ARG_3_SI_DI V4SI_FTYPE_V4SI_V4SI_V2DI
22311 #define MULTI_ARG_3_HI V8HI_FTYPE_V8HI_V8HI_V8HI
22312 #define MULTI_ARG_3_HI_SI V8HI_FTYPE_V8HI_V8HI_V4SI
22313 #define MULTI_ARG_3_QI V16QI_FTYPE_V16QI_V16QI_V16QI
22314 #define MULTI_ARG_3_DI2 V4DI_FTYPE_V4DI_V4DI_V4DI
22315 #define MULTI_ARG_3_SI2 V8SI_FTYPE_V8SI_V8SI_V8SI
22316 #define MULTI_ARG_3_HI2 V16HI_FTYPE_V16HI_V16HI_V16HI
22317 #define MULTI_ARG_3_QI2 V32QI_FTYPE_V32QI_V32QI_V32QI
22318 #define MULTI_ARG_2_SF V4SF_FTYPE_V4SF_V4SF
22319 #define MULTI_ARG_2_DF V2DF_FTYPE_V2DF_V2DF
22320 #define MULTI_ARG_2_DI V2DI_FTYPE_V2DI_V2DI
22321 #define MULTI_ARG_2_SI V4SI_FTYPE_V4SI_V4SI
22322 #define MULTI_ARG_2_HI V8HI_FTYPE_V8HI_V8HI
22323 #define MULTI_ARG_2_QI V16QI_FTYPE_V16QI_V16QI
22324 #define MULTI_ARG_2_DI_IMM V2DI_FTYPE_V2DI_SI
22325 #define MULTI_ARG_2_SI_IMM V4SI_FTYPE_V4SI_SI
22326 #define MULTI_ARG_2_HI_IMM V8HI_FTYPE_V8HI_SI
22327 #define MULTI_ARG_2_QI_IMM V16QI_FTYPE_V16QI_SI
22328 #define MULTI_ARG_2_DI_CMP V2DI_FTYPE_V2DI_V2DI_CMP
22329 #define MULTI_ARG_2_SI_CMP V4SI_FTYPE_V4SI_V4SI_CMP
22330 #define MULTI_ARG_2_HI_CMP V8HI_FTYPE_V8HI_V8HI_CMP
22331 #define MULTI_ARG_2_QI_CMP V16QI_FTYPE_V16QI_V16QI_CMP
22332 #define MULTI_ARG_2_SF_TF V4SF_FTYPE_V4SF_V4SF_TF
22333 #define MULTI_ARG_2_DF_TF V2DF_FTYPE_V2DF_V2DF_TF
22334 #define MULTI_ARG_2_DI_TF V2DI_FTYPE_V2DI_V2DI_TF
22335 #define MULTI_ARG_2_SI_TF V4SI_FTYPE_V4SI_V4SI_TF
22336 #define MULTI_ARG_2_HI_TF V8HI_FTYPE_V8HI_V8HI_TF
22337 #define MULTI_ARG_2_QI_TF V16QI_FTYPE_V16QI_V16QI_TF
22338 #define MULTI_ARG_1_SF V4SF_FTYPE_V4SF
22339 #define MULTI_ARG_1_DF V2DF_FTYPE_V2DF
22340 #define MULTI_ARG_1_SF2 V8SF_FTYPE_V8SF
22341 #define MULTI_ARG_1_DF2 V4DF_FTYPE_V4DF
22342 #define MULTI_ARG_1_DI V2DI_FTYPE_V2DI
22343 #define MULTI_ARG_1_SI V4SI_FTYPE_V4SI
22344 #define MULTI_ARG_1_HI V8HI_FTYPE_V8HI
22345 #define MULTI_ARG_1_QI V16QI_FTYPE_V16QI
22346 #define MULTI_ARG_1_SI_DI V2DI_FTYPE_V4SI
22347 #define MULTI_ARG_1_HI_DI V2DI_FTYPE_V8HI
22348 #define MULTI_ARG_1_HI_SI V4SI_FTYPE_V8HI
22349 #define MULTI_ARG_1_QI_DI V2DI_FTYPE_V16QI
22350 #define MULTI_ARG_1_QI_SI V4SI_FTYPE_V16QI
22351 #define MULTI_ARG_1_QI_HI V8HI_FTYPE_V16QI
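/* Illustrative note on the aliases above (an assumption-labeled sketch, not
   GCC documentation): MULTI_ARG_<nargs>_<element> names the operand shape,
   e.g. MULTI_ARG_3_HI is V8HI_FTYPE_V8HI_V8HI_V8HI, and the "2" variants
   are the 256-bit forms.  A user-level sketch of a builtin registered with
   the MULTI_ARG_3_HI shape, assuming -mxop:

     typedef short v8hi __attribute__ ((vector_size (16)));

     v8hi
     mac_sswords (v8hi a, v8hi b, v8hi c)
     {
       return __builtin_ia32_vpmacssww (a, b, c);  /* a*b+c, signed saturating */
     }
*/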
22353 static const struct builtin_description bdesc_multi_arg[] =
22354 {
22355 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv4sf4, "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22356 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv2df4, "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22357 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4sf4, "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22358 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv2df4, "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22359 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv4sf4, "__builtin_ia32_vfmsubss", IX86_BUILTIN_VFMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22360 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv2df4, "__builtin_ia32_vfmsubsd", IX86_BUILTIN_VFMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22361 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4sf4, "__builtin_ia32_vfmsubps", IX86_BUILTIN_VFMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22362 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv2df4, "__builtin_ia32_vfmsubpd", IX86_BUILTIN_VFMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22364 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv4sf4, "__builtin_ia32_vfnmaddss", IX86_BUILTIN_VFNMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22365 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv2df4, "__builtin_ia32_vfnmaddsd", IX86_BUILTIN_VFNMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22366 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4sf4, "__builtin_ia32_vfnmaddps", IX86_BUILTIN_VFNMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22367 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv2df4, "__builtin_ia32_vfnmaddpd", IX86_BUILTIN_VFNMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22368 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv4sf4, "__builtin_ia32_vfnmsubss", IX86_BUILTIN_VFNMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22369 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv2df4, "__builtin_ia32_vfnmsubsd", IX86_BUILTIN_VFNMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22370 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4sf4, "__builtin_ia32_vfnmsubps", IX86_BUILTIN_VFNMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22371 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv2df4, "__builtin_ia32_vfnmsubpd", IX86_BUILTIN_VFNMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22373 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4sf4, "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22374 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv2df4, "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22375 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4sf4, "__builtin_ia32_vfmsubaddps", IX86_BUILTIN_VFMSUBADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22376 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv2df4, "__builtin_ia32_vfmsubaddpd", IX86_BUILTIN_VFMSUBADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22378 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv8sf4256, "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22379 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4df4256, "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22380 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv8sf4256, "__builtin_ia32_vfmsubps256", IX86_BUILTIN_VFMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22381 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4df4256, "__builtin_ia32_vfmsubpd256", IX86_BUILTIN_VFMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22383 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv8sf4256, "__builtin_ia32_vfnmaddps256", IX86_BUILTIN_VFNMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22384 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4df4256, "__builtin_ia32_vfnmaddpd256", IX86_BUILTIN_VFNMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22385 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv8sf4256, "__builtin_ia32_vfnmsubps256", IX86_BUILTIN_VFNMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22386 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4df4256, "__builtin_ia32_vfnmsubpd256", IX86_BUILTIN_VFNMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22388 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv8sf4, "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22389 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4df4, "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22390 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv8sf4, "__builtin_ia32_vfmsubaddps256", IX86_BUILTIN_VFMSUBADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22391 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4df4, "__builtin_ia32_vfmsubaddpd256", IX86_BUILTIN_VFMSUBADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22393 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
22394 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
22395 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
22396 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
22397 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi", IX86_BUILTIN_VPCMOV_V16QI, UNKNOWN, (int)MULTI_ARG_3_QI },
22398 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
22399 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },
22401 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22402 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22403 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
22404 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
22405 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
22406 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22407 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22409 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },
22411 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22412 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22413 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22414 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22415 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22416 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22417 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22418 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22419 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22420 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22421 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22422 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22424 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22425 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
22426 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
22427 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
22428 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
22429 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
22430 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
22431 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
22432 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22433 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
22434 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
22435 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
22436 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22437 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
22438 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
22439 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },
22441 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
22442 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
22443 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
22444 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
22445 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2256, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
22446 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2256, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },
22448 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22449 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22450 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22451 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22452 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22453 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22454 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22455 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22456 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22457 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22458 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22459 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22460 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22461 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22462 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22464 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
22465 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22466 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22467 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
22468 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
22469 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
22470 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
22472 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
22473 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22474 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22475 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
22476 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
22477 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
22478 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
22480 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
22481 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22482 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22483 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
22484 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
22485 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
22486 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
22488 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22489 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22490 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22491 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
22492 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
22493 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
22494 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
22496 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
22497 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22498 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22499 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
22500 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
22501 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
22502 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
22504 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
22505 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22506 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22507 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
22508 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
22509 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
22510 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
22512 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
22513 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22514 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22515 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
22516 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
22517 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
22518 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
22520 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22521 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22522 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22523 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
22524 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
22525 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
22526 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
22528 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22529 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22530 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22531 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22532 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub", IX86_BUILTIN_VPCOMFALSEUB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22533 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw", IX86_BUILTIN_VPCOMFALSEUW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22534 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud", IX86_BUILTIN_VPCOMFALSEUD, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22535 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq", IX86_BUILTIN_VPCOMFALSEUQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22537 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22538 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22539 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22540 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22541 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22542 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22543 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22544 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22546 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
22547 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
22548 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
22549 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
22550 };
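/* Orientation sketch (the struct is defined earlier in this file; repeated
   here only as a reading aid, so treat field details as an assumption):
   each row above is a struct builtin_description consumed by the
   registration loops in ix86_init_mmx_sse_builtins below, roughly:

     struct builtin_description
     {
       const unsigned int mask;        /* OPTION_MASK_ISA_* gate            */
       const enum insn_code icode;     /* CODE_FOR_* insn pattern           */
       const char *const name;         /* "__builtin_ia32_*", or 0          */
       const enum ix86_builtins code;  /* IX86_BUILTIN_* index              */
       const enum rtx_code comparison; /* sub-code (EQ, LTU, ...) or UNKNOWN */
       const int flag;                 /* ix86_builtin_func_type encoding   */
     };
*/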
22553 /* Set up all the MMX/SSE builtins, even builtins for instructions that are not
22554 in the current target ISA to allow the user to compile particular modules
22555 with different target specific options that differ from the command line
22556 options. */
22557 static void
22558 ix86_init_mmx_sse_builtins (void)
22559 {
22560 const struct builtin_description * d;
22561 enum ix86_builtin_func_type ftype;
22562 size_t i;
22564 /* Add all special builtins with variable number of operands. */
22565 for (i = 0, d = bdesc_special_args;
22566 i < ARRAY_SIZE (bdesc_special_args);
22567 i++, d++)
22568 {
22569 if (d->name == 0)
22570 continue;
22572 ftype = (enum ix86_builtin_func_type) d->flag;
22573 def_builtin (d->mask, d->name, ftype, d->code);
22574 }
22576 /* Add all builtins with variable number of operands. */
22577 for (i = 0, d = bdesc_args;
22578 i < ARRAY_SIZE (bdesc_args);
22579 i++, d++)
22580 {
22581 if (d->name == 0)
22582 continue;
22584 ftype = (enum ix86_builtin_func_type) d->flag;
22585 def_builtin_const (d->mask, d->name, ftype, d->code);
22586 }
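/* Illustrative only: once this loop has run, a row such as the AVX
   addpd256 entry in bdesc_args is callable from user code (assuming
   -mavx); avxintrin.h wraps it roughly as:

     typedef double v4df __attribute__ ((vector_size (32)));

     v4df
     add4 (v4df a, v4df b)
     {
       return __builtin_ia32_addpd256 (a, b);  /* V4DF_FTYPE_V4DF_V4DF */
     }
*/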
22588 /* pcmpestr[im] insns. */
22589 for (i = 0, d = bdesc_pcmpestr;
22590 i < ARRAY_SIZE (bdesc_pcmpestr);
22591 i++, d++)
22592 {
22593 if (d->code == IX86_BUILTIN_PCMPESTRM128)
22594 ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
22595 else
22596 ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
22597 def_builtin_const (d->mask, d->name, ftype, d->code);
22598 }
22600 /* pcmpistr[im] insns. */
22601 for (i = 0, d = bdesc_pcmpistr;
22602 i < ARRAY_SIZE (bdesc_pcmpistr);
22603 i++, d++)
22604 {
22605 if (d->code == IX86_BUILTIN_PCMPISTRM128)
22606 ftype = V16QI_FTYPE_V16QI_V16QI_INT;
22607 else
22608 ftype = INT_FTYPE_V16QI_V16QI_INT;
22609 def_builtin_const (d->mask, d->name, ftype, d->code);
22610 }
22612 /* comi/ucomi insns. */
22613 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
22614 {
22615 if (d->mask == OPTION_MASK_ISA_SSE2)
22616 ftype = INT_FTYPE_V2DF_V2DF;
22617 else
22618 ftype = INT_FTYPE_V4SF_V4SF;
22619 def_builtin_const (d->mask, d->name, ftype, d->code);
22620 }
22622 /* SSE */
22623 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
22624 VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
22625 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
22626 UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);
22628 /* SSE or 3DNow!A */
22629 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22630 "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
22631 IX86_BUILTIN_MASKMOVQ);
22633 /* SSE2 */
22634 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
22635 VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);
22637 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
22638 VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
22639 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
22640 VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);
22642 /* SSE3. */
22643 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
22644 VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
22645 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
22646 VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);
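/* Illustrative only: pmmintrin.h exposes these two builtins as _mm_monitor
   and _mm_mwait.  A minimal user-level sketch, assuming -msse3:

     #include <pmmintrin.h>

     void
     wait_for_store (volatile int *flag)
     {
       _mm_monitor ((const void *) flag, 0, 0);  /* arm the address monitor  */
       if (*flag == 0)
         _mm_mwait (0, 0);                       /* sleep until it is written */
     }
*/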
22648 /* AES */
22649 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
22650 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
22651 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
22652 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
22653 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
22654 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
22655 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
22656 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
22657 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
22658 V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
22659 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
22660 V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);
22662 /* PCLMUL */
22663 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
22664 V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);
22666 /* MMX access to the vec_init patterns. */
22667 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
22668 V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);
22670 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
22671 V4HI_FTYPE_HI_HI_HI_HI,
22672 IX86_BUILTIN_VEC_INIT_V4HI);
22674 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
22675 V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
22676 IX86_BUILTIN_VEC_INIT_V8QI);
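/* Illustrative only: mmintrin.h reaches the vec_init patterns through
   these builtins; for example _mm_set_pi32 is roughly (assuming -mmmx):

     #include <mmintrin.h>

     __m64
     pack_two (int hi, int lo)
     {
       return _mm_set_pi32 (hi, lo);  /* __builtin_ia32_vec_init_v2si (lo, hi) */
     }
*/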
22678 /* Access to the vec_extract patterns. */
22679 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
22680 DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
22681 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
22682 DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
22683 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
22684 FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
22685 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
22686 SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
22687 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
22688 HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);
22690 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22691 "__builtin_ia32_vec_ext_v4hi",
22692 HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);
22694 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
22695 SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);
22697 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
22698 QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);
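/* Illustrative only: the vec_extract builtins take the lane number as an
   immediate constant.  A minimal sketch, assuming -msse:

     typedef float v4sf __attribute__ ((vector_size (16)));

     float
     first_lane (v4sf x)
     {
       return __builtin_ia32_vec_ext_v4sf (x, 0);  /* FLOAT_FTYPE_V4SF_INT */
     }
*/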
22700 /* Access to the vec_set patterns. */
22701 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
22702 "__builtin_ia32_vec_set_v2di",
22703 V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);
22705 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
22706 V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);
22708 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
22709 V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);
22711 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
22712 V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);
22714 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22715 "__builtin_ia32_vec_set_v4hi",
22716 V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);
22718 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
22719 V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
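/* Illustrative only: the vec_set builtins replace one lane, again with an
   immediate lane index.  A sketch assuming -msse2:

     typedef short v8hi __attribute__ ((vector_size (16)));

     v8hi
     set_lane3 (v8hi x, short v)
     {
       return __builtin_ia32_vec_set_v8hi (x, v, 3);  /* V8HI_FTYPE_V8HI_HI_INT */
     }
*/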
22721 /* Add the FMA4 and XOP multi-argument builtins. */
22722 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
22723 {
22724 if (d->name == 0)
22725 continue;
22727 ftype = (enum ix86_builtin_func_type) d->flag;
22728 def_builtin_const (d->mask, d->name, ftype, d->code);
22729 }
22730 }
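/* Illustrative only: with the loop above run, the FMA4 rows in
   bdesc_multi_arg become callable; fma4intrin.h wraps them roughly as
   (assuming -mfma4):

     typedef float v4sf __attribute__ ((vector_size (16)));

     v4sf
     fused_madd (v4sf a, v4sf b, v4sf c)
     {
       return __builtin_ia32_vfmaddps (a, b, c);  /* a * b + c, MULTI_ARG_3_SF */
     }
*/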
22732 /* Internal method for ix86_init_builtins. */
22734 static void
22735 ix86_init_builtins_va_builtins_abi (void)
22736 {
22737 tree ms_va_ref, sysv_va_ref;
22738 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
22739 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
22740 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
22741 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
22743 if (!TARGET_64BIT)
22744 return;
22745 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
22746 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
22747 ms_va_ref = build_reference_type (ms_va_list_type_node);
22748 sysv_va_ref =
22749 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
22751 fnvoid_va_end_ms =
22752 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22753 fnvoid_va_start_ms =
22754 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22755 fnvoid_va_end_sysv =
22756 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
22757 fnvoid_va_start_sysv =
22758 build_varargs_function_type_list (void_type_node, sysv_va_ref,
22759 NULL_TREE);
22760 fnvoid_va_copy_ms =
22761 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
22762 NULL_TREE);
22763 fnvoid_va_copy_sysv =
22764 build_function_type_list (void_type_node, sysv_va_ref,
22765 sysv_va_ref, NULL_TREE);
22767 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
22768 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
22769 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
22770 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
22771 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
22772 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
22773 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
22774 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22775 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
22776 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22777 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
22778 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
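/* Illustrative only (a sketch, not GCC-documented usage): user code reaches
   these through __builtin_ms_va_list and friends; an explicitly ms_abi
   varargs function on x86-64 might look like:

     int __attribute__ ((ms_abi))
     sum_ints (int n, ...)
     {
       __builtin_ms_va_list ap;
       int i, s = 0;
       __builtin_ms_va_start (ap, n);
       for (i = 0; i < n; i++)
         s += __builtin_va_arg (ap, int);
       __builtin_ms_va_end (ap);
       return s;
     }
*/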
22779 }
22781 static void
22782 ix86_init_builtin_types (void)
22783 {
22784 tree float128_type_node, float80_type_node;
22786 /* The __float80 type. */
22787 float80_type_node = long_double_type_node;
22788 if (TYPE_MODE (float80_type_node) != XFmode)
22789 {
22790 /* The __float80 type. */
22791 float80_type_node = make_node (REAL_TYPE);
22793 TYPE_PRECISION (float80_type_node) = 80;
22794 layout_type (float80_type_node);
22795 }
22796 (*lang_hooks.types.register_builtin_type) (float80_type_node, "__float80");
22798 /* The __float128 type. */
22799 float128_type_node = make_node (REAL_TYPE);
22800 TYPE_PRECISION (float128_type_node) = 128;
22801 layout_type (float128_type_node);
22802 (*lang_hooks.types.register_builtin_type) (float128_type_node, "__float128");
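/* Illustrative only: once registered, both types are usable directly from
   C (TFmode arithmetic is routed through libgcc where no hardware support
   exists):

     __float80
     half80 (__float80 x)
     {
       return x / 2;
     }

     __float128
     twice128 (__float128 x)
     {
       return x + x;
     }
*/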
22804 /* This macro is built by i386-builtin-types.awk. */
22805 DEFINE_BUILTIN_PRIMITIVE_TYPES;
22806 }
22808 static void
22809 ix86_init_builtins (void)
22810 {
22811 tree t;
22813 ix86_init_builtin_types ();
22815 /* TFmode support builtins. */
22816 def_builtin_const (0, "__builtin_infq",
22817 FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
22818 def_builtin_const (0, "__builtin_huge_valq",
22819 FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);
22821 /* We will expand them to a normal call if SSE2 isn't available, since
22822 they are used by libgcc. */
22823 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
22824 t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
22825 BUILT_IN_MD, "__fabstf2", NULL_TREE);
22826 TREE_READONLY (t) = 1;
22827 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;
22829 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
22830 t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
22831 BUILT_IN_MD, "__copysigntf3", NULL_TREE);
22832 TREE_READONLY (t) = 1;
22833 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
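/* Illustrative only: these two surface directly to users, e.g.:

     __float128
     signed_magnitude (__float128 a, __float128 b)
     {
       return __builtin_copysignq (__builtin_fabsq (a), b);  /* |a| with b's sign */
     }
*/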
22835 ix86_init_mmx_sse_builtins ();
22837 if (TARGET_64BIT)
22838 ix86_init_builtins_va_builtins_abi ();
22839 }
22841 /* Return the ix86 builtin for CODE. */
22843 static tree
22844 ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
22845 {
22846 if (code >= IX86_BUILTIN_MAX)
22847 return error_mark_node;
22849 return ix86_builtins[code];
22850 }
22852 /* Errors in the source file can cause expand_expr to return const0_rtx
22853 where we expect a vector. To avoid crashing, use one of the vector
22854 clear instructions. */
22855 static rtx
22856 safe_vector_operand (rtx x, enum machine_mode mode)
22857 {
22858 if (x == const0_rtx)
22859 x = CONST0_RTX (mode);
22860 return x;
22861 }
22863 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
22865 static rtx
22866 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
22867 {
22868 rtx pat;
22869 tree arg0 = CALL_EXPR_ARG (exp, 0);
22870 tree arg1 = CALL_EXPR_ARG (exp, 1);
22871 rtx op0 = expand_normal (arg0);
22872 rtx op1 = expand_normal (arg1);
22873 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22874 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
22875 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
22877 if (VECTOR_MODE_P (mode0))
22878 op0 = safe_vector_operand (op0, mode0);
22879 if (VECTOR_MODE_P (mode1))
22880 op1 = safe_vector_operand (op1, mode1);
22882 if (optimize || !target
22883 || GET_MODE (target) != tmode
22884 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
22885 target = gen_reg_rtx (tmode);
22887 if (GET_MODE (op1) == SImode && mode1 == TImode)
22888 {
22889 rtx x = gen_reg_rtx (V4SImode);
22890 emit_insn (gen_sse2_loadd (x, op1));
22891 op1 = gen_lowpart (TImode, x);
22892 }
22894 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
22895 op0 = copy_to_mode_reg (mode0, op0);
22896 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
22897 op1 = copy_to_mode_reg (mode1, op1);
22899 pat = GEN_FCN (icode) (target, op0, op1);
22900 if (! pat)
22901 return 0;
22903 emit_insn (pat);
22904 return target;
22905 }
22908 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
22910 static rtx
22911 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
22912 enum ix86_builtin_func_type m_type,
22913 enum rtx_code sub_code)
22914 {
22915 rtx pat;
22916 int i;
22917 int nargs;
22918 bool comparison_p = false;
22919 bool tf_p = false;
22920 bool last_arg_constant = false;
22921 int num_memory = 0;
22922 struct {
22923 rtx op;
22924 enum machine_mode mode;
22925 } args[4];
22927 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22929 switch (m_type)
22930 {
22931 case MULTI_ARG_4_DF2_DI_I:
22932 case MULTI_ARG_4_DF2_DI_I1:
22933 case MULTI_ARG_4_SF2_SI_I:
22934 case MULTI_ARG_4_SF2_SI_I1:
22935 nargs = 4;
22936 last_arg_constant = true;
22937 break;
22939 case MULTI_ARG_3_SF:
22940 case MULTI_ARG_3_DF:
22941 case MULTI_ARG_3_SF2:
22942 case MULTI_ARG_3_DF2:
22943 case MULTI_ARG_3_DI:
22944 case MULTI_ARG_3_SI:
22945 case MULTI_ARG_3_SI_DI:
22946 case MULTI_ARG_3_HI:
22947 case MULTI_ARG_3_HI_SI:
22948 case MULTI_ARG_3_QI:
22949 case MULTI_ARG_3_DI2:
22950 case MULTI_ARG_3_SI2:
22951 case MULTI_ARG_3_HI2:
22952 case MULTI_ARG_3_QI2:
22953 nargs = 3;
22954 break;
22956 case MULTI_ARG_2_SF:
22957 case MULTI_ARG_2_DF:
22958 case MULTI_ARG_2_DI:
22959 case MULTI_ARG_2_SI:
22960 case MULTI_ARG_2_HI:
22961 case MULTI_ARG_2_QI:
22962 nargs = 2;
22963 break;
22965 case MULTI_ARG_2_DI_IMM:
22966 case MULTI_ARG_2_SI_IMM:
22967 case MULTI_ARG_2_HI_IMM:
22968 case MULTI_ARG_2_QI_IMM:
22969 nargs = 2;
22970 last_arg_constant = true;
22971 break;
22973 case MULTI_ARG_1_SF:
22974 case MULTI_ARG_1_DF:
22975 case MULTI_ARG_1_SF2:
22976 case MULTI_ARG_1_DF2:
22977 case MULTI_ARG_1_DI:
22978 case MULTI_ARG_1_SI:
22979 case MULTI_ARG_1_HI:
22980 case MULTI_ARG_1_QI:
22981 case MULTI_ARG_1_SI_DI:
22982 case MULTI_ARG_1_HI_DI:
22983 case MULTI_ARG_1_HI_SI:
22984 case MULTI_ARG_1_QI_DI:
22985 case MULTI_ARG_1_QI_SI:
22986 case MULTI_ARG_1_QI_HI:
22987 nargs = 1;
22988 break;
22990 case MULTI_ARG_2_DI_CMP:
22991 case MULTI_ARG_2_SI_CMP:
22992 case MULTI_ARG_2_HI_CMP:
22993 case MULTI_ARG_2_QI_CMP:
22994 nargs = 2;
22995 comparison_p = true;
22996 break;
22998 case MULTI_ARG_2_SF_TF:
22999 case MULTI_ARG_2_DF_TF:
23000 case MULTI_ARG_2_DI_TF:
23001 case MULTI_ARG_2_SI_TF:
23002 case MULTI_ARG_2_HI_TF:
23003 case MULTI_ARG_2_QI_TF:
23004 nargs = 2;
23005 tf_p = true;
23006 break;
23008 default:
23009 gcc_unreachable ();
23010 }
23012 if (optimize || !target
23013 || GET_MODE (target) != tmode
23014 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23015 target = gen_reg_rtx (tmode);
23017 gcc_assert (nargs <= 4);
23019 for (i = 0; i < nargs; i++)
23020 {
23021 tree arg = CALL_EXPR_ARG (exp, i);
23022 rtx op = expand_normal (arg);
23023 int adjust = (comparison_p) ? 1 : 0;
23024 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
23026 if (last_arg_constant && i == nargs-1)
23027 {
23028 if (!CONST_INT_P (op))
23029 {
23030 error ("last argument must be an immediate");
23031 return gen_reg_rtx (tmode);
23032 }
23033 }
23034 else
23035 {
23036 if (VECTOR_MODE_P (mode))
23037 op = safe_vector_operand (op, mode);
23039 /* If we aren't optimizing, only allow one memory operand to be
23040 generated. */
23041 if (memory_operand (op, mode))
23042 num_memory++;
23044 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
23046 if (optimize
23047 || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
23048 || num_memory > 1)
23049 op = force_reg (mode, op);
23050 }
23052 args[i].op = op;
23053 args[i].mode = mode;
23054 }
23056 switch (nargs)
23057 {
23058 case 1:
23059 pat = GEN_FCN (icode) (target, args[0].op);
23060 break;
23062 case 2:
23063 if (tf_p)
23064 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
23065 GEN_INT ((int)sub_code));
23066 else if (! comparison_p)
23067 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
23068 else
23069 {
23070 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
23071 args[0].op,
23072 args[1].op);
23074 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
23075 }
23076 break;
23078 case 3:
23079 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
23080 break;
23082 case 4:
23083 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op, args[3].op);
23084 break;
23086 default:
23087 gcc_unreachable ();
23088 }
23090 if (! pat)
23091 return 0;
23093 emit_insn (pat);
23094 return target;
23095 }
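/* Illustrative only: for a comparison row such as __builtin_ia32_vpcomltd
   (MULTI_ARG_2_SI_CMP, sub_code LT), nargs is 2 and comparison_p is set,
   so the expander above wraps the operands in (lt:V4SI op0 op1) before
   handing them to the insn pattern.  User-level sketch, assuming -mxop:

     typedef int v4si __attribute__ ((vector_size (16)));

     v4si
     lt_mask (v4si a, v4si b)
     {
       return __builtin_ia32_vpcomltd (a, b);  /* all-ones lanes where a < b */
     }
*/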
23097 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
23098 insns with vec_merge. */
23100 static rtx
23101 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
23102 rtx target)
23103 {
23104 rtx pat;
23105 tree arg0 = CALL_EXPR_ARG (exp, 0);
23106 rtx op1, op0 = expand_normal (arg0);
23107 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23108 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23110 if (optimize || !target
23111 || GET_MODE (target) != tmode
23112 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23113 target = gen_reg_rtx (tmode);
23115 if (VECTOR_MODE_P (mode0))
23116 op0 = safe_vector_operand (op0, mode0);
23118 if ((optimize && !register_operand (op0, mode0))
23119 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
23120 op0 = copy_to_mode_reg (mode0, op0);
23123 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
23124 op1 = copy_to_mode_reg (mode0, op1);
23126 pat = GEN_FCN (icode) (target, op0, op1);
23133 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
23136 ix86_expand_sse_compare (const struct builtin_description *d,
23137 tree exp, rtx target, bool swap)
23140 tree arg0 = CALL_EXPR_ARG (exp, 0);
23141 tree arg1 = CALL_EXPR_ARG (exp, 1);
23142 rtx op0 = expand_normal (arg0);
23143 rtx op1 = expand_normal (arg1);
23145 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
23146 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
23147 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
23148 enum rtx_code comparison = d->comparison;
23150 if (VECTOR_MODE_P (mode0))
23151 op0 = safe_vector_operand (op0, mode0);
23152 if (VECTOR_MODE_P (mode1))
23153 op1 = safe_vector_operand (op1, mode1);
/* Swap operands if we have a comparison that isn't available in
   SSE.  */
23159 rtx tmp = gen_reg_rtx (mode1);
23160 emit_move_insn (tmp, op1);
23165 if (optimize || !target
23166 || GET_MODE (target) != tmode
23167 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
23168 target = gen_reg_rtx (tmode);
23170 if ((optimize && !register_operand (op0, mode0))
23171 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
23172 op0 = copy_to_mode_reg (mode0, op0);
23173 if ((optimize && !register_operand (op1, mode1))
23174 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
23175 op1 = copy_to_mode_reg (mode1, op1);
23177 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
23178 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
23185 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
23188 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
23192 tree arg0 = CALL_EXPR_ARG (exp, 0);
23193 tree arg1 = CALL_EXPR_ARG (exp, 1);
23194 rtx op0 = expand_normal (arg0);
23195 rtx op1 = expand_normal (arg1);
23196 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23197 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23198 enum rtx_code comparison = d->comparison;
23200 if (VECTOR_MODE_P (mode0))
23201 op0 = safe_vector_operand (op0, mode0);
23202 if (VECTOR_MODE_P (mode1))
23203 op1 = safe_vector_operand (op1, mode1);
/* Swap operands if we have a comparison that isn't available in
   SSE.  */
23207 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23214 target = gen_reg_rtx (SImode);
23215 emit_move_insn (target, const0_rtx);
23216 target = gen_rtx_SUBREG (QImode, target, 0);
23218 if ((optimize && !register_operand (op0, mode0))
23219 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23220 op0 = copy_to_mode_reg (mode0, op0);
23221 if ((optimize && !register_operand (op1, mode1))
23222 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23223 op1 = copy_to_mode_reg (mode1, op1);
23225 pat = GEN_FCN (d->icode) (op0, op1);
23229 emit_insn (gen_rtx_SET (VOIDmode,
23230 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23231 gen_rtx_fmt_ee (comparison, QImode,
23235 return SUBREG_REG (target);
23238 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
23241 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
23245 tree arg0 = CALL_EXPR_ARG (exp, 0);
23246 tree arg1 = CALL_EXPR_ARG (exp, 1);
23247 rtx op0 = expand_normal (arg0);
23248 rtx op1 = expand_normal (arg1);
23249 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23250 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23251 enum rtx_code comparison = d->comparison;
23253 if (VECTOR_MODE_P (mode0))
23254 op0 = safe_vector_operand (op0, mode0);
23255 if (VECTOR_MODE_P (mode1))
23256 op1 = safe_vector_operand (op1, mode1);
23258 target = gen_reg_rtx (SImode);
23259 emit_move_insn (target, const0_rtx);
23260 target = gen_rtx_SUBREG (QImode, target, 0);
23262 if ((optimize && !register_operand (op0, mode0))
23263 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23264 op0 = copy_to_mode_reg (mode0, op0);
23265 if ((optimize && !register_operand (op1, mode1))
23266 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23267 op1 = copy_to_mode_reg (mode1, op1);
23269 pat = GEN_FCN (d->icode) (op0, op1);
23273 emit_insn (gen_rtx_SET (VOIDmode,
23274 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23275 gen_rtx_fmt_ee (comparison, QImode,
23279 return SUBREG_REG (target);
23282 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
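/* The five call arguments map to (v1, len1, v2, len2, imm8), as in
   _mm_cmpestri and _mm_cmpestrm (illustrative).  The insn produces both
   an index result (tmode0) and a mask result (tmode1); we return the one
   the particular builtin wants, or a flags bit for the _mm_cmpestr[acosz]
   forms.  */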
23285 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23286 tree exp, rtx target)
23289 tree arg0 = CALL_EXPR_ARG (exp, 0);
23290 tree arg1 = CALL_EXPR_ARG (exp, 1);
23291 tree arg2 = CALL_EXPR_ARG (exp, 2);
23292 tree arg3 = CALL_EXPR_ARG (exp, 3);
23293 tree arg4 = CALL_EXPR_ARG (exp, 4);
23294 rtx scratch0, scratch1;
23295 rtx op0 = expand_normal (arg0);
23296 rtx op1 = expand_normal (arg1);
23297 rtx op2 = expand_normal (arg2);
23298 rtx op3 = expand_normal (arg3);
23299 rtx op4 = expand_normal (arg4);
23300 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23302 tmode0 = insn_data[d->icode].operand[0].mode;
23303 tmode1 = insn_data[d->icode].operand[1].mode;
23304 modev2 = insn_data[d->icode].operand[2].mode;
23305 modei3 = insn_data[d->icode].operand[3].mode;
23306 modev4 = insn_data[d->icode].operand[4].mode;
23307 modei5 = insn_data[d->icode].operand[5].mode;
23308 modeimm = insn_data[d->icode].operand[6].mode;
23310 if (VECTOR_MODE_P (modev2))
23311 op0 = safe_vector_operand (op0, modev2);
23312 if (VECTOR_MODE_P (modev4))
23313 op2 = safe_vector_operand (op2, modev4);
23315 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23316 op0 = copy_to_mode_reg (modev2, op0);
23317 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
23318 op1 = copy_to_mode_reg (modei3, op1);
23319 if ((optimize && !register_operand (op2, modev4))
23320 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
23321 op2 = copy_to_mode_reg (modev4, op2);
23322 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
23323 op3 = copy_to_mode_reg (modei5, op3);
23325 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
error ("the fifth argument must be an 8-bit immediate");
23331 if (d->code == IX86_BUILTIN_PCMPESTRI128)
23333 if (optimize || !target
23334 || GET_MODE (target) != tmode0
23335 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23336 target = gen_reg_rtx (tmode0);
23338 scratch1 = gen_reg_rtx (tmode1);
23340 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
23342 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
23344 if (optimize || !target
23345 || GET_MODE (target) != tmode1
23346 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23347 target = gen_reg_rtx (tmode1);
23349 scratch0 = gen_reg_rtx (tmode0);
23351 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
23355 gcc_assert (d->flag);
23357 scratch0 = gen_reg_rtx (tmode0);
23358 scratch1 = gen_reg_rtx (tmode1);
23360 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
23370 target = gen_reg_rtx (SImode);
23371 emit_move_insn (target, const0_rtx);
23372 target = gen_rtx_SUBREG (QImode, target, 0);
23375 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23376 gen_rtx_fmt_ee (EQ, QImode,
23377 gen_rtx_REG ((enum machine_mode) d->flag,
23380 return SUBREG_REG (target);
23387 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
23390 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
23391 tree exp, rtx target)
23394 tree arg0 = CALL_EXPR_ARG (exp, 0);
23395 tree arg1 = CALL_EXPR_ARG (exp, 1);
23396 tree arg2 = CALL_EXPR_ARG (exp, 2);
23397 rtx scratch0, scratch1;
23398 rtx op0 = expand_normal (arg0);
23399 rtx op1 = expand_normal (arg1);
23400 rtx op2 = expand_normal (arg2);
23401 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
23403 tmode0 = insn_data[d->icode].operand[0].mode;
23404 tmode1 = insn_data[d->icode].operand[1].mode;
23405 modev2 = insn_data[d->icode].operand[2].mode;
23406 modev3 = insn_data[d->icode].operand[3].mode;
23407 modeimm = insn_data[d->icode].operand[4].mode;
23409 if (VECTOR_MODE_P (modev2))
23410 op0 = safe_vector_operand (op0, modev2);
23411 if (VECTOR_MODE_P (modev3))
23412 op1 = safe_vector_operand (op1, modev3);
23414 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23415 op0 = copy_to_mode_reg (modev2, op0);
23416 if ((optimize && !register_operand (op1, modev3))
23417 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
23418 op1 = copy_to_mode_reg (modev3, op1);
23420 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
error ("the third argument must be an 8-bit immediate");
23426 if (d->code == IX86_BUILTIN_PCMPISTRI128)
23428 if (optimize || !target
23429 || GET_MODE (target) != tmode0
23430 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23431 target = gen_reg_rtx (tmode0);
23433 scratch1 = gen_reg_rtx (tmode1);
23435 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
23437 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
23439 if (optimize || !target
23440 || GET_MODE (target) != tmode1
23441 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23442 target = gen_reg_rtx (tmode1);
23444 scratch0 = gen_reg_rtx (tmode0);
23446 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
23450 gcc_assert (d->flag);
23452 scratch0 = gen_reg_rtx (tmode0);
23453 scratch1 = gen_reg_rtx (tmode1);
23455 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
23465 target = gen_reg_rtx (SImode);
23466 emit_move_insn (target, const0_rtx);
23467 target = gen_rtx_SUBREG (QImode, target, 0);
23470 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23471 gen_rtx_fmt_ee (EQ, QImode,
23472 gen_rtx_REG ((enum machine_mode) d->flag,
23475 return SUBREG_REG (target);
23481 /* Subroutine of ix86_expand_builtin to take care of insns with
23482 variable number of operands. */
23485 ix86_expand_args_builtin (const struct builtin_description *d,
23486 tree exp, rtx target)
23488 rtx pat, real_target;
23489 unsigned int i, nargs;
23490 unsigned int nargs_constant = 0;
23491 int num_memory = 0;
23495 enum machine_mode mode;
23497 bool last_arg_count = false;
23498 enum insn_code icode = d->icode;
23499 const struct insn_data *insn_p = &insn_data[icode];
23500 enum machine_mode tmode = insn_p->operand[0].mode;
23501 enum machine_mode rmode = VOIDmode;
23503 enum rtx_code comparison = d->comparison;
23505 switch ((enum ix86_builtin_func_type) d->flag)
23507 case INT_FTYPE_V8SF_V8SF_PTEST:
23508 case INT_FTYPE_V4DI_V4DI_PTEST:
23509 case INT_FTYPE_V4DF_V4DF_PTEST:
23510 case INT_FTYPE_V4SF_V4SF_PTEST:
23511 case INT_FTYPE_V2DI_V2DI_PTEST:
23512 case INT_FTYPE_V2DF_V2DF_PTEST:
23513 return ix86_expand_sse_ptest (d, exp, target);
23514 case FLOAT128_FTYPE_FLOAT128:
23515 case FLOAT_FTYPE_FLOAT:
23516 case INT_FTYPE_INT:
23517 case UINT64_FTYPE_INT:
23518 case UINT16_FTYPE_UINT16:
23519 case INT64_FTYPE_INT64:
23520 case INT64_FTYPE_V4SF:
23521 case INT64_FTYPE_V2DF:
23522 case INT_FTYPE_V16QI:
23523 case INT_FTYPE_V8QI:
23524 case INT_FTYPE_V8SF:
23525 case INT_FTYPE_V4DF:
23526 case INT_FTYPE_V4SF:
23527 case INT_FTYPE_V2DF:
23528 case V16QI_FTYPE_V16QI:
23529 case V8SI_FTYPE_V8SF:
23530 case V8SI_FTYPE_V4SI:
23531 case V8HI_FTYPE_V8HI:
23532 case V8HI_FTYPE_V16QI:
23533 case V8QI_FTYPE_V8QI:
23534 case V8SF_FTYPE_V8SF:
23535 case V8SF_FTYPE_V8SI:
23536 case V8SF_FTYPE_V4SF:
23537 case V4SI_FTYPE_V4SI:
23538 case V4SI_FTYPE_V16QI:
23539 case V4SI_FTYPE_V4SF:
23540 case V4SI_FTYPE_V8SI:
23541 case V4SI_FTYPE_V8HI:
23542 case V4SI_FTYPE_V4DF:
23543 case V4SI_FTYPE_V2DF:
23544 case V4HI_FTYPE_V4HI:
23545 case V4DF_FTYPE_V4DF:
23546 case V4DF_FTYPE_V4SI:
23547 case V4DF_FTYPE_V4SF:
23548 case V4DF_FTYPE_V2DF:
23549 case V4SF_FTYPE_V4SF:
23550 case V4SF_FTYPE_V4SI:
23551 case V4SF_FTYPE_V8SF:
23552 case V4SF_FTYPE_V4DF:
23553 case V4SF_FTYPE_V2DF:
23554 case V2DI_FTYPE_V2DI:
23555 case V2DI_FTYPE_V16QI:
23556 case V2DI_FTYPE_V8HI:
23557 case V2DI_FTYPE_V4SI:
23558 case V2DF_FTYPE_V2DF:
23559 case V2DF_FTYPE_V4SI:
23560 case V2DF_FTYPE_V4DF:
23561 case V2DF_FTYPE_V4SF:
23562 case V2DF_FTYPE_V2SI:
23563 case V2SI_FTYPE_V2SI:
23564 case V2SI_FTYPE_V4SF:
23565 case V2SI_FTYPE_V2SF:
23566 case V2SI_FTYPE_V2DF:
23567 case V2SF_FTYPE_V2SF:
23568 case V2SF_FTYPE_V2SI:
23571 case V4SF_FTYPE_V4SF_VEC_MERGE:
23572 case V2DF_FTYPE_V2DF_VEC_MERGE:
23573 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
23574 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
23575 case V16QI_FTYPE_V16QI_V16QI:
23576 case V16QI_FTYPE_V8HI_V8HI:
23577 case V8QI_FTYPE_V8QI_V8QI:
23578 case V8QI_FTYPE_V4HI_V4HI:
23579 case V8HI_FTYPE_V8HI_V8HI:
23580 case V8HI_FTYPE_V16QI_V16QI:
23581 case V8HI_FTYPE_V4SI_V4SI:
23582 case V8SF_FTYPE_V8SF_V8SF:
23583 case V8SF_FTYPE_V8SF_V8SI:
23584 case V4SI_FTYPE_V4SI_V4SI:
23585 case V4SI_FTYPE_V8HI_V8HI:
23586 case V4SI_FTYPE_V4SF_V4SF:
23587 case V4SI_FTYPE_V2DF_V2DF:
23588 case V4HI_FTYPE_V4HI_V4HI:
23589 case V4HI_FTYPE_V8QI_V8QI:
23590 case V4HI_FTYPE_V2SI_V2SI:
23591 case V4DF_FTYPE_V4DF_V4DF:
23592 case V4DF_FTYPE_V4DF_V4DI:
23593 case V4SF_FTYPE_V4SF_V4SF:
23594 case V4SF_FTYPE_V4SF_V4SI:
23595 case V4SF_FTYPE_V4SF_V2SI:
23596 case V4SF_FTYPE_V4SF_V2DF:
23597 case V4SF_FTYPE_V4SF_DI:
23598 case V4SF_FTYPE_V4SF_SI:
23599 case V2DI_FTYPE_V2DI_V2DI:
23600 case V2DI_FTYPE_V16QI_V16QI:
23601 case V2DI_FTYPE_V4SI_V4SI:
23602 case V2DI_FTYPE_V2DI_V16QI:
23603 case V2DI_FTYPE_V2DF_V2DF:
23604 case V2SI_FTYPE_V2SI_V2SI:
23605 case V2SI_FTYPE_V4HI_V4HI:
23606 case V2SI_FTYPE_V2SF_V2SF:
23607 case V2DF_FTYPE_V2DF_V2DF:
23608 case V2DF_FTYPE_V2DF_V4SF:
23609 case V2DF_FTYPE_V2DF_V2DI:
23610 case V2DF_FTYPE_V2DF_DI:
23611 case V2DF_FTYPE_V2DF_SI:
23612 case V2SF_FTYPE_V2SF_V2SF:
23613 case V1DI_FTYPE_V1DI_V1DI:
23614 case V1DI_FTYPE_V8QI_V8QI:
23615 case V1DI_FTYPE_V2SI_V2SI:
23616 if (comparison == UNKNOWN)
23617 return ix86_expand_binop_builtin (icode, exp, target);
23620 case V4SF_FTYPE_V4SF_V4SF_SWAP:
23621 case V2DF_FTYPE_V2DF_V2DF_SWAP:
23622 gcc_assert (comparison != UNKNOWN);
23626 case V8HI_FTYPE_V8HI_V8HI_COUNT:
23627 case V8HI_FTYPE_V8HI_SI_COUNT:
23628 case V4SI_FTYPE_V4SI_V4SI_COUNT:
23629 case V4SI_FTYPE_V4SI_SI_COUNT:
23630 case V4HI_FTYPE_V4HI_V4HI_COUNT:
23631 case V4HI_FTYPE_V4HI_SI_COUNT:
23632 case V2DI_FTYPE_V2DI_V2DI_COUNT:
23633 case V2DI_FTYPE_V2DI_SI_COUNT:
23634 case V2SI_FTYPE_V2SI_V2SI_COUNT:
23635 case V2SI_FTYPE_V2SI_SI_COUNT:
23636 case V1DI_FTYPE_V1DI_V1DI_COUNT:
23637 case V1DI_FTYPE_V1DI_SI_COUNT:
23639 last_arg_count = true;
23641 case UINT64_FTYPE_UINT64_UINT64:
23642 case UINT_FTYPE_UINT_UINT:
23643 case UINT_FTYPE_UINT_USHORT:
23644 case UINT_FTYPE_UINT_UCHAR:
23645 case UINT16_FTYPE_UINT16_INT:
23646 case UINT8_FTYPE_UINT8_INT:
23649 case V2DI_FTYPE_V2DI_INT_CONVERT:
23652 nargs_constant = 1;
23654 case V8HI_FTYPE_V8HI_INT:
23655 case V8SF_FTYPE_V8SF_INT:
23656 case V4SI_FTYPE_V4SI_INT:
23657 case V4SI_FTYPE_V8SI_INT:
23658 case V4HI_FTYPE_V4HI_INT:
23659 case V4DF_FTYPE_V4DF_INT:
23660 case V4SF_FTYPE_V4SF_INT:
23661 case V4SF_FTYPE_V8SF_INT:
23662 case V2DI_FTYPE_V2DI_INT:
23663 case V2DF_FTYPE_V2DF_INT:
23664 case V2DF_FTYPE_V4DF_INT:
23666 nargs_constant = 1;
23668 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23669 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23670 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23671 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23672 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23675 case V16QI_FTYPE_V16QI_V16QI_INT:
23676 case V8HI_FTYPE_V8HI_V8HI_INT:
23677 case V8SI_FTYPE_V8SI_V8SI_INT:
23678 case V8SI_FTYPE_V8SI_V4SI_INT:
23679 case V8SF_FTYPE_V8SF_V8SF_INT:
23680 case V8SF_FTYPE_V8SF_V4SF_INT:
23681 case V4SI_FTYPE_V4SI_V4SI_INT:
23682 case V4DF_FTYPE_V4DF_V4DF_INT:
23683 case V4DF_FTYPE_V4DF_V2DF_INT:
23684 case V4SF_FTYPE_V4SF_V4SF_INT:
23685 case V2DI_FTYPE_V2DI_V2DI_INT:
23686 case V2DF_FTYPE_V2DF_V2DF_INT:
23688 nargs_constant = 1;
23690 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
23693 nargs_constant = 1;
23695 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
23698 nargs_constant = 1;
23700 case V2DI_FTYPE_V2DI_UINT_UINT:
23702 nargs_constant = 2;
23704 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
23705 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
23706 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
23707 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
23709 nargs_constant = 1;
23711 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23713 nargs_constant = 2;
23716 gcc_unreachable ();
23719 gcc_assert (nargs <= ARRAY_SIZE (args));
23721 if (comparison != UNKNOWN)
23723 gcc_assert (nargs == 2);
23724 return ix86_expand_sse_compare (d, exp, target, swap);
23727 if (rmode == VOIDmode || rmode == tmode)
23731 || GET_MODE (target) != tmode
23732 || ! (*insn_p->operand[0].predicate) (target, tmode))
23733 target = gen_reg_rtx (tmode);
23734 real_target = target;
23738 target = gen_reg_rtx (rmode);
23739 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
23742 for (i = 0; i < nargs; i++)
23744 tree arg = CALL_EXPR_ARG (exp, i);
23745 rtx op = expand_normal (arg);
23746 enum machine_mode mode = insn_p->operand[i + 1].mode;
23747 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
23749 if (last_arg_count && (i + 1) == nargs)
/* SIMD shift insns take either an 8-bit immediate or a register
   as the count.  But the builtin functions take an int as the
   count.  If the count doesn't match, we put it in a register.  */
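/* For example (illustrative): _mm_slli_epi16 (v, n) arrives here with
   an int count; when the count is not a matching immediate, it is
   narrowed to SImode and forced into a register.  */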
23756 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
23757 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
23758 op = copy_to_reg (op);
23761 else if ((nargs - i) <= nargs_constant)
23766 case CODE_FOR_sse4_1_roundpd:
23767 case CODE_FOR_sse4_1_roundps:
23768 case CODE_FOR_sse4_1_roundsd:
23769 case CODE_FOR_sse4_1_roundss:
23770 case CODE_FOR_sse4_1_blendps:
23771 case CODE_FOR_avx_blendpd256:
23772 case CODE_FOR_avx_vpermilv4df:
23773 case CODE_FOR_avx_roundpd256:
23774 case CODE_FOR_avx_roundps256:
23775 error ("the last argument must be a 4-bit immediate");
23778 case CODE_FOR_sse4_1_blendpd:
23779 case CODE_FOR_avx_vpermilv2df:
23780 case CODE_FOR_xop_vpermil2v2df3:
23781 case CODE_FOR_xop_vpermil2v4sf3:
23782 case CODE_FOR_xop_vpermil2v4df3:
23783 case CODE_FOR_xop_vpermil2v8sf3:
23784 error ("the last argument must be a 2-bit immediate");
23787 case CODE_FOR_avx_vextractf128v4df:
23788 case CODE_FOR_avx_vextractf128v8sf:
23789 case CODE_FOR_avx_vextractf128v8si:
23790 case CODE_FOR_avx_vinsertf128v4df:
23791 case CODE_FOR_avx_vinsertf128v8sf:
23792 case CODE_FOR_avx_vinsertf128v8si:
23793 error ("the last argument must be a 1-bit immediate");
23796 case CODE_FOR_avx_cmpsdv2df3:
23797 case CODE_FOR_avx_cmpssv4sf3:
23798 case CODE_FOR_avx_cmppdv2df3:
23799 case CODE_FOR_avx_cmppsv4sf3:
23800 case CODE_FOR_avx_cmppdv4df3:
23801 case CODE_FOR_avx_cmppsv8sf3:
23802 error ("the last argument must be a 5-bit immediate");
23806 switch (nargs_constant)
23809 if ((nargs - i) == nargs_constant)
23811 error ("the next to last argument must be an 8-bit immediate");
23815 error ("the last argument must be an 8-bit immediate");
23818 gcc_unreachable ();
23825 if (VECTOR_MODE_P (mode))
23826 op = safe_vector_operand (op, mode);
/* If we aren't optimizing, only allow one memory operand to be
   generated.  */
23830 if (memory_operand (op, mode))
23833 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
23835 if (optimize || !match || num_memory > 1)
23836 op = copy_to_mode_reg (mode, op);
23840 op = copy_to_reg (op);
23841 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
23846 args[i].mode = mode;
23852 pat = GEN_FCN (icode) (real_target, args[0].op);
23855 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
23858 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23862 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23863 args[2].op, args[3].op);
23866 gcc_unreachable ();
23876 /* Subroutine of ix86_expand_builtin to take care of special insns
23877 with variable number of operands. */
23880 ix86_expand_special_args_builtin (const struct builtin_description *d,
23881 tree exp, rtx target)
23885 unsigned int i, nargs, arg_adjust, memory;
23889 enum machine_mode mode;
23891 enum insn_code icode = d->icode;
23892 bool last_arg_constant = false;
23893 const struct insn_data *insn_p = &insn_data[icode];
23894 enum machine_mode tmode = insn_p->operand[0].mode;
23895 enum { load, store } klass;
23897 switch ((enum ix86_builtin_func_type) d->flag)
23899 case VOID_FTYPE_VOID:
23900 emit_insn (GEN_FCN (icode) (target));
23902 case UINT64_FTYPE_VOID:
23907 case UINT64_FTYPE_PUNSIGNED:
23908 case V2DI_FTYPE_PV2DI:
23909 case V32QI_FTYPE_PCCHAR:
23910 case V16QI_FTYPE_PCCHAR:
23911 case V8SF_FTYPE_PCV4SF:
23912 case V8SF_FTYPE_PCFLOAT:
23913 case V4SF_FTYPE_PCFLOAT:
23914 case V4DF_FTYPE_PCV2DF:
23915 case V4DF_FTYPE_PCDOUBLE:
23916 case V2DF_FTYPE_PCDOUBLE:
23917 case VOID_FTYPE_PVOID:
23922 case VOID_FTYPE_PV2SF_V4SF:
23923 case VOID_FTYPE_PV4DI_V4DI:
23924 case VOID_FTYPE_PV2DI_V2DI:
23925 case VOID_FTYPE_PCHAR_V32QI:
23926 case VOID_FTYPE_PCHAR_V16QI:
23927 case VOID_FTYPE_PFLOAT_V8SF:
23928 case VOID_FTYPE_PFLOAT_V4SF:
23929 case VOID_FTYPE_PDOUBLE_V4DF:
23930 case VOID_FTYPE_PDOUBLE_V2DF:
23931 case VOID_FTYPE_PULONGLONG_ULONGLONG:
23932 case VOID_FTYPE_PINT_INT:
23935 /* Reserve memory operand for target. */
23936 memory = ARRAY_SIZE (args);
23938 case V4SF_FTYPE_V4SF_PCV2SF:
23939 case V2DF_FTYPE_V2DF_PCDOUBLE:
23944 case V8SF_FTYPE_PCV8SF_V8SF:
23945 case V4DF_FTYPE_PCV4DF_V4DF:
23946 case V4SF_FTYPE_PCV4SF_V4SF:
23947 case V2DF_FTYPE_PCV2DF_V2DF:
23952 case VOID_FTYPE_PV8SF_V8SF_V8SF:
23953 case VOID_FTYPE_PV4DF_V4DF_V4DF:
23954 case VOID_FTYPE_PV4SF_V4SF_V4SF:
23955 case VOID_FTYPE_PV2DF_V2DF_V2DF:
23958 /* Reserve memory operand for target. */
23959 memory = ARRAY_SIZE (args);
23961 case VOID_FTYPE_UINT_UINT_UINT:
23962 case VOID_FTYPE_UINT64_UINT_UINT:
23963 case UCHAR_FTYPE_UINT_UINT_UINT:
23964 case UCHAR_FTYPE_UINT64_UINT_UINT:
23967 memory = ARRAY_SIZE (args);
23968 last_arg_constant = true;
23971 gcc_unreachable ();
23974 gcc_assert (nargs <= ARRAY_SIZE (args));
23976 if (klass == store)
23978 arg = CALL_EXPR_ARG (exp, 0);
23979 op = expand_normal (arg);
23980 gcc_assert (target == 0);
23981 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
23989 || GET_MODE (target) != tmode
23990 || ! (*insn_p->operand[0].predicate) (target, tmode))
23991 target = gen_reg_rtx (tmode);
23994 for (i = 0; i < nargs; i++)
23996 enum machine_mode mode = insn_p->operand[i + 1].mode;
23999 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
24000 op = expand_normal (arg);
24001 match = (*insn_p->operand[i + 1].predicate) (op, mode);
24003 if (last_arg_constant && (i + 1) == nargs)
24007 if (icode == CODE_FOR_lwp_lwpvalsi3
24008 || icode == CODE_FOR_lwp_lwpinssi3
24009 || icode == CODE_FOR_lwp_lwpvaldi3
24010 || icode == CODE_FOR_lwp_lwpinsdi3)
24011 error ("the last argument must be a 32-bit immediate");
24013 error ("the last argument must be an 8-bit immediate");
24021 /* This must be the memory operand. */
24022 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
24023 gcc_assert (GET_MODE (op) == mode
24024 || GET_MODE (op) == VOIDmode);
/* This must be a register.  */
24029 if (VECTOR_MODE_P (mode))
24030 op = safe_vector_operand (op, mode);
24032 gcc_assert (GET_MODE (op) == mode
24033 || GET_MODE (op) == VOIDmode);
24034 op = copy_to_mode_reg (mode, op);
24039 args[i].mode = mode;
24045 pat = GEN_FCN (icode) (target);
24048 pat = GEN_FCN (icode) (target, args[0].op);
24051 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
24054 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
24057 gcc_unreachable ();
24063 return klass == store ? 0 : target;
24066 /* Return the integer constant in ARG. Constrain it to be in the range
24067 of the subparts of VEC_TYPE; issue an error if not. */
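/* For example (illustrative): for a V4SF vector type the valid
   selectors are 0..3, so a selector of 5 triggers the error below.  */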
24070 get_element_number (tree vec_type, tree arg)
24072 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
24074 if (!host_integerp (arg, 1)
24075 || (elt = tree_low_cst (arg, 1), elt > max))
24077 error ("selector must be an integer constant in the range 0..%wi", max);
/* A subroutine of ix86_expand_builtin.  These builtins are wrappers around
   ix86_expand_vector_init.  We DO have language-level syntax for this, in
   the form of (type){ init-list }.  Except that since we can't place emms
   instructions from inside the compiler, we can't allow the use of MMX
   registers unless the user explicitly asks for it.  So we do *not* define
   vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md.  Instead
   we have builtins invoked by mmintrin.h that give us license to emit
   these sorts of instructions.  */
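/* For example (illustrative): _mm_set_pi32 (b, a) in mmintrin.h expands
   to __builtin_ia32_vec_init_v2si (a, b) rather than to the language-level
   (__v2si){ a, b }, and so ends up here.  */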
24094 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
24096 enum machine_mode tmode = TYPE_MODE (type);
24097 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
24098 int i, n_elt = GET_MODE_NUNITS (tmode);
24099 rtvec v = rtvec_alloc (n_elt);
24101 gcc_assert (VECTOR_MODE_P (tmode));
24102 gcc_assert (call_expr_nargs (exp) == n_elt);
24104 for (i = 0; i < n_elt; ++i)
24106 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
24107 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
24110 if (!target || !register_operand (target, tmode))
24111 target = gen_reg_rtx (tmode);
24113 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
/* A subroutine of ix86_expand_builtin.  These builtins are wrappers around
   ix86_expand_vector_extract.  They would be redundant (for non-MMX) if we
   had a language-level syntax for referencing vector elements.  */
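/* For example (illustrative): _mm_extract_epi16 (v, 3) reaches here as
   __builtin_ia32_vec_ext_v8hi (v, 3), and get_element_number checks the
   constant 3 against the eight subparts of the vector type.  */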
24122 ix86_expand_vec_ext_builtin (tree exp, rtx target)
24124 enum machine_mode tmode, mode0;
24129 arg0 = CALL_EXPR_ARG (exp, 0);
24130 arg1 = CALL_EXPR_ARG (exp, 1);
24132 op0 = expand_normal (arg0);
24133 elt = get_element_number (TREE_TYPE (arg0), arg1);
24135 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24136 mode0 = TYPE_MODE (TREE_TYPE (arg0));
24137 gcc_assert (VECTOR_MODE_P (mode0));
24139 op0 = force_reg (mode0, op0);
24141 if (optimize || !target || !register_operand (target, tmode))
24142 target = gen_reg_rtx (tmode);
24144 ix86_expand_vector_extract (true, target, op0, elt);
/* A subroutine of ix86_expand_builtin.  These builtins are wrappers around
   ix86_expand_vector_set.  They would be redundant (for non-MMX) if we had
   a language-level syntax for referencing vector elements.  */
24154 ix86_expand_vec_set_builtin (tree exp)
24156 enum machine_mode tmode, mode1;
24157 tree arg0, arg1, arg2;
24159 rtx op0, op1, target;
24161 arg0 = CALL_EXPR_ARG (exp, 0);
24162 arg1 = CALL_EXPR_ARG (exp, 1);
24163 arg2 = CALL_EXPR_ARG (exp, 2);
24165 tmode = TYPE_MODE (TREE_TYPE (arg0));
24166 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24167 gcc_assert (VECTOR_MODE_P (tmode));
24169 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24170 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24171 elt = get_element_number (TREE_TYPE (arg0), arg2);
24173 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24174 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24176 op0 = force_reg (tmode, op0);
24177 op1 = force_reg (mode1, op1);
/* OP0 is the source of these builtin functions and shouldn't be
   modified.  Create a copy, use it, and return it as the target.  */
24181 target = gen_reg_rtx (tmode);
24182 emit_move_insn (target, op0);
24183 ix86_expand_vector_set (true, target, op1, elt);
24188 /* Expand an expression EXP that calls a built-in function,
24189 with result going to TARGET if that's convenient
24190 (and in mode MODE if that's convenient).
24191 SUBTARGET may be used as the target for computing one of EXP's operands.
24192 IGNORE is nonzero if the value is to be ignored. */
24195 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24196 enum machine_mode mode ATTRIBUTE_UNUSED,
24197 int ignore ATTRIBUTE_UNUSED)
24199 const struct builtin_description *d;
24201 enum insn_code icode;
24202 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24203 tree arg0, arg1, arg2;
24204 rtx op0, op1, op2, pat;
24205 enum machine_mode mode0, mode1, mode2;
24206 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
/* Determine whether the builtin function is available under the current ISA.
   Originally the builtin was not created if it wasn't applicable to the
   current ISA based on the command line switches.  With function-specific
   options, we need to check in the context of the function making the call
   whether it is supported.  */
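/* For example (illustrative): calling an AVX builtin such as
   __builtin_ia32_sqrtpd256 from a function compiled without AVX support
   is diagnosed here, typically as "needs isa option -mavx".  */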
24213 if (ix86_builtins_isa[fcode].isa
24214 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
24216 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24217 NULL, NULL, false);
24220 error ("%qE needs unknown isa option", fndecl);
24223 gcc_assert (opts != NULL);
24224 error ("%qE needs isa option %s", fndecl, opts);
24232 case IX86_BUILTIN_MASKMOVQ:
24233 case IX86_BUILTIN_MASKMOVDQU:
24234 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24235 ? CODE_FOR_mmx_maskmovq
24236 : CODE_FOR_sse2_maskmovdqu);
24237 /* Note the arg order is different from the operand order. */
24238 arg1 = CALL_EXPR_ARG (exp, 0);
24239 arg2 = CALL_EXPR_ARG (exp, 1);
24240 arg0 = CALL_EXPR_ARG (exp, 2);
24241 op0 = expand_normal (arg0);
24242 op1 = expand_normal (arg1);
24243 op2 = expand_normal (arg2);
24244 mode0 = insn_data[icode].operand[0].mode;
24245 mode1 = insn_data[icode].operand[1].mode;
24246 mode2 = insn_data[icode].operand[2].mode;
24248 op0 = force_reg (Pmode, op0);
24249 op0 = gen_rtx_MEM (mode1, op0);
24251 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24252 op0 = copy_to_mode_reg (mode0, op0);
24253 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24254 op1 = copy_to_mode_reg (mode1, op1);
24255 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24256 op2 = copy_to_mode_reg (mode2, op2);
24257 pat = GEN_FCN (icode) (op0, op1, op2);
24263 case IX86_BUILTIN_LDMXCSR:
24264 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24265 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24266 emit_move_insn (target, op0);
24267 emit_insn (gen_sse_ldmxcsr (target));
24270 case IX86_BUILTIN_STMXCSR:
24271 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24272 emit_insn (gen_sse_stmxcsr (target));
24273 return copy_to_mode_reg (SImode, target);
24275 case IX86_BUILTIN_CLFLUSH:
24276 arg0 = CALL_EXPR_ARG (exp, 0);
24277 op0 = expand_normal (arg0);
24278 icode = CODE_FOR_sse2_clflush;
24279 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24280 op0 = copy_to_mode_reg (Pmode, op0);
24282 emit_insn (gen_sse2_clflush (op0));
24285 case IX86_BUILTIN_MONITOR:
24286 arg0 = CALL_EXPR_ARG (exp, 0);
24287 arg1 = CALL_EXPR_ARG (exp, 1);
24288 arg2 = CALL_EXPR_ARG (exp, 2);
24289 op0 = expand_normal (arg0);
24290 op1 = expand_normal (arg1);
24291 op2 = expand_normal (arg2);
24293 op0 = copy_to_mode_reg (Pmode, op0);
24295 op1 = copy_to_mode_reg (SImode, op1);
24297 op2 = copy_to_mode_reg (SImode, op2);
24298 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
24301 case IX86_BUILTIN_MWAIT:
24302 arg0 = CALL_EXPR_ARG (exp, 0);
24303 arg1 = CALL_EXPR_ARG (exp, 1);
24304 op0 = expand_normal (arg0);
24305 op1 = expand_normal (arg1);
24307 op0 = copy_to_mode_reg (SImode, op0);
24309 op1 = copy_to_mode_reg (SImode, op1);
24310 emit_insn (gen_sse3_mwait (op0, op1));
24313 case IX86_BUILTIN_VEC_INIT_V2SI:
24314 case IX86_BUILTIN_VEC_INIT_V4HI:
24315 case IX86_BUILTIN_VEC_INIT_V8QI:
24316 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24318 case IX86_BUILTIN_VEC_EXT_V2DF:
24319 case IX86_BUILTIN_VEC_EXT_V2DI:
24320 case IX86_BUILTIN_VEC_EXT_V4SF:
24321 case IX86_BUILTIN_VEC_EXT_V4SI:
24322 case IX86_BUILTIN_VEC_EXT_V8HI:
24323 case IX86_BUILTIN_VEC_EXT_V2SI:
24324 case IX86_BUILTIN_VEC_EXT_V4HI:
24325 case IX86_BUILTIN_VEC_EXT_V16QI:
24326 return ix86_expand_vec_ext_builtin (exp, target);
24328 case IX86_BUILTIN_VEC_SET_V2DI:
24329 case IX86_BUILTIN_VEC_SET_V4SF:
24330 case IX86_BUILTIN_VEC_SET_V4SI:
24331 case IX86_BUILTIN_VEC_SET_V8HI:
24332 case IX86_BUILTIN_VEC_SET_V4HI:
24333 case IX86_BUILTIN_VEC_SET_V16QI:
24334 return ix86_expand_vec_set_builtin (exp);
24336 case IX86_BUILTIN_VEC_PERM_V2DF:
24337 case IX86_BUILTIN_VEC_PERM_V4SF:
24338 case IX86_BUILTIN_VEC_PERM_V2DI:
24339 case IX86_BUILTIN_VEC_PERM_V4SI:
24340 case IX86_BUILTIN_VEC_PERM_V8HI:
24341 case IX86_BUILTIN_VEC_PERM_V16QI:
24342 case IX86_BUILTIN_VEC_PERM_V2DI_U:
24343 case IX86_BUILTIN_VEC_PERM_V4SI_U:
24344 case IX86_BUILTIN_VEC_PERM_V8HI_U:
24345 case IX86_BUILTIN_VEC_PERM_V16QI_U:
24346 case IX86_BUILTIN_VEC_PERM_V4DF:
24347 case IX86_BUILTIN_VEC_PERM_V8SF:
24348 return ix86_expand_vec_perm_builtin (exp);
24350 case IX86_BUILTIN_INFQ:
24351 case IX86_BUILTIN_HUGE_VALQ:
24353 REAL_VALUE_TYPE inf;
24357 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24359 tmp = validize_mem (force_const_mem (mode, tmp));
24362 target = gen_reg_rtx (mode);
24364 emit_move_insn (target, tmp);
24368 case IX86_BUILTIN_LLWPCB:
24369 arg0 = CALL_EXPR_ARG (exp, 0);
24370 op0 = expand_normal (arg0);
24371 icode = CODE_FOR_lwp_llwpcb;
24372 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24373 op0 = copy_to_mode_reg (Pmode, op0);
24374 emit_insn (gen_lwp_llwpcb (op0));
24377 case IX86_BUILTIN_SLWPCB:
24378 icode = CODE_FOR_lwp_slwpcb;
24380 || ! (*insn_data[icode].operand[0].predicate) (target, Pmode))
24381 target = gen_reg_rtx (Pmode);
24382 emit_insn (gen_lwp_slwpcb (target));
24389 for (i = 0, d = bdesc_special_args;
24390 i < ARRAY_SIZE (bdesc_special_args);
24392 if (d->code == fcode)
24393 return ix86_expand_special_args_builtin (d, exp, target);
24395 for (i = 0, d = bdesc_args;
24396 i < ARRAY_SIZE (bdesc_args);
24398 if (d->code == fcode)
24401 case IX86_BUILTIN_FABSQ:
24402 case IX86_BUILTIN_COPYSIGNQ:
24404 /* Emit a normal call if SSE2 isn't available. */
24405 return expand_call (exp, target, ignore);
24407 return ix86_expand_args_builtin (d, exp, target);
24410 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24411 if (d->code == fcode)
24412 return ix86_expand_sse_comi (d, exp, target);
24414 for (i = 0, d = bdesc_pcmpestr;
24415 i < ARRAY_SIZE (bdesc_pcmpestr);
24417 if (d->code == fcode)
24418 return ix86_expand_sse_pcmpestr (d, exp, target);
24420 for (i = 0, d = bdesc_pcmpistr;
24421 i < ARRAY_SIZE (bdesc_pcmpistr);
24423 if (d->code == fcode)
24424 return ix86_expand_sse_pcmpistr (d, exp, target);
24426 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
24427 if (d->code == fcode)
24428 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
24429 (enum ix86_builtin_func_type)
24430 d->flag, d->comparison);
24432 gcc_unreachable ();
/* Return a function decl for a vectorized version of the builtin function
   FNDECL, given the result vector type TYPE_OUT and the argument vector
   type TYPE_IN, or NULL_TREE if it is not available.  */
24440 ix86_builtin_vectorized_function (tree fndecl, tree type_out,
24443 enum machine_mode in_mode, out_mode;
24445 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
24447 if (TREE_CODE (type_out) != VECTOR_TYPE
24448 || TREE_CODE (type_in) != VECTOR_TYPE
24449 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
24452 out_mode = TYPE_MODE (TREE_TYPE (type_out));
24453 out_n = TYPE_VECTOR_SUBPARTS (type_out);
24454 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24455 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24459 case BUILT_IN_SQRT:
24460 if (out_mode == DFmode && out_n == 2
24461 && in_mode == DFmode && in_n == 2)
24462 return ix86_builtins[IX86_BUILTIN_SQRTPD];
24465 case BUILT_IN_SQRTF:
24466 if (out_mode == SFmode && out_n == 4
24467 && in_mode == SFmode && in_n == 4)
24468 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
24471 case BUILT_IN_LRINT:
24472 if (out_mode == SImode && out_n == 4
24473 && in_mode == DFmode && in_n == 2)
24474 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
24477 case BUILT_IN_LRINTF:
24478 if (out_mode == SImode && out_n == 4
24479 && in_mode == SFmode && in_n == 4)
24480 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
24483 case BUILT_IN_COPYSIGN:
24484 if (out_mode == DFmode && out_n == 2
24485 && in_mode == DFmode && in_n == 2)
24486 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
24489 case BUILT_IN_COPYSIGNF:
24490 if (out_mode == SFmode && out_n == 4
24491 && in_mode == SFmode && in_n == 4)
24492 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
24499 /* Dispatch to a handler for a vectorization library. */
24500 if (ix86_veclib_handler)
24501 return (*ix86_veclib_handler) ((enum built_in_function) fn, type_out,
24507 /* Handler for an SVML-style interface to
24508 a library with vectorized intrinsics. */
24511 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
24514 tree fntype, new_fndecl, args;
24517 enum machine_mode el_mode, in_mode;
/* SVML is suitable for unsafe math only.  */
24521 if (!flag_unsafe_math_optimizations)
24524 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24525 n = TYPE_VECTOR_SUBPARTS (type_out);
24526 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24527 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24528 if (el_mode != in_mode
24536 case BUILT_IN_LOG10:
24538 case BUILT_IN_TANH:
24540 case BUILT_IN_ATAN:
24541 case BUILT_IN_ATAN2:
24542 case BUILT_IN_ATANH:
24543 case BUILT_IN_CBRT:
24544 case BUILT_IN_SINH:
24546 case BUILT_IN_ASINH:
24547 case BUILT_IN_ASIN:
24548 case BUILT_IN_COSH:
24550 case BUILT_IN_ACOSH:
24551 case BUILT_IN_ACOS:
24552 if (el_mode != DFmode || n != 2)
24556 case BUILT_IN_EXPF:
24557 case BUILT_IN_LOGF:
24558 case BUILT_IN_LOG10F:
24559 case BUILT_IN_POWF:
24560 case BUILT_IN_TANHF:
24561 case BUILT_IN_TANF:
24562 case BUILT_IN_ATANF:
24563 case BUILT_IN_ATAN2F:
24564 case BUILT_IN_ATANHF:
24565 case BUILT_IN_CBRTF:
24566 case BUILT_IN_SINHF:
24567 case BUILT_IN_SINF:
24568 case BUILT_IN_ASINHF:
24569 case BUILT_IN_ASINF:
24570 case BUILT_IN_COSHF:
24571 case BUILT_IN_COSF:
24572 case BUILT_IN_ACOSHF:
24573 case BUILT_IN_ACOSF:
24574 if (el_mode != SFmode || n != 4)
24582 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24584 if (fn == BUILT_IN_LOGF)
24585 strcpy (name, "vmlsLn4");
24586 else if (fn == BUILT_IN_LOG)
24587 strcpy (name, "vmldLn2");
24590 sprintf (name, "vmls%s", bname+10);
24591 name[strlen (name)-1] = '4';
24594 sprintf (name, "vmld%s2", bname+10);
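/* A worked example of the mangling (illustrative): for BUILT_IN_SINF,
   bname is "__builtin_sinf", so bname+10 is "sinf" and the SFmode branch
   forms "vmlssin4"; BUILT_IN_SIN yields "vmldsin2".  The uppercase fixup
   below turns these into the SVML names "vmlsSin4" and "vmldSin2".  */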
24596 /* Convert to uppercase. */
24600 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24601 args = TREE_CHAIN (args))
24605 fntype = build_function_type_list (type_out, type_in, NULL);
24607 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24609 /* Build a function declaration for the vectorized function. */
24610 new_fndecl = build_decl (BUILTINS_LOCATION,
24611 FUNCTION_DECL, get_identifier (name), fntype);
24612 TREE_PUBLIC (new_fndecl) = 1;
24613 DECL_EXTERNAL (new_fndecl) = 1;
24614 DECL_IS_NOVOPS (new_fndecl) = 1;
24615 TREE_READONLY (new_fndecl) = 1;
24620 /* Handler for an ACML-style interface to
24621 a library with vectorized intrinsics. */
24624 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
24626 char name[20] = "__vr.._";
24627 tree fntype, new_fndecl, args;
24630 enum machine_mode el_mode, in_mode;
/* ACML is 64-bit only and suitable for unsafe math only, as it does
   not correctly support parts of IEEE with the required precision,
   such as denormals.  */
24637 || !flag_unsafe_math_optimizations)
24640 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24641 n = TYPE_VECTOR_SUBPARTS (type_out);
24642 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24643 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24644 if (el_mode != in_mode
24654 case BUILT_IN_LOG2:
24655 case BUILT_IN_LOG10:
24658 if (el_mode != DFmode
24663 case BUILT_IN_SINF:
24664 case BUILT_IN_COSF:
24665 case BUILT_IN_EXPF:
24666 case BUILT_IN_POWF:
24667 case BUILT_IN_LOGF:
24668 case BUILT_IN_LOG2F:
24669 case BUILT_IN_LOG10F:
24672 if (el_mode != SFmode
24681 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24682 sprintf (name + 7, "%s", bname+10);
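/* A worked example (illustrative): the switch above fills in the ".."
   of "__vr.._", so BUILT_IN_SIN produces "__vrd2_sin" and BUILT_IN_SINF
   produces "__vrs4_sinf".  */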
24685 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24686 args = TREE_CHAIN (args))
24690 fntype = build_function_type_list (type_out, type_in, NULL);
24692 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24694 /* Build a function declaration for the vectorized function. */
24695 new_fndecl = build_decl (BUILTINS_LOCATION,
24696 FUNCTION_DECL, get_identifier (name), fntype);
24697 TREE_PUBLIC (new_fndecl) = 1;
24698 DECL_EXTERNAL (new_fndecl) = 1;
24699 DECL_IS_NOVOPS (new_fndecl) = 1;
24700 TREE_READONLY (new_fndecl) = 1;
/* Return a decl of a function that implements conversion of an integer
   vector into a floating-point vector, or vice versa.  DEST_TYPE and
   SRC_TYPE are the types involved when converting according to CODE.
   Return NULL_TREE if it is not available.  */
24712 ix86_vectorize_builtin_conversion (unsigned int code,
24713 tree dest_type, tree src_type)
24721 switch (TYPE_MODE (src_type))
24724 switch (TYPE_MODE (dest_type))
24727 return (TYPE_UNSIGNED (src_type)
24728 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
24729 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
24731 return (TYPE_UNSIGNED (src_type)
24733 : ix86_builtins[IX86_BUILTIN_CVTDQ2PD256]);
24739 switch (TYPE_MODE (dest_type))
24742 return (TYPE_UNSIGNED (src_type)
24744 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
24753 case FIX_TRUNC_EXPR:
24754 switch (TYPE_MODE (dest_type))
24757 switch (TYPE_MODE (src_type))
24760 return (TYPE_UNSIGNED (dest_type)
24762 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ]);
24764 return (TYPE_UNSIGNED (dest_type)
24766 : ix86_builtins[IX86_BUILTIN_CVTTPD2DQ256]);
24773 switch (TYPE_MODE (src_type))
24776 return (TYPE_UNSIGNED (dest_type)
24778 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ256]);
/* Return a code for a target-specific builtin that implements the
   reciprocal of the function, or NULL_TREE if not available.  */
24799 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
24800 bool sqrt ATTRIBUTE_UNUSED)
24802 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
24803 && flag_finite_math_only && !flag_trapping_math
24804 && flag_unsafe_math_optimizations))
24808 /* Machine dependent builtins. */
24811 /* Vectorized version of sqrt to rsqrt conversion. */
24812 case IX86_BUILTIN_SQRTPS_NR:
24813 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
24819 /* Normal builtins. */
24822 /* Sqrt to rsqrt conversion. */
24823 case BUILT_IN_SQRTF:
24824 return ix86_builtins[IX86_BUILTIN_RSQRTF];
24831 /* Helper for avx_vpermilps256_operand et al. This is also used by
24832 the expansion functions to turn the parallel back into a mask.
24833 The return value is 0 for no match and the imm8+1 for a match. */
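/* A worked example (illustrative): for V4SFmode the parallel
   [1 0 3 2] packs two bits per element into the mask, giving
   1 | (0 << 2) | (3 << 4) | (2 << 6) = 0xb1, so the return value
   is 0xb2.  */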
24836 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
24838 unsigned i, nelt = GET_MODE_NUNITS (mode);
24840 unsigned char ipar[8];
24842 if (XVECLEN (par, 0) != (int) nelt)
24845 /* Validate that all of the elements are constants, and not totally
24846 out of range. Copy the data into an integral array to make the
24847 subsequent checks easier. */
24848 for (i = 0; i < nelt; ++i)
24850 rtx er = XVECEXP (par, 0, i);
24851 unsigned HOST_WIDE_INT ei;
24853 if (!CONST_INT_P (er))
/* In the 256-bit DFmode case, we can only move elements within
   a 128-bit lane.  */
24866 for (i = 0; i < 2; ++i)
24870 mask |= ipar[i] << i;
24872 for (i = 2; i < 4; ++i)
24876 mask |= (ipar[i] - 2) << i;
24881 /* In the 256-bit SFmode case, we have full freedom of movement
24882 within the low 128-bit lane, but the high 128-bit lane must
24883 mirror the exact same pattern. */
24884 for (i = 0; i < 4; ++i)
24885 if (ipar[i] + 4 != ipar[i + 4])
/* In the 128-bit case, we have full freedom in the placement of
   the elements from the source operand.  */
24894 for (i = 0; i < nelt; ++i)
24895 mask |= ipar[i] << (i * (nelt / 2));
24899 gcc_unreachable ();
24902 /* Make sure success has a non-zero value by adding one. */
24906 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
24907 the expansion functions to turn the parallel back into a mask.
24908 The return value is 0 for no match and the imm8+1 for a match. */
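/* A worked example (illustrative): for V4DFmode the parallel
   [2 3 4 5] selects the high lane of the first operand and the low
   lane of the second; each half's starting index is divided by nelt2,
   giving 1 | (2 << 4) = 0x21, so the return value is 0x22.  */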
24911 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
24913 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
24915 unsigned char ipar[8];
24917 if (XVECLEN (par, 0) != (int) nelt)
24920 /* Validate that all of the elements are constants, and not totally
24921 out of range. Copy the data into an integral array to make the
24922 subsequent checks easier. */
24923 for (i = 0; i < nelt; ++i)
24925 rtx er = XVECEXP (par, 0, i);
24926 unsigned HOST_WIDE_INT ei;
24928 if (!CONST_INT_P (er))
24931 if (ei >= 2 * nelt)
/* Validate that each half of the permute selects consecutive elements,
   i.e. that the halves really are halves.  */
24937 for (i = 0; i < nelt2 - 1; ++i)
24938 if (ipar[i] + 1 != ipar[i + 1])
24940 for (i = nelt2; i < nelt - 1; ++i)
24941 if (ipar[i] + 1 != ipar[i + 1])
24944 /* Reconstruct the mask. */
24945 for (i = 0; i < 2; ++i)
24947 unsigned e = ipar[i * nelt2];
24951 mask |= e << (i * 4);
24954 /* Make sure success has a non-zero value by adding one. */
/* Store OPERAND to memory after reload is completed.  This means
   that we can't easily use assign_stack_local.  */
24962 ix86_force_to_memory (enum machine_mode mode, rtx operand)
24966 gcc_assert (reload_completed);
24967 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
24969 result = gen_rtx_MEM (mode,
24970 gen_rtx_PLUS (Pmode,
24972 GEN_INT (-RED_ZONE_SIZE)));
24973 emit_move_insn (result, operand);
24975 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
24981 operand = gen_lowpart (DImode, operand);
24985 gen_rtx_SET (VOIDmode,
24986 gen_rtx_MEM (DImode,
24987 gen_rtx_PRE_DEC (DImode,
24988 stack_pointer_rtx)),
24992 gcc_unreachable ();
24994 result = gen_rtx_MEM (mode, stack_pointer_rtx);
25003 split_di (&operand, 1, operands, operands + 1);
25005 gen_rtx_SET (VOIDmode,
25006 gen_rtx_MEM (SImode,
25007 gen_rtx_PRE_DEC (Pmode,
25008 stack_pointer_rtx)),
25011 gen_rtx_SET (VOIDmode,
25012 gen_rtx_MEM (SImode,
25013 gen_rtx_PRE_DEC (Pmode,
25014 stack_pointer_rtx)),
25019 /* Store HImodes as SImodes. */
25020 operand = gen_lowpart (SImode, operand);
25024 gen_rtx_SET (VOIDmode,
25025 gen_rtx_MEM (GET_MODE (operand),
25026 gen_rtx_PRE_DEC (SImode,
25027 stack_pointer_rtx)),
25031 gcc_unreachable ();
25033 result = gen_rtx_MEM (mode, stack_pointer_rtx);
/* Free the operand from memory.  */
25040 ix86_free_from_memory (enum machine_mode mode)
25042 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
25046 if (mode == DImode || TARGET_64BIT)
/* Use LEA to deallocate stack space.  In peephole2 it will be converted
   to a pop or add instruction if registers are available.  */
25052 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
25053 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
/* Implement TARGET_IRA_COVER_CLASSES.  If -mfpmath=sse, we prefer
   SSE_REGS to FLOAT_REGS if their costs for a pseudo are the same.  */
25061 static const enum reg_class *
25062 i386_ira_cover_classes (void)
25064 static const enum reg_class sse_fpmath_classes[] = {
25065 GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
25067 static const enum reg_class no_sse_fpmath_classes[] = {
25068 GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
25071 return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
/* Put float CONST_DOUBLE in the constant pool instead of fp regs.
   QImode must go into class Q_REGS.
   Narrow ALL_REGS to GENERAL_REGS.  This supports letting movsf and
   movdf do mem-to-mem moves through integer regs.  */
25079 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
25081 enum machine_mode mode = GET_MODE (x);
25083 /* We're only allowed to return a subclass of CLASS. Many of the
25084 following checks fail for NO_REGS, so eliminate that early. */
25085 if (regclass == NO_REGS)
25088 /* All classes can load zeros. */
25089 if (x == CONST0_RTX (mode))
25092 /* Force constants into memory if we are loading a (nonzero) constant into
25093 an MMX or SSE register. This is because there are no MMX/SSE instructions
25094 to load from a constant. */
25096 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
25099 /* Prefer SSE regs only, if we can use them for math. */
25100 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
25101 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
25103 /* Floating-point constants need more complex checks. */
25104 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
25106 /* General regs can load everything. */
25107 if (reg_class_subset_p (regclass, GENERAL_REGS))
25110 /* Floats can load 0 and 1 plus some others. Note that we eliminated
25111 zero above. We only want to wind up preferring 80387 registers if
25112 we plan on doing computation with them. */
25114 && standard_80387_constant_p (x))
/* Limit the class to non-SSE.  */
25117 if (regclass == FLOAT_SSE_REGS)
25119 if (regclass == FP_TOP_SSE_REGS)
25121 if (regclass == FP_SECOND_SSE_REGS)
25122 return FP_SECOND_REG;
25123 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
/* Generally when we see PLUS here, it's the function invariant
   (plus soft-fp const_int), which can only be computed into general
   regs.  */
25133 if (GET_CODE (x) == PLUS)
25134 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
25136 /* QImode constants are easy to load, but non-constant QImode data
25137 must go into Q_REGS. */
25138 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
25140 if (reg_class_subset_p (regclass, Q_REGS))
25142 if (reg_class_subset_p (Q_REGS, regclass))
25150 /* Discourage putting floating-point values in SSE registers unless
25151 SSE math is being used, and likewise for the 387 registers. */
25153 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
25155 enum machine_mode mode = GET_MODE (x);
/* Restrict the output reload class to the register bank that we are doing
   math on.  If we would otherwise have to return something that is not a
   subset of CLASS, reject the alternative: if reload cannot do this, it
   will still use its choice.  */
25160 mode = GET_MODE (x);
25161 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
25162 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
25164 if (X87_FLOAT_MODE_P (mode))
25166 if (regclass == FP_TOP_SSE_REGS)
25168 else if (regclass == FP_SECOND_SSE_REGS)
25169 return FP_SECOND_REG;
25171 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
25177 static enum reg_class
25178 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
25179 enum machine_mode mode,
25180 secondary_reload_info *sri ATTRIBUTE_UNUSED)
/* QImode spills from non-QI registers require an intermediate
   register on 32-bit targets.  */
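/* For example (illustrative): a QImode value spilled from %esi has no
   byte-addressable lowpart on ia32, so it must be moved through one of
   the Q_REGS (%eax..%edx) first.  */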
25184 if (!in_p && mode == QImode && !TARGET_64BIT
25185 && (rclass == GENERAL_REGS
25186 || rclass == LEGACY_REGS
25187 || rclass == INDEX_REGS))
25196 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
25197 regno = true_regnum (x);
25199 /* Return Q_REGS if the operand is in memory. */
/* If we are copying between general and FP registers, we need a memory
   location.  The same is true for SSE and MMX registers.

   To optimize register_move_cost performance, allow the inline variant.

   The macro can't work reliably when one of the CLASSES is a class
   containing registers from multiple units (SSE, MMX, integer).  We avoid
   this by never combining those units in a single alternative in the
   machine description.  Ensure that this constraint holds to avoid
   unexpected surprises.

   When STRICT is false, we are being called from REGISTER_MOVE_COST, so
   do not enforce these sanity checks.  */
25221 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25222 enum machine_mode mode, int strict)
25224 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
25225 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
25226 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
25227 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
25228 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
25229 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
25231 gcc_assert (!strict);
25235 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
/* ??? This is a lie.  We do have moves between mmx/general and between
   mmx/sse2.  But by saying we need secondary memory we discourage the
   register allocator from using the mmx registers unless needed.  */
25241 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
25244 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25246 /* SSE1 doesn't have any direct moves from other classes. */
25250 /* If the target says that inter-unit moves are more expensive
25251 than moving through memory, then don't generate them. */
25252 if (!TARGET_INTER_UNIT_MOVES)
25255 /* Between SSE and general, we have moves no larger than word size. */
25256 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
25264 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25265 enum machine_mode mode, int strict)
25267 return inline_secondary_memory_needed (class1, class2, mode, strict);
25270 /* Return true if the registers in CLASS cannot represent the change from
25271 modes FROM to TO. */
25274 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
25275 enum reg_class regclass)
25280 /* x87 registers can't do subreg at all, as all values are reformatted
25281 to extended precision. */
25282 if (MAYBE_FLOAT_CLASS_P (regclass))
25285 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25287 /* Vector registers do not support QI or HImode loads. If we don't
25288 disallow a change to these modes, reload will assume it's ok to
25289 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25290 the vec_dupv4hi pattern. */
25291 if (GET_MODE_SIZE (from) < 4)
25294 /* Vector registers do not support subreg with nonzero offsets, which
25295 are otherwise valid for integer registers. Since we can't see
25296 whether we have a nonzero offset from here, prohibit all
25297 nonparadoxical subregs changing size. */
25298 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
/* Return the cost of moving data of mode M between a
   register and memory.  A value of 2 is the default; this cost is
   relative to those in `REGISTER_MOVE_COST'.

   This function is used extensively by register_move_cost, which is used
   to build tables at startup.  Make it inline in this case.
   When IN is 2, return the maximum of the in and out move costs.

   If moving between registers and memory is more expensive than
   between two registers, you should define this macro to express the
   relative cost.

   Model also the increased moving costs of QImode registers in non
   Q_REGS classes.  */
25321 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25325 if (FLOAT_CLASS_P (regclass))
25343 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25344 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25346 if (SSE_CLASS_P (regclass))
25349 switch (GET_MODE_SIZE (mode))
25364 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25365 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25367 if (MMX_CLASS_P (regclass))
25370 switch (GET_MODE_SIZE (mode))
25382 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25383 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25385 switch (GET_MODE_SIZE (mode))
25388 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25391 return ix86_cost->int_store[0];
25392 if (TARGET_PARTIAL_REG_DEPENDENCY
25393 && optimize_function_for_speed_p (cfun))
25394 cost = ix86_cost->movzbl_load;
25396 cost = ix86_cost->int_load[0];
25398 return MAX (cost, ix86_cost->int_store[0]);
25404 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25406 return ix86_cost->movzbl_load;
25408 return ix86_cost->int_store[0] + 4;
25413 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25414 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
/* Compute the number of 32-bit moves needed.  TFmode is moved as XFmode.  */
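/* For example (illustrative): a 12-byte XFmode value with 4-byte words
   needs (12 + 4 - 1) / 4 = 3 moves, so the cost below is tripled.  */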
25417 if (mode == TFmode)
25420 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25422 cost = ix86_cost->int_load[2];
25424 cost = ix86_cost->int_store[2];
25425 return (cost * (((int) GET_MODE_SIZE (mode)
25426 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
25431 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
25433 return inline_memory_move_cost (mode, regclass, in);
25437 /* Return the cost of moving data from a register in class CLASS1 to
25438 one in class CLASS2.
25440 It is not required that the cost always equal 2 when FROM is the same as TO;
25441 on some machines it is expensive to move between registers if they are not
25442 general registers. */
25445 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25446 enum reg_class class2)
25448 /* In case we require secondary memory, compute the cost of the store
25449 followed by the load.  In order to avoid bad register allocation choices,
25450 we need this to be *at least* as high as the symmetric MEMORY_MOVE_COST.  */
25452 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25456 cost += inline_memory_move_cost (mode, class1, 2);
25457 cost += inline_memory_move_cost (mode, class2, 2);
25459 /* When copying from a general purpose register we may emit multiple
25460 stores followed by a single load, causing a memory size mismatch stall.
25461 Count this as an arbitrarily high cost of 20.  */
25462 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25465 /* In the case of FP/MMX moves, the registers actually overlap, and we
25466 have to switch modes in order to treat them differently. */
25467 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25468 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25474 /* Moves between SSE/MMX and integer unit are expensive. */
25475 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25476 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25478 /* ??? By keeping the returned value relatively high, we limit the number
25479 of moves between integer and MMX/SSE registers for all targets.
25480 Additionally, a high value prevents a problem with x86_modes_tieable_p(),
25481 where integer modes in MMX/SSE registers are not tieable
25482 because of missing QImode and HImode moves to, from or between
25483 MMX/SSE registers. */
25484 return MAX (8, ix86_cost->mmxsse_to_integer);
25486 if (MAYBE_FLOAT_CLASS_P (class1))
25487 return ix86_cost->fp_move;
25488 if (MAYBE_SSE_CLASS_P (class1))
25489 return ix86_cost->sse_move;
25490 if (MAYBE_MMX_CLASS_P (class1))
25491 return ix86_cost->mmx_move;
25495 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25498 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25500 /* Flags, and only flags, can hold CCmode values.  */
25501 if (CC_REGNO_P (regno))
25502 return GET_MODE_CLASS (mode) == MODE_CC;
25503 if (GET_MODE_CLASS (mode) == MODE_CC
25504 || GET_MODE_CLASS (mode) == MODE_RANDOM
25505 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25507 if (FP_REGNO_P (regno))
25508 return VALID_FP_MODE_P (mode);
25509 if (SSE_REGNO_P (regno))
25511 /* We implement the move patterns for all vector modes into and
25512 out of SSE registers, even when no operation instructions
25513 are available.  An OImode move is available only when AVX is enabled.  */
25515 return ((TARGET_AVX && mode == OImode)
25516 || VALID_AVX256_REG_MODE (mode)
25517 || VALID_SSE_REG_MODE (mode)
25518 || VALID_SSE2_REG_MODE (mode)
25519 || VALID_MMX_REG_MODE (mode)
25520 || VALID_MMX_REG_MODE_3DNOW (mode));
25522 if (MMX_REGNO_P (regno))
25524 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25525 so if the register is available at all, then we can move data of
25526 the given mode into or out of it. */
25527 return (VALID_MMX_REG_MODE (mode)
25528 || VALID_MMX_REG_MODE_3DNOW (mode));
25531 if (mode == QImode)
25533 /* Take care with QImode values - they can live in non-QI regs,
25534 but they can then cause partial register stalls.  */
25535 if (regno <= BX_REG || TARGET_64BIT)
25537 if (!TARGET_PARTIAL_REG_STALL)
25539 return reload_in_progress || reload_completed;
25541 /* We handle both integers and floats in the general purpose registers.  */
25542 else if (VALID_INT_MODE_P (mode))
25544 else if (VALID_FP_MODE_P (mode))
25546 else if (VALID_DFP_MODE_P (mode))
25548 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25549 on to use that value in smaller contexts, this can easily force a
25550 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25551 supporting DImode, allow it. */
25552 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25558 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25559 tieable integer mode. */
25562 ix86_tieable_integer_mode_p (enum machine_mode mode)
25571 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25574 return TARGET_64BIT;
25581 /* Return true if MODE1 is accessible in a register that can hold MODE2
25582 without copying. That is, all register classes that can hold MODE2
25583 can also hold MODE1. */
25586 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25588 if (mode1 == mode2)
25591 if (ix86_tieable_integer_mode_p (mode1)
25592 && ix86_tieable_integer_mode_p (mode2))
25595 /* MODE2 being XFmode implies fp stack or general regs, which means we
25596 can tie any smaller floating point modes to it. Note that we do not
25597 tie this with TFmode. */
25598 if (mode2 == XFmode)
25599 return mode1 == SFmode || mode1 == DFmode;
25601 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
25602 that we can tie it with SFmode. */
25603 if (mode2 == DFmode)
25604 return mode1 == SFmode;
25606 /* If MODE2 is only appropriate for an SSE register, then tie with
25607 any other mode acceptable to SSE registers. */
25608 if (GET_MODE_SIZE (mode2) == 16
25609 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
25610 return (GET_MODE_SIZE (mode1) == 16
25611 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
25613 /* If MODE2 is appropriate for an MMX register, then tie
25614 with any other mode acceptable to MMX registers. */
25615 if (GET_MODE_SIZE (mode2) == 8
25616 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
25617 return (GET_MODE_SIZE (mode1) == 8
25618 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
25623 /* Compute a (partial) cost for rtx X. Return true if the complete
25624 cost has been computed, and false if subexpressions should be
25625 scanned. In either case, *TOTAL contains the cost result. */
25628 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
25630 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
25631 enum machine_mode mode = GET_MODE (x);
25632 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
25640 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
25642 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
25644 else if (flag_pic && SYMBOLIC_CONST (x)
25645 && (!TARGET_64BIT
25646 || (GET_CODE (x) != LABEL_REF
25647 && (GET_CODE (x) != SYMBOL_REF
25648 || !SYMBOL_REF_LOCAL_P (x)))))
25655 if (mode == VOIDmode)
25658 switch (standard_80387_constant_p (x))
25663 default: /* Other constants */
25668 /* Start with (MEM (SYMBOL_REF)), since that's where
25669 it'll probably end up. Add a penalty for size. */
25670 *total = (COSTS_N_INSNS (1)
25671 + (flag_pic != 0 && !TARGET_64BIT)
25672 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
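/* E.g. a DFmode constant compiled for 32-bit PIC costs
   COSTS_N_INSNS (1) + 1 (PIC) + 1 (DFmode size penalty); SFmode adds
   no size penalty and XFmode adds 2.  */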
25678 /* The zero extension is often completely free on x86_64, so make
25679 it as cheap as possible. */
25680 if (TARGET_64BIT && mode == DImode
25681 && GET_MODE (XEXP (x, 0)) == SImode)
25683 else if (TARGET_ZERO_EXTEND_WITH_AND)
25684 *total = cost->add;
25686 *total = cost->movzx;
25690 *total = cost->movsx;
25694 if (CONST_INT_P (XEXP (x, 1))
25695 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
25697 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25700 *total = cost->add;
25703 if ((value == 2 || value == 3)
25704 && cost->lea <= cost->shift_const)
25706 *total = cost->lea;
25716 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
25718 if (CONST_INT_P (XEXP (x, 1)))
25720 if (INTVAL (XEXP (x, 1)) > 32)
25721 *total = cost->shift_const + COSTS_N_INSNS (2);
25723 *total = cost->shift_const * 2;
25727 if (GET_CODE (XEXP (x, 1)) == AND)
25728 *total = cost->shift_var * 2;
25730 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
25735 if (CONST_INT_P (XEXP (x, 1)))
25736 *total = cost->shift_const;
25738 *total = cost->shift_var;
25743 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25745 /* ??? SSE scalar cost should be used here. */
25746 *total = cost->fmul;
25749 else if (X87_FLOAT_MODE_P (mode))
25751 *total = cost->fmul;
25754 else if (FLOAT_MODE_P (mode))
25756 /* ??? SSE vector cost should be used here. */
25757 *total = cost->fmul;
25762 rtx op0 = XEXP (x, 0);
25763 rtx op1 = XEXP (x, 1);
25765 if (CONST_INT_P (XEXP (x, 1)))
25767 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25768 for (nbits = 0; value != 0; value &= value - 1)
25772 /* This is arbitrary. */
25775 /* Compute costs correctly for widening multiplication. */
25776 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
25777 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
25778 == GET_MODE_SIZE (mode))
25780 int is_mulwiden = 0;
25781 enum machine_mode inner_mode = GET_MODE (op0);
25783 if (GET_CODE (op0) == GET_CODE (op1))
25784 is_mulwiden = 1, op1 = XEXP (op1, 0);
25785 else if (CONST_INT_P (op1))
25787 if (GET_CODE (op0) == SIGN_EXTEND)
25788 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
25791 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
25795 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
25798 *total = (cost->mult_init[MODE_INDEX (mode)]
25799 + nbits * cost->mult_bit
25800 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
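/* Worked example: a multiply by the constant 5 (binary 101) has two
   bits set, so NBITS is 2 and the cost is mult_init + 2 * mult_bit
   plus the cost of computing both operands.  The loop above counts
   set bits by clearing the lowest one each iteration.  */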
25809 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25810 /* ??? SSE cost should be used here. */
25811 *total = cost->fdiv;
25812 else if (X87_FLOAT_MODE_P (mode))
25813 *total = cost->fdiv;
25814 else if (FLOAT_MODE_P (mode))
25815 /* ??? SSE vector cost should be used here. */
25816 *total = cost->fdiv;
25818 *total = cost->divide[MODE_INDEX (mode)];
25822 if (GET_MODE_CLASS (mode) == MODE_INT
25823 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
25825 if (GET_CODE (XEXP (x, 0)) == PLUS
25826 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
25827 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
25828 && CONSTANT_P (XEXP (x, 1)))
25830 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
25831 if (val == 2 || val == 4 || val == 8)
25833 *total = cost->lea;
25834 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25835 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
25836 outer_code, speed);
25837 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25841 else if (GET_CODE (XEXP (x, 0)) == MULT
25842 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
25844 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
25845 if (val == 2 || val == 4 || val == 8)
25847 *total = cost->lea;
25848 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25849 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25853 else if (GET_CODE (XEXP (x, 0)) == PLUS)
25855 *total = cost->lea;
25856 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25857 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25858 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
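/* All of the shapes above match a single lea, e.g.
   (plus (plus (mult reg 4) reg) disp) is emitted as
   lea disp(%base,%index,4), %dest.  */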
25865 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25867 /* ??? SSE cost should be used here. */
25868 *total = cost->fadd;
25871 else if (X87_FLOAT_MODE_P (mode))
25873 *total = cost->fadd;
25876 else if (FLOAT_MODE_P (mode))
25878 /* ??? SSE vector cost should be used here. */
25879 *total = cost->fadd;
25887 if (!TARGET_64BIT && mode == DImode)
25889 *total = (cost->add * 2
25890 + (rtx_cost (XEXP (x, 0), outer_code, speed)
25891 << (GET_MODE (XEXP (x, 0)) != DImode))
25892 + (rtx_cost (XEXP (x, 1), outer_code, speed)
25893 << (GET_MODE (XEXP (x, 1)) != DImode)));
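/* A DImode add on a 32-bit target is an add/adc pair, hence
   cost->add * 2.  An operand whose mode is not DImode (e.g. a
   VOIDmode constant) has its cost doubled by the shift, presumably
   because it must be materialized for both halves.  */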
25899 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25901 /* ??? SSE cost should be used here. */
25902 *total = cost->fchs;
25905 else if (X87_FLOAT_MODE_P (mode))
25907 *total = cost->fchs;
25910 else if (FLOAT_MODE_P (mode))
25912 /* ??? SSE vector cost should be used here. */
25913 *total = cost->fchs;
25919 if (!TARGET_64BIT && mode == DImode)
25920 *total = cost->add * 2;
25922 *total = cost->add;
25926 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
25927 && XEXP (XEXP (x, 0), 1) == const1_rtx
25928 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
25929 && XEXP (x, 1) == const0_rtx)
25931 /* This kind of construct is implemented using test[bwl].
25932 Treat it as if we had an AND. */
25933 *total = (cost->add
25934 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
25935 + rtx_cost (const1_rtx, outer_code, speed));
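/* E.g. (compare (zero_extract x 1 7) 0) tests a single bit and can
   be emitted as testb $0x80, so it is no costlier than an AND.  */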
25941 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
25946 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25947 /* ??? SSE cost should be used here. */
25948 *total = cost->fabs;
25949 else if (X87_FLOAT_MODE_P (mode))
25950 *total = cost->fabs;
25951 else if (FLOAT_MODE_P (mode))
25952 /* ??? SSE vector cost should be used here. */
25953 *total = cost->fabs;
25957 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25958 /* ??? SSE cost should be used here. */
25959 *total = cost->fsqrt;
25960 else if (X87_FLOAT_MODE_P (mode))
25961 *total = cost->fsqrt;
25962 else if (FLOAT_MODE_P (mode))
25963 /* ??? SSE vector cost should be used here. */
25964 *total = cost->fsqrt;
25968 if (XINT (x, 1) == UNSPEC_TP)
25975 case VEC_DUPLICATE:
25976 /* ??? Assume all of these vector manipulation patterns are
25977 recognizable, in which case they all pretty much have the same cost.  */
25979 *total = COSTS_N_INSNS (1);
25989 static int current_machopic_label_num;
25991 /* Given a symbol name and its associated stub, write out the
25992 definition of the stub. */
25995 machopic_output_stub (FILE *file, const char *symb, const char *stub)
25997 unsigned int length;
25998 char *binder_name, *symbol_name, lazy_ptr_name[32];
25999 int label = ++current_machopic_label_num;
26001 /* For 64-bit we shouldn't get here. */
26002 gcc_assert (!TARGET_64BIT);
26004 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
26005 symb = (*targetm.strip_name_encoding) (symb);
26007 length = strlen (stub);
26008 binder_name = XALLOCAVEC (char, length + 32);
26009 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
26011 length = strlen (symb);
26012 symbol_name = XALLOCAVEC (char, length + 32);
26013 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
26015 sprintf (lazy_ptr_name, "L%d$lz", label);
26018 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
26020 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
26022 fprintf (file, "%s:\n", stub);
26023 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26027 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
26028 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
26029 fprintf (file, "\tjmp\t*%%edx\n");
26032 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
26034 fprintf (file, "%s:\n", binder_name);
26038 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
26039 fputs ("\tpushl\t%eax\n", file);
26042 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
26044 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
26046 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
26047 fprintf (file, "%s:\n", lazy_ptr_name);
26048 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26049 fprintf (file, ASM_LONG "%s\n", binder_name);
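/* Illustrative output for a PIC stub of _foo: the code above emits
   roughly
       L_foo$stub:
               .indirect_symbol _foo
               call    LPC$n
       LPC$n:  popl    %eax
               movl    L_foo$lz-LPC$n(%eax),%edx
               jmp     *%edx
   followed by the binder and the L_foo$lz lazy pointer.  */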
26051 #endif /* TARGET_MACHO */
26053 /* Order the registers for the register allocator.  */
26056 x86_order_regs_for_local_alloc (void)
26061 /* First allocate the local general purpose registers. */
26062 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26063 if (GENERAL_REGNO_P (i) && call_used_regs[i])
26064 reg_alloc_order [pos++] = i;
26066 /* Global general purpose registers. */
26067 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26068 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
26069 reg_alloc_order [pos++] = i;
26071 /* x87 registers come first in case we are doing FP math using them.  */
26073 if (!TARGET_SSE_MATH)
26074 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26075 reg_alloc_order [pos++] = i;
26077 /* SSE registers. */
26078 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
26079 reg_alloc_order [pos++] = i;
26080 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
26081 reg_alloc_order [pos++] = i;
26083 /* x87 registers. */
26084 if (TARGET_SSE_MATH)
26085 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26086 reg_alloc_order [pos++] = i;
26088 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
26089 reg_alloc_order [pos++] = i;
26091 /* Initialize the rest of the array, as we do not allocate some registers at all.  */
26093 while (pos < FIRST_PSEUDO_REGISTER)
26094 reg_alloc_order [pos++] = 0;
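/* The resulting order is: call-clobbered general regs, call-saved
   general regs, x87 stack regs (before the SSE regs when doing x87
   FP math, after them otherwise), SSE and REX SSE regs, and finally
   the MMX regs.  */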
26097 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
26098 struct attribute_spec.handler. */
26100 ix86_handle_abi_attribute (tree *node, tree name,
26101 tree args ATTRIBUTE_UNUSED,
26102 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26104 if (TREE_CODE (*node) != FUNCTION_TYPE
26105 && TREE_CODE (*node) != METHOD_TYPE
26106 && TREE_CODE (*node) != FIELD_DECL
26107 && TREE_CODE (*node) != TYPE_DECL)
26109 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26111 *no_add_attrs = true;
26116 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
26118 *no_add_attrs = true;
26122 /* Can combine regparm with all attributes but fastcall. */
26123 if (is_attribute_p ("ms_abi", name))
26125 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
26127 error ("ms_abi and sysv_abi attributes are not compatible");
26132 else if (is_attribute_p ("sysv_abi", name))
26134 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
26136 error ("ms_abi and sysv_abi attributes are not compatible");
26145 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
26146 struct attribute_spec.handler. */
26148 ix86_handle_struct_attribute (tree *node, tree name,
26149 tree args ATTRIBUTE_UNUSED,
26150 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26153 if (DECL_P (*node))
26155 if (TREE_CODE (*node) == TYPE_DECL)
26156 type = &TREE_TYPE (*node);
26161 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
26162 || TREE_CODE (*type) == UNION_TYPE)))
26164 warning (OPT_Wattributes, "%qE attribute ignored",
26166 *no_add_attrs = true;
26169 else if ((is_attribute_p ("ms_struct", name)
26170 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
26171 || ((is_attribute_p ("gcc_struct", name)
26172 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
26174 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
26176 *no_add_attrs = true;
26183 ix86_handle_fndecl_attribute (tree *node, tree name,
26184 tree args ATTRIBUTE_UNUSED,
26185 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26187 if (TREE_CODE (*node) != FUNCTION_DECL)
26189 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26191 *no_add_attrs = true;
26197 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
26202 #ifndef HAVE_AS_IX86_SWAP
26203 sorry ("ms_hook_prologue attribute needs assembler swap suffix support");
26210 ix86_ms_bitfield_layout_p (const_tree record_type)
26212 return ((TARGET_MS_BITFIELD_LAYOUT
26213 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
26214 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
26217 /* Returns an expression indicating where the this parameter is
26218 located on entry to the FUNCTION. */
26221 x86_this_parameter (tree function)
26223 tree type = TREE_TYPE (function);
26224 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
26229 const int *parm_regs;
26231 if (ix86_function_type_abi (type) == MS_ABI)
26232 parm_regs = x86_64_ms_abi_int_parameter_registers;
26234 parm_regs = x86_64_int_parameter_registers;
26235 return gen_rtx_REG (DImode, parm_regs[aggr]);
26238 nregs = ix86_function_regparm (type, function);
26240 if (nregs > 0 && !stdarg_p (type))
26244 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
26245 regno = aggr ? DX_REG : CX_REG;
26246 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
26250 return gen_rtx_MEM (SImode,
26251 plus_constant (stack_pointer_rtx, 4));
26260 return gen_rtx_MEM (SImode,
26261 plus_constant (stack_pointer_rtx, 4));
26264 return gen_rtx_REG (SImode, regno);
26267 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
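/* I.e. with no register parameters, `this' arrives at 4(%esp), or at
   8(%esp) when a hidden aggregate-return pointer occupies the first
   stack slot.  */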
26270 /* Determine whether x86_output_mi_thunk can succeed. */
26273 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
26274 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
26275 HOST_WIDE_INT vcall_offset, const_tree function)
26277 /* 64-bit can handle anything. */
26281 /* For 32-bit, everything's fine if we have one free register. */
26282 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
26285 /* Need a free register for vcall_offset. */
26289 /* Need a free register for GOT references. */
26290 if (flag_pic && !(*targetm.binds_local_p) (function))
26293 /* Otherwise ok. */
26297 /* Output the assembler code for a thunk function. THUNK_DECL is the
26298 declaration for the thunk function itself, FUNCTION is the decl for
26299 the target function. DELTA is an immediate constant offset to be
26300 added to THIS. If VCALL_OFFSET is nonzero, the word at
26301 *(*this + vcall_offset) should be added to THIS. */
26304 x86_output_mi_thunk (FILE *file,
26305 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
26306 HOST_WIDE_INT vcall_offset, tree function)
26309 rtx this_param = x86_this_parameter (function);
26312 /* Make sure unwind info is emitted for the thunk if needed. */
26313 final_start_function (emit_barrier (), file, 1);
26315 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
26316 pull it in now and let DELTA benefit. */
26317 if (REG_P (this_param))
26318 this_reg = this_param;
26319 else if (vcall_offset)
26321 /* Put the this parameter into %eax. */
26322 xops[0] = this_param;
26323 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
26324 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26327 this_reg = NULL_RTX;
26329 /* Adjust the this parameter by a fixed constant. */
26332 xops[0] = GEN_INT (delta);
26333 xops[1] = this_reg ? this_reg : this_param;
26336 if (!x86_64_general_operand (xops[0], DImode))
26338 tmp = gen_rtx_REG (DImode, R10_REG);
26340 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
26342 xops[1] = this_param;
26344 if (x86_maybe_negate_const_int (&xops[0], DImode))
26345 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
26347 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
26349 else if (x86_maybe_negate_const_int (&xops[0], SImode))
26350 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
26352 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
26355 /* Adjust the this parameter by a value stored in the vtable. */
26359 tmp = gen_rtx_REG (DImode, R10_REG);
26362 int tmp_regno = CX_REG;
26363 if (lookup_attribute ("fastcall",
26364 TYPE_ATTRIBUTES (TREE_TYPE (function)))
26365 || lookup_attribute ("thiscall",
26366 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26367 tmp_regno = AX_REG;
26368 tmp = gen_rtx_REG (SImode, tmp_regno);
26371 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26373 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26375 /* Adjust the this parameter. */
26376 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26377 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26379 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26380 xops[0] = GEN_INT (vcall_offset);
26382 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26383 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26385 xops[1] = this_reg;
26386 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26389 /* If necessary, drop THIS back to its stack slot. */
26390 if (this_reg && this_reg != this_param)
26392 xops[0] = this_reg;
26393 xops[1] = this_param;
26394 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26397 xops[0] = XEXP (DECL_RTL (function), 0);
26400 if (!flag_pic || (*targetm.binds_local_p) (function))
26401 output_asm_insn ("jmp\t%P0", xops);
26402 /* All thunks should be in the same object as their target,
26403 and thus binds_local_p should be true. */
26404 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26405 gcc_unreachable ();
26408 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26409 tmp = gen_rtx_CONST (Pmode, tmp);
26410 tmp = gen_rtx_MEM (QImode, tmp);
26412 output_asm_insn ("jmp\t%A0", xops);
26417 if (!flag_pic || (*targetm.binds_local_p) (function))
26418 output_asm_insn ("jmp\t%P0", xops);
26423 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26424 tmp = (gen_rtx_SYMBOL_REF
26426 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26427 tmp = gen_rtx_MEM (QImode, tmp);
26429 output_asm_insn ("jmp\t%0", xops);
26432 #endif /* TARGET_MACHO */
26434 tmp = gen_rtx_REG (SImode, CX_REG);
26435 output_set_got (tmp, NULL_RTX);
26438 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26439 output_asm_insn ("jmp\t{*}%1", xops);
26442 final_end_function ();
26446 x86_file_start (void)
26448 default_file_start ();
26450 darwin_file_start ();
26452 if (X86_FILE_START_VERSION_DIRECTIVE)
26453 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26454 if (X86_FILE_START_FLTUSED)
26455 fputs ("\t.global\t__fltused\n", asm_out_file);
26456 if (ix86_asm_dialect == ASM_INTEL)
26457 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26461 x86_field_alignment (tree field, int computed)
26463 enum machine_mode mode;
26464 tree type = TREE_TYPE (field);
26466 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26468 mode = TYPE_MODE (strip_array_types (type));
26469 if (mode == DFmode || mode == DCmode
26470 || GET_MODE_CLASS (mode) == MODE_INT
26471 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26472 return MIN (32, computed);
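/* E.g. a double field in a struct is capped at 32-bit alignment on
   32-bit targets unless -malign-double is in effect.  */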
26476 /* Output assembler code to FILE to increment profiler label # LABELNO
26477 for profiling a function entry. */
26479 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26483 #ifndef NO_PROFILE_COUNTERS
26484 fprintf (file, "\tleaq\t" LPREFIX "P%d(%%rip),%%r11\n", labelno);
26487 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26488 fputs ("\tcall\t*" MCOUNT_NAME "@GOTPCREL(%rip)\n", file);
26490 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26494 #ifndef NO_PROFILE_COUNTERS
26495 fprintf (file, "\tleal\t" LPREFIX "P%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
26498 fputs ("\tcall\t*" MCOUNT_NAME "@GOT(%ebx)\n", file);
26502 #ifndef NO_PROFILE_COUNTERS
26503 fprintf (file, "\tmovl\t$" LPREFIX "P%d,%%" PROFILE_COUNT_REGISTER "\n",
26506 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26510 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26511 /* We don't have exact information about the insn sizes, but we may assume
26512 quite safely that we are informed about all 1 byte insns and memory
26513 address sizes.  This is enough to eliminate unnecessary padding in 99% of cases.  */
26517 min_insn_size (rtx insn)
26521 if (!INSN_P (insn) || !active_insn_p (insn))
26524 /* Discard alignments we've emitted, and jump table data.  */
26525 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26526 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26528 if (JUMP_TABLE_DATA_P (insn))
26531 /* Important case - calls are always 5 bytes.
26532 It is common to have many calls in a row.  */
26534 && symbolic_reference_mentioned_p (PATTERN (insn))
26535 && !SIBLING_CALL_P (insn))
26537 len = get_attr_length (insn);
26541 /* For normal instructions we rely on get_attr_length being exact,
26542 with a few exceptions. */
26543 if (!JUMP_P (insn))
26545 enum attr_type type = get_attr_type (insn);
26550 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
26551 || asm_noperands (PATTERN (insn)) >= 0)
26558 /* Otherwise trust get_attr_length. */
26562 l = get_attr_length_address (insn);
26563 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
26572 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16 byte window.  */
26576 ix86_avoid_jump_mispredicts (void)
26578 rtx insn, start = get_insns ();
26579 int nbytes = 0, njumps = 0;
26582 /* Look for all minimal intervals of instructions containing 4 jumps.
26583 The intervals are bounded by START and INSN. NBYTES is the total
26584 size of instructions in the interval including INSN and not including
26585 START. When the NBYTES is smaller than 16 bytes, it is possible
26586 that the end of START and INSN ends up in the same 16byte page.
26588 The smallest offset in the page INSN can start is the case where START
26589 ends on the offset 0. Offset of INSN is then NBYTES - sizeof (INSN).
26590 We add p2align to 16byte window with maxskip 15 - NBYTES + sizeof (INSN).
26592 for (insn = start; insn; insn = NEXT_INSN (insn))
26596 if (LABEL_P (insn))
26598 int align = label_to_alignment (insn);
26599 int max_skip = label_to_max_skip (insn);
26603 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
26604 already in the current 16 byte page, because otherwise
26605 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
26606 bytes to reach 16 byte boundary. */
26608 || (align <= 3 && max_skip != (1 << align) - 1))
26611 fprintf (dump_file, "Label %i with max_skip %i\n",
26612 INSN_UID (insn), max_skip);
26615 while (nbytes + max_skip >= 16)
26617 start = NEXT_INSN (start);
26618 if ((JUMP_P (start)
26619 && GET_CODE (PATTERN (start)) != ADDR_VEC
26620 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26622 njumps--, isjump = 1;
26625 nbytes -= min_insn_size (start);
26631 min_size = min_insn_size (insn);
26632 nbytes += min_size;
26634 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
26635 INSN_UID (insn), min_size);
26637 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26638 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26646 start = NEXT_INSN (start);
26647 if ((JUMP_P (start)
26648 && GET_CODE (PATTERN (start)) != ADDR_VEC
26649 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26651 njumps--, isjump = 1;
26654 nbytes -= min_insn_size (start);
26656 gcc_assert (njumps >= 0);
26658 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26659 INSN_UID (start), INSN_UID (insn), nbytes);
26661 if (njumps == 3 && isjump && nbytes < 16)
26663 int padsize = 15 - nbytes + min_insn_size (insn);
26666 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26667 INSN_UID (insn), padsize);
26668 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
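/* The pad size 15 - NBYTES + sizeof (INSN) is exactly enough to push
   INSN past the 16-byte window that would otherwise contain all four
   jumps (see the window arithmetic above).  */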
26674 /* AMD Athlon works faster
26675 when RET is not the destination of a conditional jump or directly preceded
26676 by another jump instruction.  We avoid the penalty by inserting a NOP just
26677 before the RET instructions in such cases.  */
26679 ix86_pad_returns (void)
26684 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26686 basic_block bb = e->src;
26687 rtx ret = BB_END (bb);
26689 bool replace = false;
26691 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26692 || optimize_bb_for_size_p (bb))
26694 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26695 if (active_insn_p (prev) || LABEL_P (prev))
26697 if (prev && LABEL_P (prev))
26702 FOR_EACH_EDGE (e, ei, bb->preds)
26703 if (EDGE_FREQUENCY (e) && e->src->index >= 0
26704 && !(e->flags & EDGE_FALLTHRU))
26709 prev = prev_active_insn (ret);
26711 && ((JUMP_P (prev) && any_condjump_p (prev))
26714 /* Empty functions get a branch mispredict even when the jump destination
26715 is not visible to us. */
26716 if (!prev && !optimize_function_for_size_p (cfun))
26721 emit_jump_insn_before (gen_return_internal_long (), ret);
26727 /* Implement machine specific optimizations. We implement padding of returns
26728 for K8 CPUs and pass to avoid 4 jumps in the single 16 byte window. */
26732 if (optimize && optimize_function_for_speed_p (cfun))
26734 if (TARGET_PAD_RETURNS)
26735 ix86_pad_returns ();
26736 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26737 if (TARGET_FOUR_JUMP_LIMIT)
26738 ix86_avoid_jump_mispredicts ();
26743 /* Return nonzero when a QImode register that must be represented via a REX prefix is used.  */
26746 x86_extended_QIreg_mentioned_p (rtx insn)
26749 extract_insn_cached (insn);
26750 for (i = 0; i < recog_data.n_operands; i++)
26751 if (REG_P (recog_data.operand[i])
26752 && REGNO (recog_data.operand[i]) > BX_REG)
26757 /* Return nonzero when P points to register encoded via REX prefix.
26758 Called via for_each_rtx. */
26760 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
26762 unsigned int regno;
26765 regno = REGNO (*p);
26766 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
26769 /* Return true when INSN mentions a register that must be encoded using a REX prefix.  */
26772 x86_extended_reg_mentioned_p (rtx insn)
26774 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
26775 extended_reg_mentioned_1, NULL);
26778 /* If profitable, negate (without causing overflow) the integer constant
26779 of mode MODE at location LOC.  Return true in that case.  */
26781 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
26785 if (!CONST_INT_P (*loc))
26791 /* DImode x86_64 constants must fit in 32 bits. */
26792 gcc_assert (x86_64_immediate_operand (*loc, mode));
26803 gcc_unreachable ();
26806 /* Avoid overflows. */
26807 if (mode_signbit_p (mode, *loc))
26810 val = INTVAL (*loc);
26812 /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
26813 Exceptions: -128 encodes smaller than 128, so swap sign and op. */
26814 if ((val < 0 && val != -128)
26817 *loc = GEN_INT (-val);
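/* E.g. addl $-4, %eax becomes subl $4, %eax.  -128 is left alone
   because it fits in a sign-extended imm8 while +128 would not.  */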
26824 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
26825 optabs would emit if we didn't have TFmode patterns. */
26828 x86_emit_floatuns (rtx operands[2])
26830 rtx neglab, donelab, i0, i1, f0, in, out;
26831 enum machine_mode mode, inmode;
26833 inmode = GET_MODE (operands[1]);
26834 gcc_assert (inmode == SImode || inmode == DImode);
26837 in = force_reg (inmode, operands[1]);
26838 mode = GET_MODE (out);
26839 neglab = gen_label_rtx ();
26840 donelab = gen_label_rtx ();
26841 f0 = gen_reg_rtx (mode);
26843 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
26845 expand_float (out, in, 0);
26847 emit_jump_insn (gen_jump (donelab));
26850 emit_label (neglab);
26852 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
26854 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
26856 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
26858 expand_float (f0, i0, 0);
26860 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
26862 emit_label (donelab);
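/* For inputs with the sign bit set, the value was halved (with the
   low bit IORed back in to keep rounding correct), converted, and
   then doubled via f0 + f0 -- the usual trick for unsigned-to-float
   when only a signed conversion is available.  */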
26865 /* AVX does not support 32-byte integer vector operations,
26866 thus the longest vector we are faced with is V16QImode. */
26867 #define MAX_VECT_LEN 16
26869 struct expand_vec_perm_d
26871 rtx target, op0, op1;
26872 unsigned char perm[MAX_VECT_LEN];
26873 enum machine_mode vmode;
26874 unsigned char nelt;
26878 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
26879 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
26881 /* Get a vector mode of the same size as the original but with elements
26882 twice as wide. This is only guaranteed to apply to integral vectors. */
26884 static inline enum machine_mode
26885 get_mode_wider_vector (enum machine_mode o)
26887 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
26888 enum machine_mode n = GET_MODE_WIDER_MODE (o);
26889 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
26890 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
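/* E.g. V16QImode yields V8HImode: the same 16-byte size with half as
   many elements, each twice as wide.  */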
26894 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26895 with all elements equal to VAR. Return true if successful. */
26898 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
26899 rtx target, rtx val)
26922 /* First attempt to recognize VAL as-is. */
26923 dup = gen_rtx_VEC_DUPLICATE (mode, val);
26924 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
26925 if (recog_memoized (insn) < 0)
26928 /* If that fails, force VAL into a register. */
26931 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
26932 seq = get_insns ();
26935 emit_insn_before (seq, insn);
26937 ok = recog_memoized (insn) >= 0;
26946 if (TARGET_SSE || TARGET_3DNOW_A)
26950 val = gen_lowpart (SImode, val);
26951 x = gen_rtx_TRUNCATE (HImode, val);
26952 x = gen_rtx_VEC_DUPLICATE (mode, x);
26953 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26966 struct expand_vec_perm_d dperm;
26970 memset (&dperm, 0, sizeof (dperm));
26971 dperm.target = target;
26972 dperm.vmode = mode;
26973 dperm.nelt = GET_MODE_NUNITS (mode);
26974 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
26976 /* Extend to SImode using a paradoxical SUBREG. */
26977 tmp1 = gen_reg_rtx (SImode);
26978 emit_move_insn (tmp1, gen_lowpart (SImode, val));
26980 /* Insert the SImode value as low element of a V4SImode vector. */
26981 tmp2 = gen_lowpart (V4SImode, dperm.op0);
26982 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
26984 ok = (expand_vec_perm_1 (&dperm)
26985 || expand_vec_perm_broadcast_1 (&dperm));
26997 /* Replicate the value once into the next wider mode and recurse. */
26999 enum machine_mode smode, wsmode, wvmode;
27002 smode = GET_MODE_INNER (mode);
27003 wvmode = get_mode_wider_vector (mode);
27004 wsmode = GET_MODE_INNER (wvmode);
27006 val = convert_modes (wsmode, smode, val, true);
27007 x = expand_simple_binop (wsmode, ASHIFT, val,
27008 GEN_INT (GET_MODE_BITSIZE (smode)),
27009 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27010 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
27012 x = gen_lowpart (wvmode, target);
27013 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
27021 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
27022 rtx x = gen_reg_rtx (hvmode);
27024 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
27027 x = gen_rtx_VEC_CONCAT (mode, x, x);
27028 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27037 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27038 whose ONE_VAR element is VAR, and other elements are zero.  Return true if successful.  */
27042 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
27043 rtx target, rtx var, int one_var)
27045 enum machine_mode vsimode;
27048 bool use_vector_set = false;
27053 /* For SSE4.1, we normally use vector set. But if the second
27054 element is zero and inter-unit moves are OK, we use movq instead.  */
27056 use_vector_set = (TARGET_64BIT
27058 && !(TARGET_INTER_UNIT_MOVES
27064 use_vector_set = TARGET_SSE4_1;
27067 use_vector_set = TARGET_SSE2;
27070 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
27077 use_vector_set = TARGET_AVX;
27080 /* Use ix86_expand_vector_set in 64bit mode only. */
27081 use_vector_set = TARGET_AVX && TARGET_64BIT;
27087 if (use_vector_set)
27089 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
27090 var = force_reg (GET_MODE_INNER (mode), var);
27091 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27107 var = force_reg (GET_MODE_INNER (mode), var);
27108 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
27109 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27114 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
27115 new_target = gen_reg_rtx (mode);
27117 new_target = target;
27118 var = force_reg (GET_MODE_INNER (mode), var);
27119 x = gen_rtx_VEC_DUPLICATE (mode, var);
27120 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
27121 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
27124 /* We need to shuffle the value to the correct position, so
27125 create a new pseudo to store the intermediate result. */
27127 /* With SSE2, we can use the integer shuffle insns. */
27128 if (mode != V4SFmode && TARGET_SSE2)
27130 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
27132 GEN_INT (one_var == 1 ? 0 : 1),
27133 GEN_INT (one_var == 2 ? 0 : 1),
27134 GEN_INT (one_var == 3 ? 0 : 1)));
27135 if (target != new_target)
27136 emit_move_insn (target, new_target);
27140 /* Otherwise convert the intermediate result to V4SFmode and
27141 use the SSE1 shuffle instructions. */
27142 if (mode != V4SFmode)
27144 tmp = gen_reg_rtx (V4SFmode);
27145 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
27150 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
27152 GEN_INT (one_var == 1 ? 0 : 1),
27153 GEN_INT (one_var == 2 ? 0+4 : 1+4),
27154 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
27156 if (mode != V4SFmode)
27157 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
27158 else if (tmp != target)
27159 emit_move_insn (target, tmp);
27161 else if (target != new_target)
27162 emit_move_insn (target, new_target);
27167 vsimode = V4SImode;
27173 vsimode = V2SImode;
27179 /* Zero extend the variable element to SImode and recurse. */
27180 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
27182 x = gen_reg_rtx (vsimode);
27183 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
27185 gcc_unreachable ();
27187 emit_move_insn (target, gen_lowpart (mode, x));
27195 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27196 consisting of the values in VALS. It is known that all elements
27197 except ONE_VAR are constants. Return true if successful. */
27200 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
27201 rtx target, rtx vals, int one_var)
27203 rtx var = XVECEXP (vals, 0, one_var);
27204 enum machine_mode wmode;
27207 const_vec = copy_rtx (vals);
27208 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
27209 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
27217 /* For the two element vectors, it's just as easy to use
27218 the general case. */
27222 /* Use ix86_expand_vector_set in 64bit mode only. */
27245 /* There's no way to set one QImode entry easily. Combine
27246 the variable value with its adjacent constant value, and
27247 promote to an HImode set. */
27248 x = XVECEXP (vals, 0, one_var ^ 1);
27251 var = convert_modes (HImode, QImode, var, true);
27252 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
27253 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27254 x = GEN_INT (INTVAL (x) & 0xff);
27258 var = convert_modes (HImode, QImode, var, true);
27259 x = gen_int_mode (INTVAL (x) << 8, HImode);
27261 if (x != const0_rtx)
27262 var = expand_simple_binop (HImode, IOR, var, x, var,
27263 1, OPTAB_LIB_WIDEN);
27265 x = gen_reg_rtx (wmode);
27266 emit_move_insn (x, gen_lowpart (wmode, const_vec));
27267 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
27269 emit_move_insn (target, gen_lowpart (mode, x));
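/* E.g. for a variable byte at index 5 of a V16QImode vector, the
   constant at index 4 (one_var ^ 1) is ORed into the other half of
   the 16-bit word, and the combined value is inserted as HImode
   element 2 (one_var >> 1).  */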
27276 emit_move_insn (target, const_vec);
27277 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27281 /* A subroutine of ix86_expand_vector_init_general. Use vector
27282 concatenate to handle the most general case: all values variable,
27283 and none identical. */
27286 ix86_expand_vector_init_concat (enum machine_mode mode,
27287 rtx target, rtx *ops, int n)
27289 enum machine_mode cmode, hmode = VOIDmode;
27290 rtx first[8], second[4];
27330 gcc_unreachable ();
27333 if (!register_operand (ops[1], cmode))
27334 ops[1] = force_reg (cmode, ops[1]);
27335 if (!register_operand (ops[0], cmode))
27336 ops[0] = force_reg (cmode, ops[0]);
27337 emit_insn (gen_rtx_SET (VOIDmode, target,
27338 gen_rtx_VEC_CONCAT (mode, ops[0],
27358 gcc_unreachable ();
27374 gcc_unreachable ();
27379 /* FIXME: We process inputs backward to help RA. PR 36222. */
27382 for (; i > 0; i -= 2, j--)
27384 first[j] = gen_reg_rtx (cmode);
27385 v = gen_rtvec (2, ops[i - 1], ops[i]);
27386 ix86_expand_vector_init (false, first[j],
27387 gen_rtx_PARALLEL (cmode, v));
27393 gcc_assert (hmode != VOIDmode);
27394 for (i = j = 0; i < n; i += 2, j++)
27396 second[j] = gen_reg_rtx (hmode);
27397 ix86_expand_vector_init_concat (hmode, second [j],
27401 ix86_expand_vector_init_concat (mode, target, second, n);
27404 ix86_expand_vector_init_concat (mode, target, first, n);
27408 gcc_unreachable ();
27412 /* A subroutine of ix86_expand_vector_init_general. Use vector
27413 interleave to handle the most general case: all values variable,
27414 and none identical. */
27417 ix86_expand_vector_init_interleave (enum machine_mode mode,
27418 rtx target, rtx *ops, int n)
27420 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
27423 rtx (*gen_load_even) (rtx, rtx, rtx);
27424 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
27425 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
27430 gen_load_even = gen_vec_setv8hi;
27431 gen_interleave_first_low = gen_vec_interleave_lowv4si;
27432 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27433 inner_mode = HImode;
27434 first_imode = V4SImode;
27435 second_imode = V2DImode;
27436 third_imode = VOIDmode;
27439 gen_load_even = gen_vec_setv16qi;
27440 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
27441 gen_interleave_second_low = gen_vec_interleave_lowv4si;
27442 inner_mode = QImode;
27443 first_imode = V8HImode;
27444 second_imode = V4SImode;
27445 third_imode = V2DImode;
27448 gcc_unreachable ();
27451 for (i = 0; i < n; i++)
27453 /* Extend the odd element to SImode using a paradoxical SUBREG.  */
27454 op0 = gen_reg_rtx (SImode);
27455 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
27457 /* Insert the SImode value as low element of V4SImode vector. */
27458 op1 = gen_reg_rtx (V4SImode);
27459 op0 = gen_rtx_VEC_MERGE (V4SImode,
27460 gen_rtx_VEC_DUPLICATE (V4SImode,
27462 CONST0_RTX (V4SImode),
27464 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
27466 /* Cast the V4SImode vector back to a vector in the original mode.  */
27467 op0 = gen_reg_rtx (mode);
27468 emit_move_insn (op0, gen_lowpart (mode, op1));
27470 /* Load even elements into the second position.  */
27471 emit_insn ((*gen_load_even) (op0,
27472 force_reg (inner_mode,
27476 /* Cast vector to FIRST_IMODE vector. */
27477 ops[i] = gen_reg_rtx (first_imode);
27478 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27481 /* Interleave low FIRST_IMODE vectors. */
27482 for (i = j = 0; i < n; i += 2, j++)
27484 op0 = gen_reg_rtx (first_imode);
27485 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27487 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27488 ops[j] = gen_reg_rtx (second_imode);
27489 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27492 /* Interleave low SECOND_IMODE vectors. */
27493 switch (second_imode)
27496 for (i = j = 0; i < n / 2; i += 2, j++)
27498 op0 = gen_reg_rtx (second_imode);
27499 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
27502 /* Cast the SECOND_IMODE vector to a THIRD_IMODE vector.  */
27504 ops[j] = gen_reg_rtx (third_imode);
27505 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27507 second_imode = V2DImode;
27508 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27512 op0 = gen_reg_rtx (second_imode);
27513 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
27516 /* Cast the SECOND_IMODE vector back to a vector of the original mode.  */
27518 emit_insn (gen_rtx_SET (VOIDmode, target,
27519 gen_lowpart (mode, op0)));
27523 gcc_unreachable ();
27527 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27528 all values variable, and none identical. */
27531 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27532 rtx target, rtx vals)
27534 rtx ops[32], op0, op1;
27535 enum machine_mode half_mode = VOIDmode;
27542 if (!mmx_ok && !TARGET_SSE)
27554 n = GET_MODE_NUNITS (mode);
27555 for (i = 0; i < n; i++)
27556 ops[i] = XVECEXP (vals, 0, i);
27557 ix86_expand_vector_init_concat (mode, target, ops, n);
27561 half_mode = V16QImode;
27565 half_mode = V8HImode;
27569 n = GET_MODE_NUNITS (mode);
27570 for (i = 0; i < n; i++)
27571 ops[i] = XVECEXP (vals, 0, i);
27572 op0 = gen_reg_rtx (half_mode);
27573 op1 = gen_reg_rtx (half_mode);
27574 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27576 ix86_expand_vector_init_interleave (half_mode, op1,
27577 &ops [n >> 1], n >> 2);
27578 emit_insn (gen_rtx_SET (VOIDmode, target,
27579 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27583 if (!TARGET_SSE4_1)
27591 /* Don't use ix86_expand_vector_init_interleave if we can't
27592 move from GPR to SSE register directly. */
27593 if (!TARGET_INTER_UNIT_MOVES)
27596 n = GET_MODE_NUNITS (mode);
27597 for (i = 0; i < n; i++)
27598 ops[i] = XVECEXP (vals, 0, i);
27599 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27607 gcc_unreachable ();
27611 int i, j, n_elts, n_words, n_elt_per_word;
27612 enum machine_mode inner_mode;
27613 rtx words[4], shift;
27615 inner_mode = GET_MODE_INNER (mode);
27616 n_elts = GET_MODE_NUNITS (mode);
27617 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27618 n_elt_per_word = n_elts / n_words;
27619 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
27621 for (i = 0; i < n_words; ++i)
27623 rtx word = NULL_RTX;
27625 for (j = 0; j < n_elt_per_word; ++j)
27627 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27628 elt = convert_modes (word_mode, inner_mode, elt, true);
27634 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27635 word, 1, OPTAB_LIB_WIDEN);
27636 word = expand_simple_binop (word_mode, IOR, word, elt,
27637 word, 1, OPTAB_LIB_WIDEN);
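/* Note the reversed element index above: the word is accumulated
   most-significant element first, shifting left by one element and
   ORing in the next.  */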
27645 emit_move_insn (target, gen_lowpart (mode, words[0]));
27646 else if (n_words == 2)
27648 rtx tmp = gen_reg_rtx (mode);
27649 emit_clobber (tmp);
27650 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27651 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27652 emit_move_insn (target, tmp);
27654 else if (n_words == 4)
27656 rtx tmp = gen_reg_rtx (V4SImode);
27657 gcc_assert (word_mode == SImode);
27658 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27659 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27660 emit_move_insn (target, gen_lowpart (mode, tmp));
27663 gcc_unreachable ();
27667 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27668 instructions unless MMX_OK is true. */
27671 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27673 enum machine_mode mode = GET_MODE (target);
27674 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27675 int n_elts = GET_MODE_NUNITS (mode);
27676 int n_var = 0, one_var = -1;
27677 bool all_same = true, all_const_zero = true;
27681 for (i = 0; i < n_elts; ++i)
27683 x = XVECEXP (vals, 0, i);
27684 if (!(CONST_INT_P (x)
27685 || GET_CODE (x) == CONST_DOUBLE
27686 || GET_CODE (x) == CONST_FIXED))
27687 n_var++, one_var = i;
27688 else if (x != CONST0_RTX (inner_mode))
27689 all_const_zero = false;
27690 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27694 /* Constants are best loaded from the constant pool. */
27697 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27701 /* If all values are identical, broadcast the value. */
27703 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27704 XVECEXP (vals, 0, 0)))
27707 /* Values where only one field is non-constant are best loaded from
27708 the pool and overwritten via move later. */
27712 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27713 XVECEXP (vals, 0, one_var),
27717 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27721 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
27725 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27727 enum machine_mode mode = GET_MODE (target);
27728 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27729 enum machine_mode half_mode;
27730 bool use_vec_merge = false;
27732 static rtx (*gen_extract[6][2]) (rtx, rtx)
27734 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27735 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27736 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27737 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27738 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27739 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27741 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27743 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27744 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27745 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27746 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27747 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27748 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27758 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
27759 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
27761 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
27763 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
27764 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27770 use_vec_merge = TARGET_SSE4_1;
27778 /* For the two element vectors, we implement a VEC_CONCAT with
27779 the extraction of the other element. */
27781 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
27782 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
27785 op0 = val, op1 = tmp;
27787 op0 = tmp, op1 = val;
27789 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
27790 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27795 use_vec_merge = TARGET_SSE4_1;
27802 use_vec_merge = true;
27806 /* tmp = target = A B C D */
27807 tmp = copy_to_reg (target);
27808 /* target = A A B B */
27809 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
27810 /* target = X A B B */
27811 ix86_expand_vector_set (false, target, val, 0);
27812 /* target = A X C D */
27813 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27814 const1_rtx, const0_rtx,
27815 GEN_INT (2+4), GEN_INT (3+4)));
27819 /* tmp = target = A B C D */
27820 tmp = copy_to_reg (target);
27821 /* tmp = X B C D */
27822 ix86_expand_vector_set (false, tmp, val, 0);
27823 /* target = A B X D */
27824 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27825 const0_rtx, const1_rtx,
27826 GEN_INT (0+4), GEN_INT (3+4)));
27830 /* tmp = target = A B C D */
27831 tmp = copy_to_reg (target);
27832 /* tmp = X B C D */
27833 ix86_expand_vector_set (false, tmp, val, 0);
27834 /* target = A B X D */
27835 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27836 const0_rtx, const1_rtx,
27837 GEN_INT (2+4), GEN_INT (0+4)));
27841 gcc_unreachable ();
27846 use_vec_merge = TARGET_SSE4_1;
27850 /* Element 0 handled by vec_merge below. */
27853 use_vec_merge = true;
27859 /* With SSE2, use integer shuffles to swap element 0 and ELT,
27860 store into element 0, then shuffle them back. */
27864 order[0] = GEN_INT (elt);
27865 order[1] = const1_rtx;
27866 order[2] = const2_rtx;
27867 order[3] = GEN_INT (3);
27868 order[elt] = const0_rtx;
27870 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27871 order[1], order[2], order[3]));
27873 ix86_expand_vector_set (false, target, val, 0);
27875 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27876 order[1], order[2], order[3]));
27880 /* For SSE1, we have to reuse the V4SF code. */
27881 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
27882 gen_lowpart (SFmode, val), elt);
27887 use_vec_merge = TARGET_SSE2;
27890 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27894 use_vec_merge = TARGET_SSE4_1;
27901 half_mode = V16QImode;
27907 half_mode = V8HImode;
27913 half_mode = V4SImode;
27919 half_mode = V2DImode;
27925 half_mode = V4SFmode;
27931 half_mode = V2DFmode;
27937 /* Compute offset. */
27941 gcc_assert (i <= 1);
27943 /* Extract the half. */
27944 tmp = gen_reg_rtx (half_mode);
27945 emit_insn ((*gen_extract[j][i]) (tmp, target));
27947 /* Put val in tmp at elt. */
27948 ix86_expand_vector_set (false, tmp, val, elt);
27951 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
27960 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
27961 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
27962 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27966 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27968 emit_move_insn (mem, target);
27970 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27971 emit_move_insn (tmp, val);
27973 emit_move_insn (target, mem);
27978 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
27980 enum machine_mode mode = GET_MODE (vec);
27981 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27982 bool use_vec_extr = false;
27995 use_vec_extr = true;
27999 use_vec_extr = TARGET_SSE4_1;
28011 tmp = gen_reg_rtx (mode);
28012 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
28013 GEN_INT (elt), GEN_INT (elt),
28014 GEN_INT (elt+4), GEN_INT (elt+4)));
28018 tmp = gen_reg_rtx (mode);
28019 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
28023 gcc_unreachable ();
28026 use_vec_extr = true;
28031 use_vec_extr = TARGET_SSE4_1;
28045 tmp = gen_reg_rtx (mode);
28046 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
28047 GEN_INT (elt), GEN_INT (elt),
28048 GEN_INT (elt), GEN_INT (elt)));
28052 tmp = gen_reg_rtx (mode);
28053 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
28057 gcc_unreachable ();
28060 use_vec_extr = true;
28065 /* For SSE1, we have to reuse the V4SF code. */
28066 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
28067 gen_lowpart (V4SFmode, vec), elt);
28073 use_vec_extr = TARGET_SSE2;
28076 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28080 use_vec_extr = TARGET_SSE4_1;
28084 /* ??? Could extract the appropriate HImode element and shift. */
28091 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
28092 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
28094 /* Let the rtl optimizers know about the zero extension performed. */
28095 if (inner_mode == QImode || inner_mode == HImode)
28097 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
28098 target = gen_lowpart (SImode, target);
28101 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28105 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28107 emit_move_insn (mem, vec);
28109 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
28110 emit_move_insn (target, tmp);
28114 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
28115 pattern to reduce; DEST is the destination; IN is the input vector. */
28118 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
28120 rtx tmp1, tmp2, tmp3;
28122 tmp1 = gen_reg_rtx (V4SFmode);
28123 tmp2 = gen_reg_rtx (V4SFmode);
28124 tmp3 = gen_reg_rtx (V4SFmode);
28126 emit_insn (gen_sse_movhlps (tmp1, in, in));
28127 emit_insn (fn (tmp2, tmp1, in));
28129 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
28130 const1_rtx, const1_rtx,
28131 GEN_INT (1+4), GEN_INT (1+4)));
28132 emit_insn (fn (dest, tmp2, tmp3));
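/* Data-flow example, assuming IN = { a b c d } and FN is the V4SF add:
     tmp1 = { c d c d }                 (movhlps)
     tmp2 = { a+c b+d ... }             (fn)
     tmp3 = { b+d b+d b+d b+d }         (shufps, splat element 1)
     dest = { a+b+c+d ... }             (fn)
   so element 0 of DEST holds the complete reduction.  */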
28135 /* Target hook for scalar_mode_supported_p. */
28137 ix86_scalar_mode_supported_p (enum machine_mode mode)
28139 if (DECIMAL_FLOAT_MODE_P (mode))
28140 return default_decimal_float_supported_p ();
28141 else if (mode == TFmode)
28144 return default_scalar_mode_supported_p (mode);
28147 /* Implements target hook vector_mode_supported_p. */
28149 ix86_vector_mode_supported_p (enum machine_mode mode)
28151 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
28153 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
28155 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
28157 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
28159 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
28164 /* Target hook for c_mode_for_suffix. */
28165 static enum machine_mode
28166 ix86_c_mode_for_suffix (char suffix)
28176 /* Worker function for TARGET_MD_ASM_CLOBBERS.
28178 We do this in the new i386 backend to maintain source compatibility
28179 with the old cc0-based compiler. */
28182 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
28183 tree inputs ATTRIBUTE_UNUSED,
28186 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
28188 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
28193 /* Implements the target vector hook targetm.asm.encode_section_info.
28194 This is not used by NetWare. */
28196 static void ATTRIBUTE_UNUSED
28197 ix86_encode_section_info (tree decl, rtx rtl, int first)
28199 default_encode_section_info (decl, rtl, first);
28201 if (TREE_CODE (decl) == VAR_DECL
28202 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
28203 && ix86_in_large_data_p (decl))
28204 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
28207 /* Worker function for REVERSE_CONDITION. */
28210 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
28212 return (mode != CCFPmode && mode != CCFPUmode
28213 ? reverse_condition (code)
28214 : reverse_condition_maybe_unordered (code));
28217 /* Output code to perform an x87 FP register move, from OPERANDS[1] to OPERANDS[0]. */
28221 output_387_reg_move (rtx insn, rtx *operands)
28223 if (REG_P (operands[0]))
28225 if (REG_P (operands[1])
28226 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28228 if (REGNO (operands[0]) == FIRST_STACK_REG)
28229 return output_387_ffreep (operands, 0);
28230 return "fstp\t%y0";
28232 if (STACK_TOP_P (operands[0]))
28233 return "fld%Z1\t%y1";
28236 else if (MEM_P (operands[0]))
28238 gcc_assert (REG_P (operands[1]));
28239 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28240 return "fstp%Z0\t%y0";
28243 /* There is no non-popping store to memory for XFmode.
28244 So if we need one, follow the store with a load. */
28245 if (GET_MODE (operands[0]) == XFmode)
28246 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
28248 return "fst%Z0\t%y0";
28255 /* Output code to perform a conditional jump to LABEL, if the C2 flag
28256 in the FP status register is set. */
28259 ix86_emit_fp_unordered_jump (rtx label)
28261 rtx reg = gen_reg_rtx (HImode);
28264 emit_insn (gen_x86_fnstsw_1 (reg));
28266 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
28268 emit_insn (gen_x86_sahf_1 (reg));
28270 temp = gen_rtx_REG (CCmode, FLAGS_REG);
28271 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
28275 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
28277 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
28278 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
28281 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
28282 gen_rtx_LABEL_REF (VOIDmode, label),
28284 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
28286 emit_jump_insn (temp);
28287 predict_jump (REG_BR_PROB_BASE * 10 / 100);
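/* C2 is bit 10 of the FP status word, i.e. bit 2 (0x04) of the high
   byte stored by fnstsw and tested above; with sahf it lands in PF,
   which is what the UNORDERED test inspects.  */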
28290 /* Output code to perform a log1p XFmode calculation. */
28292 void ix86_emit_i387_log1p (rtx op0, rtx op1)
28294 rtx label1 = gen_label_rtx ();
28295 rtx label2 = gen_label_rtx ();
28297 rtx tmp = gen_reg_rtx (XFmode);
28298 rtx tmp2 = gen_reg_rtx (XFmode);
28301 emit_insn (gen_absxf2 (tmp, op1));
28302 test = gen_rtx_GE (VOIDmode, tmp,
28303 CONST_DOUBLE_FROM_REAL_VALUE (
28304 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
28306 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
28308 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28309 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
28310 emit_jump (label2);
28312 emit_label (label1);
28313 emit_move_insn (tmp, CONST1_RTX (XFmode));
28314 emit_insn (gen_addxf3 (tmp, op1, tmp));
28315 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28316 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
28318 emit_label (label2);
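/* The branch threshold above is 1 - sqrt(2)/2 ~= 0.2928932...; fyl2xp1
   is only specified for arguments of magnitude below that bound, so for
   larger |x| we fall back to fyl2x on 1 + x.  Either way the result is
   log1p (x) = ln (2) * log2 (1 + x).  */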
28321 /* Output code to perform a Newton-Raphson approximation of a single precision
28322 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
28324 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
28326 rtx x0, x1, e0, e1, two;
28328 x0 = gen_reg_rtx (mode);
28329 e0 = gen_reg_rtx (mode);
28330 e1 = gen_reg_rtx (mode);
28331 x1 = gen_reg_rtx (mode);
28333 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
28335 if (VECTOR_MODE_P (mode))
28336 two = ix86_build_const_vector (SFmode, true, two);
28338 two = force_reg (mode, two);
28340 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
28342 /* x0 = rcp(b) estimate */
28343 emit_insn (gen_rtx_SET (VOIDmode, x0,
28344 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
28347 emit_insn (gen_rtx_SET (VOIDmode, e0,
28348 gen_rtx_MULT (mode, x0, a)));
28350 emit_insn (gen_rtx_SET (VOIDmode, e1,
28351 gen_rtx_MULT (mode, x0, b)));
28353 emit_insn (gen_rtx_SET (VOIDmode, x1,
28354 gen_rtx_MINUS (mode, two, e1)));
28355 /* res = e0 * x1 */
28356 emit_insn (gen_rtx_SET (VOIDmode, res,
28357 gen_rtx_MULT (mode, e0, x1)));
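/* A scalar sketch of the sequence above, with rcp () standing in for
   the rcpss/rcpps estimate (accurate to about 12 bits); the single
   Newton-Raphson step x1 = 2 - b*x0 roughly doubles the number of
   correct bits:

     float swdiv (float a, float b)
     {
       float x0 = rcp (b);
       float e0 = x0 * a;
       float e1 = x0 * b;
       float x1 = 2.0f - e1;
       return e0 * x1;
     }  */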
28360 /* Output code to perform a Newton-Raphson approximation of a
28361 single precision floating point [reciprocal] square root. */
28363 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
28366 rtx x0, e0, e1, e2, e3, mthree, mhalf;
28369 x0 = gen_reg_rtx (mode);
28370 e0 = gen_reg_rtx (mode);
28371 e1 = gen_reg_rtx (mode);
28372 e2 = gen_reg_rtx (mode);
28373 e3 = gen_reg_rtx (mode);
28375 real_from_integer (&r, VOIDmode, -3, -1, 0);
28376 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28378 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
28379 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28381 if (VECTOR_MODE_P (mode))
28383 mthree = ix86_build_const_vector (SFmode, true, mthree);
28384 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
28387 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
28388 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
28390 /* x0 = rsqrt(a) estimate */
28391 emit_insn (gen_rtx_SET (VOIDmode, x0,
28392 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
28395 /* If a == 0.0, mask out the infinite rsqrt estimate to prevent NaN for sqrt(0.0). */
28400 zero = gen_reg_rtx (mode);
28401 mask = gen_reg_rtx (mode);
28403 zero = force_reg (mode, CONST0_RTX(mode));
28404 emit_insn (gen_rtx_SET (VOIDmode, mask,
28405 gen_rtx_NE (mode, zero, a)));
28407 emit_insn (gen_rtx_SET (VOIDmode, x0,
28408 gen_rtx_AND (mode, x0, mask)));
28412 emit_insn (gen_rtx_SET (VOIDmode, e0,
28413 gen_rtx_MULT (mode, x0, a)));
28415 emit_insn (gen_rtx_SET (VOIDmode, e1,
28416 gen_rtx_MULT (mode, e0, x0)));
28419 mthree = force_reg (mode, mthree);
28420 emit_insn (gen_rtx_SET (VOIDmode, e2,
28421 gen_rtx_PLUS (mode, e1, mthree)));
28423 mhalf = force_reg (mode, mhalf);
28425 /* e3 = -.5 * x0 */
28426 emit_insn (gen_rtx_SET (VOIDmode, e3,
28427 gen_rtx_MULT (mode, x0, mhalf)));
28429 /* e3 = -.5 * e0 */
28430 emit_insn (gen_rtx_SET (VOIDmode, e3,
28431 gen_rtx_MULT (mode, e0, mhalf)));
28432 /* ret = e2 * e3 */
28433 emit_insn (gen_rtx_SET (VOIDmode, res,
28434 gen_rtx_MULT (mode, e2, e3)));
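/* Scalar sketch of the rsqrt case above, with rsqrt () standing in for
   the rsqrtss/rsqrtps estimate; the alternative assignment
   e3 = e0 * -0.5f (the second emit above) yields the sqrt variant:

     float swrsqrt (float a)
     {
       float x0 = rsqrt (a);
       float e0 = x0 * a;
       float e1 = e0 * x0;
       float e2 = e1 - 3.0f;
       float e3 = x0 * -0.5f;
       return e2 * e3;
     }  */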
28437 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
28439 static void ATTRIBUTE_UNUSED
28440 i386_solaris_elf_named_section (const char *name, unsigned int flags,
28443 /* With Binutils 2.15, the "@unwind" marker must be specified on
28444 every occurrence of the ".eh_frame" section, not just the first
28447 && strcmp (name, ".eh_frame") == 0)
28449 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28450 flags & SECTION_WRITE ? "aw" : "a");
28453 default_elf_asm_named_section (name, flags, decl);
28456 /* Return the mangling of TYPE if it is an extended fundamental type. */
28458 static const char *
28459 ix86_mangle_type (const_tree type)
28461 type = TYPE_MAIN_VARIANT (type);
28463 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28464 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28467 switch (TYPE_MODE (type))
28470 /* __float128 is "g". */
28473 /* "long double" or __float80 is "e". */
28480 /* For 32-bit code we can save PIC register setup by using
28481 __stack_chk_fail_local hidden function instead of calling
28482 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
28483 register, so it is better to call __stack_chk_fail directly. */
28486 ix86_stack_protect_fail (void)
28488 return TARGET_64BIT
28489 ? default_external_stack_protect_fail ()
28490 : default_hidden_stack_protect_fail ();
28493 /* Select a format to encode pointers in exception handling data. CODE
28494 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28495 true if the symbol may be affected by dynamic relocations.
28497 ??? All x86 object file formats are capable of representing this.
28498 After all, the relocation needed is the same as for the call insn.
28499 Whether or not a particular assembler allows us to enter such, I
28500 guess we'll have to see. */
28502 asm_preferred_eh_data_format (int code, int global)
28506 int type = DW_EH_PE_sdata8;
28508 || ix86_cmodel == CM_SMALL_PIC
28509 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28510 type = DW_EH_PE_sdata4;
28511 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28513 if (ix86_cmodel == CM_SMALL
28514 || (ix86_cmodel == CM_MEDIUM && code))
28515 return DW_EH_PE_udata4;
28516 return DW_EH_PE_absptr;
28519 /* Expand copysign from SIGN to the positive value ABS_VALUE
28520 storing in RESULT. If MASK is non-null, it shall be a mask to mask out
28523 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28525 enum machine_mode mode = GET_MODE (sign);
28526 rtx sgn = gen_reg_rtx (mode);
28527 if (mask == NULL_RTX)
28529 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28530 if (!VECTOR_MODE_P (mode))
28532 /* We need to generate a scalar mode mask in this case. */
28533 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28534 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28535 mask = gen_reg_rtx (mode);
28536 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28540 mask = gen_rtx_NOT (mode, mask);
28541 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28542 gen_rtx_AND (mode, mask, sign)));
28543 emit_insn (gen_rtx_SET (VOIDmode, result,
28544 gen_rtx_IOR (mode, abs_value, sgn)));
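/* I.e. result = abs_value | (sign & signbit); when MASK is supplied
   (by ix86_expand_sse_fabs) it is the inverted sign-bit mask, hence
   the NOT applied above before the AND.  */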
28547 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28548 mask for masking out the sign-bit is stored in *SMASK, if that is non-null. */
28551 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28553 enum machine_mode mode = GET_MODE (op0);
28556 xa = gen_reg_rtx (mode);
28557 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28558 if (!VECTOR_MODE_P (mode))
28560 /* We need to generate a scalar mode mask in this case. */
28561 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28562 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28563 mask = gen_reg_rtx (mode);
28564 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28566 emit_insn (gen_rtx_SET (VOIDmode, xa,
28567 gen_rtx_AND (mode, op0, mask)));
28575 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28576 swapping the operands if SWAP_OPERANDS is true. The expanded
28577 code is a forward jump to a newly created label in case the
28578 comparison is true. The generated label rtx is returned. */
28580 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28581 bool swap_operands)
28592 label = gen_label_rtx ();
28593 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28594 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28595 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28596 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28597 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28598 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28599 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28600 JUMP_LABEL (tmp) = label;
28605 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28606 using comparison code CODE. Operands are swapped for the comparison if
28607 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
28609 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28610 bool swap_operands)
28612 enum machine_mode mode = GET_MODE (op0);
28613 rtx mask = gen_reg_rtx (mode);
28622 if (mode == DFmode)
28623 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28624 gen_rtx_fmt_ee (code, mode, op0, op1)));
28626 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28627 gen_rtx_fmt_ee (code, mode, op0, op1)));
28632 /* Generate and return a rtx of mode MODE for 2**n where n is the number
28633 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
28635 ix86_gen_TWO52 (enum machine_mode mode)
28637 REAL_VALUE_TYPE TWO52r;
28640 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28641 TWO52 = const_double_from_real_value (TWO52r, mode);
28642 TWO52 = force_reg (mode, TWO52);
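/* Adding and then subtracting 2**52 forces rounding to integer, since
   no fraction bits survive at that magnitude.  E.g. for DFmode and
   x = 3.7:
     3.7 + 2**52 = 4503599627370499.7, which rounds to 4503599627370500.0;
     4503599627370500.0 - 2**52 = 4.0 = rint (3.7).
   SFmode uses 2**23 for the same reason (23 mantissa bits).  */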
28647 /* Expand SSE sequence for computing lround from OP1 storing
28650 ix86_expand_lround (rtx op0, rtx op1)
28652 /* C code for the stuff we're doing below:
28653 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28656 enum machine_mode mode = GET_MODE (op1);
28657 const struct real_format *fmt;
28658 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28661 /* load nextafter (0.5, 0.0) */
28662 fmt = REAL_MODE_FORMAT (mode);
28663 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28664 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28666 /* adj = copysign (0.5, op1) */
28667 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28668 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28670 /* adj = op1 + adj */
28671 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28673 /* op0 = (imode)adj */
28674 expand_fix (op0, adj, 0);
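/* Why nextafter (0.5, 0.0) rather than 0.5: for the largest double
   below 0.5, adding plain 0.5 would round up to exactly 1.0 and
   truncate to 1, but lround must return 0.  pred_half = 0.5 - 2**(-p-1)
   (2**-54 for DFmode) keeps that sum just below 1.0, while for
   op1 = 0.5 the sum still rounds up to 1.0 as required.  */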
28677 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing
28680 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28682 /* C code for the stuff we're doing below (for do_floor):
28684 xi -= (double)xi > op1 ? 1 : 0;
28687 enum machine_mode fmode = GET_MODE (op1);
28688 enum machine_mode imode = GET_MODE (op0);
28689 rtx ireg, freg, label, tmp;
28691 /* reg = (long)op1 */
28692 ireg = gen_reg_rtx (imode);
28693 expand_fix (ireg, op1, 0);
28695 /* freg = (double)reg */
28696 freg = gen_reg_rtx (fmode);
28697 expand_float (freg, ireg, 0);
28699 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28700 label = ix86_expand_sse_compare_and_jump (UNLE,
28701 freg, op1, !do_floor);
28702 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28703 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28704 emit_move_insn (ireg, tmp);
28706 emit_label (label);
28707 LABEL_NUSES (label) = 1;
28709 emit_move_insn (op0, ireg);
28712 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28713 result in OPERAND0. */
28715 ix86_expand_rint (rtx operand0, rtx operand1)
28717 /* C code for the stuff we're doing below:
28718 xa = fabs (operand1);
28719 if (!isless (xa, 2**52))
28721 xa = xa + 2**52 - 2**52;
28722 return copysign (xa, operand1);
28724 enum machine_mode mode = GET_MODE (operand0);
28725 rtx res, xa, label, TWO52, mask;
28727 res = gen_reg_rtx (mode);
28728 emit_move_insn (res, operand1);
28730 /* xa = abs (operand1) */
28731 xa = ix86_expand_sse_fabs (res, &mask);
28733 /* if (!isless (xa, TWO52)) goto label; */
28734 TWO52 = ix86_gen_TWO52 (mode);
28735 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28737 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28738 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28740 ix86_sse_copysign_to_positive (res, xa, res, mask);
28742 emit_label (label);
28743 LABEL_NUSES (label) = 1;
28745 emit_move_insn (operand0, res);
28748 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28751 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28753 /* C code for the stuff we expand below.
28754 double xa = fabs (x), x2;
28755 if (!isless (xa, TWO52))
28757 xa = xa + TWO52 - TWO52;
28758 x2 = copysign (xa, x);
28767 enum machine_mode mode = GET_MODE (operand0);
28768 rtx xa, TWO52, tmp, label, one, res, mask;
28770 TWO52 = ix86_gen_TWO52 (mode);
28772 /* Temporary for holding the result, initialized to the input
28773 operand to ease control flow. */
28774 res = gen_reg_rtx (mode);
28775 emit_move_insn (res, operand1);
28777 /* xa = abs (operand1) */
28778 xa = ix86_expand_sse_fabs (res, &mask);
28780 /* if (!isless (xa, TWO52)) goto label; */
28781 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28783 /* xa = xa + TWO52 - TWO52; */
28784 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28785 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28787 /* xa = copysign (xa, operand1) */
28788 ix86_sse_copysign_to_positive (xa, xa, res, mask);
28790 /* generate 1.0 or -1.0 */
28791 one = force_reg (mode,
28792 const_double_from_real_value (do_floor
28793 ? dconst1 : dconstm1, mode));
28795 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28796 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28797 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28798 gen_rtx_AND (mode, one, tmp)));
28799 /* We always need to subtract here to preserve signed zero. */
28800 tmp = expand_simple_binop (mode, MINUS,
28801 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28802 emit_move_insn (res, tmp);
28804 emit_label (label);
28805 LABEL_NUSES (label) = 1;
28807 emit_move_insn (operand0, res);
28810 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28813 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
28815 /* C code for the stuff we expand below.
28816 double xa = fabs (x), x2;
28817 if (!isless (xa, TWO52))
28819 x2 = (double)(long)x;
28826 if (HONOR_SIGNED_ZEROS (mode))
28827 return copysign (x2, x);
28830 enum machine_mode mode = GET_MODE (operand0);
28831 rtx xa, xi, TWO52, tmp, label, one, res, mask;
28833 TWO52 = ix86_gen_TWO52 (mode);
28835 /* Temporary for holding the result, initialized to the input
28836 operand to ease control flow. */
28837 res = gen_reg_rtx (mode);
28838 emit_move_insn (res, operand1);
28840 /* xa = abs (operand1) */
28841 xa = ix86_expand_sse_fabs (res, &mask);
28843 /* if (!isless (xa, TWO52)) goto label; */
28844 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28846 /* xa = (double)(long)x */
28847 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28848 expand_fix (xi, res, 0);
28849 expand_float (xa, xi, 0);
28852 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28854 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28855 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28856 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28857 gen_rtx_AND (mode, one, tmp)));
28858 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
28859 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28860 emit_move_insn (res, tmp);
28862 if (HONOR_SIGNED_ZEROS (mode))
28863 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28865 emit_label (label);
28866 LABEL_NUSES (label) = 1;
28868 emit_move_insn (operand0, res);
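/* Compensation example: for x = -1.5, (double)(long)x truncates to
   -1.0; since -1.0 > -1.5 the UNGT mask is all ones, one is
   subtracted, and the result is floor (-1.5) = -2.0.  For ceil the
   comparison operands are swapped and one is added instead.  */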
28871 /* Expand SSE sequence for computing round from OPERAND1 storing
28872 into OPERAND0. This sequence works without relying on DImode truncation
28873 via cvttsd2siq, which is only available on 64-bit targets. */
28875 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
28877 /* C code for the stuff we expand below.
28878 double xa = fabs (x), xa2, x2;
28879 if (!isless (xa, TWO52))
28881 Using the absolute value and copying back sign makes
28882 -0.0 -> -0.0 correct.
28883 xa2 = xa + TWO52 - TWO52;
28888 else if (dxa > 0.5)
28890 x2 = copysign (xa2, x);
28893 enum machine_mode mode = GET_MODE (operand0);
28894 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
28896 TWO52 = ix86_gen_TWO52 (mode);
28898 /* Temporary for holding the result, initialized to the input
28899 operand to ease control flow. */
28900 res = gen_reg_rtx (mode);
28901 emit_move_insn (res, operand1);
28903 /* xa = abs (operand1) */
28904 xa = ix86_expand_sse_fabs (res, &mask);
28906 /* if (!isless (xa, TWO52)) goto label; */
28907 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28909 /* xa2 = xa + TWO52 - TWO52; */
28910 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28911 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
28913 /* dxa = xa2 - xa; */
28914 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
28916 /* generate 0.5, 1.0 and -0.5 */
28917 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
28918 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
28919 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
28923 tmp = gen_reg_rtx (mode);
28924 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
28925 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
28926 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28927 gen_rtx_AND (mode, one, tmp)));
28928 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28929 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
28930 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
28931 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28932 gen_rtx_AND (mode, one, tmp)));
28933 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28935 /* res = copysign (xa2, operand1) */
28936 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
28938 emit_label (label);
28939 LABEL_NUSES (label) = 1;
28941 emit_move_insn (operand0, res);
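/* Example: for xa = 2.5, xa2 = xa + TWO52 - TWO52 = 2.0 (round to
   even), so dxa = -0.5; the dxa <= -0.5 test then adds one, giving
   round (2.5) = 3.0, i.e. halfway cases rounded away from zero.  */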
28944 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28947 ix86_expand_trunc (rtx operand0, rtx operand1)
28949 /* C code for SSE variant we expand below.
28950 double xa = fabs (x), x2;
28951 if (!isless (xa, TWO52))
28953 x2 = (double)(long)x;
28954 if (HONOR_SIGNED_ZEROS (mode))
28955 return copysign (x2, x);
28958 enum machine_mode mode = GET_MODE (operand0);
28959 rtx xa, xi, TWO52, label, res, mask;
28961 TWO52 = ix86_gen_TWO52 (mode);
28963 /* Temporary for holding the result, initialized to the input
28964 operand to ease control flow. */
28965 res = gen_reg_rtx (mode);
28966 emit_move_insn (res, operand1);
28968 /* xa = abs (operand1) */
28969 xa = ix86_expand_sse_fabs (res, &mask);
28971 /* if (!isless (xa, TWO52)) goto label; */
28972 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28974 /* x = (double)(long)x */
28975 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28976 expand_fix (xi, res, 0);
28977 expand_float (res, xi, 0);
28979 if (HONOR_SIGNED_ZEROS (mode))
28980 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28982 emit_label (label);
28983 LABEL_NUSES (label) = 1;
28985 emit_move_insn (operand0, res);
28988 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28991 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
28993 enum machine_mode mode = GET_MODE (operand0);
28994 rtx xa, mask, TWO52, label, one, res, smask, tmp;
28996 /* C code for SSE variant we expand below.
28997 double xa = fabs (x), x2;
28998 if (!isless (xa, TWO52))
29000 xa2 = xa + TWO52 - TWO52;
29004 x2 = copysign (xa2, x);
29008 TWO52 = ix86_gen_TWO52 (mode);
29010 /* Temporary for holding the result, initialized to the input
29011 operand to ease control flow. */
29012 res = gen_reg_rtx (mode);
29013 emit_move_insn (res, operand1);
29015 /* xa = abs (operand1) */
29016 xa = ix86_expand_sse_fabs (res, &smask);
29018 /* if (!isless (xa, TWO52)) goto label; */
29019 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29021 /* res = xa + TWO52 - TWO52; */
29022 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29023 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
29024 emit_move_insn (res, tmp);
29027 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
29029 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
29030 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
29031 emit_insn (gen_rtx_SET (VOIDmode, mask,
29032 gen_rtx_AND (mode, mask, one)));
29033 tmp = expand_simple_binop (mode, MINUS,
29034 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
29035 emit_move_insn (res, tmp);
29037 /* res = copysign (res, operand1) */
29038 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
29040 emit_label (label);
29041 LABEL_NUSES (label) = 1;
29043 emit_move_insn (operand0, res);
29046 /* Expand SSE sequence for computing round from OPERAND1 storing
29049 ix86_expand_round (rtx operand0, rtx operand1)
29051 /* C code for the stuff we're doing below:
29052 double xa = fabs (x);
29053 if (!isless (xa, TWO52))
29055 xa = (double)(long)(xa + nextafter (0.5, 0.0));
29056 return copysign (xa, x);
29058 enum machine_mode mode = GET_MODE (operand0);
29059 rtx res, TWO52, xa, label, xi, half, mask;
29060 const struct real_format *fmt;
29061 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
29063 /* Temporary for holding the result, initialized to the input
29064 operand to ease control flow. */
29065 res = gen_reg_rtx (mode);
29066 emit_move_insn (res, operand1);
29068 TWO52 = ix86_gen_TWO52 (mode);
29069 xa = ix86_expand_sse_fabs (res, &mask);
29070 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29072 /* load nextafter (0.5, 0.0) */
29073 fmt = REAL_MODE_FORMAT (mode);
29074 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
29075 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
29077 /* xa = xa + 0.5 */
29078 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
29079 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
29081 /* xa = (double)(int64_t)xa */
29082 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29083 expand_fix (xi, xa, 0);
29084 expand_float (xa, xi, 0);
29086 /* res = copysign (xa, operand1) */
29087 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
29089 emit_label (label);
29090 LABEL_NUSES (label) = 1;
29092 emit_move_insn (operand0, res);
29096 /* Table of valid machine attributes. */
29097 static const struct attribute_spec ix86_attribute_table[] =
29099 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
29100 /* Stdcall attribute says callee is responsible for popping arguments
29101 if they are not variable. */
29102 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29103 /* Fastcall attribute says callee is responsible for popping arguments
29104 if they are not variable. */
29105 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29106 /* Thiscall attribute says callee is responsible for popping arguments
29107 if they are not variable. */
29108 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29109 /* Cdecl attribute says the callee is a normal C declaration */
29110 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29111 /* Regparm attribute specifies how many integer arguments are to be
29112 passed in registers. */
29113 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
29114 /* Sseregparm attribute says we are using x86_64 calling conventions
29115 for FP arguments. */
29116 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29117 /* force_align_arg_pointer says this function realigns the stack at entry. */
29118 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
29119 false, true, true, ix86_handle_cconv_attribute },
29120 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29121 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
29122 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
29123 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
29125 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29126 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29127 #ifdef SUBTARGET_ATTRIBUTE_TABLE
29128 SUBTARGET_ATTRIBUTE_TABLE,
29130 /* ms_abi and sysv_abi calling convention function attributes. */
29131 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29132 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29133 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute },
29135 { NULL, 0, 0, false, false, false, NULL }
29138 /* Implement targetm.vectorize.builtin_vectorization_cost. */
29140 ix86_builtin_vectorization_cost (bool runtime_test)
29142 /* If the branch of the runtime test is taken, i.e. the vectorized
29143 version is skipped, this incurs a misprediction cost (because the
29144 vectorized version is expected to be the fall-through). So we subtract
29145 the latency of a mispredicted branch from the costs that are incurred
29146 when the vectorized version is executed.
29148 TODO: The values in individual target tables have to be tuned or new
29149 fields may be needed. E.g., on K8, the default branch path is the
29150 not-taken path. If the taken path is predicted correctly, the minimum
29151 penalty of going down the taken-path is 1 cycle. If the taken-path is
29152 not predicted correctly, then the minimum penalty is 10 cycles. */
29156 return (-(ix86_cost->cond_taken_branch_cost));
29162 /* Implement targetm.vectorize.builtin_vec_perm. */
29165 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
29167 tree itype = TREE_TYPE (vec_type);
29168 bool u = TYPE_UNSIGNED (itype);
29169 enum machine_mode vmode = TYPE_MODE (vec_type);
29170 enum ix86_builtins fcode = fcode; /* Silence bogus warning. */
29171 bool ok = TARGET_SSE2;
29177 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
29180 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
29182 itype = ix86_get_builtin_type (IX86_BT_DI);
29187 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
29191 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
29193 itype = ix86_get_builtin_type (IX86_BT_SI);
29197 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
29200 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
29203 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
29206 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
29216 *mask_type = itype;
29217 return ix86_builtins[(int) fcode];
29220 /* Return a vector mode with twice as many elements as VMODE. */
29221 /* ??? Consider moving this to a table generated by genmodes.c. */
29223 static enum machine_mode
29224 doublesize_vector_mode (enum machine_mode vmode)
29228 case V2SFmode: return V4SFmode;
29229 case V1DImode: return V2DImode;
29230 case V2SImode: return V4SImode;
29231 case V4HImode: return V8HImode;
29232 case V8QImode: return V16QImode;
29234 case V2DFmode: return V4DFmode;
29235 case V4SFmode: return V8SFmode;
29236 case V2DImode: return V4DImode;
29237 case V4SImode: return V8SImode;
29238 case V8HImode: return V16HImode;
29239 case V16QImode: return V32QImode;
29241 case V4DFmode: return V8DFmode;
29242 case V8SFmode: return V16SFmode;
29243 case V4DImode: return V8DImode;
29244 case V8SImode: return V16SImode;
29245 case V16HImode: return V32HImode;
29246 case V32QImode: return V64QImode;
29249 gcc_unreachable ();
29253 /* Construct (set target (vec_select op0 (parallel perm))) and
29254 return true if that's a valid instruction in the active ISA. */
29257 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
29259 rtx rperm[MAX_VECT_LEN], x;
29262 for (i = 0; i < nelt; ++i)
29263 rperm[i] = GEN_INT (perm[i]);
29265 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
29266 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
29267 x = gen_rtx_SET (VOIDmode, target, x);
29270 if (recog_memoized (x) < 0)
29278 /* Similar, but generate a vec_concat from op0 and op1 as well. */
29281 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
29282 const unsigned char *perm, unsigned nelt)
29284 enum machine_mode v2mode;
29287 v2mode = doublesize_vector_mode (GET_MODE (op0));
29288 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
29289 return expand_vselect (target, x, perm, nelt);
29292 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29293 in terms of blendp[sd] / pblendw / pblendvb. */
29296 expand_vec_perm_blend (struct expand_vec_perm_d *d)
29298 enum machine_mode vmode = d->vmode;
29299 unsigned i, mask, nelt = d->nelt;
29300 rtx target, op0, op1, x;
29302 if (!TARGET_SSE4_1 || d->op0 == d->op1)
29304 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
29307 /* This is a blend, not a permute. Elements must stay in their
29308 respective lanes. */
29309 for (i = 0; i < nelt; ++i)
29311 unsigned e = d->perm[i];
29312 if (!(e == i || e == i + nelt))
29319 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
29320 decision should be extracted elsewhere, so that we only try that
29321 sequence once all budget==3 options have been tried. */
29323 /* For bytes, see if bytes move in pairs so we can use pblendw with
29324 an immediate argument, rather than pblendvb with a vector argument. */
29325 if (vmode == V16QImode)
29327 bool pblendw_ok = true;
29328 for (i = 0; i < 16 && pblendw_ok; i += 2)
29329 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
29333 rtx rperm[16], vperm;
29335 for (i = 0; i < nelt; ++i)
29336 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
29338 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29339 vperm = force_reg (V16QImode, vperm);
29341 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
29346 target = d->target;
29358 for (i = 0; i < nelt; ++i)
29359 mask |= (d->perm[i] >= nelt) << i;
29363 for (i = 0; i < 2; ++i)
29364 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
29368 for (i = 0; i < 4; ++i)
29369 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
29373 for (i = 0; i < 8; ++i)
29374 mask |= (d->perm[i * 2] >= 16) << i;
29378 target = gen_lowpart (vmode, target);
29379 op0 = gen_lowpart (vmode, op0);
29380 op1 = gen_lowpart (vmode, op1);
29384 gcc_unreachable ();
29387 /* This matches five different patterns with the different modes. */
29388 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
29389 x = gen_rtx_SET (VOIDmode, target, x);
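/* Mask example: for a V4SF blend with d->perm = { 0 5 2 7 }, elements
   1 and 3 come from op1, so mask = 0b1010 and the insn emitted is
   blendps with immediate 10.  */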
29395 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29396 in terms of the variable form of vpermilps.
29398 Note that we will have already failed the immediate input vpermilps,
29399 which requires that the high and low part shuffle be identical; the
29400 variable form doesn't require that. */
29403 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
29405 rtx rperm[8], vperm;
29408 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
29411 /* We can only permute within the 128-bit lane. */
29412 for (i = 0; i < 8; ++i)
29414 unsigned e = d->perm[i];
29415 if (i < 4 ? e >= 4 : e < 4)
29422 for (i = 0; i < 8; ++i)
29424 unsigned e = d->perm[i];
29426 /* Within each 128-bit lane, the elements of op0 are numbered
29427 from 0 and the elements of op1 are numbered from 4. */
29433 rperm[i] = GEN_INT (e);
29436 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
29437 vperm = force_reg (V8SImode, vperm);
29438 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
29443 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29444 in terms of pshufb or vpperm. */
29447 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
29449 unsigned i, nelt, eltsz;
29450 rtx rperm[16], vperm, target, op0, op1;
29452 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
29454 if (GET_MODE_SIZE (d->vmode) != 16)
29461 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29463 for (i = 0; i < nelt; ++i)
29465 unsigned j, e = d->perm[i];
29466 for (j = 0; j < eltsz; ++j)
29467 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
29470 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29471 vperm = force_reg (V16QImode, vperm);
29473 target = gen_lowpart (V16QImode, d->target);
29474 op0 = gen_lowpart (V16QImode, d->op0);
29475 if (d->op0 == d->op1)
29476 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
29479 op1 = gen_lowpart (V16QImode, d->op1);
29480 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
29486 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
29487 in a single instruction. */
29490 expand_vec_perm_1 (struct expand_vec_perm_d *d)
29492 unsigned i, nelt = d->nelt;
29493 unsigned char perm2[MAX_VECT_LEN];
29495 /* Check plain VEC_SELECT first, because AVX has instructions that could
29496 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
29497 input where SEL+CONCAT may not. */
29498 if (d->op0 == d->op1)
29500 int mask = nelt - 1;
29502 for (i = 0; i < nelt; i++)
29503 perm2[i] = d->perm[i] & mask;
29505 if (expand_vselect (d->target, d->op0, perm2, nelt))
29508 /* There are plenty of patterns in sse.md that are written for
29509 SEL+CONCAT and are not replicated for a single op. Perhaps
29510 that should be changed, to avoid the nastiness here. */
29512 /* Recognize interleave style patterns, which means incrementing
29513 every other permutation operand. */
29514 for (i = 0; i < nelt; i += 2)
29516 perm2[i] = d->perm[i] & mask;
29517 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
29519 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29522 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
29525 for (i = 0; i < nelt; i += 4)
29527 perm2[i + 0] = d->perm[i + 0] & mask;
29528 perm2[i + 1] = d->perm[i + 1] & mask;
29529 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
29530 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
29533 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29538 /* Finally, try the fully general two operand permute. */
29539 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
29542 /* Recognize interleave style patterns with reversed operands. */
29543 if (d->op0 != d->op1)
29545 for (i = 0; i < nelt; ++i)
29547 unsigned e = d->perm[i];
29555 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
29559 /* Try the SSE4.1 blend variable merge instructions. */
29560 if (expand_vec_perm_blend (d))
29563 /* Try one of the AVX vpermil variable permutations. */
29564 if (expand_vec_perm_vpermil (d))
29567 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
29568 if (expand_vec_perm_pshufb (d))
29574 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29575 in terms of a pair of pshuflw + pshufhw instructions. */
29578 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
29580 unsigned char perm2[MAX_VECT_LEN];
29584 if (d->vmode != V8HImode || d->op0 != d->op1)
29587 /* The two permutations only operate in 64-bit lanes. */
29588 for (i = 0; i < 4; ++i)
29589 if (d->perm[i] >= 4)
29591 for (i = 4; i < 8; ++i)
29592 if (d->perm[i] < 4)
29598 /* Emit the pshuflw. */
29599 memcpy (perm2, d->perm, 4);
29600 for (i = 4; i < 8; ++i)
29602 ok = expand_vselect (d->target, d->op0, perm2, 8);
29605 /* Emit the pshufhw. */
29606 memcpy (perm2 + 4, d->perm + 4, 4);
29607 for (i = 0; i < 4; ++i)
29609 ok = expand_vselect (d->target, d->target, perm2, 8);
29615 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29616 the permutation using the SSSE3 palignr instruction. This succeeds
29617 when all of the elements in PERM fit within one vector and we merely
29618 need to shift them down so that a single vector permutation has a
29619 chance to succeed. */
29622 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
29624 unsigned i, nelt = d->nelt;
29629 /* Even with AVX, palignr only operates on 128-bit vectors. */
29630 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29633 min = nelt, max = 0;
29634 for (i = 0; i < nelt; ++i)
29636 unsigned e = d->perm[i];
29642 if (min == 0 || max - min >= nelt)
29645 /* Given that we have SSSE3, we know we'll be able to implement the
29646 single operand permutation after the palignr with pshufb. */
29650 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
29651 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
29652 gen_lowpart (TImode, d->op1),
29653 gen_lowpart (TImode, d->op0), shift));
29655 d->op0 = d->op1 = d->target;
29658 for (i = 0; i < nelt; ++i)
29660 unsigned e = d->perm[i] - min;
29666 /* Test for the degenerate case where the alignment by itself
29667 produces the desired permutation. */
29671 ok = expand_vec_perm_1 (d);
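/* Example: for a V8HI permutation { 3 4 5 6 7 8 9 10 }, min = 3 and
   max - min = 7 < nelt, so palignr shifts the concatenated operands
   down by three elements and the residual single-operand permutation
   is the identity, the degenerate case noted above.  */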
29677 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29678 a two vector permutation into a single vector permutation by using
29679 an interleave operation to merge the vectors. */
29682 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
29684 struct expand_vec_perm_d dremap, dfinal;
29685 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
29686 unsigned contents, h1, h2, h3, h4;
29687 unsigned char remap[2 * MAX_VECT_LEN];
29691 if (d->op0 == d->op1)
29694 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
29695 lanes. We can use similar techniques with the vperm2f128 instruction,
29696 but it requires slightly different logic. */
29697 if (GET_MODE_SIZE (d->vmode) != 16)
29700 /* Examine from whence the elements come. */
29702 for (i = 0; i < nelt; ++i)
29703 contents |= 1u << d->perm[i];
29705 /* Split the two input vectors into 4 halves. */
29706 h1 = (1u << nelt2) - 1;
29711 memset (remap, 0xff, sizeof (remap));
29714 /* If the elements come from the low halves, use interleave low; similarly,
29715 use interleave high for the high halves. If the elements are from
29716 mis-matched halves, we can use shufps for V4SF/V4SI or do a DImode shuffle. */
29717 if ((contents & (h1 | h3)) == contents)
29719 for (i = 0; i < nelt2; ++i)
29722 remap[i + nelt] = i * 2 + 1;
29723 dremap.perm[i * 2] = i;
29724 dremap.perm[i * 2 + 1] = i + nelt;
29727 else if ((contents & (h2 | h4)) == contents)
29729 for (i = 0; i < nelt2; ++i)
29731 remap[i + nelt2] = i * 2;
29732 remap[i + nelt + nelt2] = i * 2 + 1;
29733 dremap.perm[i * 2] = i + nelt2;
29734 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
29737 else if ((contents & (h1 | h4)) == contents)
29739 for (i = 0; i < nelt2; ++i)
29742 remap[i + nelt + nelt2] = i + nelt2;
29743 dremap.perm[i] = i;
29744 dremap.perm[i + nelt2] = i + nelt + nelt2;
29748 dremap.vmode = V2DImode;
29750 dremap.perm[0] = 0;
29751 dremap.perm[1] = 3;
29754 else if ((contents & (h2 | h3)) == contents)
29756 for (i = 0; i < nelt2; ++i)
29758 remap[i + nelt2] = i;
29759 remap[i + nelt] = i + nelt2;
29760 dremap.perm[i] = i + nelt2;
29761 dremap.perm[i + nelt2] = i + nelt;
29765 dremap.vmode = V2DImode;
29767 dremap.perm[0] = 1;
29768 dremap.perm[1] = 2;
29774 /* Use the remapping array set up above to move the elements from their
29775 swizzled locations into their final destinations. */
29777 for (i = 0; i < nelt; ++i)
29779 unsigned e = remap[d->perm[i]];
29780 gcc_assert (e < nelt);
29781 dfinal.perm[i] = e;
29783 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
29784 dfinal.op1 = dfinal.op0;
29785 dremap.target = dfinal.op0;
29787 /* Test if the final remap can be done with a single insn. For V4SFmode or
29788 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
29790 ok = expand_vec_perm_1 (&dfinal);
29791 seq = get_insns ();
29797 if (dremap.vmode != dfinal.vmode)
29799 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
29800 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
29801 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
29804 ok = expand_vec_perm_1 (&dremap);
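/* Example: for V4SF with perm { 1 5 0 4 } all elements come from the
   low halves (contents is within h1 | h3), so dremap becomes the
   interleave-low { 0 4 1 5 } and dfinal the single-operand shuffle
   { 2 3 0 1 }: one unpcklps followed by one shufps/pshufd.  */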
29811 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
29812 permutation with two pshufb insns and an ior. We should have already
29813 failed all two-instruction sequences. */
29816 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
29818 rtx rperm[2][16], vperm, l, h, op, m128;
29819 unsigned int i, nelt, eltsz;
29821 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29823 gcc_assert (d->op0 != d->op1);
29826 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29828 /* Generate two permutation masks. If the required element is within
29829 the given vector it is shuffled into the proper lane. If the required
29830 element is in the other vector, force a zero into the lane by setting
29831 bit 7 in the permutation mask. */
29832 m128 = GEN_INT (-128);
29833 for (i = 0; i < nelt; ++i)
29835 unsigned j, e = d->perm[i];
29836 unsigned which = (e >= nelt);
29840 for (j = 0; j < eltsz; ++j)
29842 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
29843 rperm[1-which][i*eltsz + j] = m128;
29847 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
29848 vperm = force_reg (V16QImode, vperm);
29850 l = gen_reg_rtx (V16QImode);
29851 op = gen_lowpart (V16QImode, d->op0);
29852 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
29854 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
29855 vperm = force_reg (V16QImode, vperm);
29857 h = gen_reg_rtx (V16QImode);
29858 op = gen_lowpart (V16QImode, d->op1);
29859 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
29861 op = gen_lowpart (V16QImode, d->target);
29862 emit_insn (gen_iorv16qi3 (op, l, h));
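/* Mask-pair example: when element i wants byte e from the second
   operand, rperm[1] selects that byte while the corresponding entry
   of rperm[0] is -128; the first pshufb thus zeroes the lane in L and
   the second fills it in H, and the por merges the two halves.  */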
29867 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
29868 and extract-odd permutations. */
29871 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
29873 rtx t1, t2, t3, t4;
29878 t1 = gen_reg_rtx (V4DFmode);
29879 t2 = gen_reg_rtx (V4DFmode);
29881 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
29882 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
29883 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
29885 /* Now an unpck[lh]pd will produce the result required. */
29887 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
29889 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
29895 static const unsigned char perm1[8] = { 0, 2, 1, 3, 4, 6, 5, 7 };
29896 static const unsigned char perme[8] = { 0, 1, 8, 9, 4, 5, 12, 13 };
29897 static const unsigned char permo[8] = { 2, 3, 10, 11, 6, 7, 14, 15 };
29899 t1 = gen_reg_rtx (V8SFmode);
29900 t2 = gen_reg_rtx (V8SFmode);
29901 t3 = gen_reg_rtx (V8SFmode);
29902 t4 = gen_reg_rtx (V8SFmode);
29904 /* Shuffle within the 128-bit lanes to produce:
29905 { 0 2 1 3 4 6 5 7 } and { 8 a 9 b c e d f }. */
29906 expand_vselect (t1, d->op0, perm1, 8);
29907 expand_vselect (t2, d->op1, perm1, 8);
29909 /* Shuffle the lanes around to produce:
29910 { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }. */
29911 emit_insn (gen_avx_vperm2f128v8sf3 (t3, t1, t2, GEN_INT (0x20)));
29912 emit_insn (gen_avx_vperm2f128v8sf3 (t4, t1, t2, GEN_INT (0x31)));
29914 /* Now a vpermil2p will produce the result required. */
29915 /* ??? The vpermil2p requires a vector constant. Another option
29916 is a unpck[lh]ps to merge the two vectors to produce
29917 { 0 4 2 6 8 c a e } or { 1 5 3 7 9 d b f }. Then use another
29918 vpermilps to get the elements into the final order. */
29921 memcpy (d->perm, odd ? permo: perme, 8);
29922 expand_vec_perm_vpermil (d);
29930 /* These are always directly implementable by expand_vec_perm_1. */
29931 gcc_unreachable ();
29935 return expand_vec_perm_pshufb2 (d);
29938 /* We need 2*log2(N)-1 operations to achieve odd/even
29939 with interleave. */
29940 t1 = gen_reg_rtx (V8HImode);
29941 t2 = gen_reg_rtx (V8HImode);
29942 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
29943 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
29944 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
29945 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
29947 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
29949 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
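/* E.g. extract-even on { 0 1 2 3 4 5 6 7 } and { 8 9 a b c d e f }:
   the first interleave pair yields { 0 8 1 9 2 a 3 b } and
   { 4 c 5 d 6 e 7 f }; two further rounds sort this into
   { 0 2 4 6 8 a c e }, for 2*log2(8)-1 = 5 insns total.  */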
29956 return expand_vec_perm_pshufb2 (d);
29959 t1 = gen_reg_rtx (V16QImode);
29960 t2 = gen_reg_rtx (V16QImode);
29961 t3 = gen_reg_rtx (V16QImode);
29962 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
29963 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
29964 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
29965 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
29966 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
29967 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
29969 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
29971 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
29977 gcc_unreachable ();
29983 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
29984 extract-even and extract-odd permutations. */
29987 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
29989 unsigned i, odd, nelt = d->nelt;
29992 if (odd != 0 && odd != 1)
29995 for (i = 1; i < nelt; ++i)
29996 if (d->perm[i] != 2 * i + odd)
29999 return expand_vec_perm_even_odd_1 (d, odd);
30002 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
30003 permutations. We assume that expand_vec_perm_1 has already failed. */
30006 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
30008 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
30009 enum machine_mode vmode = d->vmode;
30010 unsigned char perm2[4];
30018 /* These are special-cased in sse.md so that we can optionally
30019 use the vbroadcast instruction. They expand to two insns
30020 if the input happens to be in a register. */
30021 gcc_unreachable ();
30027 /* These are always implementable using standard shuffle patterns. */
30028 gcc_unreachable ();
30032 /* These can be implemented via interleave. We save one insn by
30033 stopping once we have promoted to V4SImode and then use pshufd. */
30036 optab otab = vec_interleave_low_optab;
30040 otab = vec_interleave_high_optab;
30045 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
30046 vmode = get_mode_wider_vector (vmode);
30047 op0 = gen_lowpart (vmode, op0);
30049 while (vmode != V4SImode);
30051 memset (perm2, elt, 4);
30052 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
30057 gcc_unreachable ();
30061 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
30062 broadcast permutations. */
30065 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
30067 unsigned i, elt, nelt = d->nelt;
30069 if (d->op0 != d->op1)
30073 for (i = 1; i < nelt; ++i)
30074 if (d->perm[i] != elt)
30077 return expand_vec_perm_broadcast_1 (d);
30080 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
30081 With all of the interface bits taken care of, perform the expansion
30082 in D and return true on success. */
30085 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
30087 /* Try a single instruction expansion. */
30088 if (expand_vec_perm_1 (d))
30091 /* Try sequences of two instructions. */
30093 if (expand_vec_perm_pshuflw_pshufhw (d))
30096 if (expand_vec_perm_palignr (d))
30099 if (expand_vec_perm_interleave2 (d))
30102 if (expand_vec_perm_broadcast (d))
30105 /* Try sequences of three instructions. */
30107 if (expand_vec_perm_pshufb2 (d))
30110 /* ??? Look for narrow permutations whose element orderings would
30111 allow the promotion to a wider mode. */
30113 /* ??? Look for sequences of interleave or a wider permute that place
30114 the data into the correct lanes for a half-vector shuffle like
30115 pshuf[lh]w or vpermilps. */
30117 /* ??? Look for sequences of interleave that produce the desired results.
30118 The combinatorics of punpck[lh] get pretty ugly... */
30120 if (expand_vec_perm_even_odd (d))
30126 /* Extract the values from the vector CST into the permutation array in D.
30127 Return 0 on error, 1 if all values from the permutation come from the
30128 first vector, 2 if all values from the second vector, and 3 otherwise. */
30131 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
30133 tree list = TREE_VECTOR_CST_ELTS (cst);
30134 unsigned i, nelt = d->nelt;
30137 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
30139 unsigned HOST_WIDE_INT e;
30141 if (!host_integerp (TREE_VALUE (list), 1))
30143 e = tree_low_cst (TREE_VALUE (list), 1);
30147 ret |= (e < nelt ? 1 : 2);
30150 gcc_assert (list == NULL);
30152 /* If all elements come from the second vector, fold them into the first. */
30154 for (i = 0; i < nelt; ++i)
30155 d->perm[i] -= nelt;
30161 ix86_expand_vec_perm_builtin (tree exp)
30163 struct expand_vec_perm_d d;
30164 tree arg0, arg1, arg2;
30166 arg0 = CALL_EXPR_ARG (exp, 0);
30167 arg1 = CALL_EXPR_ARG (exp, 1);
30168 arg2 = CALL_EXPR_ARG (exp, 2);
30170 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
30171 d.nelt = GET_MODE_NUNITS (d.vmode);
30172 d.testing_p = false;
30173 gcc_assert (VECTOR_MODE_P (d.vmode));
30175 if (TREE_CODE (arg2) != VECTOR_CST)
30177 error_at (EXPR_LOCATION (exp),
30178 "vector permutation requires vector constant");
30182 switch (extract_vec_perm_cst (&d, arg2))
30188 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
30192 if (!operand_equal_p (arg0, arg1, 0))
30194 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30195 d.op0 = force_reg (d.vmode, d.op0);
30196 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30197 d.op1 = force_reg (d.vmode, d.op1);
30201 /* The elements of PERM do not suggest that only the first operand
30202 is used, but both operands are identical. Allow easier matching
30203 of the permutation by folding the permutation into the single operand itself. */
30206 unsigned i, nelt = d.nelt;
30207 for (i = 0; i < nelt; ++i)
30208 if (d.perm[i] >= nelt)
30214 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30215 d.op0 = force_reg (d.vmode, d.op0);
30220 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30221 d.op0 = force_reg (d.vmode, d.op0);
30226 d.target = gen_reg_rtx (d.vmode);
30227 if (ix86_expand_vec_perm_builtin_1 (&d))
30230 /* For compiler generated permutations, we should never get here, because
30231 the compiler should also be checking the ok hook. But since this is a
30232 builtin the user has access to, don't abort. */
30236 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
30239 sorry ("vector permutation (%d %d %d %d)",
30240 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
30243 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
30244 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30245 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
30248 sorry ("vector permutation "
30249 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
30250 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30251 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
30252 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
30253 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
30256 gcc_unreachable ();
30259 return CONST0_RTX (d.vmode);
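
/* Illustrative use from the user's side (added; the builtin spelling
   below is what the vec_perm builtin machinery is expected to create --
   treat the exact name as an assumption):

     typedef int v4si __attribute__ ((vector_size (16)));
     v4si x = __builtin_ia32_vec_perm_v4si (a, b, (v4si){0, 4, 1, 5});

   This expands through ix86_expand_vec_perm_builtin and, on SSE2, can be
   implemented by a single punpckldq of A and B, yielding
   {a0, b0, a1, b1}.  */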
/* Implement targetm.vectorize.builtin_vec_perm_ok.  */

static bool
ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
{
  struct expand_vec_perm_d d;
  int vec_mask;
  bool ret, one_vec;

  d.vmode = TYPE_MODE (vec_type);
  d.nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = true;

  /* Given sufficient ISA support we can just return true here
     for selected vector modes.  */
  if (GET_MODE_SIZE (d.vmode) == 16)
    {
      /* All implementable with a single vpperm insn.  */
      if (TARGET_XOP)
        return true;
      /* All implementable with 2 pshufb + 1 ior.  */
      if (TARGET_SSSE3)
        return true;
      /* All implementable with shufpd or unpck[lh]pd.  */
      if (d.nelt == 2)
        return true;
    }

  vec_mask = extract_vec_perm_cst (&d, mask);

  /* This hook cannot be called in response to something that the user
     does (unlike the builtin expander), so we shouldn't ever see an
     error generated from the extract.  */
  gcc_assert (vec_mask > 0 && vec_mask <= 3);
  one_vec = (vec_mask != 3);

  /* Implementable with shufps or pshufd.  */
  if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
    return true;

  /* Otherwise we have to go through the motions and see if we can
     figure out how to generate the requested permutation.  */
  d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
  d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
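  /* Note (added): these are raw pseudos past LAST_VIRTUAL_REGISTER rather
     than properly allocated registers; the expansion below runs between
     start_sequence () and end_sequence (), so any RTL it generates is
     thrown away -- all we care about is whether a strategy exists.  */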
  if (!one_vec)
    d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);

  start_sequence ();
  ret = ix86_expand_vec_perm_builtin_1 (&d);
  end_sequence ();

  return ret;
}
void
ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
{
  struct expand_vec_perm_d d;
  unsigned i, nelt;

  d.target = targ;
  d.op0 = op0;
  d.op1 = op1;
  d.vmode = GET_MODE (targ);
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = false;

  for (i = 0; i < nelt; ++i)
    d.perm[i] = i * 2 + odd;

  /* We'll either be able to implement the permutation directly...  */
  if (expand_vec_perm_1 (&d))
    return;

  /* ... or we use the special-case patterns.  */
  expand_vec_perm_even_odd_1 (&d, odd);
}
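
/* Example (added): for a V4SImode target with ODD = 1, the loop above
   builds perm = {1, 3, 5, 7}, i.e. the odd-indexed elements of the
   eight-element concatenation of OP0 and OP1.  */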
/* Return the calling-ABI-specific va_list type node appropriate
   for FNDECL.  */

static tree
ix86_fn_abi_va_list (tree fndecl)
{
  if (!TARGET_64BIT)
    return va_list_type_node;
  gcc_assert (fndecl != NULL_TREE);

  if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
    return ms_va_list_type_node;
  else
    return sysv_va_list_type_node;
}
/* Return the canonical va_list type specified by TYPE.  If TYPE does
   not resolve to a valid va_list type, return NULL_TREE.  */

static tree
ix86_canonical_va_list_type (tree type)
{
  tree wtype, htype;

  /* Resolve references and pointers to va_list type.  */
  if (INDIRECT_REF_P (type))
    type = TREE_TYPE (type);
  else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE (type)))
    type = TREE_TYPE (type);

  if (TARGET_64BIT)
    {
      wtype = va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
        {
          /* If va_list is an array type, the argument may have decayed
             to a pointer type, e.g. by being passed to another function.
             In that case, unwrap both types so that we can compare the
             underlying records.  */
          if (TREE_CODE (htype) == ARRAY_TYPE
              || POINTER_TYPE_P (htype))
            {
              wtype = TREE_TYPE (wtype);
              htype = TREE_TYPE (htype);
            }
        }
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
        return va_list_type_node;
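      /* The same unwrap-and-compare is repeated below for the SysV and
         MS va_list records.  Illustrative case (added): on 64-bit SysV
         targets va_list is the array type __va_list_tag[1], so a va_list
         passed on to another function decays to __va_list_tag *;
         unwrapping both types lets either spelling match the underlying
         record.  */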
      wtype = sysv_va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
        {
          /* Again, the argument may have decayed from array to pointer;
             unwrap both types before comparing the underlying records.  */
          if (TREE_CODE (htype) == ARRAY_TYPE
              || POINTER_TYPE_P (htype))
            {
              wtype = TREE_TYPE (wtype);
              htype = TREE_TYPE (htype);
            }
        }
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
        return sysv_va_list_type_node;
      wtype = ms_va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
        {
          /* Likewise for the MS va_list record.  */
          if (TREE_CODE (htype) == ARRAY_TYPE
              || POINTER_TYPE_P (htype))
            {
              wtype = TREE_TYPE (wtype);
              htype = TREE_TYPE (htype);
            }
        }
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
        return ms_va_list_type_node;
    }

  return std_canonical_va_list_type (type);
}
/* Iterate through the target-specific builtin types for va_list.
   IDX denotes the iterator, *PTREE is set to the type of the va_list
   builtin, and *PNAME to its name.  Return zero if there is no element
   for this index, otherwise IDX should be increased for the next call.
   Note, do not iterate a base builtin's name like __builtin_va_list.
   Used from c_common_nodes_and_builtins.  */

static int
ix86_enum_va_list (int idx, const char **pname, tree *ptree)
{
  if (!TARGET_64BIT)
    return 0;

  switch (idx)
    {
    case 0:
      *ptree = ms_va_list_type_node;
      *pname = "__builtin_ms_va_list";
      return 1;

    case 1:
      *ptree = sysv_va_list_type_node;
      *pname = "__builtin_sysv_va_list";
      return 1;

    default:
      return 0;
    }
}
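
/* Illustrative driver (added; a rough sketch of how the C front end is
   expected to consume this hook, not the exact code in c-common.c): it
   calls with IDX = 0, 1, ... and registers each (*PNAME, *PTREE) pair
   as a builtin typedef, stopping at the first call that returns zero --
   so 64-bit targets expose __builtin_ms_va_list and
   __builtin_sysv_va_list in addition to __builtin_va_list.  */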
/* Initialize the GCC target structure.  */
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ix86_return_in_memory

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#  undef TARGET_MERGE_DECL_ATTRIBUTES
#  define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ix86_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL ix86_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ix86_expand_builtin

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  ix86_builtin_vectorized_function

#undef TARGET_VECTORIZE_BUILTIN_CONVERSION
#define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue

#undef TARGET_ENCODE_SECTION_INFO
#ifndef SUBTARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
#endif
#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""
#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP ASM_BYTE

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
#ifdef ASM_QUAD
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  ia32_multipass_dfa_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
#endif
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START x86_file_start

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT \
   | TARGET_SUBTARGET_DEFAULT \
   | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT \
   | MASK_FUSED_MADD)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION ix86_handle_option

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ix86_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST ix86_address_cost

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list

#undef TARGET_FN_ABI_VA_LIST
#define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list

#undef TARGET_CANONICAL_VA_LIST_TYPE
#define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
#undef TARGET_UPDATE_STACK_BOUNDARY
#define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
#undef TARGET_GET_DRAP_RTX
#define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_STATIC_CHAIN
#define TARGET_STATIC_CHAIN ix86_static_chain
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT ix86_trampoline_init

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
#endif

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE ix86_mangle_type

#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE ix86_function_value

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD ix86_secondary_reload

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  ix86_builtin_vectorization_cost
#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
  ix86_vectorize_builtin_vec_perm
#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
  ix86_vectorize_builtin_vec_perm_ok

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE ix86_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE ix86_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT ix86_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P ix86_can_inline_p

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p

#undef TARGET_IRA_COVER_CLASSES
#define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE ix86_can_eliminate

#undef TARGET_ASM_CODE_END
#define TARGET_ASM_CODE_END ix86_code_end

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-i386.h"