/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "basic-block.h"
#include "target-def.h"
#include "langhooks.h"
#include "tm-constrs.h"
#include "dwarf2out.h"

static rtx legitimize_dllimport_symbol (rtx, bool);
#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif
/* Return the index of the given mode in the mult and division cost tables.  */
#define MODE_INDEX(mode) \
  ((mode) == QImode ? 0 \
   : (mode) == HImode ? 1 \
   : (mode) == SImode ? 2 \
   : (mode) == DImode ? 3 \
   : 4)
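
/* Editor's illustration (a sketch, not part of the original file):
   MODE_INDEX selects a row of the 5-entry multiply/divide cost arrays
   defined below.  For example,

     cost = ix86_size_cost.mult_init[MODE_INDEX (SImode)];

   picks the SImode entry, and index 4 ("other") is the fallback for all
   remaining modes.  The field name mult_init is assumed here from
   struct processor_costs.  */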
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)
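
/* Editor's note (illustration, not part of the original file): with
   COSTS_N_INSNS (N) defined as (N) * 4, an add costs COSTS_N_INSNS (1) == 4
   on the speed scale and COSTS_N_BYTES (2) == 4 on the size scale, so the
   two scales agree on the baseline instruction and can share the same
   cost fields.  */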
#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
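
/* Editor's sketch of the stringop_algs convention used below (not part of
   the original file): the leading algorithm handles blocks of unknown
   size, and the {max_size, alg} pairs cover known sizes, terminated by
   max_size == -1.  For example,

     {libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}

   uses rep movsl/stosl for known blocks up to 256 bytes and a library
   call both for larger known blocks and when the size is not known at
   compile time.  */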
struct processor_costs ix86_size_cost = {  /* costs for tuning for size */
  COSTS_N_BYTES (2),   /* cost of an add instruction */
  COSTS_N_BYTES (3),   /* cost of a lea instruction */
  COSTS_N_BYTES (2),   /* variable shift costs */
  COSTS_N_BYTES (3),   /* constant shift costs */
  {COSTS_N_BYTES (3),  /* cost of starting multiply for QI */
   COSTS_N_BYTES (3),  /* HI */
   COSTS_N_BYTES (3),  /* SI */
   COSTS_N_BYTES (3),  /* DI */
   COSTS_N_BYTES (5)}, /* other */
  0,                   /* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),  /* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),  /* HI */
   COSTS_N_BYTES (3),  /* SI */
   COSTS_N_BYTES (3),  /* DI */
   COSTS_N_BYTES (5)}, /* other */
  COSTS_N_BYTES (3),   /* cost of movsx */
  COSTS_N_BYTES (3),   /* cost of movzx */
  2,                   /* cost for loading QImode using movzbl */
  {2, 2, 2},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {2, 2, 2},           /* cost of storing integer registers */
  2,                   /* cost of reg,reg fld/fst */
  {2, 2, 2},           /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {2, 2, 2},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */
  3,                   /* cost of moving MMX register */
  {3, 3},              /* cost of loading MMX registers
                          in SImode and DImode */
  {3, 3},              /* cost of storing MMX registers
                          in SImode and DImode */
  3,                   /* cost of moving SSE register */
  {3, 3, 3},           /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {3, 3, 3},           /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  3,                   /* MMX or SSE register to integer */
  0,                   /* size of l1 cache */
  0,                   /* size of l2 cache */
  0,                   /* size of prefetch block */
  0,                   /* number of parallel prefetches */
  COSTS_N_BYTES (2),   /* cost of FADD and FSUB insns. */
  COSTS_N_BYTES (2),   /* cost of FMUL instruction. */
  COSTS_N_BYTES (2),   /* cost of FDIV instruction. */
  COSTS_N_BYTES (2),   /* cost of FABS instruction. */
  COSTS_N_BYTES (2),   /* cost of FCHS instruction. */
  COSTS_N_BYTES (2),   /* cost of FSQRT instruction. */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1,                   /* scalar_stmt_cost. */
  1,                   /* scalar load_cost. */
  1,                   /* scalar_store_cost. */
  1,                   /* vec_stmt_cost. */
  1,                   /* vec_to_scalar_cost. */
  1,                   /* scalar_to_vec_cost. */
  1,                   /* vec_align_load_cost. */
  1,                   /* vec_unalign_load_cost. */
  1,                   /* vec_store_cost. */
  1,                   /* cond_taken_branch_cost. */
  1,                   /* cond_not_taken_branch_cost. */
};
/* Processor costs (relative to an add) */
struct processor_costs i386_cost = {  /* 386 specific costs */
  COSTS_N_INSNS (1),   /* cost of an add instruction */
  COSTS_N_INSNS (1),   /* cost of a lea instruction */
  COSTS_N_INSNS (3),   /* variable shift costs */
  COSTS_N_INSNS (2),   /* constant shift costs */
  {COSTS_N_INSNS (6),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (6),  /* HI */
   COSTS_N_INSNS (6),  /* SI */
   COSTS_N_INSNS (6),  /* DI */
   COSTS_N_INSNS (6)}, /* other */
  COSTS_N_INSNS (1),   /* cost of multiply per each bit set */
  {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23), /* HI */
   COSTS_N_INSNS (23), /* SI */
   COSTS_N_INSNS (23), /* DI */
   COSTS_N_INSNS (23)}, /* other */
  COSTS_N_INSNS (3),   /* cost of movsx */
  COSTS_N_INSNS (2),   /* cost of movzx */
  15,                  /* "large" insn */
  4,                   /* cost for loading QImode using movzbl */
  {2, 4, 2},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {2, 4, 2},           /* cost of storing integer registers */
  2,                   /* cost of reg,reg fld/fst */
  {8, 8, 8},           /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {8, 8, 8},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */
  2,                   /* cost of moving MMX register */
  {4, 8},              /* cost of loading MMX registers
                          in SImode and DImode */
  {4, 8},              /* cost of storing MMX registers
                          in SImode and DImode */
  2,                   /* cost of moving SSE register */
  {4, 8, 16},          /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {4, 8, 16},          /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  3,                   /* MMX or SSE register to integer */
  0,                   /* size of l1 cache */
  0,                   /* size of l2 cache */
  0,                   /* size of prefetch block */
  0,                   /* number of parallel prefetches */
  COSTS_N_INSNS (23),  /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (27),  /* cost of FMUL instruction. */
  COSTS_N_INSNS (88),  /* cost of FDIV instruction. */
  COSTS_N_INSNS (22),  /* cost of FABS instruction. */
  COSTS_N_INSNS (24),  /* cost of FCHS instruction. */
  COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                   /* scalar_stmt_cost. */
  1,                   /* scalar load_cost. */
  1,                   /* scalar_store_cost. */
  1,                   /* vec_stmt_cost. */
  1,                   /* vec_to_scalar_cost. */
  1,                   /* scalar_to_vec_cost. */
  1,                   /* vec_align_load_cost. */
  2,                   /* vec_unalign_load_cost. */
  1,                   /* vec_store_cost. */
  3,                   /* cond_taken_branch_cost. */
  1,                   /* cond_not_taken_branch_cost. */
};
struct processor_costs i486_cost = {  /* 486 specific costs */
  COSTS_N_INSNS (1),   /* cost of an add instruction */
  COSTS_N_INSNS (1),   /* cost of a lea instruction */
  COSTS_N_INSNS (3),   /* variable shift costs */
  COSTS_N_INSNS (2),   /* constant shift costs */
  {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
   COSTS_N_INSNS (12), /* HI */
   COSTS_N_INSNS (12), /* SI */
   COSTS_N_INSNS (12), /* DI */
   COSTS_N_INSNS (12)}, /* other */
  1,                   /* cost of multiply per each bit set */
  {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (40), /* HI */
   COSTS_N_INSNS (40), /* SI */
   COSTS_N_INSNS (40), /* DI */
   COSTS_N_INSNS (40)}, /* other */
  COSTS_N_INSNS (3),   /* cost of movsx */
  COSTS_N_INSNS (2),   /* cost of movzx */
  15,                  /* "large" insn */
  4,                   /* cost for loading QImode using movzbl */
  {2, 4, 2},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {2, 4, 2},           /* cost of storing integer registers */
  2,                   /* cost of reg,reg fld/fst */
  {8, 8, 8},           /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {8, 8, 8},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */
  2,                   /* cost of moving MMX register */
  {4, 8},              /* cost of loading MMX registers
                          in SImode and DImode */
  {4, 8},              /* cost of storing MMX registers
                          in SImode and DImode */
  2,                   /* cost of moving SSE register */
  {4, 8, 16},          /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {4, 8, 16},          /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  3,                   /* MMX or SSE register to integer */
  4,                   /* size of l1 cache.  486 has 8kB cache
                          shared for code and data, so 4kB is
                          not really precise.  */
  4,                   /* size of l2 cache */
  0,                   /* size of prefetch block */
  0,                   /* number of parallel prefetches */
  COSTS_N_INSNS (8),   /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (16),  /* cost of FMUL instruction. */
  COSTS_N_INSNS (73),  /* cost of FDIV instruction. */
  COSTS_N_INSNS (3),   /* cost of FABS instruction. */
  COSTS_N_INSNS (3),   /* cost of FCHS instruction. */
  COSTS_N_INSNS (83),  /* cost of FSQRT instruction. */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                   /* scalar_stmt_cost. */
  1,                   /* scalar load_cost. */
  1,                   /* scalar_store_cost. */
  1,                   /* vec_stmt_cost. */
  1,                   /* vec_to_scalar_cost. */
  1,                   /* scalar_to_vec_cost. */
  1,                   /* vec_align_load_cost. */
  2,                   /* vec_unalign_load_cost. */
  1,                   /* vec_store_cost. */
  3,                   /* cond_taken_branch_cost. */
  1,                   /* cond_not_taken_branch_cost. */
};
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),   /* cost of an add instruction */
  COSTS_N_INSNS (1),   /* cost of a lea instruction */
  COSTS_N_INSNS (4),   /* variable shift costs */
  COSTS_N_INSNS (1),   /* constant shift costs */
  {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
   COSTS_N_INSNS (11), /* HI */
   COSTS_N_INSNS (11), /* SI */
   COSTS_N_INSNS (11), /* DI */
   COSTS_N_INSNS (11)}, /* other */
  0,                   /* cost of multiply per each bit set */
  {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (25), /* HI */
   COSTS_N_INSNS (25), /* SI */
   COSTS_N_INSNS (25), /* DI */
   COSTS_N_INSNS (25)}, /* other */
  COSTS_N_INSNS (3),   /* cost of movsx */
  COSTS_N_INSNS (2),   /* cost of movzx */
  8,                   /* "large" insn */
  6,                   /* cost for loading QImode using movzbl */
  {2, 4, 2},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {2, 4, 2},           /* cost of storing integer registers */
  2,                   /* cost of reg,reg fld/fst */
  {2, 2, 6},           /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {4, 4, 6},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */
  8,                   /* cost of moving MMX register */
  {8, 8},              /* cost of loading MMX registers
                          in SImode and DImode */
  {8, 8},              /* cost of storing MMX registers
                          in SImode and DImode */
  2,                   /* cost of moving SSE register */
  {4, 8, 16},          /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {4, 8, 16},          /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  3,                   /* MMX or SSE register to integer */
  8,                   /* size of l1 cache. */
  8,                   /* size of l2 cache */
  0,                   /* size of prefetch block */
  0,                   /* number of parallel prefetches */
  COSTS_N_INSNS (3),   /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (3),   /* cost of FMUL instruction. */
  COSTS_N_INSNS (39),  /* cost of FDIV instruction. */
  COSTS_N_INSNS (1),   /* cost of FABS instruction. */
  COSTS_N_INSNS (1),   /* cost of FCHS instruction. */
  COSTS_N_INSNS (70),  /* cost of FSQRT instruction. */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                   /* scalar_stmt_cost. */
  1,                   /* scalar load_cost. */
  1,                   /* scalar_store_cost. */
  1,                   /* vec_stmt_cost. */
  1,                   /* vec_to_scalar_cost. */
  1,                   /* scalar_to_vec_cost. */
  1,                   /* vec_align_load_cost. */
  2,                   /* vec_unalign_load_cost. */
  1,                   /* vec_store_cost. */
  3,                   /* cond_taken_branch_cost. */
  1,                   /* cond_not_taken_branch_cost. */
};
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),   /* cost of an add instruction */
  COSTS_N_INSNS (1),   /* cost of a lea instruction */
  COSTS_N_INSNS (1),   /* variable shift costs */
  COSTS_N_INSNS (1),   /* constant shift costs */
  {COSTS_N_INSNS (4),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),  /* HI */
   COSTS_N_INSNS (4),  /* SI */
   COSTS_N_INSNS (4),  /* DI */
   COSTS_N_INSNS (4)}, /* other */
  0,                   /* cost of multiply per each bit set */
  {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (17), /* HI */
   COSTS_N_INSNS (17), /* SI */
   COSTS_N_INSNS (17), /* DI */
   COSTS_N_INSNS (17)}, /* other */
  COSTS_N_INSNS (1),   /* cost of movsx */
  COSTS_N_INSNS (1),   /* cost of movzx */
  8,                   /* "large" insn */
  2,                   /* cost for loading QImode using movzbl */
  {4, 4, 4},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {2, 2, 2},           /* cost of storing integer registers */
  2,                   /* cost of reg,reg fld/fst */
  {2, 2, 6},           /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {4, 4, 6},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */
  2,                   /* cost of moving MMX register */
  {2, 2},              /* cost of loading MMX registers
                          in SImode and DImode */
  {2, 2},              /* cost of storing MMX registers
                          in SImode and DImode */
  2,                   /* cost of moving SSE register */
  {2, 2, 8},           /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {2, 2, 8},           /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  3,                   /* MMX or SSE register to integer */
  8,                   /* size of l1 cache. */
  256,                 /* size of l2 cache */
  32,                  /* size of prefetch block */
  6,                   /* number of parallel prefetches */
  COSTS_N_INSNS (3),   /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (5),   /* cost of FMUL instruction. */
  COSTS_N_INSNS (56),  /* cost of FDIV instruction. */
  COSTS_N_INSNS (2),   /* cost of FABS instruction. */
  COSTS_N_INSNS (2),   /* cost of FCHS instruction. */
  COSTS_N_INSNS (56),  /* cost of FSQRT instruction. */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks the inline loop is still a
     noticeable win; for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb apparently has a more expensive startup time in the
     CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
                        {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
                        {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                   /* scalar_stmt_cost. */
  1,                   /* scalar load_cost. */
  1,                   /* scalar_store_cost. */
  1,                   /* vec_stmt_cost. */
  1,                   /* vec_to_scalar_cost. */
  1,                   /* scalar_to_vec_cost. */
  1,                   /* vec_align_load_cost. */
  2,                   /* vec_unalign_load_cost. */
  1,                   /* vec_store_cost. */
  3,                   /* cond_taken_branch_cost. */
  1,                   /* cond_not_taken_branch_cost. */
};
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),   /* cost of an add instruction */
  COSTS_N_INSNS (1),   /* cost of a lea instruction */
  COSTS_N_INSNS (2),   /* variable shift costs */
  COSTS_N_INSNS (1),   /* constant shift costs */
  {COSTS_N_INSNS (3),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),  /* HI */
   COSTS_N_INSNS (7),  /* SI */
   COSTS_N_INSNS (7),  /* DI */
   COSTS_N_INSNS (7)}, /* other */
  0,                   /* cost of multiply per each bit set */
  {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23), /* HI */
   COSTS_N_INSNS (39), /* SI */
   COSTS_N_INSNS (39), /* DI */
   COSTS_N_INSNS (39)}, /* other */
  COSTS_N_INSNS (1),   /* cost of movsx */
  COSTS_N_INSNS (1),   /* cost of movzx */
  8,                   /* "large" insn */
  1,                   /* cost for loading QImode using movzbl */
  {1, 1, 1},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {1, 1, 1},           /* cost of storing integer registers */
  1,                   /* cost of reg,reg fld/fst */
  {1, 1, 1},           /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {4, 6, 6},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */

  1,                   /* cost of moving MMX register */
  {1, 1},              /* cost of loading MMX registers
                          in SImode and DImode */
  {1, 1},              /* cost of storing MMX registers
                          in SImode and DImode */
  1,                   /* cost of moving SSE register */
  {1, 1, 1},           /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {1, 1, 1},           /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  1,                   /* MMX or SSE register to integer */
  64,                  /* size of l1 cache. */
  128,                 /* size of l2 cache. */
  32,                  /* size of prefetch block */
  1,                   /* number of parallel prefetches */
  COSTS_N_INSNS (6),   /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (11),  /* cost of FMUL instruction. */
  COSTS_N_INSNS (47),  /* cost of FDIV instruction. */
  COSTS_N_INSNS (1),   /* cost of FABS instruction. */
  COSTS_N_INSNS (1),   /* cost of FCHS instruction. */
  COSTS_N_INSNS (54),  /* cost of FSQRT instruction. */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                   /* scalar_stmt_cost. */
  1,                   /* scalar load_cost. */
  1,                   /* scalar_store_cost. */
  1,                   /* vec_stmt_cost. */
  1,                   /* vec_to_scalar_cost. */
  1,                   /* scalar_to_vec_cost. */
  1,                   /* vec_align_load_cost. */
  2,                   /* vec_unalign_load_cost. */
  1,                   /* vec_store_cost. */
  3,                   /* cond_taken_branch_cost. */
  1,                   /* cond_not_taken_branch_cost. */
};
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),   /* cost of an add instruction */
  COSTS_N_INSNS (2),   /* cost of a lea instruction */
  COSTS_N_INSNS (1),   /* variable shift costs */
  COSTS_N_INSNS (1),   /* constant shift costs */
  {COSTS_N_INSNS (3),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (3),  /* HI */
   COSTS_N_INSNS (3),  /* SI */
   COSTS_N_INSNS (3),  /* DI */
   COSTS_N_INSNS (3)}, /* other */
  0,                   /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (18), /* HI */
   COSTS_N_INSNS (18), /* SI */
   COSTS_N_INSNS (18), /* DI */
   COSTS_N_INSNS (18)}, /* other */
  COSTS_N_INSNS (2),   /* cost of movsx */
  COSTS_N_INSNS (2),   /* cost of movzx */
  8,                   /* "large" insn */
  3,                   /* cost for loading QImode using movzbl */
  {4, 5, 4},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {2, 3, 2},           /* cost of storing integer registers */
  4,                   /* cost of reg,reg fld/fst */
  {6, 6, 6},           /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {4, 4, 4},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */
  2,                   /* cost of moving MMX register */
  {2, 2},              /* cost of loading MMX registers
                          in SImode and DImode */
  {2, 2},              /* cost of storing MMX registers
                          in SImode and DImode */
  2,                   /* cost of moving SSE register */
  {2, 2, 8},           /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {2, 2, 8},           /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  6,                   /* MMX or SSE register to integer */
  32,                  /* size of l1 cache. */
  32,                  /* size of l2 cache.  Some models
                          have integrated l2 cache, but
                          optimizing for k6 is not important
                          enough to worry about that.  */
  32,                  /* size of prefetch block */
  1,                   /* number of parallel prefetches */
  COSTS_N_INSNS (2),   /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (2),   /* cost of FMUL instruction. */
  COSTS_N_INSNS (56),  /* cost of FDIV instruction. */
  COSTS_N_INSNS (2),   /* cost of FABS instruction. */
  COSTS_N_INSNS (2),   /* cost of FCHS instruction. */
  COSTS_N_INSNS (56),  /* cost of FSQRT instruction. */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                   /* scalar_stmt_cost. */
  1,                   /* scalar load_cost. */
  1,                   /* scalar_store_cost. */
  1,                   /* vec_stmt_cost. */
  1,                   /* vec_to_scalar_cost. */
  1,                   /* scalar_to_vec_cost. */
  1,                   /* vec_align_load_cost. */
  2,                   /* vec_unalign_load_cost. */
  1,                   /* vec_store_cost. */
  3,                   /* cond_taken_branch_cost. */
  1,                   /* cond_not_taken_branch_cost. */
};
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),   /* cost of an add instruction */
  COSTS_N_INSNS (2),   /* cost of a lea instruction */
  COSTS_N_INSNS (1),   /* variable shift costs */
  COSTS_N_INSNS (1),   /* constant shift costs */
  {COSTS_N_INSNS (5),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (5),  /* HI */
   COSTS_N_INSNS (5),  /* SI */
   COSTS_N_INSNS (5),  /* DI */
   COSTS_N_INSNS (5)}, /* other */
  0,                   /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26), /* HI */
   COSTS_N_INSNS (42), /* SI */
   COSTS_N_INSNS (74), /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1),   /* cost of movsx */
  COSTS_N_INSNS (1),   /* cost of movzx */
  8,                   /* "large" insn */
  4,                   /* cost for loading QImode using movzbl */
  {3, 4, 3},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {3, 4, 3},           /* cost of storing integer registers */
  4,                   /* cost of reg,reg fld/fst */
  {4, 4, 12},          /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {6, 6, 8},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */
  2,                   /* cost of moving MMX register */
  {4, 4},              /* cost of loading MMX registers
                          in SImode and DImode */
  {4, 4},              /* cost of storing MMX registers
                          in SImode and DImode */
  2,                   /* cost of moving SSE register */
  {4, 4, 6},           /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {4, 4, 5},           /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  5,                   /* MMX or SSE register to integer */
  64,                  /* size of l1 cache. */
  256,                 /* size of l2 cache. */
  64,                  /* size of prefetch block */
  6,                   /* number of parallel prefetches */
  COSTS_N_INSNS (4),   /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (4),   /* cost of FMUL instruction. */
  COSTS_N_INSNS (24),  /* cost of FDIV instruction. */
  COSTS_N_INSNS (2),   /* cost of FABS instruction. */
  COSTS_N_INSNS (2),   /* cost of FCHS instruction. */
  COSTS_N_INSNS (35),  /* cost of FSQRT instruction. */
  /* For some reason, Athlon deals better with the REP prefix (relative to
     loops) than K8.  Alignment becomes important after 8 bytes for memcpy
     and 128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                   /* scalar_stmt_cost. */
  1,                   /* scalar load_cost. */
  1,                   /* scalar_store_cost. */
  1,                   /* vec_stmt_cost. */
  1,                   /* vec_to_scalar_cost. */
  1,                   /* scalar_to_vec_cost. */
  1,                   /* vec_align_load_cost. */
  2,                   /* vec_unalign_load_cost. */
  1,                   /* vec_store_cost. */
  3,                   /* cond_taken_branch_cost. */
  1,                   /* cond_not_taken_branch_cost. */
};
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),   /* cost of an add instruction */
  COSTS_N_INSNS (2),   /* cost of a lea instruction */
  COSTS_N_INSNS (1),   /* variable shift costs */
  COSTS_N_INSNS (1),   /* constant shift costs */
  {COSTS_N_INSNS (3),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),  /* HI */
   COSTS_N_INSNS (3),  /* SI */
   COSTS_N_INSNS (4),  /* DI */
   COSTS_N_INSNS (5)}, /* other */
  0,                   /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26), /* HI */
   COSTS_N_INSNS (42), /* SI */
   COSTS_N_INSNS (74), /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1),   /* cost of movsx */
  COSTS_N_INSNS (1),   /* cost of movzx */
  8,                   /* "large" insn */
  4,                   /* cost for loading QImode using movzbl */
  {3, 4, 3},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {3, 4, 3},           /* cost of storing integer registers */
  4,                   /* cost of reg,reg fld/fst */
  {4, 4, 12},          /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {6, 6, 8},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */
  2,                   /* cost of moving MMX register */
  {3, 3},              /* cost of loading MMX registers
                          in SImode and DImode */
  {4, 4},              /* cost of storing MMX registers
                          in SImode and DImode */
  2,                   /* cost of moving SSE register */
  {4, 3, 6},           /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {4, 4, 5},           /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  5,                   /* MMX or SSE register to integer */
  64,                  /* size of l1 cache. */
  512,                 /* size of l2 cache. */
  64,                  /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it probably is not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,                 /* number of parallel prefetches */
  COSTS_N_INSNS (4),   /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (4),   /* cost of FMUL instruction. */
  COSTS_N_INSNS (19),  /* cost of FDIV instruction. */
  COSTS_N_INSNS (2),   /* cost of FABS instruction. */
  COSTS_N_INSNS (2),   /* cost of FCHS instruction. */
  COSTS_N_INSNS (35),  /* cost of FSQRT instruction. */
  /* K8 has optimized REP instruction for medium-sized blocks, but for very
     small blocks it is better to use a loop.  For large blocks, a libcall
     can do non-temporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,                   /* scalar_stmt_cost. */
  2,                   /* scalar load_cost. */
  2,                   /* scalar_store_cost. */
  5,                   /* vec_stmt_cost. */
  0,                   /* vec_to_scalar_cost. */
  2,                   /* scalar_to_vec_cost. */
  2,                   /* vec_align_load_cost. */
  3,                   /* vec_unalign_load_cost. */
  3,                   /* vec_store_cost. */
  3,                   /* cond_taken_branch_cost. */
  2,                   /* cond_not_taken_branch_cost. */
};
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),   /* cost of an add instruction */
  COSTS_N_INSNS (2),   /* cost of a lea instruction */
  COSTS_N_INSNS (1),   /* variable shift costs */
  COSTS_N_INSNS (1),   /* constant shift costs */
  {COSTS_N_INSNS (3),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),  /* HI */
   COSTS_N_INSNS (3),  /* SI */
   COSTS_N_INSNS (4),  /* DI */
   COSTS_N_INSNS (5)}, /* other */
  0,                   /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35), /* HI */
   COSTS_N_INSNS (51), /* SI */
   COSTS_N_INSNS (83), /* DI */
   COSTS_N_INSNS (83)}, /* other */
  COSTS_N_INSNS (1),   /* cost of movsx */
  COSTS_N_INSNS (1),   /* cost of movzx */
  8,                   /* "large" insn */
  4,                   /* cost for loading QImode using movzbl */
  {3, 4, 3},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {3, 4, 3},           /* cost of storing integer registers */
  4,                   /* cost of reg,reg fld/fst */
  {4, 4, 12},          /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {6, 6, 8},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */
  2,                   /* cost of moving MMX register */
  {3, 3},              /* cost of loading MMX registers
                          in SImode and DImode */
  {4, 4},              /* cost of storing MMX registers
                          in SImode and DImode */
  2,                   /* cost of moving SSE register */
  {4, 4, 3},           /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {4, 4, 5},           /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  3,                   /* MMX or SSE register to integer */
                       /* On K8:
                            MOVD reg64, xmmreg Double FSTORE 4
                            MOVD reg32, xmmreg Double FSTORE 4
                          On AMDFAM10:
                            MOVD reg64, xmmreg Double FADD 3
                            MOVD reg32, xmmreg Double FADD 3  */
  64,                  /* size of l1 cache. */
  512,                 /* size of l2 cache. */
  64,                  /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it probably is not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,                 /* number of parallel prefetches */
  COSTS_N_INSNS (4),   /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (4),   /* cost of FMUL instruction. */
  COSTS_N_INSNS (19),  /* cost of FDIV instruction. */
  COSTS_N_INSNS (2),   /* cost of FABS instruction. */
  COSTS_N_INSNS (2),   /* cost of FCHS instruction. */
  COSTS_N_INSNS (35),  /* cost of FSQRT instruction. */

  /* AMDFAM10 has optimized REP instruction for medium-sized blocks, but for
     very small blocks it is better to use a loop.  For large blocks, a
     libcall can do non-temporal accesses and beat inline code
     considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,                   /* scalar_stmt_cost. */
  2,                   /* scalar load_cost. */
  2,                   /* scalar_store_cost. */
  6,                   /* vec_stmt_cost. */
  0,                   /* vec_to_scalar_cost. */
  2,                   /* scalar_to_vec_cost. */
  2,                   /* vec_align_load_cost. */
  2,                   /* vec_unalign_load_cost. */
  2,                   /* vec_store_cost. */
  2,                   /* cond_taken_branch_cost. */
  1,                   /* cond_not_taken_branch_cost. */
};
struct processor_costs bdver1_cost = {
  COSTS_N_INSNS (1),   /* cost of an add instruction */
  COSTS_N_INSNS (2),   /* cost of a lea instruction */
  COSTS_N_INSNS (1),   /* variable shift costs */
  COSTS_N_INSNS (1),   /* constant shift costs */
  {COSTS_N_INSNS (3),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),  /* HI */
   COSTS_N_INSNS (3),  /* SI */
   COSTS_N_INSNS (4),  /* DI */
   COSTS_N_INSNS (5)}, /* other */
  0,                   /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35), /* HI */
   COSTS_N_INSNS (51), /* SI */
   COSTS_N_INSNS (83), /* DI */
   COSTS_N_INSNS (83)}, /* other */
  COSTS_N_INSNS (1),   /* cost of movsx */
  COSTS_N_INSNS (1),   /* cost of movzx */
  8,                   /* "large" insn */
  4,                   /* cost for loading QImode using movzbl */
  {3, 4, 3},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {3, 4, 3},           /* cost of storing integer registers */
  4,                   /* cost of reg,reg fld/fst */
  {4, 4, 12},          /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {6, 6, 8},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */
  2,                   /* cost of moving MMX register */
  {3, 3},              /* cost of loading MMX registers
                          in SImode and DImode */
  {4, 4},              /* cost of storing MMX registers
                          in SImode and DImode */
  2,                   /* cost of moving SSE register */
  {4, 4, 3},           /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {4, 4, 5},           /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  3,                   /* MMX or SSE register to integer */
                       /* On K8:
                            MOVD reg64, xmmreg Double FSTORE 4
                            MOVD reg32, xmmreg Double FSTORE 4
                          On AMDFAM10:
                            MOVD reg64, xmmreg Double FADD 3
                            MOVD reg32, xmmreg Double FADD 3  */
  64,                  /* size of l1 cache. */
  1024,                /* size of l2 cache. */
  64,                  /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it probably is not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,                 /* number of parallel prefetches */
  COSTS_N_INSNS (4),   /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (4),   /* cost of FMUL instruction. */
  COSTS_N_INSNS (19),  /* cost of FDIV instruction. */
  COSTS_N_INSNS (2),   /* cost of FABS instruction. */
  COSTS_N_INSNS (2),   /* cost of FCHS instruction. */
  COSTS_N_INSNS (35),  /* cost of FSQRT instruction. */

  /* BDVER1 has optimized REP instruction for medium-sized blocks, but for
     very small blocks it is better to use a loop.  For large blocks, a
     libcall can do non-temporal accesses and beat inline code
     considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,                   /* scalar_stmt_cost. */
  2,                   /* scalar load_cost. */
  2,                   /* scalar_store_cost. */
  6,                   /* vec_stmt_cost. */
  0,                   /* vec_to_scalar_cost. */
  2,                   /* scalar_to_vec_cost. */
  2,                   /* vec_align_load_cost. */
  2,                   /* vec_unalign_load_cost. */
  2,                   /* vec_store_cost. */
  2,                   /* cond_taken_branch_cost. */
  1,                   /* cond_not_taken_branch_cost. */
};
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),   /* cost of an add instruction */
  COSTS_N_INSNS (3),   /* cost of a lea instruction */
  COSTS_N_INSNS (4),   /* variable shift costs */
  COSTS_N_INSNS (4),   /* constant shift costs */
  {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
   COSTS_N_INSNS (15), /* HI */
   COSTS_N_INSNS (15), /* SI */
   COSTS_N_INSNS (15), /* DI */
   COSTS_N_INSNS (15)}, /* other */
  0,                   /* cost of multiply per each bit set */
  {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (56), /* HI */
   COSTS_N_INSNS (56), /* SI */
   COSTS_N_INSNS (56), /* DI */
   COSTS_N_INSNS (56)}, /* other */
  COSTS_N_INSNS (1),   /* cost of movsx */
  COSTS_N_INSNS (1),   /* cost of movzx */
  16,                  /* "large" insn */
  2,                   /* cost for loading QImode using movzbl */
  {4, 5, 4},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {2, 3, 2},           /* cost of storing integer registers */
  2,                   /* cost of reg,reg fld/fst */
  {2, 2, 6},           /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {4, 4, 6},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */
  2,                   /* cost of moving MMX register */
  {2, 2},              /* cost of loading MMX registers
                          in SImode and DImode */
  {2, 2},              /* cost of storing MMX registers
                          in SImode and DImode */
  12,                  /* cost of moving SSE register */
  {12, 12, 12},        /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {2, 2, 8},           /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  10,                  /* MMX or SSE register to integer */
  8,                   /* size of l1 cache. */
  256,                 /* size of l2 cache. */
  64,                  /* size of prefetch block */
  6,                   /* number of parallel prefetches */
  COSTS_N_INSNS (5),   /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (7),   /* cost of FMUL instruction. */
  COSTS_N_INSNS (43),  /* cost of FDIV instruction. */
  COSTS_N_INSNS (2),   /* cost of FABS instruction. */
  COSTS_N_INSNS (2),   /* cost of FCHS instruction. */
  COSTS_N_INSNS (43),  /* cost of FSQRT instruction. */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
              {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                   /* scalar_stmt_cost. */
  1,                   /* scalar load_cost. */
  1,                   /* scalar_store_cost. */
  1,                   /* vec_stmt_cost. */
  1,                   /* vec_to_scalar_cost. */
  1,                   /* scalar_to_vec_cost. */
  1,                   /* vec_align_load_cost. */
  2,                   /* vec_unalign_load_cost. */
  1,                   /* vec_store_cost. */
  3,                   /* cond_taken_branch_cost. */
  1,                   /* cond_not_taken_branch_cost. */
};
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),   /* cost of an add instruction */
  COSTS_N_INSNS (1),   /* cost of a lea instruction */
  COSTS_N_INSNS (1),   /* variable shift costs */
  COSTS_N_INSNS (1),   /* constant shift costs */
  {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
   COSTS_N_INSNS (10), /* HI */
   COSTS_N_INSNS (10), /* SI */
   COSTS_N_INSNS (10), /* DI */
   COSTS_N_INSNS (10)}, /* other */
  0,                   /* cost of multiply per each bit set */
  {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (66), /* HI */
   COSTS_N_INSNS (66), /* SI */
   COSTS_N_INSNS (66), /* DI */
   COSTS_N_INSNS (66)}, /* other */
  COSTS_N_INSNS (1),   /* cost of movsx */
  COSTS_N_INSNS (1),   /* cost of movzx */
  16,                  /* "large" insn */
  17,                  /* MOVE_RATIO */
  4,                   /* cost for loading QImode using movzbl */
  {4, 4, 4},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {4, 4, 4},           /* cost of storing integer registers */
  3,                   /* cost of reg,reg fld/fst */
  {12, 12, 12},        /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {4, 4, 4},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */
  6,                   /* cost of moving MMX register */
  {12, 12},            /* cost of loading MMX registers
                          in SImode and DImode */
  {12, 12},            /* cost of storing MMX registers
                          in SImode and DImode */
  6,                   /* cost of moving SSE register */
  {12, 12, 12},        /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {12, 12, 12},        /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  8,                   /* MMX or SSE register to integer */
  8,                   /* size of l1 cache. */
  1024,                /* size of l2 cache. */
  128,                 /* size of prefetch block */
  8,                   /* number of parallel prefetches */
  1,                   /* Branch cost */
  COSTS_N_INSNS (6),   /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (8),   /* cost of FMUL instruction. */
  COSTS_N_INSNS (40),  /* cost of FDIV instruction. */
  COSTS_N_INSNS (3),   /* cost of FABS instruction. */
  COSTS_N_INSNS (3),   /* cost of FCHS instruction. */
  COSTS_N_INSNS (44),  /* cost of FSQRT instruction. */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
              {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
              {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                   /* scalar_stmt_cost. */
  1,                   /* scalar load_cost. */
  1,                   /* scalar_store_cost. */
  1,                   /* vec_stmt_cost. */
  1,                   /* vec_to_scalar_cost. */
  1,                   /* scalar_to_vec_cost. */
  1,                   /* vec_align_load_cost. */
  2,                   /* vec_unalign_load_cost. */
  1,                   /* vec_store_cost. */
  3,                   /* cond_taken_branch_cost. */
  1,                   /* cond_not_taken_branch_cost. */
};
struct processor_costs core2_cost = {
  COSTS_N_INSNS (1),   /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
  COSTS_N_INSNS (1),   /* variable shift costs */
  COSTS_N_INSNS (1),   /* constant shift costs */
  {COSTS_N_INSNS (3),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (3),  /* HI */
   COSTS_N_INSNS (3),  /* SI */
   COSTS_N_INSNS (3),  /* DI */
   COSTS_N_INSNS (3)}, /* other */
  0,                   /* cost of multiply per each bit set */
  {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (22), /* HI */
   COSTS_N_INSNS (22), /* SI */
   COSTS_N_INSNS (22), /* DI */
   COSTS_N_INSNS (22)}, /* other */
  COSTS_N_INSNS (1),   /* cost of movsx */
  COSTS_N_INSNS (1),   /* cost of movzx */
  8,                   /* "large" insn */
  16,                  /* MOVE_RATIO */
  2,                   /* cost for loading QImode using movzbl */
  {6, 6, 6},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {4, 4, 4},           /* cost of storing integer registers */
  2,                   /* cost of reg,reg fld/fst */
  {6, 6, 6},           /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {4, 4, 4},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */
  2,                   /* cost of moving MMX register */
  {6, 6},              /* cost of loading MMX registers
                          in SImode and DImode */
  {4, 4},              /* cost of storing MMX registers
                          in SImode and DImode */
  2,                   /* cost of moving SSE register */
  {6, 6, 6},           /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {4, 4, 4},           /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  2,                   /* MMX or SSE register to integer */
  32,                  /* size of l1 cache. */
  2048,                /* size of l2 cache. */
  128,                 /* size of prefetch block */
  8,                   /* number of parallel prefetches */
  3,                   /* Branch cost */
  COSTS_N_INSNS (3),   /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (5),   /* cost of FMUL instruction. */
  COSTS_N_INSNS (32),  /* cost of FDIV instruction. */
  COSTS_N_INSNS (1),   /* cost of FABS instruction. */
  COSTS_N_INSNS (1),   /* cost of FCHS instruction. */
  COSTS_N_INSNS (58),  /* cost of FSQRT instruction. */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                   /* scalar_stmt_cost. */
  1,                   /* scalar load_cost. */
  1,                   /* scalar_store_cost. */
  1,                   /* vec_stmt_cost. */
  1,                   /* vec_to_scalar_cost. */
  1,                   /* scalar_to_vec_cost. */
  1,                   /* vec_align_load_cost. */
  2,                   /* vec_unalign_load_cost. */
  1,                   /* vec_store_cost. */
  3,                   /* cond_taken_branch_cost. */
  1,                   /* cond_not_taken_branch_cost. */
};
struct processor_costs atom_cost = {
  COSTS_N_INSNS (1),   /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
  COSTS_N_INSNS (1),   /* variable shift costs */
  COSTS_N_INSNS (1),   /* constant shift costs */
  {COSTS_N_INSNS (3),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),  /* HI */
   COSTS_N_INSNS (3),  /* SI */
   COSTS_N_INSNS (4),  /* DI */
   COSTS_N_INSNS (2)}, /* other */
  0,                   /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26), /* HI */
   COSTS_N_INSNS (42), /* SI */
   COSTS_N_INSNS (74), /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1),   /* cost of movsx */
  COSTS_N_INSNS (1),   /* cost of movzx */
  8,                   /* "large" insn */
  17,                  /* MOVE_RATIO */
  2,                   /* cost for loading QImode using movzbl */
  {4, 4, 4},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {4, 4, 4},           /* cost of storing integer registers */
  4,                   /* cost of reg,reg fld/fst */
  {12, 12, 12},        /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {6, 6, 8},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */
  2,                   /* cost of moving MMX register */
  {8, 8},              /* cost of loading MMX registers
                          in SImode and DImode */
  {8, 8},              /* cost of storing MMX registers
                          in SImode and DImode */
  2,                   /* cost of moving SSE register */
  {8, 8, 8},           /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {8, 8, 8},           /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  5,                   /* MMX or SSE register to integer */
  32,                  /* size of l1 cache. */
  256,                 /* size of l2 cache. */
  64,                  /* size of prefetch block */
  6,                   /* number of parallel prefetches */
  3,                   /* Branch cost */
  COSTS_N_INSNS (8),   /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (8),   /* cost of FMUL instruction. */
  COSTS_N_INSNS (20),  /* cost of FDIV instruction. */
  COSTS_N_INSNS (8),   /* cost of FABS instruction. */
  COSTS_N_INSNS (8),   /* cost of FCHS instruction. */
  COSTS_N_INSNS (40),  /* cost of FSQRT instruction. */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                   /* scalar_stmt_cost. */
  1,                   /* scalar load_cost. */
  1,                   /* scalar_store_cost. */
  1,                   /* vec_stmt_cost. */
  1,                   /* vec_to_scalar_cost. */
  1,                   /* scalar_to_vec_cost. */
  1,                   /* vec_align_load_cost. */
  2,                   /* vec_unalign_load_cost. */
  1,                   /* vec_store_cost. */
  3,                   /* cond_taken_branch_cost. */
  1,                   /* cond_not_taken_branch_cost. */
};
/* Generic64 should produce code tuned for Nocona and K8.  */
struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1),   /* cost of an add instruction */
  /* On all chips taken into consideration, lea is 2 cycles or more.  With
     this cost, however, our current implementation of synth_mult results in
     the use of unnecessary temporary registers, causing regressions on
     several SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
  COSTS_N_INSNS (1),   /* variable shift costs */
  COSTS_N_INSNS (1),   /* constant shift costs */
  {COSTS_N_INSNS (3),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),  /* HI */
   COSTS_N_INSNS (3),  /* SI */
   COSTS_N_INSNS (4),  /* DI */
   COSTS_N_INSNS (2)}, /* other */
  0,                   /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26), /* HI */
   COSTS_N_INSNS (42), /* SI */
   COSTS_N_INSNS (74), /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1),   /* cost of movsx */
  COSTS_N_INSNS (1),   /* cost of movzx */
  8,                   /* "large" insn */
  17,                  /* MOVE_RATIO */
  4,                   /* cost for loading QImode using movzbl */
  {4, 4, 4},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {4, 4, 4},           /* cost of storing integer registers */
  4,                   /* cost of reg,reg fld/fst */
  {12, 12, 12},        /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {6, 6, 8},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */
  2,                   /* cost of moving MMX register */
  {8, 8},              /* cost of loading MMX registers
                          in SImode and DImode */
  {8, 8},              /* cost of storing MMX registers
                          in SImode and DImode */
  2,                   /* cost of moving SSE register */
  {8, 8, 8},           /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {8, 8, 8},           /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  5,                   /* MMX or SSE register to integer */
  32,                  /* size of l1 cache. */
  512,                 /* size of l2 cache. */
  64,                  /* size of prefetch block */
  6,                   /* number of parallel prefetches */
  /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
     value is increased to the perhaps more appropriate value of 5.  */
  3,                   /* Branch cost */
  COSTS_N_INSNS (8),   /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (8),   /* cost of FMUL instruction. */
  COSTS_N_INSNS (20),  /* cost of FDIV instruction. */
  COSTS_N_INSNS (8),   /* cost of FABS instruction. */
  COSTS_N_INSNS (8),   /* cost of FCHS instruction. */
  COSTS_N_INSNS (40),  /* cost of FSQRT instruction. */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                   /* scalar_stmt_cost. */
  1,                   /* scalar load_cost. */
  1,                   /* scalar_store_cost. */
  1,                   /* vec_stmt_cost. */
  1,                   /* vec_to_scalar_cost. */
  1,                   /* scalar_to_vec_cost. */
  1,                   /* vec_align_load_cost. */
  2,                   /* vec_unalign_load_cost. */
  1,                   /* vec_store_cost. */
  3,                   /* cond_taken_branch_cost. */
  1,                   /* cond_not_taken_branch_cost. */
};
/* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona
   and K8.  */
struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1),   /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
  COSTS_N_INSNS (1),   /* variable shift costs */
  COSTS_N_INSNS (1),   /* constant shift costs */
  {COSTS_N_INSNS (3),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),  /* HI */
   COSTS_N_INSNS (3),  /* SI */
   COSTS_N_INSNS (4),  /* DI */
   COSTS_N_INSNS (2)}, /* other */
  0,                   /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26), /* HI */
   COSTS_N_INSNS (42), /* SI */
   COSTS_N_INSNS (74), /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1),   /* cost of movsx */
  COSTS_N_INSNS (1),   /* cost of movzx */
  8,                   /* "large" insn */
  17,                  /* MOVE_RATIO */
  4,                   /* cost for loading QImode using movzbl */
  {4, 4, 4},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {4, 4, 4},           /* cost of storing integer registers */
  4,                   /* cost of reg,reg fld/fst */
  {12, 12, 12},        /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {6, 6, 8},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */
  2,                   /* cost of moving MMX register */
  {8, 8},              /* cost of loading MMX registers
                          in SImode and DImode */
  {8, 8},              /* cost of storing MMX registers
                          in SImode and DImode */
  2,                   /* cost of moving SSE register */
  {8, 8, 8},           /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {8, 8, 8},           /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  5,                   /* MMX or SSE register to integer */
  32,                  /* size of l1 cache. */
  256,                 /* size of l2 cache. */
  64,                  /* size of prefetch block */
  6,                   /* number of parallel prefetches */
  3,                   /* Branch cost */
  COSTS_N_INSNS (8),   /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (8),   /* cost of FMUL instruction. */
  COSTS_N_INSNS (20),  /* cost of FDIV instruction. */
  COSTS_N_INSNS (8),   /* cost of FABS instruction. */
  COSTS_N_INSNS (8),   /* cost of FCHS instruction. */
  COSTS_N_INSNS (40),  /* cost of FSQRT instruction. */
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                   /* scalar_stmt_cost. */
  1,                   /* scalar load_cost. */
  1,                   /* scalar_store_cost. */
  1,                   /* vec_stmt_cost. */
  1,                   /* vec_to_scalar_cost. */
  1,                   /* scalar_to_vec_cost. */
  1,                   /* vec_align_load_cost. */
  2,                   /* vec_unalign_load_cost. */
  1,                   /* vec_store_cost. */
  3,                   /* cond_taken_branch_cost. */
  1,                   /* cond_not_taken_branch_cost. */
};
const struct processor_costs *ix86_cost = &pentium_cost;
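
/* Editor's note (an illustrative sketch, not part of the original file):
   ix86_cost is repointed at the table for the selected -mtune target
   during option processing, roughly

     ix86_cost = optimize_size ? &ix86_size_cost
                               : processor_target_table[ix86_tune].cost;

   where processor_target_table is assumed to live in the option-override
   code elsewhere in this file.  */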
/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
#define m_CORE2 (1<<PROCESSOR_CORE2)
#define m_ATOM (1<<PROCESSOR_ATOM)

#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
#define m_BDVER1 (1<<PROCESSOR_BDVER1)
#define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10 | m_BDVER1)

#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)

/* Generic instruction choice should be a common subset of supported CPUs
   (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
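
/* Editor's illustration (a simplified sketch of the option-override logic,
   not part of the original file): each mask below is tested against the
   bit of the processor selected for tuning, roughly

     unsigned int ix86_tune_mask = 1u << ix86_tune;
     for (i = 0; i < X86_TUNE_LAST; ++i)
       ix86_tune_features[i]
         = !!(initial_ix86_tune_features[i] & ix86_tune_mask);

   so each X86_TUNE_* entry reads as "enabled for exactly these
   processors".  */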
/* Feature tests against the various tunings.  */
unsigned char ix86_tune_features[X86_TUNE_LAST];

/* Feature tests against the various tunings used to create ix86_tune_features
   based on the processor mask.  */
static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
  /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
     negatively, so enabling it for Generic64 seems like a good code-size
     tradeoff.  We can't enable it for 32bit generic because it does not
     work well with PPro-based chips.  */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,

  /* X86_TUNE_PUSH_MEMORY */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,
  /* X86_TUNE_ZERO_EXTEND_WITH_AND */

  /* X86_TUNE_UNROLL_STRLEN */
  m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
  | m_CORE2 | m_GENERIC,

  /* X86_TUNE_DEEP_BRANCH_PREDICTION */
  m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
  /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
     on simulation results.  But after P4 was made, no performance benefit
     was observed with branch hints; they also increase code size.
     As a result, icc never generates branch hints.  */
  /* X86_TUNE_DOUBLE_WITH_ADD */

  /* X86_TUNE_USE_SAHF */
  m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER1 | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
     partial dependencies.  */
  m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
  | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
  /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
     register stalls on the Generic32 compilation setting as well.  However,
     in the current implementation partial register stalls are not eliminated
     very well: they can be introduced via subregs synthesized by combine
     and can happen in caller/callee saving sequences.  Because this option
     pays back little on PPro-based chips and conflicts with the partial
     register dependencies used by Athlon/P4-based chips, it is better to
     leave it off for generic32 for now.  */
  /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
  m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_HIMODE_FIOP */
  m_386 | m_486 | m_K6_GEODE,

  /* X86_TUNE_USE_SIMODE_FIOP */
  ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_MOV0 */

  /* X86_TUNE_USE_CLTD */
  ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx.  */

  /* X86_TUNE_SPLIT_LONG_MOVES */

  /* X86_TUNE_READ_MODIFY_WRITE */

  /* X86_TUNE_READ_MODIFY */

  /* X86_TUNE_PROMOTE_QIMODE */
  m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
  | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,

  /* X86_TUNE_FAST_PREFIX */
  ~(m_PENT | m_486 | m_386),

  /* X86_TUNE_SINGLE_STRINGOP */
  m_386 | m_PENT4 | m_NOCONA,

  /* X86_TUNE_QIMODE_MATH */
  /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
     register stalls.  Just like X86_TUNE_PARTIAL_REG_STALL, this option
     might be considered for Generic32 if our scheme for avoiding partial
     stalls were more effective.  */
1476 /* X86_TUNE_PROMOTE_QI_REGS */
1479 /* X86_TUNE_PROMOTE_HI_REGS */
1482 /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop. */
1483 m_ATOM | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA
1484 | m_CORE2 | m_GENERIC,
1486 /* X86_TUNE_ADD_ESP_8 */
1487 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_K6_GEODE | m_386
1488 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1490 /* X86_TUNE_SUB_ESP_4 */
1491 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2
1494 /* X86_TUNE_SUB_ESP_8 */
1495 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_386 | m_486
1496 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1498 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1499 for DFmode copies */
1500 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1501 | m_GENERIC | m_GEODE),
1503 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1504 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1506 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1507 conflict here between PPro/Pentium4 based chips that treat 128bit
1508 SSE registers as single units versus K8 based chips that divide SSE
1509 registers into two 64bit halves. This knob promotes all store destinations
1510 to be 128bit to allow register renaming on 128bit SSE units, but usually
1511 results in one extra microop on 64bit SSE units. Experimental results
1512 show that disabling this option on P4 brings over a 20% SPECfp regression,
1513 while enabling it on K8 brings roughly a 2.4% regression that can be partly
1514 masked by careful scheduling of moves. */
1515 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
1516 | m_AMDFAM10 | m_BDVER1,
1518 /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL */
1519 m_AMDFAM10 | m_BDVER1,
1521 /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL */
1524 /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */
1527 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1528 are resolved on SSE register parts instead of whole registers, so we may
1529 maintain just the lower part of scalar values in the proper format, leaving
1530 the upper part undefined. */
1533 /* X86_TUNE_SSE_TYPELESS_STORES */
1536 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1537 m_PPRO | m_PENT4 | m_NOCONA,
1539 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1540 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1542 /* X86_TUNE_PROLOGUE_USING_MOVE */
1543 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1545 /* X86_TUNE_EPILOGUE_USING_MOVE */
1546 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1548 /* X86_TUNE_SHIFT1 */
1551 /* X86_TUNE_USE_FFREEP */
1554 /* X86_TUNE_INTER_UNIT_MOVES */
1555 ~(m_AMD_MULTIPLE | m_GENERIC),
1557 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
1558 ~(m_AMDFAM10 | m_BDVER1),
1560 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
1561 than 4 branch instructions in a 16-byte window. */
1562 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
1565 /* X86_TUNE_SCHEDULE */
1566 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
1569 /* X86_TUNE_USE_BT */
1570 m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,
1572 /* X86_TUNE_USE_INCDEC */
1573 ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),
1575 /* X86_TUNE_PAD_RETURNS */
1576 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
1578 /* X86_TUNE_EXT_80387_CONSTANTS */
1579 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
1580 | m_CORE2 | m_GENERIC,
1582 /* X86_TUNE_SHORTEN_X87_SSE */
1585 /* X86_TUNE_AVOID_VECTOR_DECODE */
1588 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for
1589 HImode and SImode multiply, but the 386 and 486 do HImode multiply faster. */
1592 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of a 32-bit constant and memory takes
1593 the vector path on AMD machines. */
1594 m_K8 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1,
1596 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of an 8-bit constant takes the vector path on AMD machines. */
1598 m_K8 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1,
1600 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR than a mov. */
1604 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1605 though it is one byte longer. */
1608 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with a memory
1609 operand that cannot be represented using a modRM byte. The XOR
1610 replacement is long decoded, so this split helps here as well. */
1613 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion from FP to FP. */
1615 m_AMDFAM10 | m_GENERIC,
1617 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1618 from integer to FP. */
1621 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1622 with a subsequent conditional jump instruction into a single
1623 compare-and-branch uop. */
1626 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
1627 will impact LEA instruction selection. */
1631 /* Feature tests against the various architecture variations. */
1632 unsigned char ix86_arch_features[X86_ARCH_LAST];
1634 /* Feature tests against the various architecture variations, used to create
1635 ix86_arch_features based on the processor mask. */
1636 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1637 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1638 ~(m_386 | m_486 | m_PENT | m_K6),
1640 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1643 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1646 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1649 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1653 static const unsigned int x86_accumulate_outgoing_args
1654 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1657 static const unsigned int x86_arch_always_fancy_math_387
1658 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1659 | m_NOCONA | m_CORE2 | m_GENERIC;
1661 static enum stringop_alg stringop_alg = no_stringop;
1663 /* In case the average insn count for a single function invocation is
1664 lower than this constant, emit a fast (but longer) prologue and epilogue. */
1666 #define FAST_PROLOGUE_INSN_COUNT 20
1668 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
1669 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1670 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1671 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1673 /* Array of the smallest class containing reg number REGNO, indexed by
1674 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1676 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1678 /* ax, dx, cx, bx */
1679 AREG, DREG, CREG, BREG,
1680 /* si, di, bp, sp */
1681 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1683 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1684 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1687 /* flags, fpsr, fpcr, frame */
1688 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1690 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1693 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1696 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1697 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1698 /* SSE REX registers */
1699 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
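
/* Illustrative sketch (not part of GCC): per the comment above, i386.h's
   REGNO_REG_CLASS is a plain lookup into regclass_map.  A minimal standalone
   model of a consumer (example_is_accumulator_p is hypothetical):  */
#if 0 /* example only, not built */
static bool
example_is_accumulator_p (unsigned int regno)
{
  /* Hard register 0 (%eax) maps to AREG, the accumulator class.  */
  return regclass_map[regno] == AREG;
}
#endif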
1703 /* The "default" register map used in 32bit mode. */
1705 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1707 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1708 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1709 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1710 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1711 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1712 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1713 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1716 /* The "default" register map used in 64bit mode. */
1718 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1720 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1721 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1722 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1723 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1724 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1725 8,9,10,11,12,13,14,15, /* extended integer registers */
1726 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1729 /* Define the register numbers to be used in Dwarf debugging information.
1730 The SVR4 reference port C compiler uses the following register numbers
1731 in its Dwarf output code:
1732 0 for %eax (gcc regno = 0)
1733 1 for %ecx (gcc regno = 2)
1734 2 for %edx (gcc regno = 1)
1735 3 for %ebx (gcc regno = 3)
1736 4 for %esp (gcc regno = 7)
1737 5 for %ebp (gcc regno = 6)
1738 6 for %esi (gcc regno = 4)
1739 7 for %edi (gcc regno = 5)
1740 The following three DWARF register numbers are never generated by
1741 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1742 believes these numbers have these meanings.
1743 8 for %eip (no gcc equivalent)
1744 9 for %eflags (gcc regno = 17)
1745 10 for %trapno (no gcc equivalent)
1746 It is not at all clear how we should number the FP stack registers
1747 for the x86 architecture. If the version of SDB on x86/svr4 were
1748 a bit less brain dead with respect to floating-point then we would
1749 have a precedent to follow with respect to DWARF register numbers
1750 for x86 FP registers, but the SDB on x86/svr4 is so completely
1751 broken with respect to FP registers that it is hardly worth thinking
1752 of it as something to strive for compatibility with.
1753 The version of x86/svr4 SDB I have at the moment does (partially)
1754 seem to believe that DWARF register number 11 is associated with
1755 the x86 register %st(0), but that's about all. Higher DWARF
1756 register numbers don't seem to be associated with anything in
1757 particular, and even for DWARF regno 11, SDB only seems to under-
1758 stand that it should say that a variable lives in %st(0) (when
1759 asked via an `=' command) if we said it was in DWARF regno 11,
1760 but SDB still prints garbage when asked for the value of the
1761 variable in question (via a `/' command).
1762 (Also note that the labels SDB prints for various FP stack regs
1763 when doing an `x' command are all wrong.)
1764 Note that these problems generally don't affect the native SVR4
1765 C compiler because it doesn't allow the use of -O with -g and
1766 because when it is *not* optimizing, it allocates a memory
1767 location for each floating-point variable, and the memory
1768 location is what gets described in the DWARF AT_location
1769 attribute for the variable in question.
1770 Regardless of the severe mental illness of the x86/svr4 SDB, we
1771 do something sensible here and we use the following DWARF
1772 register numbers. Note that these are all stack-top-relative numbers:
1774 11 for %st(0) (gcc regno = 8)
1775 12 for %st(1) (gcc regno = 9)
1776 13 for %st(2) (gcc regno = 10)
1777 14 for %st(3) (gcc regno = 11)
1778 15 for %st(4) (gcc regno = 12)
1779 16 for %st(5) (gcc regno = 13)
1780 17 for %st(6) (gcc regno = 14)
1781 18 for %st(7) (gcc regno = 15)
1783 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1785 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1786 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1787 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1788 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1789 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1790 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1791 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
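
/* Worked example (illustrative, not part of GCC) cross-checking the table
   against the DWARF numbering described in the comment above:  */
#if 0 /* example only, not built */
/* gcc regno 2 is %ecx, whose SVR4 DWARF number is 1 ...  */
gcc_assert (svr4_dbx_register_map[2] == 1);
/* ... and %st(0) is gcc regno 8, DWARF regno 11.  */
gcc_assert (svr4_dbx_register_map[8] == 11);
#endif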
1794 /* Test and compare insns in i386.md store the information needed to
1795 generate branch and scc insns here. */
1797 rtx ix86_compare_op0 = NULL_RTX;
1798 rtx ix86_compare_op1 = NULL_RTX;
1800 /* Define parameter passing and return registers. */
1802 static int const x86_64_int_parameter_registers[6] =
1804 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
1807 static int const x86_64_ms_abi_int_parameter_registers[4] =
1809 CX_REG, DX_REG, R8_REG, R9_REG
1812 static int const x86_64_int_return_registers[4] =
1814 AX_REG, DX_REG, DI_REG, SI_REG
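
/* Illustrative sketch (not part of GCC): for a SYSV-ABI call f (a, b, c)
   the first three integer arguments go in %rdi, %rsi and %rdx, i.e. the
   first three entries of x86_64_int_parameter_registers; under the MS ABI
   the same arguments use %rcx, %rdx and %r8.  A hypothetical helper
   modeling that lookup:  */
#if 0 /* example only, not built */
static int
example_int_arg_regno (enum calling_abi abi, int argno)
{
  return (abi == MS_ABI
          ? x86_64_ms_abi_int_parameter_registers[argno]
          : x86_64_int_parameter_registers[argno]);
}
#endif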
1817 /* Define the structure for the machine field in struct function. */
1819 struct GTY(()) stack_local_entry {
1820 unsigned short mode;
1823 struct stack_local_entry *next;
1826 /* Structure describing stack frame layout.
1827 Stack grows downward:
1833 saved frame pointer if frame_pointer_needed
1834 <- HARD_FRAME_POINTER
1843 [va_arg registers]
1844 to_allocate <- FRAME_POINTER
1856 HOST_WIDE_INT frame;
1858 int outgoing_arguments_size;
1861 HOST_WIDE_INT to_allocate;
1862 /* The offsets relative to ARG_POINTER. */
1863 HOST_WIDE_INT frame_pointer_offset;
1864 HOST_WIDE_INT hard_frame_pointer_offset;
1865 HOST_WIDE_INT stack_pointer_offset;
1867 /* When save_regs_using_mov is set, emit prologue using
1868 move instead of push instructions. */
1869 bool save_regs_using_mov;
1872 /* Code model option. */
1873 enum cmodel ix86_cmodel;
1875 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1877 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1879 /* Which unit we are generating floating point math for. */
1880 enum fpmath_unit ix86_fpmath;
1882 /* Which cpu are we scheduling for. */
1883 enum attr_cpu ix86_schedule;
1885 /* Which cpu are we optimizing for. */
1886 enum processor_type ix86_tune;
1888 /* Which instruction set architecture to use. */
1889 enum processor_type ix86_arch;
1891 /* True if the SSE prefetch instruction is not a NOOP. */
1892 int x86_prefetch_sse;
1894 /* ix86_regparm_string as a number */
1895 static int ix86_regparm;
1897 /* -mstackrealign option */
1898 extern int ix86_force_align_arg_pointer;
1899 static const char ix86_force_align_arg_pointer_string[]
1900 = "force_align_arg_pointer";
1902 static rtx (*ix86_gen_leave) (void);
1903 static rtx (*ix86_gen_pop1) (rtx);
1904 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1905 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1906 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
1907 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1908 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1909 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1911 /* Preferred alignment for stack boundary in bits. */
1912 unsigned int ix86_preferred_stack_boundary;
1914 /* Alignment for incoming stack boundary in bits specified at the command line. */
1916 static unsigned int ix86_user_incoming_stack_boundary;
1918 /* Default alignment for incoming stack boundary in bits. */
1919 static unsigned int ix86_default_incoming_stack_boundary;
1921 /* Alignment for incoming stack boundary in bits. */
1922 unsigned int ix86_incoming_stack_boundary;
1924 /* The abi used by target. */
1925 enum calling_abi ix86_abi;
1927 /* Values 1-5: see jump.c */
1928 int ix86_branch_cost;
1930 /* Calling abi specific va_list type nodes. */
1931 static GTY(()) tree sysv_va_list_type_node;
1932 static GTY(()) tree ms_va_list_type_node;
1934 /* Variables which are this size or smaller are put in the data/bss
1935 or ldata/lbss sections. */
1937 int ix86_section_threshold = 65536;
1939 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1940 char internal_label_prefix[16];
1941 int internal_label_prefix_len;
1943 /* Fence to use after loop using movnt. */
1946 /* Register class used for passing a given 64bit part of the argument.
1947 These represent classes as documented by the psABI, with the exception
1948 of the SSESF and SSEDF classes, which are basically the SSE class; gcc
1949 just uses SFmode or DFmode moves instead of DImode to avoid reformatting penalties.
1951 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
1952 whenever possible (the upper half does contain padding). */
1953 enum x86_64_reg_class
1956 X86_64_INTEGER_CLASS,
1957 X86_64_INTEGERSI_CLASS,
1964 X86_64_COMPLEX_X87_CLASS,
1968 #define MAX_CLASSES 4
1970 /* Table of constants used by fldpi, fldln2, etc.... */
1971 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1972 static bool ext_80387_constants_init = 0;
1975 static struct machine_function * ix86_init_machine_status (void);
1976 static rtx ix86_function_value (const_tree, const_tree, bool);
1977 static bool ix86_function_value_regno_p (const unsigned int);
1978 static rtx ix86_static_chain (const_tree, bool);
1979 static int ix86_function_regparm (const_tree, const_tree);
1980 static void ix86_compute_frame_layout (struct ix86_frame *);
1981 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1983 static void ix86_add_new_builtins (int);
1984 static rtx ix86_expand_vec_perm_builtin (tree);
1986 enum ix86_function_specific_strings
1988 IX86_FUNCTION_SPECIFIC_ARCH,
1989 IX86_FUNCTION_SPECIFIC_TUNE,
1990 IX86_FUNCTION_SPECIFIC_FPMATH,
1991 IX86_FUNCTION_SPECIFIC_MAX
1994 static char *ix86_target_string (int, int, const char *, const char *,
1995 const char *, bool);
1996 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1997 static void ix86_function_specific_save (struct cl_target_option *);
1998 static void ix86_function_specific_restore (struct cl_target_option *);
1999 static void ix86_function_specific_print (FILE *, int,
2000 struct cl_target_option *);
2001 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
2002 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
2003 static bool ix86_can_inline_p (tree, tree);
2004 static void ix86_set_current_function (tree);
2005 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
2007 static enum calling_abi ix86_function_abi (const_tree);
2010 #ifndef SUBTARGET32_DEFAULT_CPU
2011 #define SUBTARGET32_DEFAULT_CPU "i386"
2014 /* The svr4 ABI for the i386 says that records and unions are returned in memory. */
2016 #ifndef DEFAULT_PCC_STRUCT_RETURN
2017 #define DEFAULT_PCC_STRUCT_RETURN 1
2020 /* Whether -mtune= or -march= were specified */
2021 static int ix86_tune_defaulted;
2022 static int ix86_arch_specified;
2024 /* Bit flags that specify the ISA we are compiling for. */
2025 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
2027 /* A mask of ix86_isa_flags that includes bit X if X
2028 was set or cleared on the command line. */
2029 static int ix86_isa_flags_explicit;
2031 /* Define a set of ISAs which are available when a given ISA is
2032 enabled. MMX and SSE ISAs are handled separately. */
2034 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
2035 #define OPTION_MASK_ISA_3DNOW_SET \
2036 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
2038 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
2039 #define OPTION_MASK_ISA_SSE2_SET \
2040 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
2041 #define OPTION_MASK_ISA_SSE3_SET \
2042 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
2043 #define OPTION_MASK_ISA_SSSE3_SET \
2044 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
2045 #define OPTION_MASK_ISA_SSE4_1_SET \
2046 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
2047 #define OPTION_MASK_ISA_SSE4_2_SET \
2048 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
2049 #define OPTION_MASK_ISA_AVX_SET \
2050 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
2051 #define OPTION_MASK_ISA_FMA_SET \
2052 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
2054 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same as -msse4.2. */
2056 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
2058 #define OPTION_MASK_ISA_SSE4A_SET \
2059 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
2060 #define OPTION_MASK_ISA_FMA4_SET \
2061 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
2062 | OPTION_MASK_ISA_AVX_SET)
2063 #define OPTION_MASK_ISA_XOP_SET \
2064 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
2065 #define OPTION_MASK_ISA_LWP_SET \
2068 /* AES and PCLMUL need SSE2 because they use xmm registers. */
2069 #define OPTION_MASK_ISA_AES_SET \
2070 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
2071 #define OPTION_MASK_ISA_PCLMUL_SET \
2072 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
2074 #define OPTION_MASK_ISA_ABM_SET \
2075 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
2077 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
2078 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
2079 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
2080 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
2081 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
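
/* The *_SET macros above chain, so requesting one ISA transitively requests
   everything it depends on.  Illustrative expansion (not part of GCC):

     OPTION_MASK_ISA_SSE4_2_SET
       == OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1
        | OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3
        | OPTION_MASK_ISA_SSE2  | OPTION_MASK_ISA_SSE

   so -msse4.2 alone turns on the whole SSE family below it.  */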
2083 /* Define a set of ISAs which aren't available when a given ISA is
2084 disabled. MMX and SSE ISAs are handled separately. */
2086 #define OPTION_MASK_ISA_MMX_UNSET \
2087 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
2088 #define OPTION_MASK_ISA_3DNOW_UNSET \
2089 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
2090 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
2092 #define OPTION_MASK_ISA_SSE_UNSET \
2093 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
2094 #define OPTION_MASK_ISA_SSE2_UNSET \
2095 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
2096 #define OPTION_MASK_ISA_SSE3_UNSET \
2097 (OPTION_MASK_ISA_SSE3 \
2098 | OPTION_MASK_ISA_SSSE3_UNSET \
2099 | OPTION_MASK_ISA_SSE4A_UNSET )
2100 #define OPTION_MASK_ISA_SSSE3_UNSET \
2101 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2102 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2103 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2104 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2105 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2106 #define OPTION_MASK_ISA_AVX_UNSET \
2107 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2108 | OPTION_MASK_ISA_FMA4_UNSET)
2109 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
2111 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same as -mno-sse4.1. */
2113 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2115 #define OPTION_MASK_ISA_SSE4A_UNSET \
2116 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2118 #define OPTION_MASK_ISA_FMA4_UNSET \
2119 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2120 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2121 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2123 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2124 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2125 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2126 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2127 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2128 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2129 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2130 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
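
/* The *_UNSET macros chain in the opposite direction: disabling an ISA also
   disables everything that depends on it.  A minimal sketch of applying a
   SET/UNSET pair (mirroring the pattern used in ix86_handle_option below):  */
#if 0 /* example only, not built */
int isa = OPTION_MASK_ISA_SSE4_2_SET;   /* as if -msse4.2 */
isa &= ~OPTION_MASK_ISA_SSE2_UNSET;     /* then -mno-sse2 */
/* Plain SSE survives; SSE2 and everything that depends on it (SSE3, SSSE3,
   SSE4.x, SSE4A, AVX, FMA, FMA4, XOP) is now clear.  */
#endif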
2132 /* Vectorization library interface and handlers. */
2133 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
2134 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2135 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2137 /* Processor target table, indexed by processor number */
2140 const struct processor_costs *cost; /* Processor costs */
2141 const int align_loop; /* Default alignments. */
2142 const int align_loop_max_skip;
2143 const int align_jump;
2144 const int align_jump_max_skip;
2145 const int align_func;
2148 static const struct ptt processor_target_table[PROCESSOR_max] =
2150 {&i386_cost, 4, 3, 4, 3, 4},
2151 {&i486_cost, 16, 15, 16, 15, 16},
2152 {&pentium_cost, 16, 7, 16, 7, 16},
2153 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2154 {&geode_cost, 0, 0, 0, 0, 0},
2155 {&k6_cost, 32, 7, 32, 7, 32},
2156 {&athlon_cost, 16, 7, 16, 7, 16},
2157 {&pentium4_cost, 0, 0, 0, 0, 0},
2158 {&k8_cost, 16, 7, 16, 7, 16},
2159 {&nocona_cost, 0, 0, 0, 0, 0},
2160 {&core2_cost, 16, 10, 16, 10, 16},
2161 {&generic32_cost, 16, 7, 16, 7, 16},
2162 {&generic64_cost, 16, 10, 16, 10, 16},
2163 {&amdfam10_cost, 32, 24, 32, 7, 32},
2164 {&bdver1_cost, 32, 24, 32, 7, 32},
2165 {&atom_cost, 16, 7, 16, 7, 16}
2168 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2195 /* Implement TARGET_HANDLE_OPTION. */
2198 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
2205 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2206 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2210 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2211 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2218 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2219 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2223 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2224 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2234 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2235 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2239 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2240 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2247 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2248 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2252 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2253 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2260 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2261 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2265 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2266 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2273 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2274 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2278 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2279 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2286 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2287 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2291 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2292 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2299 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2300 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2304 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2305 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2312 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2313 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2317 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2318 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2325 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2326 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2330 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2331 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2336 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2337 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2341 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2342 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2348 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2349 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2353 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2354 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2361 ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2362 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2366 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2367 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2374 ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2375 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2379 ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2380 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2387 ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2388 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2392 ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2393 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2400 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2401 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2405 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2406 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2413 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2414 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2418 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2419 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2426 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2427 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2431 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2432 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2439 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2440 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2444 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2445 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2452 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2453 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2457 ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2458 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2465 ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2466 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2470 ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2471 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2478 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2479 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2483 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2484 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2491 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2492 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2496 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2497 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
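
/* Every OPT_m* case above follows the same two-step pattern: apply the SET
   mask (or clear the UNSET mask) in ix86_isa_flags, then record the same
   bits in ix86_isa_flags_explicit so later defaulting code knows the user
   chose them.  A standalone sketch of the pattern (example_handle_msse2 is
   hypothetical):  */
#if 0 /* example only, not built */
static void
example_handle_msse2 (int value)  /* 1 for -msse2, 0 for -mno-sse2 */
{
  if (value)
    {
      ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
    }
  else
    {
      ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
    }
}
#endif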
2506 /* Return a string that documents the current -m options. The caller is
2507 responsible for freeing the string. */
2510 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2511 const char *fpmath, bool add_nl_p)
2513 struct ix86_target_opts
2515 const char *option; /* option string */
2516 int mask; /* isa mask options */
2519 /* This table is ordered so that options like -msse4.2 that imply
2520 preceding options match those first. */
2521 static struct ix86_target_opts isa_opts[] =
2523 { "-m64", OPTION_MASK_ISA_64BIT },
2524 { "-mfma4", OPTION_MASK_ISA_FMA4 },
2525 { "-mfma", OPTION_MASK_ISA_FMA },
2526 { "-mxop", OPTION_MASK_ISA_XOP },
2527 { "-mlwp", OPTION_MASK_ISA_LWP },
2528 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2529 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2530 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2531 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2532 { "-msse3", OPTION_MASK_ISA_SSE3 },
2533 { "-msse2", OPTION_MASK_ISA_SSE2 },
2534 { "-msse", OPTION_MASK_ISA_SSE },
2535 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2536 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2537 { "-mmmx", OPTION_MASK_ISA_MMX },
2538 { "-mabm", OPTION_MASK_ISA_ABM },
2539 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2540 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2541 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2542 { "-maes", OPTION_MASK_ISA_AES },
2543 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2547 static struct ix86_target_opts flag_opts[] =
2549 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2550 { "-m80387", MASK_80387 },
2551 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2552 { "-malign-double", MASK_ALIGN_DOUBLE },
2553 { "-mcld", MASK_CLD },
2554 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2555 { "-mieee-fp", MASK_IEEE_FP },
2556 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2557 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2558 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2559 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2560 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2561 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2562 { "-mno-red-zone", MASK_NO_RED_ZONE },
2563 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2564 { "-mrecip", MASK_RECIP },
2565 { "-mrtd", MASK_RTD },
2566 { "-msseregparm", MASK_SSEREGPARM },
2567 { "-mstack-arg-probe", MASK_STACK_PROBE },
2568 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2571 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2574 char target_other[40];
2583 memset (opts, '\0', sizeof (opts));
2585 /* Add -march= option. */
2588 opts[num][0] = "-march=";
2589 opts[num++][1] = arch;
2592 /* Add -mtune= option. */
2595 opts[num][0] = "-mtune=";
2596 opts[num++][1] = tune;
2599 /* Pick out the ISA options. */
2600 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2602 if ((isa & isa_opts[i].mask) != 0)
2604 opts[num++][0] = isa_opts[i].option;
2605 isa &= ~ isa_opts[i].mask;
2609 if (isa && add_nl_p)
2611 opts[num++][0] = isa_other;
2612 sprintf (isa_other, "(other isa: %#x)", isa);
2615 /* Add flag options. */
2616 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2618 if ((flags & flag_opts[i].mask) != 0)
2620 opts[num++][0] = flag_opts[i].option;
2621 flags &= ~ flag_opts[i].mask;
2625 if (flags && add_nl_p)
2627 opts[num++][0] = target_other;
2628 sprintf (target_other, "(other flags: %#x)", flags);
2631 /* Add -fpmath= option. */
2634 opts[num][0] = "-mfpmath=";
2635 opts[num++][1] = fpmath;
2642 gcc_assert (num < ARRAY_SIZE (opts));
2644 /* Size the string. */
2646 sep_len = (add_nl_p) ? 3 : 1;
2647 for (i = 0; i < num; i++)
2650 for (j = 0; j < 2; j++)
2652 len += strlen (opts[i][j]);
2655 /* Build the string. */
2656 ret = ptr = (char *) xmalloc (len);
2659 for (i = 0; i < num; i++)
2663 for (j = 0; j < 2; j++)
2664 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2671 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2679 for (j = 0; j < 2; j++)
2682 memcpy (ptr, opts[i][j], len2[j]);
2684 line_len += len2[j];
2689 gcc_assert (ret + len >= ptr);
2694 /* Function that is callable from the debugger to print the current
2697 ix86_debug_options (void)
2699 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2700 ix86_arch_string, ix86_tune_string,
2701 ix86_fpmath_string, true);
2705 fprintf (stderr, "%s\n\n", opts);
2709 fputs ("<no options>\n\n", stderr);
2714 /* Sometimes certain combinations of command options do not make
2715 sense on a particular target machine. You can define a macro
2716 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2717 defined, is executed once just after all the command options have
2720 Don't use this macro to turn on various extra optimizations for
2721 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2724 override_options (bool main_args_p)
2727 unsigned int ix86_arch_mask, ix86_tune_mask;
2728 const bool ix86_tune_specified = (ix86_tune_string != NULL);
2733 /* Comes from final.c -- no real reason to change it. */
2734 #define MAX_CODE_ALIGN 16
2742 PTA_PREFETCH_SSE = 1 << 4,
2744 PTA_3DNOW_A = 1 << 6,
2748 PTA_POPCNT = 1 << 10,
2750 PTA_SSE4A = 1 << 12,
2751 PTA_NO_SAHF = 1 << 13,
2752 PTA_SSE4_1 = 1 << 14,
2753 PTA_SSE4_2 = 1 << 15,
2755 PTA_PCLMUL = 1 << 17,
2758 PTA_MOVBE = 1 << 20,
2766 const char *const name; /* processor name or nickname. */
2767 const enum processor_type processor;
2768 const enum attr_cpu schedule;
2769 const unsigned /*enum pta_flags*/ flags;
2771 const processor_alias_table[] =
2773 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2774 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2775 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2776 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2777 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2778 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2779 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2780 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2781 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2782 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2783 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2784 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2785 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2787 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2789 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2790 PTA_MMX | PTA_SSE | PTA_SSE2},
2791 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2792 PTA_MMX |PTA_SSE | PTA_SSE2},
2793 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2794 PTA_MMX | PTA_SSE | PTA_SSE2},
2795 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2796 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2797 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2798 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2799 | PTA_CX16 | PTA_NO_SAHF},
2800 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2801 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2802 | PTA_SSSE3 | PTA_CX16},
2803 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2804 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2805 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2806 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2807 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A |PTA_PREFETCH_SSE},
2808 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2809 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2810 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2811 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2812 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2813 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2814 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2815 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2816 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2817 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2818 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2819 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2820 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2821 {"x86-64", PROCESSOR_K8, CPU_K8,
2822 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2823 {"k8", PROCESSOR_K8, CPU_K8,
2824 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2825 | PTA_SSE2 | PTA_NO_SAHF},
2826 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2827 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2828 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2829 {"opteron", PROCESSOR_K8, CPU_K8,
2830 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2831 | PTA_SSE2 | PTA_NO_SAHF},
2832 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2833 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2834 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2835 {"athlon64", PROCESSOR_K8, CPU_K8,
2836 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2837 | PTA_SSE2 | PTA_NO_SAHF},
2838 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2839 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2840 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2841 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2842 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2843 | PTA_SSE2 | PTA_NO_SAHF},
2844 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2845 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2846 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2847 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2848 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2849 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2850 {"bdver1", PROCESSOR_BDVER1, CPU_BDVER1,
2851 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2852 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM
2853 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AES
2854 | PTA_PCLMUL | PTA_AVX | PTA_FMA4 | PTA_XOP | PTA_LWP},
2855 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2856 0 /* flags are only used for -march switch. */ },
2857 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2858 PTA_64BIT /* flags are only used for -march switch. */ },
2861 int const pta_size = ARRAY_SIZE (processor_alias_table);
2863 /* Set up prefix/suffix so the error messages refer to either the command
2864 line argument, or the attribute(target). */
2873 prefix = "option(\"";
2878 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2879 SUBTARGET_OVERRIDE_OPTIONS;
2882 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2883 SUBSUBTARGET_OVERRIDE_OPTIONS;
2886 /* -fPIC is the default for x86_64. */
2887 if (TARGET_MACHO && TARGET_64BIT)
2890 /* Set the default values for switches whose default depends on TARGET_64BIT
2891 in case they weren't overwritten by command line options. */
2894 /* Mach-O doesn't support omitting the frame pointer for now. */
2895 if (flag_omit_frame_pointer == 2)
2896 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2897 if (flag_asynchronous_unwind_tables == 2)
2898 flag_asynchronous_unwind_tables = 1;
2899 if (flag_pcc_struct_return == 2)
2900 flag_pcc_struct_return = 0;
2904 if (flag_omit_frame_pointer == 2)
2905 flag_omit_frame_pointer = 0;
2906 if (flag_asynchronous_unwind_tables == 2)
2907 flag_asynchronous_unwind_tables = 0;
2908 if (flag_pcc_struct_return == 2)
2909 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2912 /* Need to check -mtune=generic first. */
2913 if (ix86_tune_string)
2915 if (!strcmp (ix86_tune_string, "generic")
2916 || !strcmp (ix86_tune_string, "i686")
2917 /* As special support for cross compilers, we read -mtune=native
2918 as -mtune=generic. With native compilers we won't see
2919 -mtune=native, as it will have been rewritten by the driver. */
2920 || !strcmp (ix86_tune_string, "native"))
2923 ix86_tune_string = "generic64";
2925 ix86_tune_string = "generic32";
2927 /* If this call is for setting the option attribute, allow the
2928 generic32/generic64 that was previously set. */
2929 else if (!main_args_p
2930 && (!strcmp (ix86_tune_string, "generic32")
2931 || !strcmp (ix86_tune_string, "generic64")))
2933 else if (!strncmp (ix86_tune_string, "generic", 7))
2934 error ("bad value (%s) for %stune=%s %s",
2935 ix86_tune_string, prefix, suffix, sw);
2936 else if (!strcmp (ix86_tune_string, "x86-64"))
2937 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2938 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2939 prefix, suffix, prefix, suffix, prefix, suffix);
2943 if (ix86_arch_string)
2944 ix86_tune_string = ix86_arch_string;
2945 if (!ix86_tune_string)
2947 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2948 ix86_tune_defaulted = 1;
2951 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2952 need to use a sensible tune option. */
2953 if (!strcmp (ix86_tune_string, "generic")
2954 || !strcmp (ix86_tune_string, "x86-64")
2955 || !strcmp (ix86_tune_string, "i686"))
2958 ix86_tune_string = "generic64";
2960 ix86_tune_string = "generic32";
2964 if (ix86_stringop_string)
2966 if (!strcmp (ix86_stringop_string, "rep_byte"))
2967 stringop_alg = rep_prefix_1_byte;
2968 else if (!strcmp (ix86_stringop_string, "libcall"))
2969 stringop_alg = libcall;
2970 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2971 stringop_alg = rep_prefix_4_byte;
2972 else if (!strcmp (ix86_stringop_string, "rep_8byte")
2974 /* rep; movq isn't available in 32-bit code. */
2975 stringop_alg = rep_prefix_8_byte;
2976 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2977 stringop_alg = loop_1_byte;
2978 else if (!strcmp (ix86_stringop_string, "loop"))
2979 stringop_alg = loop;
2980 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2981 stringop_alg = unrolled_loop;
2983 error ("bad value (%s) for %sstringop-strategy=%s %s",
2984 ix86_stringop_string, prefix, suffix, sw);
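
/* Usage sketch (hypothetical command lines, not part of GCC):

     -mstringop-strategy=rep_8byte  -> stringop_alg = rep_prefix_8_byte
                                       (64-bit only; rep movsq copies)
     -mstringop-strategy=loop       -> stringop_alg = loop
     -mstringop-strategy=bogus      -> "bad value (...)" error  */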
2987 if (!ix86_arch_string)
2988 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
2990 ix86_arch_specified = 1;
2992 /* Validate -mabi= value. */
2993 if (ix86_abi_string)
2995 if (strcmp (ix86_abi_string, "sysv") == 0)
2996 ix86_abi = SYSV_ABI;
2997 else if (strcmp (ix86_abi_string, "ms") == 0)
3000 error ("unknown ABI (%s) for %sabi=%s %s",
3001 ix86_abi_string, prefix, suffix, sw);
3004 ix86_abi = DEFAULT_ABI;
3006 if (ix86_cmodel_string != 0)
3008 if (!strcmp (ix86_cmodel_string, "small"))
3009 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3010 else if (!strcmp (ix86_cmodel_string, "medium"))
3011 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
3012 else if (!strcmp (ix86_cmodel_string, "large"))
3013 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
3015 error ("code model %s does not support PIC mode", ix86_cmodel_string);
3016 else if (!strcmp (ix86_cmodel_string, "32"))
3017 ix86_cmodel = CM_32;
3018 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
3019 ix86_cmodel = CM_KERNEL;
3021 error ("bad value (%s) for %scmodel=%s %s",
3022 ix86_cmodel_string, prefix, suffix, sw);
3026 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
3027 use of rip-relative addressing. This eliminates fixups that
3028 would otherwise be needed if this object is to be placed in a
3029 DLL, and is essentially just as efficient as direct addressing. */
3030 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
3031 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
3032 else if (TARGET_64BIT)
3033 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3035 ix86_cmodel = CM_32;
3037 if (ix86_asm_string != 0)
3040 && !strcmp (ix86_asm_string, "intel"))
3041 ix86_asm_dialect = ASM_INTEL;
3042 else if (!strcmp (ix86_asm_string, "att"))
3043 ix86_asm_dialect = ASM_ATT;
3045 error ("bad value (%s) for %sasm=%s %s",
3046 ix86_asm_string, prefix, suffix, sw);
3048 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
3049 error ("code model %qs not supported in the %s bit mode",
3050 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
3051 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
3052 sorry ("%i-bit mode not compiled in",
3053 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
3055 for (i = 0; i < pta_size; i++)
3056 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
3058 ix86_schedule = processor_alias_table[i].schedule;
3059 ix86_arch = processor_alias_table[i].processor;
3060 /* Default cpu tuning to the architecture. */
3061 ix86_tune = ix86_arch;
3063 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3064 error ("CPU you selected does not support x86-64 "
3067 if (processor_alias_table[i].flags & PTA_MMX
3068 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
3069 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
3070 if (processor_alias_table[i].flags & PTA_3DNOW
3071 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
3072 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
3073 if (processor_alias_table[i].flags & PTA_3DNOW_A
3074 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
3075 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
3076 if (processor_alias_table[i].flags & PTA_SSE
3077 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
3078 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
3079 if (processor_alias_table[i].flags & PTA_SSE2
3080 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
3081 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
3082 if (processor_alias_table[i].flags & PTA_SSE3
3083 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
3084 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
3085 if (processor_alias_table[i].flags & PTA_SSSE3
3086 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
3087 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
3088 if (processor_alias_table[i].flags & PTA_SSE4_1
3089 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
3090 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
3091 if (processor_alias_table[i].flags & PTA_SSE4_2
3092 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
3093 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
3094 if (processor_alias_table[i].flags & PTA_AVX
3095 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
3096 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
3097 if (processor_alias_table[i].flags & PTA_FMA
3098 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
3099 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
3100 if (processor_alias_table[i].flags & PTA_SSE4A
3101 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3102 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3103 if (processor_alias_table[i].flags & PTA_FMA4
3104 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3105 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3106 if (processor_alias_table[i].flags & PTA_XOP
3107 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3108 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3109 if (processor_alias_table[i].flags & PTA_LWP
3110 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3111 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3112 if (processor_alias_table[i].flags & PTA_ABM
3113 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3114 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3115 if (processor_alias_table[i].flags & PTA_CX16
3116 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3117 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3118 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3119 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3120 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3121 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3122 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3123 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3124 if (processor_alias_table[i].flags & PTA_MOVBE
3125 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3126 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3127 if (processor_alias_table[i].flags & PTA_AES
3128 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3129 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3130 if (processor_alias_table[i].flags & PTA_PCLMUL
3131 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3132 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3133 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3134 x86_prefetch_sse = true;
3139 if (!strcmp (ix86_arch_string, "generic"))
3140 error ("generic CPU can be used only for %stune=%s %s",
3141 prefix, suffix, sw);
3142 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3143 error ("bad value (%s) for %sarch=%s %s",
3144 ix86_arch_string, prefix, suffix, sw);
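
/* Worked example (illustrative): with -march=core2 the loop above matches
   the "core2" alias entry, so ix86_arch becomes PROCESSOR_CORE2 and each
   PTA_* bit enables the matching OPTION_MASK_ISA_* bit unless the user set
   that ISA explicitly; e.g. "-march=core2 -mno-ssse3" keeps SSSE3 off
   because OPTION_MASK_ISA_SSSE3 is recorded in ix86_isa_flags_explicit.  */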
3146 ix86_arch_mask = 1u << ix86_arch;
3147 for (i = 0; i < X86_ARCH_LAST; ++i)
3148 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3150 for (i = 0; i < pta_size; i++)
3151 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3153 ix86_schedule = processor_alias_table[i].schedule;
3154 ix86_tune = processor_alias_table[i].processor;
3155 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3157 if (ix86_tune_defaulted)
3159 ix86_tune_string = "x86-64";
3160 for (i = 0; i < pta_size; i++)
3161 if (! strcmp (ix86_tune_string,
3162 processor_alias_table[i].name))
3164 ix86_schedule = processor_alias_table[i].schedule;
3165 ix86_tune = processor_alias_table[i].processor;
3168 error ("CPU you selected does not support x86-64 "
3171 /* Intel CPUs have always interpreted SSE prefetch instructions as
3172 NOPs; so, we can enable SSE prefetch instructions even when
3173 -mtune (rather than -march) points us to a processor that has them.
3174 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3175 higher processors. */
3177 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3178 x86_prefetch_sse = true;
3182 if (ix86_tune_specified && i == pta_size)
3183 error ("bad value (%s) for %stune=%s %s",
3184 ix86_tune_string, prefix, suffix, sw);
3186 ix86_tune_mask = 1u << ix86_tune;
3187 for (i = 0; i < X86_TUNE_LAST; ++i)
3188 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
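
/* Illustrative sketch (not part of GCC): after the loop above, a tuning
   knob is a simple byte test.  With -mtune=core2, ix86_tune_mask == m_CORE2,
   so for example

     ix86_tune_features[X86_TUNE_PAD_RETURNS]
       == !!((m_AMD_MULTIPLE | m_CORE2 | m_GENERIC) & m_CORE2) == 1

   and the TARGET_* tuning macros in i386.h read that array.  */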
3191 ix86_cost = &ix86_size_cost;
3193 ix86_cost = processor_target_table[ix86_tune].cost;
3195 /* Arrange to set up i386_stack_locals for all functions. */
3196 init_machine_status = ix86_init_machine_status;
3198 /* Validate -mregparm= value. */
3199 if (ix86_regparm_string)
3202 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3203 i = atoi (ix86_regparm_string);
3204 if (i < 0 || i > REGPARM_MAX)
3205 error ("%sregparm=%d%s is not between 0 and %d",
3206 prefix, i, suffix, REGPARM_MAX);
3211 ix86_regparm = REGPARM_MAX;
3213 /* If the user has provided any of the -malign-* options,
3214 warn and use that value only if -falign-* is not set.
3215 Remove this code in GCC 3.2 or later. */
3216 if (ix86_align_loops_string)
3218 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3219 prefix, suffix, suffix);
3220 if (align_loops == 0)
3222 i = atoi (ix86_align_loops_string);
3223 if (i < 0 || i > MAX_CODE_ALIGN)
3224 error ("%salign-loops=%d%s is not between 0 and %d",
3225 prefix, i, suffix, MAX_CODE_ALIGN);
3227 align_loops = 1 << i;
3231 if (ix86_align_jumps_string)
3233 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3234 prefix, suffix, suffix);
3235 if (align_jumps == 0)
3237 i = atoi (ix86_align_jumps_string);
3238 if (i < 0 || i > MAX_CODE_ALIGN)
3239 error ("%salign-jumps=%d%s is not between 0 and %d",
3240 prefix, i, suffix, MAX_CODE_ALIGN);
3242 align_jumps = 1 << i;
3246 if (ix86_align_funcs_string)
3248 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3249 prefix, suffix, suffix);
3250 if (align_functions == 0)
3252 i = atoi (ix86_align_funcs_string);
3253 if (i < 0 || i > MAX_CODE_ALIGN)
3254 error ("%salign-functions=%d%s is not between 0 and %d",
3255 prefix, i, suffix, MAX_CODE_ALIGN);
3257 align_functions = 1 << i;
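
/* The obsolete -malign-* values are exponents, not byte counts: a value of
   i yields a 1 << i byte alignment, so e.g. -malign-loops=4 sets
   align_loops to 16, equivalent to -falign-loops=16.  */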
3261 /* Default align_* from the processor table. */
3262 if (align_loops == 0)
3264 align_loops = processor_target_table[ix86_tune].align_loop;
3265 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3267 if (align_jumps == 0)
3269 align_jumps = processor_target_table[ix86_tune].align_jump;
3270 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3272 if (align_functions == 0)
3274 align_functions = processor_target_table[ix86_tune].align_func;
3277 /* Validate -mbranch-cost= value, or provide default. */
3278 ix86_branch_cost = ix86_cost->branch_cost;
3279 if (ix86_branch_cost_string)
3281 i = atoi (ix86_branch_cost_string);
3283 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3285 ix86_branch_cost = i;
3287 if (ix86_section_threshold_string)
3289 i = atoi (ix86_section_threshold_string);
3291 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3293 ix86_section_threshold = i;
3296 if (ix86_tls_dialect_string)
3298 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3299 ix86_tls_dialect = TLS_DIALECT_GNU;
3300 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3301 ix86_tls_dialect = TLS_DIALECT_GNU2;
3303 error ("bad value (%s) for %stls-dialect=%s %s",
3304 ix86_tls_dialect_string, prefix, suffix, sw);
3307 if (ix87_precision_string)
3309 i = atoi (ix87_precision_string);
3310 if (i != 32 && i != 64 && i != 80)
3311 error ("pc%d is not a valid precision setting (32, 64 or 80)", i);
3316 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3318 /* Enable by default the SSE and MMX builtins. Do allow the user to
3319 explicitly disable any of these. In particular, disabling SSE and
3320 MMX for kernel code is extremely useful. */
3321 if (!ix86_arch_specified)
3323 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3324 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3327 warning (0, "%srtd%s is ignored in 64-bit mode", prefix, suffix);
3331 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3333 if (!ix86_arch_specified)
3335 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3337 /* The i386 ABI does not specify a red zone. It still makes sense to use
3338 one when the programmer takes care to keep the stack from being destroyed. */
3339 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3340 target_flags |= MASK_NO_RED_ZONE;
3343 /* Keep nonleaf frame pointers. */
3344 if (flag_omit_frame_pointer)
3345 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3346 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3347 flag_omit_frame_pointer = 1;
3349 /* If we're doing fast math, we don't care about comparison order
3350 wrt NaNs. This lets us use a shorter comparison sequence. */
3351 if (flag_finite_math_only)
3352 target_flags &= ~MASK_IEEE_FP;
3354 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3355 since the insns won't need emulation. */
3356 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3357 target_flags &= ~MASK_NO_FANCY_MATH_387;
3359 /* Likewise, if the target doesn't have a 387, or we've specified
3360 software floating point, don't use 387 inline intrinsics. */
3362 target_flags |= MASK_NO_FANCY_MATH_387;
3364 /* Turn on MMX builtins for -msse. */
3367 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3368 x86_prefetch_sse = true;
3371 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3372 if (TARGET_SSE4_2 || TARGET_ABM)
3373 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3375 /* Validate -mpreferred-stack-boundary= value or default it to
3376 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3377 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3378 if (ix86_preferred_stack_boundary_string)
3380 i = atoi (ix86_preferred_stack_boundary_string);
3381 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3382 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3383 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3385 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
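
/* Worked example: the option value is an exponent over bytes, converted to
   bits.  -mpreferred-stack-boundary=4 yields (1 << 4) * BITS_PER_UNIT
   == 128 bits, the 16-byte alignment the x86-64 psABI requires; the minimum
   accepted exponent is 4 in 64-bit mode and 2 (4 bytes) in 32-bit mode.  */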
3388 /* Set the default value for -mstackrealign. */
3389 if (ix86_force_align_arg_pointer == -1)
3390 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3392 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3394 /* Validate -mincoming-stack-boundary= value or default it to
3395 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3396 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3397 if (ix86_incoming_stack_boundary_string)
3399 i = atoi (ix86_incoming_stack_boundary_string);
3400 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3401 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3402 i, TARGET_64BIT ? 4 : 2);
3405 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3406 ix86_incoming_stack_boundary
3407 = ix86_user_incoming_stack_boundary;
3411 /* Accept -msseregparm only if at least SSE support is enabled. */
3412 if (TARGET_SSEREGPARM
3413       && ! TARGET_SSE)
3414     error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3416 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3417 if (ix86_fpmath_string != 0)
3419 if (! strcmp (ix86_fpmath_string, "387"))
3420 ix86_fpmath = FPMATH_387;
3421 else if (! strcmp (ix86_fpmath_string, "sse"))
3425 warning (0, "SSE instruction set disabled, using 387 arithmetic");
3426 ix86_fpmath = FPMATH_387;
3427 	    }
3428 	  else
3429 	    ix86_fpmath = FPMATH_SSE;
3431 else if (! strcmp (ix86_fpmath_string, "387,sse")
3432 || ! strcmp (ix86_fpmath_string, "387+sse")
3433 || ! strcmp (ix86_fpmath_string, "sse,387")
3434 || ! strcmp (ix86_fpmath_string, "sse+387")
3435 || ! strcmp (ix86_fpmath_string, "both"))
3439 warning (0, "SSE instruction set disabled, using 387 arithmetic");
3440 ix86_fpmath = FPMATH_387;
3442 else if (!TARGET_80387)
3444 warning (0, "387 instruction set disabled, using SSE arithmetic");
3445 ix86_fpmath = FPMATH_SSE;
3446 	    }
3447 	  else
3448 	    ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3450       else
3451         error ("bad value (%s) for %sfpmath=%s %s",
3452 ix86_fpmath_string, prefix, suffix, sw);
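/* Illustrative command lines (editorial example): -mfpmath=387 forces x87
   arithmetic, -mfpmath=sse requires SSE to be enabled, and any of
   "387,sse", "387+sse", "sse,387", "sse+387" or "both" selects
   FPMATH_SSE | FPMATH_387, exactly as parsed above.  */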
3455 /* If the i387 is disabled, then do not return values in it. */
3456   if (!TARGET_80387)
3457     target_flags &= ~MASK_FLOAT_RETURNS;
3459 /* Use external vectorized library in vectorizing intrinsics. */
3460 if (ix86_veclibabi_string)
3462 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3463 ix86_veclib_handler = ix86_veclibabi_svml;
3464 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3465 ix86_veclib_handler = ix86_veclibabi_acml;
3466       else
3467         error ("unknown vectorization library ABI type (%s) for "
3468 "%sveclibabi=%s %s", ix86_veclibabi_string,
3469 prefix, suffix, sw);
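/* Illustrative usage (editorial example): compiling with
     gcc -O2 -ftree-vectorize -mveclibabi=svml file.c
   lets the vectorizer emit calls into Intel's SVML; -mveclibabi=acml does
   the same for AMD's ACML.  Any other name reaches the error branch
   above.  */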
3472 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3473 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3474       && !optimize_size)
3475     target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3477 /* ??? Unwind info is not correct around the CFG unless either a frame
3478 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3479    unwind info generation to be aware of the CFG and propagating states
3480    around edges.  */
3481 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3482 || flag_exceptions || flag_non_call_exceptions)
3483 && flag_omit_frame_pointer
3484 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3486 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3487 warning (0, "unwind tables currently require either a frame pointer "
3488 "or %saccumulate-outgoing-args%s for correctness",
3489 	     prefix, suffix);
3490       target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3493 /* If stack probes are required, the space used for large function
3494 arguments on the stack must also be probed, so enable
3495 -maccumulate-outgoing-args so this happens in the prologue. */
3496 if (TARGET_STACK_PROBE
3497 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3499 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3500 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3501 "for correctness", prefix, suffix);
3502 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3505 /* For sane SSE instruction set generation we need fcomi instruction.
3506 It is safe to enable all CMOVE instructions. */
3510 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3513 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3514 p = strchr (internal_label_prefix, 'X');
3515 internal_label_prefix_len = p - internal_label_prefix;
3519 /* When scheduling description is not available, disable scheduler pass
3520 so it won't slow down the compilation and make x87 code slower. */
3521 if (!TARGET_SCHEDULE)
3522 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3524 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3525 set_param_value ("simultaneous-prefetches",
3526 ix86_cost->simultaneous_prefetches);
3527 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3528 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3529 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3530 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3531 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3532 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
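/* Worked example (editorial note): these calls copy the tuning cost-table
   values into the corresponding --param settings, so a tuning with a
   64-byte prefetch block behaves as if the user had passed
   --param l1-cache-line-size=64.  An explicit --param on the command line
   wins, because each call is guarded by PARAM_SET_P.  */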
3534 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3535 can be optimized to ap = __builtin_next_arg (0). */
3536   if (!TARGET_64BIT)
3537     targetm.expand_builtin_va_start = NULL;
3539   if (TARGET_64BIT)
3540     {
3541       ix86_gen_leave = gen_leave_rex64;
3542 ix86_gen_pop1 = gen_popdi1;
3543 ix86_gen_add3 = gen_adddi3;
3544 ix86_gen_sub3 = gen_subdi3;
3545 ix86_gen_sub3_carry = gen_subdi3_carry;
3546 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3547 ix86_gen_monitor = gen_sse3_monitor64;
3548 ix86_gen_andsp = gen_anddi3;
3549     }
3550   else
3551     {
3552       ix86_gen_leave = gen_leave;
3553 ix86_gen_pop1 = gen_popsi1;
3554 ix86_gen_add3 = gen_addsi3;
3555 ix86_gen_sub3 = gen_subsi3;
3556 ix86_gen_sub3_carry = gen_subsi3_carry;
3557 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3558 ix86_gen_monitor = gen_sse3_monitor;
3559       ix86_gen_andsp = gen_andsi3;
3560     }
3563 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3564   if (!TARGET_64BIT)
3565     target_flags |= MASK_CLD & ~target_flags_explicit;
3568 /* Save the initial options in case the user uses function-specific options.  */
3570 target_option_default_node = target_option_current_node
3571 = build_target_option_node ();
3574 /* Update register usage after having seen the compiler flags. */
3576 void
3577 ix86_conditional_register_usage (void)
3582 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3584 if (fixed_regs[i] > 1)
3585 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3586 if (call_used_regs[i] > 1)
3587 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3590 /* The PIC register, if it exists, is fixed. */
3591 j = PIC_OFFSET_TABLE_REGNUM;
3592 if (j != INVALID_REGNUM)
3593 fixed_regs[j] = call_used_regs[j] = 1;
3595 /* The MS_ABI changes the set of call-used registers. */
3596 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
3598 call_used_regs[SI_REG] = 0;
3599 call_used_regs[DI_REG] = 0;
3600 call_used_regs[XMM6_REG] = 0;
3601 call_used_regs[XMM7_REG] = 0;
3602 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3603 call_used_regs[i] = 0;
3606 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3607 other call-clobbered regs for 64-bit. */
3608   if (TARGET_64BIT)
3609     {
3610       CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3612 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3613 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3614 && call_used_regs[i])
3615 	  SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3616     }
3618 /* If MMX is disabled, squash the registers. */
3619   if (! TARGET_MMX)
3620     for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3621 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3622 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3624 /* If SSE is disabled, squash the registers. */
3625   if (! TARGET_SSE)
3626     for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3627 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3628 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3630 /* If the FPU is disabled, squash the registers. */
3631 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3632 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3633 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3634 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3636 /* If 32-bit, squash the 64-bit registers. */
3637   if (! TARGET_64BIT)
3638     {
3639       for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3640 	reg_names[i] = "";
3641       for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3642 	reg_names[i] = "";
3643     }
3647 /* Save the current options */
3649 static void
3650 ix86_function_specific_save (struct cl_target_option *ptr)
3651 {
3652 ptr->arch = ix86_arch;
3653 ptr->schedule = ix86_schedule;
3654 ptr->tune = ix86_tune;
3655 ptr->fpmath = ix86_fpmath;
3656 ptr->branch_cost = ix86_branch_cost;
3657 ptr->tune_defaulted = ix86_tune_defaulted;
3658 ptr->arch_specified = ix86_arch_specified;
3659 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3660 ptr->target_flags_explicit = target_flags_explicit;
3662 /* The fields are char but the variables are not; make sure the
3663 values fit in the fields. */
3664 gcc_assert (ptr->arch == ix86_arch);
3665 gcc_assert (ptr->schedule == ix86_schedule);
3666 gcc_assert (ptr->tune == ix86_tune);
3667 gcc_assert (ptr->fpmath == ix86_fpmath);
3668 gcc_assert (ptr->branch_cost == ix86_branch_cost);
3671 /* Restore the current options */
3673 static void
3674 ix86_function_specific_restore (struct cl_target_option *ptr)
3675 {
3676 enum processor_type old_tune = ix86_tune;
3677 enum processor_type old_arch = ix86_arch;
3678 unsigned int ix86_arch_mask, ix86_tune_mask;
3681 ix86_arch = (enum processor_type) ptr->arch;
3682 ix86_schedule = (enum attr_cpu) ptr->schedule;
3683 ix86_tune = (enum processor_type) ptr->tune;
3684 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
3685 ix86_branch_cost = ptr->branch_cost;
3686 ix86_tune_defaulted = ptr->tune_defaulted;
3687 ix86_arch_specified = ptr->arch_specified;
3688 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3689 target_flags_explicit = ptr->target_flags_explicit;
3691 /* Recreate the arch feature tests if the arch changed */
3692 if (old_arch != ix86_arch)
3694 ix86_arch_mask = 1u << ix86_arch;
3695 for (i = 0; i < X86_ARCH_LAST; ++i)
3696 ix86_arch_features[i]
3697 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3700 /* Recreate the tune optimization tests */
3701 if (old_tune != ix86_tune)
3703 ix86_tune_mask = 1u << ix86_tune;
3704 for (i = 0; i < X86_TUNE_LAST; ++i)
3705 ix86_tune_features[i]
3706 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3710 /* Print the current options */
3712 static void
3713 ix86_function_specific_print (FILE *file, int indent,
3714 struct cl_target_option *ptr)
3716   char *target_string
3717     = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3718 NULL, NULL, NULL, false);
3720   fprintf (file, "%*sarch = %d (%s)\n",
3721 	   indent, "",
3722 	   ptr->arch,
3723 	   ((ptr->arch < TARGET_CPU_DEFAULT_max)
3724 	    ? cpu_names[ptr->arch]
3725 	    : "<unknown>"));
3727   fprintf (file, "%*stune = %d (%s)\n",
3728 	   indent, "",
3729 	   ptr->tune,
3730 	   ((ptr->tune < TARGET_CPU_DEFAULT_max)
3731 	    ? cpu_names[ptr->tune]
3732 	    : "<unknown>"));
3734 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3735 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3736 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3737 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3739   if (target_string)
3740     {
3741       fprintf (file, "%*s%s\n", indent, "", target_string);
3742       free (target_string);
3743     }
3747 /* Inner function to process the attribute((target(...))), take an argument and
3748    set the current options from the argument.  If we have a list, recursively go
3749    over the list.  */
3751 static bool
3752 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3757 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3758 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3759 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3760 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3775 enum ix86_opt_type type;
3780 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3781 IX86_ATTR_ISA ("abm", OPT_mabm),
3782 IX86_ATTR_ISA ("aes", OPT_maes),
3783 IX86_ATTR_ISA ("avx", OPT_mavx),
3784 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3785 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3786 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3787 IX86_ATTR_ISA ("sse", OPT_msse),
3788 IX86_ATTR_ISA ("sse2", OPT_msse2),
3789 IX86_ATTR_ISA ("sse3", OPT_msse3),
3790 IX86_ATTR_ISA ("sse4", OPT_msse4),
3791 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3792 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3793 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3794 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3795 IX86_ATTR_ISA ("fma4", OPT_mfma4),
3796 IX86_ATTR_ISA ("xop", OPT_mxop),
3797 IX86_ATTR_ISA ("lwp", OPT_mlwp),
3799 /* string options */
3800 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3801 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3802 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3805 IX86_ATTR_YES ("cld",
3809 IX86_ATTR_NO ("fancy-math-387",
3810 OPT_mfancy_math_387,
3811 MASK_NO_FANCY_MATH_387),
3813 IX86_ATTR_YES ("ieee-fp",
3817 IX86_ATTR_YES ("inline-all-stringops",
3818 OPT_minline_all_stringops,
3819 MASK_INLINE_ALL_STRINGOPS),
3821 IX86_ATTR_YES ("inline-stringops-dynamically",
3822 OPT_minline_stringops_dynamically,
3823 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3825 IX86_ATTR_NO ("align-stringops",
3826 OPT_mno_align_stringops,
3827 MASK_NO_ALIGN_STRINGOPS),
3829 IX86_ATTR_YES ("recip",
3835 /* If this is a list, recurse to get the options. */
3836 if (TREE_CODE (args) == TREE_LIST)
3840 for (; args; args = TREE_CHAIN (args))
3841 if (TREE_VALUE (args)
3842 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3848   else if (TREE_CODE (args) != STRING_CST)
3849     gcc_unreachable ();
3851 /* Handle multiple arguments separated by commas. */
3852 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3854 while (next_optstr && *next_optstr != '\0')
3856 char *p = next_optstr;
3858 char *comma = strchr (next_optstr, ',');
3859 const char *opt_string;
3860 size_t len, opt_len;
3865 enum ix86_opt_type type = ix86_opt_unknown;
3871 len = comma - next_optstr;
3872 next_optstr = comma + 1;
3880 /* Recognize no-xxx. */
3881 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3890 /* Find the option. */
3893 for (i = 0; i < ARRAY_SIZE (attrs); i++)
3895 type = attrs[i].type;
3896 opt_len = attrs[i].len;
3897 if (ch == attrs[i].string[0]
3898 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3899 && memcmp (p, attrs[i].string, opt_len) == 0)
3902 mask = attrs[i].mask;
3903 opt_string = attrs[i].string;
3908 /* Process the option. */
3911 error ("attribute(target(\"%s\")) is unknown", orig_p);
3915 else if (type == ix86_opt_isa)
3916 ix86_handle_option (opt, p, opt_set_p);
3918 else if (type == ix86_opt_yes || type == ix86_opt_no)
3920 if (type == ix86_opt_no)
3921 opt_set_p = !opt_set_p;
3923 	  if (opt_set_p)
3924 	    target_flags |= mask;
3925 	  else
3926 	    target_flags &= ~mask;
3929 else if (type == ix86_opt_str)
3933 error ("option(\"%s\") was already specified", opt_string);
3936 	  else
3937 	    p_strings[opt] = xstrdup (p + opt_len);
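/* Illustrative attribute strings (editorial example) accepted by this
   parser:
     __attribute__((target ("sse4.2,no-fancy-math-387")))
     __attribute__((target ("arch=core2", "fpmath=sse")))
   Comma-separated items are split above, a leading "no-" inverts
   opt_set_p, ISA items are routed through ix86_handle_option, and
   string items such as arch= are saved in p_strings[].  */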
3947 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3949 static tree
3950 ix86_valid_target_attribute_tree (tree args)
3952 const char *orig_arch_string = ix86_arch_string;
3953 const char *orig_tune_string = ix86_tune_string;
3954 const char *orig_fpmath_string = ix86_fpmath_string;
3955 int orig_tune_defaulted = ix86_tune_defaulted;
3956 int orig_arch_specified = ix86_arch_specified;
3957 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3960 struct cl_target_option *def
3961 = TREE_TARGET_OPTION (target_option_default_node);
3963 /* Process each of the options on the chain. */
3964   if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3965     return NULL_TREE;
3967 /* If the changed options are different from the default, rerun override_options,
3968    and then save the options away.  The string options are attribute options,
3969 and will be undone when we copy the save structure. */
3970 if (ix86_isa_flags != def->ix86_isa_flags
3971 || target_flags != def->target_flags
3972 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3973 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3974 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3976 /* If we are using the default tune= or arch=, undo the string assigned,
3977 and use the default. */
3978 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3979 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3980 else if (!orig_arch_specified)
3981 ix86_arch_string = NULL;
3983 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3984 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3985 else if (orig_tune_defaulted)
3986 ix86_tune_string = NULL;
3988 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
3989 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3990 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3991 else if (!TARGET_64BIT && TARGET_SSE)
3992 ix86_fpmath_string = "sse,387";
3994 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3995 override_options (false);
3997 /* Add any builtin functions with the new isa if any. */
3998 ix86_add_new_builtins (ix86_isa_flags);
4000   /* Save the current options unless we are validating options for
4001      #pragma.  */
4002 t = build_target_option_node ();
4004 ix86_arch_string = orig_arch_string;
4005 ix86_tune_string = orig_tune_string;
4006 ix86_fpmath_string = orig_fpmath_string;
4008 /* Free up memory allocated to hold the strings */
4009 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
4010 if (option_strings[i])
4011 free (option_strings[i]);
4017 /* Hook to validate attribute((target("string"))). */
4019 static bool
4020 ix86_valid_target_attribute_p (tree fndecl,
4021 tree ARG_UNUSED (name),
4022 			       tree args,
4023 			       int ARG_UNUSED (flags))
4025 struct cl_target_option cur_target;
4027 tree old_optimize = build_optimization_node ();
4028 tree new_target, new_optimize;
4029 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
4031 /* If the function changed the optimization levels as well as setting target
4032 options, start with the optimizations specified. */
4033 if (func_optimize && func_optimize != old_optimize)
4034 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
4036 /* The target attributes may also change some optimization flags, so update
4037 the optimization options if necessary. */
4038 cl_target_option_save (&cur_target);
4039 new_target = ix86_valid_target_attribute_tree (args);
4040 new_optimize = build_optimization_node ();
4047 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
4049 if (old_optimize != new_optimize)
4050 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
4053 cl_target_option_restore (&cur_target);
4055 if (old_optimize != new_optimize)
4056 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
4062 /* Hook to determine if one function can safely inline another. */
4064 static bool
4065 ix86_can_inline_p (tree caller, tree callee)
4068 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
4069 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
4071 /* If callee has no option attributes, then it is ok to inline. */
4075 /* If caller has no option attributes, but callee does then it is not ok to
4077 else if (!caller_tree)
4082 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
4083 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
4085   /* Callee's isa options should be a subset of the caller's, i.e. a SSE4
4086      function can inline a SSE2 function but a SSE2 function can't inline
4087      a SSE4 function.  */
4088 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
4089 != callee_opts->ix86_isa_flags)
4092 /* See if we have the same non-isa options. */
4093 else if (caller_opts->target_flags != callee_opts->target_flags)
4096 /* See if arch, tune, etc. are the same. */
4097 else if (caller_opts->arch != callee_opts->arch)
4100 else if (caller_opts->tune != callee_opts->tune)
4103 else if (caller_opts->fpmath != callee_opts->fpmath)
4106 else if (caller_opts->branch_cost != callee_opts->branch_cost)
4117 /* Remember the last target of ix86_set_current_function. */
4118 static GTY(()) tree ix86_previous_fndecl;
4120 /* Establish appropriate back-end context for processing the function
4121 FNDECL. The argument might be NULL to indicate processing at top
4122 level, outside of any function scope. */
4123 static void
4124 ix86_set_current_function (tree fndecl)
4126 /* Only change the context if the function changes. This hook is called
4127 several times in the course of compiling a function, and we don't want to
4128 slow things down too much or call target_reinit when it isn't safe. */
4129 if (fndecl && fndecl != ix86_previous_fndecl)
4131 tree old_tree = (ix86_previous_fndecl
4132 		       ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4133 		       : NULL_TREE);
4135 tree new_tree = (fndecl
4136 		       ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4137 		       : NULL_TREE);
4139 ix86_previous_fndecl = fndecl;
4140 if (old_tree == new_tree)
4145 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
4151 struct cl_target_option *def
4152 = TREE_TARGET_OPTION (target_option_current_node);
4154 cl_target_option_restore (def);
4161 /* Return true if this goes in large data/bss. */
4163 static bool
4164 ix86_in_large_data_p (tree exp)
4166   if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4167     return false;
4169 /* Functions are never large data. */
4170   if (TREE_CODE (exp) == FUNCTION_DECL)
4171     return false;
4173 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4175 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4176 if (strcmp (section, ".ldata") == 0
4177 	  || strcmp (section, ".lbss") == 0)
4178 	return true;
4183 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4185 /* If this is an incomplete type with size 0, then we can't put it
4186 in data because it might be too big when completed. */
4187   if (!size || size > ix86_section_threshold)
4188     return true;
4194 /* Switch to the appropriate section for output of DECL.
4195 DECL is either a `VAR_DECL' node or a constant of some sort.
4196 RELOC indicates whether forming the initial value of DECL requires
4197 link-time relocations. */
4199 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4203 x86_64_elf_select_section (tree decl, int reloc,
4204 unsigned HOST_WIDE_INT align)
4206 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4207 && ix86_in_large_data_p (decl))
4209 const char *sname = NULL;
4210 unsigned int flags = SECTION_WRITE;
4211 switch (categorize_decl_for_section (decl, reloc))
4216 case SECCAT_DATA_REL:
4217 sname = ".ldata.rel";
4219 case SECCAT_DATA_REL_LOCAL:
4220 sname = ".ldata.rel.local";
4222 case SECCAT_DATA_REL_RO:
4223 sname = ".ldata.rel.ro";
4225 case SECCAT_DATA_REL_RO_LOCAL:
4226 sname = ".ldata.rel.ro.local";
4230 flags |= SECTION_BSS;
4233 case SECCAT_RODATA_MERGE_STR:
4234 case SECCAT_RODATA_MERGE_STR_INIT:
4235 case SECCAT_RODATA_MERGE_CONST:
4239 case SECCAT_SRODATA:
4246 /* We don't split these for medium model. Place them into
4247 	 default sections and hope for the best.  */
4249 case SECCAT_EMUTLS_VAR:
4250 case SECCAT_EMUTLS_TMPL:
4255 /* We might get called with string constants, but get_named_section
4256 doesn't like them as they are not DECLs. Also, we need to set
4257 flags in that case. */
4258       if (!DECL_P (decl))
4259 	return get_section (sname, flags, NULL);
4260 return get_named_section (decl, sname, reloc);
4263 return default_elf_select_section (decl, reloc, align);
4266 /* Build up a unique section name, expressed as a
4267 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4268 RELOC indicates whether the initial value of EXP requires
4269 link-time relocations. */
4271 static void ATTRIBUTE_UNUSED
4272 x86_64_elf_unique_section (tree decl, int reloc)
4274 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4275 && ix86_in_large_data_p (decl))
4277 const char *prefix = NULL;
4278 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4279 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4281 switch (categorize_decl_for_section (decl, reloc))
4284 case SECCAT_DATA_REL:
4285 case SECCAT_DATA_REL_LOCAL:
4286 case SECCAT_DATA_REL_RO:
4287 case SECCAT_DATA_REL_RO_LOCAL:
4288 prefix = one_only ? ".ld" : ".ldata";
4291 prefix = one_only ? ".lb" : ".lbss";
4294 case SECCAT_RODATA_MERGE_STR:
4295 case SECCAT_RODATA_MERGE_STR_INIT:
4296 case SECCAT_RODATA_MERGE_CONST:
4297 prefix = one_only ? ".lr" : ".lrodata";
4299 case SECCAT_SRODATA:
4306 /* We don't split these for medium model. Place them into
4307 	 default sections and hope for the best.  */
4309 case SECCAT_EMUTLS_VAR:
4310 prefix = targetm.emutls.var_section;
4312 case SECCAT_EMUTLS_TMPL:
4313 prefix = targetm.emutls.tmpl_section;
4318 	  const char *name, *linkonce;
4319 	  char *string;
4321 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4322 name = targetm.strip_name_encoding (name);
4324 /* If we're using one_only, then there needs to be a .gnu.linkonce
4325 prefix to the section name. */
4326 linkonce = one_only ? ".gnu.linkonce" : "";
4328 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4330 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4334 default_unique_section (decl, reloc);
4337 #ifdef COMMON_ASM_OP
4338 /* This says how to output assembler code to declare an
4339 uninitialized external linkage data object.
4341    For medium model x86-64 we need to use the .largecomm opcode for
4342    external bss symbols.  */
4343 void
4344 x86_elf_aligned_common (FILE *file,
4345 const char *name, unsigned HOST_WIDE_INT size,
4348 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4349 && size > (unsigned int)ix86_section_threshold)
4350 fputs (".largecomm\t", file);
4351   else
4352     fputs (COMMON_ASM_OP, file);
4353 assemble_name (file, name);
4354 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4355 size, align / BITS_PER_UNIT);
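/* Illustrative output (editorial example; exact operands depend on the
   object): under -mcmodel=medium, a zero-initialized object larger than
   the -mlarge-data-threshold emits
     .largecomm	big_buf,1048576,32
   while a small object keeps the ordinary
     .comm	small_buf,16,16
   directive.  */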
4359 /* Utility function for targets to use in implementing
4360 ASM_OUTPUT_ALIGNED_BSS. */
4362 void
4363 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4364 const char *name, unsigned HOST_WIDE_INT size,
4367 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4368 && size > (unsigned int)ix86_section_threshold)
4369 switch_to_section (get_named_section (decl, ".lbss", 0));
4370   else
4371     switch_to_section (bss_section);
4372 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4373 #ifdef ASM_DECLARE_OBJECT_NAME
4374 last_assemble_variable_decl = decl;
4375 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4376 #else
4377   /* The standard thing is just to output a label for the object.  */
4378 ASM_OUTPUT_LABEL (file, name);
4379 #endif /* ASM_DECLARE_OBJECT_NAME */
4380 ASM_OUTPUT_SKIP (file, size ? size : 1);
4383 void
4384 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4386 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4387 make the problem with not enough registers even worse. */
4388 #ifdef INSN_SCHEDULING
4389   if (level > 1)
4390     flag_schedule_insns = 0;
4393 /* For -O2 and beyond, turn on -fzee for x86_64 target. */
4394   if (level > 1 && TARGET_64BIT)
4395     flag_zee = 1;
4397   if (TARGET_MACHO)
4398     /* The Darwin libraries never set errno, so we might as well
4399 avoid calling them when that's the only reason we would. */
4400 flag_errno_math = 0;
4402   /* The default values of these switches depend on TARGET_64BIT,
4403      which is not known at this moment.  Mark these values with 2 and
4404      let the user override them.  In case there is no command line option
4405 specifying them, we will set the defaults in override_options. */
4407 flag_omit_frame_pointer = 2;
4408 flag_pcc_struct_return = 2;
4409 flag_asynchronous_unwind_tables = 2;
4410 flag_vect_cost_model = 1;
4411 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4412 SUBTARGET_OPTIMIZATION_OPTIONS;
4416 /* Decide whether we can make a sibling call to a function. DECL is the
4417 declaration of the function being targeted by the call and EXP is the
4418 CALL_EXPR representing the call. */
4421 ix86_function_ok_for_sibcall (tree decl, tree exp)
4423 tree type, decl_or_type;
4426 /* If we are generating position-independent code, we cannot sibcall
4427 optimize any indirect call, or a direct call to a global function,
4428 as the PLT requires %ebx be live. */
4429   if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4430     return false;
4432 /* If we need to align the outgoing stack, then sibcalling would
4433 unalign the stack, which may break the called function. */
4434 if (ix86_minimum_incoming_stack_boundary (true)
4435       < PREFERRED_STACK_BOUNDARY)
4436     return false;
4438   if (decl)
4439     {
4440       decl_or_type = decl;
4441 type = TREE_TYPE (decl);
4442     }
4443   else
4444     {
4445       /* We're looking at the CALL_EXPR, we need the type of the function.  */
4446 type = CALL_EXPR_FN (exp); /* pointer expression */
4447 type = TREE_TYPE (type); /* pointer type */
4448 type = TREE_TYPE (type); /* function type */
4449       decl_or_type = type;
4450     }
4452   /* Check that the return value locations are the same.  For example,
4453 if we are returning floats on the 80387 register stack, we cannot
4454 make a sibcall from a function that doesn't return a float to a
4455 function that does or, conversely, from a function that does return
4456 a float to a function that doesn't; the necessary stack adjustment
4457 would not be executed. This is also the place we notice
4458 differences in the return value ABI. Note that it is ok for one
4459 of the functions to have void return type as long as the return
4460 value of the other is passed in a register. */
4461 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4462 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4464 if (STACK_REG_P (a) || STACK_REG_P (b))
4465     {
4466       if (!rtx_equal_p (a, b))
4467 	return false;
4468     }
4469   else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4470     ;
4471   else if (!rtx_equal_p (a, b))
4472     return false;
4476 /* The SYSV ABI has more call-clobbered registers;
4477 disallow sibcalls from MS to SYSV. */
4478 if (cfun->machine->call_abi == MS_ABI
4479       && ix86_function_type_abi (type) == SYSV_ABI)
4480     return false;
4484 /* If this call is indirect, we'll need to be able to use a
4485 call-clobbered register for the address of the target function.
4486 Make sure that all such registers are not used for passing
4487 parameters. Note that DLLIMPORT functions are indirect. */
4488   if (!decl
4489       || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4491 if (ix86_function_regparm (type, NULL) >= 3)
4493 /* ??? Need to count the actual number of registers to be used,
4494 	 not the possible number of registers.  Fix later.  */
4495       return false;
4500   /* Otherwise okay.  That also includes certain types of indirect calls.  */
4501   return true;
4504 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
4505 and "sseregparm" calling convention attributes;
4506    arguments as in struct attribute_spec.handler.  */
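/* Illustrative declarations (editorial example) that this handler
   validates:
     int __attribute__((fastcall)) f (int a, int b);   - args in ECX, EDX
     int __attribute__((stdcall)) g (int a);           - callee pops args
     int __attribute__((regparm (2))) h (int a, int b);
   Conflicting combinations such as fastcall + regparm are rejected
   below.  */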
4509 ix86_handle_cconv_attribute (tree *node, tree name,
4511 int flags ATTRIBUTE_UNUSED,
4514 if (TREE_CODE (*node) != FUNCTION_TYPE
4515 && TREE_CODE (*node) != METHOD_TYPE
4516 && TREE_CODE (*node) != FIELD_DECL
4517 && TREE_CODE (*node) != TYPE_DECL)
4519 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4520 	       name);
4521       *no_add_attrs = true;
4525 /* Can combine regparm with all attributes but fastcall. */
4526 if (is_attribute_p ("regparm", name))
4530 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4532 error ("fastcall and regparm attributes are not compatible");
4535 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4537 error ("regparm and thiscall attributes are not compatible");
4540 cst = TREE_VALUE (args);
4541 if (TREE_CODE (cst) != INTEGER_CST)
4543 warning (OPT_Wattributes,
4544 "%qE attribute requires an integer constant argument",
4545 		   name);
4546 	  *no_add_attrs = true;
4548 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4550 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4551 		   name, REGPARM_MAX);
4552 	  *no_add_attrs = true;
4560 /* Do not warn when emulating the MS ABI. */
4561 if ((TREE_CODE (*node) != FUNCTION_TYPE
4562 && TREE_CODE (*node) != METHOD_TYPE)
4563 || ix86_function_type_abi (*node) != MS_ABI)
4564 warning (OPT_Wattributes, "%qE attribute ignored",
4565 		 name);
4566       *no_add_attrs = true;
4570 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4571 if (is_attribute_p ("fastcall", name))
4573 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4575 error ("fastcall and cdecl attributes are not compatible");
4577 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4579 error ("fastcall and stdcall attributes are not compatible");
4581 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4583 error ("fastcall and regparm attributes are not compatible");
4585 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4587 error ("fastcall and thiscall attributes are not compatible");
4591 /* Can combine stdcall with fastcall (redundant), regparm and
4593 else if (is_attribute_p ("stdcall", name))
4595 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4597 error ("stdcall and cdecl attributes are not compatible");
4599 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4601 error ("stdcall and fastcall attributes are not compatible");
4603 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4605 error ("stdcall and thiscall attributes are not compatible");
4609 /* Can combine cdecl with regparm and sseregparm. */
4610 else if (is_attribute_p ("cdecl", name))
4612 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4614 error ("stdcall and cdecl attributes are not compatible");
4616 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4618 error ("fastcall and cdecl attributes are not compatible");
4620 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4622 error ("cdecl and thiscall attributes are not compatible");
4625 else if (is_attribute_p ("thiscall", name))
4627 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
4628 	warning (OPT_Wattributes, "%qE attribute is used for non-class method",
4629 		 name);
4630 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4632 error ("stdcall and thiscall attributes are not compatible");
4634 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4636 error ("fastcall and thiscall attributes are not compatible");
4638 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4640 error ("cdecl and thiscall attributes are not compatible");
4644   /* Can combine sseregparm with all attributes.  */
4646   return NULL_TREE;
4649 /* Return 0 if the attributes for two types are incompatible, 1 if they
4650 are compatible, and 2 if they are nearly compatible (which causes a
4651 warning to be generated). */
4654 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4656 /* Check for mismatch of non-default calling convention. */
4657 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4659 if (TREE_CODE (type1) != FUNCTION_TYPE
4660       && TREE_CODE (type1) != METHOD_TYPE)
4661     return 1;
4663 /* Check for mismatched fastcall/regparm types. */
4664 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4665 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4666 || (ix86_function_regparm (type1, NULL)
4667 	  != ix86_function_regparm (type2, NULL)))
4668     return 0;
4670 /* Check for mismatched sseregparm types. */
4671 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4672       != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4673     return 0;
4675 /* Check for mismatched thiscall types. */
4676 if (!lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type1))
4677       != !lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type2)))
4678     return 0;
4680 /* Check for mismatched return types (cdecl vs stdcall). */
4681 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4682       != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4683     return 0;
4685   return 1;
4688 /* Return the regparm value for a function with the indicated TYPE and DECL.
4689 DECL may be NULL when calling function indirectly
4690 or considering a libcall. */
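/* Illustrative usage (editorial example):
     int __attribute__((regparm (3))) f (int a, int b, int c);
   passes a, b and c in EAX, EDX and ECX; fastcall implies regparm(2)
   and thiscall regparm(1), as computed below.  */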
4693 ix86_function_regparm (const_tree type, const_tree decl)
4697   if (TARGET_64BIT)
4699     return (ix86_function_type_abi (type) == SYSV_ABI
4700 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4702 regparm = ix86_regparm;
4703 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4704   if (attr)
4706     regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4710   if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4711     return 2;
4713   if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
4714     return 1;
4716 /* Use register calling convention for local functions when possible. */
4717   if (decl
4718       && TREE_CODE (decl) == FUNCTION_DECL
4722 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4723 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
4726 int local_regparm, globals = 0, regno;
4728 /* Make sure no regparm register is taken by a
4729 fixed register variable. */
4730 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4731 if (fixed_regs[local_regparm])
4734 /* We don't want to use regparm(3) for nested functions as
4735 these use a static chain pointer in the third argument. */
4736 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
4739 	  /* Each fixed register usage increases register pressure,
4740 	     so fewer registers should be used for argument passing.
4741 	     This functionality can be overridden by an explicit
4742 	     regparm value.  */
4743 for (regno = 0; regno <= DI_REG; regno++)
4744 	    if (fixed_regs[regno])
4745 	      globals++;
4747 	  local_regparm
4748 	    = globals < local_regparm ? local_regparm - globals : 0;
4750 if (local_regparm > regparm)
4751 regparm = local_regparm;
4758 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4759 DFmode (2) arguments in SSE registers for a function with the
4760 indicated TYPE and DECL. DECL may be NULL when calling function
4761 indirectly or considering a libcall. Otherwise return 0. */
4764 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4766 gcc_assert (!TARGET_64BIT);
4768 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4769 by the sseregparm attribute. */
4770 if (TARGET_SSEREGPARM
4771 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4778 error ("Calling %qD with attribute sseregparm without "
4779 "SSE/SSE2 enabled", decl);
4780 	      else
4781 		error ("Calling %qT with attribute sseregparm without "
4782 "SSE/SSE2 enabled", type);
4790 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4791 (and DFmode for SSE2) arguments in SSE registers. */
4792 if (decl && TARGET_SSE_MATH && optimize && !profile_flag)
4794 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4795 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4796       if (i && i->local)
4797 	return TARGET_SSE2 ? 2 : 1;
4803 /* Return true if EAX is live at the start of the function. Used by
4804 ix86_expand_prologue to determine if we need special help before
4805 calling allocate_stack_worker. */
4808 ix86_eax_live_at_start_p (void)
4810 /* Cheat. Don't bother working forward from ix86_function_regparm
4811 to the function type to whether an actual argument is located in
4812 eax. Instead just look at cfg info, which is still close enough
4813 to correct at this point. This gives false positives for broken
4814 functions that might use uninitialized data that happens to be
4815 allocated in eax, but who cares? */
4816 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4819 /* Value is the number of bytes of arguments automatically
4820 popped when returning from a subroutine call.
4821 FUNDECL is the declaration node of the function (as a tree),
4822 FUNTYPE is the data type of the function (as a tree),
4823 or for a library call it is an identifier node for the subroutine name.
4824 SIZE is the number of bytes of arguments passed on the stack.
4826 On the 80386, the RTD insn may be used to pop them if the number
4827 of args is fixed, but if the number is variable then the caller
4828 must pop them all. RTD can't be used for library calls now
4829 because the library is compiled with the Unix compiler.
4830 Use of RTD is a selectable option, since it is incompatible with
4831 standard Unix calling sequences. If the option is not selected,
4832 the caller must always pop the args.
4834 The attribute stdcall is equivalent to RTD on a per module basis. */
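/* Illustrative effect (editorial example): for
     void __attribute__((stdcall)) f (int a, int b);
   the callee returns with "ret $8", so this hook reports 8 bytes popped;
   a cdecl function always reports 0 and the caller adjusts %esp.  */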
4836 int
4837 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4841   /* None of the 64-bit ABIs pop arguments.  */
4842   if (TARGET_64BIT)
4843     return 0;
4845 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4847 /* Cdecl functions override -mrtd, and never pop the stack. */
4848 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4850       /* Stdcall and fastcall functions will pop the stack if not
4851 	 variable args.  */
4852 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4853 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype))
4854 	  || lookup_attribute ("thiscall", TYPE_ATTRIBUTES (funtype)))
4855 	rtd = 1;
4857       if (rtd && ! stdarg_p (funtype))
4858 	return size;
4861 /* Lose any fake structure return argument if it is passed on the stack. */
4862 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4863 && !KEEP_AGGREGATE_RETURN_POINTER)
4865 int nregs = ix86_function_regparm (funtype, fundecl);
4866       if (nregs == 0)
4867 	return GET_MODE_SIZE (Pmode);
4873 /* Argument support functions. */
4875 /* Return true when register may be used to pass function parameters. */
4876 bool
4877 ix86_function_arg_regno_p (int regno)
4880 const int *parm_regs;
4885 return (regno < REGPARM_MAX
4886 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4887       else
4888 	return (regno < REGPARM_MAX
4889 || (TARGET_MMX && MMX_REGNO_P (regno)
4890 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4891 || (TARGET_SSE && SSE_REGNO_P (regno)
4892 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4897       if (SSE_REGNO_P (regno) && TARGET_SSE)
4898 	return true;
4902 if (TARGET_SSE && SSE_REGNO_P (regno)
4903       && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4904     return true;
4907 /* TODO: The function should depend on current function ABI but
4908      builtins.c would need updating then.  Therefore we use the
4909      default ABI.  */
4911   /* RAX is used as a hidden argument to va_arg functions.  */
4912   if (ix86_abi == SYSV_ABI && regno == AX_REG)
4913     return true;
4915 if (ix86_abi == MS_ABI)
4916 parm_regs = x86_64_ms_abi_int_parameter_registers;
4917   else
4918     parm_regs = x86_64_int_parameter_registers;
4919 for (i = 0; i < (ix86_abi == MS_ABI
4920 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
4921     if (regno == parm_regs[i])
4922       return true;
4923   return false;
4926 /* Return true if we do not know how to pass TYPE solely in registers.  */
4929 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4931   if (must_pass_in_stack_var_size_or_pad (mode, type))
4932     return true;
4934 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4935 The layout_type routine is crafty and tries to trick us into passing
4936 currently unsupported vector types on the stack by using TImode. */
4937 return (!TARGET_64BIT && mode == TImode
4938 && type && TREE_CODE (type) != VECTOR_TYPE);
4941 /* Return the size, in bytes, of the area reserved for arguments passed
4942    in registers for the function represented by FNDECL, depending on the
4943    ABI used.  */
4944 int
4945 ix86_reg_parm_stack_space (const_tree fndecl)
4947 enum calling_abi call_abi = SYSV_ABI;
4948 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
4949 call_abi = ix86_function_abi (fndecl);
4950   else
4951     call_abi = ix86_function_type_abi (fndecl);
4952   if (call_abi == MS_ABI)
4953     return 32;
4954   return 0;
4957 /* Returns value SYSV_ABI, MS_ABI dependent on fntype, specifying the
4958    call abi used.  */
4959 enum calling_abi
4960 ix86_function_type_abi (const_tree fntype)
4962 if (TARGET_64BIT && fntype != NULL)
4964 enum calling_abi abi = ix86_abi;
4965 if (abi == SYSV_ABI)
4967 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
4968 	    abi = MS_ABI;
4969 	}
4970       else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
4971 	abi = SYSV_ABI;
4972       return abi;
4977 static bool
4978 ix86_function_ms_hook_prologue (const_tree fntype)
4982 if (lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fntype)))
4984 if (decl_function_context (fntype) != NULL_TREE)
4986 error_at (DECL_SOURCE_LOCATION (fntype),
4987 "ms_hook_prologue is not compatible with nested function");
4996 static enum calling_abi
4997 ix86_function_abi (const_tree fndecl)
4998 {
4999   if (fndecl == NULL_TREE)
5000     return ix86_abi;
5001   return ix86_function_type_abi (TREE_TYPE (fndecl));
5004 /* Returns value SYSV_ABI, MS_ABI dependent on cfun, specifying the
5005    call abi used.  */
5006 enum calling_abi
5007 ix86_cfun_abi (void)
5008 {
5009   if (! cfun || ! TARGET_64BIT)
5010     return ix86_abi;
5011 return cfun->machine->call_abi;
5015 extern void init_regs (void);
5017 /* Implementation of call abi switching target hook. Specific to FNDECL
5018 the specific call register sets are set. See also CONDITIONAL_REGISTER_USAGE
5019 for more details. */
5021 ix86_call_abi_override (const_tree fndecl)
5023 if (fndecl == NULL_TREE)
5024 cfun->machine->call_abi = ix86_abi;
5026 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
5029 /* MS and SYSV ABI have different sets of call-used registers.  Avoid expensive
5030 re-initialization of init_regs each time we switch function context since
5031 this is needed only during RTL expansion. */
5032 static void
5033 ix86_maybe_switch_abi (void)
5035   if (TARGET_64BIT &&
5036       call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
5037     reinit_regs ();
5040 /* Initialize a variable CUM of type CUMULATIVE_ARGS
5041 for a call to a function whose data type is FNTYPE.
5042 For a library call, FNTYPE is 0. */
5045 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
5046 tree fntype, /* tree ptr for function decl */
5047 rtx libname, /* SYMBOL_REF of library name or 0 */
5050 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
5051 memset (cum, 0, sizeof (*cum));
5053   if (fndecl)
5054     cum->call_abi = ix86_function_abi (fndecl);
5055   else
5056     cum->call_abi = ix86_function_type_abi (fntype);
5057 /* Set up the number of registers to use for passing arguments. */
5059 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
5060 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
5061 "or subtarget optimization implying it");
5062 cum->nregs = ix86_regparm;
5065 if (cum->call_abi != ix86_abi)
5066 cum->nregs = (ix86_abi != SYSV_ABI
5067 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
5071 cum->sse_nregs = SSE_REGPARM_MAX;
5074 if (cum->call_abi != ix86_abi)
5075 cum->sse_nregs = (ix86_abi != SYSV_ABI
5076 ? X86_64_SSE_REGPARM_MAX
5077 : X86_64_MS_SSE_REGPARM_MAX);
5081 cum->mmx_nregs = MMX_REGPARM_MAX;
5082 cum->warn_avx = true;
5083 cum->warn_sse = true;
5084 cum->warn_mmx = true;
5086 /* Because type might mismatch in between caller and callee, we need to
5087 use actual type of function for local calls.
5088 FIXME: cgraph_analyze can be told to actually record if function uses
5089 va_start so for local functions maybe_vaarg can be made aggressive
5090      helping K&R code.
5091      FIXME: once the type system is fixed, we won't need this code anymore.  */
5092   if (i && i->local)
5093     fntype = TREE_TYPE (fndecl);
5094 cum->maybe_vaarg = (fntype
5095 ? (!prototype_p (fntype) || stdarg_p (fntype))
5100 /* If there are variable arguments, then we won't pass anything
5101 in registers in 32-bit mode. */
5102 if (stdarg_p (fntype))
5113 /* Use ecx and edx registers if function has fastcall attribute,
5114 else look for regparm information. */
5117 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
5118 	    {
5119 	      cum->nregs = 1;
5120 	      cum->fastcall = 1; /* Same first register as in fastcall.  */
5121 	    }
5122 	  else if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
5123 	    {
5124 	      cum->nregs = 2;
5125 	      cum->fastcall = 1;
5126 	    }
5127 	  else
5128 	    cum->nregs = ix86_function_regparm (fntype, fndecl);
5131 /* Set up the number of SSE registers used for passing SFmode
5132 and DFmode arguments. Warn for mismatching ABI. */
5133 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
5137 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
5138 But in the case of vector types, it is some vector mode.
5140 When we have only some of our vector isa extensions enabled, then there
5141 are some modes for which vector_mode_supported_p is false. For these
5142 modes, the generic vector support in gcc will choose some non-vector mode
5143 in order to implement the type. By computing the natural mode, we'll
5144 select the proper ABI location for the operand and not depend on whatever
5145 the middle-end decides to do with these vector types.
5147    The middle-end can't deal with vector types > 16 bytes.  In this
5148    case, we return the original mode and warn ABI change if CUM isn't
5149    NULL.  */
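/* Illustrative case (editorial example): a user type
     typedef int v8si __attribute__((vector_size (32)));
   only has a 32-byte vector mode when AVX is enabled; without -mavx the
   code below falls back to TYPE_MODE and warns that the ABI changes.  */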
5151 static enum machine_mode
5152 type_natural_mode (const_tree type, CUMULATIVE_ARGS *cum)
5154 enum machine_mode mode = TYPE_MODE (type);
5156 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
5158 HOST_WIDE_INT size = int_size_in_bytes (type);
5159 if ((size == 8 || size == 16 || size == 32)
5160 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5161 && TYPE_VECTOR_SUBPARTS (type) > 1)
5163 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5165 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5166 mode = MIN_MODE_VECTOR_FLOAT;
5168 mode = MIN_MODE_VECTOR_INT;
5170 /* Get the mode which has this inner mode and number of units. */
5171 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5172 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5173 && GET_MODE_INNER (mode) == innermode)
5175 if (size == 32 && !TARGET_AVX)
5177 static bool warnedavx;
5184 warning (0, "AVX vector argument without AVX "
5185 "enabled changes the ABI");
5187 return TYPE_MODE (type);
5200 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5201 this may not agree with the mode that the type system has chosen for the
5202 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5203 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5206 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5211 if (orig_mode != BLKmode)
5212 tmp = gen_rtx_REG (orig_mode, regno);
5213   else
5214     {
5215       tmp = gen_rtx_REG (mode, regno);
5216 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5217       tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
5218     }
5220   return tmp;
5223 /* x86-64 register passing implementation. See x86-64 ABI for details. Goal
5224 of this code is to classify each 8bytes of incoming argument by the register
5225 class and assign registers accordingly. */
5227 /* Return the union class of CLASS1 and CLASS2.
5228 See the x86-64 PS ABI for details. */
5230 static enum x86_64_reg_class
5231 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5233 /* Rule #1: If both classes are equal, this is the resulting class. */
5234   if (class1 == class2)
5235     return class1;
5237   /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
5238      the other class.  */
5239   if (class1 == X86_64_NO_CLASS)
5240     return class2;
5241   if (class2 == X86_64_NO_CLASS)
5242     return class1;
5244 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5245 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5246 return X86_64_MEMORY_CLASS;
5248 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5249 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5250 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5251 return X86_64_INTEGERSI_CLASS;
5252 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5253 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5254 return X86_64_INTEGER_CLASS;
5256   /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
5257      MEMORY is used.  */
5258 if (class1 == X86_64_X87_CLASS
5259 || class1 == X86_64_X87UP_CLASS
5260 || class1 == X86_64_COMPLEX_X87_CLASS
5261 || class2 == X86_64_X87_CLASS
5262 || class2 == X86_64_X87UP_CLASS
5263 || class2 == X86_64_COMPLEX_X87_CLASS)
5264 return X86_64_MEMORY_CLASS;
5266 /* Rule #6: Otherwise class SSE is used. */
5267 return X86_64_SSE_CLASS;
5270 /* Classify the argument of type TYPE and mode MODE.
5271 CLASSES will be filled by the register class used to pass each word
5272 of the operand. The number of words is returned. In case the parameter
5273 should be passed in memory, 0 is returned. As a special case for zero
5274 sized containers, classes[0] will be NO_CLASS and 1 is returned.
5276    BIT_OFFSET is used internally for handling records and specifies the
5277    offset in bits modulo 256 to avoid overflow cases.
5279    See the x86-64 PS ABI for details.  */
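/* Worked example (editorial note, per the SysV x86-64 ABI): for
     struct s { double d; int i; };
   the first eightbyte is classified SSE (the double) and the second
   INTEGER (the int), so the struct travels in one SSE register and one
   general-purpose register.  */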
5283 classify_argument (enum machine_mode mode, const_tree type,
5284 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5286 HOST_WIDE_INT bytes =
5287 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5288 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5290   /* Variable sized entities are always passed/returned in memory.  */
5291   if (bytes < 0)
5292     return 0;
5294 if (mode != VOIDmode
5295       && targetm.calls.must_pass_in_stack (mode, type))
5296     return 0;
5298 if (type && AGGREGATE_TYPE_P (type))
5302 enum x86_64_reg_class subclasses[MAX_CLASSES];
5304       /* On x86-64 we pass structures larger than 32 bytes on the stack.  */
5305       if (bytes > 32)
5306 	return 0;
5308 for (i = 0; i < words; i++)
5309 classes[i] = X86_64_NO_CLASS;
5311       /* Zero sized arrays or structures are NO_CLASS.  We return 0 to
5312 	 signal memory class, so handle it as a special case.  */
5313       if (!words)
5314 	{
5315 	  classes[0] = X86_64_NO_CLASS;
5316 	  return 1;
5317 	}
5319 /* Classify each field of record and merge classes. */
5320       switch (TREE_CODE (type))
5321 	{
5322 	case RECORD_TYPE:
5323 /* And now merge the fields of structure. */
5324 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5326 if (TREE_CODE (field) == FIELD_DECL)
5330 if (TREE_TYPE (field) == error_mark_node)
5333 /* Bitfields are always classified as integer. Handle them
5334 early, since later code would consider them to be
5335 misaligned integers. */
5336 if (DECL_BIT_FIELD (field))
5338 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5339 i < ((int_bit_position (field) + (bit_offset % 64))
5340 + tree_low_cst (DECL_SIZE (field), 0)
5342 		    classes[i] =
5343 		      merge_classes (X86_64_INTEGER_CLASS,
5344 				     classes[i]);
5350 type = TREE_TYPE (field);
5352 /* Flexible array member is ignored. */
5353 if (TYPE_MODE (type) == BLKmode
5354 && TREE_CODE (type) == ARRAY_TYPE
5355 && TYPE_SIZE (type) == NULL_TREE
5356 && TYPE_DOMAIN (type) != NULL_TREE
5357 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5362 if (!warned && warn_psabi)
5365 inform (input_location,
5366 "The ABI of passing struct with"
5367 " a flexible array member has"
5368 " changed in GCC 4.4");
5372 num = classify_argument (TYPE_MODE (type), type,
5373 					   subclasses,
5374 					   (int_bit_position (field)
5375 					    + bit_offset) % 256);
5376 		  if (!num)
5377 		    return 0;
5378 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5379 for (i = 0; i < num && (i + pos) < words; i++)
5380 		    classes[i + pos] =
5381 		      merge_classes (subclasses[i], classes[i + pos]);
5387 	case ARRAY_TYPE:
5388 	  /* Arrays are handled as small records.  */
5391 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5392 				     TREE_TYPE (type), subclasses, bit_offset);
5393 	    if (!num)
5394 	      return 0;
5396 /* The partial classes are now full classes. */
5397 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5398 subclasses[0] = X86_64_SSE_CLASS;
5399 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5400 && !((bit_offset % 64) == 0 && bytes == 4))
5401 subclasses[0] = X86_64_INTEGER_CLASS;
5403 for (i = 0; i < words; i++)
5404 classes[i] = subclasses[i % num];
5409 case QUAL_UNION_TYPE:
5410 /* Unions are similar to RECORD_TYPE but offset is always 0.
5412 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5414 if (TREE_CODE (field) == FIELD_DECL)
5418 if (TREE_TYPE (field) == error_mark_node)
5421 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5422 TREE_TYPE (field), subclasses,
5423 					   bit_offset);
5424 		  if (!num)
5425 		    return 0;
5426 		  for (i = 0; i < num; i++)
5427 classes[i] = merge_classes (subclasses[i], classes[i]);
5438 /* When size > 16 bytes, if the first one isn't
5439 X86_64_SSE_CLASS or any other ones aren't
5440 	     X86_64_SSEUP_CLASS, everything should be passed in
5441 	     memory.  */
5442 	  if (classes[0] != X86_64_SSE_CLASS)
5443 	    return 0;
5445 for (i = 1; i < words; i++)
5446 	    if (classes[i] != X86_64_SSEUP_CLASS)
5447 	      return 0;
5450 /* Final merger cleanup. */
5451 for (i = 0; i < words; i++)
5453 	  /* If one class is MEMORY, everything should be passed in
5454 	     memory.  */
5455 	  if (classes[i] == X86_64_MEMORY_CLASS)
5456 	    return 0;
5458 /* The X86_64_SSEUP_CLASS should be always preceded by
5459 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5460 if (classes[i] == X86_64_SSEUP_CLASS
5461 && classes[i - 1] != X86_64_SSE_CLASS
5462 && classes[i - 1] != X86_64_SSEUP_CLASS)
5464 /* The first one should never be X86_64_SSEUP_CLASS. */
5465 gcc_assert (i != 0);
5466 classes[i] = X86_64_SSE_CLASS;
5469 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5470 everything should be passed in memory. */
5471 if (classes[i] == X86_64_X87UP_CLASS
5472 && (classes[i - 1] != X86_64_X87_CLASS))
5476 /* The first one should never be X86_64_X87UP_CLASS. */
5477 gcc_assert (i != 0);
5478 if (!warned && warn_psabi)
5481 inform (input_location,
5482 "The ABI of passing union with long double"
5483 " has changed in GCC 4.4");
5491 /* Compute alignment needed. We align all types to natural boundaries with
5492    the exception of XFmode, which is aligned to 64 bits.  */
5493 if (mode != VOIDmode && mode != BLKmode)
5495 int mode_alignment = GET_MODE_BITSIZE (mode);
5497       if (mode == XFmode)
5498 	mode_alignment = 128;
5499 else if (mode == XCmode)
5500 mode_alignment = 256;
5501 if (COMPLEX_MODE_P (mode))
5502 mode_alignment /= 2;
5503 /* Misaligned fields are always returned in memory. */
5504       if (bit_offset % mode_alignment)
5505 	return 0;
5508 /* for V1xx modes, just use the base mode */
5509 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
5510 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5511 mode = GET_MODE_INNER (mode);
5513 /* Classification of atomic types. */
5518 classes[0] = X86_64_SSE_CLASS;
5521 classes[0] = X86_64_SSE_CLASS;
5522 classes[1] = X86_64_SSEUP_CLASS;
5532 	int size = (bit_offset % 64) + (int) GET_MODE_BITSIZE (mode);
5536 classes[0] = X86_64_INTEGERSI_CLASS;
5539 else if (size <= 64)
5541 classes[0] = X86_64_INTEGER_CLASS;
5544 else if (size <= 64+32)
5546 classes[0] = X86_64_INTEGER_CLASS;
5547 classes[1] = X86_64_INTEGERSI_CLASS;
5550 else if (size <= 64+64)
5552 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5560 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5564 /* OImode shouldn't be used directly. */
5569 if (!(bit_offset % 64))
5570 classes[0] = X86_64_SSESF_CLASS;
5572 classes[0] = X86_64_SSE_CLASS;
5575 classes[0] = X86_64_SSEDF_CLASS;
5578 classes[0] = X86_64_X87_CLASS;
5579 classes[1] = X86_64_X87UP_CLASS;
5582 classes[0] = X86_64_SSE_CLASS;
5583 classes[1] = X86_64_SSEUP_CLASS;
5586 classes[0] = X86_64_SSE_CLASS;
5587 if (!(bit_offset % 64))
5593 if (!warned && warn_psabi)
5596 inform (input_location,
5597 "The ABI of passing structure with complex float"
5598 " member has changed in GCC 4.4");
5600 classes[1] = X86_64_SSESF_CLASS;
5604 classes[0] = X86_64_SSEDF_CLASS;
5605 classes[1] = X86_64_SSEDF_CLASS;
5608 classes[0] = X86_64_COMPLEX_X87_CLASS;
/* This mode is larger than 16 bytes.  */
5619 classes[0] = X86_64_SSE_CLASS;
5620 classes[1] = X86_64_SSEUP_CLASS;
5621 classes[2] = X86_64_SSEUP_CLASS;
5622 classes[3] = X86_64_SSEUP_CLASS;
5630 classes[0] = X86_64_SSE_CLASS;
5631 classes[1] = X86_64_SSEUP_CLASS;
5639 classes[0] = X86_64_SSE_CLASS;
5645 gcc_assert (VECTOR_MODE_P (mode));
5650 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5652 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5653 classes[0] = X86_64_INTEGERSI_CLASS;
5655 classes[0] = X86_64_INTEGER_CLASS;
5656 classes[1] = X86_64_INTEGER_CLASS;
5657 return 1 + (bytes > 8);
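/* Illustrative examples of the classification above (a sketch assuming
   the SysV x86-64 passing rules implemented here, not an exhaustive
   table):

     int                -> { X86_64_INTEGERSI_CLASS }
     double             -> { X86_64_SSEDF_CLASS }
     long double        -> { X86_64_X87_CLASS, X86_64_X87UP_CLASS }
     struct { double d;
              int i; }  -> { X86_64_SSEDF_CLASS, X86_64_INTEGERSI_CLASS }

   Aggregates larger than 16 bytes go to memory unless they form a
   single 256-bit vector, i.e. X86_64_SSE_CLASS followed only by
   X86_64_SSEUP_CLASS.  */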
/* Examine the argument and set the number of registers required in each
class.  Return 0 iff the parameter should be passed in memory.  */
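/* For instance (an illustrative case): for struct { double d; int i; }
   the classes are X86_64_SSEDF_CLASS and X86_64_INTEGERSI_CLASS, so
   this sets *int_nregs = 1 and *sse_nregs = 1 and returns nonzero.  */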
5664 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5665 int *int_nregs, int *sse_nregs)
5667 enum x86_64_reg_class regclass[MAX_CLASSES];
5668 int n = classify_argument (mode, type, regclass, 0);
5674 for (n--; n >= 0; n--)
5675 switch (regclass[n])
5677 case X86_64_INTEGER_CLASS:
5678 case X86_64_INTEGERSI_CLASS:
5681 case X86_64_SSE_CLASS:
5682 case X86_64_SSESF_CLASS:
5683 case X86_64_SSEDF_CLASS:
5686 case X86_64_NO_CLASS:
5687 case X86_64_SSEUP_CLASS:
5689 case X86_64_X87_CLASS:
5690 case X86_64_X87UP_CLASS:
5694 case X86_64_COMPLEX_X87_CLASS:
5695 return in_return ? 2 : 0;
5696 case X86_64_MEMORY_CLASS:
5702 /* Construct container for the argument used by GCC interface. See
5703 FUNCTION_ARG for the detailed description. */
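/* As an illustrative sketch of the result: for the two-class example
   above (SSEDF followed by INTEGERSI), the container is a PARALLEL
   pairing each register with its byte offset within the argument,
   roughly

     (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                (expr_list (reg:SI di) (const_int 8))])

   assuming xmm0 and di are the next free SSE and integer slots.  */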
5706 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5707 const_tree type, int in_return, int nintregs, int nsseregs,
5708 const int *intreg, int sse_regno)
/* The following static variables record which errors have already
been issued.  */
5711 static bool issued_sse_arg_error;
5712 static bool issued_sse_ret_error;
5713 static bool issued_x87_ret_error;
5715 enum machine_mode tmpmode;
5717 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5718 enum x86_64_reg_class regclass[MAX_CLASSES];
5722 int needed_sseregs, needed_intregs;
5723 rtx exp[MAX_CLASSES];
5726 n = classify_argument (mode, type, regclass, 0);
5729 if (!examine_argument (mode, type, in_return, &needed_intregs,
5732 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5735 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5736 some less clueful developer tries to use floating-point anyway. */
5737 if (needed_sseregs && !TARGET_SSE)
5741 if (!issued_sse_ret_error)
5743 error ("SSE register return with SSE disabled");
5744 issued_sse_ret_error = true;
5747 else if (!issued_sse_arg_error)
5749 error ("SSE register argument with SSE disabled");
5750 issued_sse_arg_error = true;
5755 /* Likewise, error if the ABI requires us to return values in the
5756 x87 registers and the user specified -mno-80387. */
5757 if (!TARGET_80387 && in_return)
5758 for (i = 0; i < n; i++)
5759 if (regclass[i] == X86_64_X87_CLASS
5760 || regclass[i] == X86_64_X87UP_CLASS
5761 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5763 if (!issued_x87_ret_error)
5765 error ("x87 register return with x87 disabled");
5766 issued_x87_ret_error = true;
/* First construct simple cases.  Avoid SCmode, since we want to use
a single register to pass this type.  */
5773 if (n == 1 && mode != SCmode)
5774 switch (regclass[0])
5776 case X86_64_INTEGER_CLASS:
5777 case X86_64_INTEGERSI_CLASS:
5778 return gen_rtx_REG (mode, intreg[0]);
5779 case X86_64_SSE_CLASS:
5780 case X86_64_SSESF_CLASS:
5781 case X86_64_SSEDF_CLASS:
5782 if (mode != BLKmode)
5783 return gen_reg_or_parallel (mode, orig_mode,
5784 SSE_REGNO (sse_regno));
5786 case X86_64_X87_CLASS:
5787 case X86_64_COMPLEX_X87_CLASS:
5788 return gen_rtx_REG (mode, FIRST_STACK_REG);
5789 case X86_64_NO_CLASS:
/* Zero-sized array, struct or class.  */
5795 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5796 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5797 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5799 && regclass[0] == X86_64_SSE_CLASS
5800 && regclass[1] == X86_64_SSEUP_CLASS
5801 && regclass[2] == X86_64_SSEUP_CLASS
5802 && regclass[3] == X86_64_SSEUP_CLASS
5804 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5807 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5808 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5809 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5810 && regclass[1] == X86_64_INTEGER_CLASS
5811 && (mode == CDImode || mode == TImode || mode == TFmode)
5812 && intreg[0] + 1 == intreg[1])
5813 return gen_rtx_REG (mode, intreg[0]);
5815 /* Otherwise figure out the entries of the PARALLEL. */
5816 for (i = 0; i < n; i++)
5820 switch (regclass[i])
5822 case X86_64_NO_CLASS:
5824 case X86_64_INTEGER_CLASS:
5825 case X86_64_INTEGERSI_CLASS:
5826 /* Merge TImodes on aligned occasions here too. */
5827 if (i * 8 + 8 > bytes)
5828 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5829 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
/* We've requested a size (such as 24 bits) that we don't have an
integer mode for.  Use DImode.  */
5834 if (tmpmode == BLKmode)
5836 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5837 gen_rtx_REG (tmpmode, *intreg),
5841 case X86_64_SSESF_CLASS:
5842 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5843 gen_rtx_REG (SFmode,
5844 SSE_REGNO (sse_regno)),
5848 case X86_64_SSEDF_CLASS:
5849 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5850 gen_rtx_REG (DFmode,
5851 SSE_REGNO (sse_regno)),
5855 case X86_64_SSE_CLASS:
5863 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
5873 && regclass[1] == X86_64_SSEUP_CLASS
5874 && regclass[2] == X86_64_SSEUP_CLASS
5875 && regclass[3] == X86_64_SSEUP_CLASS);
5882 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5883 gen_rtx_REG (tmpmode,
5884 SSE_REGNO (sse_regno)),
5893 /* Empty aligned struct, union or class. */
5897 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5898 for (i = 0; i < nexps; i++)
5899 XVECEXP (ret, 0, i) = exp [i];
5903 /* Update the data in CUM to advance over an argument of mode MODE
5904 and data type TYPE. (TYPE is null for libcalls where that information
5905 may not be available.) */
5908 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5909 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5925 cum->words += words;
5926 cum->nregs -= words;
5927 cum->regno += words;
5929 if (cum->nregs <= 0)
5937 /* OImode shouldn't be used directly. */
5941 if (cum->float_in_sse < 2)
5944 if (cum->float_in_sse < 1)
5961 if (!type || !AGGREGATE_TYPE_P (type))
5963 cum->sse_words += words;
5964 cum->sse_nregs -= 1;
5965 cum->sse_regno += 1;
5966 if (cum->sse_nregs <= 0)
5980 if (!type || !AGGREGATE_TYPE_P (type))
5982 cum->mmx_words += words;
5983 cum->mmx_nregs -= 1;
5984 cum->mmx_regno += 1;
5985 if (cum->mmx_nregs <= 0)
5996 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5997 tree type, HOST_WIDE_INT words, int named)
5999 int int_nregs, sse_nregs;
/* Unnamed 256-bit vector mode parameters are passed on the stack.  */
6002 if (!named && VALID_AVX256_REG_MODE (mode))
6005 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
6006 cum->words += words;
6007 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
6009 cum->nregs -= int_nregs;
6010 cum->sse_nregs -= sse_nregs;
6011 cum->regno += int_nregs;
6012 cum->sse_regno += sse_nregs;
6015 cum->words += words;
6019 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
6020 HOST_WIDE_INT words)
/* Otherwise, this should be passed indirectly.  */
6023 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
6025 cum->words += words;
6034 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6035 tree type, int named)
6037 HOST_WIDE_INT bytes, words;
6039 if (mode == BLKmode)
6040 bytes = int_size_in_bytes (type);
6042 bytes = GET_MODE_SIZE (mode);
6043 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
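/* E.g. a 12-byte BLKmode argument yields words = 2 when UNITS_PER_WORD
   is 8, and words = 3 when it is 4.  */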
6046 mode = type_natural_mode (type, NULL);
6048 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6049 function_arg_advance_ms_64 (cum, bytes, words);
6050 else if (TARGET_64BIT)
6051 function_arg_advance_64 (cum, mode, type, words, named);
6053 function_arg_advance_32 (cum, mode, type, bytes, words);
6056 /* Define where to put the arguments to a function.
6057 Value is zero to push the argument on the stack,
6058 or a hard register in which to store the argument.
6060 MODE is the argument's machine mode.
6061 TYPE is the data type of the argument (as a tree).
This is null for libcalls where that information may not be available.
6064 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6065 the preceding args and about the function being called.
6066 NAMED is nonzero if this argument is a named parameter
6067 (otherwise it is an extra parameter matching an ellipsis). */
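/* As an illustrative summary of the 32-bit conventions handled below:
   under regparm the leading integer arguments go in EAX, EDX and ECX,
   while under fastcall the first two DWORD or smaller arguments go in
   ECX and EDX; anything that doesn't fit is pushed on the stack.  */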
6070 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6071 enum machine_mode orig_mode, tree type,
6072 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
6074 static bool warnedsse, warnedmmx;
6076 /* Avoid the AL settings for the Unix64 ABI. */
6077 if (mode == VOIDmode)
6093 if (words <= cum->nregs)
6095 int regno = cum->regno;
/* Fastcall allocates the first two DWORD (SImode) or
smaller arguments to ECX and EDX if it isn't an aggregate type.  */
6104 || (type && AGGREGATE_TYPE_P (type)))
/* ECX, not EAX, is the first allocated register.  */
6108 if (regno == AX_REG)
6111 return gen_rtx_REG (mode, regno);
6116 if (cum->float_in_sse < 2)
6119 if (cum->float_in_sse < 1)
/* In 32-bit mode, we pass TImode in XMM registers.  */
6130 if (!type || !AGGREGATE_TYPE_P (type))
6132 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
6135 warning (0, "SSE vector argument without SSE enabled "
6139 return gen_reg_or_parallel (mode, orig_mode,
6140 cum->sse_regno + FIRST_SSE_REG);
6145 /* OImode shouldn't be used directly. */
6154 if (!type || !AGGREGATE_TYPE_P (type))
6157 return gen_reg_or_parallel (mode, orig_mode,
6158 cum->sse_regno + FIRST_SSE_REG);
6168 if (!type || !AGGREGATE_TYPE_P (type))
6170 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6173 warning (0, "MMX vector argument without MMX enabled "
6177 return gen_reg_or_parallel (mode, orig_mode,
6178 cum->mmx_regno + FIRST_MMX_REG);
6187 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6188 enum machine_mode orig_mode, tree type, int named)
/* Handle a hidden AL argument containing the number of SSE registers
used by varargs x86-64 functions.  */
6192 if (mode == VOIDmode)
6193 return GEN_INT (cum->maybe_vaarg
6194 ? (cum->sse_nregs < 0
6195 ? (cum->call_abi == ix86_abi
6197 : (ix86_abi != SYSV_ABI
6198 ? X86_64_SSE_REGPARM_MAX
6199 : X86_64_MS_SSE_REGPARM_MAX))
6214 /* Unnamed 256bit vector mode parameters are passed on stack. */
6220 return construct_container (mode, orig_mode, type, 0, cum->nregs,
6222 &x86_64_int_parameter_registers [cum->regno],
6227 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6228 enum machine_mode orig_mode, int named,
6229 HOST_WIDE_INT bytes)
/* We need to add a clobber for MS_ABI -> SYSV ABI calls in expand_call.
We use the value -2 to specify that the current function call is MS ABI.  */
6235 if (mode == VOIDmode)
6236 return GEN_INT (-2);
6238 /* If we've run out of registers, it goes on the stack. */
6239 if (cum->nregs == 0)
6242 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6244 /* Only floating point modes are passed in anything but integer regs. */
6245 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6248 regno = cum->regno + FIRST_SSE_REG;
6253 /* Unnamed floating parameters are passed in both the
6254 SSE and integer registers. */
6255 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6256 t2 = gen_rtx_REG (mode, regno);
6257 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6258 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6259 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
/* Handle aggregate types passed in a register.  */
6263 if (orig_mode == BLKmode)
6265 if (bytes > 0 && bytes <= 8)
6266 mode = (bytes > 4 ? DImode : SImode);
6267 if (mode == BLKmode)
6271 return gen_reg_or_parallel (mode, orig_mode, regno);
6275 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
6276 tree type, int named)
6278 enum machine_mode mode = omode;
6279 HOST_WIDE_INT bytes, words;
6281 if (mode == BLKmode)
6282 bytes = int_size_in_bytes (type);
6284 bytes = GET_MODE_SIZE (mode);
6285 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6287 /* To simplify the code below, represent vector types with a vector mode
6288 even if MMX/SSE are not active. */
6289 if (type && TREE_CODE (type) == VECTOR_TYPE)
6290 mode = type_natural_mode (type, cum);
6292 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6293 return function_arg_ms_64 (cum, mode, omode, named, bytes);
6294 else if (TARGET_64BIT)
6295 return function_arg_64 (cum, mode, omode, type, named);
6297 return function_arg_32 (cum, mode, omode, type, bytes, words);
6300 /* A C expression that indicates when an argument must be passed by
6301 reference. If nonzero for an argument, a copy of that argument is
6302 made in memory and a pointer to the argument is passed instead of
6303 the argument itself. The pointer is passed in whatever way is
6304 appropriate for passing a pointer to that type. */
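/* Illustrative consequences of the Windows x64 rules checked below:
   struct { char c[3]; } (size 3) and __m128 (size 16) are passed by
   reference, while struct { int a, b; } (size 8) is passed by value
   in a single register.  */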
6307 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6308 enum machine_mode mode ATTRIBUTE_UNUSED,
6309 const_tree type, bool named ATTRIBUTE_UNUSED)
6311 /* See Windows x64 Software Convention. */
6312 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6314 int msize = (int) GET_MODE_SIZE (mode);
6317 /* Arrays are passed by reference. */
6318 if (TREE_CODE (type) == ARRAY_TYPE)
6321 if (AGGREGATE_TYPE_P (type))
6323 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6324 are passed by reference. */
6325 msize = int_size_in_bytes (type);
6329 /* __m128 is passed by reference. */
6331 case 1: case 2: case 4: case 8:
6337 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
/* Return true when TYPE should be 128-bit aligned for 32-bit argument passing
6346 contains_aligned_value_p (tree type)
6348 enum machine_mode mode = TYPE_MODE (type);
6349 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6353 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6355 if (TYPE_ALIGN (type) < 128)
6358 if (AGGREGATE_TYPE_P (type))
6360 /* Walk the aggregates recursively. */
6361 switch (TREE_CODE (type))
6365 case QUAL_UNION_TYPE:
6369 /* Walk all the structure fields. */
6370 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6372 if (TREE_CODE (field) == FIELD_DECL
6373 && contains_aligned_value_p (TREE_TYPE (field)))
/* Just for use if some language passes arrays by value.  */
6381 if (contains_aligned_value_p (TREE_TYPE (type)))
6392 /* Gives the alignment boundary, in bits, of an argument with the
6393 specified mode and type. */
6396 ix86_function_arg_boundary (enum machine_mode mode, tree type)
/* Since the canonical type is used for the call, we convert TYPE to
its canonical type if needed.  */
6403 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
6404 type = TYPE_CANONICAL (type);
6405 align = TYPE_ALIGN (type);
6408 align = GET_MODE_ALIGNMENT (mode);
6409 if (align < PARM_BOUNDARY)
6410 align = PARM_BOUNDARY;
/* In 32-bit mode, only _Decimal128 and __float128 are aligned to their
natural boundaries.  */
6413 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
/* The i386 ABI defines all arguments to be 4-byte aligned.  We have to
make an exception for SSE modes since these require 128-bit
alignment.

The handling here differs from field_alignment.  ICC aligns MMX
arguments to 4-byte boundaries, while structure fields are aligned
to 8-byte boundaries.  */
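/* Thus, as an illustration (assuming TARGET_SSE), a __m128 argument
   keeps its 128-bit alignment on the 32-bit stack, while a plain
   double is demoted to the 32-bit PARM_BOUNDARY of 32 bits.  */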
6424 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6425 align = PARM_BOUNDARY;
6429 if (!contains_aligned_value_p (type))
6430 align = PARM_BOUNDARY;
6433 if (align > BIGGEST_ALIGNMENT)
6434 align = BIGGEST_ALIGNMENT;
/* Return true if REGNO is a possible register number for a function value.  */
6441 ix86_function_value_regno_p (const unsigned int regno)
6448 case FIRST_FLOAT_REG:
/* TODO: The function should depend on the current function ABI but
builtins.c would need updating then.  Therefore we use the default ABI.  */
6452 if (TARGET_64BIT && ix86_abi == MS_ABI)
6454 return TARGET_FLOAT_RETURNS_IN_80387;
6460 if (TARGET_MACHO || TARGET_64BIT)
6468 /* Define how to find the value returned by a function.
6469 VALTYPE is the data type of the value (as a tree).
6470 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6471 otherwise, FUNC is 0. */
6474 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6475 const_tree fntype, const_tree fn)
/* 8-byte vector modes in %mm0.  See ix86_return_in_memory for where
we normally prevent this case when MMX is not available.  However,
some ABIs may require the result to be returned like DImode.  */
6482 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6483 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
/* 16-byte vector modes in %xmm0.  See ix86_return_in_memory for where
we prevent this case when SSE is not available.  However, some ABIs
may require the result to be returned like integer TImode.  */
6488 else if (mode == TImode
6489 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6490 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6492 /* 32-byte vector modes in %ymm0. */
6493 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6494 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6496 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6497 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6498 regno = FIRST_FLOAT_REG;
6500 /* Most things go in %eax. */
/* Override the FP return register with %xmm0 for local functions when
SSE math is enabled or for functions with the sseregparm attribute.  */
6505 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6507 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6508 if ((sse_level >= 1 && mode == SFmode)
6509 || (sse_level == 2 && mode == DFmode))
6510 regno = FIRST_SSE_REG;
6513 /* OImode shouldn't be used directly. */
6514 gcc_assert (mode != OImode);
6516 return gen_rtx_REG (orig_mode, regno);
6520 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6525 /* Handle libcalls, which don't provide a type node. */
6526 if (valtype == NULL)
6538 return gen_rtx_REG (mode, FIRST_SSE_REG);
6541 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6545 return gen_rtx_REG (mode, AX_REG);
6549 ret = construct_container (mode, orig_mode, valtype, 1,
6550 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6551 x86_64_int_return_registers, 0);
/* For zero-sized structures, construct_container returns NULL, but we
need to keep the rest of the compiler happy by returning a meaningful value.  */
6556 ret = gen_rtx_REG (orig_mode, AX_REG);
6562 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6564 unsigned int regno = AX_REG;
6568 switch (GET_MODE_SIZE (mode))
if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6572 && !COMPLEX_MODE_P (mode))
6573 regno = FIRST_SSE_REG;
6577 if (mode == SFmode || mode == DFmode)
6578 regno = FIRST_SSE_REG;
6584 return gen_rtx_REG (orig_mode, regno);
6588 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6589 enum machine_mode orig_mode, enum machine_mode mode)
6591 const_tree fn, fntype;
6594 if (fntype_or_decl && DECL_P (fntype_or_decl))
6595 fn = fntype_or_decl;
6596 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6598 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6599 return function_value_ms_64 (orig_mode, mode);
6600 else if (TARGET_64BIT)
6601 return function_value_64 (orig_mode, mode, valtype);
6603 return function_value_32 (orig_mode, mode, fntype, fn);
6607 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6608 bool outgoing ATTRIBUTE_UNUSED)
6610 enum machine_mode mode, orig_mode;
6612 orig_mode = TYPE_MODE (valtype);
6613 mode = type_natural_mode (valtype, NULL);
6614 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6618 ix86_libcall_value (enum machine_mode mode)
6620 return ix86_function_value_1 (NULL, NULL, mode, mode);
6623 /* Return true iff type is returned in memory. */
6625 static int ATTRIBUTE_UNUSED
6626 return_in_memory_32 (const_tree type, enum machine_mode mode)
6630 if (mode == BLKmode)
6633 size = int_size_in_bytes (type);
6635 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6638 if (VECTOR_MODE_P (mode) || mode == TImode)
6640 /* User-created vectors small enough to fit in EAX. */
/* MMX/3dNow values are returned in MM0,
except when it doesn't exist.  */
6647 return (TARGET_MMX ? 0 : 1);
6649 /* SSE values are returned in XMM0, except when it doesn't exist. */
6651 return (TARGET_SSE ? 0 : 1);
6653 /* AVX values are returned in YMM0, except when it doesn't exist. */
6655 return TARGET_AVX ? 0 : 1;
6664 /* OImode shouldn't be used directly. */
6665 gcc_assert (mode != OImode);
6670 static int ATTRIBUTE_UNUSED
6671 return_in_memory_64 (const_tree type, enum machine_mode mode)
6673 int needed_intregs, needed_sseregs;
6674 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6677 static int ATTRIBUTE_UNUSED
6678 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6680 HOST_WIDE_INT size = int_size_in_bytes (type);
6682 /* __m128 is returned in xmm0. */
6683 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6684 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
/* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes.  */
6688 return (size != 1 && size != 2 && size != 4 && size != 8);
6692 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6694 #ifdef SUBTARGET_RETURN_IN_MEMORY
6695 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6697 const enum machine_mode mode = type_natural_mode (type, NULL);
6701 if (ix86_function_type_abi (fntype) == MS_ABI)
6702 return return_in_memory_ms_64 (type, mode);
6704 return return_in_memory_64 (type, mode);
6707 return return_in_memory_32 (type, mode);
/* Return true iff TYPE is returned in memory.  This version is used
6712 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6713 but differs notably in that when MMX is available, 8-byte vectors
6714 are returned in memory, rather than in MMX registers. */
6717 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6720 enum machine_mode mode = type_natural_mode (type, NULL);
6723 return return_in_memory_64 (type, mode);
6725 if (mode == BLKmode)
6728 size = int_size_in_bytes (type);
6730 if (VECTOR_MODE_P (mode))
/* Return in memory only if MMX registers *are* available.  This
seems backwards, but it is consistent with the existing
Solaris x86 ABI.  */
6740 else if (mode == TImode)
6742 else if (mode == XFmode)
6748 /* When returning SSE vector types, we have a choice of either
(1) being ABI incompatible with a -march switch, or
6750 (2) generating an error.
6751 Given no good solution, I think the safest thing is one warning.
6752 The user won't be able to use -Werror, but....
6754 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6755 called in response to actually generating a caller or callee that
6756 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6757 via aggregate_value_p for general type probing from tree-ssa. */
6760 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6762 static bool warnedsse, warnedmmx;
6764 if (!TARGET_64BIT && type)
6766 /* Look at the return type of the function, not the function type. */
6767 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6769 if (!TARGET_SSE && !warnedsse)
6772 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6775 warning (0, "SSE vector return without SSE enabled "
6780 if (!TARGET_MMX && !warnedmmx)
6782 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6785 warning (0, "MMX vector return without MMX enabled "
6795 /* Create the va_list data type. */
/* Returns the calling-convention-specific va_list data type.
6798 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6801 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6803 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
/* For i386 we use a plain pointer to the argument area.  */
6806 if (!TARGET_64BIT || abi == MS_ABI)
6807 return build_pointer_type (char_type_node);
6809 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6810 type_decl = build_decl (BUILTINS_LOCATION,
6811 TYPE_DECL, get_identifier ("__va_list_tag"), record);
6813 f_gpr = build_decl (BUILTINS_LOCATION,
6814 FIELD_DECL, get_identifier ("gp_offset"),
6815 unsigned_type_node);
6816 f_fpr = build_decl (BUILTINS_LOCATION,
6817 FIELD_DECL, get_identifier ("fp_offset"),
6818 unsigned_type_node);
6819 f_ovf = build_decl (BUILTINS_LOCATION,
6820 FIELD_DECL, get_identifier ("overflow_arg_area"),
6822 f_sav = build_decl (BUILTINS_LOCATION,
6823 FIELD_DECL, get_identifier ("reg_save_area"),
6826 va_list_gpr_counter_field = f_gpr;
6827 va_list_fpr_counter_field = f_fpr;
6829 DECL_FIELD_CONTEXT (f_gpr) = record;
6830 DECL_FIELD_CONTEXT (f_fpr) = record;
6831 DECL_FIELD_CONTEXT (f_ovf) = record;
6832 DECL_FIELD_CONTEXT (f_sav) = record;
6834 TREE_CHAIN (record) = type_decl;
6835 TYPE_NAME (record) = type_decl;
6836 TYPE_FIELDS (record) = f_gpr;
6837 TREE_CHAIN (f_gpr) = f_fpr;
6838 TREE_CHAIN (f_fpr) = f_ovf;
6839 TREE_CHAIN (f_ovf) = f_sav;
6841 layout_type (record);
6843 /* The correct type is an array type of one element. */
6844 return build_array_type (record, build_index_type (size_zero_node));
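/* The record built above corresponds roughly to the C declaration
   mandated by the SysV x86-64 ABI:

     typedef struct __va_list_tag {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } va_list[1];  */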
/* Set up the builtin va_list data type and, for 64-bit, the additional
calling-convention-specific va_list data types.  */
6851 ix86_build_builtin_va_list (void)
6853 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
/* Initialize ABI-specific va_list builtin types.  */
6859 if (ix86_abi == MS_ABI)
6861 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6862 if (TREE_CODE (t) != RECORD_TYPE)
6863 t = build_variant_type_copy (t);
6864 sysv_va_list_type_node = t;
6869 if (TREE_CODE (t) != RECORD_TYPE)
6870 t = build_variant_type_copy (t);
6871 sysv_va_list_type_node = t;
6873 if (ix86_abi != MS_ABI)
6875 t = ix86_build_builtin_va_list_abi (MS_ABI);
6876 if (TREE_CODE (t) != RECORD_TYPE)
6877 t = build_variant_type_copy (t);
6878 ms_va_list_type_node = t;
6883 if (TREE_CODE (t) != RECORD_TYPE)
6884 t = build_variant_type_copy (t);
6885 ms_va_list_type_node = t;
6892 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6895 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6903 int regparm = ix86_regparm;
6905 if (cum->call_abi != ix86_abi)
6906 regparm = (ix86_abi != SYSV_ABI
6907 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
6909 /* GPR size of varargs save area. */
6910 if (cfun->va_list_gpr_size)
6911 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6913 ix86_varargs_gpr_size = 0;
6915 /* FPR size of varargs save area. We don't need it if we don't pass
6916 anything in SSE registers. */
6917 if (cum->sse_nregs && cfun->va_list_fpr_size)
6918 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6920 ix86_varargs_fpr_size = 0;
6922 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6925 save_area = frame_pointer_rtx;
6926 set = get_varargs_alias_set ();
6928 for (i = cum->regno;
6930 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6933 mem = gen_rtx_MEM (Pmode,
6934 plus_constant (save_area, i * UNITS_PER_WORD));
6935 MEM_NOTRAP_P (mem) = 1;
6936 set_mem_alias_set (mem, set);
6937 emit_move_insn (mem, gen_rtx_REG (Pmode,
6938 x86_64_int_parameter_registers[i]));
6941 if (ix86_varargs_fpr_size)
/* Now emit code to save the SSE registers.  The AX parameter contains the
number of SSE parameter registers used to call this function.  We use the
sse_prologue_save insn template, which produces a computed jump across
the SSE saves.  We need some preparation work to get this working.  */
6948 label = gen_label_rtx ();
6950 nsse_reg = gen_reg_rtx (Pmode);
6951 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
/* Compute the address of the memory block we save into.  We always use a
pointer pointing 127 bytes after the first byte to store - this is
needed to keep the instruction size limited to 4 bytes (5 bytes for AVX)
with a one-byte displacement.  */
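/* For instance (illustrative), a slot at offset 112 from the start of
   the block is addressed as 112 - 127 = -15 from the biased pointer,
   which fits in one signed byte.  */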
6957 tmp_reg = gen_reg_rtx (Pmode);
6958 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6959 plus_constant (save_area,
6960 ix86_varargs_gpr_size + 127)));
6961 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6962 MEM_NOTRAP_P (mem) = 1;
6963 set_mem_alias_set (mem, set);
6964 set_mem_align (mem, 64);
6966 /* And finally do the dirty job! */
6967 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6968 GEN_INT (cum->sse_regno), label,
6969 gen_reg_rtx (Pmode)));
6974 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6976 alias_set_type set = get_varargs_alias_set ();
6979 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
6983 mem = gen_rtx_MEM (Pmode,
6984 plus_constant (virtual_incoming_args_rtx,
6985 i * UNITS_PER_WORD));
6986 MEM_NOTRAP_P (mem) = 1;
6987 set_mem_alias_set (mem, set);
6989 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6990 emit_move_insn (mem, reg);
6995 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6996 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6999 CUMULATIVE_ARGS next_cum;
/* This argument doesn't appear to be used anymore, which is good,
because the old code here didn't suppress RTL generation.  */
7004 gcc_assert (!no_rtl);
7009 fntype = TREE_TYPE (current_function_decl);
7011 /* For varargs, we do not want to skip the dummy va_dcl argument.
7012 For stdargs, we do want to skip the last named argument. */
7014 if (stdarg_p (fntype))
7015 function_arg_advance (&next_cum, mode, type, 1);
7017 if (cum->call_abi == MS_ABI)
7018 setup_incoming_varargs_ms_64 (&next_cum);
7020 setup_incoming_varargs_64 (&next_cum);
/* Check if TYPE is a va_list of kind char *.  */
7026 is_va_list_char_pointer (tree type)
7030 /* For 32-bit it is always true. */
7033 canonic = ix86_canonical_va_list_type (type);
7034 return (canonic == ms_va_list_type_node
7035 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
7038 /* Implement va_start. */
7041 ix86_va_start (tree valist, rtx nextarg)
7043 HOST_WIDE_INT words, n_gpr, n_fpr;
7044 tree f_gpr, f_fpr, f_ovf, f_sav;
7045 tree gpr, fpr, ovf, sav, t;
/* Only the 64-bit target needs something special.  */
7049 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7051 std_expand_builtin_va_start (valist, nextarg);
7055 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7056 f_fpr = TREE_CHAIN (f_gpr);
7057 f_ovf = TREE_CHAIN (f_fpr);
7058 f_sav = TREE_CHAIN (f_ovf);
7060 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
7061 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
7062 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7063 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7064 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7066 /* Count number of gp and fp argument registers used. */
7067 words = crtl->args.info.words;
7068 n_gpr = crtl->args.info.regno;
7069 n_fpr = crtl->args.info.sse_regno;
7071 if (cfun->va_list_gpr_size)
7073 type = TREE_TYPE (gpr);
7074 t = build2 (MODIFY_EXPR, type,
7075 gpr, build_int_cst (type, n_gpr * 8));
7076 TREE_SIDE_EFFECTS (t) = 1;
7077 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7080 if (TARGET_SSE && cfun->va_list_fpr_size)
7082 type = TREE_TYPE (fpr);
7083 t = build2 (MODIFY_EXPR, type, fpr,
7084 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
7085 TREE_SIDE_EFFECTS (t) = 1;
7086 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
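/* As an illustrative example: for a prototype of (int, double, ...),
   one GP and one SSE register are consumed by the named arguments, so
   the code above stores gp_offset = 1 * 8 = 8 and fp_offset
   = 1 * 16 + 8 * X86_64_REGPARM_MAX = 64.  */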
7089 /* Find the overflow area. */
7090 type = TREE_TYPE (ovf);
7091 t = make_tree (type, crtl->args.internal_arg_pointer);
7093 t = build2 (POINTER_PLUS_EXPR, type, t,
7094 size_int (words * UNITS_PER_WORD));
7095 t = build2 (MODIFY_EXPR, type, ovf, t);
7096 TREE_SIDE_EFFECTS (t) = 1;
7097 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7099 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
/* Find the register save area.
The function prologue saves it right above the stack frame.  */
7103 type = TREE_TYPE (sav);
7104 t = make_tree (type, frame_pointer_rtx);
7105 if (!ix86_varargs_gpr_size)
7106 t = build2 (POINTER_PLUS_EXPR, type, t,
7107 size_int (-8 * X86_64_REGPARM_MAX));
7108 t = build2 (MODIFY_EXPR, type, sav, t);
7109 TREE_SIDE_EFFECTS (t) = 1;
7110 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7114 /* Implement va_arg. */
7117 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7120 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
7121 tree f_gpr, f_fpr, f_ovf, f_sav;
7122 tree gpr, fpr, ovf, sav, t;
7124 tree lab_false, lab_over = NULL_TREE;
7129 enum machine_mode nat_mode;
7130 unsigned int arg_boundary;
/* Only the 64-bit target needs something special.  */
7133 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7134 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7136 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7137 f_fpr = TREE_CHAIN (f_gpr);
7138 f_ovf = TREE_CHAIN (f_fpr);
7139 f_sav = TREE_CHAIN (f_ovf);
7141 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7142 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7143 valist = build_va_arg_indirect_ref (valist);
7144 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7145 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7146 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7148 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7150 type = build_pointer_type (type);
7151 size = int_size_in_bytes (type);
7152 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7154 nat_mode = type_natural_mode (type, NULL);
/* Unnamed 256-bit vector mode parameters are passed on the stack.  */
7164 if (ix86_cfun_abi () == SYSV_ABI)
7171 container = construct_container (nat_mode, TYPE_MODE (type),
7172 type, 0, X86_64_REGPARM_MAX,
7173 X86_64_SSE_REGPARM_MAX, intreg,
7178 /* Pull the value out of the saved registers. */
7180 addr = create_tmp_var (ptr_type_node, "addr");
7184 int needed_intregs, needed_sseregs;
7186 tree int_addr, sse_addr;
7188 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7189 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7191 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7193 need_temp = (!REG_P (container)
7194 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7195 || TYPE_ALIGN (type) > 128));
/* In case we are passing a structure, verify that it is a consecutive
block in the register save area.  If not, we need to do moves.  */
7199 if (!need_temp && !REG_P (container))
/* Verify that all registers are strictly consecutive.  */
7202 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7206 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7208 rtx slot = XVECEXP (container, 0, i);
7209 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7210 || INTVAL (XEXP (slot, 1)) != i * 16)
7218 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7220 rtx slot = XVECEXP (container, 0, i);
7221 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7222 || INTVAL (XEXP (slot, 1)) != i * 8)
7234 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7235 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7238 /* First ensure that we fit completely in registers. */
7241 t = build_int_cst (TREE_TYPE (gpr),
7242 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7243 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7244 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7245 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7246 gimplify_and_add (t, pre_p);
7250 t = build_int_cst (TREE_TYPE (fpr),
7251 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7252 + X86_64_REGPARM_MAX * 8);
7253 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7254 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7255 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7256 gimplify_and_add (t, pre_p);
7259 /* Compute index to start of area used for integer regs. */
7262 /* int_addr = gpr + sav; */
7263 t = fold_convert (sizetype, gpr);
7264 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7265 gimplify_assign (int_addr, t, pre_p);
7269 /* sse_addr = fpr + sav; */
7270 t = fold_convert (sizetype, fpr);
7271 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7272 gimplify_assign (sse_addr, t, pre_p);
7277 tree temp = create_tmp_var (type, "va_arg_tmp");
7280 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
7281 gimplify_assign (addr, t, pre_p);
7283 for (i = 0; i < XVECLEN (container, 0); i++)
7285 rtx slot = XVECEXP (container, 0, i);
7286 rtx reg = XEXP (slot, 0);
7287 enum machine_mode mode = GET_MODE (reg);
7288 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
7289 tree addr_type = build_pointer_type (piece_type);
7290 tree daddr_type = build_pointer_type_for_mode (piece_type,
7294 tree dest_addr, dest;
7296 if (SSE_REGNO_P (REGNO (reg)))
7298 src_addr = sse_addr;
7299 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
7303 src_addr = int_addr;
7304 src_offset = REGNO (reg) * 8;
7306 src_addr = fold_convert (addr_type, src_addr);
7307 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
7308 size_int (src_offset));
7309 src = build_va_arg_indirect_ref (src_addr);
7311 dest_addr = fold_convert (daddr_type, addr);
7312 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
7313 size_int (INTVAL (XEXP (slot, 1))));
7314 dest = build_va_arg_indirect_ref (dest_addr);
7316 gimplify_assign (dest, src, pre_p);
7322 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
7323 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7324 gimplify_assign (gpr, t, pre_p);
7329 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7330 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7331 gimplify_assign (fpr, t, pre_p);
7334 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7336 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7339 /* ... otherwise out of the overflow area. */
/* When we align a parameter on the stack for the caller, if the parameter
alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
aligned at MAX_SUPPORTED_STACK_ALIGNMENT.  We will match the callee
here with the caller.  */
7345 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
7346 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7347 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7349 /* Care for on-stack alignment if needed. */
7350 if (arg_boundary <= 64
7351 || integer_zerop (TYPE_SIZE (type)))
7355 HOST_WIDE_INT align = arg_boundary / 8;
7356 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
7357 size_int (align - 1));
7358 t = fold_convert (sizetype, t);
7359 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7361 t = fold_convert (TREE_TYPE (ovf), t);
7362 if (crtl->stack_alignment_needed < arg_boundary)
7363 crtl->stack_alignment_needed = arg_boundary;
7365 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7366 gimplify_assign (addr, t, pre_p);
7368 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
7369 size_int (rsize * UNITS_PER_WORD));
7370 gimplify_assign (unshare_expr (ovf), t, pre_p);
7373 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7375 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
7376 addr = fold_convert (ptrtype, addr);
7379 addr = build_va_arg_indirect_ref (addr);
7380 return build_va_arg_indirect_ref (addr);
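/* An illustrative outline (pseudo-C, not part of the original comments)
   of the va_arg sequence gimplified above, for a TYPE that can live in
   registers:

     if (gpr < (X86_64_REGPARM_MAX - needed_intregs + 1) * 8
         && fpr < (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
                  + X86_64_REGPARM_MAX * 8)
       {
         addr = sav + gpr and/or sav + fpr;   <- register save area
         gpr += needed_intregs * 8;
         fpr += needed_sseregs * 16;
       }
     else
       {
         addr = align (ovf, arg_boundary);    <- overflow area
         ovf = addr + rsize * UNITS_PER_WORD;
       }
     result = *addr;  */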
7383 /* Return nonzero if OPNUM's MEM should be matched
7384 in movabs* patterns. */
7387 ix86_check_movabs (rtx insn, int opnum)
7391 set = PATTERN (insn);
7392 if (GET_CODE (set) == PARALLEL)
7393 set = XVECEXP (set, 0, 0);
7394 gcc_assert (GET_CODE (set) == SET);
7395 mem = XEXP (set, opnum);
7396 while (GET_CODE (mem) == SUBREG)
7397 mem = SUBREG_REG (mem);
7398 gcc_assert (MEM_P (mem));
7399 return (volatile_ok || !MEM_VOLATILE_P (mem));
7402 /* Initialize the table of extra 80387 mathematical constants. */
7405 init_ext_80387_constants (void)
7407 static const char * cst[5] =
7409 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7410 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7411 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7412 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7413 "3.1415926535897932385128089594061862044", /* 4: fldpi */
7417 for (i = 0; i < 5; i++)
7419 real_from_string (&ext_80387_constants_table[i], cst[i]);
7420 /* Ensure each constant is rounded to XFmode precision. */
7421 real_convert (&ext_80387_constants_table[i],
7422 XFmode, &ext_80387_constants_table[i]);
7425 ext_80387_constants_init = 1;
7428 /* Return true if the constant is something that can be loaded with
7429 a special instruction. */
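/* The nonzero return values encode the constant as follows (an
   illustrative summary; standard_80387_constant_opcode maps them back
   to insns):

     1  0.0  (fldz)         2  1.0  (fld1)
     3  lg2  (fldlg2)       4  ln2  (fldln2)
     5  l2e  (fldl2e)       6  l2t  (fldl2t)
     7  pi   (fldpi)
     8  -0.0 (fldz; fchs)   9  -1.0 (fld1; fchs)  */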
7432 standard_80387_constant_p (rtx x)
7434 enum machine_mode mode = GET_MODE (x);
7438 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7441 if (x == CONST0_RTX (mode))
7443 if (x == CONST1_RTX (mode))
7446 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7448 /* For XFmode constants, try to find a special 80387 instruction when
7449 optimizing for size or on those CPUs that benefit from them. */
7451 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7455 if (! ext_80387_constants_init)
7456 init_ext_80387_constants ();
7458 for (i = 0; i < 5; i++)
7459 if (real_identical (&r, &ext_80387_constants_table[i]))
/* A load of the constant -0.0 or -1.0 will be split into an
fldz;fchs or fld1;fchs sequence.  */
7465 if (real_isnegzero (&r))
7467 if (real_identical (&r, &dconstm1))
/* Return the opcode of the special instruction to be used to load
the constant X.  */
7477 standard_80387_constant_opcode (rtx x)
7479 switch (standard_80387_constant_p (x))
7503 /* Return the CONST_DOUBLE representing the 80387 constant that is
7504 loaded by the specified special instruction. The argument IDX
7505 matches the return value from standard_80387_constant_p. */
7508 standard_80387_constant_rtx (int idx)
7512 if (! ext_80387_constants_init)
7513 init_ext_80387_constants ();
7529 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
/* Return 1 if X is all 0s, and 2 if X is all 1s,
in a supported SSE vector mode.  */
7537 standard_sse_constant_p (rtx x)
7539 enum machine_mode mode = GET_MODE (x);
7541 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7543 if (vector_all_ones_operand (x, mode))
/* Return the opcode of the special instruction to be used to load
the constant X.  */
7563 standard_sse_constant_opcode (rtx insn, rtx x)
7565 switch (standard_sse_constant_p (x))
7568 switch (get_attr_mode (insn))
7571 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7573 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7574 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7576 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7578 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7579 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7581 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7583 return "vxorps\t%x0, %x0, %x0";
7585 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7586 return "vxorps\t%x0, %x0, %x0";
7588 return "vxorpd\t%x0, %x0, %x0";
7590 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7591 return "vxorps\t%x0, %x0, %x0";
7593 return "vpxor\t%x0, %x0, %x0";
7598 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
/* Return 1 if OP contains a symbol reference.  */
7608 symbolic_reference_mentioned_p (rtx op)
7613 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7616 fmt = GET_RTX_FORMAT (GET_CODE (op));
7617 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7623 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7624 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7628 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7635 /* Return 1 if it is appropriate to emit `ret' instructions in the
7636 body of a function. Do this only if the epilogue is simple, needing a
7637 couple of insns. Prior to reloading, we can't tell how many registers
7638 must be saved, so return 0 then. Return 0 if there is no frame
7639 marker to de-allocate. */
7642 ix86_can_use_return_insn_p (void)
7644 struct ix86_frame frame;
7646 if (! reload_completed || frame_pointer_needed)
/* Don't allow more than 32768 bytes of arguments to be popped, since
that's all we can do with one instruction.  */
7651 if (crtl->args.pops_args
7652 && crtl->args.size >= 32768)
7655 ix86_compute_frame_layout (&frame);
7656 return frame.to_allocate == 0 && frame.padding0 == 0
7657 && (frame.nregs + frame.nsseregs) == 0;
7660 /* Value should be nonzero if functions must have frame pointers.
7661 Zero means the frame pointer need not be set up (and parms may
7662 be accessed via the stack pointer) in functions that seem suitable. */
7665 ix86_frame_pointer_required (void)
7667 /* If we accessed previous frames, then the generated code expects
7668 to be able to access the saved ebp value in our frame. */
7669 if (cfun->machine->accesses_prev_frame)
/* Several x86 OSes need a frame pointer for other reasons,
usually pertaining to setjmp.  */
7674 if (SUBTARGET_FRAME_POINTER_REQUIRED)
/* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
the frame pointer by default.  Turn it back on now if we don't
have a leaf function.  */
7680 if (TARGET_OMIT_LEAF_FRAME_POINTER
7681 && (!current_function_is_leaf
7682 || ix86_current_function_calls_tls_descriptor))
7691 /* Record that the current function accesses previous call frames. */
7694 ix86_setup_frame_addresses (void)
7696 cfun->machine->accesses_prev_frame = 1;
7699 #ifndef USE_HIDDEN_LINKONCE
7700 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7701 # define USE_HIDDEN_LINKONCE 1
7703 # define USE_HIDDEN_LINKONCE 0
7707 static int pic_labels_used;
/* Fill in the label name that should be used for a pc thunk for
the given register.  */
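/* E.g. for %ebx this produces "__i686.get_pc_thunk.bx" when
   USE_HIDDEN_LINKONCE, and an internal label of the form "LPR3"
   otherwise (illustrative; the exact spelling depends on
   ASM_GENERATE_INTERNAL_LABEL).  */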
7713 get_pc_thunk_name (char name[32], unsigned int regno)
7715 gcc_assert (!TARGET_64BIT);
7717 if (USE_HIDDEN_LINKONCE)
7718 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7720 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
7724 /* This function generates code for -fpic that loads %ebx with
7725 the return address of the caller and then returns. */
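/* The body of each emitted thunk is simply, e.g. for %ebx
   (an illustrative rendering, AT&T syntax):

       __i686.get_pc_thunk.bx:
               movl    (%esp), %ebx
               ret  */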
7728 ix86_code_end (void)
7733 for (regno = 0; regno < 8; ++regno)
7738 if (! ((pic_labels_used >> regno) & 1))
7741 get_pc_thunk_name (name, regno);
7743 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
7744 get_identifier (name),
7745 build_function_type (void_type_node, void_list_node));
7746 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
7747 NULL_TREE, void_type_node);
7748 TREE_PUBLIC (decl) = 1;
7749 TREE_STATIC (decl) = 1;
7754 switch_to_section (darwin_sections[text_coal_section]);
7755 fputs ("\t.weak_definition\t", asm_out_file);
7756 assemble_name (asm_out_file, name);
7757 fputs ("\n\t.private_extern\t", asm_out_file);
7758 assemble_name (asm_out_file, name);
7759 fputs ("\n", asm_out_file);
7760 ASM_OUTPUT_LABEL (asm_out_file, name);
7761 DECL_WEAK (decl) = 1;
7765 if (USE_HIDDEN_LINKONCE)
7767 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
7769 (*targetm.asm_out.unique_section) (decl, 0);
7770 switch_to_section (get_named_section (decl, NULL, 0));
7772 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7773 fputs ("\t.hidden\t", asm_out_file);
7774 assemble_name (asm_out_file, name);
7775 putc ('\n', asm_out_file);
7776 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7780 switch_to_section (text_section);
7781 ASM_OUTPUT_LABEL (asm_out_file, name);
7784 DECL_INITIAL (decl) = make_node (BLOCK);
7785 current_function_decl = decl;
7786 init_function_start (decl);
7787 first_function_block_is_cold = false;
7788 /* Make sure unwind info is emitted for the thunk if needed. */
7789 final_start_function (emit_barrier (), asm_out_file, 1);
7791 xops[0] = gen_rtx_REG (Pmode, regno);
7792 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7793 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7794 output_asm_insn ("ret", xops);
7795 final_end_function ();
7796 init_insn_lengths ();
7797 free_after_compilation (cfun);
7799 current_function_decl = NULL;
7803 /* Emit code for the SET_GOT patterns. */
7806 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7812 if (TARGET_VXWORKS_RTP && flag_pic)
7814 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7815 xops[2] = gen_rtx_MEM (Pmode,
7816 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7817 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7819 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7820 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7821 an unadorned address. */
7822 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7823 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7824 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7828 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7830 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7832 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7835 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7838 output_asm_insn ("call\t%a2", xops);
7839 #ifdef DWARF2_UNWIND_INFO
/* The call to the next label acts as a push.  */
7841 if (dwarf2out_do_frame ())
7845 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7846 gen_rtx_PLUS (Pmode,
7849 RTX_FRAME_RELATED_P (insn) = 1;
7850 dwarf2out_frame_debug (insn, true);
7857 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7858 is what will be referenced by the Mach-O PIC subsystem. */
7860 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7863 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7864 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7868 output_asm_insn ("pop%z0\t%0", xops);
7869 #ifdef DWARF2_UNWIND_INFO
/* The pop clobbers DEST, but does not count as restoring it
for unwind info purposes.  */
7872 if (dwarf2out_do_frame ())
7876 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
7877 dwarf2out_frame_debug (insn, true);
7878 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7879 gen_rtx_PLUS (Pmode,
7882 RTX_FRAME_RELATED_P (insn) = 1;
7883 dwarf2out_frame_debug (insn, true);
7892 get_pc_thunk_name (name, REGNO (dest));
7893 pic_labels_used |= 1 << REGNO (dest);
7895 #ifdef DWARF2_UNWIND_INFO
/* Ensure all queued register saves are flushed before the
call.  */
7902 insn = emit_barrier ();
7904 dwarf2out_frame_debug (insn, false);
7907 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7908 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7909 output_asm_insn ("call\t%X2", xops);
7910 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7911 is what will be referenced by the Mach-O PIC subsystem. */
7914 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7916 targetm.asm_out.internal_label (asm_out_file, "L",
7917 CODE_LABEL_NUMBER (label));
7924 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7925 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7927 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
7932 /* Generate an "push" pattern for input ARG. */
7937 if (ix86_cfa_state->reg == stack_pointer_rtx)
7938 ix86_cfa_state->offset += UNITS_PER_WORD;
7940 return gen_rtx_SET (VOIDmode,
7942 gen_rtx_PRE_DEC (Pmode,
7943 stack_pointer_rtx)),
7947 /* Return >= 0 if there is an unused call-clobbered register available
7948 for the entire function. */
7951 ix86_select_alt_pic_regnum (void)
7953 if (current_function_is_leaf && !crtl->profile
7954 && !ix86_current_function_calls_tls_descriptor)
7957 /* Can't use the same register for both PIC and DRAP. */
7959 drap = REGNO (crtl->drap_reg);
7962 for (i = 2; i >= 0; --i)
7963 if (i != drap && !df_regs_ever_live_p (i))
7967 return INVALID_REGNUM;
7970 /* Return 1 if we need to save REGNO. */
7972 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7974 if (pic_offset_table_rtx
7975 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7976 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7978 || crtl->calls_eh_return
7979 || crtl->uses_const_pool))
7981 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7986 if (crtl->calls_eh_return && maybe_eh_return)
7991 unsigned test = EH_RETURN_DATA_REGNO (i);
7992 if (test == INVALID_REGNUM)
7999 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
8002 return (df_regs_ever_live_p (regno)
8003 && !call_used_regs[regno]
8004 && !fixed_regs[regno]
8005 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
/* Return the number of saved general-purpose registers.  */
8011 ix86_nsaved_regs (void)
8016 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8017 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
/* Return the number of saved SSE registers.  */
8025 ix86_nsaved_sseregs (void)
8030 if (ix86_cfun_abi () != MS_ABI)
8032 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8033 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8038 /* Given FROM and TO register numbers, say whether this elimination is
8039 allowed. If stack alignment is needed, we can only replace argument
8040 pointer with hard frame pointer, or replace frame pointer with stack
8041 pointer. Otherwise, frame pointer elimination is automatically
8042 handled and all other eliminations are valid. */
8045 ix86_can_eliminate (const int from, const int to)
8047 if (stack_realign_fp)
8048 return ((from == ARG_POINTER_REGNUM
8049 && to == HARD_FRAME_POINTER_REGNUM)
8050 || (from == FRAME_POINTER_REGNUM
8051 && to == STACK_POINTER_REGNUM));
8053 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
8056 /* Return the offset between two registers, one to be eliminated, and the other
8057 its replacement, at the start of a routine. */
8060 ix86_initial_elimination_offset (int from, int to)
8062 struct ix86_frame frame;
8063 ix86_compute_frame_layout (&frame);
8065 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
8066 return frame.hard_frame_pointer_offset;
8067 else if (from == FRAME_POINTER_REGNUM
8068 && to == HARD_FRAME_POINTER_REGNUM)
8069 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
8072 gcc_assert (to == STACK_POINTER_REGNUM);
8074 if (from == ARG_POINTER_REGNUM)
8075 return frame.stack_pointer_offset;
8077 gcc_assert (from == FRAME_POINTER_REGNUM);
8078 return frame.stack_pointer_offset - frame.frame_pointer_offset;
8082 /* In a dynamically-aligned function, we can't know the offset from
8083 stack pointer to frame pointer, so we must ensure that setjmp
8084 eliminates fp against the hard fp (%ebp) rather than trying to
8085 index from %esp up to the top of the frame across a gap that is
8086 of unknown (at compile-time) size. */
8088 ix86_builtin_setjmp_frame_value (void)
8090 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
/* Fill the structure ix86_frame describing the frame of the currently
computed function.  */
8096 ix86_compute_frame_layout (struct ix86_frame *frame)
8098 unsigned int stack_alignment_needed;
8099 HOST_WIDE_INT offset;
8100 unsigned int preferred_alignment;
8101 HOST_WIDE_INT size = get_frame_size ();
8103 frame->nregs = ix86_nsaved_regs ();
8104 frame->nsseregs = ix86_nsaved_sseregs ();
8106 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
8107 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
/* The MS ABI seems to require stack alignment to always be 16 except for function
8111 if (ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
8113 preferred_alignment = 16;
8114 stack_alignment_needed = 16;
8115 crtl->preferred_stack_boundary = 128;
8116 crtl->stack_alignment_needed = 128;
8119 gcc_assert (!size || stack_alignment_needed);
8120 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
8121 gcc_assert (preferred_alignment <= stack_alignment_needed);
8123 /* During reload iteration the number of registers saved can change.
8124 Recompute the value as needed. Do not recompute when the number of registers
8125 didn't change, as reload calls the function multiple times and does not
8126 expect the decision to change within a single iteration. */
8127 if (!optimize_function_for_size_p (cfun)
8128 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
8130 int count = frame->nregs;
8131 struct cgraph_node *node = cgraph_node (current_function_decl);
8133 cfun->machine->use_fast_prologue_epilogue_nregs = count;
8134 /* The fast prologue uses move instead of push to save registers. This
8135 is significantly longer, but also executes faster as modern hardware
8136 can execute the moves in parallel, but can't do that for push/pop.
8138 Be careful about choosing which prologue to emit: when the function takes
8139 many instructions to execute, we may use the slow version, as well as
8140 when the function is known to be outside a hot spot (this is known with
8141 feedback only). Weight the size of the function by the number of registers
8142 to save, as it is cheap to use one or two push instructions but very
8143 slow to use many of them. */
8145 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
8146 if (node->frequency < NODE_FREQUENCY_NORMAL
8147 || (flag_branch_probabilities
8148 && node->frequency < NODE_FREQUENCY_HOT))
8149 cfun->machine->use_fast_prologue_epilogue = false;
8151 cfun->machine->use_fast_prologue_epilogue
8152 = !expensive_function_p (count);
8154 if (TARGET_PROLOGUE_USING_MOVE
8155 && cfun->machine->use_fast_prologue_epilogue)
8156 frame->save_regs_using_mov = true;
8158 frame->save_regs_using_mov = false;
8160 /* Skip return address. */
8161 offset = UNITS_PER_WORD;
8163 /* Skip pushed static chain. */
8164 if (ix86_static_chain_on_stack)
8165 offset += UNITS_PER_WORD;
8167 /* Skip saved base pointer. */
8168 if (frame_pointer_needed)
8169 offset += UNITS_PER_WORD;
8171 frame->hard_frame_pointer_offset = offset;
8173 /* Align the offset, because the realigned frame starts from
8175 if (stack_realign_fp)
8176 offset = (offset + stack_alignment_needed - 1) & -stack_alignment_needed;
8178 /* Register save area */
8179 offset += frame->nregs * UNITS_PER_WORD;
8181 /* Align SSE reg save area. */
8182 if (frame->nsseregs)
8183 frame->padding0 = ((offset + 16 - 1) & -16) - offset;
8185 frame->padding0 = 0;
8187 /* SSE register save area. */
8188 offset += frame->padding0 + frame->nsseregs * 16;
8191 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
8192 offset += frame->va_arg_size;
8194 /* Align start of frame for local function. */
8195 frame->padding1 = ((offset + stack_alignment_needed - 1)
8196 & -stack_alignment_needed) - offset;
8198 offset += frame->padding1;
8200 /* Frame pointer points here. */
8201 frame->frame_pointer_offset = offset;
8205 /* Add the outgoing arguments area. It can be skipped if we eliminated
8206 all the function calls as dead code.
8207 Skipping is however impossible when the function calls alloca. The alloca
8208 expander assumes that the last crtl->outgoing_args_size bytes
8209 of the stack frame are unused. */
8210 if (ACCUMULATE_OUTGOING_ARGS
8211 && (!current_function_is_leaf || cfun->calls_alloca
8212 || ix86_current_function_calls_tls_descriptor))
8214 offset += crtl->outgoing_args_size;
8215 frame->outgoing_arguments_size = crtl->outgoing_args_size;
8218 frame->outgoing_arguments_size = 0;
8220 /* Align stack boundary. Only needed if we're calling another function
8222 if (!current_function_is_leaf || cfun->calls_alloca
8223 || ix86_current_function_calls_tls_descriptor)
8224 frame->padding2 = ((offset + preferred_alignment - 1)
8225 & -preferred_alignment) - offset;
8227 frame->padding2 = 0;
8229 offset += frame->padding2;
8231 /* We've reached end of stack frame. */
8232 frame->stack_pointer_offset = offset;
8234 /* Size prologue needs to allocate. */
8235 frame->to_allocate =
8236 (size + frame->padding1 + frame->padding2
8237 + frame->outgoing_arguments_size + frame->va_arg_size);
8239 if ((!frame->to_allocate && frame->nregs <= 1)
8240 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
8241 frame->save_regs_using_mov = false;
8243 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8244 && current_function_sp_is_unchanging
8245 && current_function_is_leaf
8246 && !ix86_current_function_calls_tls_descriptor)
8248 frame->red_zone_size = frame->to_allocate;
8249 if (frame->save_regs_using_mov)
8250 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
8251 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
8252 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
8255 frame->red_zone_size = 0;
8256 frame->to_allocate -= frame->red_zone_size;
8257 frame->stack_pointer_offset -= frame->red_zone_size;
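/* An informal sketch of the layout computed above, growing downward
   from the incoming stack pointer (any of the padding or save areas
   may be empty):

        [ return address         ]  <- offset 0
        [ static chain (if any)  ]
        [ saved %ebp (if any)    ]  <- hard_frame_pointer_offset
        [ saved GP registers     ]
        [ padding0               ]
        [ saved SSE registers    ]
        [ va_arg save area       ]
        [ padding1               ]  <- frame_pointer_offset
        [ local variables        ]
        [ outgoing arguments     ]
        [ padding2               ]  <- stack_pointer_offset  */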
8260 /* Emit code to save registers in the prologue. */
8263 ix86_emit_save_regs (void)
8268 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
8269 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8271 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
8272 RTX_FRAME_RELATED_P (insn) = 1;
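/* Illustrative only: for a 32-bit function that must preserve %ebx
   and %esi, the loop above emits something like

        pushl   %esi
        pushl   %ebx

   (descending regno order), each insn marked frame-related so that
   the CFI machinery records the saves.  */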
8276 /* Emit code to save registers using MOV insns. The first register
8277 is saved at POINTER + OFFSET. */
8279 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8284 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8285 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8287 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
8289 gen_rtx_REG (Pmode, regno));
8290 RTX_FRAME_RELATED_P (insn) = 1;
8291 offset += UNITS_PER_WORD;
8295 /* Emit code to save SSE registers using MOV insns. The first register
8296 is saved at POINTER + OFFSET. */
8298 ix86_emit_save_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8304 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8305 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8307 mem = adjust_address (gen_rtx_MEM (TImode, pointer), TImode, offset);
8308 set_mem_align (mem, 128);
8309 insn = emit_move_insn (mem, gen_rtx_REG (TImode, regno));
8310 RTX_FRAME_RELATED_P (insn) = 1;
8315 static GTY(()) rtx queued_cfa_restores;
8317 /* Add a REG_CFA_RESTORE note for REG to INSN, or queue it until the next
8318 stack manipulation insn. Don't add it if the previously
8319 saved value will be left untouched within the stack red zone till return,
8320 as unwinders can find the same value in the register and
8324 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT red_offset)
8327 && !TARGET_64BIT_MS_ABI
8328 && red_offset + RED_ZONE_SIZE >= 0
8329 && crtl->args.pops_args < 65536)
8334 add_reg_note (insn, REG_CFA_RESTORE, reg);
8335 RTX_FRAME_RELATED_P (insn) = 1;
8339 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
8342 /* Add queued REG_CFA_RESTORE notes if any to INSN. */
8345 ix86_add_queued_cfa_restore_notes (rtx insn)
8348 if (!queued_cfa_restores)
8350 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
8352 XEXP (last, 1) = REG_NOTES (insn);
8353 REG_NOTES (insn) = queued_cfa_restores;
8354 queued_cfa_restores = NULL_RTX;
8355 RTX_FRAME_RELATED_P (insn) = 1;
8358 /* Expand prologue or epilogue stack adjustment.
8359 The pattern exists to put a dependency on all ebp-based memory accesses.
8360 STYLE should be negative if instructions should be marked as frame related,
8361 zero if the %r11 register is live and cannot be freely used, and positive
8365 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
8366 int style, bool set_cfa)
8371 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
8372 else if (x86_64_immediate_operand (offset, DImode))
8373 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
8377 /* r11 is used by indirect sibcall return as well, set before the
8378 epilogue and used after the epilogue. ATM indirect sibcall
8379 shouldn't be used together with huge frame sizes in one
8380 function because of the frame_size check in sibcall.c. */
8382 r11 = gen_rtx_REG (DImode, R11_REG);
8383 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
8385 RTX_FRAME_RELATED_P (insn) = 1;
8386 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
8391 ix86_add_queued_cfa_restore_notes (insn);
8397 gcc_assert (ix86_cfa_state->reg == src);
8398 ix86_cfa_state->offset += INTVAL (offset);
8399 ix86_cfa_state->reg = dest;
8401 r = gen_rtx_PLUS (Pmode, src, offset);
8402 r = gen_rtx_SET (VOIDmode, dest, r);
8403 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
8404 RTX_FRAME_RELATED_P (insn) = 1;
8407 RTX_FRAME_RELATED_P (insn) = 1;
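/* A hedged example of the expansion: a small constant adjustment
   becomes a single insn such as

        subl    $16, %esp

   while a 64-bit offset that is not a valid immediate is first
   loaded into %r11 and then added, as the code above shows.  */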
8410 /* Find an available register to be used as the dynamic realign argument
8411 pointer register. Such a register will be written in the prologue and
8412 used at the beginning of the body, so it must not be
8413 1. a parameter passing register.
8415 We reuse the static-chain register if it is available. Otherwise, we
8416 use DI for i386 and R13 for x86-64. We chose R13 since it has
8419 Return: the regno of the chosen register. */
8422 find_drap_reg (void)
8424 tree decl = cfun->decl;
8428 /* Use R13 for a nested function or a function that needs a static chain.
8429 Since a function with a tail call may use any caller-saved
8430 register in the epilogue, DRAP must not use a caller-saved
8431 register in that case. */
8432 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8439 /* Use DI for a nested function or a function that needs a static chain.
8440 Since a function with a tail call may use any caller-saved
8441 register in the epilogue, DRAP must not use a caller-saved
8442 register in that case. */
8443 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8446 /* Reuse the static chain register if it isn't used for parameter
8448 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
8449 && !lookup_attribute ("fastcall",
8450 TYPE_ATTRIBUTES (TREE_TYPE (decl)))
8451 && !lookup_attribute ("thiscall",
8452 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
8459 /* Return minimum incoming stack alignment. */
8462 ix86_minimum_incoming_stack_boundary (bool sibcall)
8464 unsigned int incoming_stack_boundary;
8466 /* Prefer the one specified at command line. */
8467 if (ix86_user_incoming_stack_boundary)
8468 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
8469 /* In 32bit mode, use MIN_STACK_BOUNDARY for the incoming stack boundary
8470 if -mstackrealign is used, this isn't a sibcall check, and the
8471 estimated stack alignment is 128bit. */
8474 && ix86_force_align_arg_pointer
8475 && crtl->stack_alignment_estimated == 128)
8476 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8478 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
8480 /* Incoming stack alignment can be changed on individual functions
8481 via the force_align_arg_pointer attribute. We use the smallest
8482 incoming stack boundary. */
8483 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
8484 && lookup_attribute (ix86_force_align_arg_pointer_string,
8485 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
8486 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8488 /* The incoming stack frame has to be aligned at least at
8489 parm_stack_boundary. */
8490 if (incoming_stack_boundary < crtl->parm_stack_boundary)
8491 incoming_stack_boundary = crtl->parm_stack_boundary;
8493 /* The stack at the entry of main is aligned by the runtime. We use the
8494 smallest incoming stack boundary. */
8495 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
8496 && DECL_NAME (current_function_decl)
8497 && MAIN_NAME_P (DECL_NAME (current_function_decl))
8498 && DECL_FILE_SCOPE_P (current_function_decl))
8499 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
8501 return incoming_stack_boundary;
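/* An informal summary of the checks above, in the order applied:
   1. a boundary given on the command line wins;
   2. otherwise 32bit -mstackrealign with a 128bit alignment
      estimate uses MIN_STACK_BOUNDARY, else the target default;
   3. the force_align_arg_pointer attribute lowers the result to
      MIN_STACK_BOUNDARY;
   4. the result is raised to crtl->parm_stack_boundary if smaller;
   5. main at file scope is capped at MAIN_STACK_BOUNDARY.  */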
8504 /* Update incoming stack boundary and estimated stack alignment. */
8507 ix86_update_stack_boundary (void)
8509 ix86_incoming_stack_boundary
8510 = ix86_minimum_incoming_stack_boundary (false);
8512 /* x86_64 vararg needs 16byte stack alignment for register save
8516 && crtl->stack_alignment_estimated < 128)
8517 crtl->stack_alignment_estimated = 128;
8520 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
8521 needed or an rtx for DRAP otherwise. */
8524 ix86_get_drap_rtx (void)
8526 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
8527 crtl->need_drap = true;
8529 if (stack_realign_drap)
8531 /* Assign DRAP to vDRAP and return vDRAP. */
8532 unsigned int regno = find_drap_reg ();
8537 arg_ptr = gen_rtx_REG (Pmode, regno);
8538 crtl->drap_reg = arg_ptr;
8541 drap_vreg = copy_to_reg (arg_ptr);
8545 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
8548 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
8549 RTX_FRAME_RELATED_P (insn) = 1;
8557 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
8560 ix86_internal_arg_pointer (void)
8562 return virtual_incoming_args_rtx;
8565 /* Finalize the stack_realign_needed flag, which will guide the prologue/epilogue
8566 to be generated in the correct form. */
8568 ix86_finalize_stack_realign_flags (void)
8570 /* Check whether stack realignment is really needed after reload, and
8571 store the result in cfun. */
8572 unsigned int incoming_stack_boundary
8573 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
8574 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
8575 unsigned int stack_realign = (incoming_stack_boundary
8576 < (current_function_is_leaf
8577 ? crtl->max_used_stack_slot_alignment
8578 : crtl->stack_alignment_needed));
8580 if (crtl->stack_realign_finalized)
8582 /* After stack_realign_needed is finalized, we can no longer
8584 gcc_assert (crtl->stack_realign_needed == stack_realign);
8588 crtl->stack_realign_needed = stack_realign;
8589 crtl->stack_realign_finalized = true;
8593 /* Expand the prologue into a bunch of separate insns. */
8596 ix86_expand_prologue (void)
8600 struct ix86_frame frame;
8601 HOST_WIDE_INT allocate;
8602 int gen_frame_pointer = frame_pointer_needed;
8604 ix86_finalize_stack_realign_flags ();
8606 /* DRAP should not coexist with stack_realign_fp */
8607 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
8609 /* Initialize CFA state for before the prologue. */
8610 ix86_cfa_state->reg = stack_pointer_rtx;
8611 ix86_cfa_state->offset = INCOMING_FRAME_SP_OFFSET;
8613 ix86_compute_frame_layout (&frame);
8615 if (ix86_function_ms_hook_prologue (current_function_decl))
8619 /* Make sure the function starts with
8620 8b ff movl.s %edi,%edi
8622 8b ec movl.s %esp,%ebp
8624 This matches the hookable function prologue in Win32 API
8625 functions in Microsoft Windows XP Service Pack 2 and newer.
8626 Wine uses this to enable Windows apps to hook the Win32 API
8627 functions provided by Wine. */
8628 insn = emit_insn (gen_vswapmov (gen_rtx_REG (SImode, DI_REG),
8629 gen_rtx_REG (SImode, DI_REG)));
8630 push = emit_insn (gen_push (hard_frame_pointer_rtx));
8631 mov = emit_insn (gen_vswapmov (hard_frame_pointer_rtx,
8632 stack_pointer_rtx));
8634 if (frame_pointer_needed && !(crtl->drap_reg
8635 && crtl->stack_realign_needed))
8637 /* The push %ebp and movl.s %esp, %ebp already set up
8638 the frame pointer. No need to do this again. */
8639 gen_frame_pointer = 0;
8640 RTX_FRAME_RELATED_P (push) = 1;
8641 RTX_FRAME_RELATED_P (mov) = 1;
8642 if (ix86_cfa_state->reg == stack_pointer_rtx)
8643 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8646 /* If the frame pointer is not needed, pop %ebp again. This
8647 could be optimized for cases where ebp needs to be backed up
8648 for some other reason. If stack realignment is needed, pop
8649 the base pointer again, align the stack, and later regenerate
8650 the frame pointer setup. The frame pointer generated by the
8651 hook prologue is not aligned, so it can't be used. */
8652 insn = emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8655 /* The first insn of a function that accepts its static chain on the
8656 stack is to push the register that would be filled in by a direct
8657 call. This insn will be skipped by the trampoline. */
8658 if (ix86_static_chain_on_stack)
8662 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
8663 emit_insn (gen_blockage ());
8665 /* We don't want to interpret this push insn as a register save,
8666 only as a stack adjustment. The real copy of the register as
8667 a save will be done later, if needed. */
8668 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
8669 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8670 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8671 RTX_FRAME_RELATED_P (insn) = 1;
8674 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
8675 DRAP is needed and stack realignment is really needed after reload. */
8676 if (crtl->drap_reg && crtl->stack_realign_needed)
8679 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8680 int param_ptr_offset = UNITS_PER_WORD;
8682 if (ix86_static_chain_on_stack)
8683 param_ptr_offset += UNITS_PER_WORD;
8684 if (!call_used_regs[REGNO (crtl->drap_reg)])
8685 param_ptr_offset += UNITS_PER_WORD;
8687 gcc_assert (stack_realign_drap);
8689 /* Grab the argument pointer. */
8690 x = plus_constant (stack_pointer_rtx, param_ptr_offset);
8693 /* Only need to push parameter pointer reg if it is caller
8695 if (!call_used_regs[REGNO (crtl->drap_reg)])
8697 /* Push arg pointer reg */
8698 insn = emit_insn (gen_push (y));
8699 RTX_FRAME_RELATED_P (insn) = 1;
8702 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
8703 RTX_FRAME_RELATED_P (insn) = 1;
8704 ix86_cfa_state->reg = crtl->drap_reg;
8706 /* Align the stack. */
8707 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8709 GEN_INT (-align_bytes)));
8710 RTX_FRAME_RELATED_P (insn) = 1;
8712 /* Replicate the return address on the stack so that the return
8713 address can be reached via the (argp - 1) slot. This is needed
8714 to implement the macro RETURN_ADDR_RTX and the intrinsic function
8715 expand_builtin_return_addr etc. */
8717 x = gen_frame_mem (Pmode,
8718 plus_constant (x, -UNITS_PER_WORD));
8719 insn = emit_insn (gen_push (x));
8720 RTX_FRAME_RELATED_P (insn) = 1;
8723 /* Note: AT&T enter does NOT have reversed args. Enter is probably
8724 slower on all targets. Also sdb doesn't like it. */
8726 if (gen_frame_pointer)
8728 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
8729 RTX_FRAME_RELATED_P (insn) = 1;
8731 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8732 RTX_FRAME_RELATED_P (insn) = 1;
8734 if (ix86_cfa_state->reg == stack_pointer_rtx)
8735 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8738 if (stack_realign_fp)
8740 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8741 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
8743 /* Align the stack. */
8744 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8746 GEN_INT (-align_bytes)));
8747 RTX_FRAME_RELATED_P (insn) = 1;
8750 allocate = frame.to_allocate + frame.nsseregs * 16 + frame.padding0;
8752 if (!frame.save_regs_using_mov)
8753 ix86_emit_save_regs ();
8755 allocate += frame.nregs * UNITS_PER_WORD;
8757 /* When using the red zone we may start register saving before allocating
8758 the stack frame, saving one cycle of the prologue. However, I will
8759 avoid doing this if I am going to have to probe the stack, since
8760 at least on x86_64 the stack probe can turn into a call that clobbers
8761 a red zone location. */
8762 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
8763 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
8764 ix86_emit_save_regs_using_mov ((frame_pointer_needed
8765 && !crtl->stack_realign_needed)
8766 ? hard_frame_pointer_rtx
8767 : stack_pointer_rtx,
8768 -frame.nregs * UNITS_PER_WORD);
8772 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
8773 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8774 GEN_INT (-allocate), -1,
8775 ix86_cfa_state->reg == stack_pointer_rtx);
8778 rtx eax = gen_rtx_REG (Pmode, AX_REG);
8782 if (cfun->machine->call_abi == MS_ABI)
8785 eax_live = ix86_eax_live_at_start_p ();
8789 emit_insn (gen_push (eax));
8790 allocate -= UNITS_PER_WORD;
8793 emit_move_insn (eax, GEN_INT (allocate));
8796 insn = gen_allocate_stack_worker_64 (eax, eax);
8798 insn = gen_allocate_stack_worker_32 (eax, eax);
8799 insn = emit_insn (insn);
8801 if (ix86_cfa_state->reg == stack_pointer_rtx)
8803 ix86_cfa_state->offset += allocate;
8804 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
8805 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8806 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8807 RTX_FRAME_RELATED_P (insn) = 1;
8812 if (frame_pointer_needed)
8813 t = plus_constant (hard_frame_pointer_rtx,
8816 - frame.nregs * UNITS_PER_WORD);
8818 t = plus_constant (stack_pointer_rtx, allocate);
8819 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8823 if (frame.save_regs_using_mov
8824 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8825 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8827 if (!frame_pointer_needed
8828 || !(frame.to_allocate + frame.padding0)
8829 || crtl->stack_realign_needed)
8830 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8832 + frame.nsseregs * 16 + frame.padding0);
8834 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8835 -frame.nregs * UNITS_PER_WORD);
8837 if (!frame_pointer_needed
8838 || !(frame.to_allocate + frame.padding0)
8839 || crtl->stack_realign_needed)
8840 ix86_emit_save_sse_regs_using_mov (stack_pointer_rtx,
8843 ix86_emit_save_sse_regs_using_mov (hard_frame_pointer_rtx,
8844 - frame.nregs * UNITS_PER_WORD
8845 - frame.nsseregs * 16
8848 pic_reg_used = false;
8849 if (pic_offset_table_rtx
8850 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8853 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8855 if (alt_pic_reg_used != INVALID_REGNUM)
8856 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8858 pic_reg_used = true;
8865 if (ix86_cmodel == CM_LARGE_PIC)
8867 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8868 rtx label = gen_label_rtx ();
8870 LABEL_PRESERVE_P (label) = 1;
8871 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8872 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8873 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8874 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8875 pic_offset_table_rtx, tmp_reg));
8878 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8881 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
8884 /* In the pic_reg_used case, make sure that the GOT load isn't deleted
8885 when mcount needs it. Blockage to avoid call movement across mcount
8886 call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
8888 if (crtl->profile && pic_reg_used)
8889 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8891 if (crtl->drap_reg && !crtl->stack_realign_needed)
8893 /* vDRAP is set up, but after reload it turns out stack realignment
8894 isn't necessary; here we emit prologue code to set up DRAP
8895 without the stack realignment adjustment. */
8897 int drap_bp_offset = UNITS_PER_WORD * 2;
8899 if (ix86_static_chain_on_stack)
8900 drap_bp_offset += UNITS_PER_WORD;
8901 x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8902 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8905 /* Prevent instructions from being scheduled into the register save push
8906 sequence when access to the red zone area is done through the frame pointer.
8907 The offset between the frame pointer and the stack pointer is calculated
8908 relative to the value of the stack pointer at the end of the function
8909 prologue, and moving instructions that access the red zone area via the
8910 frame pointer inside the push sequence violates this assumption. */
8911 if (frame_pointer_needed && frame.red_zone_size)
8912 emit_insn (gen_memory_blockage ());
8914 /* Emit cld instruction if stringops are used in the function. */
8915 if (TARGET_CLD && ix86_current_function_needs_cld)
8916 emit_insn (gen_cld ());
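/* For orientation, a typical 32-bit frame-pointer prologue produced
   by this function looks like the following (illustrative only;
   details vary with the frame layout and tuning):

        pushl   %ebp
        movl    %esp, %ebp
        pushl   %ebx            ; push-based register saves
        subl    $N, %esp        ; N = frame.to_allocate

   (mov-based saves, stack realignment and DRAP setup change the
   shape considerably).  */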
8919 /* Emit code to restore REG using a POP insn. */
8922 ix86_emit_restore_reg_using_pop (rtx reg, HOST_WIDE_INT red_offset)
8924 rtx insn = emit_insn (ix86_gen_pop1 (reg));
8926 if (ix86_cfa_state->reg == crtl->drap_reg
8927 && REGNO (reg) == REGNO (crtl->drap_reg))
8929 /* Previously we'd represented the CFA as an expression
8930 like *(%ebp - 8). We've just popped that value from
8931 the stack, which means we need to reset the CFA to
8932 the drap register. This will remain until we restore
8933 the stack pointer. */
8934 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8935 RTX_FRAME_RELATED_P (insn) = 1;
8939 if (ix86_cfa_state->reg == stack_pointer_rtx)
8941 ix86_cfa_state->offset -= UNITS_PER_WORD;
8942 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8943 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
8944 RTX_FRAME_RELATED_P (insn) = 1;
8947 /* When the frame pointer is the CFA, and we pop it, we are
8948 swapping back to the stack pointer as the CFA. This happens
8949 for stack frames that don't allocate other data, so we assume
8950 the stack pointer is now pointing at the return address, i.e.
8951 the function entry state, which makes the offset be 1 word. */
8952 else if (ix86_cfa_state->reg == hard_frame_pointer_rtx
8953 && reg == hard_frame_pointer_rtx)
8955 ix86_cfa_state->reg = stack_pointer_rtx;
8956 ix86_cfa_state->offset -= UNITS_PER_WORD;
8958 add_reg_note (insn, REG_CFA_DEF_CFA,
8959 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8960 GEN_INT (ix86_cfa_state->offset)));
8961 RTX_FRAME_RELATED_P (insn) = 1;
8964 ix86_add_cfa_restore_note (insn, reg, red_offset);
8967 /* Emit code to restore saved registers using POP insns. */
8970 ix86_emit_restore_regs_using_pop (HOST_WIDE_INT red_offset)
8974 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8975 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
8977 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno),
8979 red_offset += UNITS_PER_WORD;
8983 /* Emit code and notes for the LEAVE instruction. */
8986 ix86_emit_leave (HOST_WIDE_INT red_offset)
8988 rtx insn = emit_insn (ix86_gen_leave ());
8990 ix86_add_queued_cfa_restore_notes (insn);
8992 if (ix86_cfa_state->reg == hard_frame_pointer_rtx)
8994 ix86_cfa_state->reg = stack_pointer_rtx;
8995 ix86_cfa_state->offset -= UNITS_PER_WORD;
8997 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8998 copy_rtx (XVECEXP (PATTERN (insn), 0, 0)));
8999 RTX_FRAME_RELATED_P (insn) = 1;
9000 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx, red_offset);
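/* "leave" is equivalent to "movl %ebp, %esp; popl %ebp", which is
   why the code above switches the CFA back to the stack pointer
   with a one-word adjustment.  */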
9004 /* Emit code to restore saved registers using MOV insns. First register
9005 is restored from POINTER + OFFSET. */
9007 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
9008 HOST_WIDE_INT red_offset,
9009 int maybe_eh_return)
9012 rtx base_address = gen_rtx_MEM (Pmode, pointer);
9015 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9016 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
9018 rtx reg = gen_rtx_REG (Pmode, regno);
9020 /* Ensure that adjust_address won't be forced to produce pointer
9021 out of range allowed by x86-64 instruction set. */
9022 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
9026 r11 = gen_rtx_REG (DImode, R11_REG);
9027 emit_move_insn (r11, GEN_INT (offset));
9028 emit_insn (gen_adddi3 (r11, r11, pointer));
9029 base_address = gen_rtx_MEM (Pmode, r11);
9032 insn = emit_move_insn (reg,
9033 adjust_address (base_address, Pmode, offset));
9034 offset += UNITS_PER_WORD;
9036 if (ix86_cfa_state->reg == crtl->drap_reg
9037 && regno == REGNO (crtl->drap_reg))
9039 /* Previously we'd represented the CFA as an expression
9040 like *(%ebp - 8). We've just reloaded that value from
9041 the stack, which means we need to reset the CFA to
9042 the drap register. This will remain until we restore
9043 the stack pointer. */
9044 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
9045 RTX_FRAME_RELATED_P (insn) = 1;
9048 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
9050 red_offset += UNITS_PER_WORD;
9054 /* Emit code to restore saved SSE registers using MOV insns. The first
9055 register is restored from POINTER + OFFSET. */
9057 ix86_emit_restore_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
9058 HOST_WIDE_INT red_offset,
9059 int maybe_eh_return)
9062 rtx base_address = gen_rtx_MEM (TImode, pointer);
9065 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9066 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
9068 rtx reg = gen_rtx_REG (TImode, regno);
9070 /* Ensure that adjust_address won't be forced to produce pointer
9071 out of range allowed by x86-64 instruction set. */
9072 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
9076 r11 = gen_rtx_REG (DImode, R11_REG);
9077 emit_move_insn (r11, GEN_INT (offset));
9078 emit_insn (gen_adddi3 (r11, r11, pointer));
9079 base_address = gen_rtx_MEM (TImode, r11);
9082 mem = adjust_address (base_address, TImode, offset);
9083 set_mem_align (mem, 128);
9084 emit_move_insn (reg, mem);
9087 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
9093 /* Restore function stack, frame, and registers. */
9096 ix86_expand_epilogue (int style)
9099 struct ix86_frame frame;
9100 HOST_WIDE_INT offset, red_offset;
9101 struct machine_cfa_state cfa_state_save = *ix86_cfa_state;
9104 ix86_finalize_stack_realign_flags ();
9106 /* When stack is realigned, SP must be valid. */
9107 sp_valid = (!frame_pointer_needed
9108 || current_function_sp_is_unchanging
9109 || stack_realign_fp);
9111 ix86_compute_frame_layout (&frame);
9113 /* See the comment about red zone and frame
9114 pointer usage in ix86_expand_prologue. */
9115 if (frame_pointer_needed && frame.red_zone_size)
9116 emit_insn (gen_memory_blockage ());
9118 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
9119 gcc_assert (!using_drap || ix86_cfa_state->reg == crtl->drap_reg);
9121 /* Calculate start of saved registers relative to ebp. Special care
9122 must be taken for the normal return case of a function using
9123 eh_return: the eax and edx registers are marked as saved, but not
9124 restored along this path. */
9125 offset = frame.nregs;
9126 if (crtl->calls_eh_return && style != 2)
9128 offset *= -UNITS_PER_WORD;
9129 offset -= frame.nsseregs * 16 + frame.padding0;
9131 /* Calculate start of saved registers relative to esp on entry of the
9132 function. When realigning stack, this needs to be the most negative
9133 value possible at runtime. */
9134 red_offset = offset;
9136 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
9138 else if (stack_realign_fp)
9139 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
9141 if (ix86_static_chain_on_stack)
9142 red_offset -= UNITS_PER_WORD;
9143 if (frame_pointer_needed)
9144 red_offset -= UNITS_PER_WORD;
9146 /* If we're only restoring one register and sp is not valid then
9147 use a move instruction to restore the register, since it's
9148 less work than reloading sp and popping the register.
9150 The default code results in a stack adjustment using an add/lea instruction,
9151 while this code results in a LEAVE instruction (or its discrete equivalent),
9152 so it is profitable in some other cases as well. Especially when there
9153 are no registers to restore. We also use this code when TARGET_USE_LEAVE
9154 and there is exactly one register to pop. This heuristic may need some
9155 tuning in the future. */
9156 if ((!sp_valid && (frame.nregs + frame.nsseregs) <= 1)
9157 || (TARGET_EPILOGUE_USING_MOVE
9158 && cfun->machine->use_fast_prologue_epilogue
9159 && ((frame.nregs + frame.nsseregs) > 1
9160 || (frame.to_allocate + frame.padding0) != 0))
9161 || (frame_pointer_needed && !(frame.nregs + frame.nsseregs)
9162 && (frame.to_allocate + frame.padding0) != 0)
9163 || (frame_pointer_needed && TARGET_USE_LEAVE
9164 && cfun->machine->use_fast_prologue_epilogue
9165 && (frame.nregs + frame.nsseregs) == 1)
9166 || crtl->calls_eh_return)
9168 /* Restore registers. We can use ebp or esp to address the memory
9169 locations. If both are available, default to ebp, since offsets
9170 are known to be small. The only exception is esp pointing directly
9171 to the end of the block of saved registers, where we may simplify
9174 If we are realigning the stack with bp and sp, register restores can't
9175 be addressed by bp; sp must be used instead. */
9177 if (!frame_pointer_needed
9178 || (sp_valid && !(frame.to_allocate + frame.padding0))
9179 || stack_realign_fp)
9181 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9182 frame.to_allocate, red_offset,
9184 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
9186 + frame.nsseregs * 16
9189 + frame.nsseregs * 16
9190 + frame.padding0, style == 2);
9194 ix86_emit_restore_sse_regs_using_mov (hard_frame_pointer_rtx,
9197 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
9199 + frame.nsseregs * 16
9202 + frame.nsseregs * 16
9203 + frame.padding0, style == 2);
9206 red_offset -= offset;
9208 /* eh_return epilogues need %ecx added to the stack pointer. */
9211 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
9213 /* Stack realignment doesn't work with eh_return. */
9214 gcc_assert (!crtl->stack_realign_needed);
9215 /* Neither do regparm nested functions. */
9216 gcc_assert (!ix86_static_chain_on_stack);
9218 if (frame_pointer_needed)
9220 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
9221 tmp = plus_constant (tmp, UNITS_PER_WORD);
9222 tmp = emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
9224 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
9225 tmp = emit_move_insn (hard_frame_pointer_rtx, tmp);
9227 /* Note that we use SA as a temporary CFA, as the return
9228 address is at the proper place relative to it. We
9229 pretend this happens at the FP restore insn because
9230 prior to this insn the FP would be stored at the wrong
9231 offset relative to SA, and after this insn we have no
9232 other reasonable register to use for the CFA. We don't
9233 bother resetting the CFA to the SP for the duration of
9235 add_reg_note (tmp, REG_CFA_DEF_CFA,
9236 plus_constant (sa, UNITS_PER_WORD));
9237 ix86_add_queued_cfa_restore_notes (tmp);
9238 add_reg_note (tmp, REG_CFA_RESTORE, hard_frame_pointer_rtx);
9239 RTX_FRAME_RELATED_P (tmp) = 1;
9240 ix86_cfa_state->reg = sa;
9241 ix86_cfa_state->offset = UNITS_PER_WORD;
9243 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
9244 const0_rtx, style, false);
9248 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
9249 tmp = plus_constant (tmp, (frame.to_allocate
9250 + frame.nregs * UNITS_PER_WORD
9251 + frame.nsseregs * 16
9253 tmp = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
9254 ix86_add_queued_cfa_restore_notes (tmp);
9256 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9257 if (ix86_cfa_state->offset != UNITS_PER_WORD)
9259 ix86_cfa_state->offset = UNITS_PER_WORD;
9260 add_reg_note (tmp, REG_CFA_DEF_CFA,
9261 plus_constant (stack_pointer_rtx,
9263 RTX_FRAME_RELATED_P (tmp) = 1;
9267 else if (!frame_pointer_needed)
9268 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9269 GEN_INT (frame.to_allocate
9270 + frame.nregs * UNITS_PER_WORD
9271 + frame.nsseregs * 16
9273 style, !using_drap);
9274 /* If not an i386, mov & pop is faster than "leave". */
9275 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
9276 || !cfun->machine->use_fast_prologue_epilogue)
9277 ix86_emit_leave (red_offset);
9280 pro_epilogue_adjust_stack (stack_pointer_rtx,
9281 hard_frame_pointer_rtx,
9282 const0_rtx, style, !using_drap);
9284 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx, red_offset);
9289 /* The first step is to deallocate the stack frame so that we can
9292 If we realign the stack with the frame pointer, then the stack pointer
9293 can't be recovered via lea $offset(%bp), %sp, because
9294 there is a padding area between bp and sp for the realignment.
9295 "add $to_allocate, %sp" must be used instead. */
9298 gcc_assert (frame_pointer_needed);
9299 gcc_assert (!stack_realign_fp);
9300 pro_epilogue_adjust_stack (stack_pointer_rtx,
9301 hard_frame_pointer_rtx,
9302 GEN_INT (offset), style, false);
9303 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9306 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9307 GEN_INT (frame.nsseregs * 16
9311 else if (frame.to_allocate || frame.padding0 || frame.nsseregs)
9313 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9314 frame.to_allocate, red_offset,
9316 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9317 GEN_INT (frame.to_allocate
9318 + frame.nsseregs * 16
9319 + frame.padding0), style,
9320 !using_drap && !frame_pointer_needed);
9323 ix86_emit_restore_regs_using_pop (red_offset + frame.nsseregs * 16
9325 red_offset -= offset;
9327 if (frame_pointer_needed)
9329 /* Leave results in shorter dependency chains on CPUs that are
9330 able to grok it fast. */
9331 if (TARGET_USE_LEAVE)
9332 ix86_emit_leave (red_offset);
9335 /* If stack realignment really happened, recovering the stack
9336 pointer from the hard frame pointer is a must, if not using
9338 if (stack_realign_fp)
9339 pro_epilogue_adjust_stack (stack_pointer_rtx,
9340 hard_frame_pointer_rtx,
9341 const0_rtx, style, !using_drap);
9342 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx,
9350 int param_ptr_offset = UNITS_PER_WORD;
9353 gcc_assert (stack_realign_drap);
9355 if (ix86_static_chain_on_stack)
9356 param_ptr_offset += UNITS_PER_WORD;
9357 if (!call_used_regs[REGNO (crtl->drap_reg)])
9358 param_ptr_offset += UNITS_PER_WORD;
9360 insn = emit_insn ((*ix86_gen_add3) (stack_pointer_rtx,
9362 GEN_INT (-param_ptr_offset)));
9364 ix86_cfa_state->reg = stack_pointer_rtx;
9365 ix86_cfa_state->offset = param_ptr_offset;
9367 add_reg_note (insn, REG_CFA_DEF_CFA,
9368 gen_rtx_PLUS (Pmode, ix86_cfa_state->reg,
9369 GEN_INT (ix86_cfa_state->offset)));
9370 RTX_FRAME_RELATED_P (insn) = 1;
9372 if (!call_used_regs[REGNO (crtl->drap_reg)])
9373 ix86_emit_restore_reg_using_pop (crtl->drap_reg, -UNITS_PER_WORD);
9376 /* Remove the saved static chain from the stack. The use of ECX is
9377 merely as a scratch register, not as the actual static chain. */
9378 if (ix86_static_chain_on_stack)
9382 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9383 ix86_cfa_state->offset += UNITS_PER_WORD;
9385 r = gen_rtx_REG (Pmode, CX_REG);
9386 insn = emit_insn (ix86_gen_pop1 (r));
9388 r = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
9389 r = gen_rtx_SET (VOIDmode, stack_pointer_rtx, r);
9390 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9391 RTX_FRAME_RELATED_P (insn) = 1;
9394 /* Sibcall epilogues don't want a return instruction. */
9397 *ix86_cfa_state = cfa_state_save;
9401 if (crtl->args.pops_args && crtl->args.size)
9403 rtx popc = GEN_INT (crtl->args.pops_args);
9405 /* i386 can only pop 64K bytes. If asked to pop more, pop the return
9406 address, do an explicit add, and jump indirectly to the caller. */
9408 if (crtl->args.pops_args >= 65536)
9410 rtx ecx = gen_rtx_REG (SImode, CX_REG);
9413 /* There is no "pascal" calling convention in any 64bit ABI. */
9414 gcc_assert (!TARGET_64BIT);
9416 insn = emit_insn (gen_popsi1 (ecx));
9417 ix86_cfa_state->offset -= UNITS_PER_WORD;
9419 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9420 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
9421 add_reg_note (insn, REG_CFA_REGISTER,
9422 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
9423 RTX_FRAME_RELATED_P (insn) = 1;
9425 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9427 emit_jump_insn (gen_return_indirect_internal (ecx));
9430 emit_jump_insn (gen_return_pop_internal (popc));
9433 emit_jump_insn (gen_return_internal ());
9435 /* Restore the state back to the state from the prologue,
9436 so that it's correct for the next epilogue. */
9437 *ix86_cfa_state = cfa_state_save;
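/* For orientation, a common 32-bit frame-pointer epilogue emitted by
   this function ends as follows (illustrative only):

        movl    -4(%ebp), %ebx  ; or popl %ebx after an sp adjustment
        leave                   ; movl %ebp, %esp; popl %ebp
        ret                                                          */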
9440 /* Reset from the function's potential modifications. */
9443 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9444 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9446 if (pic_offset_table_rtx)
9447 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
9449 /* Mach-O doesn't support labels at the end of objects, so if
9450 it looks like we might want one, insert a NOP. */
9452 rtx insn = get_last_insn ();
9455 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
9456 insn = PREV_INSN (insn);
9460 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
9461 fputs ("\tnop\n", file);
9467 /* Extract the parts of an RTL expression that is a valid memory address
9468 for an instruction. Return 0 if the structure of the address is
9469 grossly off. Return -1 if the address contains ASHIFT, so it is not
9470 strictly valid, but is still used for computing the length of an lea instruction. */
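/* For example (an informal sketch of the decomposition), the address

        (plus (mult (reg %eax) (const_int 4))
              (plus (reg %ebx) (const_int 12)))

   decomposes into base = %ebx, index = %eax, scale = 4 and disp = 12,
   i.e. the operand written as "12(%ebx,%eax,4)".  */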
9473 ix86_decompose_address (rtx addr, struct ix86_address *out)
9475 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
9476 rtx base_reg, index_reg;
9477 HOST_WIDE_INT scale = 1;
9478 rtx scale_rtx = NULL_RTX;
9481 enum ix86_address_seg seg = SEG_DEFAULT;
9483 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
9485 else if (GET_CODE (addr) == PLUS)
9495 addends[n++] = XEXP (op, 1);
9498 while (GET_CODE (op) == PLUS);
9503 for (i = n; i >= 0; --i)
9506 switch (GET_CODE (op))
9511 index = XEXP (op, 0);
9512 scale_rtx = XEXP (op, 1);
9518 index = XEXP (op, 0);
9520 if (!CONST_INT_P (tmp))
9522 scale = INTVAL (tmp);
9523 if ((unsigned HOST_WIDE_INT) scale > 3)
9529 if (XINT (op, 1) == UNSPEC_TP
9530 && TARGET_TLS_DIRECT_SEG_REFS
9531 && seg == SEG_DEFAULT)
9532 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
9561 else if (GET_CODE (addr) == MULT)
9563 index = XEXP (addr, 0); /* index*scale */
9564 scale_rtx = XEXP (addr, 1);
9566 else if (GET_CODE (addr) == ASHIFT)
9568 /* We're called for lea too, which implements ashift on occasion. */
9569 index = XEXP (addr, 0);
9570 tmp = XEXP (addr, 1);
9571 if (!CONST_INT_P (tmp))
9573 scale = INTVAL (tmp);
9574 if ((unsigned HOST_WIDE_INT) scale > 3)
9580 disp = addr; /* displacement */
9582 /* Extract the integral value of scale. */
9585 if (!CONST_INT_P (scale_rtx))
9587 scale = INTVAL (scale_rtx);
9590 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
9591 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
9593 /* Avoid useless 0 displacement. */
9594 if (disp == const0_rtx && (base || index))
9597 /* Allow arg pointer and stack pointer as index if there is no scaling. */
9598 if (base_reg && index_reg && scale == 1
9599 && (index_reg == arg_pointer_rtx
9600 || index_reg == frame_pointer_rtx
9601 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
9604 tmp = base, base = index, index = tmp;
9605 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
9608 /* Special case: %ebp cannot be encoded as a base without a displacement.
9612 && (base_reg == hard_frame_pointer_rtx
9613 || base_reg == frame_pointer_rtx
9614 || base_reg == arg_pointer_rtx
9615 || (REG_P (base_reg)
9616 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
9617 || REGNO (base_reg) == R13_REG))))
9620 /* Special case: on the K6, [%esi] forces the instruction to be vector decoded.
9621 Avoid this by transforming to [%esi+0].
9622 Reload calls address legitimization without cfun defined, so we need
9623 to test cfun for being non-NULL. */
9624 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
9625 && base_reg && !index_reg && !disp
9627 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
9630 /* Special case: encode reg+reg instead of reg*2. */
9631 if (!base && index && scale == 2)
9632 base = index, base_reg = index_reg, scale = 1;
9634 /* Special case: scaling cannot be encoded without base or displacement. */
9635 if (!base && !disp && index && scale != 1)
9647 /* Return the cost of the memory address x.
9648 For i386, it is better to use a complex address than let gcc copy
9649 the address into a reg and make a new pseudo. But not if the address
9650 requires two regs - that would mean more pseudos with longer
9653 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
9655 struct ix86_address parts;
9657 int ok = ix86_decompose_address (x, &parts);
9661 if (parts.base && GET_CODE (parts.base) == SUBREG)
9662 parts.base = SUBREG_REG (parts.base);
9663 if (parts.index && GET_CODE (parts.index) == SUBREG)
9664 parts.index = SUBREG_REG (parts.index);
9666 /* Attempt to minimize number of registers in the address. */
9668 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
9670 && (!REG_P (parts.index)
9671 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
9675 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
9677 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
9678 && parts.base != parts.index)
9681 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
9682 since its predecode logic can't detect the length of instructions
9683 and it degenerates to vector decoding. Increase the cost of such
9684 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
9685 to split such addresses or even refuse such addresses at all.
9687 The following addressing modes are affected:
9692 The first and last cases may be avoidable by explicitly coding the zero into
9693 the memory address, but I don't have an AMD-K6 machine handy to check this
9697 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
9698 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
9699 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
9705 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
9706 this is used to form addresses of local data when -fPIC is in
9710 darwin_local_data_pic (rtx disp)
9712 return (GET_CODE (disp) == UNSPEC
9713 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
9716 /* Determine if a given RTX is a valid constant. We already know this
9717 satisfies CONSTANT_P. */
9720 legitimate_constant_p (rtx x)
9722 switch (GET_CODE (x))
9727 if (GET_CODE (x) == PLUS)
9729 if (!CONST_INT_P (XEXP (x, 1)))
9734 if (TARGET_MACHO && darwin_local_data_pic (x))
9737 /* Only some unspecs are valid as "constants". */
9738 if (GET_CODE (x) == UNSPEC)
9739 switch (XINT (x, 1))
9744 return TARGET_64BIT;
9747 x = XVECEXP (x, 0, 0);
9748 return (GET_CODE (x) == SYMBOL_REF
9749 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9751 x = XVECEXP (x, 0, 0);
9752 return (GET_CODE (x) == SYMBOL_REF
9753 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
9758 /* We must have drilled down to a symbol. */
9759 if (GET_CODE (x) == LABEL_REF)
9761 if (GET_CODE (x) != SYMBOL_REF)
9766 /* TLS symbols are never valid. */
9767 if (SYMBOL_REF_TLS_MODEL (x))
9770 /* DLLIMPORT symbols are never valid. */
9771 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
9772 && SYMBOL_REF_DLLIMPORT_P (x))
9777 if (GET_MODE (x) == TImode
9778 && x != CONST0_RTX (TImode)
9784 if (!standard_sse_constant_p (x))
9791 /* Otherwise we handle everything else in the move patterns. */
9795 /* Determine if it's legal to put X into the constant pool. This
9796 is not possible for the address of thread-local symbols, which
9797 is checked above. */
9800 ix86_cannot_force_const_mem (rtx x)
9802 /* We can always put integral constants and vectors in memory. */
9803 switch (GET_CODE (x))
9813 return !legitimate_constant_p (x);
9817 /* Nonzero if the constant value X is a legitimate general operand
9818 when generating PIC code. It is given that flag_pic is on and
9819 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
9822 legitimate_pic_operand_p (rtx x)
9826 switch (GET_CODE (x))
9829 inner = XEXP (x, 0);
9830 if (GET_CODE (inner) == PLUS
9831 && CONST_INT_P (XEXP (inner, 1)))
9832 inner = XEXP (inner, 0);
9834 /* Only some unspecs are valid as "constants". */
9835 if (GET_CODE (inner) == UNSPEC)
9836 switch (XINT (inner, 1))
9841 return TARGET_64BIT;
9843 x = XVECEXP (inner, 0, 0);
9844 return (GET_CODE (x) == SYMBOL_REF
9845 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9846 case UNSPEC_MACHOPIC_OFFSET:
9847 return legitimate_pic_address_disp_p (x);
9855 return legitimate_pic_address_disp_p (x);
9862 /* Determine if a given CONST RTX is a valid memory displacement
9866 legitimate_pic_address_disp_p (rtx disp)
9870 /* In 64bit mode we can allow direct addresses of symbols and labels
9871 when they are not dynamic symbols. */
9874 rtx op0 = disp, op1;
9876 switch (GET_CODE (disp))
9882 if (GET_CODE (XEXP (disp, 0)) != PLUS)
9884 op0 = XEXP (XEXP (disp, 0), 0);
9885 op1 = XEXP (XEXP (disp, 0), 1);
9886 if (!CONST_INT_P (op1)
9887 || INTVAL (op1) >= 16*1024*1024
9888 || INTVAL (op1) < -16*1024*1024)
9890 if (GET_CODE (op0) == LABEL_REF)
9892 if (GET_CODE (op0) != SYMBOL_REF)
9897 /* TLS references should always be enclosed in UNSPEC. */
9898 if (SYMBOL_REF_TLS_MODEL (op0))
9900 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
9901 && ix86_cmodel != CM_LARGE_PIC)
9909 if (GET_CODE (disp) != CONST)
9911 disp = XEXP (disp, 0);
9915 /* It is unsafe to allow PLUS expressions; this limits the allowed distance
9916 of GOT table references. We should not need these anyway. */
9917 if (GET_CODE (disp) != UNSPEC
9918 || (XINT (disp, 1) != UNSPEC_GOTPCREL
9919 && XINT (disp, 1) != UNSPEC_GOTOFF
9920 && XINT (disp, 1) != UNSPEC_PLTOFF))
9923 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
9924 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
9930 if (GET_CODE (disp) == PLUS)
9932 if (!CONST_INT_P (XEXP (disp, 1)))
9934 disp = XEXP (disp, 0);
9938 if (TARGET_MACHO && darwin_local_data_pic (disp))
9941 if (GET_CODE (disp) != UNSPEC)
9944 switch (XINT (disp, 1))
9949 /* We need to check for both symbols and labels because VxWorks loads
9950 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
9952 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9953 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
9955 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
9956 While the ABI also specifies a 32bit relocation, we don't produce it in the
9957 small PIC model at all. */
9958 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9959 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
9961 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
9963 case UNSPEC_GOTTPOFF:
9964 case UNSPEC_GOTNTPOFF:
9965 case UNSPEC_INDNTPOFF:
9968 disp = XVECEXP (disp, 0, 0);
9969 return (GET_CODE (disp) == SYMBOL_REF
9970 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
9972 disp = XVECEXP (disp, 0, 0);
9973 return (GET_CODE (disp) == SYMBOL_REF
9974 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
9976 disp = XVECEXP (disp, 0, 0);
9977 return (GET_CODE (disp) == SYMBOL_REF
9978 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
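/* A couple of concrete displacements accepted above in 32-bit PIC
   code (informal examples): (const (unspec [foo] UNSPEC_GOTOFF)) for
   local data addressed as foo@GOTOFF(%ebx), and
   (const (unspec [foo] UNSPEC_GOTNTPOFF)) for an initial-exec TLS
   reference.  A bare TLS SYMBOL_REF, by contrast, is rejected.  */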
9984 /* Recognizes RTL expressions that are valid memory addresses for an
9985 instruction. The MODE argument is the machine mode for the MEM
9986 expression that wants to use this address.
9988 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
9989 convert common non-canonical forms to canonical form so that they will
9993 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
9994 rtx addr, bool strict)
9996 struct ix86_address parts;
9997 rtx base, index, disp;
9998 HOST_WIDE_INT scale;
10000 if (ix86_decompose_address (addr, &parts) <= 0)
10001 /* Decomposition failed. */
10005 index = parts.index;
10007 scale = parts.scale;
10009 /* Validate the base register.
10011 Don't allow SUBREGs that span more than a word here. It can lead to spill
10012 failures when the base is one word out of a two-word structure, which is
10013 represented internally as a DImode int. */
10021 else if (GET_CODE (base) == SUBREG
10022 && REG_P (SUBREG_REG (base))
10023 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
10025 reg = SUBREG_REG (base);
10027 /* Base is not a register. */
10030 if (GET_MODE (base) != Pmode)
10031 /* Base is not in Pmode. */
10034 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
10035 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
10036 /* Base is not valid. */
10040 /* Validate the index register.
10042 Don't allow SUBREGs that span more than a word here -- same as above. */
10050 else if (GET_CODE (index) == SUBREG
10051 && REG_P (SUBREG_REG (index))
10052 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
10054 reg = SUBREG_REG (index);
10056 /* Index is not a register. */
10059 if (GET_MODE (index) != Pmode)
10060 /* Index is not in Pmode. */
10063 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
10064 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
10065 /* Index is not valid. */
10069 /* Validate scale factor. */
10073 /* Scale without index. */
10076 if (scale != 2 && scale != 4 && scale != 8)
10077 /* Scale is not a valid multiplier. */
10081 /* Validate displacement. */
10084 if (GET_CODE (disp) == CONST
10085 && GET_CODE (XEXP (disp, 0)) == UNSPEC
10086 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
10087 switch (XINT (XEXP (disp, 0), 1))
10089 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
10090 used. While the ABI also specifies 32bit relocations, we don't produce
10091 them at all and use IP-relative addressing instead. */
10093 case UNSPEC_GOTOFF:
10094 gcc_assert (flag_pic);
10096 goto is_legitimate_pic;
10098 /* 64bit address unspec. */
10101 case UNSPEC_GOTPCREL:
10102 gcc_assert (flag_pic);
10103 goto is_legitimate_pic;
10105 case UNSPEC_GOTTPOFF:
10106 case UNSPEC_GOTNTPOFF:
10107 case UNSPEC_INDNTPOFF:
10108 case UNSPEC_NTPOFF:
10109 case UNSPEC_DTPOFF:
10113 /* Invalid address unspec. */
10117 else if (SYMBOLIC_CONST (disp)
10121 && MACHOPIC_INDIRECT
10122 && !machopic_operand_p (disp)
10128 if (TARGET_64BIT && (index || base))
10130 /* foo@dtpoff(%rX) is ok. */
10131 if (GET_CODE (disp) != CONST
10132 || GET_CODE (XEXP (disp, 0)) != PLUS
10133 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
10134 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
10135 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
10136 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
10137 /* Non-constant pic memory reference. */
10140 else if (! legitimate_pic_address_disp_p (disp))
10141 /* Displacement is an invalid pic construct. */
10144 /* This code used to verify that a symbolic pic displacement
10145 includes the pic_offset_table_rtx register.
10147 While this is a good idea, unfortunately these constructs may
10148 be created by the "adds using lea" optimization for incorrect
10157 This code is nonsensical, but results in addressing the
10158 GOT table with a pic_offset_table_rtx base. We can't
10159 just refuse it easily, since it gets matched by the
10160 "addsi3" pattern, which later gets split to an lea when the
10161 output register differs from the input. While this
10162 could be handled by a separate addsi pattern for this case
10163 that never results in an lea, disabling this test seems to be
10164 the easier and correct fix for the crash. */
10166 else if (GET_CODE (disp) != LABEL_REF
10167 && !CONST_INT_P (disp)
10168 && (GET_CODE (disp) != CONST
10169 || !legitimate_constant_p (disp))
10170 && (GET_CODE (disp) != SYMBOL_REF
10171 || !legitimate_constant_p (disp)))
10172 /* Displacement is not constant. */
10174 else if (TARGET_64BIT
10175 && !x86_64_immediate_operand (disp, VOIDmode))
10176 /* Displacement is out of range. */
10180 /* Everything looks valid. */
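/* Informal examples of the rules above: "4(%ebx,%ecx,2)" satisfies
   every check, whereas a scale of 3 (only 1, 2, 4 and 8 encode), a
   base or index register whose mode is not Pmode, or a 64-bit
   displacement outside the signed 32-bit range would each be
   rejected.  */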
10184 /* Determine if a given RTX is a valid constant address. */
10187 constant_address_p (rtx x)
10189 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
10192 /* Return a unique alias set for the GOT. */
10194 static alias_set_type
10195 ix86_GOT_alias_set (void)
10197 static alias_set_type set = -1;
10199 set = new_alias_set ();
10203 /* Return a legitimate reference for ORIG (an address) using the
10204 register REG. If REG is 0, a new pseudo is generated.
10206 There are two types of references that must be handled:
10208 1. Global data references must load the address from the GOT, via
10209 the PIC reg. An insn is emitted to do this load, and the reg is
10212 2. Static data references, constant pool addresses, and code labels
10213 compute the address as an offset from the GOT, whose base is in
10214 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
10215 differentiate them from global data objects. The returned
10216 address is the PIC reg + an unspec constant.
10218 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
10219 reg also appears in the address. */
10222 legitimize_pic_address (rtx orig, rtx reg)
10225 rtx new_rtx = orig;
10229 if (TARGET_MACHO && !TARGET_64BIT)
10232 reg = gen_reg_rtx (Pmode);
10233 /* Use the generic Mach-O PIC machinery. */
10234 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
10238 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
10240 else if (TARGET_64BIT
10241 && ix86_cmodel != CM_SMALL_PIC
10242 && gotoff_operand (addr, Pmode))
10245 /* This symbol may be referenced via a displacement from the PIC
10246 base address (@GOTOFF). */
10248 if (reload_in_progress)
10249 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10250 if (GET_CODE (addr) == CONST)
10251 addr = XEXP (addr, 0);
10252 if (GET_CODE (addr) == PLUS)
10254 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10256 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10259 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10260 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10262 tmpreg = gen_reg_rtx (Pmode);
10265 emit_move_insn (tmpreg, new_rtx);
10269 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
10270 tmpreg, 1, OPTAB_DIRECT);
10273 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
10275 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
10277 /* This symbol may be referenced via a displacement from the PIC
10278 base address (@GOTOFF). */
10280 if (reload_in_progress)
10281 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10282 if (GET_CODE (addr) == CONST)
10283 addr = XEXP (addr, 0);
10284 if (GET_CODE (addr) == PLUS)
10286 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10288 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10291 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10292 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10293 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10297 emit_move_insn (reg, new_rtx);
10301 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
10302 /* We can't use @GOTOFF for text labels on VxWorks;
10303 see gotoff_operand. */
10304 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
10306 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10308 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
10309 return legitimize_dllimport_symbol (addr, true);
10310 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
10311 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
10312 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
10314 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
10315 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
10319 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
10321 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
10322 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10323 new_rtx = gen_const_mem (Pmode, new_rtx);
10324 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10327 reg = gen_reg_rtx (Pmode);
10328 /* Use gen_movsi directly, otherwise the address is loaded
10329 into a register for CSE. We don't want to CSE this address;
10330 instead we CSE addresses from the GOT table, so skip this. */
10331 emit_insn (gen_movsi (reg, new_rtx));
10336 /* This symbol must be referenced via a load from the
10337 Global Offset Table (@GOT). */
10339 if (reload_in_progress)
10340 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10341 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
10342 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10344 new_rtx = force_reg (Pmode, new_rtx);
10345 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10346 new_rtx = gen_const_mem (Pmode, new_rtx);
10347 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10350 reg = gen_reg_rtx (Pmode);
10351 emit_move_insn (reg, new_rtx);
10357 if (CONST_INT_P (addr)
10358 && !x86_64_immediate_operand (addr, VOIDmode))
10362 emit_move_insn (reg, addr);
10366 new_rtx = force_reg (Pmode, addr);
10368 else if (GET_CODE (addr) == CONST)
10370 addr = XEXP (addr, 0);
10372 /* We must match stuff we generate before. Assume the only
10373 unspecs that can get here are ours. Not that we could do
10374 anything with them anyway.... */
10375 if (GET_CODE (addr) == UNSPEC
10376 || (GET_CODE (addr) == PLUS
10377 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
10379 gcc_assert (GET_CODE (addr) == PLUS);
10381 if (GET_CODE (addr) == PLUS)
10383 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
10385 /* Check first to see if this is a constant offset from a @GOTOFF
10386 symbol reference. */
10387 if (gotoff_operand (op0, Pmode)
10388 && CONST_INT_P (op1))
10392 if (reload_in_progress)
10393 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10394 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
10396 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
10397 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10398 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10402 emit_move_insn (reg, new_rtx);
10408 if (INTVAL (op1) < -16*1024*1024
10409 || INTVAL (op1) >= 16*1024*1024)
10411 if (!x86_64_immediate_operand (op1, Pmode))
10412 op1 = force_reg (Pmode, op1);
10413 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
10419 base = legitimize_pic_address (XEXP (addr, 0), reg);
10420 new_rtx = legitimize_pic_address (XEXP (addr, 1),
10421 base == reg ? NULL_RTX : reg);
10423 if (CONST_INT_P (new_rtx))
10424 new_rtx = plus_constant (base, INTVAL (new_rtx));
10427 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
10429 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
10430 new_rtx = XEXP (new_rtx, 1);
10432 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
10440 /* Load the thread pointer. If TO_REG is true, force it into a register. */
10443 get_thread_pointer (int to_reg)
10447 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
10451 reg = gen_reg_rtx (Pmode);
10452 insn = gen_rtx_SET (VOIDmode, reg, tp);
10453 insn = emit_insn (insn);
10458 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
10459 false if we expect this to be used for a memory address and true if
10460 we expect to load the address into a register. */
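/* As a rough sketch of the exec models on 32-bit GNU systems (the
   thread pointer lives at %gs:0; illustrative only, not the exact
   sequences this function emits):

     local-exec:    movl %gs:0, %eax
                    leal x@ntpoff(%eax), %eax
     initial-exec:  movl %gs:0, %eax
                    addl x@gottpoff(%ebx), %eax

   The dynamic models instead obtain the address via a call to the
   tls_get_addr helper (cf. ix86_tls_get_addr below).  */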
10463 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
10465 rtx dest, base, off, pic, tp;
10470 case TLS_MODEL_GLOBAL_DYNAMIC:
10471 dest = gen_reg_rtx (Pmode);
10472 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10474 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10476 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
10479 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
10480 insns = get_insns ();
10483 RTL_CONST_CALL_P (insns) = 1;
10484 emit_libcall_block (insns, dest, rax, x);
10486 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10487 emit_insn (gen_tls_global_dynamic_64 (dest, x));
10489 emit_insn (gen_tls_global_dynamic_32 (dest, x));
10491 if (TARGET_GNU2_TLS)
10493 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
10495 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10499 case TLS_MODEL_LOCAL_DYNAMIC:
10500 base = gen_reg_rtx (Pmode);
10501 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10503 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10505 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
10508 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
10509 insns = get_insns ();
10512 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
10513 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
10514 RTL_CONST_CALL_P (insns) = 1;
10515 emit_libcall_block (insns, base, rax, note);
10517 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10518 emit_insn (gen_tls_local_dynamic_base_64 (base));
10520 emit_insn (gen_tls_local_dynamic_base_32 (base));
10522 if (TARGET_GNU2_TLS)
10524 rtx x = ix86_tls_module_base ();
10526 set_unique_reg_note (get_last_insn (), REG_EQUIV,
10527 gen_rtx_MINUS (Pmode, x, tp));
10530 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
10531 off = gen_rtx_CONST (Pmode, off);
10533 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
10535 if (TARGET_GNU2_TLS)
10537 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
10539 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10544 case TLS_MODEL_INITIAL_EXEC:
10548 type = UNSPEC_GOTNTPOFF;
10552 if (reload_in_progress)
10553 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10554 pic = pic_offset_table_rtx;
10555 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
10557 else if (!TARGET_ANY_GNU_TLS)
10559 pic = gen_reg_rtx (Pmode);
10560 emit_insn (gen_set_got (pic));
10561 type = UNSPEC_GOTTPOFF;
10566 type = UNSPEC_INDNTPOFF;
10569 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
10570 off = gen_rtx_CONST (Pmode, off);
10572 off = gen_rtx_PLUS (Pmode, pic, off);
10573 off = gen_const_mem (Pmode, off);
10574 set_mem_alias_set (off, ix86_GOT_alias_set ());
10576 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10578 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10579 off = force_reg (Pmode, off);
10580 return gen_rtx_PLUS (Pmode, base, off);
10584 base = get_thread_pointer (true);
10585 dest = gen_reg_rtx (Pmode);
10586 emit_insn (gen_subsi3 (dest, base, off));
10590 case TLS_MODEL_LOCAL_EXEC:
10591 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
10592 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10593 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
10594 off = gen_rtx_CONST (Pmode, off);
10596 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10598 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10599 return gen_rtx_PLUS (Pmode, base, off);
10603 base = get_thread_pointer (true);
10604 dest = gen_reg_rtx (Pmode);
10605 emit_insn (gen_subsi3 (dest, base, off));
10610 gcc_unreachable ();
10616 /* Create or return the unique __imp_DECL dllimport symbol corresponding to symbol DECL. */
10619 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
10620 htab_t dllimport_map;
10623 get_dllimport_decl (tree decl)
10625 struct tree_map *h, in;
10628 const char *prefix;
10629 size_t namelen, prefixlen;
10634 if (!dllimport_map)
10635 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
10637 in.hash = htab_hash_pointer (decl);
10638 in.base.from = decl;
10639 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
10640 h = (struct tree_map *) *loc;
10644 *loc = h = GGC_NEW (struct tree_map);
10646 h->base.from = decl;
10647 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
10648 VAR_DECL, NULL, ptr_type_node);
10649 DECL_ARTIFICIAL (to) = 1;
10650 DECL_IGNORED_P (to) = 1;
10651 DECL_EXTERNAL (to) = 1;
10652 TREE_READONLY (to) = 1;
10654 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10655 name = targetm.strip_name_encoding (name);
10656 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
10657 ? "*__imp_" : "*__imp__";
10658 namelen = strlen (name);
10659 prefixlen = strlen (prefix);
10660 imp_name = (char *) alloca (namelen + prefixlen + 1);
10661 memcpy (imp_name, prefix, prefixlen);
10662 memcpy (imp_name + prefixlen, name, namelen + 1);
10664 name = ggc_alloc_string (imp_name, namelen + prefixlen);
10665 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
10666 SET_SYMBOL_REF_DECL (rtl, to);
10667 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
10669 rtl = gen_const_mem (Pmode, rtl);
10670 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
10672 SET_DECL_RTL (to, rtl);
10673 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
10678 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
10679 true if we require the result be a register. */
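/* In effect, a reference to a dllimport'ed `foo' gains an extra
   indirection through the import table (a sketch, AT&T syntax;
   the exact `__imp_' vs. `__imp__' prefix depends on the label
   prefix, as in get_dllimport_decl above):

     movl foo, %eax    becomes    movl __imp__foo, %eax
                                  movl (%eax), %eax

   where `__imp__foo' is the VAR_DECL manufactured above.  */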
10682 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
10687 gcc_assert (SYMBOL_REF_DECL (symbol));
10688 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
10690 x = DECL_RTL (imp_decl);
10692 x = force_reg (Pmode, x);
10696 /* Try machine-dependent ways of modifying an illegitimate address
10697 to be legitimate. If we find one, return the new, valid address.
10698 This macro is used in only one place: `memory_address' in explow.c.
10700 OLDX is the address as it was before break_out_memory_refs was called.
10701 In some cases it is useful to look at this to decide what needs to be done.
10703 It is always safe for this macro to do nothing. It exists to recognize
10704 opportunities to optimize the output.
10706 For the 80386, we handle X+REG by loading X into a register R and
10707 using R+REG. R will go in a general reg and indexing will be used.
10708 However, if REG is a broken-out memory address or multiplication,
10709 nothing needs to be done because REG can certainly go in a general reg.
10711 When -fpic is used, special handling is needed for symbolic references.
10712 See comments by legitimize_pic_address in i386.c for details. */
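/* For example, the shift canonicalization below rewrites (a sketch;
   pseudo numbers are arbitrary)

     (plus:SI (ashift:SI (reg:SI 60) (const_int 2)) (reg:SI 61))

   into

     (plus:SI (mult:SI (reg:SI 60) (const_int 4)) (reg:SI 61))

   which maps directly onto the scaled-index addressing mode.  */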
10715 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
10716 enum machine_mode mode)
10721 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
10723 return legitimize_tls_address (x, (enum tls_model) log, false);
10724 if (GET_CODE (x) == CONST
10725 && GET_CODE (XEXP (x, 0)) == PLUS
10726 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10727 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
10729 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
10730 (enum tls_model) log, false);
10731 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10734 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10736 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
10737 return legitimize_dllimport_symbol (x, true);
10738 if (GET_CODE (x) == CONST
10739 && GET_CODE (XEXP (x, 0)) == PLUS
10740 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10741 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
10743 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
10744 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10748 if (flag_pic && SYMBOLIC_CONST (x))
10749 return legitimize_pic_address (x, 0);
10751 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
10752 if (GET_CODE (x) == ASHIFT
10753 && CONST_INT_P (XEXP (x, 1))
10754 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
10757 log = INTVAL (XEXP (x, 1));
10758 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
10759 GEN_INT (1 << log));
10762 if (GET_CODE (x) == PLUS)
10764 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
10766 if (GET_CODE (XEXP (x, 0)) == ASHIFT
10767 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10768 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
10771 log = INTVAL (XEXP (XEXP (x, 0), 1));
10772 XEXP (x, 0) = gen_rtx_MULT (Pmode,
10773 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
10774 GEN_INT (1 << log));
10777 if (GET_CODE (XEXP (x, 1)) == ASHIFT
10778 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10779 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
10782 log = INTVAL (XEXP (XEXP (x, 1), 1));
10783 XEXP (x, 1) = gen_rtx_MULT (Pmode,
10784 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
10785 GEN_INT (1 << log));
10788 /* Put multiply first if it isn't already. */
10789 if (GET_CODE (XEXP (x, 1)) == MULT)
10791 rtx tmp = XEXP (x, 0);
10792 XEXP (x, 0) = XEXP (x, 1);
10797 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
10798 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
10799 created by virtual register instantiation, register elimination, and
10800 similar optimizations. */
10801 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
10804 x = gen_rtx_PLUS (Pmode,
10805 gen_rtx_PLUS (Pmode, XEXP (x, 0),
10806 XEXP (XEXP (x, 1), 0)),
10807 XEXP (XEXP (x, 1), 1));
10811 /* Canonicalize (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
10812 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
10813 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
10814 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10815 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
10816 && CONSTANT_P (XEXP (x, 1)))
10819 rtx other = NULL_RTX;
10821 if (CONST_INT_P (XEXP (x, 1)))
10823 constant = XEXP (x, 1);
10824 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
10826 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
10828 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
10829 other = XEXP (x, 1);
10837 x = gen_rtx_PLUS (Pmode,
10838 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
10839 XEXP (XEXP (XEXP (x, 0), 1), 0)),
10840 plus_constant (other, INTVAL (constant)));
10844 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10847 if (GET_CODE (XEXP (x, 0)) == MULT)
10850 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
10853 if (GET_CODE (XEXP (x, 1)) == MULT)
10856 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
10860 && REG_P (XEXP (x, 1))
10861 && REG_P (XEXP (x, 0)))
10864 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
10867 x = legitimize_pic_address (x, 0);
10870 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10873 if (REG_P (XEXP (x, 0)))
10875 rtx temp = gen_reg_rtx (Pmode);
10876 rtx val = force_operand (XEXP (x, 1), temp);
10878 emit_move_insn (temp, val);
10880 XEXP (x, 1) = temp;
10884 else if (REG_P (XEXP (x, 1)))
10886 rtx temp = gen_reg_rtx (Pmode);
10887 rtx val = force_operand (XEXP (x, 0), temp);
10889 emit_move_insn (temp, val);
10891 XEXP (x, 0) = temp;
10899 /* Print an integer constant expression in assembler syntax. Addition
10900 and subtraction are the only arithmetic that may appear in these
10901 expressions. FILE is the stdio stream to write to, X is the rtx, and
10902 CODE is the operand print code from the output string. */
10905 output_pic_addr_const (FILE *file, rtx x, int code)
10909 switch (GET_CODE (x))
10912 gcc_assert (flag_pic);
10917 if (! TARGET_MACHO || TARGET_64BIT)
10918 output_addr_const (file, x);
10921 const char *name = XSTR (x, 0);
10923 /* Mark the decl as referenced so that cgraph will
10924 output the function. */
10925 if (SYMBOL_REF_DECL (x))
10926 mark_decl_referenced (SYMBOL_REF_DECL (x));
10929 if (MACHOPIC_INDIRECT
10930 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
10931 name = machopic_indirection_name (x, /*stub_p=*/true);
10933 assemble_name (file, name);
10935 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
10936 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
10937 fputs ("@PLT", file);
10944 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
10945 assemble_name (asm_out_file, buf);
10949 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
10953 /* This used to output parentheses around the expression,
10954 but that does not work on the 386 (either ATT or BSD assembler). */
10955 output_pic_addr_const (file, XEXP (x, 0), code);
10959 if (GET_MODE (x) == VOIDmode)
10961 /* We can use %d if the number is <32 bits and positive. */
10962 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
10963 fprintf (file, "0x%lx%08lx",
10964 (unsigned long) CONST_DOUBLE_HIGH (x),
10965 (unsigned long) CONST_DOUBLE_LOW (x));
10967 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
10970 /* We can't handle floating point constants;
10971 PRINT_OPERAND must handle them. */
10972 output_operand_lossage ("floating constant misused");
10976 /* Some assemblers need integer constants to appear first. */
10977 if (CONST_INT_P (XEXP (x, 0)))
10979 output_pic_addr_const (file, XEXP (x, 0), code);
10981 output_pic_addr_const (file, XEXP (x, 1), code);
10985 gcc_assert (CONST_INT_P (XEXP (x, 1)));
10986 output_pic_addr_const (file, XEXP (x, 1), code);
10988 output_pic_addr_const (file, XEXP (x, 0), code);
10994 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
10995 output_pic_addr_const (file, XEXP (x, 0), code);
10997 output_pic_addr_const (file, XEXP (x, 1), code);
10999 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
11003 gcc_assert (XVECLEN (x, 0) == 1);
11004 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
11005 switch (XINT (x, 1))
11008 fputs ("@GOT", file);
11010 case UNSPEC_GOTOFF:
11011 fputs ("@GOTOFF", file);
11013 case UNSPEC_PLTOFF:
11014 fputs ("@PLTOFF", file);
11016 case UNSPEC_GOTPCREL:
11017 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
11018 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
11020 case UNSPEC_GOTTPOFF:
11021 /* FIXME: This might be @TPOFF in Sun ld too. */
11022 fputs ("@gottpoff", file);
11025 fputs ("@tpoff", file);
11027 case UNSPEC_NTPOFF:
11029 fputs ("@tpoff", file);
11031 fputs ("@ntpoff", file);
11033 case UNSPEC_DTPOFF:
11034 fputs ("@dtpoff", file);
11036 case UNSPEC_GOTNTPOFF:
11038 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
11039 "@gottpoff(%rip)": "@gottpoff[rip]", file);
11041 fputs ("@gotntpoff", file);
11043 case UNSPEC_INDNTPOFF:
11044 fputs ("@indntpoff", file);
11047 case UNSPEC_MACHOPIC_OFFSET:
11049 machopic_output_function_base_name (file);
11053 output_operand_lossage ("invalid UNSPEC as operand");
11059 output_operand_lossage ("invalid expression as operand");
11063 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
11064 We need to emit DTP-relative relocations. */
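/* E.g. this emits `.long x@dtpoff' for SIZE 4, with a trailing
   `, 0' filling the upper half for SIZE 8 (assuming ASM_LONG is
   the .long directive -- see the size switch below).  */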
11066 static void ATTRIBUTE_UNUSED
11067 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
11069 fputs (ASM_LONG, file);
11070 output_addr_const (file, x);
11071 fputs ("@dtpoff", file);
11077 fputs (", 0", file);
11080 gcc_unreachable ();
11084 /* Return true if X is a representation of the PIC register. This copes
11085 with calls from ix86_find_base_term, where the register might have
11086 been replaced by a cselib value. */
11089 ix86_pic_register_p (rtx x)
11091 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
11092 return (pic_offset_table_rtx
11093 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
11095 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
11098 /* In the name of slightly smaller debug output, and to cater to
11099 general assembler lossage, recognize PIC+GOTOFF and turn it back
11100 into a direct symbol reference.
11102 On Darwin, this is necessary to avoid a crash, because Darwin
11103 has a different PIC label for each routine but the DWARF debugging
11104 information is not associated with any particular routine, so it's
11105 necessary to remove references to the PIC label from RTL stored by
11106 the DWARF output code. */
11109 ix86_delegitimize_address (rtx x)
11111 rtx orig_x = delegitimize_mem_from_attrs (x);
11112 /* addend is NULL or some rtx if x is something+GOTOFF where
11113 something doesn't include the PIC register. */
11114 rtx addend = NULL_RTX;
11115 /* reg_addend is NULL or a multiple of some register. */
11116 rtx reg_addend = NULL_RTX;
11117 /* const_addend is NULL or a const_int. */
11118 rtx const_addend = NULL_RTX;
11119 /* This is the result, or NULL. */
11120 rtx result = NULL_RTX;
11129 if (GET_CODE (x) != CONST
11130 || GET_CODE (XEXP (x, 0)) != UNSPEC
11131 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
11132 || !MEM_P (orig_x))
11134 x = XVECEXP (XEXP (x, 0), 0, 0);
11135 if (GET_MODE (orig_x) != Pmode)
11136 return simplify_gen_subreg (GET_MODE (orig_x), x, Pmode, 0);
11140 if (GET_CODE (x) != PLUS
11141 || GET_CODE (XEXP (x, 1)) != CONST)
11144 if (ix86_pic_register_p (XEXP (x, 0)))
11145 /* %ebx + GOT/GOTOFF */
11147 else if (GET_CODE (XEXP (x, 0)) == PLUS)
11149 /* %ebx + %reg * scale + GOT/GOTOFF */
11150 reg_addend = XEXP (x, 0);
11151 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
11152 reg_addend = XEXP (reg_addend, 1);
11153 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
11154 reg_addend = XEXP (reg_addend, 0);
11157 reg_addend = NULL_RTX;
11158 addend = XEXP (x, 0);
11162 addend = XEXP (x, 0);
11164 x = XEXP (XEXP (x, 1), 0);
11165 if (GET_CODE (x) == PLUS
11166 && CONST_INT_P (XEXP (x, 1)))
11168 const_addend = XEXP (x, 1);
11172 if (GET_CODE (x) == UNSPEC
11173 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
11174 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
11175 result = XVECEXP (x, 0, 0);
11177 if (TARGET_MACHO && darwin_local_data_pic (x)
11178 && !MEM_P (orig_x))
11179 result = XVECEXP (x, 0, 0);
11185 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
11187 result = gen_rtx_PLUS (Pmode, reg_addend, result);
11190 /* If the rest of original X doesn't involve the PIC register, add
11191 addend and subtract pic_offset_table_rtx. This can happen e.g.
11193 leal (%ebx, %ecx, 4), %ecx
11195 movl foo@GOTOFF(%ecx), %edx
11196 in which case we return (%ecx - %ebx) + foo. */
11197 if (pic_offset_table_rtx)
11198 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
11199 pic_offset_table_rtx),
11204 if (GET_MODE (orig_x) != Pmode && MEM_P (orig_x))
11205 return simplify_gen_subreg (GET_MODE (orig_x), result, Pmode, 0);
11209 /* If X is a machine specific address (i.e. a symbol or label being
11210 referenced as a displacement from the GOT implemented using an
11211 UNSPEC), then return the base term. Otherwise return X. */
11214 ix86_find_base_term (rtx x)
11220 if (GET_CODE (x) != CONST)
11222 term = XEXP (x, 0);
11223 if (GET_CODE (term) == PLUS
11224 && (CONST_INT_P (XEXP (term, 1))
11225 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
11226 term = XEXP (term, 0);
11227 if (GET_CODE (term) != UNSPEC
11228 || XINT (term, 1) != UNSPEC_GOTPCREL)
11231 return XVECEXP (term, 0, 0);
11234 return ix86_delegitimize_address (x);
11238 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
11239 int fp, FILE *file)
11241 const char *suffix;
11243 if (mode == CCFPmode || mode == CCFPUmode)
11245 code = ix86_fp_compare_code_to_integer (code);
11249 code = reverse_condition (code);
11300 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
11304 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
11305 Those same assemblers have the same but opposite lossage on cmov. */
11306 if (mode == CCmode)
11307 suffix = fp ? "nbe" : "a";
11308 else if (mode == CCCmode)
11311 gcc_unreachable ();
11327 gcc_unreachable ();
11331 gcc_assert (mode == CCmode || mode == CCCmode);
11348 gcc_unreachable ();
11352 /* ??? As above. */
11353 gcc_assert (mode == CCmode || mode == CCCmode);
11354 suffix = fp ? "nb" : "ae";
11357 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
11361 /* ??? As above. */
11362 if (mode == CCmode)
11364 else if (mode == CCCmode)
11365 suffix = fp ? "nb" : "ae";
11367 gcc_unreachable ();
11370 suffix = fp ? "u" : "p";
11373 suffix = fp ? "nu" : "np";
11376 gcc_unreachable ();
11378 fputs (suffix, file);
11381 /* Print the name of register X to FILE based on its machine mode and number.
11382 If CODE is 'w', pretend the mode is HImode.
11383 If CODE is 'b', pretend the mode is QImode.
11384 If CODE is 'k', pretend the mode is SImode.
11385 If CODE is 'q', pretend the mode is DImode.
11386 If CODE is 'x', pretend the mode is V4SFmode.
11387 If CODE is 't', pretend the mode is V8SFmode.
11388 If CODE is 'h', pretend the reg is the 'high' byte register.
11389 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op.
11390 If CODE is 'd', duplicate the operand for AVX instruction.
11394 print_reg (rtx x, int code, FILE *file)
11397 bool duplicated = code == 'd' && TARGET_AVX;
11399 gcc_assert (x == pc_rtx
11400 || (REGNO (x) != ARG_POINTER_REGNUM
11401 && REGNO (x) != FRAME_POINTER_REGNUM
11402 && REGNO (x) != FLAGS_REG
11403 && REGNO (x) != FPSR_REG
11404 && REGNO (x) != FPCR_REG));
11406 if (ASSEMBLER_DIALECT == ASM_ATT)
11411 gcc_assert (TARGET_64BIT);
11412 fputs ("rip", file);
11416 if (code == 'w' || MMX_REG_P (x))
11418 else if (code == 'b')
11420 else if (code == 'k')
11422 else if (code == 'q')
11424 else if (code == 'y')
11426 else if (code == 'h')
11428 else if (code == 'x')
11430 else if (code == 't')
11433 code = GET_MODE_SIZE (GET_MODE (x));
11435 /* Irritatingly, AMD extended registers use a different naming convention
11436 from the normal registers. */
11437 if (REX_INT_REG_P (x))
11439 gcc_assert (TARGET_64BIT);
11443 error ("extended registers have no high halves");
11446 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
11449 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
11452 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
11455 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
11458 error ("unsupported operand size for extended register");
11468 if (STACK_TOP_P (x))
11477 if (! ANY_FP_REG_P (x))
11478 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
11483 reg = hi_reg_name[REGNO (x)];
11486 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
11488 reg = qi_reg_name[REGNO (x)];
11491 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
11493 reg = qi_high_reg_name[REGNO (x)];
11498 gcc_assert (!duplicated);
11500 fputs (hi_reg_name[REGNO (x)] + 1, file);
11505 gcc_unreachable ();
11511 if (ASSEMBLER_DIALECT == ASM_ATT)
11512 fprintf (file, ", %%%s", reg);
11514 fprintf (file, ", %s", reg);
11518 /* Locate some local-dynamic symbol still in use by this function
11519 so that we can print its name in some tls_local_dynamic_base pattern. */
11523 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11527 if (GET_CODE (x) == SYMBOL_REF
11528 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11530 cfun->machine->some_ld_name = XSTR (x, 0);
11537 static const char *
11538 get_some_local_dynamic_name (void)
11542 if (cfun->machine->some_ld_name)
11543 return cfun->machine->some_ld_name;
11545 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11546 if (NONDEBUG_INSN_P (insn)
11547 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
11548 return cfun->machine->some_ld_name;
11553 /* Meaning of CODE:
11554 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
11555 C -- print opcode suffix for set/cmov insn.
11556 c -- like C, but print reversed condition
11557 F,f -- likewise, but for floating-point.
11558 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
11560 R -- print the prefix for register names.
11561 z -- print the opcode suffix for the size of the current operand.
11562 Z -- likewise, with special suffixes for x87 instructions.
11563 * -- print a star (in certain assembler syntax)
11564 A -- print an absolute memory reference.
11565 w -- print the operand as if it's a "word" (HImode) even if it isn't.
11566 s -- print a shift double count, followed by the assembler's argument delimiter.
11568 b -- print the QImode name of the register for the indicated operand.
11569 %b0 would print %al if operands[0] is reg 0.
11570 w -- likewise, print the HImode name of the register.
11571 k -- likewise, print the SImode name of the register.
11572 q -- likewise, print the DImode name of the register.
11573 x -- likewise, print the V4SFmode name of the register.
11574 t -- likewise, print the V8SFmode name of the register.
11575 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
11576 y -- print "st(0)" instead of "st" as a register.
11577 d -- print duplicated register operand for AVX instruction.
11578 D -- print condition for SSE cmp instruction.
11579 P -- if PIC, print an @PLT suffix.
11580 X -- don't print any sort of PIC '@' suffix for a symbol.
11581 & -- print some in-use local-dynamic symbol name.
11582 H -- print a memory address offset by 8; used for sse high-parts
11583 Y -- print condition for XOP pcom* instruction.
11584 + -- print a branch hint as 'cs' or 'ds' prefix
11585 ; -- print a semicolon (after prefixes due to a bug in older gas). */
11589 print_operand (FILE *file, rtx x, int code)
11596 if (ASSEMBLER_DIALECT == ASM_ATT)
11602 const char *name = get_some_local_dynamic_name ();
11604 output_operand_lossage ("'%%&' used without any "
11605 "local dynamic TLS references");
11607 assemble_name (file, name);
11612 switch (ASSEMBLER_DIALECT)
11619 /* Intel syntax. For absolute addresses, registers should not
11620 be surrounded by braces. */
11624 PRINT_OPERAND (file, x, 0);
11631 gcc_unreachable ();
11634 PRINT_OPERAND (file, x, 0);
11639 if (ASSEMBLER_DIALECT == ASM_ATT)
11644 if (ASSEMBLER_DIALECT == ASM_ATT)
11649 if (ASSEMBLER_DIALECT == ASM_ATT)
11654 if (ASSEMBLER_DIALECT == ASM_ATT)
11659 if (ASSEMBLER_DIALECT == ASM_ATT)
11664 if (ASSEMBLER_DIALECT == ASM_ATT)
11669 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11671 /* Opcodes don't get size suffixes if using Intel opcodes. */
11672 if (ASSEMBLER_DIALECT == ASM_INTEL)
11675 switch (GET_MODE_SIZE (GET_MODE (x)))
11694 output_operand_lossage
11695 ("invalid operand size for operand code '%c'", code);
11700 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11702 (0, "non-integer operand used with operand code '%c'", code);
11706 /* 387 opcodes don't get size suffixes if using Intel opcodes. */
11707 if (ASSEMBLER_DIALECT == ASM_INTEL)
11710 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11712 switch (GET_MODE_SIZE (GET_MODE (x)))
11715 #ifdef HAVE_AS_IX86_FILDS
11725 #ifdef HAVE_AS_IX86_FILDQ
11728 fputs ("ll", file);
11736 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11738 /* 387 opcodes don't get size suffixes
11739 if the operands are registers. */
11740 if (STACK_REG_P (x))
11743 switch (GET_MODE_SIZE (GET_MODE (x)))
11764 output_operand_lossage
11765 ("invalid operand type used with operand code '%c'", code);
11769 output_operand_lossage
11770 ("invalid operand size for operand code '%c'", code);
11787 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
11789 PRINT_OPERAND (file, x, 0);
11790 fputs (", ", file);
11795 /* Little bit of braindamage here. The SSE compare instructions
11796 use completely different names for the comparisons than the
11797 fp conditional moves do. */
11800 switch (GET_CODE (x))
11803 fputs ("eq", file);
11806 fputs ("eq_us", file);
11809 fputs ("lt", file);
11812 fputs ("nge", file);
11815 fputs ("le", file);
11818 fputs ("ngt", file);
11821 fputs ("unord", file);
11824 fputs ("neq", file);
11827 fputs ("neq_oq", file);
11830 fputs ("ge", file);
11833 fputs ("nlt", file);
11836 fputs ("gt", file);
11839 fputs ("nle", file);
11842 fputs ("ord", file);
11845 output_operand_lossage ("operand is not a condition code, "
11846 "invalid operand code 'D'");
11852 switch (GET_CODE (x))
11856 fputs ("eq", file);
11860 fputs ("lt", file);
11864 fputs ("le", file);
11867 fputs ("unord", file);
11871 fputs ("neq", file);
11875 fputs ("nlt", file);
11879 fputs ("nle", file);
11882 fputs ("ord", file);
11885 output_operand_lossage ("operand is not a condition code, "
11886 "invalid operand code 'D'");
11892 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11893 if (ASSEMBLER_DIALECT == ASM_ATT)
11895 switch (GET_MODE (x))
11897 case HImode: putc ('w', file); break;
11899 case SFmode: putc ('l', file); break;
11901 case DFmode: putc ('q', file); break;
11902 default: gcc_unreachable ();
11909 if (!COMPARISON_P (x))
11911 output_operand_lossage ("operand is neither a constant nor a "
11912 "condition code, invalid operand code "
11916 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
11919 if (!COMPARISON_P (x))
11921 output_operand_lossage ("operand is neither a constant nor a "
11922 "condition code, invalid operand code "
11926 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11927 if (ASSEMBLER_DIALECT == ASM_ATT)
11930 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
11933 /* Like above, but reverse condition */
11935 /* Check to see if argument to %c is really a constant
11936 and not a condition code which needs to be reversed. */
11937 if (!COMPARISON_P (x))
11939 output_operand_lossage ("operand is neither a constant nor a "
11940 "condition code, invalid operand "
11944 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
11947 if (!COMPARISON_P (x))
11949 output_operand_lossage ("operand is neither a constant nor a "
11950 "condition code, invalid operand "
11954 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11955 if (ASSEMBLER_DIALECT == ASM_ATT)
11958 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
11962 /* It doesn't actually matter what mode we use here, as we're
11963 only going to use this for printing. */
11964 x = adjust_address_nv (x, DImode, 8);
11972 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
11975 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
11978 int pred_val = INTVAL (XEXP (x, 0));
11980 if (pred_val < REG_BR_PROB_BASE * 45 / 100
11981 || pred_val > REG_BR_PROB_BASE * 55 / 100)
11983 int taken = pred_val > REG_BR_PROB_BASE / 2;
11984 int cputaken = final_forward_branch_p (current_output_insn) == 0;
11986 /* Emit hints only in the case the default branch prediction
11987 heuristics would fail. */
11988 if (taken != cputaken)
11990 /* We use 3e (DS) prefix for taken branches and
11991 2e (CS) prefix for not taken branches. */
11993 fputs ("ds ; ", file);
11995 fputs ("cs ; ", file);
12003 switch (GET_CODE (x))
12006 fputs ("neq", file);
12009 fputs ("eq", file);
12013 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
12017 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
12021 fputs ("le", file);
12025 fputs ("lt", file);
12028 fputs ("unord", file);
12031 fputs ("ord", file);
12034 fputs ("ueq", file);
12037 fputs ("nlt", file);
12040 fputs ("nle", file);
12043 fputs ("ule", file);
12046 fputs ("ult", file);
12049 fputs ("une", file);
12052 output_operand_lossage ("operand is not a condition code, "
12053 "invalid operand code 'Y'");
12059 #if TARGET_MACHO || !HAVE_AS_IX86_REP_LOCK_PREFIX
12065 output_operand_lossage ("invalid operand code '%c'", code);
12070 print_reg (x, code, file);
12072 else if (MEM_P (x))
12074 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
12075 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
12076 && GET_MODE (x) != BLKmode)
12079 switch (GET_MODE_SIZE (GET_MODE (x)))
12081 case 1: size = "BYTE"; break;
12082 case 2: size = "WORD"; break;
12083 case 4: size = "DWORD"; break;
12084 case 8: size = "QWORD"; break;
12085 case 12: size = "TBYTE"; break;
12087 if (GET_MODE (x) == XFmode)
12092 case 32: size = "YMMWORD"; break;
12094 gcc_unreachable ();
12097 /* Check for explicit size override (codes 'b', 'w' and 'k') */
12100 else if (code == 'w')
12102 else if (code == 'k')
12105 fputs (size, file);
12106 fputs (" PTR ", file);
12110 /* Avoid (%rip) for call operands. */
12111 if (CONSTANT_ADDRESS_P (x) && code == 'P'
12112 && !CONST_INT_P (x))
12113 output_addr_const (file, x);
12114 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
12115 output_operand_lossage ("invalid constraints for operand");
12117 output_address (x);
12120 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
12125 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
12126 REAL_VALUE_TO_TARGET_SINGLE (r, l);
12128 if (ASSEMBLER_DIALECT == ASM_ATT)
12130 fprintf (file, "0x%08lx", (long unsigned int) l);
12133 /* These float cases don't actually occur as immediate operands. */
12134 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
12138 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12139 fputs (dstr, file);
12142 else if (GET_CODE (x) == CONST_DOUBLE
12143 && GET_MODE (x) == XFmode)
12147 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12148 fputs (dstr, file);
12153 /* We have patterns that allow zero sets of memory, for instance.
12154 In 64-bit mode, we should probably support all 8-byte vectors,
12155 since we can in fact encode that into an immediate. */
12156 if (GET_CODE (x) == CONST_VECTOR)
12158 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
12164 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
12166 if (ASSEMBLER_DIALECT == ASM_ATT)
12169 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
12170 || GET_CODE (x) == LABEL_REF)
12172 if (ASSEMBLER_DIALECT == ASM_ATT)
12175 fputs ("OFFSET FLAT:", file);
12178 if (CONST_INT_P (x))
12179 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
12181 output_pic_addr_const (file, x, code);
12183 output_addr_const (file, x);
12187 /* Print a memory operand whose address is ADDR. */
12190 print_operand_address (FILE *file, rtx addr)
12192 struct ix86_address parts;
12193 rtx base, index, disp;
12195 int ok = ix86_decompose_address (addr, &parts);
12200 index = parts.index;
12202 scale = parts.scale;
12210 if (ASSEMBLER_DIALECT == ASM_ATT)
12212 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
12215 gcc_unreachable ();
12218 /* Use one-byte-shorter RIP-relative addressing in 64-bit mode. */
12219 if (TARGET_64BIT && !base && !index)
12223 if (GET_CODE (disp) == CONST
12224 && GET_CODE (XEXP (disp, 0)) == PLUS
12225 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12226 symbol = XEXP (XEXP (disp, 0), 0);
12228 if (GET_CODE (symbol) == LABEL_REF
12229 || (GET_CODE (symbol) == SYMBOL_REF
12230 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
12233 if (!base && !index)
12235 /* A displacement-only address requires special attention. */
12237 if (CONST_INT_P (disp))
12239 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
12240 fputs ("ds:", file);
12241 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
12244 output_pic_addr_const (file, disp, 0);
12246 output_addr_const (file, disp);
12250 if (ASSEMBLER_DIALECT == ASM_ATT)
12255 output_pic_addr_const (file, disp, 0);
12256 else if (GET_CODE (disp) == LABEL_REF)
12257 output_asm_label (disp);
12259 output_addr_const (file, disp);
12264 print_reg (base, 0, file);
12268 print_reg (index, 0, file);
12270 fprintf (file, ",%d", scale);
12276 rtx offset = NULL_RTX;
12280 /* Pull out the offset of a symbol; print any symbol itself. */
12281 if (GET_CODE (disp) == CONST
12282 && GET_CODE (XEXP (disp, 0)) == PLUS
12283 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12285 offset = XEXP (XEXP (disp, 0), 1);
12286 disp = gen_rtx_CONST (VOIDmode,
12287 XEXP (XEXP (disp, 0), 0));
12291 output_pic_addr_const (file, disp, 0);
12292 else if (GET_CODE (disp) == LABEL_REF)
12293 output_asm_label (disp);
12294 else if (CONST_INT_P (disp))
12297 output_addr_const (file, disp);
12303 print_reg (base, 0, file);
12306 if (INTVAL (offset) >= 0)
12308 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12312 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12319 print_reg (index, 0, file);
12321 fprintf (file, "*%d", scale);
12329 output_addr_const_extra (FILE *file, rtx x)
12333 if (GET_CODE (x) != UNSPEC)
12336 op = XVECEXP (x, 0, 0);
12337 switch (XINT (x, 1))
12339 case UNSPEC_GOTTPOFF:
12340 output_addr_const (file, op);
12341 /* FIXME: This might be @TPOFF in Sun ld. */
12342 fputs ("@gottpoff", file);
12345 output_addr_const (file, op);
12346 fputs ("@tpoff", file);
12348 case UNSPEC_NTPOFF:
12349 output_addr_const (file, op);
12351 fputs ("@tpoff", file);
12353 fputs ("@ntpoff", file);
12355 case UNSPEC_DTPOFF:
12356 output_addr_const (file, op);
12357 fputs ("@dtpoff", file);
12359 case UNSPEC_GOTNTPOFF:
12360 output_addr_const (file, op);
12362 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12363 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
12365 fputs ("@gotntpoff", file);
12367 case UNSPEC_INDNTPOFF:
12368 output_addr_const (file, op);
12369 fputs ("@indntpoff", file);
12372 case UNSPEC_MACHOPIC_OFFSET:
12373 output_addr_const (file, op);
12375 machopic_output_function_base_name (file);
12386 /* Split one or more DImode RTL references into pairs of SImode
12387 references. The RTL can be REG, offsettable MEM, integer constant, or
12388 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
12389 split and "num" is its length. lo_half and hi_half are output arrays
12390 that parallel "operands". */
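/* A sketch of the two cases handled below (pseudo numbers arbitrary):

     (reg:DI 60)          -> lo: (subreg:SI (reg:DI 60) 0)
                             hi: (subreg:SI (reg:DI 60) 4)
     (mem:DI (reg:SI 61)) -> lo: (mem:SI (reg:SI 61))
                             hi: (mem:SI (plus:SI (reg:SI 61) (const_int 4)))  */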
12393 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12397 rtx op = operands[num];
12399 /* simplify_subreg refuses to split volatile memory addresses,
12400 but we still have to handle them. */
12403 lo_half[num] = adjust_address (op, SImode, 0);
12404 hi_half[num] = adjust_address (op, SImode, 4);
12408 lo_half[num] = simplify_gen_subreg (SImode, op,
12409 GET_MODE (op) == VOIDmode
12410 ? DImode : GET_MODE (op), 0);
12411 hi_half[num] = simplify_gen_subreg (SImode, op,
12412 GET_MODE (op) == VOIDmode
12413 ? DImode : GET_MODE (op), 4);
12417 /* Split one or more TImode RTL references into pairs of DImode
12418 references. The RTL can be REG, offsettable MEM, integer constant, or
12419 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
12420 split and "num" is its length. lo_half and hi_half are output arrays
12421 that parallel "operands". */
12424 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12428 rtx op = operands[num];
12430 /* simplify_subreg refuses to split volatile memory addresses, but we
12431 still have to handle them. */
12434 lo_half[num] = adjust_address (op, DImode, 0);
12435 hi_half[num] = adjust_address (op, DImode, 8);
12439 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
12440 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
12445 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
12446 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
12447 is the expression of the binary operation. The output may either be
12448 emitted here, or returned to the caller, like all output_* functions.
12450 There is no guarantee that the operands are the same mode, as they
12451 might be within FLOAT or FLOAT_EXTEND expressions. */
12453 #ifndef SYSV386_COMPAT
12454 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
12455 wants to fix the assemblers because that causes incompatibility
12456 with gcc. No-one wants to fix gcc because that causes
12457 incompatibility with assemblers... You can use the option of
12458 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
12459 #define SYSV386_COMPAT 1
12463 output_387_binary_op (rtx insn, rtx *operands)
12465 static char buf[40];
12468 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
12470 #ifdef ENABLE_CHECKING
12471 /* Even if we do not want to check the inputs, this documents input
12472 constraints, which helps in understanding the following code. */
12473 if (STACK_REG_P (operands[0])
12474 && ((REG_P (operands[1])
12475 && REGNO (operands[0]) == REGNO (operands[1])
12476 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
12477 || (REG_P (operands[2])
12478 && REGNO (operands[0]) == REGNO (operands[2])
12479 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
12480 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
12483 gcc_assert (is_sse);
12486 switch (GET_CODE (operands[3]))
12489 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12490 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12498 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12499 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12507 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12508 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12516 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12517 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12525 gcc_unreachable ();
12532 strcpy (buf, ssep);
12533 if (GET_MODE (operands[0]) == SFmode)
12534 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
12536 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
12540 strcpy (buf, ssep + 1);
12541 if (GET_MODE (operands[0]) == SFmode)
12542 strcat (buf, "ss\t{%2, %0|%0, %2}");
12544 strcat (buf, "sd\t{%2, %0|%0, %2}");
12550 switch (GET_CODE (operands[3]))
12554 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
12556 rtx temp = operands[2];
12557 operands[2] = operands[1];
12558 operands[1] = temp;
12561 /* We know operands[0] == operands[1]. */
12563 if (MEM_P (operands[2]))
12569 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12571 if (STACK_TOP_P (operands[0]))
12572 /* How is it that we are storing to a dead operand[2]?
12573 Well, presumably operands[1] is dead too. We can't
12574 store the result to st(0) as st(0) gets popped on this
12575 instruction. Instead store to operands[2] (which I
12576 think has to be st(1)). st(1) will be popped later.
12577 gcc <= 2.8.1 didn't have this check and generated
12578 assembly code that the Unixware assembler rejected. */
12579 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12581 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12585 if (STACK_TOP_P (operands[0]))
12586 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12588 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12593 if (MEM_P (operands[1]))
12599 if (MEM_P (operands[2]))
12605 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12608 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
12609 derived assemblers, confusingly reverse the direction of
12610 the operation for fsub{r} and fdiv{r} when the
12611 destination register is not st(0). The Intel assembler
12612 doesn't have this brain damage. Read !SYSV386_COMPAT to
12613 figure out what the hardware really does. */
12614 if (STACK_TOP_P (operands[0]))
12615 p = "{p\t%0, %2|rp\t%2, %0}";
12617 p = "{rp\t%2, %0|p\t%0, %2}";
12619 if (STACK_TOP_P (operands[0]))
12620 /* As above for fmul/fadd, we can't store to st(0). */
12621 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12623 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12628 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
12631 if (STACK_TOP_P (operands[0]))
12632 p = "{rp\t%0, %1|p\t%1, %0}";
12634 p = "{p\t%1, %0|rp\t%0, %1}";
12636 if (STACK_TOP_P (operands[0]))
12637 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
12639 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
12644 if (STACK_TOP_P (operands[0]))
12646 if (STACK_TOP_P (operands[1]))
12647 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12649 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
12652 else if (STACK_TOP_P (operands[1]))
12655 p = "{\t%1, %0|r\t%0, %1}";
12657 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
12663 p = "{r\t%2, %0|\t%0, %2}";
12665 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12671 gcc_unreachable ();
12678 /* Return needed mode for entity in optimize_mode_switching pass. */
12681 ix86_mode_needed (int entity, rtx insn)
12683 enum attr_i387_cw mode;
12685 /* The mode UNINITIALIZED is used to store the control word after a
12686 function call or ASM pattern. The mode ANY specifies that the function
12687 has no requirements on the control word and makes no changes in the
12688 bits we are interested in. */
12691 || (NONJUMP_INSN_P (insn)
12692 && (asm_noperands (PATTERN (insn)) >= 0
12693 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
12694 return I387_CW_UNINITIALIZED;
12696 if (recog_memoized (insn) < 0)
12697 return I387_CW_ANY;
12699 mode = get_attr_i387_cw (insn);
12704 if (mode == I387_CW_TRUNC)
12709 if (mode == I387_CW_FLOOR)
12714 if (mode == I387_CW_CEIL)
12719 if (mode == I387_CW_MASK_PM)
12724 gcc_unreachable ();
12727 return I387_CW_ANY;
12730 /* Output code to initialize control word copies used by trunc?f?i and
12731 rounding patterns. CURRENT_MODE is set to the current control word,
12732 while NEW_MODE is set to the new control word. */
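/* For reference: bits 11:10 of the x87 control word are the rounding
   control (00 = to nearest, 01 = down, 10 = up, 11 = toward zero) and
   bit 5 (0x0020) is the precision-exception mask, hence the
   0x0c00/0x0400/0x0800/0x0020 constants below.  */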
12735 emit_i387_cw_initialization (int mode)
12737 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
12740 enum ix86_stack_slot slot;
12742 rtx reg = gen_reg_rtx (HImode);
12744 emit_insn (gen_x86_fnstcw_1 (stored_mode));
12745 emit_move_insn (reg, copy_rtx (stored_mode));
12747 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
12748 || optimize_function_for_size_p (cfun))
12752 case I387_CW_TRUNC:
12753 /* round toward zero (truncate) */
12754 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
12755 slot = SLOT_CW_TRUNC;
12758 case I387_CW_FLOOR:
12759 /* round down toward -oo */
12760 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12761 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
12762 slot = SLOT_CW_FLOOR;
12766 /* round up toward +oo */
12767 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12768 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
12769 slot = SLOT_CW_CEIL;
12772 case I387_CW_MASK_PM:
12773 /* mask precision exception for nearbyint() */
12774 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12775 slot = SLOT_CW_MASK_PM;
12779 gcc_unreachable ();
12786 case I387_CW_TRUNC:
12787 /* round toward zero (truncate) */
12788 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
12789 slot = SLOT_CW_TRUNC;
12792 case I387_CW_FLOOR:
12793 /* round down toward -oo */
12794 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
12795 slot = SLOT_CW_FLOOR;
12799 /* round up toward +oo */
12800 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
12801 slot = SLOT_CW_CEIL;
12804 case I387_CW_MASK_PM:
12805 /* mask precision exception for nearbyint() */
12806 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12807 slot = SLOT_CW_MASK_PM;
12811 gcc_unreachable ();
12815 gcc_assert (slot < MAX_386_STACK_LOCALS);
12817 new_mode = assign_386_stack_local (HImode, slot);
12818 emit_move_insn (new_mode, reg);
12821 /* Output code for INSN to convert a float to a signed int. OPERANDS
12822 are the insn operands. The output may be [HSD]Imode and the input
12823 operand may be [SDX]Fmode. */
12826 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
12828 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12829 int dimode_p = GET_MODE (operands[0]) == DImode;
12830 int round_mode = get_attr_i387_cw (insn);
12832 /* Jump through a hoop or two for DImode, since the hardware has no
12833 non-popping instruction. We used to do this a different way, but
12834 that was somewhat fragile and broke with post-reload splitters. */
12835 if ((dimode_p || fisttp) && !stack_top_dies)
12836 output_asm_insn ("fld\t%y1", operands);
12838 gcc_assert (STACK_TOP_P (operands[1]));
12839 gcc_assert (MEM_P (operands[0]));
12840 gcc_assert (GET_MODE (operands[1]) != TFmode);
12843 output_asm_insn ("fisttp%Z0\t%0", operands);
12846 if (round_mode != I387_CW_ANY)
12847 output_asm_insn ("fldcw\t%3", operands);
12848 if (stack_top_dies || dimode_p)
12849 output_asm_insn ("fistp%Z0\t%0", operands);
12851 output_asm_insn ("fist%Z0\t%0", operands);
12852 if (round_mode != I387_CW_ANY)
12853 output_asm_insn ("fldcw\t%2", operands);
12859 /* Output code for x87 ffreep insn. The OPNO argument, which may only
12860 have the values zero or one, indicates the ffreep insn's operand
12861 from the OPERANDS array. */
12863 static const char *
12864 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
12866 if (TARGET_USE_FFREEP)
12867 #ifdef HAVE_AS_IX86_FFREEP
12868 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
12871 static char retval[32];
12872 int regno = REGNO (operands[opno]);
12874 gcc_assert (FP_REGNO_P (regno));
12876 regno -= FIRST_STACK_REG;
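/* ffreep %st(i) is the two-byte opcode 0xdf 0xc0+i; it is emitted
   below as a little-endian 16-bit word, i.e. 0xc0df .. 0xc7df.  */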
12878 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
12883 return opno ? "fstp\t%y1" : "fstp\t%y0";
12887 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
12888 should be used. UNORDERED_P is true when fucom should be used. */
12891 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
12893 int stack_top_dies;
12894 rtx cmp_op0, cmp_op1;
12895 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
12899 cmp_op0 = operands[0];
12900 cmp_op1 = operands[1];
12904 cmp_op0 = operands[1];
12905 cmp_op1 = operands[2];
12910 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
12911 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
12912 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
12913 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
12915 if (GET_MODE (operands[0]) == SFmode)
12917 return &ucomiss[TARGET_AVX ? 0 : 1];
12919 return &comiss[TARGET_AVX ? 0 : 1];
12922 return &ucomisd[TARGET_AVX ? 0 : 1];
12924 return &comisd[TARGET_AVX ? 0 : 1];
12927 gcc_assert (STACK_TOP_P (cmp_op0));
12929 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12931 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
12933 if (stack_top_dies)
12935 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
12936 return output_387_ffreep (operands, 1);
12939 return "ftst\n\tfnstsw\t%0";
12942 if (STACK_REG_P (cmp_op1)
12944 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
12945 && REGNO (cmp_op1) != FIRST_STACK_REG)
12947 /* If the top of the 387 stack dies, and the other operand
12948 is also a stack register that dies, then this must be an
12949 `fcompp' float compare. */
12953 /* There is no double-popping fcomi variant. Fortunately,
12954 eflags is immune to the fstp's cc clobbering. */
12956 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
12958 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
12959 return output_387_ffreep (operands, 0);
12964 return "fucompp\n\tfnstsw\t%0";
12966 return "fcompp\n\tfnstsw\t%0";
12971 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
12973 static const char * const alt[16] =
12975 "fcom%Z2\t%y2\n\tfnstsw\t%0",
12976 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
12977 "fucom%Z2\t%y2\n\tfnstsw\t%0",
12978 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
12980 "ficom%Z2\t%y2\n\tfnstsw\t%0",
12981 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
12985 "fcomi\t{%y1, %0|%0, %y1}",
12986 "fcomip\t{%y1, %0|%0, %y1}",
12987 "fucomi\t{%y1, %0|%0, %y1}",
12988 "fucomip\t{%y1, %0|%0, %y1}",
12999 mask = eflags_p << 3;
13000 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
13001 mask |= unordered_p << 1;
13002 mask |= stack_top_dies;
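/* For instance eflags_p = 1, an FP operand, unordered_p = 1 and a
   dying stack top give mask 11, selecting "fucomip" above.  */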
13004 gcc_assert (mask < 16);
13013 ix86_output_addr_vec_elt (FILE *file, int value)
13015 const char *directive = ASM_LONG;
13019 directive = ASM_QUAD;
13021 gcc_assert (!TARGET_64BIT);
13024 fprintf (file, "%s" LPREFIX "%d\n", directive, value);
13028 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
13030 const char *directive = ASM_LONG;
13033 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
13034 directive = ASM_QUAD;
13036 gcc_assert (!TARGET_64BIT);
13038 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
13039 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
13040 fprintf (file, "%s" LPREFIX "%d-" LPREFIX "%d\n",
13041 directive, value, rel);
13042 else if (HAVE_AS_GOTOFF_IN_DATA)
13043 fprintf (file, ASM_LONG LPREFIX "%d@GOTOFF\n", value);
13045 else if (TARGET_MACHO)
13047 fprintf (file, ASM_LONG LPREFIX "%d-", value);
13048 machopic_output_function_base_name (file);
13053 asm_fprintf (file, ASM_LONG "%U%s+[.-" LPREFIX "%d]\n",
13054 GOT_SYMBOL_NAME, value);
13057 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate. */
13061 ix86_expand_clear (rtx dest)
13065 /* We play register width games, which are only valid after reload. */
13066 gcc_assert (reload_completed);
13068 /* Avoid HImode and its attendant prefix byte. */
13069 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
13070 dest = gen_rtx_REG (SImode, REGNO (dest));
13071 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
13073 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
13074 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
13076 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13077 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
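      /* Size note: "xor reg, reg" is a 2-byte encoding while
	 "mov $0, reg" takes 5 bytes for a 32-bit register, but the xor
	 form clobbers EFLAGS -- hence the explicit CLOBBER above, so
	 later passes know the flags are dead at this point.  */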
13083 /* X is an unchanging MEM. If it is a constant pool reference, return
13084 the constant pool rtx, else NULL. */
13087 maybe_get_pool_constant (rtx x)
13089 x = ix86_delegitimize_address (XEXP (x, 0));
13091 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
13092 return get_pool_constant (x);
13098 ix86_expand_move (enum machine_mode mode, rtx operands[])
13101 enum tls_model model;
13106 if (GET_CODE (op1) == SYMBOL_REF)
13108 model = SYMBOL_REF_TLS_MODEL (op1);
13111 op1 = legitimize_tls_address (op1, model, true);
13112 op1 = force_operand (op1, op0);
13116 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13117 && SYMBOL_REF_DLLIMPORT_P (op1))
13118 op1 = legitimize_dllimport_symbol (op1, false);
13120 else if (GET_CODE (op1) == CONST
13121 && GET_CODE (XEXP (op1, 0)) == PLUS
13122 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
13124 rtx addend = XEXP (XEXP (op1, 0), 1);
13125 rtx symbol = XEXP (XEXP (op1, 0), 0);
13128 model = SYMBOL_REF_TLS_MODEL (symbol);
13130 tmp = legitimize_tls_address (symbol, model, true);
13131 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13132 && SYMBOL_REF_DLLIMPORT_P (symbol))
13133 tmp = legitimize_dllimport_symbol (symbol, true);
13137 tmp = force_operand (tmp, NULL);
13138 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
13139 op0, 1, OPTAB_DIRECT);
13145 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
13147 if (TARGET_MACHO && !TARGET_64BIT)
13152 rtx temp = ((reload_in_progress
13153 || ((op0 && REG_P (op0))
13155 ? op0 : gen_reg_rtx (Pmode));
13156 op1 = machopic_indirect_data_reference (op1, temp);
13157 op1 = machopic_legitimize_pic_address (op1, mode,
13158 temp == op1 ? 0 : temp);
13160 else if (MACHOPIC_INDIRECT)
13161 op1 = machopic_indirect_data_reference (op1, 0);
13169 op1 = force_reg (Pmode, op1);
13170 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
13172 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
13173 op1 = legitimize_pic_address (op1, reg);
13182 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
13183 || !push_operand (op0, mode))
13185 op1 = force_reg (mode, op1);
13187 if (push_operand (op0, mode)
13188 && ! general_no_elim_operand (op1, mode))
13189 op1 = copy_to_mode_reg (mode, op1);
13191 /* Force large constants in 64-bit compilation into a register
13192 to get them CSEd. */
13193 if (can_create_pseudo_p ()
13194 && (mode == DImode) && TARGET_64BIT
13195 && immediate_operand (op1, mode)
13196 && !x86_64_zext_immediate_operand (op1, VOIDmode)
13197 && !register_operand (op0, mode)
13199 op1 = copy_to_mode_reg (mode, op1);
13201 if (can_create_pseudo_p ()
13202 && FLOAT_MODE_P (mode)
13203 && GET_CODE (op1) == CONST_DOUBLE)
13205 /* If we are loading a floating point constant to a register,
13206 force the value to memory now, since we'll get better code
13207 out of the back end. */
13209 op1 = validize_mem (force_const_mem (mode, op1));
13210 if (!register_operand (op0, mode))
13212 rtx temp = gen_reg_rtx (mode);
13213 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
13214 emit_move_insn (op0, temp);
13220 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13224 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
13226 rtx op0 = operands[0], op1 = operands[1];
13227 unsigned int align = GET_MODE_ALIGNMENT (mode);
13229 /* Force constants other than zero into memory. We do not know how
13230 the instructions used to build constants modify the upper 64 bits
13231 of the register; once we have that information we may be able
13232 to handle some of them more efficiently. */
13233 if (can_create_pseudo_p ()
13234 && register_operand (op0, mode)
13235 && (CONSTANT_P (op1)
13236 || (GET_CODE (op1) == SUBREG
13237 && CONSTANT_P (SUBREG_REG (op1))))
13238 && !standard_sse_constant_p (op1))
13239 op1 = validize_mem (force_const_mem (mode, op1));
13241 /* We need to check memory alignment for SSE mode since attributes
13242 can make operands unaligned. */
13243 if (can_create_pseudo_p ()
13244 && SSE_REG_MODE_P (mode)
13245 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
13246 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
13250 /* ix86_expand_vector_move_misalign() does not like constants ... */
13251 if (CONSTANT_P (op1)
13252 || (GET_CODE (op1) == SUBREG
13253 && CONSTANT_P (SUBREG_REG (op1))))
13254 op1 = validize_mem (force_const_mem (mode, op1));
13256 /* ... nor both arguments in memory. */
13257 if (!register_operand (op0, mode)
13258 && !register_operand (op1, mode))
13259 op1 = force_reg (mode, op1);
13261 tmp[0] = op0; tmp[1] = op1;
13262 ix86_expand_vector_move_misalign (mode, tmp);
13266 /* Make operand1 a register if it isn't already. */
13267 if (can_create_pseudo_p ()
13268 && !register_operand (op0, mode)
13269 && !register_operand (op1, mode))
13271 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
13275 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13278 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
13279 straight to ix86_expand_vector_move. */
13280 /* Code generation for scalar reg-reg moves of single and double precision data:
13281 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
13285 if (x86_sse_partial_reg_dependency == true)
13290 Code generation for scalar loads of double precision data:
13291 if (x86_sse_split_regs == true)
13292 movlpd mem, reg (gas syntax)
13296 Code generation for unaligned packed loads of single precision data
13297 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
13298 if (x86_sse_unaligned_move_optimal)
13301 if (x86_sse_partial_reg_dependency == true)
13313 Code generation for unaligned packed loads of double precision data
13314 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
13315 if (x86_sse_unaligned_move_optimal)
13318 if (x86_sse_split_regs == true)
13331 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
13340 switch (GET_MODE_CLASS (mode))
13342 case MODE_VECTOR_INT:
13344 switch (GET_MODE_SIZE (mode))
13347 /* If we're optimizing for size, movups is the smallest. */
13348 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13350 op0 = gen_lowpart (V4SFmode, op0);
13351 op1 = gen_lowpart (V4SFmode, op1);
13352 emit_insn (gen_avx_movups (op0, op1));
13355 op0 = gen_lowpart (V16QImode, op0);
13356 op1 = gen_lowpart (V16QImode, op1);
13357 emit_insn (gen_avx_movdqu (op0, op1));
13360 op0 = gen_lowpart (V32QImode, op0);
13361 op1 = gen_lowpart (V32QImode, op1);
13362 emit_insn (gen_avx_movdqu256 (op0, op1));
13365 gcc_unreachable ();
13368 case MODE_VECTOR_FLOAT:
13369 op0 = gen_lowpart (mode, op0);
13370 op1 = gen_lowpart (mode, op1);
13375 emit_insn (gen_avx_movups (op0, op1));
13378 emit_insn (gen_avx_movups256 (op0, op1));
13381 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13383 op0 = gen_lowpart (V4SFmode, op0);
13384 op1 = gen_lowpart (V4SFmode, op1);
13385 emit_insn (gen_avx_movups (op0, op1));
13388 emit_insn (gen_avx_movupd (op0, op1));
13391 emit_insn (gen_avx_movupd256 (op0, op1));
13394 gcc_unreachable ();
13399 gcc_unreachable ();
13407 /* If we're optimizing for size, movups is the smallest. */
13408 if (optimize_insn_for_size_p ()
13409 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13411 op0 = gen_lowpart (V4SFmode, op0);
13412 op1 = gen_lowpart (V4SFmode, op1);
13413 emit_insn (gen_sse_movups (op0, op1));
13417 /* ??? If we have typed data, then it would appear that using
13418 movdqu is the only way to get unaligned data loaded with
13420 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13422 op0 = gen_lowpart (V16QImode, op0);
13423 op1 = gen_lowpart (V16QImode, op1);
13424 emit_insn (gen_sse2_movdqu (op0, op1));
13428 if (TARGET_SSE2 && mode == V2DFmode)
13432 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
13434 op0 = gen_lowpart (V2DFmode, op0);
13435 op1 = gen_lowpart (V2DFmode, op1);
13436 emit_insn (gen_sse2_movupd (op0, op1));
13440 /* When SSE registers are split into halves, we can avoid
13441 writing to the top half twice. */
13442 if (TARGET_SSE_SPLIT_REGS)
13444 emit_clobber (op0);
13449 /* ??? Not sure about the best option for the Intel chips.
13450 The following would seem to satisfy; the register is
13451 entirely cleared, breaking the dependency chain. We
13452 then store to the upper half, with a dependency depth
13453 of one. A rumor has it that Intel recommends two movsd
13454 followed by an unpacklpd, but this is unconfirmed. And
13455 given that the dependency depth of the unpacklpd would
13456 still be one, I'm not sure why this would be better. */
13457 zero = CONST0_RTX (V2DFmode);
13460 m = adjust_address (op1, DFmode, 0);
13461 emit_insn (gen_sse2_loadlpd (op0, zero, m));
13462 m = adjust_address (op1, DFmode, 8);
13463 emit_insn (gen_sse2_loadhpd (op0, op0, m));
13467 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
13469 op0 = gen_lowpart (V4SFmode, op0);
13470 op1 = gen_lowpart (V4SFmode, op1);
13471 emit_insn (gen_sse_movups (op0, op1));
13475 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
13476 emit_move_insn (op0, CONST0_RTX (mode));
13478 emit_clobber (op0);
13480 if (mode != V4SFmode)
13481 op0 = gen_lowpart (V4SFmode, op0);
13482 m = adjust_address (op1, V2SFmode, 0);
13483 emit_insn (gen_sse_loadlps (op0, op0, m));
13484 m = adjust_address (op1, V2SFmode, 8);
13485 emit_insn (gen_sse_loadhps (op0, op0, m));
13488 else if (MEM_P (op0))
13490 /* If we're optimizing for size, movups is the smallest. */
13491 if (optimize_insn_for_size_p ()
13492 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13494 op0 = gen_lowpart (V4SFmode, op0);
13495 op1 = gen_lowpart (V4SFmode, op1);
13496 emit_insn (gen_sse_movups (op0, op1));
13500 /* ??? Similar to above, only less clear because of "typeless"
13501 stores. */
13502 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
13503 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13505 op0 = gen_lowpart (V16QImode, op0);
13506 op1 = gen_lowpart (V16QImode, op1);
13507 emit_insn (gen_sse2_movdqu (op0, op1));
13511 if (TARGET_SSE2 && mode == V2DFmode)
13513 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
13515 op0 = gen_lowpart (V2DFmode, op0);
13516 op1 = gen_lowpart (V2DFmode, op1);
13517 emit_insn (gen_sse2_movupd (op0, op1));
13521 m = adjust_address (op0, DFmode, 0);
13522 emit_insn (gen_sse2_storelpd (m, op1));
13523 m = adjust_address (op0, DFmode, 8);
13524 emit_insn (gen_sse2_storehpd (m, op1));
13529 if (mode != V4SFmode)
13530 op1 = gen_lowpart (V4SFmode, op1);
13532 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
13534 op0 = gen_lowpart (V4SFmode, op0);
13535 emit_insn (gen_sse_movups (op0, op1));
13539 m = adjust_address (op0, V2SFmode, 0);
13540 emit_insn (gen_sse_storelps (m, op1));
13541 m = adjust_address (op0, V2SFmode, 8);
13542 emit_insn (gen_sse_storehps (m, op1));
13547 gcc_unreachable ();
13550 /* Expand a push in MODE. This is some mode for which we do not support
13551 proper push instructions, at least from the registers that we expect
13552 the value to live in. */
13555 ix86_expand_push (enum machine_mode mode, rtx x)
13559 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
13560 GEN_INT (-GET_MODE_SIZE (mode)),
13561 stack_pointer_rtx, 1, OPTAB_DIRECT);
13562 if (tmp != stack_pointer_rtx)
13563 emit_move_insn (stack_pointer_rtx, tmp);
13565 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
13567 /* When we push an operand onto the stack, it has to be aligned at least
13568 at the function argument boundary. However, since we don't have
13569 the argument type, we can't determine the actual argument boundary. */
13571 emit_move_insn (tmp, x);
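  /* In other words, the push is open-coded.  For a 16-byte mode on a
     64-bit target the expansion is roughly (illustrative only):

	 sub	$16, %rsp
	 mov	<value>, (%rsp)

     i.e. a stack-pointer adjustment followed by a plain store.  */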
13574 /* Helper function of ix86_fixup_binary_operands to canonicalize
13575 operand order. Returns true if the operands should be swapped. */
13578 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
13581 rtx dst = operands[0];
13582 rtx src1 = operands[1];
13583 rtx src2 = operands[2];
13585 /* If the operation is not commutative, we can't do anything. */
13586 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
13589 /* Highest priority is that src1 should match dst. */
13590 if (rtx_equal_p (dst, src1))
13592 if (rtx_equal_p (dst, src2))
13595 /* Next highest priority is that immediate constants come second. */
13596 if (immediate_operand (src2, mode))
13598 if (immediate_operand (src1, mode))
13601 /* Lowest priority is that memory references should come second. */
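  /* Example: for (plus:SI (mem) (reg)) where the destination equals the
     reg, the dst/src match outranks the memory rule, so the operands
     are swapped to put the matching register first.  */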
13611 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
13612 destination to use for the operation. If different from the true
13613 destination in operands[0], a copy operation will be required. */
13616 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
13619 rtx dst = operands[0];
13620 rtx src1 = operands[1];
13621 rtx src2 = operands[2];
13623 /* Canonicalize operand order. */
13624 if (ix86_swap_binary_operands_p (code, mode, operands))
13628 /* It is invalid to swap operands of different modes. */
13629 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
13636 /* Both source operands cannot be in memory. */
13637 if (MEM_P (src1) && MEM_P (src2))
13639 /* Optimization: Only read from memory once. */
13640 if (rtx_equal_p (src1, src2))
13642 src2 = force_reg (mode, src2);
13646 src2 = force_reg (mode, src2);
13649 /* If the destination is memory, and we do not have matching source
13650 operands, do things in registers. */
13651 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13652 dst = gen_reg_rtx (mode);
13654 /* Source 1 cannot be a constant. */
13655 if (CONSTANT_P (src1))
13656 src1 = force_reg (mode, src1);
13658 /* Source 1 cannot be a non-matching memory. */
13659 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13660 src1 = force_reg (mode, src1);
13662 operands[1] = src1;
13663 operands[2] = src2;
13667 /* Similarly, but assume that the destination has already been
13668 set up properly. */
13671 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
13672 enum machine_mode mode, rtx operands[])
13674 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
13675 gcc_assert (dst == operands[0]);
13678 /* Attempt to expand a binary operator. Make the expansion closer to the
13679 actual machine than just general_operand, which would allow 3 separate
13680 memory references (one output, two input) in a single insn. */
13683 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
13686 rtx src1, src2, dst, op, clob;
13688 dst = ix86_fixup_binary_operands (code, mode, operands);
13689 src1 = operands[1];
13690 src2 = operands[2];
13692 /* Emit the instruction. */
13694 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
13695 if (reload_in_progress)
13697 /* Reload doesn't know about the flags register, and doesn't know that
13698 it doesn't want to clobber it. We can only do this with PLUS. */
13699 gcc_assert (code == PLUS);
13704 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13705 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13708 /* Fix up the destination if needed. */
13709 if (dst != operands[0])
13710 emit_move_insn (operands[0], dst);
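/* Typical use from a define_expand in i386.md, as a sketch:

     ix86_expand_binary_operator (PLUS, SImode, operands); DONE;

   operands[0..2] are the destination and the two sources; after
   expansion at most one of them is a memory reference, and a memory
   destination always matches src1.  */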
13713 /* Return TRUE or FALSE depending on whether the binary operator meets the
13714 appropriate constraints. */
13717 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
13720 rtx dst = operands[0];
13721 rtx src1 = operands[1];
13722 rtx src2 = operands[2];
13724 /* Both source operands cannot be in memory. */
13725 if (MEM_P (src1) && MEM_P (src2))
13728 /* Canonicalize operand order for commutative operators. */
13729 if (ix86_swap_binary_operands_p (code, mode, operands))
13736 /* If the destination is memory, we must have a matching source operand. */
13737 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13740 /* Source 1 cannot be a constant. */
13741 if (CONSTANT_P (src1))
13744 /* Source 1 cannot be a non-matching memory. */
13745 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13751 /* Attempt to expand a unary operator. Make the expansion closer to the
13752 actual machine than just general_operand, which would allow 2 separate
13753 memory references (one output, one input) in a single insn. */
13756 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
13759 int matching_memory;
13760 rtx src, dst, op, clob;
13765 /* If the destination is memory, and we do not have matching source
13766 operands, do things in registers. */
13767 matching_memory = 0;
13770 if (rtx_equal_p (dst, src))
13771 matching_memory = 1;
13773 dst = gen_reg_rtx (mode);
13776 /* When source operand is memory, destination must match. */
13777 if (MEM_P (src) && !matching_memory)
13778 src = force_reg (mode, src);
13780 /* Emit the instruction. */
13782 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
13783 if (reload_in_progress || code == NOT)
13785 /* Reload doesn't know about the flags register, and doesn't know that
13786 it doesn't want to clobber it. */
13787 gcc_assert (code == NOT);
13792 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13793 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13796 /* Fix up the destination if needed. */
13797 if (dst != operands[0])
13798 emit_move_insn (operands[0], dst);
13801 #define LEA_SEARCH_THRESHOLD 12
13803 /* Search backward for a non-AGU definition of register number REGNO1
13804 or register number REGNO2 in INSN's basic block until we either
13805 1. pass LEA_SEARCH_THRESHOLD instructions, or
13806 2. reach the BB boundary, or
13807 3. reach an AGU definition.
13808 Returns the distance between the non-AGU definition point and INSN.
13809 If there is no definition point, returns -1. */
13812 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
13815 basic_block bb = BLOCK_FOR_INSN (insn);
13818 enum attr_type insn_type;
13820 if (insn != BB_HEAD (bb))
13822 rtx prev = PREV_INSN (insn);
13823 while (prev && distance < LEA_SEARCH_THRESHOLD)
13825 if (NONDEBUG_INSN_P (prev))
13828 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13829 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13830 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13831 && (regno1 == DF_REF_REGNO (*def_rec)
13832 || regno2 == DF_REF_REGNO (*def_rec)))
13834 insn_type = get_attr_type (prev);
13835 if (insn_type != TYPE_LEA)
13839 if (prev == BB_HEAD (bb))
13841 prev = PREV_INSN (prev);
13845 if (distance < LEA_SEARCH_THRESHOLD)
13849 bool simple_loop = false;
13851 FOR_EACH_EDGE (e, ei, bb->preds)
13854 simple_loop = true;
13860 rtx prev = BB_END (bb);
13863 && distance < LEA_SEARCH_THRESHOLD)
13865 if (NONDEBUG_INSN_P (prev))
13868 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13869 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13870 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13871 && (regno1 == DF_REF_REGNO (*def_rec)
13872 || regno2 == DF_REF_REGNO (*def_rec)))
13874 insn_type = get_attr_type (prev);
13875 if (insn_type != TYPE_LEA)
13879 prev = PREV_INSN (prev);
13887 /* get_attr_type may modify recog data. We want to make sure
13888 that recog data is valid for instruction INSN, on which
13889 distance_non_agu_define is called. INSN is unchanged here. */
13890 extract_insn_cached (insn);
13894 /* Return the distance between INSN and the next insn that uses
13895 register number REGNO0 in a memory address. Return -1 if no such
13896 use is found within LEA_SEARCH_THRESHOLD or REGNO0 is set. */
13899 distance_agu_use (unsigned int regno0, rtx insn)
13901 basic_block bb = BLOCK_FOR_INSN (insn);
13906 if (insn != BB_END (bb))
13908 rtx next = NEXT_INSN (insn);
13909 while (next && distance < LEA_SEARCH_THRESHOLD)
13911 if (NONDEBUG_INSN_P (next))
13915 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13916 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13917 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13918 && regno0 == DF_REF_REGNO (*use_rec))
13920 /* Return DISTANCE if OP0 is used in memory
13921 address in NEXT. */
13925 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13926 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13927 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13928 && regno0 == DF_REF_REGNO (*def_rec))
13930 /* Return -1 if OP0 is set in NEXT. */
13934 if (next == BB_END (bb))
13936 next = NEXT_INSN (next);
13940 if (distance < LEA_SEARCH_THRESHOLD)
13944 bool simple_loop = false;
13946 FOR_EACH_EDGE (e, ei, bb->succs)
13949 simple_loop = true;
13955 rtx next = BB_HEAD (bb);
13958 && distance < LEA_SEARCH_THRESHOLD)
13960 if (NONDEBUG_INSN_P (next))
13964 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13965 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13966 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13967 && regno0 == DF_REF_REGNO (*use_rec))
13969 /* Return DISTANCE if OP0 is used in memory
13970 address in NEXT. */
13974 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13975 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13976 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13977 && regno0 == DF_REF_REGNO (*def_rec))
13979 /* Return -1 if OP0 is set in NEXT. */
13984 next = NEXT_INSN (next);
13992 /* Define this macro to tune LEA priority vs. ADD; it takes effect when
13993 there is a dilemma of choosing LEA or ADD.
13994 Negative value: ADD is preferred over LEA.
13996 Positive value: LEA is preferred over ADD. */
13997 #define IX86_LEA_PRIORITY 2
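/* Worked example of the trade-off below: if the sources were defined by
   a non-AGU insn 1 instruction back (dist_define = 1) and the first AGU
   use of the result is 4 instructions ahead (dist_use = 4), then
   1 + IX86_LEA_PRIORITY < 4 and the plain ADD is kept; with the use
   only 2 instructions ahead, LEA wins.  */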
13999 /* Return true if it is ok to optimize an ADD operation to a LEA
14000 operation to avoid flag register consumption. For processors
14001 like ATOM, if the destination register of the LEA holds an actual
14002 address which will be used soon, LEA is better; otherwise ADD is better. */
14006 ix86_lea_for_add_ok (enum rtx_code code ATTRIBUTE_UNUSED,
14007 rtx insn, rtx operands[])
14009 unsigned int regno0 = true_regnum (operands[0]);
14010 unsigned int regno1 = true_regnum (operands[1]);
14011 unsigned int regno2;
14013 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
14014 return regno0 != regno1;
14016 regno2 = true_regnum (operands[2]);
14018 /* If a = b + c, (a!=b && a!=c), we must use the lea form. */
14019 if (regno0 != regno1 && regno0 != regno2)
14023 int dist_define, dist_use;
14024 dist_define = distance_non_agu_define (regno1, regno2, insn);
14025 if (dist_define <= 0)
14028 /* If this insn has both a backward non-AGU dependence and a forward
14029 AGU dependence, the one with the shorter distance takes effect. */
14030 dist_use = distance_agu_use (regno0, insn);
14032 || (dist_define + IX86_LEA_PRIORITY) < dist_use)
14039 /* Return true if the destination reg of SET_BODY is the shift count of
USE_BODY. */
14043 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
14049 /* Retrieve destination of SET_BODY. */
14050 switch (GET_CODE (set_body))
14053 set_dest = SET_DEST (set_body);
14054 if (!set_dest || !REG_P (set_dest))
14058 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
14059 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
14067 /* Retrieve shift count of USE_BODY. */
14068 switch (GET_CODE (use_body))
14071 shift_rtx = XEXP (use_body, 1);
14074 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
14075 if (ix86_dep_by_shift_count_body (set_body,
14076 XVECEXP (use_body, 0, i)))
14084 && (GET_CODE (shift_rtx) == ASHIFT
14085 || GET_CODE (shift_rtx) == LSHIFTRT
14086 || GET_CODE (shift_rtx) == ASHIFTRT
14087 || GET_CODE (shift_rtx) == ROTATE
14088 || GET_CODE (shift_rtx) == ROTATERT))
14090 rtx shift_count = XEXP (shift_rtx, 1);
14092 /* Return true if shift count is dest of SET_BODY. */
14093 if (REG_P (shift_count)
14094 && true_regnum (set_dest) == true_regnum (shift_count))
14101 /* Return true if the destination reg of SET_INSN is the shift count of
USE_INSN. */
14105 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
14107 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
14108 PATTERN (use_insn));
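/* Concrete case: for "mov %eax, %ecx" followed by "sall %cl, %edx",
   the first insn's destination (%ecx) is the second insn's shift
   count, so ix86_dep_by_shift_count returns true for that pair.  */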
14111 /* Return TRUE or FALSE depending on whether the unary operator meets the
14112 appropriate constraints. */
14115 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
14116 enum machine_mode mode ATTRIBUTE_UNUSED,
14117 rtx operands[2] ATTRIBUTE_UNUSED)
14119 /* If one of the operands is memory, source and destination must match. */
14120 if ((MEM_P (operands[0])
14121 || MEM_P (operands[1]))
14122 && ! rtx_equal_p (operands[0], operands[1]))
14127 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
14128 are ok, keeping in mind the possible movddup alternative. */
14131 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
14133 if (MEM_P (operands[0]))
14134 return rtx_equal_p (operands[0], operands[1 + high]);
14135 if (MEM_P (operands[1]) && MEM_P (operands[2]))
14136 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
14140 /* Post-reload splitter for converting an SF or DFmode value in an
14141 SSE register into an unsigned SImode. */
14144 ix86_split_convert_uns_si_sse (rtx operands[])
14146 enum machine_mode vecmode;
14147 rtx value, large, zero_or_two31, input, two31, x;
14149 large = operands[1];
14150 zero_or_two31 = operands[2];
14151 input = operands[3];
14152 two31 = operands[4];
14153 vecmode = GET_MODE (large);
14154 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
14156 /* Load up the value into the low element. We must ensure that the other
14157 elements are valid floats -- zero is the easiest such value. */
14160 if (vecmode == V4SFmode)
14161 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
14163 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
14167 input = gen_rtx_REG (vecmode, REGNO (input));
14168 emit_move_insn (value, CONST0_RTX (vecmode));
14169 if (vecmode == V4SFmode)
14170 emit_insn (gen_sse_movss (value, value, input));
14172 emit_insn (gen_sse2_movsd (value, value, input));
14175 emit_move_insn (large, two31);
14176 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
14178 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
14179 emit_insn (gen_rtx_SET (VOIDmode, large, x));
14181 x = gen_rtx_AND (vecmode, zero_or_two31, large);
14182 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
14184 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
14185 emit_insn (gen_rtx_SET (VOIDmode, value, x));
14187 large = gen_rtx_REG (V4SImode, REGNO (large));
14188 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
14190 x = gen_rtx_REG (V4SImode, REGNO (value));
14191 if (vecmode == V4SFmode)
14192 emit_insn (gen_sse2_cvttps2dq (x, value));
14194 emit_insn (gen_sse2_cvttpd2dq (x, value));
14197 emit_insn (gen_xorv4si3 (value, value, large));
14200 /* Convert an unsigned DImode value into a DFmode, using only SSE.
14201 Expects the 64-bit DImode to be supplied in a pair of integral
14202 registers. Requires SSE2; will use SSE3 if available. For x86_32,
14203 -mfpmath=sse, !optimize_size only. */
14206 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
14208 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
14209 rtx int_xmm, fp_xmm;
14210 rtx biases, exponents;
14213 int_xmm = gen_reg_rtx (V4SImode);
14214 if (TARGET_INTER_UNIT_MOVES)
14215 emit_insn (gen_movdi_to_sse (int_xmm, input));
14216 else if (TARGET_SSE_SPLIT_REGS)
14218 emit_clobber (int_xmm);
14219 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
14223 x = gen_reg_rtx (V2DImode);
14224 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
14225 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
14228 x = gen_rtx_CONST_VECTOR (V4SImode,
14229 gen_rtvec (4, GEN_INT (0x43300000UL),
14230 GEN_INT (0x45300000UL),
14231 const0_rtx, const0_rtx));
14232 exponents = validize_mem (force_const_mem (V4SImode, x));
14234 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
14235 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
14237 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
14238 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
14239 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
14240 (0x1.0p84 + double(fp_value_hi_xmm)).
14241 Note these exponents differ by 32. */
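  /* Worked example: for the input 0x0000002a00000005 (high word 42, low
     word 5), the low lane becomes 0x1.0p52 + 5.0 and the high lane
     0x1.0p84 + 42.0 * 0x1.0p32; subtracting the biases below and adding
     the halves yields exactly 42 * 2**32 + 5.  */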
14243 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
14245 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
14246 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
14247 real_ldexp (&bias_lo_rvt, &dconst1, 52);
14248 real_ldexp (&bias_hi_rvt, &dconst1, 84);
14249 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
14250 x = const_double_from_real_value (bias_hi_rvt, DFmode);
14251 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
14252 biases = validize_mem (force_const_mem (V2DFmode, biases));
14253 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
14255 /* Add the upper and lower DFmode values together. */
14257 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
14260 x = copy_to_mode_reg (V2DFmode, fp_xmm);
14261 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
14262 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
14265 ix86_expand_vector_extract (false, target, fp_xmm, 0);
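/* A scalar model of the same trick (illustrative only -- not used by
   the compiler; assumes IEEE doubles, C99 hex-float literals, and
   union type-punning, which is well-defined as a GNU C extension):  */

static double ATTRIBUTE_UNUSED
example_uns64_to_double (unsigned long long x)
{
  union { unsigned long long i; double d; } lo, hi;

  /* 0x43300000 ## low 32 bits encodes the double 2^52 + low.  */
  lo.i = (0x43300000ULL << 32) | (x & 0xffffffffULL);
  /* 0x45300000 ## high 32 bits encodes the double 2^84 + high*2^32.  */
  hi.i = (0x45300000ULL << 32) | (x >> 32);

  /* Subtract the biases and sum the halves, as the vector code does.  */
  return (lo.d - 0x1.0p52) + (hi.d - 0x1.0p84);
}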
14268 /* Not used, but eases macroization of patterns. */
14270 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
14271 rtx input ATTRIBUTE_UNUSED)
14273 gcc_unreachable ();
14276 /* Convert an unsigned SImode value into a DFmode. Only currently used
14277 for SSE, but applicable anywhere. */
14280 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
14282 REAL_VALUE_TYPE TWO31r;
14285 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
14286 NULL, 1, OPTAB_DIRECT);
14288 fp = gen_reg_rtx (DFmode);
14289 emit_insn (gen_floatsidf2 (fp, x));
14291 real_ldexp (&TWO31r, &dconst1, 31);
14292 x = const_double_from_real_value (TWO31r, DFmode);
14294 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
14296 emit_move_insn (target, x);
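/* Scalar model: add -2**31 (i.e. flip the sign bit, reinterpreting the
   value as signed), convert exactly with the signed SImode->DFmode
   instruction, then add 2**31.0 back.  E.g. 0xffffffff becomes
   0x7fffffff = 2147483647, converts to 2147483647.0, and the final add
   restores 4294967295.0.  */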
14299 /* Convert a signed DImode value into a DFmode. Only used for SSE in
14300 32-bit mode; otherwise we have a direct convert instruction. */
14303 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
14305 REAL_VALUE_TYPE TWO32r;
14306 rtx fp_lo, fp_hi, x;
14308 fp_lo = gen_reg_rtx (DFmode);
14309 fp_hi = gen_reg_rtx (DFmode);
14311 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
14313 real_ldexp (&TWO32r, &dconst1, 32);
14314 x = const_double_from_real_value (TWO32r, DFmode);
14315 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
14317 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
14319 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
14322 emit_move_insn (target, x);
14325 /* Convert an unsigned SImode value into a SFmode, using only SSE.
14326 For x86_32, -mfpmath=sse, !optimize_size only. */
14328 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
14330 REAL_VALUE_TYPE ONE16r;
14331 rtx fp_hi, fp_lo, int_hi, int_lo, x;
14333 real_ldexp (&ONE16r, &dconst1, 16);
14334 x = const_double_from_real_value (ONE16r, SFmode);
14335 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
14336 NULL, 0, OPTAB_DIRECT);
14337 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
14338 NULL, 0, OPTAB_DIRECT);
14339 fp_hi = gen_reg_rtx (SFmode);
14340 fp_lo = gen_reg_rtx (SFmode);
14341 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
14342 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
14343 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
14345 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
14347 if (!rtx_equal_p (target, fp_hi))
14348 emit_move_insn (target, fp_hi);
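/* A scalar model (illustrative only; hypothetical helper, not used by
   the compiler): both 16-bit halves convert exactly via the signed
   conversion, and the scaling by 2^16 is exact, so only the final add
   rounds -- matching a direct unsigned conversion.  */

static float ATTRIBUTE_UNUSED
example_uns32_to_float (unsigned int x)
{
  float hi = (float) (int) (x >> 16);     /* always fits in 16 bits */
  float lo = (float) (int) (x & 0xffff);
  return hi * 65536.0f + lo;              /* hi * 2^16 is exact */
}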
14351 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
14352 then replicate the value for all elements of the vector
14356 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
14363 v = gen_rtvec (4, value, value, value, value);
14364 return gen_rtx_CONST_VECTOR (V4SImode, v);
14368 v = gen_rtvec (2, value, value);
14369 return gen_rtx_CONST_VECTOR (V2DImode, v);
14373 v = gen_rtvec (4, value, value, value, value);
14375 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
14376 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
14377 return gen_rtx_CONST_VECTOR (V4SFmode, v);
14381 v = gen_rtvec (2, value, value);
14383 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
14384 return gen_rtx_CONST_VECTOR (V2DFmode, v);
14387 gcc_unreachable ();
14391 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
14392 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
14393 for an SSE register. If VECT is true, then replicate the mask for
14394 all elements of the vector register. If INVERT is true, then create
14395 a mask excluding the sign bit. */
14398 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
14400 enum machine_mode vec_mode, imode;
14401 HOST_WIDE_INT hi, lo;
14406 /* Find the sign bit, sign extended to 2*HWI. */
14412 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
14413 lo = 0x80000000, hi = lo < 0;
14419 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
14420 if (HOST_BITS_PER_WIDE_INT >= 64)
14421 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
14423 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14428 vec_mode = VOIDmode;
14429 if (HOST_BITS_PER_WIDE_INT >= 64)
14432 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
14439 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14443 lo = ~lo, hi = ~hi;
14449 mask = immed_double_const (lo, hi, imode);
14451 vec = gen_rtvec (2, v, mask);
14452 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
14453 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
14460 gcc_unreachable ();
14464 lo = ~lo, hi = ~hi;
14466 /* Force this value into the low part of a fp vector constant. */
14467 mask = immed_double_const (lo, hi, imode);
14468 mask = gen_lowpart (mode, mask);
14470 if (vec_mode == VOIDmode)
14471 return force_reg (mode, mask);
14473 v = ix86_build_const_vector (mode, vect, mask);
14474 return force_reg (vec_mode, v);
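/* Examples: for NEG of a V4SF value the mask built here is
   { -0.0, -0.0, -0.0, -0.0 }, which is XORed in; for ABS the INVERT
   case yields { 0x7fffffff, ... } reinterpreted as floats, which is
   ANDed in to clear the sign bits.  */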
14477 /* Generate code for floating point ABS or NEG. */
14480 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
14483 rtx mask, set, use, clob, dst, src;
14484 bool use_sse = false;
14485 bool vector_mode = VECTOR_MODE_P (mode);
14486 enum machine_mode elt_mode = mode;
14490 elt_mode = GET_MODE_INNER (mode);
14493 else if (mode == TFmode)
14495 else if (TARGET_SSE_MATH)
14496 use_sse = SSE_FLOAT_MODE_P (mode);
14498 /* NEG and ABS performed with SSE use bitwise mask operations.
14499 Create the appropriate mask now. */
14501 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
14510 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
14511 set = gen_rtx_SET (VOIDmode, dst, set);
14516 set = gen_rtx_fmt_e (code, mode, src);
14517 set = gen_rtx_SET (VOIDmode, dst, set);
14520 use = gen_rtx_USE (VOIDmode, mask);
14521 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
14522 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14523 gen_rtvec (3, set, use, clob)));
14530 /* Expand a copysign operation. Special case operand 0 being a constant. */
14533 ix86_expand_copysign (rtx operands[])
14535 enum machine_mode mode;
14536 rtx dest, op0, op1, mask, nmask;
14538 dest = operands[0];
14542 mode = GET_MODE (dest);
14544 if (GET_CODE (op0) == CONST_DOUBLE)
14546 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
14548 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
14549 op0 = simplify_unary_operation (ABS, mode, op0, mode);
14551 if (mode == SFmode || mode == DFmode)
14553 enum machine_mode vmode;
14555 vmode = mode == SFmode ? V4SFmode : V2DFmode;
14557 if (op0 == CONST0_RTX (mode))
14558 op0 = CONST0_RTX (vmode);
14561 rtx v = ix86_build_const_vector (mode, false, op0);
14563 op0 = force_reg (vmode, v);
14566 else if (op0 != CONST0_RTX (mode))
14567 op0 = force_reg (mode, op0);
14569 mask = ix86_build_signbit_mask (mode, 0, 0);
14571 if (mode == SFmode)
14572 copysign_insn = gen_copysignsf3_const;
14573 else if (mode == DFmode)
14574 copysign_insn = gen_copysigndf3_const;
14576 copysign_insn = gen_copysigntf3_const;
14578 emit_insn (copysign_insn (dest, op0, op1, mask));
14582 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
14584 nmask = ix86_build_signbit_mask (mode, 0, 1);
14585 mask = ix86_build_signbit_mask (mode, 0, 0);
14587 if (mode == SFmode)
14588 copysign_insn = gen_copysignsf3_var;
14589 else if (mode == DFmode)
14590 copysign_insn = gen_copysigndf3_var;
14592 copysign_insn = gen_copysigntf3_var;
14594 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
14598 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
14599 be a constant, and so has already been expanded into a vector constant. */
14602 ix86_split_copysign_const (rtx operands[])
14604 enum machine_mode mode, vmode;
14605 rtx dest, op0, mask, x;
14607 dest = operands[0];
14609 mask = operands[3];
14611 mode = GET_MODE (dest);
14612 vmode = GET_MODE (mask);
14614 dest = simplify_gen_subreg (vmode, dest, mode, 0);
14615 x = gen_rtx_AND (vmode, dest, mask);
14616 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14618 if (op0 != CONST0_RTX (vmode))
14620 x = gen_rtx_IOR (vmode, dest, op0);
14621 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14625 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
14626 so we have to do two masks. */
14629 ix86_split_copysign_var (rtx operands[])
14631 enum machine_mode mode, vmode;
14632 rtx dest, scratch, op0, op1, mask, nmask, x;
14634 dest = operands[0];
14635 scratch = operands[1];
14638 nmask = operands[4];
14639 mask = operands[5];
14641 mode = GET_MODE (dest);
14642 vmode = GET_MODE (mask);
14644 if (rtx_equal_p (op0, op1))
14646 /* Shouldn't happen often (it's useless, obviously), but when it does
14647 we'd generate incorrect code if we continue below. */
14648 emit_move_insn (dest, op0);
14652 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
14654 gcc_assert (REGNO (op1) == REGNO (scratch));
14656 x = gen_rtx_AND (vmode, scratch, mask);
14657 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14660 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14661 x = gen_rtx_NOT (vmode, dest);
14662 x = gen_rtx_AND (vmode, x, op0);
14663 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14667 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
14669 x = gen_rtx_AND (vmode, scratch, mask);
14671 else /* alternative 2,4 */
14673 gcc_assert (REGNO (mask) == REGNO (scratch));
14674 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
14675 x = gen_rtx_AND (vmode, scratch, op1);
14677 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14679 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
14681 dest = simplify_gen_subreg (vmode, op0, mode, 0);
14682 x = gen_rtx_AND (vmode, dest, nmask);
14684 else /* alternative 3,4 */
14686 gcc_assert (REGNO (nmask) == REGNO (dest));
14688 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14689 x = gen_rtx_AND (vmode, dest, op0);
14691 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14694 x = gen_rtx_IOR (vmode, dest, scratch);
14695 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14698 /* Return TRUE or FALSE depending on whether the first SET in INSN
14699 has source and destination with matching CC modes, and whether the
14700 CC mode is at least as constrained as REQ_MODE. */
14703 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
14706 enum machine_mode set_mode;
14708 set = PATTERN (insn);
14709 if (GET_CODE (set) == PARALLEL)
14710 set = XVECEXP (set, 0, 0);
14711 gcc_assert (GET_CODE (set) == SET);
14712 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
14714 set_mode = GET_MODE (SET_DEST (set));
14718 if (req_mode != CCNOmode
14719 && (req_mode != CCmode
14720 || XEXP (SET_SRC (set), 1) != const0_rtx))
14724 if (req_mode == CCGCmode)
14728 if (req_mode == CCGOCmode || req_mode == CCNOmode)
14732 if (req_mode == CCZmode)
14743 gcc_unreachable ();
14746 return (GET_MODE (SET_SRC (set)) == set_mode);
14749 /* Generate insn patterns to do an integer compare of OPERANDS. */
14752 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
14754 enum machine_mode cmpmode;
14757 cmpmode = SELECT_CC_MODE (code, op0, op1);
14758 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
14760 /* This is very simple, but making the interface the same as in the
14761 FP case makes the rest of the code easier. */
14762 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
14763 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
14765 /* Return the test that should be put into the flags user, i.e.
14766 the bcc, scc, or cmov instruction. */
14767 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
14770 /* Figure out whether to use ordered or unordered fp comparisons.
14771 Return the appropriate mode to use. */
14774 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
14776 /* ??? In order to make all comparisons reversible, we do all comparisons
14777 non-trapping when compiling for IEEE. Once gcc is able to distinguish
14778 between trapping and nontrapping forms of comparisons, we can make inequality
14779 comparisons trapping again, since it results in better code when using
14780 FCOM based compares. */
14781 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
14785 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
14787 enum machine_mode mode = GET_MODE (op0);
14789 if (SCALAR_FLOAT_MODE_P (mode))
14791 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14792 return ix86_fp_compare_mode (code);
14797 /* Only the zero flag is needed. */
14798 case EQ: /* ZF=1 */
14799 case NE: /* ZF=0 */
14801 /* Codes needing carry flag. */
14802 case GEU: /* CF=0 */
14803 case LTU: /* CF=1 */
14804 /* Detect overflow checks. They need just the carry flag. */
14805 if (GET_CODE (op0) == PLUS
14806 && rtx_equal_p (op1, XEXP (op0, 0)))
14810 case GTU: /* CF=0 & ZF=0 */
14811 case LEU: /* CF=1 | ZF=1 */
14812 /* Detect overflow checks. They need just the carry flag. */
14813 if (GET_CODE (op0) == MINUS
14814 && rtx_equal_p (op1, XEXP (op0, 0)))
14818 /* Codes possibly doable only with sign flag when
14819 comparing against zero. */
14820 case GE: /* SF=OF or SF=0 */
14821 case LT: /* SF<>OF or SF=1 */
14822 if (op1 == const0_rtx)
14825 /* For other cases Carry flag is not required. */
14827 /* Codes doable only with the sign flag when comparing
14828 against zero, but for which we miss the jump instruction,
14829 so we need to use relational tests against overflow
14830 that thus needs to be zero. */
14831 case GT: /* ZF=0 & SF=OF */
14832 case LE: /* ZF=1 | SF<>OF */
14833 if (op1 == const0_rtx)
14837 /* The strcmp patterns do (use flags), and combine may ask us for a
proper mode. */
14842 gcc_unreachable ();
14846 /* Return the fixed registers used for condition codes. */
14849 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
14856 /* If two condition code modes are compatible, return a condition code
14857 mode which is compatible with both. Otherwise, return VOIDmode. */
14860 static enum machine_mode
14861 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
14866 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
14869 if ((m1 == CCGCmode && m2 == CCGOCmode)
14870 || (m1 == CCGOCmode && m2 == CCGCmode))
14876 gcc_unreachable ();
14906 /* These are only compatible with themselves, which we already know. */
14913 /* Return a comparison we can do that is equivalent to
14914 swap_condition (code), apart possibly from orderedness.
14915 But never change orderedness if TARGET_IEEE_FP, returning
14916 UNKNOWN in that case if necessary. */
14918 static enum rtx_code
14919 ix86_fp_swap_condition (enum rtx_code code)
14923 case GT: /* GTU - CF=0 & ZF=0 */
14924 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
14925 case GE: /* GEU - CF=0 */
14926 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
14927 case UNLT: /* LTU - CF=1 */
14928 return TARGET_IEEE_FP ? UNKNOWN : GT;
14929 case UNLE: /* LEU - CF=1 | ZF=1 */
14930 return TARGET_IEEE_FP ? UNKNOWN : GE;
14932 return swap_condition (code);
14936 /* Return cost of comparison CODE using the best strategy for performance.
14937 All the following functions use the number of instructions as the cost metric.
14938 In the future this should be tweaked to compute bytes for optimize_size and
14939 take into account performance of various instructions on various CPUs. */
14942 ix86_fp_comparison_cost (enum rtx_code code)
14946 /* The cost of code using bit-twiddling on %ah. */
14963 arith_cost = TARGET_IEEE_FP ? 5 : 4;
14967 arith_cost = TARGET_IEEE_FP ? 6 : 4;
14970 gcc_unreachable ();
14973 switch (ix86_fp_comparison_strategy (code))
14975 case IX86_FPCMP_COMI:
14976 return arith_cost > 4 ? 3 : 2;
14977 case IX86_FPCMP_SAHF:
14978 return arith_cost > 4 ? 4 : 3;
14984 /* Return strategy to use for floating-point. We assume that fcomi is always
14985 preferable where available, since that is also true when looking at size
14986 (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test). */
14988 enum ix86_fpcmp_strategy
14989 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
14991 /* Do fcomi/sahf based test when profitable. */
14994 return IX86_FPCMP_COMI;
14996 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
14997 return IX86_FPCMP_SAHF;
14999 return IX86_FPCMP_ARITH;
15002 /* Swap, force into registers, or otherwise massage the two operands
15003 to a fp comparison. The operands are updated in place; the new
15004 comparison code is returned. */
15006 static enum rtx_code
15007 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
15009 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
15010 rtx op0 = *pop0, op1 = *pop1;
15011 enum machine_mode op_mode = GET_MODE (op0);
15012 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
15014 /* All of the unordered compare instructions only work on registers.
15015 The same is true of the fcomi compare instructions. The XFmode
15016 compare instructions require registers except when comparing
15017 against zero or when converting operand 1 from fixed point to floating point. */
15021 && (fpcmp_mode == CCFPUmode
15022 || (op_mode == XFmode
15023 && ! (standard_80387_constant_p (op0) == 1
15024 || standard_80387_constant_p (op1) == 1)
15025 && GET_CODE (op1) != FLOAT)
15026 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
15028 op0 = force_reg (op_mode, op0);
15029 op1 = force_reg (op_mode, op1);
15033 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
15034 things around if they appear profitable, otherwise force op0
15035 into a register. */
15037 if (standard_80387_constant_p (op0) == 0
15039 && ! (standard_80387_constant_p (op1) == 0
15042 enum rtx_code new_code = ix86_fp_swap_condition (code);
15043 if (new_code != UNKNOWN)
15046 tmp = op0, op0 = op1, op1 = tmp;
15052 op0 = force_reg (op_mode, op0);
15054 if (CONSTANT_P (op1))
15056 int tmp = standard_80387_constant_p (op1);
15058 op1 = validize_mem (force_const_mem (op_mode, op1));
15062 op1 = force_reg (op_mode, op1);
15065 op1 = force_reg (op_mode, op1);
15069 /* Try to rearrange the comparison to make it cheaper. */
15070 if (ix86_fp_comparison_cost (code)
15071 > ix86_fp_comparison_cost (swap_condition (code))
15072 && (REG_P (op1) || can_create_pseudo_p ()))
15075 tmp = op0, op0 = op1, op1 = tmp;
15076 code = swap_condition (code);
15078 op0 = force_reg (op_mode, op0);
15086 /* Convert the comparison codes we use to represent an FP comparison to the
15087 integer code that will result in a proper branch. Return UNKNOWN if no such code is available. */
15091 ix86_fp_compare_code_to_integer (enum rtx_code code)
15120 /* Generate insn patterns to do a floating point compare of OPERANDS. */
15123 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
15125 enum machine_mode fpcmp_mode, intcmp_mode;
15128 fpcmp_mode = ix86_fp_compare_mode (code);
15129 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
15131 /* Do fcomi/sahf based test when profitable. */
15132 switch (ix86_fp_comparison_strategy (code))
15134 case IX86_FPCMP_COMI:
15135 intcmp_mode = fpcmp_mode;
15136 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15137 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
15142 case IX86_FPCMP_SAHF:
15143 intcmp_mode = fpcmp_mode;
15144 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15145 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
15149 scratch = gen_reg_rtx (HImode);
15150 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
15151 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
15154 case IX86_FPCMP_ARITH:
15155 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
15156 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15157 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
15159 scratch = gen_reg_rtx (HImode);
15160 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
15162 /* In the unordered case, we have to check C2 for NaNs, which
15163 doesn't happen to work out to anything nice combination-wise.
15164 So do some bit twiddling on the value we've got in AH to come
15165 up with an appropriate set of condition codes. */
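      /* For reference: after fnstsw the FPU condition bits land in AH
	 as C0 = 0x01, C2 = 0x04 and C3 = 0x40, so the masks below read
	 0x45 = C3|C2|C0 (the whole compare result), 0x05 = C2|C0,
	 0x40 = C3 (equal) and 0x04 = C2 (unordered).  */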
15167 intcmp_mode = CCNOmode;
15172 if (code == GT || !TARGET_IEEE_FP)
15174 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15179 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15180 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15181 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
15182 intcmp_mode = CCmode;
15188 if (code == LT && TARGET_IEEE_FP)
15190 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15191 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
15192 intcmp_mode = CCmode;
15197 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
15203 if (code == GE || !TARGET_IEEE_FP)
15205 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
15210 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15211 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
15217 if (code == LE && TARGET_IEEE_FP)
15219 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15220 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15221 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15222 intcmp_mode = CCmode;
15227 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15233 if (code == EQ && TARGET_IEEE_FP)
15235 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15236 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15237 intcmp_mode = CCmode;
15242 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15248 if (code == NE && TARGET_IEEE_FP)
15250 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15251 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
15257 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15263 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15267 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15272 gcc_unreachable ();
15280 /* Return the test that should be put into the flags user, i.e.
15281 the bcc, scc, or cmov instruction. */
15282 return gen_rtx_fmt_ee (code, VOIDmode,
15283 gen_rtx_REG (intcmp_mode, FLAGS_REG),
15288 ix86_expand_compare (enum rtx_code code)
15291 op0 = ix86_compare_op0;
15292 op1 = ix86_compare_op1;
15294 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC)
15295 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_op0, ix86_compare_op1);
15297 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
15299 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
15300 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15303 ret = ix86_expand_int_compare (code, op0, op1);
15309 ix86_expand_branch (enum rtx_code code, rtx label)
15313 switch (GET_MODE (ix86_compare_op0))
15322 tmp = ix86_expand_compare (code);
15323 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
15324 gen_rtx_LABEL_REF (VOIDmode, label),
15326 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
15333 /* Expand DImode branch into multiple compare+branch. */
15335 rtx lo[2], hi[2], label2;
15336 enum rtx_code code1, code2, code3;
15337 enum machine_mode submode;
15339 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
15341 tmp = ix86_compare_op0;
15342 ix86_compare_op0 = ix86_compare_op1;
15343 ix86_compare_op1 = tmp;
15344 code = swap_condition (code);
15346 if (GET_MODE (ix86_compare_op0) == DImode)
15348 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
15349 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
15354 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
15355 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
15359 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
15360 avoid two branches. This costs one extra insn, so disable when
15361 optimizing for size. */
15363 if ((code == EQ || code == NE)
15364 && (!optimize_insn_for_size_p ()
15365 || hi[1] == const0_rtx || lo[1] == const0_rtx))
15370 if (hi[1] != const0_rtx)
15371 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
15372 NULL_RTX, 0, OPTAB_WIDEN);
15375 if (lo[1] != const0_rtx)
15376 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
15377 NULL_RTX, 0, OPTAB_WIDEN);
15379 tmp = expand_binop (submode, ior_optab, xor1, xor0,
15380 NULL_RTX, 0, OPTAB_WIDEN);
15382 ix86_compare_op0 = tmp;
15383 ix86_compare_op1 = const0_rtx;
15384 ix86_expand_branch (code, label);
15388 /* Otherwise, if we are doing a less-than or greater-than-or-equal comparison,
15389 op1 is a constant and the low word is zero, then we can just
15390 examine the high word. Similarly for a low word of -1 and
15391 less-than-or-equal or greater-than. */
15393 if (CONST_INT_P (hi[1]))
15396 case LT: case LTU: case GE: case GEU:
15397 if (lo[1] == const0_rtx)
15399 ix86_compare_op0 = hi[0];
15400 ix86_compare_op1 = hi[1];
15401 ix86_expand_branch (code, label);
15405 case LE: case LEU: case GT: case GTU:
15406 if (lo[1] == constm1_rtx)
15408 ix86_compare_op0 = hi[0];
15409 ix86_compare_op1 = hi[1];
15410 ix86_expand_branch (code, label);
15418 /* Otherwise, we need two or three jumps. */
15420 label2 = gen_label_rtx ();
15423 code2 = swap_condition (code);
15424 code3 = unsigned_condition (code);
15428 case LT: case GT: case LTU: case GTU:
15431 case LE: code1 = LT; code2 = GT; break;
15432 case GE: code1 = GT; code2 = LT; break;
15433 case LEU: code1 = LTU; code2 = GTU; break;
15434 case GEU: code1 = GTU; code2 = LTU; break;
15436 case EQ: code1 = UNKNOWN; code2 = NE; break;
15437 case NE: code2 = UNKNOWN; break;
15440 gcc_unreachable ();
15445 * if (hi(a) < hi(b)) goto true;
15446 * if (hi(a) > hi(b)) goto false;
15447 * if (lo(a) < lo(b)) goto true;
15451 ix86_compare_op0 = hi[0];
15452 ix86_compare_op1 = hi[1];
15454 if (code1 != UNKNOWN)
15455 ix86_expand_branch (code1, label);
15456 if (code2 != UNKNOWN)
15457 ix86_expand_branch (code2, label2);
15459 ix86_compare_op0 = lo[0];
15460 ix86_compare_op1 = lo[1];
15461 ix86_expand_branch (code3, label);
15463 if (code2 != UNKNOWN)
15464 emit_label (label2);
15469 /* If we have already emitted a compare insn, go straight to simple.
15470 ix86_expand_compare won't emit anything if ix86_compare_emitted is non-NULL. */
15472 gcc_assert (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC);
15477 /* Split branch based on floating point condition. */
15479 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
15480 rtx target1, rtx target2, rtx tmp, rtx pushed)
15485 if (target2 != pc_rtx)
15488 code = reverse_condition_maybe_unordered (code);
15493 condition = ix86_expand_fp_compare (code, op1, op2,
15496 /* Remove pushed operand from stack. */
15498 ix86_free_from_memory (GET_MODE (pushed));
15500 i = emit_jump_insn (gen_rtx_SET
15502 gen_rtx_IF_THEN_ELSE (VOIDmode,
15503 condition, target1, target2)));
15504 if (split_branch_probability >= 0)
15505 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
15509 ix86_expand_setcc (enum rtx_code code, rtx dest)
15513 gcc_assert (GET_MODE (dest) == QImode);
15515 ret = ix86_expand_compare (code);
15516 PUT_MODE (ret, QImode);
15517 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
15520 /* Expand a comparison setting or clearing the carry flag. Return true
15521 when successful, and set POP to the comparison rtx for the operation. */
15523 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
15525 enum machine_mode mode =
15526 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
15528 /* Do not handle DImode compares that go through the special path. */
15529 if (mode == (TARGET_64BIT ? TImode : DImode))
15532 if (SCALAR_FLOAT_MODE_P (mode))
15534 rtx compare_op, compare_seq;
15536 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
15538 /* Shortcut: the following common codes never translate
15539 into carry flag compares. */
15540 if (code == EQ || code == NE || code == UNEQ || code == LTGT
15541 || code == ORDERED || code == UNORDERED)
15544 /* These comparisons require the zero flag; swap operands so they won't need it. */
15545 if ((code == GT || code == UNLE || code == LE || code == UNGT)
15546 && !TARGET_IEEE_FP)
15551 code = swap_condition (code);
15554 /* Try to expand the comparison and verify that we end up with
15555 a carry flag based comparison. This fails to be true only when
15556 we decide to expand the comparison using arithmetic, which is
15557 not a very common scenario. */
15559 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15560 compare_seq = get_insns ();
15563 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15564 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15565 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
15567 code = GET_CODE (compare_op);
15569 if (code != LTU && code != GEU)
15572 emit_insn (compare_seq);
15577 if (!INTEGRAL_MODE_P (mode))
15586 /* Convert a==0 into (unsigned)a<1. */
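      /* E.g., "x == 0" becomes "(unsigned) x < 1", i.e. a single
         "cmpl $1, x" whose carry flag then holds the result (sketch).  */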
15589 if (op1 != const0_rtx)
15592 code = (code == EQ ? LTU : GEU);
15595 /* Convert a>b into b<a or a>=b-1. */
15598 if (CONST_INT_P (op1))
15600 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
15601 /* Bail out on overflow. We can still swap operands, but that
15602 would force loading of the constant into a register. */
15603 if (op1 == const0_rtx
15604 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
15606 code = (code == GTU ? GEU : LTU);
15613 code = (code == GTU ? LTU : GEU);
15617 /* Convert a>=0 into (unsigned)a<0x80000000. */
15620 if (mode == DImode || op1 != const0_rtx)
15622 op1 = gen_int_mode ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15623 code = (code == LT ? GEU : LTU);
15627 if (mode == DImode || op1 != constm1_rtx)
15629 op1 = gen_int_mode ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15630 code = (code == LE ? GEU : LTU);
15636 /* Swapping operands may cause a constant to appear as the first operand. */
15637 if (!nonimmediate_operand (op0, VOIDmode))
15639 if (!can_create_pseudo_p ())
15641 op0 = force_reg (mode, op0);
15643 ix86_compare_op0 = op0;
15644 ix86_compare_op1 = op1;
15645 *pop = ix86_expand_compare (code);
15646 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
15651 ix86_expand_int_movcc (rtx operands[])
15653 enum rtx_code code = GET_CODE (operands[1]), compare_code;
15654 rtx compare_seq, compare_op;
15655 enum machine_mode mode = GET_MODE (operands[0]);
15656 bool sign_bit_compare_p = false;
15659 ix86_compare_op0 = XEXP (operands[1], 0);
15660 ix86_compare_op1 = XEXP (operands[1], 1);
15661 compare_op = ix86_expand_compare (code);
15662 compare_seq = get_insns ();
15665 compare_code = GET_CODE (compare_op);
15667 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
15668 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
15669 sign_bit_compare_p = true;
15671 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
15672 HImode insns, we'd be swallowed in word prefix ops. */
15674 if ((mode != HImode || TARGET_FAST_PREFIX)
15675 && (mode != (TARGET_64BIT ? TImode : DImode))
15676 && CONST_INT_P (operands[2])
15677 && CONST_INT_P (operands[3]))
15679 rtx out = operands[0];
15680 HOST_WIDE_INT ct = INTVAL (operands[2]);
15681 HOST_WIDE_INT cf = INTVAL (operands[3]);
15682 HOST_WIDE_INT diff;
15685 /* Sign bit compares are better done using shifts than by using sbb. */
15687 if (sign_bit_compare_p
15688 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15689 ix86_compare_op1, &compare_op))
15691 /* Detect overlap between destination and compare sources. */
15694 if (!sign_bit_compare_p)
15697 bool fpcmp = false;
15699 compare_code = GET_CODE (compare_op);
15701 flags = XEXP (compare_op, 0);
15703 if (GET_MODE (flags) == CCFPmode
15704 || GET_MODE (flags) == CCFPUmode)
15708 = ix86_fp_compare_code_to_integer (compare_code);
15711 /* To simplify the rest of the code, restrict to the GEU case. */
15712 if (compare_code == LTU)
15714 HOST_WIDE_INT tmp = ct;
15717 compare_code = reverse_condition (compare_code);
15718 code = reverse_condition (code);
15723 PUT_CODE (compare_op,
15724 reverse_condition_maybe_unordered
15725 (GET_CODE (compare_op)));
15727 PUT_CODE (compare_op,
15728 reverse_condition (GET_CODE (compare_op)));
15732 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
15733 || reg_overlap_mentioned_p (out, ix86_compare_op1))
15734 tmp = gen_reg_rtx (mode);
15736 if (mode == DImode)
15737 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
15739 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
15740 flags, compare_op));
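	  /* The insn built above is the classic "sbb %reg, %reg" idiom:
	     with the carry flag set by the compare it yields all ones (-1),
	     otherwise 0, in a single instruction (illustrative).  */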
15744 if (code == GT || code == GE)
15745 code = reverse_condition (code);
15748 HOST_WIDE_INT tmp = ct;
15753 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
15754 ix86_compare_op1, VOIDmode, 0, -1);
15767 tmp = expand_simple_binop (mode, PLUS,
15769 copy_rtx (tmp), 1, OPTAB_DIRECT);
15780 tmp = expand_simple_binop (mode, IOR,
15782 copy_rtx (tmp), 1, OPTAB_DIRECT);
15784 else if (diff == -1 && ct)
15794 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15796 tmp = expand_simple_binop (mode, PLUS,
15797 copy_rtx (tmp), GEN_INT (cf),
15798 copy_rtx (tmp), 1, OPTAB_DIRECT);
15806 * andl cf - ct, dest
15816 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15819 tmp = expand_simple_binop (mode, AND,
15821 gen_int_mode (cf - ct, mode),
15822 copy_rtx (tmp), 1, OPTAB_DIRECT);
15824 tmp = expand_simple_binop (mode, PLUS,
15825 copy_rtx (tmp), GEN_INT (ct),
15826 copy_rtx (tmp), 1, OPTAB_DIRECT);
15829 if (!rtx_equal_p (tmp, out))
15830 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
15832 return 1; /* DONE */
15837 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15840 tmp = ct, ct = cf, cf = tmp;
15843 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15845 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15847 /* We may be reversing an unordered compare to a normal compare, which
15848 is not valid in general (we may convert a non-trapping condition
15849 into a trapping one); however, on i386 we currently emit all
15850 comparisons unordered. */
15851 compare_code = reverse_condition_maybe_unordered (compare_code);
15852 code = reverse_condition_maybe_unordered (code);
15856 compare_code = reverse_condition (compare_code);
15857 code = reverse_condition (code);
15861 compare_code = UNKNOWN;
15862 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
15863 && CONST_INT_P (ix86_compare_op1))
15865 if (ix86_compare_op1 == const0_rtx
15866 && (code == LT || code == GE))
15867 compare_code = code;
15868 else if (ix86_compare_op1 == constm1_rtx)
15872 else if (code == GT)
15877 /* Optimize dest = (op0 < 0) ? -1 : cf. */
15878 if (compare_code != UNKNOWN
15879 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
15880 && (cf == -1 || ct == -1))
15882 /* If lea code below could be used, only optimize
15883 if it results in a 2 insn sequence. */
15885 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
15886 || diff == 3 || diff == 5 || diff == 9)
15887 || (compare_code == LT && ct == -1)
15888 || (compare_code == GE && cf == -1))
15891 * notl op1 (if necessary)
15899 code = reverse_condition (code);
15902 out = emit_store_flag (out, code, ix86_compare_op0,
15903 ix86_compare_op1, VOIDmode, 0, -1);
15905 out = expand_simple_binop (mode, IOR,
15907 out, 1, OPTAB_DIRECT);
15908 if (out != operands[0])
15909 emit_move_insn (operands[0], out);
15911 return 1; /* DONE */
15916 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
15917 || diff == 3 || diff == 5 || diff == 9)
15918 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
15920 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
15926 * lea cf(dest*(ct-cf)),dest
15930 * This also catches the degenerate setcc-only case.
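      /* A hedged sketch of the sequence built below, e.g. for
         dest = (x < y) ? 13 : 5 (so diff == 8):
             cmpl  y, x
             setl  dest               ; dest = 0 or 1
             leal  5(,dest,8), dest   ; dest = 5 or 13  */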
15936 out = emit_store_flag (out, code, ix86_compare_op0,
15937 ix86_compare_op1, VOIDmode, 0, 1);
15940 /* On x86_64 the lea instruction operates on Pmode, so we need
15941 to get the arithmetic done in the proper mode to match. */
15943 tmp = copy_rtx (out);
15947 out1 = copy_rtx (out);
15948 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
15952 tmp = gen_rtx_PLUS (mode, tmp, out1);
15958 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
15961 if (!rtx_equal_p (tmp, out))
15964 out = force_operand (tmp, copy_rtx (out));
15966 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
15968 if (!rtx_equal_p (out, operands[0]))
15969 emit_move_insn (operands[0], copy_rtx (out));
15971 return 1; /* DONE */
15975 * General case: Jumpful:
15976 * xorl dest,dest cmpl op1, op2
15977 * cmpl op1, op2 movl ct, dest
15978 * setcc dest jcc 1f
15979 * decl dest movl cf, dest
15980 * andl (cf-ct),dest 1:
15983 * Size 20. Size 14.
15985 * This is reasonably steep, but branch mispredict costs are
15986 * high on modern cpus, so consider failing only if optimizing for size.
15990 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15991 && BRANCH_COST (optimize_insn_for_speed_p (),
15996 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
16001 if (SCALAR_FLOAT_MODE_P (cmp_mode))
16003 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
16005 /* We may be reversing an unordered compare to a normal compare,
16006 which is not valid in general (we may convert a non-trapping
16007 condition into a trapping one); however, on i386 we currently
16008 emit all comparisons unordered. */
16009 code = reverse_condition_maybe_unordered (code);
16013 code = reverse_condition (code);
16014 if (compare_code != UNKNOWN)
16015 compare_code = reverse_condition (compare_code);
16019 if (compare_code != UNKNOWN)
16021 /* notl op1 (if needed)
16026 For x < 0 (resp. x <= -1) there will be no notl,
16027 so if possible swap the constants to get rid of the complement.
16029 True/false will be -1/0 while code below (store flag
16030 followed by decrement) is 0/-1, so the constants need
16031 to be exchanged once more. */
16033 if (compare_code == GE || !cf)
16035 code = reverse_condition (code);
16040 HOST_WIDE_INT tmp = cf;
16045 out = emit_store_flag (out, code, ix86_compare_op0,
16046 ix86_compare_op1, VOIDmode, 0, -1);
16050 out = emit_store_flag (out, code, ix86_compare_op0,
16051 ix86_compare_op1, VOIDmode, 0, 1);
16053 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
16054 copy_rtx (out), 1, OPTAB_DIRECT);
16057 out = expand_simple_binop (mode, AND, copy_rtx (out),
16058 gen_int_mode (cf - ct, mode),
16059 copy_rtx (out), 1, OPTAB_DIRECT);
16061 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
16062 copy_rtx (out), 1, OPTAB_DIRECT);
16063 if (!rtx_equal_p (out, operands[0]))
16064 emit_move_insn (operands[0], copy_rtx (out));
16066 return 1; /* DONE */
16070 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
16072 /* Try a few things more with specific constants and a variable. */
16075 rtx var, orig_out, out, tmp;
16077 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
16078 return 0; /* FAIL */
16080 /* If one of the two operands is an interesting constant, load that
16081 constant via the sequence above and mask it in with a logical operation. */
16083 if (CONST_INT_P (operands[2]))
16086 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
16087 operands[3] = constm1_rtx, op = and_optab;
16088 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
16089 operands[3] = const0_rtx, op = ior_optab;
16091 return 0; /* FAIL */
16093 else if (CONST_INT_P (operands[3]))
16096 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
16097 operands[2] = constm1_rtx, op = and_optab;
16098 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
16099 operands[2] = const0_rtx, op = ior_optab;
16101 return 0; /* FAIL */
16104 return 0; /* FAIL */
16106 orig_out = operands[0];
16107 tmp = gen_reg_rtx (mode);
16110 /* Recurse to get the constant loaded. */
16111 if (ix86_expand_int_movcc (operands) == 0)
16112 return 0; /* FAIL */
16114 /* Mask in the interesting variable. */
16115 out = expand_binop (mode, op, var, tmp, orig_out, 0,
16117 if (!rtx_equal_p (out, orig_out))
16118 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
16120 return 1; /* DONE */
16124 * For comparison with above,
16134 if (! nonimmediate_operand (operands[2], mode))
16135 operands[2] = force_reg (mode, operands[2]);
16136 if (! nonimmediate_operand (operands[3], mode))
16137 operands[3] = force_reg (mode, operands[3]);
16139 if (! register_operand (operands[2], VOIDmode)
16141 || ! register_operand (operands[3], VOIDmode)))
16142 operands[2] = force_reg (mode, operands[2]);
16145 && ! register_operand (operands[3], VOIDmode))
16146 operands[3] = force_reg (mode, operands[3]);
16148 emit_insn (compare_seq);
16149 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16150 gen_rtx_IF_THEN_ELSE (mode,
16151 compare_op, operands[2],
16154 return 1; /* DONE */
16157 /* Swap, force into registers, or otherwise massage the two operands
16158 to an sse comparison with a mask result. Thus we differ a bit from
16159 ix86_prepare_fp_compare_args which expects to produce a flags result.
16161 The DEST operand exists to help determine whether to commute commutative
16162 operators. The POP0/POP1 operands are updated in place. The new
16163 comparison code is returned, or UNKNOWN if not implementable. */
16165 static enum rtx_code
16166 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
16167 rtx *pop0, rtx *pop1)
16175 /* We have no LTGT as an operator. We could implement it with
16176 NE & ORDERED, but this requires an extra temporary. It's
16177 not clear that it's worth it. */
16184 /* These are supported directly. */
16191 /* For commutative operators, try to canonicalize the destination
16192 operand to be first in the comparison - this helps reload to
16193 avoid extra moves. */
16194 if (!dest || !rtx_equal_p (dest, *pop1))
16202 /* These are not supported directly. Swap the comparison operands
16203 to transform into something that is supported. */
16207 code = swap_condition (code);
16211 gcc_unreachable ();
16217 /* Detect conditional moves that exactly match min/max operational
16218 semantics. Note that this is IEEE safe, as long as we don't
16219 interchange the operands.
16221 Returns FALSE if this conditional move doesn't match a MIN/MAX,
16222 and TRUE if the operation is successful and instructions are emitted. */
16225 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
16226 rtx cmp_op1, rtx if_true, rtx if_false)
16228 enum machine_mode mode;
16234 else if (code == UNGE)
16237 if_true = if_false;
16243 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
16245 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
16250 mode = GET_MODE (dest);
16252 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
16253 but MODE may be a vector mode and thus not appropriate. */
16254 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
16256 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
16259 if_true = force_reg (mode, if_true);
16260 v = gen_rtvec (2, if_true, if_false);
16261 tmp = gen_rtx_UNSPEC (mode, v, u);
16265 code = is_min ? SMIN : SMAX;
16266 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
16269 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
16273 /* Expand an sse vector comparison. Return the register with the result. */
16276 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
16277 rtx op_true, rtx op_false)
16279 enum machine_mode mode = GET_MODE (dest);
16282 cmp_op0 = force_reg (mode, cmp_op0);
16283 if (!nonimmediate_operand (cmp_op1, mode))
16284 cmp_op1 = force_reg (mode, cmp_op1);
16287 || reg_overlap_mentioned_p (dest, op_true)
16288 || reg_overlap_mentioned_p (dest, op_false))
16289 dest = gen_reg_rtx (mode);
16291 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
16292 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16297 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
16298 operations. This is used for both scalar and vector conditional moves. */
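/* In the general case below this computes, as a sketch,
     dest = (cmp AND op_true) IOR ((NOT cmp) AND op_false)
   where CMP is an all-ones/all-zeros mask produced by the comparison.  */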
16301 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
16303 enum machine_mode mode = GET_MODE (dest);
16306 if (op_false == CONST0_RTX (mode))
16308 op_true = force_reg (mode, op_true);
16309 x = gen_rtx_AND (mode, cmp, op_true);
16310 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16312 else if (op_true == CONST0_RTX (mode))
16314 op_false = force_reg (mode, op_false);
16315 x = gen_rtx_NOT (mode, cmp);
16316 x = gen_rtx_AND (mode, x, op_false);
16317 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16319 else if (TARGET_XOP)
16321 rtx pcmov = gen_rtx_SET (mode, dest,
16322 gen_rtx_IF_THEN_ELSE (mode, cmp,
16329 op_true = force_reg (mode, op_true);
16330 op_false = force_reg (mode, op_false);
16332 t2 = gen_reg_rtx (mode);
16334 t3 = gen_reg_rtx (mode);
16338 x = gen_rtx_AND (mode, op_true, cmp);
16339 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
16341 x = gen_rtx_NOT (mode, cmp);
16342 x = gen_rtx_AND (mode, x, op_false);
16343 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
16345 x = gen_rtx_IOR (mode, t3, t2);
16346 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16350 /* Expand a floating-point conditional move. Return true if successful. */
16353 ix86_expand_fp_movcc (rtx operands[])
16355 enum machine_mode mode = GET_MODE (operands[0]);
16356 enum rtx_code code = GET_CODE (operands[1]);
16357 rtx tmp, compare_op;
16359 ix86_compare_op0 = XEXP (operands[1], 0);
16360 ix86_compare_op1 = XEXP (operands[1], 1);
16361 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
16363 enum machine_mode cmode;
16365 /* Since we've no cmove for sse registers, don't force bad register
16366 allocation just to gain access to it. Deny movcc when the
16367 comparison mode doesn't match the move mode. */
16368 cmode = GET_MODE (ix86_compare_op0);
16369 if (cmode == VOIDmode)
16370 cmode = GET_MODE (ix86_compare_op1);
16374 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16376 &ix86_compare_op1);
16377 if (code == UNKNOWN)
16380 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
16381 ix86_compare_op1, operands[2],
16385 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
16386 ix86_compare_op1, operands[2], operands[3]);
16387 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
16391 /* The floating point conditional move instructions don't directly
16392 support conditions resulting from a signed integer comparison. */
16394 compare_op = ix86_expand_compare (code);
16395 if (!fcmov_comparison_operator (compare_op, VOIDmode))
16397 tmp = gen_reg_rtx (QImode);
16398 ix86_expand_setcc (code, tmp);
16400 ix86_compare_op0 = tmp;
16401 ix86_compare_op1 = const0_rtx;
16402 compare_op = ix86_expand_compare (code);
16405 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16406 gen_rtx_IF_THEN_ELSE (mode, compare_op,
16407 operands[2], operands[3])));
16412 /* Expand a floating-point vector conditional move; a vcond operation
16413 rather than a movcc operation. */
16416 ix86_expand_fp_vcond (rtx operands[])
16418 enum rtx_code code = GET_CODE (operands[3]);
16421 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16422 &operands[4], &operands[5]);
16423 if (code == UNKNOWN)
16426 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
16427 operands[5], operands[1], operands[2]))
16430 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
16431 operands[1], operands[2]);
16432 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
16436 /* Expand a signed/unsigned integral vector conditional move. */
16439 ix86_expand_int_vcond (rtx operands[])
16441 enum machine_mode mode = GET_MODE (operands[0]);
16442 enum rtx_code code = GET_CODE (operands[3]);
16443 bool negate = false;
16446 cop0 = operands[4];
16447 cop1 = operands[5];
16449 /* XOP supports all of the comparisons on all vector int types. */
16452 /* Canonicalize the comparison to EQ, GT, GTU. */
16463 code = reverse_condition (code);
16469 code = reverse_condition (code);
16475 code = swap_condition (code);
16476 x = cop0, cop0 = cop1, cop1 = x;
16480 gcc_unreachable ();
16483 /* Only SSE4.1/SSE4.2 supports V2DImode. */
16484 if (mode == V2DImode)
16489 /* SSE4.1 supports EQ. */
16490 if (!TARGET_SSE4_1)
16496 /* SSE4.2 supports GT/GTU. */
16497 if (!TARGET_SSE4_2)
16502 gcc_unreachable ();
16506 /* Unsigned parallel compare is not supported by the hardware.
16507 Play some tricks to turn this into a signed comparison against 0. */
16511 cop0 = force_reg (mode, cop0);
16519 rtx (*gen_sub3) (rtx, rtx, rtx);
16521 /* Subtract (-(INT MAX) - 1) from both operands to make them signed. */
16523 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
16525 gen_sub3 = (mode == V4SImode
16526 ? gen_subv4si3 : gen_subv2di3);
16527 t1 = gen_reg_rtx (mode);
16528 emit_insn (gen_sub3 (t1, cop0, mask));
16530 t2 = gen_reg_rtx (mode);
16531 emit_insn (gen_sub3 (t2, cop1, mask));
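	    /* E.g., for V4SI elements this flips the sign bits, so
	       x <u y  <=>  (x - 0x80000000) <s (y - 0x80000000);
	       subtracting INT_MIN is the same as XORing it in, modulo the
	       element width (illustrative).  */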
16541 /* Perform a parallel unsigned saturating subtraction. */
16542 x = gen_reg_rtx (mode);
16543 emit_insn (gen_rtx_SET (VOIDmode, x,
16544 gen_rtx_US_MINUS (mode, cop0, cop1)));
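	  /* E.g., for V8HI, "psubusw" computes max (x - y, 0), which is
	     nonzero exactly when x >u y, so the comparison reduces to a
	     test against zero (illustrative).  */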
16547 cop1 = CONST0_RTX (mode);
16553 gcc_unreachable ();
16558 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
16559 operands[1+negate], operands[2-negate]);
16561 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
16562 operands[2-negate]);
16566 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
16567 true if we should do zero extension, else sign extension. HIGH_P is
16568 true if we want the N/2 high elements, else the low elements. */
16571 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16573 enum machine_mode imode = GET_MODE (operands[1]);
16574 rtx (*unpack)(rtx, rtx, rtx);
16581 unpack = gen_vec_interleave_highv16qi;
16583 unpack = gen_vec_interleave_lowv16qi;
16587 unpack = gen_vec_interleave_highv8hi;
16589 unpack = gen_vec_interleave_lowv8hi;
16593 unpack = gen_vec_interleave_highv4si;
16595 unpack = gen_vec_interleave_lowv4si;
16598 gcc_unreachable ();
16601 dest = gen_lowpart (imode, operands[0]);
16604 se = force_reg (imode, CONST0_RTX (imode));
16606 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
16607 operands[1], pc_rtx, pc_rtx);
16609 emit_insn (unpack (dest, operands[1], se));
16612 /* This function performs the same task as ix86_expand_sse_unpack,
16613 but with SSE4.1 instructions. */
16616 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16618 enum machine_mode imode = GET_MODE (operands[1]);
16619 rtx (*unpack)(rtx, rtx);
16626 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
16628 unpack = gen_sse4_1_extendv8qiv8hi2;
16632 unpack = gen_sse4_1_zero_extendv4hiv4si2;
16634 unpack = gen_sse4_1_extendv4hiv4si2;
16638 unpack = gen_sse4_1_zero_extendv2siv2di2;
16640 unpack = gen_sse4_1_extendv2siv2di2;
16643 gcc_unreachable ();
16646 dest = operands[0];
16649 /* Shift higher 8 bytes to lower 8 bytes. */
16650 src = gen_reg_rtx (imode);
16651 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, src),
16652 gen_lowpart (V1TImode, operands[1]),
16658 emit_insn (unpack (dest, src));
16661 /* Expand conditional increment or decrement using adc/sbb instructions.
16662 The default case using setcc followed by the conditional move can be
16663 done by generic code. */
16665 ix86_expand_int_addcc (rtx operands[])
16667 enum rtx_code code = GET_CODE (operands[1]);
16669 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
16671 rtx val = const0_rtx;
16672 bool fpcmp = false;
16673 enum machine_mode mode;
16675 ix86_compare_op0 = XEXP (operands[1], 0);
16676 ix86_compare_op1 = XEXP (operands[1], 1);
16677 if (operands[3] != const1_rtx
16678 && operands[3] != constm1_rtx)
16680 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
16681 ix86_compare_op1, &compare_op))
16683 code = GET_CODE (compare_op);
16685 flags = XEXP (compare_op, 0);
16687 if (GET_MODE (flags) == CCFPmode
16688 || GET_MODE (flags) == CCFPUmode)
16691 code = ix86_fp_compare_code_to_integer (code);
16698 PUT_CODE (compare_op,
16699 reverse_condition_maybe_unordered
16700 (GET_CODE (compare_op)));
16702 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
16705 mode = GET_MODE (operands[0]);
16707 /* Construct either adc or sbb insn. */
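  /* E.g., "x += (a <u b)" on SImode comes out roughly as
         cmpl  b, a
         adcl  $0, x
     while "x -= (a <u b)" uses sbbl instead (hedged sketch).  */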
16708 if ((code == LTU) == (operands[3] == constm1_rtx))
16713 insn = gen_subqi3_carry;
16716 insn = gen_subhi3_carry;
16719 insn = gen_subsi3_carry;
16722 insn = gen_subdi3_carry;
16725 gcc_unreachable ();
16733 insn = gen_addqi3_carry;
16736 insn = gen_addhi3_carry;
16739 insn = gen_addsi3_carry;
16742 insn = gen_adddi3_carry;
16745 gcc_unreachable ();
16748 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
16750 return 1; /* DONE */
16754 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
16755 works for floating point parameters and non-offsettable memories.
16756 For pushes, it returns just stack offsets; the values will be saved
16757 in the right order. At most four parts are generated. */
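/* E.g., on a 32-bit target a DFmode value splits into two SImode parts
   and an XFmode value into three; on 64-bit, a TFmode value splits into
   two DImode parts (illustrative).  */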
16760 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
16765 size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
16767 size = (GET_MODE_SIZE (mode) + 4) / 8;
16769 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
16770 gcc_assert (size >= 2 && size <= 4);
16772 /* Optimize constant pool references to immediates. This is used by fp
16773 moves, which force all constants to memory to allow combining. */
16774 if (MEM_P (operand) && MEM_READONLY_P (operand))
16776 rtx tmp = maybe_get_pool_constant (operand);
16781 if (MEM_P (operand) && !offsettable_memref_p (operand))
16783 /* The only non-offsettable memories we handle are pushes. */
16784 int ok = push_operand (operand, VOIDmode);
16788 operand = copy_rtx (operand);
16789 PUT_MODE (operand, Pmode);
16790 parts[0] = parts[1] = parts[2] = parts[3] = operand;
16794 if (GET_CODE (operand) == CONST_VECTOR)
16796 enum machine_mode imode = int_mode_for_mode (mode);
16797 /* Caution: if we looked through a constant pool memory above,
16798 the operand may actually have a different mode now. That's
16799 ok, since we want to pun this all the way back to an integer. */
16800 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
16801 gcc_assert (operand != NULL);
16807 if (mode == DImode)
16808 split_di (&operand, 1, &parts[0], &parts[1]);
16813 if (REG_P (operand))
16815 gcc_assert (reload_completed);
16816 for (i = 0; i < size; i++)
16817 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
16819 else if (offsettable_memref_p (operand))
16821 operand = adjust_address (operand, SImode, 0);
16822 parts[0] = operand;
16823 for (i = 1; i < size; i++)
16824 parts[i] = adjust_address (operand, SImode, 4 * i);
16826 else if (GET_CODE (operand) == CONST_DOUBLE)
16831 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16835 real_to_target (l, &r, mode);
16836 parts[3] = gen_int_mode (l[3], SImode);
16837 parts[2] = gen_int_mode (l[2], SImode);
16840 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
16841 parts[2] = gen_int_mode (l[2], SImode);
16844 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
16847 gcc_unreachable ();
16849 parts[1] = gen_int_mode (l[1], SImode);
16850 parts[0] = gen_int_mode (l[0], SImode);
16853 gcc_unreachable ();
16858 if (mode == TImode)
16859 split_ti (&operand, 1, &parts[0], &parts[1]);
16860 if (mode == XFmode || mode == TFmode)
16862 enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
16863 if (REG_P (operand))
16865 gcc_assert (reload_completed);
16866 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
16867 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
16869 else if (offsettable_memref_p (operand))
16871 operand = adjust_address (operand, DImode, 0);
16872 parts[0] = operand;
16873 parts[1] = adjust_address (operand, upper_mode, 8);
16875 else if (GET_CODE (operand) == CONST_DOUBLE)
16880 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16881 real_to_target (l, &r, mode);
16883 /* Do not use shift by 32 to avoid warning on 32bit systems. */
16884 if (HOST_BITS_PER_WIDE_INT >= 64)
16887 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
16888 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
16891 parts[0] = immed_double_const (l[0], l[1], DImode);
16893 if (upper_mode == SImode)
16894 parts[1] = gen_int_mode (l[2], SImode);
16895 else if (HOST_BITS_PER_WIDE_INT >= 64)
16898 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
16899 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
16902 parts[1] = immed_double_const (l[2], l[3], DImode);
16905 gcc_unreachable ();
16912 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
16913 Return false when normal moves are needed; true when all required
16914 insns have been emitted. Operands 2-5 receive the destination parts
16915 in the correct order; operands 6-9 receive the source parts. */
16918 ix86_split_long_move (rtx operands[])
16923 int collisions = 0;
16924 enum machine_mode mode = GET_MODE (operands[0]);
16925 bool collisionparts[4];
16927 /* The DFmode expanders may ask us to move a double.
16928 For a 64-bit target this is a single move. By hiding the fact
16929 here we simplify the i386.md splitters. */
16930 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
16932 /* Optimize constant pool references to immediates. This is used by
16933 fp moves, which force all constants to memory to allow combining. */
16935 if (MEM_P (operands[1])
16936 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
16937 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
16938 operands[1] = get_pool_constant (XEXP (operands[1], 0));
16939 if (push_operand (operands[0], VOIDmode))
16941 operands[0] = copy_rtx (operands[0]);
16942 PUT_MODE (operands[0], Pmode);
16945 operands[0] = gen_lowpart (DImode, operands[0]);
16946 operands[1] = gen_lowpart (DImode, operands[1]);
16947 emit_move_insn (operands[0], operands[1]);
16951 /* The only non-offsettable memory we handle is push. */
16952 if (push_operand (operands[0], VOIDmode))
16955 gcc_assert (!MEM_P (operands[0])
16956 || offsettable_memref_p (operands[0]));
16958 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
16959 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
16961 /* When emitting a push, take care of source operands on the stack. */
16962 if (push && MEM_P (operands[1])
16963 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
16965 rtx src_base = XEXP (part[1][nparts - 1], 0);
16967 /* Compensate for the stack decrement by 4. */
16968 if (!TARGET_64BIT && nparts == 3
16969 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
16970 src_base = plus_constant (src_base, 4);
16972 /* src_base refers to the stack pointer and is
16973 automatically decreased by emitted push. */
16974 for (i = 0; i < nparts; i++)
16975 part[1][i] = change_address (part[1][i],
16976 GET_MODE (part[1][i]), src_base);
16979 /* We need to do the copy in the right order in case an address
16980 register of the source overlaps the destination. */
16981 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
16985 for (i = 0; i < nparts; i++)
16988 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
16989 if (collisionparts[i])
16993 /* Collision in the middle part can be handled by reordering. */
16994 if (collisions == 1 && nparts == 3 && collisionparts [1])
16996 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16997 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16999 else if (collisions == 1
17001 && (collisionparts [1] || collisionparts [2]))
17003 if (collisionparts [1])
17005 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
17006 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
17010 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
17011 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
17015 /* If there are more collisions, we can't handle them by reordering.
17016 Do an lea to the last part and use only one colliding move. */
17017 else if (collisions > 1)
17023 base = part[0][nparts - 1];
17025 /* Handle the case when the last part isn't valid for lea.
17026 This happens in 64-bit mode when storing the 12-byte XFmode. */
17027 if (GET_MODE (base) != Pmode)
17028 base = gen_rtx_REG (Pmode, REGNO (base));
17030 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
17031 part[1][0] = replace_equiv_address (part[1][0], base);
17032 for (i = 1; i < nparts; i++)
17034 tmp = plus_constant (base, UNITS_PER_WORD * i);
17035 part[1][i] = replace_equiv_address (part[1][i], tmp);
17046 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
17047 emit_insn (gen_addsi3 (stack_pointer_rtx,
17048 stack_pointer_rtx, GEN_INT (-4)));
17049 emit_move_insn (part[0][2], part[1][2]);
17051 else if (nparts == 4)
17053 emit_move_insn (part[0][3], part[1][3]);
17054 emit_move_insn (part[0][2], part[1][2]);
17059 /* In 64bit mode we don't have a 32bit push available. In case this is
17060 a register, it is OK; we will just use the larger counterpart. We also
17061 retype memory; this comes from an attempt to avoid the REX prefix on
17062 moving the second half of a TFmode value. */
17063 if (GET_MODE (part[1][1]) == SImode)
17065 switch (GET_CODE (part[1][1]))
17068 part[1][1] = adjust_address (part[1][1], DImode, 0);
17072 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
17076 gcc_unreachable ();
17079 if (GET_MODE (part[1][0]) == SImode)
17080 part[1][0] = part[1][1];
17083 emit_move_insn (part[0][1], part[1][1]);
17084 emit_move_insn (part[0][0], part[1][0]);
17088 /* Choose the correct order so as not to overwrite the source before it is copied. */
17089 if ((REG_P (part[0][0])
17090 && REG_P (part[1][1])
17091 && (REGNO (part[0][0]) == REGNO (part[1][1])
17093 && REGNO (part[0][0]) == REGNO (part[1][2]))
17095 && REGNO (part[0][0]) == REGNO (part[1][3]))))
17097 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
17099 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
17101 operands[2 + i] = part[0][j];
17102 operands[6 + i] = part[1][j];
17107 for (i = 0; i < nparts; i++)
17109 operands[2 + i] = part[0][i];
17110 operands[6 + i] = part[1][i];
17114 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
17115 if (optimize_insn_for_size_p ())
17117 for (j = 0; j < nparts - 1; j++)
17118 if (CONST_INT_P (operands[6 + j])
17119 && operands[6 + j] != const0_rtx
17120 && REG_P (operands[2 + j]))
17121 for (i = j; i < nparts - 1; i++)
17122 if (CONST_INT_P (operands[7 + i])
17123 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
17124 operands[7 + i] = operands[2 + j];
17127 for (i = 0; i < nparts; i++)
17128 emit_move_insn (operands[2 + i], operands[6 + i]);
17133 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
17134 left shift by a constant, either using a single shift or
17135 a sequence of add instructions. */
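/* E.g., a shift by 1 is emitted as "addl %reg, %reg"; larger counts use a
   short run of such adds when that is costed cheaper than a single
   "sall $N, %reg" (illustrative).  */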
17138 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
17142 emit_insn ((mode == DImode
17144 : gen_adddi3) (operand, operand, operand));
17146 else if (!optimize_insn_for_size_p ()
17147 && count * ix86_cost->add <= ix86_cost->shift_const)
17150 for (i = 0; i < count; i++)
17152 emit_insn ((mode == DImode
17154 : gen_adddi3) (operand, operand, operand));
17158 emit_insn ((mode == DImode
17160 : gen_ashldi3) (operand, operand, GEN_INT (count)));
17164 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
17166 rtx low[2], high[2];
17168 const int single_width = mode == DImode ? 32 : 64;
17170 if (CONST_INT_P (operands[2]))
17172 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17173 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17175 if (count >= single_width)
17177 emit_move_insn (high[0], low[1]);
17178 emit_move_insn (low[0], const0_rtx);
17180 if (count > single_width)
17181 ix86_expand_ashl_const (high[0], count - single_width, mode);
17185 if (!rtx_equal_p (operands[0], operands[1]))
17186 emit_move_insn (operands[0], operands[1]);
17187 emit_insn ((mode == DImode
17189 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
17190 ix86_expand_ashl_const (low[0], count, mode);
17195 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17197 if (operands[1] == const1_rtx)
17199 /* Assuming we've chosen QImode-capable registers, then 1 << N
17200 can be done with two 32/64-bit shifts, no branches, no cmoves. */
17201 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
17203 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
17205 ix86_expand_clear (low[0]);
17206 ix86_expand_clear (high[0]);
17207 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
17209 d = gen_lowpart (QImode, low[0]);
17210 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17211 s = gen_rtx_EQ (QImode, flags, const0_rtx);
17212 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17214 d = gen_lowpart (QImode, high[0]);
17215 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17216 s = gen_rtx_NE (QImode, flags, const0_rtx);
17217 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17220 /* Otherwise, we can get the same results by manually performing
17221 a bit extract operation on bit 5/6, and then performing the two
17222 shifts. The two methods of getting 0/1 into low/high are exactly
17223 the same size. Avoiding the shift in the bit extract case helps
17224 pentium4 a bit; no one else seems to care much either way. */
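	  /* Hedged sketch of the DImode (32-bit target) sequence below:
	       movl  n, high
	       shrl  $5, high     ; extract bit 5 of the count
	       andl  $1, high
	       movl  high, low
	       xorl  $1, low      ; low = !high
	       sall  %cl, low     ; hardware shifts use count mod 32
	       sall  %cl, high  */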
17229 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
17230 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
17232 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
17233 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
17235 emit_insn ((mode == DImode
17237 : gen_lshrdi3) (high[0], high[0],
17238 GEN_INT (mode == DImode ? 5 : 6)));
17239 emit_insn ((mode == DImode
17241 : gen_anddi3) (high[0], high[0], const1_rtx));
17242 emit_move_insn (low[0], high[0]);
17243 emit_insn ((mode == DImode
17245 : gen_xordi3) (low[0], low[0], const1_rtx));
17248 emit_insn ((mode == DImode
17250 : gen_ashldi3) (low[0], low[0], operands[2]));
17251 emit_insn ((mode == DImode
17253 : gen_ashldi3) (high[0], high[0], operands[2]));
17257 if (operands[1] == constm1_rtx)
17259 /* For -1 << N, we can avoid the shld instruction, because we
17260 know that we're shifting 0...31/63 ones into a -1. */
17261 emit_move_insn (low[0], constm1_rtx);
17262 if (optimize_insn_for_size_p ())
17263 emit_move_insn (high[0], low[0]);
17265 emit_move_insn (high[0], constm1_rtx);
17269 if (!rtx_equal_p (operands[0], operands[1]))
17270 emit_move_insn (operands[0], operands[1]);
17272 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17273 emit_insn ((mode == DImode
17275 : gen_x86_64_shld) (high[0], low[0], operands[2]));
17278 emit_insn ((mode == DImode
17280 : gen_ashldi3) (low[0], low[0], operands[2]));
17282 if (TARGET_CMOVE && scratch)
17284 ix86_expand_clear (scratch);
17285 emit_insn ((mode == DImode
17286 ? gen_x86_shiftsi_adj_1
17287 : gen_x86_shiftdi_adj_1) (high[0], low[0], operands[2],
17291 emit_insn ((mode == DImode
17292 ? gen_x86_shiftsi_adj_2
17293 : gen_x86_shiftdi_adj_2) (high[0], low[0], operands[2]));
17297 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
17299 rtx low[2], high[2];
17301 const int single_width = mode == DImode ? 32 : 64;
17303 if (CONST_INT_P (operands[2]))
17305 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17306 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17308 if (count == single_width * 2 - 1)
17310 emit_move_insn (high[0], high[1]);
17311 emit_insn ((mode == DImode
17313 : gen_ashrdi3) (high[0], high[0],
17314 GEN_INT (single_width - 1)));
17315 emit_move_insn (low[0], high[0]);
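	  /* I.e. a full double-word arithmetic shift (e.g. DImode ">> 63"
	     on a 32-bit target) just replicates the sign bit:
	         sarl $31, hi
	         movl hi, lo     (illustrative).  */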
17318 else if (count >= single_width)
17320 emit_move_insn (low[0], high[1]);
17321 emit_move_insn (high[0], low[0]);
17322 emit_insn ((mode == DImode
17324 : gen_ashrdi3) (high[0], high[0],
17325 GEN_INT (single_width - 1)));
17326 if (count > single_width)
17327 emit_insn ((mode == DImode
17329 : gen_ashrdi3) (low[0], low[0],
17330 GEN_INT (count - single_width)));
17334 if (!rtx_equal_p (operands[0], operands[1]))
17335 emit_move_insn (operands[0], operands[1]);
17336 emit_insn ((mode == DImode
17338 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17339 emit_insn ((mode == DImode
17341 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
17346 if (!rtx_equal_p (operands[0], operands[1]))
17347 emit_move_insn (operands[0], operands[1]);
17349 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17351 emit_insn ((mode == DImode
17353 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17354 emit_insn ((mode == DImode
17356 : gen_ashrdi3) (high[0], high[0], operands[2]));
17358 if (TARGET_CMOVE && scratch)
17360 emit_move_insn (scratch, high[0]);
17361 emit_insn ((mode == DImode
17363 : gen_ashrdi3) (scratch, scratch,
17364 GEN_INT (single_width - 1)));
17365 emit_insn ((mode == DImode
17366 ? gen_x86_shiftsi_adj_1
17367 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17371 emit_insn ((mode == DImode
17372 ? gen_x86_shiftsi_adj_3
17373 : gen_x86_shiftdi_adj_3) (low[0], high[0], operands[2]));
17378 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
17380 rtx low[2], high[2];
17382 const int single_width = mode == DImode ? 32 : 64;
17384 if (CONST_INT_P (operands[2]))
17386 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17387 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17389 if (count >= single_width)
17391 emit_move_insn (low[0], high[1]);
17392 ix86_expand_clear (high[0]);
17394 if (count > single_width)
17395 emit_insn ((mode == DImode
17397 : gen_lshrdi3) (low[0], low[0],
17398 GEN_INT (count - single_width)));
17402 if (!rtx_equal_p (operands[0], operands[1]))
17403 emit_move_insn (operands[0], operands[1]);
17404 emit_insn ((mode == DImode
17406 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17407 emit_insn ((mode == DImode
17409 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
17414 if (!rtx_equal_p (operands[0], operands[1]))
17415 emit_move_insn (operands[0], operands[1]);
17417 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17419 emit_insn ((mode == DImode
17421 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17422 emit_insn ((mode == DImode
17424 : gen_lshrdi3) (high[0], high[0], operands[2]));
17426 /* Heh. By reversing the arguments, we can reuse this pattern. */
17427 if (TARGET_CMOVE && scratch)
17429 ix86_expand_clear (scratch);
17430 emit_insn ((mode == DImode
17431 ? gen_x86_shiftsi_adj_1
17432 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17436 emit_insn ((mode == DImode
17437 ? gen_x86_shiftsi_adj_2
17438 : gen_x86_shiftdi_adj_2) (low[0], high[0], operands[2]));
17442 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
17444 predict_jump (int prob)
17446 rtx insn = get_last_insn ();
17447 gcc_assert (JUMP_P (insn));
17448 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
17451 /* Helper function for the string operations below. Test VARIABLE whether
17452 it is aligned to VALUE bytes. If so, jump to the label. */
17454 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
17456 rtx label = gen_label_rtx ();
17457 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
17458 if (GET_MODE (variable) == DImode)
17459 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
17461 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
17462 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
17465 predict_jump (REG_BR_PROB_BASE * 50 / 100);
17467 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17471 /* Adjust COUNTREG by the VALUE. */
17473 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
17475 if (GET_MODE (countreg) == DImode)
17476 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
17478 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
17481 /* Zero-extend the possibly SImode EXP to a Pmode register. */
17483 ix86_zero_extend_to_Pmode (rtx exp)
17486 if (GET_MODE (exp) == VOIDmode)
17487 return force_reg (Pmode, exp);
17488 if (GET_MODE (exp) == Pmode)
17489 return copy_to_mode_reg (Pmode, exp);
17490 r = gen_reg_rtx (Pmode);
17491 emit_insn (gen_zero_extendsidi2 (r, exp));
17495 /* Divide COUNTREG by SCALE. */
17497 scale_counter (rtx countreg, int scale)
17503 if (CONST_INT_P (countreg))
17504 return GEN_INT (INTVAL (countreg) / scale);
17505 gcc_assert (REG_P (countreg));
17507 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
17508 GEN_INT (exact_log2 (scale)),
17509 NULL, 1, OPTAB_DIRECT);
17513 /* Return the mode for the memcpy/memset loop counter. Prefer SImode over
17514 DImode for constant loop counts. */
17516 static enum machine_mode
17517 counter_mode (rtx count_exp)
17519 if (GET_MODE (count_exp) != VOIDmode)
17520 return GET_MODE (count_exp);
17521 if (!CONST_INT_P (count_exp))
17523 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
17528 /* When SRCPTR is non-NULL, output a simple loop to move memory
17529 from SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times;
17530 the overall size is COUNT, specified in bytes. When SRCPTR is NULL,
17531 output the equivalent loop to set memory by VALUE (supposed to be in MODE).
17533 The size is rounded down to a whole number of chunks moved at once.
17534 SRCMEM and DESTMEM provide MEMrtx to feed proper aliasing info. */
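/* The generated loop looks roughly like this (hedged sketch):
       size = count & -(chunk * unroll);
       iter = 0;
     top:
       <UNROLL moves/stores of one MODE chunk at dest+iter (and src+iter)>
       iter += chunk * unroll;
       if (iter < size) goto top;
       dest += iter;  (and src += iter;)  */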
17538 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
17539 rtx destptr, rtx srcptr, rtx value,
17540 rtx count, enum machine_mode mode, int unroll,
17543 rtx out_label, top_label, iter, tmp;
17544 enum machine_mode iter_mode = counter_mode (count);
17545 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
17546 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
17552 top_label = gen_label_rtx ();
17553 out_label = gen_label_rtx ();
17554 iter = gen_reg_rtx (iter_mode);
17556 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
17557 NULL, 1, OPTAB_DIRECT);
17558 /* Those two should combine. */
17559 if (piece_size == const1_rtx)
17561 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
17563 predict_jump (REG_BR_PROB_BASE * 10 / 100);
17565 emit_move_insn (iter, const0_rtx);
17567 emit_label (top_label);
17569 tmp = convert_modes (Pmode, iter_mode, iter, true);
17570 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
17571 destmem = change_address (destmem, mode, x_addr);
17575 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
17576 srcmem = change_address (srcmem, mode, y_addr);
17578 /* When unrolling for chips that reorder memory reads and writes,
17579 we can save registers by using a single temporary.
17580 Also, using four temporaries is overkill in 32bit mode. */
17581 if (!TARGET_64BIT && 0)
17583 for (i = 0; i < unroll; i++)
17588 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17590 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17592 emit_move_insn (destmem, srcmem);
17598 gcc_assert (unroll <= 4);
17599 for (i = 0; i < unroll; i++)
17601 tmpreg[i] = gen_reg_rtx (mode);
17605 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17607 emit_move_insn (tmpreg[i], srcmem);
17609 for (i = 0; i < unroll; i++)
17614 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17616 emit_move_insn (destmem, tmpreg[i]);
17621 for (i = 0; i < unroll; i++)
17625 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17626 emit_move_insn (destmem, value);
17629 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
17630 true, OPTAB_LIB_WIDEN);
17632 emit_move_insn (iter, tmp);
17634 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
17636 if (expected_size != -1)
17638 expected_size /= GET_MODE_SIZE (mode) * unroll;
17639 if (expected_size == 0)
17641 else if (expected_size > REG_BR_PROB_BASE)
17642 predict_jump (REG_BR_PROB_BASE - 1);
17644 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
17647 predict_jump (REG_BR_PROB_BASE * 80 / 100);
17648 iter = ix86_zero_extend_to_Pmode (iter);
17649 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
17650 true, OPTAB_LIB_WIDEN);
17651 if (tmp != destptr)
17652 emit_move_insn (destptr, tmp);
17655 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
17656 true, OPTAB_LIB_WIDEN);
17658 emit_move_insn (srcptr, tmp);
17660 emit_label (out_label);
17663 /* Output a "rep; mov" instruction.
17664 Arguments have the same meaning as for the previous function. */
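/* E.g., a known 32-byte copy with SImode chunks may come out as
       movl $8, %ecx
       rep movsl
   (illustrative sketch).  */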
17666 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
17667 rtx destptr, rtx srcptr,
17669 enum machine_mode mode)
17675 /* If the size is known, it is shorter to use rep movs. */
17676 if (mode == QImode && CONST_INT_P (count)
17677 && !(INTVAL (count) & 3))
17680 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17681 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17682 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
17683 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
17684 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17685 if (mode != QImode)
17687 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17688 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17689 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17690 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
17691 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17692 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
17696 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17697 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
17699 if (CONST_INT_P (count))
17701 count = GEN_INT (INTVAL (count)
17702 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17703 destmem = shallow_copy_rtx (destmem);
17704 srcmem = shallow_copy_rtx (srcmem);
17705 set_mem_size (destmem, count);
17706 set_mem_size (srcmem, count);
17710 if (MEM_SIZE (destmem))
17711 set_mem_size (destmem, NULL_RTX);
17712 if (MEM_SIZE (srcmem))
17713 set_mem_size (srcmem, NULL_RTX);
17715 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
17719 /* Output a "rep; stos" instruction.
17720 Arguments have the same meaning as for the previous function. */
17722 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
17723 rtx count, enum machine_mode mode,
17729 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17730 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17731 value = force_reg (mode, gen_lowpart (mode, value));
17732 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17733 if (mode != QImode)
17735 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17736 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17737 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17740 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17741 if (orig_value == const0_rtx && CONST_INT_P (count))
17743 count = GEN_INT (INTVAL (count)
17744 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17745 destmem = shallow_copy_rtx (destmem);
17746 set_mem_size (destmem, count);
17748 else if (MEM_SIZE (destmem))
17749 set_mem_size (destmem, NULL_RTX);
17750 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
17754 emit_strmov (rtx destmem, rtx srcmem,
17755 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
17757 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
17758 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
17759 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17762 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
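/* E.g., a constant remaining count of 7 emits one 4-byte, one 2-byte and
   one 1-byte move, assuming max_size permits each step (illustrative).  */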
17764 expand_movmem_epilogue (rtx destmem, rtx srcmem,
17765 rtx destptr, rtx srcptr, rtx count, int max_size)
17768 if (CONST_INT_P (count))
17770 HOST_WIDE_INT countval = INTVAL (count);
17773 if ((countval & 0x10) && max_size > 16)
17777 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17778 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
17781 gcc_unreachable ();
17784 if ((countval & 0x08) && max_size > 8)
17787 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17790 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17791 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
17795 if ((countval & 0x04) && max_size > 4)
17797 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17800 if ((countval & 0x02) && max_size > 2)
17802 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
17805 if ((countval & 0x01) && max_size > 1)
17807 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
17814 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
17815 count, 1, OPTAB_DIRECT);
17816 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
17817 count, QImode, 1, 4);
17821 /* When there are stringops, we can cheaply increase dest and src pointers.
17822 Otherwise we save code size by maintaining offset (zero is readily
17823 available from the preceding rep operation) and using x86 addressing modes. */
17825 if (TARGET_SINGLE_STRINGOP)
17829 rtx label = ix86_expand_aligntest (count, 4, true);
17830 src = change_address (srcmem, SImode, srcptr);
17831 dest = change_address (destmem, SImode, destptr);
17832 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17833 emit_label (label);
17834 LABEL_NUSES (label) = 1;
17838 rtx label = ix86_expand_aligntest (count, 2, true);
17839 src = change_address (srcmem, HImode, srcptr);
17840 dest = change_address (destmem, HImode, destptr);
17841 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17842 emit_label (label);
17843 LABEL_NUSES (label) = 1;
17847 rtx label = ix86_expand_aligntest (count, 1, true);
17848 src = change_address (srcmem, QImode, srcptr);
17849 dest = change_address (destmem, QImode, destptr);
17850 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17851 emit_label (label);
17852 LABEL_NUSES (label) = 1;
17857 rtx offset = force_reg (Pmode, const0_rtx);
17862 rtx label = ix86_expand_aligntest (count, 4, true);
17863 src = change_address (srcmem, SImode, srcptr);
17864 dest = change_address (destmem, SImode, destptr);
17865 emit_move_insn (dest, src);
17866 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
17867 true, OPTAB_LIB_WIDEN);
17869 emit_move_insn (offset, tmp);
17870 emit_label (label);
17871 LABEL_NUSES (label) = 1;
17875 rtx label = ix86_expand_aligntest (count, 2, true);
17876 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17877 src = change_address (srcmem, HImode, tmp);
17878 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17879 dest = change_address (destmem, HImode, tmp);
17880 emit_move_insn (dest, src);
17881 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
17882 true, OPTAB_LIB_WIDEN);
17884 emit_move_insn (offset, tmp);
17885 emit_label (label);
17886 LABEL_NUSES (label) = 1;
17890 rtx label = ix86_expand_aligntest (count, 1, true);
17891 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17892 src = change_address (srcmem, QImode, tmp);
17893 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17894 dest = change_address (destmem, QImode, tmp);
17895 emit_move_insn (dest, src);
17896 emit_label (label);
17897 LABEL_NUSES (label) = 1;
17902 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17904 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
17905 rtx count, int max_size)
17908 expand_simple_binop (counter_mode (count), AND, count,
17909 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
17910 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
17911 gen_lowpart (QImode, value), count, QImode,
17915 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17917 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
17921 if (CONST_INT_P (count))
17923 HOST_WIDE_INT countval = INTVAL (count);
17926 if ((countval & 0x10) && max_size > 16)
17930 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17931 emit_insn (gen_strset (destptr, dest, value));
17932 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
17933 emit_insn (gen_strset (destptr, dest, value));
17936 gcc_unreachable ();
17939 if ((countval & 0x08) && max_size > 8)
17943 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17944 emit_insn (gen_strset (destptr, dest, value));
17948 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17949 emit_insn (gen_strset (destptr, dest, value));
17950 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
17951 emit_insn (gen_strset (destptr, dest, value));
17955 if ((countval & 0x04) && max_size > 4)
17957 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17958 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17961 if ((countval & 0x02) && max_size > 2)
17963 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
17964 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17967 if ((countval & 0x01) && max_size > 1)
17969 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
17970 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17977 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
17982 rtx label = ix86_expand_aligntest (count, 16, true);
17985 dest = change_address (destmem, DImode, destptr);
17986 emit_insn (gen_strset (destptr, dest, value));
17987 emit_insn (gen_strset (destptr, dest, value));
17991 dest = change_address (destmem, SImode, destptr);
17992 emit_insn (gen_strset (destptr, dest, value));
17993 emit_insn (gen_strset (destptr, dest, value));
17994 emit_insn (gen_strset (destptr, dest, value));
17995 emit_insn (gen_strset (destptr, dest, value));
17997 emit_label (label);
17998 LABEL_NUSES (label) = 1;
18002 rtx label = ix86_expand_aligntest (count, 8, true);
18005 dest = change_address (destmem, DImode, destptr);
18006 emit_insn (gen_strset (destptr, dest, value));
18010 dest = change_address (destmem, SImode, destptr);
18011 emit_insn (gen_strset (destptr, dest, value));
18012 emit_insn (gen_strset (destptr, dest, value));
18014 emit_label (label);
18015 LABEL_NUSES (label) = 1;
18019 rtx label = ix86_expand_aligntest (count, 4, true);
18020 dest = change_address (destmem, SImode, destptr);
18021 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
18022 emit_label (label);
18023 LABEL_NUSES (label) = 1;
18027 rtx label = ix86_expand_aligntest (count, 2, true);
18028 dest = change_address (destmem, HImode, destptr);
18029 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
18030 emit_label (label);
18031 LABEL_NUSES (label) = 1;
18035 rtx label = ix86_expand_aligntest (count, 1, true);
18036 dest = change_address (destmem, QImode, destptr);
18037 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
18038 emit_label (label);
18039 LABEL_NUSES (label) = 1;
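/* An illustrative sketch, not GCC internals: the store epilogue above is the
   analogue of this residual-byte fill for max_size == 8, assuming <string.h>
   and that VALUE has already been byte-duplicated to 0xXYXYXYXY; the name
   set_tail_sketch is hypothetical.  */
static void
set_tail_sketch (char *dst, unsigned int value, size_t count)
{
  size_t n = count & 7;				/* count & (max_size - 1) */
  if (n & 4) { memcpy (dst, &value, 4); dst += 4; }
  if (n & 2) { memcpy (dst, &value, 2); dst += 2; }
  if (n & 1) *dst = (char) value;
}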
18043 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN,
18044 to DESIRED_ALIGNMENT. */
18046 expand_movmem_prologue (rtx destmem, rtx srcmem,
18047 rtx destptr, rtx srcptr, rtx count,
18048 int align, int desired_alignment)
18050 if (align <= 1 && desired_alignment > 1)
18052 rtx label = ix86_expand_aligntest (destptr, 1, false);
18053 srcmem = change_address (srcmem, QImode, srcptr);
18054 destmem = change_address (destmem, QImode, destptr);
18055 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
18056 ix86_adjust_counter (count, 1);
18057 emit_label (label);
18058 LABEL_NUSES (label) = 1;
18060 if (align <= 2 && desired_alignment > 2)
18062 rtx label = ix86_expand_aligntest (destptr, 2, false);
18063 srcmem = change_address (srcmem, HImode, srcptr);
18064 destmem = change_address (destmem, HImode, destptr);
18065 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
18066 ix86_adjust_counter (count, 2);
18067 emit_label (label);
18068 LABEL_NUSES (label) = 1;
18070 if (align <= 4 && desired_alignment > 4)
18072 rtx label = ix86_expand_aligntest (destptr, 4, false);
18073 srcmem = change_address (srcmem, SImode, srcptr);
18074 destmem = change_address (destmem, SImode, destptr);
18075 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
18076 ix86_adjust_counter (count, 4);
18077 emit_label (label);
18078 LABEL_NUSES (label) = 1;
18080 gcc_assert (desired_alignment <= 8);
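/* An illustrative sketch in plain C, assuming <stdint.h> and <string.h>:
   each aligntest branch of the prologue above corresponds to one step of
   this alignment walk for desired_alignment == 8, shrinking the counter by
   the bytes copied; the name align_prologue_sketch is hypothetical.  */
static void
align_prologue_sketch (char **dstp, const char **srcp, size_t *countp)
{
  if ((uintptr_t) *dstp & 1)			/* make dst 2-byte aligned */
    { **dstp = **srcp; (*dstp)++; (*srcp)++; (*countp)--; }
  if ((uintptr_t) *dstp & 2)			/* make dst 4-byte aligned */
    { memcpy (*dstp, *srcp, 2); *dstp += 2; *srcp += 2; *countp -= 2; }
  if ((uintptr_t) *dstp & 4)			/* make dst 8-byte aligned */
    { memcpy (*dstp, *srcp, 4); *dstp += 4; *srcp += 4; *countp -= 4; }
}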
18083 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
18084 ALIGN_BYTES is how many bytes need to be copied. */
18086 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
18087 int desired_align, int align_bytes)
18090 rtx src_size, dst_size;
18092 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
18093 if (src_align_bytes >= 0)
18094 src_align_bytes = desired_align - src_align_bytes;
18095 src_size = MEM_SIZE (src);
18096 dst_size = MEM_SIZE (dst);
18097 if (align_bytes & 1)
18099 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
18100 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
18102 emit_insn (gen_strmov (destreg, dst, srcreg, src));
18104 if (align_bytes & 2)
18106 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
18107 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
18108 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
18109 set_mem_align (dst, 2 * BITS_PER_UNIT);
18110 if (src_align_bytes >= 0
18111 && (src_align_bytes & 1) == (align_bytes & 1)
18112 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
18113 set_mem_align (src, 2 * BITS_PER_UNIT);
18115 emit_insn (gen_strmov (destreg, dst, srcreg, src));
18117 if (align_bytes & 4)
18119 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
18120 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
18121 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
18122 set_mem_align (dst, 4 * BITS_PER_UNIT);
18123 if (src_align_bytes >= 0)
18125 unsigned int src_align = 0;
18126 if ((src_align_bytes & 3) == (align_bytes & 3))
18128 else if ((src_align_bytes & 1) == (align_bytes & 1))
18130 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
18131 set_mem_align (src, src_align * BITS_PER_UNIT);
18134 emit_insn (gen_strmov (destreg, dst, srcreg, src));
18136 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
18137 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
18138 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
18139 set_mem_align (dst, desired_align * BITS_PER_UNIT);
18140 if (src_align_bytes >= 0)
18142 unsigned int src_align = 0;
18143 if ((src_align_bytes & 7) == (align_bytes & 7))
18145 else if ((src_align_bytes & 3) == (align_bytes & 3))
18147 else if ((src_align_bytes & 1) == (align_bytes & 1))
18149 if (src_align > (unsigned int) desired_align)
18150 src_align = desired_align;
18151 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
18152 set_mem_align (src, src_align * BITS_PER_UNIT);
18155 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18157 set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
18162 /* Set enough bytes at DEST to align DEST, known to be aligned by ALIGN,
18163 to DESIRED_ALIGNMENT. */
18165 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
18166 int align, int desired_alignment)
18168 if (align <= 1 && desired_alignment > 1)
18170 rtx label = ix86_expand_aligntest (destptr, 1, false);
18171 destmem = change_address (destmem, QImode, destptr);
18172 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
18173 ix86_adjust_counter (count, 1);
18174 emit_label (label);
18175 LABEL_NUSES (label) = 1;
18177 if (align <= 2 && desired_alignment > 2)
18179 rtx label = ix86_expand_aligntest (destptr, 2, false);
18180 destmem = change_address (destmem, HImode, destptr);
18181 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
18182 ix86_adjust_counter (count, 2);
18183 emit_label (label);
18184 LABEL_NUSES (label) = 1;
18186 if (align <= 4 && desired_alignment > 4)
18188 rtx label = ix86_expand_aligntest (destptr, 4, false);
18189 destmem = change_address (destmem, SImode, destptr);
18190 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
18191 ix86_adjust_counter (count, 4);
18192 emit_label (label);
18193 LABEL_NUSES (label) = 1;
18195 gcc_assert (desired_alignment <= 8);
18198 /* Set enough bytes at DST to align DST, known to be aligned by ALIGN, to
18199 DESIRED_ALIGN. ALIGN_BYTES is how many bytes need to be stored. */
18201 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
18202 int desired_align, int align_bytes)
18205 rtx dst_size = MEM_SIZE (dst);
18206 if (align_bytes & 1)
18208 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
18210 emit_insn (gen_strset (destreg, dst,
18211 gen_lowpart (QImode, value)));
18213 if (align_bytes & 2)
18215 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
18216 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
18217 set_mem_align (dst, 2 * BITS_PER_UNIT);
18219 emit_insn (gen_strset (destreg, dst,
18220 gen_lowpart (HImode, value)));
18222 if (align_bytes & 4)
18224 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
18225 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
18226 set_mem_align (dst, 4 * BITS_PER_UNIT);
18228 emit_insn (gen_strset (destreg, dst,
18229 gen_lowpart (SImode, value)));
18231 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
18232 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
18233 set_mem_align (dst, desired_align * BITS_PER_UNIT);
18235 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18239 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
18240 static enum stringop_alg
18241 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
18242 int *dynamic_check)
18244 const struct stringop_algs * algs;
18245 bool optimize_for_speed;
18246 /* Algorithms using the rep prefix want at least edi and ecx;
18247 additionally, memset wants eax and memcpy wants esi. Don't
18248 consider such algorithms if the user has appropriated those
18249 registers for their own purposes. */
18250 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
18252 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
18254 #define ALG_USABLE_P(alg) (rep_prefix_usable \
18255 || (alg != rep_prefix_1_byte \
18256 && alg != rep_prefix_4_byte \
18257 && alg != rep_prefix_8_byte))
18258 const struct processor_costs *cost;
18260 /* Even if the string operation call is cold, we still might spend a lot
18261 of time processing large blocks. */
18262 if (optimize_function_for_size_p (cfun)
18263 || (optimize_insn_for_size_p ()
18264 && expected_size != -1 && expected_size < 256))
18265 optimize_for_speed = false;
18267 optimize_for_speed = true;
18269 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
18271 *dynamic_check = -1;
18273 algs = &cost->memset[TARGET_64BIT != 0];
18275 algs = &cost->memcpy[TARGET_64BIT != 0];
18276 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
18277 return stringop_alg;
18278 /* rep; movq or rep; movl is the smallest variant. */
18279 else if (!optimize_for_speed)
18281 if (!count || (count & 3))
18282 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
18284 return rep_prefix_usable ? rep_prefix_4_byte : loop;
18286 /* Very tiny blocks are best handled via the loop; REP is expensive to set up.
18288 else if (expected_size != -1 && expected_size < 4)
18289 return loop_1_byte;
18290 else if (expected_size != -1)
18293 enum stringop_alg alg = libcall;
18294 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18296 /* We get here if the algorithms that were not libcall-based
18297 were rep-prefix based and we are unable to use rep prefixes
18298 based on global register usage. Break out of the loop and
18299 use the heuristic below. */
18300 if (algs->size[i].max == 0)
18302 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
18304 enum stringop_alg candidate = algs->size[i].alg;
18306 if (candidate != libcall && ALG_USABLE_P (candidate))
18308 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
18309 last non-libcall inline algorithm. */
18310 if (TARGET_INLINE_ALL_STRINGOPS)
18312 /* When the current size is best copied by a libcall, but we are
18313 still forced to inline, run the heuristic below that will pick
18314 the code for medium-sized blocks. */
18315 if (alg != libcall)
18319 else if (ALG_USABLE_P (candidate))
18323 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
18325 /* When asked to inline the call anyway, try to pick a meaningful choice.
18326 We look for the maximal size of block that is faster to copy by hand
18327 and take blocks of at most that size, guessing that the average size
18328 will be roughly half of the block.
18330 If this turns out to be bad, we might simply specify the preferred
18331 choice in ix86_costs. */
18332 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18333 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
18336 enum stringop_alg alg;
18338 bool any_alg_usable_p = true;
18340 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18342 enum stringop_alg candidate = algs->size[i].alg;
18343 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
18345 if (candidate != libcall && candidate
18346 && ALG_USABLE_P (candidate))
18347 max = algs->size[i].max;
18349 /* If there aren't any usable algorithms, then recursing on
18350 smaller sizes isn't going to find anything. Just return the
18351 simple byte-at-a-time copy loop. */
18352 if (!any_alg_usable_p)
18354 /* Pick something reasonable. */
18355 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18356 *dynamic_check = 128;
18357 return loop_1_byte;
18361 alg = decide_alg (count, max / 2, memset, dynamic_check);
18362 gcc_assert (*dynamic_check == -1);
18363 gcc_assert (alg != libcall);
18364 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18365 *dynamic_check = max;
18368 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
18369 #undef ALG_USABLE_P
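/* Worked example with a hypothetical cost table: given
     size = {{24, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}
   and expected_size == 100, the scan above stops at the second entry
   (8192 >= 100) and picks rep_prefix_4_byte, provided the rep prefix
   survives the fixed-register check encoded in ALG_USABLE_P.  */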
18372 /* Decide on alignment. We know that the operand is already aligned to ALIGN
18373 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
18375 decide_alignment (int align,
18376 enum stringop_alg alg,
18379 int desired_align = 0;
18383 gcc_unreachable ();
18385 case unrolled_loop:
18386 desired_align = GET_MODE_SIZE (Pmode);
18388 case rep_prefix_8_byte:
18391 case rep_prefix_4_byte:
18392 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
18393 copying a whole cache line at once. */
18394 if (TARGET_PENTIUMPRO)
18399 case rep_prefix_1_byte:
18400 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
18401 copying a whole cache line at once. */
18402 if (TARGET_PENTIUMPRO)
18416 if (desired_align < align)
18417 desired_align = align;
18418 if (expected_size != -1 && expected_size < 4)
18419 desired_align = align;
18420 return desired_align;
18423 /* Return the smallest power of 2 greater than VAL. */
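/* For example, smallest_pow2_greater_than (4) == 8 and
   smallest_pow2_greater_than (7) == 8; the callers below use it to round
   EPILOGUE_SIZE_NEEDED up to a power of two.  */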
18425 smallest_pow2_greater_than (int val)
18433 /* Expand string move (memcpy) operation. Use i386 string operations when
18434 profitable. expand_setmem contains similar code. The code depends upon
18435 architecture, block size and alignment, but always has the same
18438 1) Prologue guard: a conditional that jumps to the epilogue for small
18439 blocks that can be handled by the epilogue alone. This is faster, but
18440 also needed for correctness, since the prologue assumes the block is
18441 larger than the desired alignment.
18443 An optional dynamic check for size and a libcall for large
18444 blocks are emitted here too, with -minline-stringops-dynamically.
18446 2) Prologue: copy the first few bytes in order to get the destination
18447 aligned to DESIRED_ALIGN. It is emitted only when ALIGN is less than
18448 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
18449 We emit either a jump tree on power-of-two-sized blocks, or a byte loop.
18451 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
18452 with specified algorithm.
18454 4) Epilogue: code copying tail of the block that is too small to be
18455 handled by main body (or up to size guarded by prologue guard). */
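/* Illustrative shape of the emitted code (pseudo-C sketch only, assuming
   alg == rep_prefix_4_byte and desired_align == 4):

       if (count < epilogue_size_needed)	   -- 1) prologue guard
	 goto epilogue;
       while ((uintptr_t) dst & 3)		   -- 2) alignment prologue
	 { *dst++ = *src++; count--; }
       rep movsl				   -- 3) main body
     epilogue:					   -- 4) tail copy
       copy (count & (epilogue_size_needed - 1)) bytes;  */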
18458 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
18459 rtx expected_align_exp, rtx expected_size_exp)
18465 rtx jump_around_label = NULL;
18466 HOST_WIDE_INT align = 1;
18467 unsigned HOST_WIDE_INT count = 0;
18468 HOST_WIDE_INT expected_size = -1;
18469 int size_needed = 0, epilogue_size_needed;
18470 int desired_align = 0, align_bytes = 0;
18471 enum stringop_alg alg;
18473 bool need_zero_guard = false;
18475 if (CONST_INT_P (align_exp))
18476 align = INTVAL (align_exp);
18477 /* i386 can do misaligned access at a reasonably increased cost. */
18478 if (CONST_INT_P (expected_align_exp)
18479 && INTVAL (expected_align_exp) > align)
18480 align = INTVAL (expected_align_exp);
18481 /* ALIGN is the minimum of destination and source alignment, but we care here
18482 just about destination alignment. */
18483 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
18484 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
18486 if (CONST_INT_P (count_exp))
18487 count = expected_size = INTVAL (count_exp);
18488 if (CONST_INT_P (expected_size_exp) && count == 0)
18489 expected_size = INTVAL (expected_size_exp);
18491 /* Make sure we don't need to care about overflow later on. */
18492 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18495 /* Step 0: Decide on preferred algorithm, desired alignment and
18496 size of chunks to be copied by main loop. */
18498 alg = decide_alg (count, expected_size, false, &dynamic_check);
18499 desired_align = decide_alignment (align, alg, expected_size);
18501 if (!TARGET_ALIGN_STRINGOPS)
18502 align = desired_align;
18504 if (alg == libcall)
18506 gcc_assert (alg != no_stringop);
18508 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
18509 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18510 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
18515 gcc_unreachable ();
18517 need_zero_guard = true;
18518 size_needed = GET_MODE_SIZE (Pmode);
18520 case unrolled_loop:
18521 need_zero_guard = true;
18522 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
18524 case rep_prefix_8_byte:
18527 case rep_prefix_4_byte:
18530 case rep_prefix_1_byte:
18534 need_zero_guard = true;
18539 epilogue_size_needed = size_needed;
18541 /* Step 1: Prologue guard. */
18543 /* Alignment code needs count to be in register. */
18544 if (CONST_INT_P (count_exp) && desired_align > align)
18546 if (INTVAL (count_exp) > desired_align
18547 && INTVAL (count_exp) > size_needed)
18550 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18551 if (align_bytes <= 0)
18554 align_bytes = desired_align - align_bytes;
18556 if (align_bytes == 0)
18557 count_exp = force_reg (counter_mode (count_exp), count_exp);
18559 gcc_assert (desired_align >= 1 && align >= 1);
18561 /* Ensure that alignment prologue won't copy past end of block. */
18562 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18564 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18565 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18566 Make sure it is a power of 2. */
18567 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18571 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18573 /* If main algorithm works on QImode, no epilogue is needed.
18574 For small sizes just don't align anything. */
18575 if (size_needed == 1)
18576 desired_align = align;
18583 label = gen_label_rtx ();
18584 emit_cmp_and_jump_insns (count_exp,
18585 GEN_INT (epilogue_size_needed),
18586 LTU, 0, counter_mode (count_exp), 1, label);
18587 if (expected_size == -1 || expected_size < epilogue_size_needed)
18588 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18590 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18594 /* Emit code to decide at runtime whether a library call or inline code should be
18596 if (dynamic_check != -1)
18598 if (CONST_INT_P (count_exp))
18600 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
18602 emit_block_move_via_libcall (dst, src, count_exp, false);
18603 count_exp = const0_rtx;
18609 rtx hot_label = gen_label_rtx ();
18610 jump_around_label = gen_label_rtx ();
18611 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18612 LEU, 0, GET_MODE (count_exp), 1, hot_label);
18613 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18614 emit_block_move_via_libcall (dst, src, count_exp, false);
18615 emit_jump (jump_around_label);
18616 emit_label (hot_label);
18620 /* Step 2: Alignment prologue. */
18622 if (desired_align > align)
18624 if (align_bytes == 0)
18626 /* Except for the first move in epilogue, we no longer know
18627 the constant offset in the aliasing info. It doesn't seem worth
18628 the pain to maintain it for the first move, so throw away
18630 src = change_address (src, BLKmode, srcreg);
18631 dst = change_address (dst, BLKmode, destreg);
18632 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
18637 /* If we know how many bytes need to be stored before dst is
18638 sufficiently aligned, maintain aliasing info accurately. */
18639 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
18640 desired_align, align_bytes);
18641 count_exp = plus_constant (count_exp, -align_bytes);
18642 count -= align_bytes;
18644 if (need_zero_guard
18645 && (count < (unsigned HOST_WIDE_INT) size_needed
18646 || (align_bytes == 0
18647 && count < ((unsigned HOST_WIDE_INT) size_needed
18648 + desired_align - align))))
18650 /* It is possible that we copied enough so the main loop will not
18652 gcc_assert (size_needed > 1);
18653 if (label == NULL_RTX)
18654 label = gen_label_rtx ();
18655 emit_cmp_and_jump_insns (count_exp,
18656 GEN_INT (size_needed),
18657 LTU, 0, counter_mode (count_exp), 1, label);
18658 if (expected_size == -1
18659 || expected_size < (desired_align - align) / 2 + size_needed)
18660 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18662 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18665 if (label && size_needed == 1)
18667 emit_label (label);
18668 LABEL_NUSES (label) = 1;
18670 epilogue_size_needed = 1;
18672 else if (label == NULL_RTX)
18673 epilogue_size_needed = size_needed;
18675 /* Step 3: Main loop. */
18681 gcc_unreachable ();
18683 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18684 count_exp, QImode, 1, expected_size);
18687 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18688 count_exp, Pmode, 1, expected_size);
18690 case unrolled_loop:
18691 /* Unroll only by a factor of 2 in 32-bit mode, since we don't have enough
18692 registers for 4 temporaries anyway. */
18693 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18694 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
18697 case rep_prefix_8_byte:
18698 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18701 case rep_prefix_4_byte:
18702 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18705 case rep_prefix_1_byte:
18706 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18710 /* Properly adjust the offsets of src and dest memory for aliasing. */
18711 if (CONST_INT_P (count_exp))
18713 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
18714 (count / size_needed) * size_needed);
18715 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18716 (count / size_needed) * size_needed);
18720 src = change_address (src, BLKmode, srcreg);
18721 dst = change_address (dst, BLKmode, destreg);
18724 /* Step 4: Epilogue to copy the remaining bytes. */
18728 /* When the main loop is done, COUNT_EXP might hold the original count,
18729 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18730 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18731 bytes. Compensate if needed. */
18733 if (size_needed < epilogue_size_needed)
18736 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18737 GEN_INT (size_needed - 1), count_exp, 1,
18739 if (tmp != count_exp)
18740 emit_move_insn (count_exp, tmp);
18742 emit_label (label);
18743 LABEL_NUSES (label) = 1;
18746 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18747 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
18748 epilogue_size_needed);
18749 if (jump_around_label)
18750 emit_label (jump_around_label);
18754 /* Helper function for memset. For the QImode value 0xXY produce
18755 0xXYXYXYXY of the width specified by MODE. This is essentially
18756 a multiplication by 0x01010101, but we can do slightly better than
18757 synth_mult by unwinding the sequence by hand on CPUs with
18760 promote_duplicated_reg (enum machine_mode mode, rtx val)
18762 enum machine_mode valmode = GET_MODE (val);
18764 int nops = mode == DImode ? 3 : 2;
18766 gcc_assert (mode == SImode || mode == DImode);
18767 if (val == const0_rtx)
18768 return copy_to_mode_reg (mode, const0_rtx);
18769 if (CONST_INT_P (val))
18771 HOST_WIDE_INT v = INTVAL (val) & 255;
18775 if (mode == DImode)
18776 v |= (v << 16) << 16;
18777 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
18780 if (valmode == VOIDmode)
18782 if (valmode != QImode)
18783 val = gen_lowpart (QImode, val);
18784 if (mode == QImode)
18786 if (!TARGET_PARTIAL_REG_STALL)
18788 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
18789 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
18790 <= (ix86_cost->shift_const + ix86_cost->add) * nops
18791 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
18793 rtx reg = convert_modes (mode, QImode, val, true);
18794 tmp = promote_duplicated_reg (mode, const1_rtx);
18795 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
18800 rtx reg = convert_modes (mode, QImode, val, true);
18802 if (!TARGET_PARTIAL_REG_STALL)
18803 if (mode == SImode)
18804 emit_insn (gen_movsi_insv_1 (reg, reg));
18806 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
18809 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
18810 NULL, 1, OPTAB_DIRECT);
18812 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18814 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
18815 NULL, 1, OPTAB_DIRECT);
18816 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18817 if (mode == SImode)
18819 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
18820 NULL, 1, OPTAB_DIRECT);
18821 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
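/* Worked example of the unwound sequence for SImode and VAL == 0x5A: the
   insv/shift-IOR steps double the populated width each time,
   0x0000005A -> 0x00005A5A -> 0x5A5A5A5A; DImode needs one more
   shift-by-32 IOR step, as emitted above.  */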
18826 /* Duplicate value VAL using promote_duplicated_reg into the maximal size that
18827 will be needed by the main loop copying SIZE_NEEDED chunks and by the prologue
18828 getting alignment from ALIGN to DESIRED_ALIGN. */
18830 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
18835 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
18836 promoted_val = promote_duplicated_reg (DImode, val);
18837 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
18838 promoted_val = promote_duplicated_reg (SImode, val);
18839 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
18840 promoted_val = promote_duplicated_reg (HImode, val);
18842 promoted_val = val;
18844 return promoted_val;
18847 /* Expand string set operation (memset/bzero). Use i386 string operations when
18848 profitable. See expand_movmem comment for explanation of individual
18849 steps performed. */
18851 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
18852 rtx expected_align_exp, rtx expected_size_exp)
18857 rtx jump_around_label = NULL;
18858 HOST_WIDE_INT align = 1;
18859 unsigned HOST_WIDE_INT count = 0;
18860 HOST_WIDE_INT expected_size = -1;
18861 int size_needed = 0, epilogue_size_needed;
18862 int desired_align = 0, align_bytes = 0;
18863 enum stringop_alg alg;
18864 rtx promoted_val = NULL;
18865 bool force_loopy_epilogue = false;
18867 bool need_zero_guard = false;
18869 if (CONST_INT_P (align_exp))
18870 align = INTVAL (align_exp);
18871 /* i386 can do misaligned access at a reasonably increased cost. */
18872 if (CONST_INT_P (expected_align_exp)
18873 && INTVAL (expected_align_exp) > align)
18874 align = INTVAL (expected_align_exp);
18875 if (CONST_INT_P (count_exp))
18876 count = expected_size = INTVAL (count_exp);
18877 if (CONST_INT_P (expected_size_exp) && count == 0)
18878 expected_size = INTVAL (expected_size_exp);
18880 /* Make sure we don't need to care about overflow later on. */
18881 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18884 /* Step 0: Decide on preferred algorithm, desired alignment and
18885 size of chunks to be copied by main loop. */
18887 alg = decide_alg (count, expected_size, true, &dynamic_check);
18888 desired_align = decide_alignment (align, alg, expected_size);
18890 if (!TARGET_ALIGN_STRINGOPS)
18891 align = desired_align;
18893 if (alg == libcall)
18895 gcc_assert (alg != no_stringop);
18897 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
18898 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18903 gcc_unreachable ();
18905 need_zero_guard = true;
18906 size_needed = GET_MODE_SIZE (Pmode);
18908 case unrolled_loop:
18909 need_zero_guard = true;
18910 size_needed = GET_MODE_SIZE (Pmode) * 4;
18912 case rep_prefix_8_byte:
18915 case rep_prefix_4_byte:
18918 case rep_prefix_1_byte:
18922 need_zero_guard = true;
18926 epilogue_size_needed = size_needed;
18928 /* Step 1: Prologue guard. */
18930 /* Alignment code needs count to be in register. */
18931 if (CONST_INT_P (count_exp) && desired_align > align)
18933 if (INTVAL (count_exp) > desired_align
18934 && INTVAL (count_exp) > size_needed)
18937 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18938 if (align_bytes <= 0)
18941 align_bytes = desired_align - align_bytes;
18943 if (align_bytes == 0)
18945 enum machine_mode mode = SImode;
18946 if (TARGET_64BIT && (count & ~0xffffffff))
18948 count_exp = force_reg (mode, count_exp);
18951 /* Do the cheap promotion to allow better CSE across the
18952 main loop and epilogue (i.e. one load of the big constant in
18953 front of all the code). */
18954 if (CONST_INT_P (val_exp))
18955 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18956 desired_align, align);
18957 /* Ensure that alignment prologue won't copy past end of block. */
18958 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18960 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18961 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18962 Make sure it is power of 2. */
18963 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18965 /* To improve performance of small blocks, we jump around the VAL
18966 promoting code. This means that if the promoted VAL is not constant,
18967 we might not use it in the epilogue and have to use a byte
18969 if (epilogue_size_needed > 2 && !promoted_val)
18970 force_loopy_epilogue = true;
18973 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18975 /* If main algorithm works on QImode, no epilogue is needed.
18976 For small sizes just don't align anything. */
18977 if (size_needed == 1)
18978 desired_align = align;
18985 label = gen_label_rtx ();
18986 emit_cmp_and_jump_insns (count_exp,
18987 GEN_INT (epilogue_size_needed),
18988 LTU, 0, counter_mode (count_exp), 1, label);
18989 if (expected_size == -1 || expected_size <= epilogue_size_needed)
18990 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18992 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18995 if (dynamic_check != -1)
18997 rtx hot_label = gen_label_rtx ();
18998 jump_around_label = gen_label_rtx ();
18999 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
19000 LEU, 0, counter_mode (count_exp), 1, hot_label);
19001 predict_jump (REG_BR_PROB_BASE * 90 / 100);
19002 set_storage_via_libcall (dst, count_exp, val_exp, false);
19003 emit_jump (jump_around_label);
19004 emit_label (hot_label);
19007 /* Step 2: Alignment prologue. */
19009 /* Do the expensive promotion once we have branched off the small blocks. */
19011 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
19012 desired_align, align);
19013 gcc_assert (desired_align >= 1 && align >= 1);
19015 if (desired_align > align)
19017 if (align_bytes == 0)
19019 /* Except for the first move in epilogue, we no longer know
19020 the constant offset in the aliasing info. It doesn't seem worth
19021 the pain to maintain it for the first move, so throw away
19023 dst = change_address (dst, BLKmode, destreg);
19024 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
19029 /* If we know how many bytes need to be stored before dst is
19030 sufficiently aligned, maintain aliasing info accurately. */
19031 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
19032 desired_align, align_bytes);
19033 count_exp = plus_constant (count_exp, -align_bytes);
19034 count -= align_bytes;
19036 if (need_zero_guard
19037 && (count < (unsigned HOST_WIDE_INT) size_needed
19038 || (align_bytes == 0
19039 && count < ((unsigned HOST_WIDE_INT) size_needed
19040 + desired_align - align))))
19042 /* It is possible that we copied enough so the main loop will not
19044 gcc_assert (size_needed > 1);
19045 if (label == NULL_RTX)
19046 label = gen_label_rtx ();
19047 emit_cmp_and_jump_insns (count_exp,
19048 GEN_INT (size_needed),
19049 LTU, 0, counter_mode (count_exp), 1, label);
19050 if (expected_size == -1
19051 || expected_size < (desired_align - align) / 2 + size_needed)
19052 predict_jump (REG_BR_PROB_BASE * 20 / 100);
19054 predict_jump (REG_BR_PROB_BASE * 60 / 100);
19057 if (label && size_needed == 1)
19059 emit_label (label);
19060 LABEL_NUSES (label) = 1;
19062 promoted_val = val_exp;
19063 epilogue_size_needed = 1;
19065 else if (label == NULL_RTX)
19066 epilogue_size_needed = size_needed;
19068 /* Step 3: Main loop. */
19074 gcc_unreachable ();
19076 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
19077 count_exp, QImode, 1, expected_size);
19080 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
19081 count_exp, Pmode, 1, expected_size);
19083 case unrolled_loop:
19084 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
19085 count_exp, Pmode, 4, expected_size);
19087 case rep_prefix_8_byte:
19088 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
19091 case rep_prefix_4_byte:
19092 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
19095 case rep_prefix_1_byte:
19096 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
19100 /* Properly adjust the offset of dest memory for aliasing. */
19101 if (CONST_INT_P (count_exp))
19102 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
19103 (count / size_needed) * size_needed);
19105 dst = change_address (dst, BLKmode, destreg);
19107 /* Step 4: Epilogue to copy the remaining bytes. */
19111 /* When the main loop is done, COUNT_EXP might hold the original count,
19112 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
19113 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
19114 bytes. Compensate if needed. */
19116 if (size_needed < epilogue_size_needed)
19119 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
19120 GEN_INT (size_needed - 1), count_exp, 1,
19122 if (tmp != count_exp)
19123 emit_move_insn (count_exp, tmp);
19125 emit_label (label);
19126 LABEL_NUSES (label) = 1;
19129 if (count_exp != const0_rtx && epilogue_size_needed > 1)
19131 if (force_loopy_epilogue)
19132 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
19133 epilogue_size_needed);
19135 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
19136 epilogue_size_needed);
19138 if (jump_around_label)
19139 emit_label (jump_around_label);
19143 /* Expand the appropriate insns for doing strlen if not just doing
19146 out = result, initialized with the start address
19147 align_rtx = alignment of the address.
19148 scratch = scratch register, initialized with the start address when
19149 not aligned, otherwise undefined
19151 This is just the body. It needs the initializations mentioned above and
19152 some address computing at the end. These things are done in i386.md. */
19155 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
19159 rtx align_2_label = NULL_RTX;
19160 rtx align_3_label = NULL_RTX;
19161 rtx align_4_label = gen_label_rtx ();
19162 rtx end_0_label = gen_label_rtx ();
19164 rtx tmpreg = gen_reg_rtx (SImode);
19165 rtx scratch = gen_reg_rtx (SImode);
19169 if (CONST_INT_P (align_rtx))
19170 align = INTVAL (align_rtx);
19172 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
19174 /* Is there a known alignment and is it less than 4? */
19177 rtx scratch1 = gen_reg_rtx (Pmode);
19178 emit_move_insn (scratch1, out);
19179 /* Is there a known alignment and is it not 2? */
19182 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
19183 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
19185 /* Leave just the 3 lower bits. */
19186 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
19187 NULL_RTX, 0, OPTAB_WIDEN);
19189 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19190 Pmode, 1, align_4_label);
19191 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
19192 Pmode, 1, align_2_label);
19193 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
19194 Pmode, 1, align_3_label);
19198 /* Since the alignment is 2, we have to check 2 or 0 bytes;
19199 check whether it is aligned to 4 bytes. */
19201 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
19202 NULL_RTX, 0, OPTAB_WIDEN);
19204 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19205 Pmode, 1, align_4_label);
19208 mem = change_address (src, QImode, out);
19210 /* Now compare the bytes. */
19212 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
19213 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
19214 QImode, 1, end_0_label);
19216 /* Increment the address. */
19217 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19219 /* Not needed with an alignment of 2 */
19222 emit_label (align_2_label);
19224 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19227 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19229 emit_label (align_3_label);
19232 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19235 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19238 /* Generate the loop to check 4 bytes at a time. It is not a good idea
19239 to align this loop; it only produces huge programs and does not help to
19241 emit_label (align_4_label);
19243 mem = change_address (src, SImode, out);
19244 emit_move_insn (scratch, mem);
19245 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
19247 /* This formula yields a nonzero result iff one of the bytes is zero.
19248 This saves three branches inside the loop and many cycles. */
19250 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
19251 emit_insn (gen_one_cmplsi2 (scratch, scratch));
19252 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
19253 emit_insn (gen_andsi3 (tmpreg, tmpreg,
19254 gen_int_mode (0x80808080, SImode)));
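/* Worked example of the formula: for scratch == 0x12003456 (zero in
   byte 2),
     scratch - 0x01010101	== 0x10ff3355
     ... & ~scratch		== 0x00ff0301
     ... & 0x80808080		== 0x00800000 != 0
   so the loop exits and the code below locates the zero byte.  */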
19255 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
19260 rtx reg = gen_reg_rtx (SImode);
19261 rtx reg2 = gen_reg_rtx (Pmode);
19262 emit_move_insn (reg, tmpreg);
19263 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
19265 /* If zero is not in the first two bytes, move two bytes forward. */
19266 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19267 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19268 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19269 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
19270 gen_rtx_IF_THEN_ELSE (SImode, tmp,
19273 /* Emit lea manually to avoid clobbering the flags. */
19274 emit_insn (gen_rtx_SET (SImode, reg2,
19275 gen_rtx_PLUS (Pmode, out, const2_rtx)));
19277 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19278 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19279 emit_insn (gen_rtx_SET (VOIDmode, out,
19280 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
19286 rtx end_2_label = gen_label_rtx ();
19287 /* Is zero in the first two bytes? */
19289 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19290 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19291 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
19292 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
19293 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
19295 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
19296 JUMP_LABEL (tmp) = end_2_label;
19298 /* Not in the first two. Move two bytes forward. */
19299 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
19300 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
19302 emit_label (end_2_label);
19306 /* Avoid branch in fixing the byte. */
19307 tmpreg = gen_lowpart (QImode, tmpreg);
19308 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
19309 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
19310 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
19311 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), tmp, cmp));
19313 emit_label (end_0_label);
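/* An illustrative sketch in plain C, not part of GCC, assuming <stdint.h>,
   <string.h> and a 32-bit unsigned int: the code expanded above is
   equivalent to scanning byte-wise up to a 4-byte boundary, then a word at
   a time with the zero-byte formula, then pinning down the terminator; the
   name strlen_end_sketch is hypothetical.  */
static const char *
strlen_end_sketch (const char *s)
{
  unsigned int w;

  while (((uintptr_t) s & 3) != 0)	/* unaligned prologue */
    {
      if (*s == 0)
	return s;
      s++;
    }
  for (;;)				/* 4 bytes at a time */
    {
      memcpy (&w, s, 4);		/* aligned word load */
      if (((w - 0x01010101u) & ~w & 0x80808080u) != 0)
	break;				/* some byte of W is zero */
      s += 4;
    }
  while (*s != 0)			/* at most 3 more steps */
    s++;
  return s;				/* points at the terminator */
}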
19316 /* Expand strlen. */
19319 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
19321 rtx addr, scratch1, scratch2, scratch3, scratch4;
19323 /* The generic case of the strlen expander is long. Avoid expanding it
19324 unless TARGET_INLINE_ALL_STRINGOPS. */
19326 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19327 && !TARGET_INLINE_ALL_STRINGOPS
19328 && !optimize_insn_for_size_p ()
19329 && (!CONST_INT_P (align) || INTVAL (align) < 4))
19332 addr = force_reg (Pmode, XEXP (src, 0));
19333 scratch1 = gen_reg_rtx (Pmode);
19335 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19336 && !optimize_insn_for_size_p ())
19338 /* Well, it seems that some optimizers do not combine a call like
19339 foo(strlen(bar), strlen(bar));
19340 when the move and the subtraction are done here. They do calculate
19341 the length just once when these instructions are done inside of
19342 output_strlen_unroll(). But since &bar[strlen(bar)] is
19343 often used and this uses one fewer register for the lifetime of
19344 output_strlen_unroll(), this is better. */
19346 emit_move_insn (out, addr);
19348 ix86_expand_strlensi_unroll_1 (out, src, align);
19350 /* strlensi_unroll_1 returns the address of the zero at the end of
19351 the string, like memchr(), so compute the length by subtracting
19352 the start address. */
19353 emit_insn ((*ix86_gen_sub3) (out, out, addr));
19359 /* Can't use this if the user has appropriated eax, ecx, or edi. */
19360 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
19363 scratch2 = gen_reg_rtx (Pmode);
19364 scratch3 = gen_reg_rtx (Pmode);
19365 scratch4 = force_reg (Pmode, constm1_rtx);
19367 emit_move_insn (scratch3, addr);
19368 eoschar = force_reg (QImode, eoschar);
19370 src = replace_equiv_address_nv (src, scratch3);
19372 /* If .md starts supporting :P, this can be done in .md. */
19373 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
19374 scratch4), UNSPEC_SCAS);
19375 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
19376 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
19377 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
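/* Worked example of the arithmetic above: SCRATCH4 seeds the count
   register with -1; after `repnz scasb' examines N characters plus the
   terminator, the counter holds -(N + 2). Its one's complement is N + 1,
   and adding -1 leaves the string length N in OUT.  */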
19382 /* For a given symbol (function), construct code to compute the address of its
19383 PLT entry in the large x86-64 PIC model. */
19385 construct_plt_address (rtx symbol)
19387 rtx tmp = gen_reg_rtx (Pmode);
19388 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
19390 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
19391 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
19393 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
19394 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
19399 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
19401 rtx pop, int sibcall)
19403 rtx use = NULL, call;
19405 if (pop == const0_rtx)
19407 gcc_assert (!TARGET_64BIT || !pop);
19409 if (TARGET_MACHO && !TARGET_64BIT)
19412 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
19413 fnaddr = machopic_indirect_call_target (fnaddr);
19418 /* Static functions and indirect calls don't need the pic register. */
19419 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
19420 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19421 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
19422 use_reg (&use, pic_offset_table_rtx);
19425 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
19427 rtx al = gen_rtx_REG (QImode, AX_REG);
19428 emit_move_insn (al, callarg2);
19429 use_reg (&use, al);
19432 if (ix86_cmodel == CM_LARGE_PIC
19434 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19435 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
19436 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
19438 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
19439 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
19441 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
19442 fnaddr = gen_rtx_MEM (QImode, fnaddr);
19445 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
19447 call = gen_rtx_SET (VOIDmode, retval, call);
19450 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
19451 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
19452 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
19455 && ix86_cfun_abi () == MS_ABI
19456 && (!callarg2 || INTVAL (callarg2) != -2))
19458 /* We need to represent that SI and DI registers are clobbered
19460 static int clobbered_registers[] = {
19461 XMM6_REG, XMM7_REG, XMM8_REG,
19462 XMM9_REG, XMM10_REG, XMM11_REG,
19463 XMM12_REG, XMM13_REG, XMM14_REG,
19464 XMM15_REG, SI_REG, DI_REG
19467 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
19468 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
19469 UNSPEC_MS_TO_SYSV_CALL);
19473 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
19474 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
19477 (SSE_REGNO_P (clobbered_registers[i])
19479 clobbered_registers[i]));
19481 call = gen_rtx_PARALLEL (VOIDmode,
19482 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
19486 call = emit_call_insn (call);
19488 CALL_INSN_FUNCTION_USAGE (call) = use;
19492 /* Clear stack slot assignments remembered from previous functions.
19493 This is called from INIT_EXPANDERS once before RTL is emitted for each
19496 static struct machine_function *
19497 ix86_init_machine_status (void)
19499 struct machine_function *f;
19501 f = GGC_CNEW (struct machine_function);
19502 f->use_fast_prologue_epilogue_nregs = -1;
19503 f->tls_descriptor_call_expanded_p = 0;
19504 f->call_abi = ix86_abi;
19509 /* Return a MEM corresponding to a stack slot with mode MODE.
19510 Allocate a new slot if necessary.
19512 The RTL for a function can have several slots available: N is
19513 which slot to use. */
19516 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
19518 struct stack_local_entry *s;
19520 gcc_assert (n < MAX_386_STACK_LOCALS);
19522 /* Virtual slot is valid only before vregs are instantiated. */
19523 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
19525 for (s = ix86_stack_locals; s; s = s->next)
19526 if (s->mode == mode && s->n == n)
19527 return copy_rtx (s->rtl);
19529 s = (struct stack_local_entry *)
19530 ggc_alloc (sizeof (struct stack_local_entry));
19533 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
19535 s->next = ix86_stack_locals;
19536 ix86_stack_locals = s;
19540 /* Construct the SYMBOL_REF for the tls_get_addr function. */
19542 static GTY(()) rtx ix86_tls_symbol;
19544 ix86_tls_get_addr (void)
19547 if (!ix86_tls_symbol)
19549 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
19550 (TARGET_ANY_GNU_TLS
19552 ? "___tls_get_addr"
19553 : "__tls_get_addr");
19556 return ix86_tls_symbol;
19559 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
19561 static GTY(()) rtx ix86_tls_module_base_symbol;
19563 ix86_tls_module_base (void)
19566 if (!ix86_tls_module_base_symbol)
19568 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
19569 "_TLS_MODULE_BASE_");
19570 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
19571 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
19574 return ix86_tls_module_base_symbol;
19577 /* Calculate the length of the memory address in the instruction
19578 encoding. Does not include the one-byte modrm, opcode, or prefix. */
19581 memory_address_length (rtx addr)
19583 struct ix86_address parts;
19584 rtx base, index, disp;
19588 if (GET_CODE (addr) == PRE_DEC
19589 || GET_CODE (addr) == POST_INC
19590 || GET_CODE (addr) == PRE_MODIFY
19591 || GET_CODE (addr) == POST_MODIFY)
19594 ok = ix86_decompose_address (addr, &parts);
19597 if (parts.base && GET_CODE (parts.base) == SUBREG)
19598 parts.base = SUBREG_REG (parts.base);
19599 if (parts.index && GET_CODE (parts.index) == SUBREG)
19600 parts.index = SUBREG_REG (parts.index);
19603 index = parts.index;
19608 - esp as the base always wants an index,
19609 - ebp as the base always wants a displacement,
19610 - r12 as the base always wants an index,
19611 - r13 as the base always wants a displacement. */
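/* For example (extra bytes beyond the modrm, opcode, and prefix):
   (%eax) -> 0, (%esp) -> 1 (SIB byte), 4(%ebp) -> 1 (disp8),
   0x1000(%eax) -> 4 (disp32).  */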
19613 /* Register Indirect. */
19614 if (base && !index && !disp)
19616 /* esp (for its index) and ebp (for its displacement) need
19617 the two-byte modrm form. Similarly for r12 and r13 in 64-bit
19620 && (addr == arg_pointer_rtx
19621 || addr == frame_pointer_rtx
19622 || REGNO (addr) == SP_REG
19623 || REGNO (addr) == BP_REG
19624 || REGNO (addr) == R12_REG
19625 || REGNO (addr) == R13_REG))
19629 /* Direct Addressing. In 64-bit mode mod 00 r/m 5
19630 is not disp32, but disp32(%rip), so for disp32
19631 SIB byte is needed, unless print_operand_address
19632 optimizes it into disp32(%rip) or (%rip) is implied
19634 else if (disp && !base && !index)
19641 if (GET_CODE (disp) == CONST)
19642 symbol = XEXP (disp, 0);
19643 if (GET_CODE (symbol) == PLUS
19644 && CONST_INT_P (XEXP (symbol, 1)))
19645 symbol = XEXP (symbol, 0);
19647 if (GET_CODE (symbol) != LABEL_REF
19648 && (GET_CODE (symbol) != SYMBOL_REF
19649 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
19650 && (GET_CODE (symbol) != UNSPEC
19651 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
19652 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
19659 /* Find the length of the displacement constant. */
19662 if (base && satisfies_constraint_K (disp))
19667 /* ebp always wants a displacement. Similarly r13. */
19668 else if (base && REG_P (base)
19669 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
19672 /* An index requires the two-byte modrm form.... */
19674 /* ...like esp (or r12), which always wants an index. */
19675 || base == arg_pointer_rtx
19676 || base == frame_pointer_rtx
19677 || (base && REG_P (base)
19678 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
19695 /* Compute default value for "length_immediate" attribute. When SHORTFORM
19696 is set, expect that the insn has an 8-bit immediate alternative. */
19698 ix86_attr_length_immediate_default (rtx insn, int shortform)
19702 extract_insn_cached (insn);
19703 for (i = recog_data.n_operands - 1; i >= 0; --i)
19704 if (CONSTANT_P (recog_data.operand[i]))
19706 enum attr_mode mode = get_attr_mode (insn);
19709 if (shortform && CONST_INT_P (recog_data.operand[i]))
19711 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
19718 ival = trunc_int_for_mode (ival, HImode);
19721 ival = trunc_int_for_mode (ival, SImode);
19726 if (IN_RANGE (ival, -128, 127))
19743 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
19748 fatal_insn ("unknown insn mode", insn);
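/* For example, `add $100, %eax' can use the imm8 short form (length 1)
   since 100 is in [-128, 127], while `add $300, %eax' needs a full imm32
   (length 4); DImode immediates are likewise at most 4 bytes by the
   sign-extension rule above.  */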
19753 /* Compute default value for "length_address" attribute. */
19755 ix86_attr_length_address_default (rtx insn)
19759 if (get_attr_type (insn) == TYPE_LEA)
19761 rtx set = PATTERN (insn), addr;
19763 if (GET_CODE (set) == PARALLEL)
19764 set = XVECEXP (set, 0, 0);
19766 gcc_assert (GET_CODE (set) == SET);
19768 addr = SET_SRC (set);
19769 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
19771 if (GET_CODE (addr) == ZERO_EXTEND)
19772 addr = XEXP (addr, 0);
19773 if (GET_CODE (addr) == SUBREG)
19774 addr = SUBREG_REG (addr);
19777 return memory_address_length (addr);
19780 extract_insn_cached (insn);
19781 for (i = recog_data.n_operands - 1; i >= 0; --i)
19782 if (MEM_P (recog_data.operand[i]))
19784 constrain_operands_cached (reload_completed);
19785 if (which_alternative != -1)
19787 const char *constraints = recog_data.constraints[i];
19788 int alt = which_alternative;
19790 while (*constraints == '=' || *constraints == '+')
19793 while (*constraints++ != ',')
19795 /* Skip ignored operands. */
19796 if (*constraints == 'X')
19799 return memory_address_length (XEXP (recog_data.operand[i], 0));
19804 /* Compute default value for "length_vex" attribute. It includes
19805 the 2- or 3-byte VEX prefix and 1 opcode byte. */
19808 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
19813 /* Only the 0f opcode can use the 2-byte VEX prefix; the VEX W bit requires
19814 the 3-byte VEX prefix. */
19815 if (!has_0f_opcode || has_vex_w)
19818 /* We can always use the 2-byte VEX prefix in 32-bit mode. */
19822 extract_insn_cached (insn);
19824 for (i = recog_data.n_operands - 1; i >= 0; --i)
19825 if (REG_P (recog_data.operand[i]))
19827 /* The REX.W bit requires the 3-byte VEX prefix. */
19828 if (GET_MODE (recog_data.operand[i]) == DImode
19829 && GENERAL_REG_P (recog_data.operand[i]))
19834 /* The REX.X or REX.B bits require the 3-byte VEX prefix. */
19835 if (MEM_P (recog_data.operand[i])
19836 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
19843 /* Return the maximum number of instructions a CPU can issue. */
19846 ix86_issue_rate (void)
19850 case PROCESSOR_PENTIUM:
19851 case PROCESSOR_ATOM:
19855 case PROCESSOR_PENTIUMPRO:
19856 case PROCESSOR_PENTIUM4:
19857 case PROCESSOR_ATHLON:
19859 case PROCESSOR_AMDFAM10:
19860 case PROCESSOR_NOCONA:
19861 case PROCESSOR_GENERIC32:
19862 case PROCESSOR_GENERIC64:
19863 case PROCESSOR_BDVER1:
19866 case PROCESSOR_CORE2:
19874 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads the flags
19875 set by DEP_INSN and nothing else set by DEP_INSN. */
19878 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
19882 /* Simplify the test for uninteresting insns. */
19883 if (insn_type != TYPE_SETCC
19884 && insn_type != TYPE_ICMOV
19885 && insn_type != TYPE_FCMOV
19886 && insn_type != TYPE_IBR)
19889 if ((set = single_set (dep_insn)) != 0)
19891 set = SET_DEST (set);
19894 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
19895 && XVECLEN (PATTERN (dep_insn), 0) == 2
19896 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
19897 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
19899 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
19900 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
19905 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
19908 /* This test is true if the dependent insn reads the flags but
19909 not any other potentially set register. */
19910 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
19913 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
19919 /* Return true iff USE_INSN has a memory address with operands set by
19923 ix86_agi_dependent (rtx set_insn, rtx use_insn)
19926 extract_insn_cached (use_insn);
19927 for (i = recog_data.n_operands - 1; i >= 0; --i)
19928 if (MEM_P (recog_data.operand[i]))
19930 rtx addr = XEXP (recog_data.operand[i], 0);
19931 return modified_in_p (addr, set_insn) != 0;
19937 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
19939 enum attr_type insn_type, dep_insn_type;
19940 enum attr_memory memory;
19942 int dep_insn_code_number;
19944 /* Anti and output dependencies have zero cost on all CPUs. */
19945 if (REG_NOTE_KIND (link) != 0)
19948 dep_insn_code_number = recog_memoized (dep_insn);
19950 /* If we can't recognize the insns, we can't really do anything. */
19951 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
19954 insn_type = get_attr_type (insn);
19955 dep_insn_type = get_attr_type (dep_insn);
19959 case PROCESSOR_PENTIUM:
19960 /* Address Generation Interlock adds a cycle of latency. */
19961 if (insn_type == TYPE_LEA)
19963 rtx addr = PATTERN (insn);
19965 if (GET_CODE (addr) == PARALLEL)
19966 addr = XVECEXP (addr, 0, 0);
19968 gcc_assert (GET_CODE (addr) == SET);
19970 addr = SET_SRC (addr);
19971 if (modified_in_p (addr, dep_insn))
19974 else if (ix86_agi_dependent (dep_insn, insn))
19977 /* ??? Compares pair with jump/setcc. */
19978 if (ix86_flags_dependent (insn, dep_insn, insn_type))
      /* Floating point stores require the value to be ready one cycle
	 earlier.  */
19982 if (insn_type == TYPE_FMOV
19983 && get_attr_memory (insn) == MEMORY_STORE
19984 && !ix86_agi_dependent (dep_insn, insn))
19988 case PROCESSOR_PENTIUMPRO:
19989 memory = get_attr_memory (insn);
19991 /* INT->FP conversion is expensive. */
19992 if (get_attr_fp_int_src (dep_insn))
      /* There is one cycle of extra latency between an FP op and a store.  */
19996 if (insn_type == TYPE_FMOV
19997 && (set = single_set (dep_insn)) != NULL_RTX
19998 && (set2 = single_set (insn)) != NULL_RTX
19999 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
20000 && MEM_P (SET_DEST (set2)))
      /* Show the ability of the reorder buffer to hide the latency of a load
	 by executing it in parallel with the previous instruction, when the
	 previous instruction is not needed to compute the address.  */
20006 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
20007 && !ix86_agi_dependent (dep_insn, insn))
	  /* Claim moves to take one cycle, as the core can issue one load
	     at a time and the next load can start a cycle later.  */
20011 if (dep_insn_type == TYPE_IMOV
20012 || dep_insn_type == TYPE_FMOV)
20020 memory = get_attr_memory (insn);
      /* The esp dependency is resolved before the instruction is really
	 finished.  */
20024 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
20025 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
20028 /* INT->FP conversion is expensive. */
20029 if (get_attr_fp_int_src (dep_insn))
      /* Show the ability of the reorder buffer to hide the latency of a load
	 by executing it in parallel with the previous instruction, when the
	 previous instruction is not needed to compute the address.  */
20035 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
20036 && !ix86_agi_dependent (dep_insn, insn))
	  /* Claim moves to take one cycle, as the core can issue one load
	     at a time and the next load can start a cycle later.  */
20040 if (dep_insn_type == TYPE_IMOV
20041 || dep_insn_type == TYPE_FMOV)
20050 case PROCESSOR_ATHLON:
20052 case PROCESSOR_AMDFAM10:
20053 case PROCESSOR_BDVER1:
20054 case PROCESSOR_ATOM:
20055 case PROCESSOR_GENERIC32:
20056 case PROCESSOR_GENERIC64:
20057 memory = get_attr_memory (insn);
      /* Show the ability of the reorder buffer to hide the latency of a load
	 by executing it in parallel with the previous instruction, when the
	 previous instruction is not needed to compute the address.  */
20062 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
20063 && !ix86_agi_dependent (dep_insn, insn))
20065 enum attr_unit unit = get_attr_unit (insn);
	  /* Because of the difference between the lengths of the integer and
	     floating-point unit pipeline preparation stages, the memory
	     operands for floating point are cheaper.

	     ??? For Athlon the difference is most probably 2.  */
20073 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
20076 loadcost = TARGET_ATHLON ? 2 : 0;
20078 if (cost >= loadcost)
/* How many alternative schedules to try.  This should be as wide as the
   scheduling freedom in the DFA, but no wider.  Making this value too
   large results in extra work for the scheduler.  */
20096 ia32_multipass_dfa_lookahead (void)
20100 case PROCESSOR_PENTIUM:
20103 case PROCESSOR_PENTIUMPRO:
/* Compute the alignment given to a constant that is being placed in memory.
   EXP is the constant and ALIGN is the alignment that the object would
   ordinarily have.  The value of this function is used instead of that
   alignment to align the object.  */
20120 ix86_constant_alignment (tree exp, int align)
20122 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
20123 || TREE_CODE (exp) == INTEGER_CST)
20125 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
20127 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
20130 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
20131 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
20132 return BITS_PER_WORD;
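/* For example, a DFmode constant that would otherwise get only 32-bit
   alignment is raised to 64-bit alignment so fld never straddles an
   alignment boundary, and a long string constant is word-aligned when
   not optimizing for size so block moves can use full-word accesses.
   (A summary of the cases above, for illustration.)  */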
20137 /* Compute the alignment for a static variable.
20138 TYPE is the data type, and ALIGN is the alignment that
20139 the object would ordinarily have. The value of this function is used
20140 instead of that alignment to align the object. */
20143 ix86_data_alignment (tree type, int align)
20145 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
20147 if (AGGREGATE_TYPE_P (type)
20148 && TYPE_SIZE (type)
20149 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20150 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
20151 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
20152 && align < max_align)
  /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to a 16-byte boundary.  */
20159 if (AGGREGATE_TYPE_P (type)
20160 && TYPE_SIZE (type)
20161 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20162 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
20163 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20167 if (TREE_CODE (type) == ARRAY_TYPE)
20169 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20171 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20174 else if (TREE_CODE (type) == COMPLEX_TYPE)
20177 if (TYPE_MODE (type) == DCmode && align < 64)
20179 if ((TYPE_MODE (type) == XCmode
20180 || TYPE_MODE (type) == TCmode) && align < 128)
20183 else if ((TREE_CODE (type) == RECORD_TYPE
20184 || TREE_CODE (type) == UNION_TYPE
20185 || TREE_CODE (type) == QUAL_UNION_TYPE)
20186 && TYPE_FIELDS (type))
20188 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20190 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20193 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20194 || TREE_CODE (type) == INTEGER_TYPE)
20196 if (TYPE_MODE (type) == DFmode && align < 64)
20198 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
/* Compute the alignment for a local variable or a stack slot.  EXP is
   the data type or decl itself, MODE is the widest mode available and
   ALIGN is the alignment that the object would ordinarily have.  The
   value of this macro is used instead of that alignment to align the
   object.  */
20212 ix86_local_alignment (tree exp, enum machine_mode mode,
20213 unsigned int align)
20217 if (exp && DECL_P (exp))
20219 type = TREE_TYPE (exp);
20228 /* Don't do dynamic stack realignment for long long objects with
20229 -mpreferred-stack-boundary=2. */
20232 && ix86_preferred_stack_boundary < 64
20233 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
20234 && (!type || !TYPE_USER_ALIGN (type))
20235 && (!decl || !DECL_USER_ALIGN (decl)))
  /* If TYPE is NULL, we are allocating a stack slot for caller-save
     register in MODE.  We will return the largest alignment of XF
     and DF.  */
20243 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
20244 align = GET_MODE_ALIGNMENT (DFmode);
  /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to a 16-byte boundary.  The exact wording is:

	 An array uses the same alignment as its elements, except that a
	 local or global array variable of length at least 16 bytes or a
	 C99 variable-length array variable always has alignment of at
	 least 16 bytes.

     This was added to allow use of aligned SSE instructions on arrays.
     The rule is meant for static storage (where the compiler cannot do
     the analysis by itself).  We follow it for automatic variables only
     when convenient.  We fully control everything in the function being
     compiled, and functions from other units cannot rely on the
     alignment.

     Exclude the va_list type.  It is the common case of a local array
     where we cannot benefit from the alignment.  */
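/* For example (illustrative, assuming a 64-bit target and optimizing
   for speed):

       double d[4];	// 32 bytes -> eligible for 128-bit alignment,
			// letting the vectorizer use aligned SSE moves
       char buf[8];	// under 16 bytes -> element alignment is kept

   subject to the va_list exclusion handled above.  */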
20263 if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
20266 if (AGGREGATE_TYPE_P (type)
20267 && (TYPE_MAIN_VARIANT (type)
20268 != TYPE_MAIN_VARIANT (va_list_type_node))
20269 && TYPE_SIZE (type)
20270 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20271 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
20272 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20275 if (TREE_CODE (type) == ARRAY_TYPE)
20277 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20279 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20282 else if (TREE_CODE (type) == COMPLEX_TYPE)
20284 if (TYPE_MODE (type) == DCmode && align < 64)
20286 if ((TYPE_MODE (type) == XCmode
20287 || TYPE_MODE (type) == TCmode) && align < 128)
20290 else if ((TREE_CODE (type) == RECORD_TYPE
20291 || TREE_CODE (type) == UNION_TYPE
20292 || TREE_CODE (type) == QUAL_UNION_TYPE)
20293 && TYPE_FIELDS (type))
20295 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20297 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20300 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20301 || TREE_CODE (type) == INTEGER_TYPE)
20304 if (TYPE_MODE (type) == DFmode && align < 64)
20306 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20312 /* Compute the minimum required alignment for dynamic stack realignment
20313 purposes for a local variable, parameter or a stack slot. EXP is
20314 the data type or decl itself, MODE is its mode and ALIGN is the
20315 alignment that the object would ordinarily have. */
20318 ix86_minimum_alignment (tree exp, enum machine_mode mode,
20319 unsigned int align)
20323 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
20326 if (exp && DECL_P (exp))
20328 type = TREE_TYPE (exp);
20337 /* Don't do dynamic stack realignment for long long objects with
20338 -mpreferred-stack-boundary=2. */
20339 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
20340 && (!type || !TYPE_USER_ALIGN (type))
20341 && (!decl || !DECL_USER_ALIGN (decl)))
20347 /* Find a location for the static chain incoming to a nested function.
20348 This is a register, unless all free registers are used by arguments. */
20351 ix86_static_chain (const_tree fndecl, bool incoming_p)
20355 if (!DECL_STATIC_CHAIN (fndecl))
20360 /* We always use R10 in 64-bit mode. */
20366 /* By default in 32-bit mode we use ECX to pass the static chain. */
20369 fntype = TREE_TYPE (fndecl);
20370 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
20372 /* Fastcall functions use ecx/edx for arguments, which leaves
20373 us with EAX for the static chain. */
20376 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
20378 /* Thiscall functions use ecx for arguments, which leaves
20379 us with EAX for the static chain. */
20382 else if (ix86_function_regparm (fntype, fndecl) == 3)
20384 /* For regparm 3, we have no free call-clobbered registers in
20385 which to store the static chain. In order to implement this,
20386 we have the trampoline push the static chain to the stack.
20387 However, we can't push a value below the return address when
20388 we call the nested function directly, so we have to use an
20389 alternate entry point. For this we use ESI, and have the
20390 alternate entry point push ESI, so that things appear the
20391 same once we're executing the nested function. */
20394 if (fndecl == current_function_decl)
20395 ix86_static_chain_on_stack = true;
20396 return gen_frame_mem (SImode,
20397 plus_constant (arg_pointer_rtx, -8));
20403 return gen_rtx_REG (Pmode, regno);
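/* For example (32-bit, illustrative): a nested function declared
   fastcall passes its arguments in ECX/EDX, so the static chain arrives
   in EAX; a plain nested function receives it in the default ECX.  Only
   regparm(3) functions fall back to the stack slot handled above.  */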
20406 /* Emit RTL insns to initialize the variable parts of a trampoline.
20407 FNDECL is the decl of the target address; M_TRAMP is a MEM for
20408 the trampoline, and CHAIN_VALUE is an RTX for the static chain
20409 to be passed to the target function. */
20412 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
20416 fnaddr = XEXP (DECL_RTL (fndecl), 0);
20423 /* Depending on the static chain location, either load a register
20424 with a constant, or push the constant to the stack. All of the
20425 instructions are the same size. */
20426 chain = ix86_static_chain (fndecl, true);
20429 if (REGNO (chain) == CX_REG)
20431 else if (REGNO (chain) == AX_REG)
20434 gcc_unreachable ();
20439 mem = adjust_address (m_tramp, QImode, 0);
20440 emit_move_insn (mem, gen_int_mode (opcode, QImode));
20442 mem = adjust_address (m_tramp, SImode, 1);
20443 emit_move_insn (mem, chain_value);
  /* Compute the offset from the end of the jmp to the target function.
     In the case in which the trampoline stores the static chain on the
     stack, we need to skip the first insn, which pushes the (call-saved)
     register holding the static chain; this push is 1 byte.  */
20449 disp = expand_binop (SImode, sub_optab, fnaddr,
20450 plus_constant (XEXP (m_tramp, 0),
20451 MEM_P (chain) ? 9 : 10),
20452 NULL_RTX, 1, OPTAB_DIRECT);
20454 mem = adjust_address (m_tramp, QImode, 5);
20455 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
20457 mem = adjust_address (m_tramp, SImode, 6);
20458 emit_move_insn (mem, disp);
      /* Load the function address to r11.  Try to load the address using
	 the shorter movl instead of movabs.  We may want to support movq
	 for kernel mode, but the kernel does not use trampolines at the
	 moment.  */
20468 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
20470 fnaddr = copy_to_mode_reg (DImode, fnaddr);
20472 mem = adjust_address (m_tramp, HImode, offset);
20473 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
20475 mem = adjust_address (m_tramp, SImode, offset + 2);
20476 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
20481 mem = adjust_address (m_tramp, HImode, offset);
20482 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
20484 mem = adjust_address (m_tramp, DImode, offset + 2);
20485 emit_move_insn (mem, fnaddr);
20489 /* Load static chain using movabs to r10. */
20490 mem = adjust_address (m_tramp, HImode, offset);
20491 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
20493 mem = adjust_address (m_tramp, DImode, offset + 2);
20494 emit_move_insn (mem, chain_value);
20497 /* Jump to r11; the last (unused) byte is a nop, only there to
20498 pad the write out to a single 32-bit store. */
20499 mem = adjust_address (m_tramp, SImode, offset);
20500 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
20503 gcc_assert (offset <= TRAMPOLINE_SIZE);
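/* For reference, the 64-bit trampoline emitted above looks like this
   (byte layout reconstructed from the stores; shown for illustration):

       49 bb <imm64>	movabs $<fnaddr>, %r11	(or 41 bb <imm32>, movl)
       49 ba <imm64>	movabs $<chain>, %r10
       49 ff e3		jmpq *%r11
       90		nop	(pads the final write to one 32-bit store)

   The 32-bit variant is a one-byte mov-immediate (or push) of the chain
   value followed by an e9 <rel32> jump to the target.  */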
20506 #ifdef ENABLE_EXECUTE_STACK
20507 #ifdef CHECK_EXECUTE_STACK_ENABLED
20508 if (CHECK_EXECUTE_STACK_ENABLED)
20510 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
20511 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
20515 /* The following file contains several enumerations and data structures
20516 built from the definitions in i386-builtin-types.def. */
20518 #include "i386-builtin-types.inc"
20520 /* Table for the ix86 builtin non-function types. */
20521 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
20523 /* Retrieve an element from the above table, building some of
20524 the types lazily. */
20527 ix86_get_builtin_type (enum ix86_builtin_type tcode)
20529 unsigned int index;
  gcc_assert ((unsigned) tcode < ARRAY_SIZE (ix86_builtin_type_tab));
20534 type = ix86_builtin_type_tab[(int) tcode];
20538 gcc_assert (tcode > IX86_BT_LAST_PRIM);
20539 if (tcode <= IX86_BT_LAST_VECT)
20541 enum machine_mode mode;
20543 index = tcode - IX86_BT_LAST_PRIM - 1;
20544 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
20545 mode = ix86_builtin_type_vect_mode[index];
20547 type = build_vector_type_for_mode (itype, mode);
20553 index = tcode - IX86_BT_LAST_VECT - 1;
20554 if (tcode <= IX86_BT_LAST_PTR)
20555 quals = TYPE_UNQUALIFIED;
20557 quals = TYPE_QUAL_CONST;
20559 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
20560 if (quals != TYPE_UNQUALIFIED)
20561 itype = build_qualified_type (itype, quals);
20563 type = build_pointer_type (itype);
20566 ix86_builtin_type_tab[(int) tcode] = type;
20570 /* Table for the ix86 builtin function types. */
20571 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
20573 /* Retrieve an element from the above table, building some of
20574 the types lazily. */
20577 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
20581 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
20583 type = ix86_builtin_func_type_tab[(int) tcode];
20587 if (tcode <= IX86_BT_LAST_FUNC)
20589 unsigned start = ix86_builtin_func_start[(int) tcode];
20590 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
20591 tree rtype, atype, args = void_list_node;
20594 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
20595 for (i = after - 1; i > start; --i)
20597 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
20598 args = tree_cons (NULL, atype, args);
20601 type = build_function_type (rtype, args);
20605 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
20606 enum ix86_builtin_func_type icode;
20608 icode = ix86_builtin_func_alias_base[index];
20609 type = ix86_get_builtin_func_type (icode);
20612 ix86_builtin_func_type_tab[(int) tcode] = type;
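/* Both getters above use the same memoization shape: look the code up in
   a GTY(()) table and build the tree only on first use.  A minimal
   standalone sketch of the pattern (hypothetical names, not compiler
   code):

       static tree cache[NUM_CODES];

       static tree
       get_cached (int code)
       {
	 if (cache[code] == NULL_TREE)
	   cache[code] = build_it (code);	// built on first use only
	 return cache[code];
       }

   Keeping the table a GTY(()) root lets the garbage collector see the
   lazily created trees.  */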
20617 /* Codes for all the SSE/MMX builtins. */
20620 IX86_BUILTIN_ADDPS,
20621 IX86_BUILTIN_ADDSS,
20622 IX86_BUILTIN_DIVPS,
20623 IX86_BUILTIN_DIVSS,
20624 IX86_BUILTIN_MULPS,
20625 IX86_BUILTIN_MULSS,
20626 IX86_BUILTIN_SUBPS,
20627 IX86_BUILTIN_SUBSS,
20629 IX86_BUILTIN_CMPEQPS,
20630 IX86_BUILTIN_CMPLTPS,
20631 IX86_BUILTIN_CMPLEPS,
20632 IX86_BUILTIN_CMPGTPS,
20633 IX86_BUILTIN_CMPGEPS,
20634 IX86_BUILTIN_CMPNEQPS,
20635 IX86_BUILTIN_CMPNLTPS,
20636 IX86_BUILTIN_CMPNLEPS,
20637 IX86_BUILTIN_CMPNGTPS,
20638 IX86_BUILTIN_CMPNGEPS,
20639 IX86_BUILTIN_CMPORDPS,
20640 IX86_BUILTIN_CMPUNORDPS,
20641 IX86_BUILTIN_CMPEQSS,
20642 IX86_BUILTIN_CMPLTSS,
20643 IX86_BUILTIN_CMPLESS,
20644 IX86_BUILTIN_CMPNEQSS,
20645 IX86_BUILTIN_CMPNLTSS,
20646 IX86_BUILTIN_CMPNLESS,
20647 IX86_BUILTIN_CMPNGTSS,
20648 IX86_BUILTIN_CMPNGESS,
20649 IX86_BUILTIN_CMPORDSS,
20650 IX86_BUILTIN_CMPUNORDSS,
20652 IX86_BUILTIN_COMIEQSS,
20653 IX86_BUILTIN_COMILTSS,
20654 IX86_BUILTIN_COMILESS,
20655 IX86_BUILTIN_COMIGTSS,
20656 IX86_BUILTIN_COMIGESS,
20657 IX86_BUILTIN_COMINEQSS,
20658 IX86_BUILTIN_UCOMIEQSS,
20659 IX86_BUILTIN_UCOMILTSS,
20660 IX86_BUILTIN_UCOMILESS,
20661 IX86_BUILTIN_UCOMIGTSS,
20662 IX86_BUILTIN_UCOMIGESS,
20663 IX86_BUILTIN_UCOMINEQSS,
20665 IX86_BUILTIN_CVTPI2PS,
20666 IX86_BUILTIN_CVTPS2PI,
20667 IX86_BUILTIN_CVTSI2SS,
20668 IX86_BUILTIN_CVTSI642SS,
20669 IX86_BUILTIN_CVTSS2SI,
20670 IX86_BUILTIN_CVTSS2SI64,
20671 IX86_BUILTIN_CVTTPS2PI,
20672 IX86_BUILTIN_CVTTSS2SI,
20673 IX86_BUILTIN_CVTTSS2SI64,
20675 IX86_BUILTIN_MAXPS,
20676 IX86_BUILTIN_MAXSS,
20677 IX86_BUILTIN_MINPS,
20678 IX86_BUILTIN_MINSS,
20680 IX86_BUILTIN_LOADUPS,
20681 IX86_BUILTIN_STOREUPS,
20682 IX86_BUILTIN_MOVSS,
20684 IX86_BUILTIN_MOVHLPS,
20685 IX86_BUILTIN_MOVLHPS,
20686 IX86_BUILTIN_LOADHPS,
20687 IX86_BUILTIN_LOADLPS,
20688 IX86_BUILTIN_STOREHPS,
20689 IX86_BUILTIN_STORELPS,
20691 IX86_BUILTIN_MASKMOVQ,
20692 IX86_BUILTIN_MOVMSKPS,
20693 IX86_BUILTIN_PMOVMSKB,
20695 IX86_BUILTIN_MOVNTPS,
20696 IX86_BUILTIN_MOVNTQ,
20698 IX86_BUILTIN_LOADDQU,
20699 IX86_BUILTIN_STOREDQU,
20701 IX86_BUILTIN_PACKSSWB,
20702 IX86_BUILTIN_PACKSSDW,
20703 IX86_BUILTIN_PACKUSWB,
20705 IX86_BUILTIN_PADDB,
20706 IX86_BUILTIN_PADDW,
20707 IX86_BUILTIN_PADDD,
20708 IX86_BUILTIN_PADDQ,
20709 IX86_BUILTIN_PADDSB,
20710 IX86_BUILTIN_PADDSW,
20711 IX86_BUILTIN_PADDUSB,
20712 IX86_BUILTIN_PADDUSW,
20713 IX86_BUILTIN_PSUBB,
20714 IX86_BUILTIN_PSUBW,
20715 IX86_BUILTIN_PSUBD,
20716 IX86_BUILTIN_PSUBQ,
20717 IX86_BUILTIN_PSUBSB,
20718 IX86_BUILTIN_PSUBSW,
20719 IX86_BUILTIN_PSUBUSB,
20720 IX86_BUILTIN_PSUBUSW,
20723 IX86_BUILTIN_PANDN,
20727 IX86_BUILTIN_PAVGB,
20728 IX86_BUILTIN_PAVGW,
20730 IX86_BUILTIN_PCMPEQB,
20731 IX86_BUILTIN_PCMPEQW,
20732 IX86_BUILTIN_PCMPEQD,
20733 IX86_BUILTIN_PCMPGTB,
20734 IX86_BUILTIN_PCMPGTW,
20735 IX86_BUILTIN_PCMPGTD,
20737 IX86_BUILTIN_PMADDWD,
20739 IX86_BUILTIN_PMAXSW,
20740 IX86_BUILTIN_PMAXUB,
20741 IX86_BUILTIN_PMINSW,
20742 IX86_BUILTIN_PMINUB,
20744 IX86_BUILTIN_PMULHUW,
20745 IX86_BUILTIN_PMULHW,
20746 IX86_BUILTIN_PMULLW,
20748 IX86_BUILTIN_PSADBW,
20749 IX86_BUILTIN_PSHUFW,
20751 IX86_BUILTIN_PSLLW,
20752 IX86_BUILTIN_PSLLD,
20753 IX86_BUILTIN_PSLLQ,
20754 IX86_BUILTIN_PSRAW,
20755 IX86_BUILTIN_PSRAD,
20756 IX86_BUILTIN_PSRLW,
20757 IX86_BUILTIN_PSRLD,
20758 IX86_BUILTIN_PSRLQ,
20759 IX86_BUILTIN_PSLLWI,
20760 IX86_BUILTIN_PSLLDI,
20761 IX86_BUILTIN_PSLLQI,
20762 IX86_BUILTIN_PSRAWI,
20763 IX86_BUILTIN_PSRADI,
20764 IX86_BUILTIN_PSRLWI,
20765 IX86_BUILTIN_PSRLDI,
20766 IX86_BUILTIN_PSRLQI,
20768 IX86_BUILTIN_PUNPCKHBW,
20769 IX86_BUILTIN_PUNPCKHWD,
20770 IX86_BUILTIN_PUNPCKHDQ,
20771 IX86_BUILTIN_PUNPCKLBW,
20772 IX86_BUILTIN_PUNPCKLWD,
20773 IX86_BUILTIN_PUNPCKLDQ,
20775 IX86_BUILTIN_SHUFPS,
20777 IX86_BUILTIN_RCPPS,
20778 IX86_BUILTIN_RCPSS,
20779 IX86_BUILTIN_RSQRTPS,
20780 IX86_BUILTIN_RSQRTPS_NR,
20781 IX86_BUILTIN_RSQRTSS,
20782 IX86_BUILTIN_RSQRTF,
20783 IX86_BUILTIN_SQRTPS,
20784 IX86_BUILTIN_SQRTPS_NR,
20785 IX86_BUILTIN_SQRTSS,
20787 IX86_BUILTIN_UNPCKHPS,
20788 IX86_BUILTIN_UNPCKLPS,
20790 IX86_BUILTIN_ANDPS,
20791 IX86_BUILTIN_ANDNPS,
20793 IX86_BUILTIN_XORPS,
20796 IX86_BUILTIN_LDMXCSR,
20797 IX86_BUILTIN_STMXCSR,
20798 IX86_BUILTIN_SFENCE,
20800 /* 3DNow! Original */
20801 IX86_BUILTIN_FEMMS,
20802 IX86_BUILTIN_PAVGUSB,
20803 IX86_BUILTIN_PF2ID,
20804 IX86_BUILTIN_PFACC,
20805 IX86_BUILTIN_PFADD,
20806 IX86_BUILTIN_PFCMPEQ,
20807 IX86_BUILTIN_PFCMPGE,
20808 IX86_BUILTIN_PFCMPGT,
20809 IX86_BUILTIN_PFMAX,
20810 IX86_BUILTIN_PFMIN,
20811 IX86_BUILTIN_PFMUL,
20812 IX86_BUILTIN_PFRCP,
20813 IX86_BUILTIN_PFRCPIT1,
20814 IX86_BUILTIN_PFRCPIT2,
20815 IX86_BUILTIN_PFRSQIT1,
20816 IX86_BUILTIN_PFRSQRT,
20817 IX86_BUILTIN_PFSUB,
20818 IX86_BUILTIN_PFSUBR,
20819 IX86_BUILTIN_PI2FD,
20820 IX86_BUILTIN_PMULHRW,
20822 /* 3DNow! Athlon Extensions */
20823 IX86_BUILTIN_PF2IW,
20824 IX86_BUILTIN_PFNACC,
20825 IX86_BUILTIN_PFPNACC,
20826 IX86_BUILTIN_PI2FW,
20827 IX86_BUILTIN_PSWAPDSI,
20828 IX86_BUILTIN_PSWAPDSF,
20831 IX86_BUILTIN_ADDPD,
20832 IX86_BUILTIN_ADDSD,
20833 IX86_BUILTIN_DIVPD,
20834 IX86_BUILTIN_DIVSD,
20835 IX86_BUILTIN_MULPD,
20836 IX86_BUILTIN_MULSD,
20837 IX86_BUILTIN_SUBPD,
20838 IX86_BUILTIN_SUBSD,
20840 IX86_BUILTIN_CMPEQPD,
20841 IX86_BUILTIN_CMPLTPD,
20842 IX86_BUILTIN_CMPLEPD,
20843 IX86_BUILTIN_CMPGTPD,
20844 IX86_BUILTIN_CMPGEPD,
20845 IX86_BUILTIN_CMPNEQPD,
20846 IX86_BUILTIN_CMPNLTPD,
20847 IX86_BUILTIN_CMPNLEPD,
20848 IX86_BUILTIN_CMPNGTPD,
20849 IX86_BUILTIN_CMPNGEPD,
20850 IX86_BUILTIN_CMPORDPD,
20851 IX86_BUILTIN_CMPUNORDPD,
20852 IX86_BUILTIN_CMPEQSD,
20853 IX86_BUILTIN_CMPLTSD,
20854 IX86_BUILTIN_CMPLESD,
20855 IX86_BUILTIN_CMPNEQSD,
20856 IX86_BUILTIN_CMPNLTSD,
20857 IX86_BUILTIN_CMPNLESD,
20858 IX86_BUILTIN_CMPORDSD,
20859 IX86_BUILTIN_CMPUNORDSD,
20861 IX86_BUILTIN_COMIEQSD,
20862 IX86_BUILTIN_COMILTSD,
20863 IX86_BUILTIN_COMILESD,
20864 IX86_BUILTIN_COMIGTSD,
20865 IX86_BUILTIN_COMIGESD,
20866 IX86_BUILTIN_COMINEQSD,
20867 IX86_BUILTIN_UCOMIEQSD,
20868 IX86_BUILTIN_UCOMILTSD,
20869 IX86_BUILTIN_UCOMILESD,
20870 IX86_BUILTIN_UCOMIGTSD,
20871 IX86_BUILTIN_UCOMIGESD,
20872 IX86_BUILTIN_UCOMINEQSD,
20874 IX86_BUILTIN_MAXPD,
20875 IX86_BUILTIN_MAXSD,
20876 IX86_BUILTIN_MINPD,
20877 IX86_BUILTIN_MINSD,
20879 IX86_BUILTIN_ANDPD,
20880 IX86_BUILTIN_ANDNPD,
20882 IX86_BUILTIN_XORPD,
20884 IX86_BUILTIN_SQRTPD,
20885 IX86_BUILTIN_SQRTSD,
20887 IX86_BUILTIN_UNPCKHPD,
20888 IX86_BUILTIN_UNPCKLPD,
20890 IX86_BUILTIN_SHUFPD,
20892 IX86_BUILTIN_LOADUPD,
20893 IX86_BUILTIN_STOREUPD,
20894 IX86_BUILTIN_MOVSD,
20896 IX86_BUILTIN_LOADHPD,
20897 IX86_BUILTIN_LOADLPD,
20899 IX86_BUILTIN_CVTDQ2PD,
20900 IX86_BUILTIN_CVTDQ2PS,
20902 IX86_BUILTIN_CVTPD2DQ,
20903 IX86_BUILTIN_CVTPD2PI,
20904 IX86_BUILTIN_CVTPD2PS,
20905 IX86_BUILTIN_CVTTPD2DQ,
20906 IX86_BUILTIN_CVTTPD2PI,
20908 IX86_BUILTIN_CVTPI2PD,
20909 IX86_BUILTIN_CVTSI2SD,
20910 IX86_BUILTIN_CVTSI642SD,
20912 IX86_BUILTIN_CVTSD2SI,
20913 IX86_BUILTIN_CVTSD2SI64,
20914 IX86_BUILTIN_CVTSD2SS,
20915 IX86_BUILTIN_CVTSS2SD,
20916 IX86_BUILTIN_CVTTSD2SI,
20917 IX86_BUILTIN_CVTTSD2SI64,
20919 IX86_BUILTIN_CVTPS2DQ,
20920 IX86_BUILTIN_CVTPS2PD,
20921 IX86_BUILTIN_CVTTPS2DQ,
20923 IX86_BUILTIN_MOVNTI,
20924 IX86_BUILTIN_MOVNTPD,
20925 IX86_BUILTIN_MOVNTDQ,
20927 IX86_BUILTIN_MOVQ128,
20930 IX86_BUILTIN_MASKMOVDQU,
20931 IX86_BUILTIN_MOVMSKPD,
20932 IX86_BUILTIN_PMOVMSKB128,
20934 IX86_BUILTIN_PACKSSWB128,
20935 IX86_BUILTIN_PACKSSDW128,
20936 IX86_BUILTIN_PACKUSWB128,
20938 IX86_BUILTIN_PADDB128,
20939 IX86_BUILTIN_PADDW128,
20940 IX86_BUILTIN_PADDD128,
20941 IX86_BUILTIN_PADDQ128,
20942 IX86_BUILTIN_PADDSB128,
20943 IX86_BUILTIN_PADDSW128,
20944 IX86_BUILTIN_PADDUSB128,
20945 IX86_BUILTIN_PADDUSW128,
20946 IX86_BUILTIN_PSUBB128,
20947 IX86_BUILTIN_PSUBW128,
20948 IX86_BUILTIN_PSUBD128,
20949 IX86_BUILTIN_PSUBQ128,
20950 IX86_BUILTIN_PSUBSB128,
20951 IX86_BUILTIN_PSUBSW128,
20952 IX86_BUILTIN_PSUBUSB128,
20953 IX86_BUILTIN_PSUBUSW128,
20955 IX86_BUILTIN_PAND128,
20956 IX86_BUILTIN_PANDN128,
20957 IX86_BUILTIN_POR128,
20958 IX86_BUILTIN_PXOR128,
20960 IX86_BUILTIN_PAVGB128,
20961 IX86_BUILTIN_PAVGW128,
20963 IX86_BUILTIN_PCMPEQB128,
20964 IX86_BUILTIN_PCMPEQW128,
20965 IX86_BUILTIN_PCMPEQD128,
20966 IX86_BUILTIN_PCMPGTB128,
20967 IX86_BUILTIN_PCMPGTW128,
20968 IX86_BUILTIN_PCMPGTD128,
20970 IX86_BUILTIN_PMADDWD128,
20972 IX86_BUILTIN_PMAXSW128,
20973 IX86_BUILTIN_PMAXUB128,
20974 IX86_BUILTIN_PMINSW128,
20975 IX86_BUILTIN_PMINUB128,
20977 IX86_BUILTIN_PMULUDQ,
20978 IX86_BUILTIN_PMULUDQ128,
20979 IX86_BUILTIN_PMULHUW128,
20980 IX86_BUILTIN_PMULHW128,
20981 IX86_BUILTIN_PMULLW128,
20983 IX86_BUILTIN_PSADBW128,
20984 IX86_BUILTIN_PSHUFHW,
20985 IX86_BUILTIN_PSHUFLW,
20986 IX86_BUILTIN_PSHUFD,
20988 IX86_BUILTIN_PSLLDQI128,
20989 IX86_BUILTIN_PSLLWI128,
20990 IX86_BUILTIN_PSLLDI128,
20991 IX86_BUILTIN_PSLLQI128,
20992 IX86_BUILTIN_PSRAWI128,
20993 IX86_BUILTIN_PSRADI128,
20994 IX86_BUILTIN_PSRLDQI128,
20995 IX86_BUILTIN_PSRLWI128,
20996 IX86_BUILTIN_PSRLDI128,
20997 IX86_BUILTIN_PSRLQI128,
20999 IX86_BUILTIN_PSLLDQ128,
21000 IX86_BUILTIN_PSLLW128,
21001 IX86_BUILTIN_PSLLD128,
21002 IX86_BUILTIN_PSLLQ128,
21003 IX86_BUILTIN_PSRAW128,
21004 IX86_BUILTIN_PSRAD128,
21005 IX86_BUILTIN_PSRLW128,
21006 IX86_BUILTIN_PSRLD128,
21007 IX86_BUILTIN_PSRLQ128,
21009 IX86_BUILTIN_PUNPCKHBW128,
21010 IX86_BUILTIN_PUNPCKHWD128,
21011 IX86_BUILTIN_PUNPCKHDQ128,
21012 IX86_BUILTIN_PUNPCKHQDQ128,
21013 IX86_BUILTIN_PUNPCKLBW128,
21014 IX86_BUILTIN_PUNPCKLWD128,
21015 IX86_BUILTIN_PUNPCKLDQ128,
21016 IX86_BUILTIN_PUNPCKLQDQ128,
21018 IX86_BUILTIN_CLFLUSH,
21019 IX86_BUILTIN_MFENCE,
21020 IX86_BUILTIN_LFENCE,
21022 IX86_BUILTIN_BSRSI,
21023 IX86_BUILTIN_BSRDI,
21024 IX86_BUILTIN_RDPMC,
21025 IX86_BUILTIN_RDTSC,
21026 IX86_BUILTIN_RDTSCP,
21027 IX86_BUILTIN_ROLQI,
21028 IX86_BUILTIN_ROLHI,
21029 IX86_BUILTIN_RORQI,
21030 IX86_BUILTIN_RORHI,
21033 IX86_BUILTIN_ADDSUBPS,
21034 IX86_BUILTIN_HADDPS,
21035 IX86_BUILTIN_HSUBPS,
21036 IX86_BUILTIN_MOVSHDUP,
21037 IX86_BUILTIN_MOVSLDUP,
21038 IX86_BUILTIN_ADDSUBPD,
21039 IX86_BUILTIN_HADDPD,
21040 IX86_BUILTIN_HSUBPD,
21041 IX86_BUILTIN_LDDQU,
21043 IX86_BUILTIN_MONITOR,
21044 IX86_BUILTIN_MWAIT,
21047 IX86_BUILTIN_PHADDW,
21048 IX86_BUILTIN_PHADDD,
21049 IX86_BUILTIN_PHADDSW,
21050 IX86_BUILTIN_PHSUBW,
21051 IX86_BUILTIN_PHSUBD,
21052 IX86_BUILTIN_PHSUBSW,
21053 IX86_BUILTIN_PMADDUBSW,
21054 IX86_BUILTIN_PMULHRSW,
21055 IX86_BUILTIN_PSHUFB,
21056 IX86_BUILTIN_PSIGNB,
21057 IX86_BUILTIN_PSIGNW,
21058 IX86_BUILTIN_PSIGND,
21059 IX86_BUILTIN_PALIGNR,
21060 IX86_BUILTIN_PABSB,
21061 IX86_BUILTIN_PABSW,
21062 IX86_BUILTIN_PABSD,
21064 IX86_BUILTIN_PHADDW128,
21065 IX86_BUILTIN_PHADDD128,
21066 IX86_BUILTIN_PHADDSW128,
21067 IX86_BUILTIN_PHSUBW128,
21068 IX86_BUILTIN_PHSUBD128,
21069 IX86_BUILTIN_PHSUBSW128,
21070 IX86_BUILTIN_PMADDUBSW128,
21071 IX86_BUILTIN_PMULHRSW128,
21072 IX86_BUILTIN_PSHUFB128,
21073 IX86_BUILTIN_PSIGNB128,
21074 IX86_BUILTIN_PSIGNW128,
21075 IX86_BUILTIN_PSIGND128,
21076 IX86_BUILTIN_PALIGNR128,
21077 IX86_BUILTIN_PABSB128,
21078 IX86_BUILTIN_PABSW128,
21079 IX86_BUILTIN_PABSD128,
21081 /* AMDFAM10 - SSE4A New Instructions. */
21082 IX86_BUILTIN_MOVNTSD,
21083 IX86_BUILTIN_MOVNTSS,
21084 IX86_BUILTIN_EXTRQI,
21085 IX86_BUILTIN_EXTRQ,
21086 IX86_BUILTIN_INSERTQI,
21087 IX86_BUILTIN_INSERTQ,
21090 IX86_BUILTIN_BLENDPD,
21091 IX86_BUILTIN_BLENDPS,
21092 IX86_BUILTIN_BLENDVPD,
21093 IX86_BUILTIN_BLENDVPS,
21094 IX86_BUILTIN_PBLENDVB128,
21095 IX86_BUILTIN_PBLENDW128,
21100 IX86_BUILTIN_INSERTPS128,
21102 IX86_BUILTIN_MOVNTDQA,
21103 IX86_BUILTIN_MPSADBW128,
21104 IX86_BUILTIN_PACKUSDW128,
21105 IX86_BUILTIN_PCMPEQQ,
21106 IX86_BUILTIN_PHMINPOSUW128,
21108 IX86_BUILTIN_PMAXSB128,
21109 IX86_BUILTIN_PMAXSD128,
21110 IX86_BUILTIN_PMAXUD128,
21111 IX86_BUILTIN_PMAXUW128,
21113 IX86_BUILTIN_PMINSB128,
21114 IX86_BUILTIN_PMINSD128,
21115 IX86_BUILTIN_PMINUD128,
21116 IX86_BUILTIN_PMINUW128,
21118 IX86_BUILTIN_PMOVSXBW128,
21119 IX86_BUILTIN_PMOVSXBD128,
21120 IX86_BUILTIN_PMOVSXBQ128,
21121 IX86_BUILTIN_PMOVSXWD128,
21122 IX86_BUILTIN_PMOVSXWQ128,
21123 IX86_BUILTIN_PMOVSXDQ128,
21125 IX86_BUILTIN_PMOVZXBW128,
21126 IX86_BUILTIN_PMOVZXBD128,
21127 IX86_BUILTIN_PMOVZXBQ128,
21128 IX86_BUILTIN_PMOVZXWD128,
21129 IX86_BUILTIN_PMOVZXWQ128,
21130 IX86_BUILTIN_PMOVZXDQ128,
21132 IX86_BUILTIN_PMULDQ128,
21133 IX86_BUILTIN_PMULLD128,
21135 IX86_BUILTIN_ROUNDPD,
21136 IX86_BUILTIN_ROUNDPS,
21137 IX86_BUILTIN_ROUNDSD,
21138 IX86_BUILTIN_ROUNDSS,
21140 IX86_BUILTIN_PTESTZ,
21141 IX86_BUILTIN_PTESTC,
21142 IX86_BUILTIN_PTESTNZC,
21144 IX86_BUILTIN_VEC_INIT_V2SI,
21145 IX86_BUILTIN_VEC_INIT_V4HI,
21146 IX86_BUILTIN_VEC_INIT_V8QI,
21147 IX86_BUILTIN_VEC_EXT_V2DF,
21148 IX86_BUILTIN_VEC_EXT_V2DI,
21149 IX86_BUILTIN_VEC_EXT_V4SF,
21150 IX86_BUILTIN_VEC_EXT_V4SI,
21151 IX86_BUILTIN_VEC_EXT_V8HI,
21152 IX86_BUILTIN_VEC_EXT_V2SI,
21153 IX86_BUILTIN_VEC_EXT_V4HI,
21154 IX86_BUILTIN_VEC_EXT_V16QI,
21155 IX86_BUILTIN_VEC_SET_V2DI,
21156 IX86_BUILTIN_VEC_SET_V4SF,
21157 IX86_BUILTIN_VEC_SET_V4SI,
21158 IX86_BUILTIN_VEC_SET_V8HI,
21159 IX86_BUILTIN_VEC_SET_V4HI,
21160 IX86_BUILTIN_VEC_SET_V16QI,
21162 IX86_BUILTIN_VEC_PACK_SFIX,
21165 IX86_BUILTIN_CRC32QI,
21166 IX86_BUILTIN_CRC32HI,
21167 IX86_BUILTIN_CRC32SI,
21168 IX86_BUILTIN_CRC32DI,
21170 IX86_BUILTIN_PCMPESTRI128,
21171 IX86_BUILTIN_PCMPESTRM128,
21172 IX86_BUILTIN_PCMPESTRA128,
21173 IX86_BUILTIN_PCMPESTRC128,
21174 IX86_BUILTIN_PCMPESTRO128,
21175 IX86_BUILTIN_PCMPESTRS128,
21176 IX86_BUILTIN_PCMPESTRZ128,
21177 IX86_BUILTIN_PCMPISTRI128,
21178 IX86_BUILTIN_PCMPISTRM128,
21179 IX86_BUILTIN_PCMPISTRA128,
21180 IX86_BUILTIN_PCMPISTRC128,
21181 IX86_BUILTIN_PCMPISTRO128,
21182 IX86_BUILTIN_PCMPISTRS128,
21183 IX86_BUILTIN_PCMPISTRZ128,
21185 IX86_BUILTIN_PCMPGTQ,
21187 /* AES instructions */
21188 IX86_BUILTIN_AESENC128,
21189 IX86_BUILTIN_AESENCLAST128,
21190 IX86_BUILTIN_AESDEC128,
21191 IX86_BUILTIN_AESDECLAST128,
21192 IX86_BUILTIN_AESIMC128,
21193 IX86_BUILTIN_AESKEYGENASSIST128,
21195 /* PCLMUL instruction */
21196 IX86_BUILTIN_PCLMULQDQ128,
21199 IX86_BUILTIN_ADDPD256,
21200 IX86_BUILTIN_ADDPS256,
21201 IX86_BUILTIN_ADDSUBPD256,
21202 IX86_BUILTIN_ADDSUBPS256,
21203 IX86_BUILTIN_ANDPD256,
21204 IX86_BUILTIN_ANDPS256,
21205 IX86_BUILTIN_ANDNPD256,
21206 IX86_BUILTIN_ANDNPS256,
21207 IX86_BUILTIN_BLENDPD256,
21208 IX86_BUILTIN_BLENDPS256,
21209 IX86_BUILTIN_BLENDVPD256,
21210 IX86_BUILTIN_BLENDVPS256,
21211 IX86_BUILTIN_DIVPD256,
21212 IX86_BUILTIN_DIVPS256,
21213 IX86_BUILTIN_DPPS256,
21214 IX86_BUILTIN_HADDPD256,
21215 IX86_BUILTIN_HADDPS256,
21216 IX86_BUILTIN_HSUBPD256,
21217 IX86_BUILTIN_HSUBPS256,
21218 IX86_BUILTIN_MAXPD256,
21219 IX86_BUILTIN_MAXPS256,
21220 IX86_BUILTIN_MINPD256,
21221 IX86_BUILTIN_MINPS256,
21222 IX86_BUILTIN_MULPD256,
21223 IX86_BUILTIN_MULPS256,
21224 IX86_BUILTIN_ORPD256,
21225 IX86_BUILTIN_ORPS256,
21226 IX86_BUILTIN_SHUFPD256,
21227 IX86_BUILTIN_SHUFPS256,
21228 IX86_BUILTIN_SUBPD256,
21229 IX86_BUILTIN_SUBPS256,
21230 IX86_BUILTIN_XORPD256,
21231 IX86_BUILTIN_XORPS256,
21232 IX86_BUILTIN_CMPSD,
21233 IX86_BUILTIN_CMPSS,
21234 IX86_BUILTIN_CMPPD,
21235 IX86_BUILTIN_CMPPS,
21236 IX86_BUILTIN_CMPPD256,
21237 IX86_BUILTIN_CMPPS256,
21238 IX86_BUILTIN_CVTDQ2PD256,
21239 IX86_BUILTIN_CVTDQ2PS256,
21240 IX86_BUILTIN_CVTPD2PS256,
21241 IX86_BUILTIN_CVTPS2DQ256,
21242 IX86_BUILTIN_CVTPS2PD256,
21243 IX86_BUILTIN_CVTTPD2DQ256,
21244 IX86_BUILTIN_CVTPD2DQ256,
21245 IX86_BUILTIN_CVTTPS2DQ256,
21246 IX86_BUILTIN_EXTRACTF128PD256,
21247 IX86_BUILTIN_EXTRACTF128PS256,
21248 IX86_BUILTIN_EXTRACTF128SI256,
21249 IX86_BUILTIN_VZEROALL,
21250 IX86_BUILTIN_VZEROUPPER,
21251 IX86_BUILTIN_VPERMILVARPD,
21252 IX86_BUILTIN_VPERMILVARPS,
21253 IX86_BUILTIN_VPERMILVARPD256,
21254 IX86_BUILTIN_VPERMILVARPS256,
21255 IX86_BUILTIN_VPERMILPD,
21256 IX86_BUILTIN_VPERMILPS,
21257 IX86_BUILTIN_VPERMILPD256,
21258 IX86_BUILTIN_VPERMILPS256,
21259 IX86_BUILTIN_VPERMIL2PD,
21260 IX86_BUILTIN_VPERMIL2PS,
21261 IX86_BUILTIN_VPERMIL2PD256,
21262 IX86_BUILTIN_VPERMIL2PS256,
21263 IX86_BUILTIN_VPERM2F128PD256,
21264 IX86_BUILTIN_VPERM2F128PS256,
21265 IX86_BUILTIN_VPERM2F128SI256,
21266 IX86_BUILTIN_VBROADCASTSS,
21267 IX86_BUILTIN_VBROADCASTSD256,
21268 IX86_BUILTIN_VBROADCASTSS256,
21269 IX86_BUILTIN_VBROADCASTPD256,
21270 IX86_BUILTIN_VBROADCASTPS256,
21271 IX86_BUILTIN_VINSERTF128PD256,
21272 IX86_BUILTIN_VINSERTF128PS256,
21273 IX86_BUILTIN_VINSERTF128SI256,
21274 IX86_BUILTIN_LOADUPD256,
21275 IX86_BUILTIN_LOADUPS256,
21276 IX86_BUILTIN_STOREUPD256,
21277 IX86_BUILTIN_STOREUPS256,
21278 IX86_BUILTIN_LDDQU256,
21279 IX86_BUILTIN_MOVNTDQ256,
21280 IX86_BUILTIN_MOVNTPD256,
21281 IX86_BUILTIN_MOVNTPS256,
21282 IX86_BUILTIN_LOADDQU256,
21283 IX86_BUILTIN_STOREDQU256,
21284 IX86_BUILTIN_MASKLOADPD,
21285 IX86_BUILTIN_MASKLOADPS,
21286 IX86_BUILTIN_MASKSTOREPD,
21287 IX86_BUILTIN_MASKSTOREPS,
21288 IX86_BUILTIN_MASKLOADPD256,
21289 IX86_BUILTIN_MASKLOADPS256,
21290 IX86_BUILTIN_MASKSTOREPD256,
21291 IX86_BUILTIN_MASKSTOREPS256,
21292 IX86_BUILTIN_MOVSHDUP256,
21293 IX86_BUILTIN_MOVSLDUP256,
21294 IX86_BUILTIN_MOVDDUP256,
21296 IX86_BUILTIN_SQRTPD256,
21297 IX86_BUILTIN_SQRTPS256,
21298 IX86_BUILTIN_SQRTPS_NR256,
21299 IX86_BUILTIN_RSQRTPS256,
21300 IX86_BUILTIN_RSQRTPS_NR256,
21302 IX86_BUILTIN_RCPPS256,
21304 IX86_BUILTIN_ROUNDPD256,
21305 IX86_BUILTIN_ROUNDPS256,
21307 IX86_BUILTIN_UNPCKHPD256,
21308 IX86_BUILTIN_UNPCKLPD256,
21309 IX86_BUILTIN_UNPCKHPS256,
21310 IX86_BUILTIN_UNPCKLPS256,
21312 IX86_BUILTIN_SI256_SI,
21313 IX86_BUILTIN_PS256_PS,
21314 IX86_BUILTIN_PD256_PD,
21315 IX86_BUILTIN_SI_SI256,
21316 IX86_BUILTIN_PS_PS256,
21317 IX86_BUILTIN_PD_PD256,
21319 IX86_BUILTIN_VTESTZPD,
21320 IX86_BUILTIN_VTESTCPD,
21321 IX86_BUILTIN_VTESTNZCPD,
21322 IX86_BUILTIN_VTESTZPS,
21323 IX86_BUILTIN_VTESTCPS,
21324 IX86_BUILTIN_VTESTNZCPS,
21325 IX86_BUILTIN_VTESTZPD256,
21326 IX86_BUILTIN_VTESTCPD256,
21327 IX86_BUILTIN_VTESTNZCPD256,
21328 IX86_BUILTIN_VTESTZPS256,
21329 IX86_BUILTIN_VTESTCPS256,
21330 IX86_BUILTIN_VTESTNZCPS256,
21331 IX86_BUILTIN_PTESTZ256,
21332 IX86_BUILTIN_PTESTC256,
21333 IX86_BUILTIN_PTESTNZC256,
21335 IX86_BUILTIN_MOVMSKPD256,
21336 IX86_BUILTIN_MOVMSKPS256,
21338 /* TFmode support builtins. */
21340 IX86_BUILTIN_HUGE_VALQ,
21341 IX86_BUILTIN_FABSQ,
21342 IX86_BUILTIN_COPYSIGNQ,
21344 /* Vectorizer support builtins. */
21345 IX86_BUILTIN_CPYSGNPS,
21346 IX86_BUILTIN_CPYSGNPD,
21348 IX86_BUILTIN_CVTUDQ2PS,
21350 IX86_BUILTIN_VEC_PERM_V2DF,
21351 IX86_BUILTIN_VEC_PERM_V4SF,
21352 IX86_BUILTIN_VEC_PERM_V2DI,
21353 IX86_BUILTIN_VEC_PERM_V4SI,
21354 IX86_BUILTIN_VEC_PERM_V8HI,
21355 IX86_BUILTIN_VEC_PERM_V16QI,
21356 IX86_BUILTIN_VEC_PERM_V2DI_U,
21357 IX86_BUILTIN_VEC_PERM_V4SI_U,
21358 IX86_BUILTIN_VEC_PERM_V8HI_U,
21359 IX86_BUILTIN_VEC_PERM_V16QI_U,
21360 IX86_BUILTIN_VEC_PERM_V4DF,
21361 IX86_BUILTIN_VEC_PERM_V8SF,
21363 /* FMA4 and XOP instructions. */
21364 IX86_BUILTIN_VFMADDSS,
21365 IX86_BUILTIN_VFMADDSD,
21366 IX86_BUILTIN_VFMADDPS,
21367 IX86_BUILTIN_VFMADDPD,
21368 IX86_BUILTIN_VFMSUBSS,
21369 IX86_BUILTIN_VFMSUBSD,
21370 IX86_BUILTIN_VFMSUBPS,
21371 IX86_BUILTIN_VFMSUBPD,
21372 IX86_BUILTIN_VFMADDSUBPS,
21373 IX86_BUILTIN_VFMADDSUBPD,
21374 IX86_BUILTIN_VFMSUBADDPS,
21375 IX86_BUILTIN_VFMSUBADDPD,
21376 IX86_BUILTIN_VFNMADDSS,
21377 IX86_BUILTIN_VFNMADDSD,
21378 IX86_BUILTIN_VFNMADDPS,
21379 IX86_BUILTIN_VFNMADDPD,
21380 IX86_BUILTIN_VFNMSUBSS,
21381 IX86_BUILTIN_VFNMSUBSD,
21382 IX86_BUILTIN_VFNMSUBPS,
21383 IX86_BUILTIN_VFNMSUBPD,
21384 IX86_BUILTIN_VFMADDPS256,
21385 IX86_BUILTIN_VFMADDPD256,
21386 IX86_BUILTIN_VFMSUBPS256,
21387 IX86_BUILTIN_VFMSUBPD256,
21388 IX86_BUILTIN_VFMADDSUBPS256,
21389 IX86_BUILTIN_VFMADDSUBPD256,
21390 IX86_BUILTIN_VFMSUBADDPS256,
21391 IX86_BUILTIN_VFMSUBADDPD256,
21392 IX86_BUILTIN_VFNMADDPS256,
21393 IX86_BUILTIN_VFNMADDPD256,
21394 IX86_BUILTIN_VFNMSUBPS256,
21395 IX86_BUILTIN_VFNMSUBPD256,
21397 IX86_BUILTIN_VPCMOV,
21398 IX86_BUILTIN_VPCMOV_V2DI,
21399 IX86_BUILTIN_VPCMOV_V4SI,
21400 IX86_BUILTIN_VPCMOV_V8HI,
21401 IX86_BUILTIN_VPCMOV_V16QI,
21402 IX86_BUILTIN_VPCMOV_V4SF,
21403 IX86_BUILTIN_VPCMOV_V2DF,
21404 IX86_BUILTIN_VPCMOV256,
21405 IX86_BUILTIN_VPCMOV_V4DI256,
21406 IX86_BUILTIN_VPCMOV_V8SI256,
21407 IX86_BUILTIN_VPCMOV_V16HI256,
21408 IX86_BUILTIN_VPCMOV_V32QI256,
21409 IX86_BUILTIN_VPCMOV_V8SF256,
21410 IX86_BUILTIN_VPCMOV_V4DF256,
21412 IX86_BUILTIN_VPPERM,
21414 IX86_BUILTIN_VPMACSSWW,
21415 IX86_BUILTIN_VPMACSWW,
21416 IX86_BUILTIN_VPMACSSWD,
21417 IX86_BUILTIN_VPMACSWD,
21418 IX86_BUILTIN_VPMACSSDD,
21419 IX86_BUILTIN_VPMACSDD,
21420 IX86_BUILTIN_VPMACSSDQL,
21421 IX86_BUILTIN_VPMACSSDQH,
21422 IX86_BUILTIN_VPMACSDQL,
21423 IX86_BUILTIN_VPMACSDQH,
21424 IX86_BUILTIN_VPMADCSSWD,
21425 IX86_BUILTIN_VPMADCSWD,
21427 IX86_BUILTIN_VPHADDBW,
21428 IX86_BUILTIN_VPHADDBD,
21429 IX86_BUILTIN_VPHADDBQ,
21430 IX86_BUILTIN_VPHADDWD,
21431 IX86_BUILTIN_VPHADDWQ,
21432 IX86_BUILTIN_VPHADDDQ,
21433 IX86_BUILTIN_VPHADDUBW,
21434 IX86_BUILTIN_VPHADDUBD,
21435 IX86_BUILTIN_VPHADDUBQ,
21436 IX86_BUILTIN_VPHADDUWD,
21437 IX86_BUILTIN_VPHADDUWQ,
21438 IX86_BUILTIN_VPHADDUDQ,
21439 IX86_BUILTIN_VPHSUBBW,
21440 IX86_BUILTIN_VPHSUBWD,
21441 IX86_BUILTIN_VPHSUBDQ,
21443 IX86_BUILTIN_VPROTB,
21444 IX86_BUILTIN_VPROTW,
21445 IX86_BUILTIN_VPROTD,
21446 IX86_BUILTIN_VPROTQ,
21447 IX86_BUILTIN_VPROTB_IMM,
21448 IX86_BUILTIN_VPROTW_IMM,
21449 IX86_BUILTIN_VPROTD_IMM,
21450 IX86_BUILTIN_VPROTQ_IMM,
21452 IX86_BUILTIN_VPSHLB,
21453 IX86_BUILTIN_VPSHLW,
21454 IX86_BUILTIN_VPSHLD,
21455 IX86_BUILTIN_VPSHLQ,
21456 IX86_BUILTIN_VPSHAB,
21457 IX86_BUILTIN_VPSHAW,
21458 IX86_BUILTIN_VPSHAD,
21459 IX86_BUILTIN_VPSHAQ,
21461 IX86_BUILTIN_VFRCZSS,
21462 IX86_BUILTIN_VFRCZSD,
21463 IX86_BUILTIN_VFRCZPS,
21464 IX86_BUILTIN_VFRCZPD,
21465 IX86_BUILTIN_VFRCZPS256,
21466 IX86_BUILTIN_VFRCZPD256,
21468 IX86_BUILTIN_VPCOMEQUB,
21469 IX86_BUILTIN_VPCOMNEUB,
21470 IX86_BUILTIN_VPCOMLTUB,
21471 IX86_BUILTIN_VPCOMLEUB,
21472 IX86_BUILTIN_VPCOMGTUB,
21473 IX86_BUILTIN_VPCOMGEUB,
21474 IX86_BUILTIN_VPCOMFALSEUB,
21475 IX86_BUILTIN_VPCOMTRUEUB,
21477 IX86_BUILTIN_VPCOMEQUW,
21478 IX86_BUILTIN_VPCOMNEUW,
21479 IX86_BUILTIN_VPCOMLTUW,
21480 IX86_BUILTIN_VPCOMLEUW,
21481 IX86_BUILTIN_VPCOMGTUW,
21482 IX86_BUILTIN_VPCOMGEUW,
21483 IX86_BUILTIN_VPCOMFALSEUW,
21484 IX86_BUILTIN_VPCOMTRUEUW,
21486 IX86_BUILTIN_VPCOMEQUD,
21487 IX86_BUILTIN_VPCOMNEUD,
21488 IX86_BUILTIN_VPCOMLTUD,
21489 IX86_BUILTIN_VPCOMLEUD,
21490 IX86_BUILTIN_VPCOMGTUD,
21491 IX86_BUILTIN_VPCOMGEUD,
21492 IX86_BUILTIN_VPCOMFALSEUD,
21493 IX86_BUILTIN_VPCOMTRUEUD,
21495 IX86_BUILTIN_VPCOMEQUQ,
21496 IX86_BUILTIN_VPCOMNEUQ,
21497 IX86_BUILTIN_VPCOMLTUQ,
21498 IX86_BUILTIN_VPCOMLEUQ,
21499 IX86_BUILTIN_VPCOMGTUQ,
21500 IX86_BUILTIN_VPCOMGEUQ,
21501 IX86_BUILTIN_VPCOMFALSEUQ,
21502 IX86_BUILTIN_VPCOMTRUEUQ,
21504 IX86_BUILTIN_VPCOMEQB,
21505 IX86_BUILTIN_VPCOMNEB,
21506 IX86_BUILTIN_VPCOMLTB,
21507 IX86_BUILTIN_VPCOMLEB,
21508 IX86_BUILTIN_VPCOMGTB,
21509 IX86_BUILTIN_VPCOMGEB,
21510 IX86_BUILTIN_VPCOMFALSEB,
21511 IX86_BUILTIN_VPCOMTRUEB,
21513 IX86_BUILTIN_VPCOMEQW,
21514 IX86_BUILTIN_VPCOMNEW,
21515 IX86_BUILTIN_VPCOMLTW,
21516 IX86_BUILTIN_VPCOMLEW,
21517 IX86_BUILTIN_VPCOMGTW,
21518 IX86_BUILTIN_VPCOMGEW,
21519 IX86_BUILTIN_VPCOMFALSEW,
21520 IX86_BUILTIN_VPCOMTRUEW,
21522 IX86_BUILTIN_VPCOMEQD,
21523 IX86_BUILTIN_VPCOMNED,
21524 IX86_BUILTIN_VPCOMLTD,
21525 IX86_BUILTIN_VPCOMLED,
21526 IX86_BUILTIN_VPCOMGTD,
21527 IX86_BUILTIN_VPCOMGED,
21528 IX86_BUILTIN_VPCOMFALSED,
21529 IX86_BUILTIN_VPCOMTRUED,
21531 IX86_BUILTIN_VPCOMEQQ,
21532 IX86_BUILTIN_VPCOMNEQ,
21533 IX86_BUILTIN_VPCOMLTQ,
21534 IX86_BUILTIN_VPCOMLEQ,
21535 IX86_BUILTIN_VPCOMGTQ,
21536 IX86_BUILTIN_VPCOMGEQ,
21537 IX86_BUILTIN_VPCOMFALSEQ,
21538 IX86_BUILTIN_VPCOMTRUEQ,
21540 /* LWP instructions. */
21541 IX86_BUILTIN_LLWPCB,
21542 IX86_BUILTIN_SLWPCB,
21543 IX86_BUILTIN_LWPVAL32,
21544 IX86_BUILTIN_LWPVAL64,
21545 IX86_BUILTIN_LWPINS32,
21546 IX86_BUILTIN_LWPINS64,
21553 /* Table for the ix86 builtin decls. */
21554 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
/* Table of all of the builtin functions that are possible with different
   ISAs, but are waiting to be built until a function is declared to use
   that ISA.  */
21559 struct builtin_isa {
21560 const char *name; /* function name */
21561 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
21562 int isa; /* isa_flags this builtin is defined for */
21563 bool const_p; /* true if the declaration is constant */
  bool set_and_not_built_p;		/* true if declared, but not yet built */
21567 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
/* Add an ix86 target builtin function with CODE, NAME and TYPE.  Save the
   MASK of which isa_flags to use in the ix86_builtins_isa array.  Stores the
   function decl in the ix86_builtins array.  Returns the function decl or
   NULL_TREE, if the builtin was not added.

   If the front end has a special hook for builtin functions, delay adding
   builtin functions that aren't in the current ISA until the ISA is changed
   with function specific optimization.  Doing so can save about 300K for the
   default compiler.  When the builtin is expanded, check at that time whether
   it is valid.

   If the front end doesn't have a special hook, record all builtins, even if
   they aren't in the current ISA, in case the user uses function specific
   options for a different ISA, so that we don't get scope errors if a
   builtin is added in the middle of a function scope.  */
21587 def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
21588 enum ix86_builtins code)
21590 tree decl = NULL_TREE;
21592 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
21594 ix86_builtins_isa[(int) code].isa = mask;
21597 || (mask & ix86_isa_flags) != 0
21598 || (lang_hooks.builtin_function
21599 == lang_hooks.builtin_function_ext_scope))
21602 tree type = ix86_get_builtin_func_type (tcode);
21603 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
21605 ix86_builtins[(int) code] = decl;
21606 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
21610 ix86_builtins[(int) code] = NULL_TREE;
21611 ix86_builtins_isa[(int) code].tcode = tcode;
21612 ix86_builtins_isa[(int) code].name = name;
21613 ix86_builtins_isa[(int) code].const_p = false;
21614 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
21621 /* Like def_builtin, but also marks the function decl "const". */
21624 def_builtin_const (int mask, const char *name,
21625 enum ix86_builtin_func_type tcode, enum ix86_builtins code)
21627 tree decl = def_builtin (mask, name, tcode, code);
21629 TREE_READONLY (decl) = 1;
21631 ix86_builtins_isa[(int) code].const_p = true;
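/* A typical use, mirroring the bdesc tables below (illustrative; the
   real calls appear later in this file):

       def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_addps",
			  V4SF_FTYPE_V4SF_V4SF, IX86_BUILTIN_ADDPS);

   Marking the decl const (TREE_READONLY) lets the optimizers CSE and
   reorder calls to the builtin.  */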
/* Add any new builtin functions for a given ISA that may not have been
   declared.  This saves a bit of space compared to adding all of the
   declarations to the tree, even those that would never be used.  */
21641 ix86_add_new_builtins (int isa)
21645 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
21647 if ((ix86_builtins_isa[i].isa & isa) != 0
21648 && ix86_builtins_isa[i].set_and_not_built_p)
21652 /* Don't define the builtin again. */
21653 ix86_builtins_isa[i].set_and_not_built_p = false;
21655 type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
21656 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
21657 type, i, BUILT_IN_MD, NULL,
21660 ix86_builtins[i] = decl;
21661 if (ix86_builtins_isa[i].const_p)
21662 TREE_READONLY (decl) = 1;
21667 /* Bits for builtin_description.flag. */
21669 /* Set when we don't support the comparison natively, and should
21670 swap_comparison in order to support it. */
21671 #define BUILTIN_DESC_SWAP_OPERANDS 1
21673 struct builtin_description
21675 const unsigned int mask;
21676 const enum insn_code icode;
21677 const char *const name;
21678 const enum ix86_builtins code;
21679 const enum rtx_code comparison;
21683 static const struct builtin_description bdesc_comi[] =
21685 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
21686 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
21687 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
21688 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
21689 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
21690 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
21691 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
21692 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
21693 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
21694 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
21695 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
21696 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
21697 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
21698 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
21699 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
21700 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
21701 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
21702 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
21703 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
21704 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
21705 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
21706 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
21707 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
21708 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
21711 static const struct builtin_description bdesc_pcmpestr[] =
21714 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
21715 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
21716 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
21717 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
21718 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
21719 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
21720 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
21723 static const struct builtin_description bdesc_pcmpistr[] =
21726 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
21727 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
21728 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
21729 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
21730 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
21731 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
21732 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
/* Special builtins with a variable number of arguments.  */
21736 static const struct builtin_description bdesc_special_args[] =
21738 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
21739 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
21742 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21745 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21748 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21749 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21750 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21752 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21753 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21754 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21755 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21757 /* SSE or 3DNow!A */
21758 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21759 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
21762 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21763 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21764 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21765 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
21766 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21767 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
21768 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
21769 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
21770 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21772 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21773 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21776 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21779 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
21782 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },

  /* AVX */
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },

  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },

  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
};
/* Builtins with variable number of arguments.  */
static const struct builtin_description bdesc_args[] =
{
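  /* Each row pairs an ISA mask with the insn pattern used to expand one
     builtin: { mask, icode, name, builtin code, comparison, flag }, per
     the builtin_description struct defined earlier in this file.  The
     mask gates registration on the corresponding -m option, and the
     final flag field carries the ..._FTYPE_... enum describing the
     builtin's prototype.  */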
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
  { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
  /* MMX */
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
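  /* In the shift rows above, a ..._SI_COUNT type means the shift count
     is the trailing scalar operand (the psllwi form), while a
     ..._V4HI_COUNT or similar type means the count comes in a vector
     register (the psllw form); the expander handles the trailing
     _COUNT operand specially.  */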
  /* 3DNow! */
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },

  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  /* 3DNow!A */
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  /* SSE */
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
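  /* The compare rows above reuse one mask-compare pattern per mode; the
     rtx code column selects the predicate, and a ..._SWAP type tells the
     expander to swap the two operands first.  Hence cmpgtps is encoded
     as LT with swapped operands and cmpnltps as UNGE: the instruction
     only provides the EQ/LT/LE/UNORD family and their negations.  */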
  { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },

  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  /* SSE MMX or 3DNow!A */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
  /* SSE2 */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },
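  /* The vec_perm rows above deliberately use CODE_FOR_nothing: no single
     insn implements a general permute, so these builtins are recognized
     by their builtin code and lowered specially (they back the
     vectorizer's permute support rather than going through this table's
     normal expansion path).  */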
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
  /* SSE2 MMX */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },

  /* SSE3 */
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  /* SSSE3 */
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
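  /* The ..._INT_CONVERT types on the palignr rows (and on the
     pslldqi128/psrldqi128 rows above) mark builtins whose declared
     vector types differ from the mode the insn pattern actually uses
     (TImode for the 128-bit forms, DImode for the MMX form); the
     expander inserts the mode conversions.  */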
  /* SSE4.1 */
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },

  /* SSE4.2 */
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },

  /* SSE4A */
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  /* AES */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  /* PCLMUL */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },

  /* AVX */
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si_si256, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps_ps256, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd_pd256, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },

  { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
};
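
/* Illustrative note (a sketch, not part of the compiler proper): every
   bdesc_args row above becomes a user-callable builtin whose C prototype
   is derived from its ix86_builtin_func_type.  The UINT_FTYPE_UINT_UINT
   row for CODE_FOR_sse4_2_crc32si, for example, is reachable from user
   code compiled with -msse4.2 roughly as:

       unsigned int
       crc32_step (unsigned int crc, unsigned int data)
       {
         return __builtin_ia32_crc32si (crc, data);
       }

   The intrinsic headers wrap these builtins in thin inline functions.  */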
/* FMA4 and XOP.  */
#define MULTI_ARG_4_DF2_DI_I	V2DF_FTYPE_V2DF_V2DF_V2DI_INT
#define MULTI_ARG_4_DF2_DI_I1	V4DF_FTYPE_V4DF_V4DF_V4DI_INT
#define MULTI_ARG_4_SF2_SI_I	V4SF_FTYPE_V4SF_V4SF_V4SI_INT
#define MULTI_ARG_4_SF2_SI_I1	V8SF_FTYPE_V8SF_V8SF_V8SI_INT
#define MULTI_ARG_3_SF		V4SF_FTYPE_V4SF_V4SF_V4SF
#define MULTI_ARG_3_DF		V2DF_FTYPE_V2DF_V2DF_V2DF
#define MULTI_ARG_3_SF2		V8SF_FTYPE_V8SF_V8SF_V8SF
#define MULTI_ARG_3_DF2		V4DF_FTYPE_V4DF_V4DF_V4DF
#define MULTI_ARG_3_DI		V2DI_FTYPE_V2DI_V2DI_V2DI
#define MULTI_ARG_3_SI		V4SI_FTYPE_V4SI_V4SI_V4SI
#define MULTI_ARG_3_SI_DI	V4SI_FTYPE_V4SI_V4SI_V2DI
#define MULTI_ARG_3_HI		V8HI_FTYPE_V8HI_V8HI_V8HI
#define MULTI_ARG_3_HI_SI	V8HI_FTYPE_V8HI_V8HI_V4SI
#define MULTI_ARG_3_QI		V16QI_FTYPE_V16QI_V16QI_V16QI
#define MULTI_ARG_3_DI2		V4DI_FTYPE_V4DI_V4DI_V4DI
#define MULTI_ARG_3_SI2		V8SI_FTYPE_V8SI_V8SI_V8SI
#define MULTI_ARG_3_HI2		V16HI_FTYPE_V16HI_V16HI_V16HI
#define MULTI_ARG_3_QI2		V32QI_FTYPE_V32QI_V32QI_V32QI
#define MULTI_ARG_2_SF		V4SF_FTYPE_V4SF_V4SF
#define MULTI_ARG_2_DF		V2DF_FTYPE_V2DF_V2DF
#define MULTI_ARG_2_DI		V2DI_FTYPE_V2DI_V2DI
#define MULTI_ARG_2_SI		V4SI_FTYPE_V4SI_V4SI
#define MULTI_ARG_2_HI		V8HI_FTYPE_V8HI_V8HI
#define MULTI_ARG_2_QI		V16QI_FTYPE_V16QI_V16QI
#define MULTI_ARG_2_DI_IMM	V2DI_FTYPE_V2DI_SI
#define MULTI_ARG_2_SI_IMM	V4SI_FTYPE_V4SI_SI
#define MULTI_ARG_2_HI_IMM	V8HI_FTYPE_V8HI_SI
#define MULTI_ARG_2_QI_IMM	V16QI_FTYPE_V16QI_SI
#define MULTI_ARG_2_DI_CMP	V2DI_FTYPE_V2DI_V2DI_CMP
#define MULTI_ARG_2_SI_CMP	V4SI_FTYPE_V4SI_V4SI_CMP
#define MULTI_ARG_2_HI_CMP	V8HI_FTYPE_V8HI_V8HI_CMP
#define MULTI_ARG_2_QI_CMP	V16QI_FTYPE_V16QI_V16QI_CMP
#define MULTI_ARG_2_SF_TF	V4SF_FTYPE_V4SF_V4SF_TF
#define MULTI_ARG_2_DF_TF	V2DF_FTYPE_V2DF_V2DF_TF
#define MULTI_ARG_2_DI_TF	V2DI_FTYPE_V2DI_V2DI_TF
#define MULTI_ARG_2_SI_TF	V4SI_FTYPE_V4SI_V4SI_TF
#define MULTI_ARG_2_HI_TF	V8HI_FTYPE_V8HI_V8HI_TF
#define MULTI_ARG_2_QI_TF	V16QI_FTYPE_V16QI_V16QI_TF
#define MULTI_ARG_1_SF		V4SF_FTYPE_V4SF
#define MULTI_ARG_1_DF		V2DF_FTYPE_V2DF
#define MULTI_ARG_1_SF2		V8SF_FTYPE_V8SF
#define MULTI_ARG_1_DF2		V4DF_FTYPE_V4DF
#define MULTI_ARG_1_DI		V2DI_FTYPE_V2DI
#define MULTI_ARG_1_SI		V4SI_FTYPE_V4SI
#define MULTI_ARG_1_HI		V8HI_FTYPE_V8HI
#define MULTI_ARG_1_QI		V16QI_FTYPE_V16QI
#define MULTI_ARG_1_SI_DI	V2DI_FTYPE_V4SI
#define MULTI_ARG_1_HI_DI	V2DI_FTYPE_V8HI
#define MULTI_ARG_1_HI_SI	V4SI_FTYPE_V8HI
#define MULTI_ARG_1_QI_DI	V2DI_FTYPE_V16QI
#define MULTI_ARG_1_QI_SI	V4SI_FTYPE_V16QI
#define MULTI_ARG_1_QI_HI	V8HI_FTYPE_V16QI
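
/* Reading guide (a sketch, not authoritative): each MULTI_ARG_* alias
   abbreviates the builtin prototype used in bdesc_multi_arg below.
   MULTI_ARG_3_HI_SI, for instance, stands for V8HI_FTYPE_V8HI_V8HI_V4SI,
   i.e. a builtin shaped like

       v8hi __builtin_ia32_xxx (v8hi, v8hi, v4si);

   (the "xxx" name is a placeholder).  The _IMM variants take a trailing
   immediate, while the _CMP and _TF variants additionally route a
   comparison code through the table's rtx_code column.  */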

static const struct builtin_description bdesc_multi_arg[] =
{
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv4sf4, "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv2df4, "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4sf4, "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv2df4, "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv4sf4, "__builtin_ia32_vfmsubss", IX86_BUILTIN_VFMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv2df4, "__builtin_ia32_vfmsubsd", IX86_BUILTIN_VFMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4sf4, "__builtin_ia32_vfmsubps", IX86_BUILTIN_VFMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv2df4, "__builtin_ia32_vfmsubpd", IX86_BUILTIN_VFMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv4sf4, "__builtin_ia32_vfnmaddss", IX86_BUILTIN_VFNMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv2df4, "__builtin_ia32_vfnmaddsd", IX86_BUILTIN_VFNMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4sf4, "__builtin_ia32_vfnmaddps", IX86_BUILTIN_VFNMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv2df4, "__builtin_ia32_vfnmaddpd", IX86_BUILTIN_VFNMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv4sf4, "__builtin_ia32_vfnmsubss", IX86_BUILTIN_VFNMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv2df4, "__builtin_ia32_vfnmsubsd", IX86_BUILTIN_VFNMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4sf4, "__builtin_ia32_vfnmsubps", IX86_BUILTIN_VFNMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv2df4, "__builtin_ia32_vfnmsubpd", IX86_BUILTIN_VFNMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4sf4, "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv2df4, "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4sf4, "__builtin_ia32_vfmsubaddps", IX86_BUILTIN_VFMSUBADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv2df4, "__builtin_ia32_vfmsubaddpd", IX86_BUILTIN_VFMSUBADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv8sf4256, "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4df4256, "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv8sf4256, "__builtin_ia32_vfmsubps256", IX86_BUILTIN_VFMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4df4256, "__builtin_ia32_vfmsubpd256", IX86_BUILTIN_VFMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv8sf4256, "__builtin_ia32_vfnmaddps256", IX86_BUILTIN_VFNMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4df4256, "__builtin_ia32_vfnmaddpd256", IX86_BUILTIN_VFNMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv8sf4256, "__builtin_ia32_vfnmsubps256", IX86_BUILTIN_VFNMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4df4256, "__builtin_ia32_vfnmsubpd256", IX86_BUILTIN_VFNMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv8sf4, "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4df4, "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv8sf4, "__builtin_ia32_vfmsubaddps256", IX86_BUILTIN_VFMSUBADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4df4, "__builtin_ia32_vfmsubaddpd256", IX86_BUILTIN_VFMSUBADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi", IX86_BUILTIN_VPCMOV_V16QI, UNKNOWN, (int)MULTI_ARG_3_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2256, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2256, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub", IX86_BUILTIN_VPCOMFALSEUB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw", IX86_BUILTIN_VPCOMFALSEUW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud", IX86_BUILTIN_VPCOMFALSEUD, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq", IX86_BUILTIN_VPCOMFALSEUQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
};
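
/* Example (a hedged sketch): the vpcom rows reuse a single maskcmp insn
   pattern per element width and encode the actual relation in the table's
   rtx_code column, so "__builtin_ia32_vpcomltb" expands
   CODE_FOR_xop_maskcmpv16qi3 with LT.  From user code built with -mxop
   this looks roughly like:

       typedef char v16qi __attribute__ ((vector_size (16)));

       v16qi
       bytes_less_than (v16qi a, v16qi b)
       {
         return __builtin_ia32_vpcomltb (a, b);
       }
*/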

/* Set up all the MMX/SSE builtins, even builtins for instructions that are not
   in the current target ISA to allow the user to compile particular modules
   with different target specific options that differ from the command line
   options.  */
static void
ix86_init_mmx_sse_builtins (void)
{
  const struct builtin_description * d;
  enum ix86_builtin_func_type ftype;
  size_t i;

  /* Add all special builtins with variable number of operands.  */
  for (i = 0, d = bdesc_special_args;
       i < ARRAY_SIZE (bdesc_special_args);
       i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin (d->mask, d->name, ftype, d->code);
    }

  /* Add all builtins with variable number of operands.  */
  for (i = 0, d = bdesc_args;
       i < ARRAY_SIZE (bdesc_args);
       i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpestr[im] insns.  */
  for (i = 0, d = bdesc_pcmpestr;
       i < ARRAY_SIZE (bdesc_pcmpestr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPESTRM128)
	ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
      else
	ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpistr[im] insns.  */
  for (i = 0, d = bdesc_pcmpistr;
       i < ARRAY_SIZE (bdesc_pcmpistr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPISTRM128)
	ftype = V16QI_FTYPE_V16QI_V16QI_INT;
      else
	ftype = INT_FTYPE_V16QI_V16QI_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* comi/ucomi insns.  */
  for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
    {
      if (d->mask == OPTION_MASK_ISA_SSE2)
	ftype = INT_FTYPE_V2DF_V2DF;
      else
	ftype = INT_FTYPE_V4SF_V4SF;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* SSE */
  def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
	       VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
  def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
	       UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);

  /* SSE or 3DNow!A */
  def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
	       "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
	       IX86_BUILTIN_MASKMOVQ);

  /* SSE2 */
  def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
	       VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);

  def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
	       VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
  x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
			    VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);

  /* SSE3 */
  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
	       VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
	       VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);

  /* AES */
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
		     V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
		     V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);

  /* PCLMUL */
  def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
		     V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);

  /* MMX access to the vec_init patterns.  */
  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
		     V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
		     V4HI_FTYPE_HI_HI_HI_HI,
		     IX86_BUILTIN_VEC_INIT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
		     V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
		     IX86_BUILTIN_VEC_INIT_V8QI);

  /* Access to the vec_extract patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
		     DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
		     DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
  def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
		     FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
		     SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
		     HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
		     "__builtin_ia32_vec_ext_v4hi",
		     HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
		     SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
		     QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);

  /* Access to the vec_set patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
		     "__builtin_ia32_vec_set_v2di",
		     V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
		     V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
		     V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
		     V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
		     "__builtin_ia32_vec_set_v4hi",
		     V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
		     V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);

  /* Add FMA4 and XOP multi-arg builtins.  */
  for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }
}
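
/* Why builtins are registered even for ISAs outside the global target
   (an explanatory sketch): with the target attribute a single translation
   unit may mix ISA levels, e.g.

       __attribute__ ((target ("sse4.2")))
       unsigned int
       f (unsigned int c, unsigned int d)
       {
         return __builtin_ia32_crc32si (c, d);
       }

   so the decls must be available up front; roughly speaking, the ISA mask
   recorded with each builtin is what gates its actual use.  */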

/* Internal method for ix86_init_builtins.  */

static void
ix86_init_builtins_va_builtins_abi (void)
{
  tree ms_va_ref, sysv_va_ref;
  tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
  tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
  tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
  tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;

  if (!TARGET_64BIT)
    return;
  fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
  fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
  ms_va_ref = build_reference_type (ms_va_list_type_node);
  sysv_va_ref =
    build_pointer_type (TREE_TYPE (sysv_va_list_type_node));

  fnvoid_va_end_ms =
    build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_start_ms =
    build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_end_sysv =
    build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
  fnvoid_va_start_sysv =
    build_varargs_function_type_list (void_type_node, sysv_va_ref,
				      NULL_TREE);
  fnvoid_va_copy_ms =
    build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
			      NULL_TREE);
  fnvoid_va_copy_sysv =
    build_function_type_list (void_type_node, sysv_va_ref,
			      sysv_va_ref, NULL_TREE);

  add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
			BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
			BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
			BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
			BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
			BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
			BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
}
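
/* Usage sketch (illustrative only): these builtins back va_start and
   friends for the "other" calling convention on 64-bit targets, e.g.

       __attribute__ ((ms_abi)) int
       first_vararg (int n, ...)
       {
         __builtin_ms_va_list ap;
         int v;
         __builtin_ms_va_start (ap, n);
         v = __builtin_va_arg (ap, int);
         __builtin_ms_va_end (ap);
         return v;
       }
*/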

static void
ix86_init_builtin_types (void)
{
  tree float128_type_node, float80_type_node;

  /* The __float80 type.  */
  float80_type_node = long_double_type_node;
  if (TYPE_MODE (float80_type_node) != XFmode)
    {
      /* The __float80 type.  */
      float80_type_node = make_node (REAL_TYPE);

      TYPE_PRECISION (float80_type_node) = 80;
      layout_type (float80_type_node);
    }
  (*lang_hooks.types.register_builtin_type) (float80_type_node, "__float80");

  /* The __float128 type.  */
  float128_type_node = make_node (REAL_TYPE);
  TYPE_PRECISION (float128_type_node) = 128;
  layout_type (float128_type_node);
  (*lang_hooks.types.register_builtin_type) (float128_type_node, "__float128");

  /* This macro is built by i386-builtin-types.awk.  */
  DEFINE_BUILTIN_PRIMITIVE_TYPES;
}
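
/* Illustrative note (sketch; the suffix behaviour is a GCC extension):
   once these nodes are registered, the extra floating types are usable
   as ordinary C types, with the machine-specific "w" and "q" constant
   suffixes:

       __float80  ext  = 1.0w;     80-bit XFmode extended precision
       __float128 quad = 1.0q;     128-bit TFmode quad precision
*/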

static void
ix86_init_builtins (void)
{
  tree t;

  ix86_init_builtin_types ();

  /* TFmode support builtins.  */
  def_builtin_const (0, "__builtin_infq",
		     FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
  def_builtin_const (0, "__builtin_huge_valq",
		     FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);

  /* We will expand them to normal calls if SSE2 isn't available since
     they are used by libgcc.  */
  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
  t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
			    BUILT_IN_MD, "__fabstf2", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;

  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
  t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
			    BUILT_IN_MD, "__copysigntf3", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;

  ix86_init_mmx_sse_builtins ();

  if (TARGET_64BIT)
    ix86_init_builtins_va_builtins_abi ();
}

/* Return the ix86 builtin for CODE.  */

static tree
ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= IX86_BUILTIN_MAX)
    return error_mark_node;

  return ix86_builtins[code];
}
23004 /* Errors in the source file can cause expand_expr to return const0_rtx
23005 where we expect a vector. To avoid crashing, use one of the vector
23006 clear instructions. */
23008 safe_vector_operand (rtx x, enum machine_mode mode)
23010 if (x == const0_rtx)
23011 x = CONST0_RTX (mode);
return x;
23015 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
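/* For instance, two-operand builtins such as __builtin_ia32_addps
   (V4SF_FTYPE_V4SF_V4SF) are routed here from ix86_expand_args_builtin
   when no comparison is involved. */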
23018 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
23021 tree arg0 = CALL_EXPR_ARG (exp, 0);
23022 tree arg1 = CALL_EXPR_ARG (exp, 1);
23023 rtx op0 = expand_normal (arg0);
23024 rtx op1 = expand_normal (arg1);
23025 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23026 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23027 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
23029 if (VECTOR_MODE_P (mode0))
23030 op0 = safe_vector_operand (op0, mode0);
23031 if (VECTOR_MODE_P (mode1))
23032 op1 = safe_vector_operand (op1, mode1);
23034 if (optimize || !target
23035 || GET_MODE (target) != tmode
23036 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23037 target = gen_reg_rtx (tmode);
23039 if (GET_MODE (op1) == SImode && mode1 == TImode)
23041 rtx x = gen_reg_rtx (V4SImode);
23042 emit_insn (gen_sse2_loadd (x, op1));
23043 op1 = gen_lowpart (TImode, x);
23046 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
23047 op0 = copy_to_mode_reg (mode0, op0);
23048 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
23049 op1 = copy_to_mode_reg (mode1, op1);
23051 pat = GEN_FCN (icode) (target, op0, op1);
if (! pat)
return 0;
emit_insn (pat);
return target;
23060 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
23063 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
23064 enum ix86_builtin_func_type m_type,
23065 enum rtx_code sub_code)
23070 bool comparison_p = false;
23072 bool last_arg_constant = false;
23073 int num_memory = 0;
23076 enum machine_mode mode;
23079 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23083 case MULTI_ARG_4_DF2_DI_I:
23084 case MULTI_ARG_4_DF2_DI_I1:
23085 case MULTI_ARG_4_SF2_SI_I:
23086 case MULTI_ARG_4_SF2_SI_I1:
23088 last_arg_constant = true;
23091 case MULTI_ARG_3_SF:
23092 case MULTI_ARG_3_DF:
23093 case MULTI_ARG_3_SF2:
23094 case MULTI_ARG_3_DF2:
23095 case MULTI_ARG_3_DI:
23096 case MULTI_ARG_3_SI:
23097 case MULTI_ARG_3_SI_DI:
23098 case MULTI_ARG_3_HI:
23099 case MULTI_ARG_3_HI_SI:
23100 case MULTI_ARG_3_QI:
23101 case MULTI_ARG_3_DI2:
23102 case MULTI_ARG_3_SI2:
23103 case MULTI_ARG_3_HI2:
23104 case MULTI_ARG_3_QI2:
23108 case MULTI_ARG_2_SF:
23109 case MULTI_ARG_2_DF:
23110 case MULTI_ARG_2_DI:
23111 case MULTI_ARG_2_SI:
23112 case MULTI_ARG_2_HI:
23113 case MULTI_ARG_2_QI:
23117 case MULTI_ARG_2_DI_IMM:
23118 case MULTI_ARG_2_SI_IMM:
23119 case MULTI_ARG_2_HI_IMM:
23120 case MULTI_ARG_2_QI_IMM:
23122 last_arg_constant = true;
23125 case MULTI_ARG_1_SF:
23126 case MULTI_ARG_1_DF:
23127 case MULTI_ARG_1_SF2:
23128 case MULTI_ARG_1_DF2:
23129 case MULTI_ARG_1_DI:
23130 case MULTI_ARG_1_SI:
23131 case MULTI_ARG_1_HI:
23132 case MULTI_ARG_1_QI:
23133 case MULTI_ARG_1_SI_DI:
23134 case MULTI_ARG_1_HI_DI:
23135 case MULTI_ARG_1_HI_SI:
23136 case MULTI_ARG_1_QI_DI:
23137 case MULTI_ARG_1_QI_SI:
23138 case MULTI_ARG_1_QI_HI:
23142 case MULTI_ARG_2_DI_CMP:
23143 case MULTI_ARG_2_SI_CMP:
23144 case MULTI_ARG_2_HI_CMP:
23145 case MULTI_ARG_2_QI_CMP:
23147 comparison_p = true;
23150 case MULTI_ARG_2_SF_TF:
23151 case MULTI_ARG_2_DF_TF:
23152 case MULTI_ARG_2_DI_TF:
23153 case MULTI_ARG_2_SI_TF:
23154 case MULTI_ARG_2_HI_TF:
23155 case MULTI_ARG_2_QI_TF:
23161 gcc_unreachable ();
23164 if (optimize || !target
23165 || GET_MODE (target) != tmode
23166 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23167 target = gen_reg_rtx (tmode);
23169 gcc_assert (nargs <= 4);
23171 for (i = 0; i < nargs; i++)
23173 tree arg = CALL_EXPR_ARG (exp, i);
23174 rtx op = expand_normal (arg);
23175 int adjust = (comparison_p) ? 1 : 0;
23176 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
23178 if (last_arg_constant && i == nargs-1)
23180 if (!CONST_INT_P (op))
23182 error ("last argument must be an immediate");
23183 return gen_reg_rtx (tmode);
23188 if (VECTOR_MODE_P (mode))
23189 op = safe_vector_operand (op, mode);
23191 /* If we aren't optimizing, only allow one memory operand to be
generated. */
23193 if (memory_operand (op, mode))
num_memory++;
23196 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
if (optimize
23199 || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
|| num_memory > 1)
23201 op = force_reg (mode, op);
args[i].op = op;
23205 args[i].mode = mode;
23211 pat = GEN_FCN (icode) (target, args[0].op);
23216 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
23217 GEN_INT ((int)sub_code));
23218 else if (! comparison_p)
23219 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
23222 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
args[0].op,
args[1].op);
23226 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
23231 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
23235 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op, args[3].op);
23239 gcc_unreachable ();
23249 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
23250 insns with vec_merge. */
23253 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
rtx target)
23257 tree arg0 = CALL_EXPR_ARG (exp, 0);
23258 rtx op1, op0 = expand_normal (arg0);
23259 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23260 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23262 if (optimize || !target
23263 || GET_MODE (target) != tmode
23264 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23265 target = gen_reg_rtx (tmode);
23267 if (VECTOR_MODE_P (mode0))
23268 op0 = safe_vector_operand (op0, mode0);
23270 if ((optimize && !register_operand (op0, mode0))
23271 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
23272 op0 = copy_to_mode_reg (mode0, op0);
op1 = op0;
23275 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
23276 op1 = copy_to_mode_reg (mode0, op1);
23278 pat = GEN_FCN (icode) (target, op0, op1);
23285 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
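/* For instance, SSE has no greater-than compare of its own, so
   __builtin_ia32_cmpgtps is described in bdesc_args as a less-than
   compare with the SWAP flag set and arrives here with SWAP true. */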
23288 ix86_expand_sse_compare (const struct builtin_description *d,
23289 tree exp, rtx target, bool swap)
23292 tree arg0 = CALL_EXPR_ARG (exp, 0);
23293 tree arg1 = CALL_EXPR_ARG (exp, 1);
23294 rtx op0 = expand_normal (arg0);
23295 rtx op1 = expand_normal (arg1);
23297 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
23298 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
23299 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
23300 enum rtx_code comparison = d->comparison;
23302 if (VECTOR_MODE_P (mode0))
23303 op0 = safe_vector_operand (op0, mode0);
23304 if (VECTOR_MODE_P (mode1))
23305 op1 = safe_vector_operand (op1, mode1);
23307 /* Swap operands if we have a comparison that isn't available in
hardware. */
23311 rtx tmp = gen_reg_rtx (mode1);
23312 emit_move_insn (tmp, op1);
23317 if (optimize || !target
23318 || GET_MODE (target) != tmode
23319 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
23320 target = gen_reg_rtx (tmode);
23322 if ((optimize && !register_operand (op0, mode0))
23323 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
23324 op0 = copy_to_mode_reg (mode0, op0);
23325 if ((optimize && !register_operand (op1, mode1))
23326 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
23327 op1 = copy_to_mode_reg (mode1, op1);
23329 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
23330 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
23337 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
23340 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
rtx target)
23344 tree arg0 = CALL_EXPR_ARG (exp, 0);
23345 tree arg1 = CALL_EXPR_ARG (exp, 1);
23346 rtx op0 = expand_normal (arg0);
23347 rtx op1 = expand_normal (arg1);
23348 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23349 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23350 enum rtx_code comparison = d->comparison;
23352 if (VECTOR_MODE_P (mode0))
23353 op0 = safe_vector_operand (op0, mode0);
23354 if (VECTOR_MODE_P (mode1))
23355 op1 = safe_vector_operand (op1, mode1);
23357 /* Swap operands if we have a comparison that isn't available in
hardware. */
23359 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23366 target = gen_reg_rtx (SImode);
23367 emit_move_insn (target, const0_rtx);
23368 target = gen_rtx_SUBREG (QImode, target, 0);
23370 if ((optimize && !register_operand (op0, mode0))
23371 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23372 op0 = copy_to_mode_reg (mode0, op0);
23373 if ((optimize && !register_operand (op1, mode1))
23374 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23375 op1 = copy_to_mode_reg (mode1, op1);
23377 pat = GEN_FCN (d->icode) (op0, op1);
if (! pat)
return 0;
emit_insn (pat);
23381 emit_insn (gen_rtx_SET (VOIDmode,
23382 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23383 gen_rtx_fmt_ee (comparison, QImode,
SET_DEST (pat),
const0_rtx)));
23387 return SUBREG_REG (target);
23390 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
23393 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
rtx target)
23397 tree arg0 = CALL_EXPR_ARG (exp, 0);
23398 tree arg1 = CALL_EXPR_ARG (exp, 1);
23399 rtx op0 = expand_normal (arg0);
23400 rtx op1 = expand_normal (arg1);
23401 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23402 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23403 enum rtx_code comparison = d->comparison;
23405 if (VECTOR_MODE_P (mode0))
23406 op0 = safe_vector_operand (op0, mode0);
23407 if (VECTOR_MODE_P (mode1))
23408 op1 = safe_vector_operand (op1, mode1);
23410 target = gen_reg_rtx (SImode);
23411 emit_move_insn (target, const0_rtx);
23412 target = gen_rtx_SUBREG (QImode, target, 0);
23414 if ((optimize && !register_operand (op0, mode0))
23415 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23416 op0 = copy_to_mode_reg (mode0, op0);
23417 if ((optimize && !register_operand (op1, mode1))
23418 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23419 op1 = copy_to_mode_reg (mode1, op1);
23421 pat = GEN_FCN (d->icode) (op0, op1);
if (! pat)
return 0;
emit_insn (pat);
23425 emit_insn (gen_rtx_SET (VOIDmode,
23426 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23427 gen_rtx_fmt_ee (comparison, QImode,
SET_DEST (pat),
const0_rtx)));
23431 return SUBREG_REG (target);
23434 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
23437 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23438 tree exp, rtx target)
23441 tree arg0 = CALL_EXPR_ARG (exp, 0);
23442 tree arg1 = CALL_EXPR_ARG (exp, 1);
23443 tree arg2 = CALL_EXPR_ARG (exp, 2);
23444 tree arg3 = CALL_EXPR_ARG (exp, 3);
23445 tree arg4 = CALL_EXPR_ARG (exp, 4);
23446 rtx scratch0, scratch1;
23447 rtx op0 = expand_normal (arg0);
23448 rtx op1 = expand_normal (arg1);
23449 rtx op2 = expand_normal (arg2);
23450 rtx op3 = expand_normal (arg3);
23451 rtx op4 = expand_normal (arg4);
23452 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23454 tmode0 = insn_data[d->icode].operand[0].mode;
23455 tmode1 = insn_data[d->icode].operand[1].mode;
23456 modev2 = insn_data[d->icode].operand[2].mode;
23457 modei3 = insn_data[d->icode].operand[3].mode;
23458 modev4 = insn_data[d->icode].operand[4].mode;
23459 modei5 = insn_data[d->icode].operand[5].mode;
23460 modeimm = insn_data[d->icode].operand[6].mode;
23462 if (VECTOR_MODE_P (modev2))
23463 op0 = safe_vector_operand (op0, modev2);
23464 if (VECTOR_MODE_P (modev4))
23465 op2 = safe_vector_operand (op2, modev4);
23467 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23468 op0 = copy_to_mode_reg (modev2, op0);
23469 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
23470 op1 = copy_to_mode_reg (modei3, op1);
23471 if ((optimize && !register_operand (op2, modev4))
23472 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
23473 op2 = copy_to_mode_reg (modev4, op2);
23474 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
23475 op3 = copy_to_mode_reg (modei5, op3);
23477 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
23479 error ("the fifth argument must be an 8-bit immediate");
23483 if (d->code == IX86_BUILTIN_PCMPESTRI128)
23485 if (optimize || !target
23486 || GET_MODE (target) != tmode0
23487 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23488 target = gen_reg_rtx (tmode0);
23490 scratch1 = gen_reg_rtx (tmode1);
23492 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
23494 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
23496 if (optimize || !target
23497 || GET_MODE (target) != tmode1
23498 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23499 target = gen_reg_rtx (tmode1);
23501 scratch0 = gen_reg_rtx (tmode0);
23503 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
23507 gcc_assert (d->flag);
23509 scratch0 = gen_reg_rtx (tmode0);
23510 scratch1 = gen_reg_rtx (tmode1);
23512 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
23522 target = gen_reg_rtx (SImode);
23523 emit_move_insn (target, const0_rtx);
23524 target = gen_rtx_SUBREG (QImode, target, 0);
emit_insn
23527 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23528 gen_rtx_fmt_ee (EQ, QImode,
23529 gen_rtx_REG ((enum machine_mode) d->flag,
FLAGS_REG),
const0_rtx)));
23532 return SUBREG_REG (target);
23539 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
23542 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
23543 tree exp, rtx target)
23546 tree arg0 = CALL_EXPR_ARG (exp, 0);
23547 tree arg1 = CALL_EXPR_ARG (exp, 1);
23548 tree arg2 = CALL_EXPR_ARG (exp, 2);
23549 rtx scratch0, scratch1;
23550 rtx op0 = expand_normal (arg0);
23551 rtx op1 = expand_normal (arg1);
23552 rtx op2 = expand_normal (arg2);
23553 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
23555 tmode0 = insn_data[d->icode].operand[0].mode;
23556 tmode1 = insn_data[d->icode].operand[1].mode;
23557 modev2 = insn_data[d->icode].operand[2].mode;
23558 modev3 = insn_data[d->icode].operand[3].mode;
23559 modeimm = insn_data[d->icode].operand[4].mode;
23561 if (VECTOR_MODE_P (modev2))
23562 op0 = safe_vector_operand (op0, modev2);
23563 if (VECTOR_MODE_P (modev3))
23564 op1 = safe_vector_operand (op1, modev3);
23566 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23567 op0 = copy_to_mode_reg (modev2, op0);
23568 if ((optimize && !register_operand (op1, modev3))
23569 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
23570 op1 = copy_to_mode_reg (modev3, op1);
23572 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
23574 error ("the third argument must be an 8-bit immediate");
23578 if (d->code == IX86_BUILTIN_PCMPISTRI128)
23580 if (optimize || !target
23581 || GET_MODE (target) != tmode0
23582 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23583 target = gen_reg_rtx (tmode0);
23585 scratch1 = gen_reg_rtx (tmode1);
23587 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
23589 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
23591 if (optimize || !target
23592 || GET_MODE (target) != tmode1
23593 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23594 target = gen_reg_rtx (tmode1);
23596 scratch0 = gen_reg_rtx (tmode0);
23598 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
23602 gcc_assert (d->flag);
23604 scratch0 = gen_reg_rtx (tmode0);
23605 scratch1 = gen_reg_rtx (tmode1);
23607 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
23617 target = gen_reg_rtx (SImode);
23618 emit_move_insn (target, const0_rtx);
23619 target = gen_rtx_SUBREG (QImode, target, 0);
emit_insn
23622 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23623 gen_rtx_fmt_ee (EQ, QImode,
23624 gen_rtx_REG ((enum machine_mode) d->flag,
FLAGS_REG),
const0_rtx)));
23627 return SUBREG_REG (target);
23633 /* Subroutine of ix86_expand_builtin to take care of insns with
23634 a variable number of operands. */
23637 ix86_expand_args_builtin (const struct builtin_description *d,
23638 tree exp, rtx target)
23640 rtx pat, real_target;
23641 unsigned int i, nargs;
23642 unsigned int nargs_constant = 0;
23643 int num_memory = 0;
23647 enum machine_mode mode;
23649 bool last_arg_count = false;
23650 enum insn_code icode = d->icode;
23651 const struct insn_data *insn_p = &insn_data[icode];
23652 enum machine_mode tmode = insn_p->operand[0].mode;
23653 enum machine_mode rmode = VOIDmode;
23655 enum rtx_code comparison = d->comparison;
23657 switch ((enum ix86_builtin_func_type) d->flag)
23659 case INT_FTYPE_V8SF_V8SF_PTEST:
23660 case INT_FTYPE_V4DI_V4DI_PTEST:
23661 case INT_FTYPE_V4DF_V4DF_PTEST:
23662 case INT_FTYPE_V4SF_V4SF_PTEST:
23663 case INT_FTYPE_V2DI_V2DI_PTEST:
23664 case INT_FTYPE_V2DF_V2DF_PTEST:
23665 return ix86_expand_sse_ptest (d, exp, target);
23666 case FLOAT128_FTYPE_FLOAT128:
23667 case FLOAT_FTYPE_FLOAT:
23668 case INT_FTYPE_INT:
23669 case UINT64_FTYPE_INT:
23670 case UINT16_FTYPE_UINT16:
23671 case INT64_FTYPE_INT64:
23672 case INT64_FTYPE_V4SF:
23673 case INT64_FTYPE_V2DF:
23674 case INT_FTYPE_V16QI:
23675 case INT_FTYPE_V8QI:
23676 case INT_FTYPE_V8SF:
23677 case INT_FTYPE_V4DF:
23678 case INT_FTYPE_V4SF:
23679 case INT_FTYPE_V2DF:
23680 case V16QI_FTYPE_V16QI:
23681 case V8SI_FTYPE_V8SF:
23682 case V8SI_FTYPE_V4SI:
23683 case V8HI_FTYPE_V8HI:
23684 case V8HI_FTYPE_V16QI:
23685 case V8QI_FTYPE_V8QI:
23686 case V8SF_FTYPE_V8SF:
23687 case V8SF_FTYPE_V8SI:
23688 case V8SF_FTYPE_V4SF:
23689 case V4SI_FTYPE_V4SI:
23690 case V4SI_FTYPE_V16QI:
23691 case V4SI_FTYPE_V4SF:
23692 case V4SI_FTYPE_V8SI:
23693 case V4SI_FTYPE_V8HI:
23694 case V4SI_FTYPE_V4DF:
23695 case V4SI_FTYPE_V2DF:
23696 case V4HI_FTYPE_V4HI:
23697 case V4DF_FTYPE_V4DF:
23698 case V4DF_FTYPE_V4SI:
23699 case V4DF_FTYPE_V4SF:
23700 case V4DF_FTYPE_V2DF:
23701 case V4SF_FTYPE_V4SF:
23702 case V4SF_FTYPE_V4SI:
23703 case V4SF_FTYPE_V8SF:
23704 case V4SF_FTYPE_V4DF:
23705 case V4SF_FTYPE_V2DF:
23706 case V2DI_FTYPE_V2DI:
23707 case V2DI_FTYPE_V16QI:
23708 case V2DI_FTYPE_V8HI:
23709 case V2DI_FTYPE_V4SI:
23710 case V2DF_FTYPE_V2DF:
23711 case V2DF_FTYPE_V4SI:
23712 case V2DF_FTYPE_V4DF:
23713 case V2DF_FTYPE_V4SF:
23714 case V2DF_FTYPE_V2SI:
23715 case V2SI_FTYPE_V2SI:
23716 case V2SI_FTYPE_V4SF:
23717 case V2SI_FTYPE_V2SF:
23718 case V2SI_FTYPE_V2DF:
23719 case V2SF_FTYPE_V2SF:
23720 case V2SF_FTYPE_V2SI:
23723 case V4SF_FTYPE_V4SF_VEC_MERGE:
23724 case V2DF_FTYPE_V2DF_VEC_MERGE:
23725 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
23726 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
23727 case V16QI_FTYPE_V16QI_V16QI:
23728 case V16QI_FTYPE_V8HI_V8HI:
23729 case V8QI_FTYPE_V8QI_V8QI:
23730 case V8QI_FTYPE_V4HI_V4HI:
23731 case V8HI_FTYPE_V8HI_V8HI:
23732 case V8HI_FTYPE_V16QI_V16QI:
23733 case V8HI_FTYPE_V4SI_V4SI:
23734 case V8SF_FTYPE_V8SF_V8SF:
23735 case V8SF_FTYPE_V8SF_V8SI:
23736 case V4SI_FTYPE_V4SI_V4SI:
23737 case V4SI_FTYPE_V8HI_V8HI:
23738 case V4SI_FTYPE_V4SF_V4SF:
23739 case V4SI_FTYPE_V2DF_V2DF:
23740 case V4HI_FTYPE_V4HI_V4HI:
23741 case V4HI_FTYPE_V8QI_V8QI:
23742 case V4HI_FTYPE_V2SI_V2SI:
23743 case V4DF_FTYPE_V4DF_V4DF:
23744 case V4DF_FTYPE_V4DF_V4DI:
23745 case V4SF_FTYPE_V4SF_V4SF:
23746 case V4SF_FTYPE_V4SF_V4SI:
23747 case V4SF_FTYPE_V4SF_V2SI:
23748 case V4SF_FTYPE_V4SF_V2DF:
23749 case V4SF_FTYPE_V4SF_DI:
23750 case V4SF_FTYPE_V4SF_SI:
23751 case V2DI_FTYPE_V2DI_V2DI:
23752 case V2DI_FTYPE_V16QI_V16QI:
23753 case V2DI_FTYPE_V4SI_V4SI:
23754 case V2DI_FTYPE_V2DI_V16QI:
23755 case V2DI_FTYPE_V2DF_V2DF:
23756 case V2SI_FTYPE_V2SI_V2SI:
23757 case V2SI_FTYPE_V4HI_V4HI:
23758 case V2SI_FTYPE_V2SF_V2SF:
23759 case V2DF_FTYPE_V2DF_V2DF:
23760 case V2DF_FTYPE_V2DF_V4SF:
23761 case V2DF_FTYPE_V2DF_V2DI:
23762 case V2DF_FTYPE_V2DF_DI:
23763 case V2DF_FTYPE_V2DF_SI:
23764 case V2SF_FTYPE_V2SF_V2SF:
23765 case V1DI_FTYPE_V1DI_V1DI:
23766 case V1DI_FTYPE_V8QI_V8QI:
23767 case V1DI_FTYPE_V2SI_V2SI:
23768 if (comparison == UNKNOWN)
23769 return ix86_expand_binop_builtin (icode, exp, target);
23772 case V4SF_FTYPE_V4SF_V4SF_SWAP:
23773 case V2DF_FTYPE_V2DF_V2DF_SWAP:
23774 gcc_assert (comparison != UNKNOWN);
23778 case V8HI_FTYPE_V8HI_V8HI_COUNT:
23779 case V8HI_FTYPE_V8HI_SI_COUNT:
23780 case V4SI_FTYPE_V4SI_V4SI_COUNT:
23781 case V4SI_FTYPE_V4SI_SI_COUNT:
23782 case V4HI_FTYPE_V4HI_V4HI_COUNT:
23783 case V4HI_FTYPE_V4HI_SI_COUNT:
23784 case V2DI_FTYPE_V2DI_V2DI_COUNT:
23785 case V2DI_FTYPE_V2DI_SI_COUNT:
23786 case V2SI_FTYPE_V2SI_V2SI_COUNT:
23787 case V2SI_FTYPE_V2SI_SI_COUNT:
23788 case V1DI_FTYPE_V1DI_V1DI_COUNT:
23789 case V1DI_FTYPE_V1DI_SI_COUNT:
23791 last_arg_count = true;
23793 case UINT64_FTYPE_UINT64_UINT64:
23794 case UINT_FTYPE_UINT_UINT:
23795 case UINT_FTYPE_UINT_USHORT:
23796 case UINT_FTYPE_UINT_UCHAR:
23797 case UINT16_FTYPE_UINT16_INT:
23798 case UINT8_FTYPE_UINT8_INT:
23801 case V2DI_FTYPE_V2DI_INT_CONVERT:
23804 nargs_constant = 1;
23806 case V8HI_FTYPE_V8HI_INT:
23807 case V8SF_FTYPE_V8SF_INT:
23808 case V4SI_FTYPE_V4SI_INT:
23809 case V4SI_FTYPE_V8SI_INT:
23810 case V4HI_FTYPE_V4HI_INT:
23811 case V4DF_FTYPE_V4DF_INT:
23812 case V4SF_FTYPE_V4SF_INT:
23813 case V4SF_FTYPE_V8SF_INT:
23814 case V2DI_FTYPE_V2DI_INT:
23815 case V2DF_FTYPE_V2DF_INT:
23816 case V2DF_FTYPE_V4DF_INT:
23818 nargs_constant = 1;
23820 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23821 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23822 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23823 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23824 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23827 case V16QI_FTYPE_V16QI_V16QI_INT:
23828 case V8HI_FTYPE_V8HI_V8HI_INT:
23829 case V8SI_FTYPE_V8SI_V8SI_INT:
23830 case V8SI_FTYPE_V8SI_V4SI_INT:
23831 case V8SF_FTYPE_V8SF_V8SF_INT:
23832 case V8SF_FTYPE_V8SF_V4SF_INT:
23833 case V4SI_FTYPE_V4SI_V4SI_INT:
23834 case V4DF_FTYPE_V4DF_V4DF_INT:
23835 case V4DF_FTYPE_V4DF_V2DF_INT:
23836 case V4SF_FTYPE_V4SF_V4SF_INT:
23837 case V2DI_FTYPE_V2DI_V2DI_INT:
23838 case V2DF_FTYPE_V2DF_V2DF_INT:
23840 nargs_constant = 1;
23842 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
23845 nargs_constant = 1;
23847 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
23850 nargs_constant = 1;
23852 case V2DI_FTYPE_V2DI_UINT_UINT:
23854 nargs_constant = 2;
23856 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
23857 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
23858 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
23859 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
23861 nargs_constant = 1;
23863 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23865 nargs_constant = 2;
23868 gcc_unreachable ();
23871 gcc_assert (nargs <= ARRAY_SIZE (args));
23873 if (comparison != UNKNOWN)
23875 gcc_assert (nargs == 2);
23876 return ix86_expand_sse_compare (d, exp, target, swap);
23879 if (rmode == VOIDmode || rmode == tmode)
if (optimize || !target
23883 || GET_MODE (target) != tmode
23884 || ! (*insn_p->operand[0].predicate) (target, tmode))
23885 target = gen_reg_rtx (tmode);
23886 real_target = target;
23890 target = gen_reg_rtx (rmode);
23891 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
23894 for (i = 0; i < nargs; i++)
23896 tree arg = CALL_EXPR_ARG (exp, i);
23897 rtx op = expand_normal (arg);
23898 enum machine_mode mode = insn_p->operand[i + 1].mode;
23899 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
23901 if (last_arg_count && (i + 1) == nargs)
23903 /* SIMD shift insns take either an 8-bit immediate or a
23904 register as the count. But builtin functions take an int as
23905 the count. If the count doesn't match, we put it in a register. */
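/* For instance, _mm_slli_epi32 passes its int count through a
   *_COUNT builtin; a count that is not a matching immediate is
   copied into a register below. */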
23908 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
23909 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
23910 op = copy_to_reg (op);
23913 else if ((nargs - i) <= nargs_constant)
if (!match)
switch (icode)
23918 case CODE_FOR_sse4_1_roundpd:
23919 case CODE_FOR_sse4_1_roundps:
23920 case CODE_FOR_sse4_1_roundsd:
23921 case CODE_FOR_sse4_1_roundss:
23922 case CODE_FOR_sse4_1_blendps:
23923 case CODE_FOR_avx_blendpd256:
23924 case CODE_FOR_avx_vpermilv4df:
23925 case CODE_FOR_avx_roundpd256:
23926 case CODE_FOR_avx_roundps256:
23927 error ("the last argument must be a 4-bit immediate");
return const0_rtx;
23930 case CODE_FOR_sse4_1_blendpd:
23931 case CODE_FOR_avx_vpermilv2df:
23932 case CODE_FOR_xop_vpermil2v2df3:
23933 case CODE_FOR_xop_vpermil2v4sf3:
23934 case CODE_FOR_xop_vpermil2v4df3:
23935 case CODE_FOR_xop_vpermil2v8sf3:
23936 error ("the last argument must be a 2-bit immediate");
return const0_rtx;
23939 case CODE_FOR_avx_vextractf128v4df:
23940 case CODE_FOR_avx_vextractf128v8sf:
23941 case CODE_FOR_avx_vextractf128v8si:
23942 case CODE_FOR_avx_vinsertf128v4df:
23943 case CODE_FOR_avx_vinsertf128v8sf:
23944 case CODE_FOR_avx_vinsertf128v8si:
23945 error ("the last argument must be a 1-bit immediate");
return const0_rtx;
23948 case CODE_FOR_avx_cmpsdv2df3:
23949 case CODE_FOR_avx_cmpssv4sf3:
23950 case CODE_FOR_avx_cmppdv2df3:
23951 case CODE_FOR_avx_cmppsv4sf3:
23952 case CODE_FOR_avx_cmppdv4df3:
23953 case CODE_FOR_avx_cmppsv8sf3:
23954 error ("the last argument must be a 5-bit immediate");
return const0_rtx;
23958 switch (nargs_constant)
23961 if ((nargs - i) == nargs_constant)
23963 error ("the next to last argument must be an 8-bit immediate");
23967 error ("the last argument must be an 8-bit immediate");
23970 gcc_unreachable ();
23977 if (VECTOR_MODE_P (mode))
23978 op = safe_vector_operand (op, mode);
23980 /* If we aren't optimizing, only allow one memory operand to
be generated. */
23982 if (memory_operand (op, mode))
num_memory++;
23985 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
23987 if (optimize || !match || num_memory > 1)
23988 op = copy_to_mode_reg (mode, op);
23992 op = copy_to_reg (op);
23993 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
args[i].op = op;
23998 args[i].mode = mode;
24004 pat = GEN_FCN (icode) (real_target, args[0].op);
24007 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
24010 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
args[2].op);
24014 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
24015 args[2].op, args[3].op);
24018 gcc_unreachable ();
24028 /* Subroutine of ix86_expand_builtin to take care of special insns
24029 with a variable number of operands. */
24032 ix86_expand_special_args_builtin (const struct builtin_description *d,
24033 tree exp, rtx target)
24037 unsigned int i, nargs, arg_adjust, memory;
24041 enum machine_mode mode;
24043 enum insn_code icode = d->icode;
24044 bool last_arg_constant = false;
24045 const struct insn_data *insn_p = &insn_data[icode];
24046 enum machine_mode tmode = insn_p->operand[0].mode;
24047 enum { load, store } klass;
24049 switch ((enum ix86_builtin_func_type) d->flag)
24051 case VOID_FTYPE_VOID:
24052 emit_insn (GEN_FCN (icode) (target));
return 0;
24054 case UINT64_FTYPE_VOID:
24059 case UINT64_FTYPE_PUNSIGNED:
24060 case V2DI_FTYPE_PV2DI:
24061 case V32QI_FTYPE_PCCHAR:
24062 case V16QI_FTYPE_PCCHAR:
24063 case V8SF_FTYPE_PCV4SF:
24064 case V8SF_FTYPE_PCFLOAT:
24065 case V4SF_FTYPE_PCFLOAT:
24066 case V4DF_FTYPE_PCV2DF:
24067 case V4DF_FTYPE_PCDOUBLE:
24068 case V2DF_FTYPE_PCDOUBLE:
24069 case VOID_FTYPE_PVOID:
24074 case VOID_FTYPE_PV2SF_V4SF:
24075 case VOID_FTYPE_PV4DI_V4DI:
24076 case VOID_FTYPE_PV2DI_V2DI:
24077 case VOID_FTYPE_PCHAR_V32QI:
24078 case VOID_FTYPE_PCHAR_V16QI:
24079 case VOID_FTYPE_PFLOAT_V8SF:
24080 case VOID_FTYPE_PFLOAT_V4SF:
24081 case VOID_FTYPE_PDOUBLE_V4DF:
24082 case VOID_FTYPE_PDOUBLE_V2DF:
24083 case VOID_FTYPE_PULONGLONG_ULONGLONG:
24084 case VOID_FTYPE_PINT_INT:
24087 /* Reserve memory operand for target. */
24088 memory = ARRAY_SIZE (args);
24090 case V4SF_FTYPE_V4SF_PCV2SF:
24091 case V2DF_FTYPE_V2DF_PCDOUBLE:
24096 case V8SF_FTYPE_PCV8SF_V8SF:
24097 case V4DF_FTYPE_PCV4DF_V4DF:
24098 case V4SF_FTYPE_PCV4SF_V4SF:
24099 case V2DF_FTYPE_PCV2DF_V2DF:
24104 case VOID_FTYPE_PV8SF_V8SF_V8SF:
24105 case VOID_FTYPE_PV4DF_V4DF_V4DF:
24106 case VOID_FTYPE_PV4SF_V4SF_V4SF:
24107 case VOID_FTYPE_PV2DF_V2DF_V2DF:
24110 /* Reserve memory operand for target. */
24111 memory = ARRAY_SIZE (args);
24113 case VOID_FTYPE_UINT_UINT_UINT:
24114 case VOID_FTYPE_UINT64_UINT_UINT:
24115 case UCHAR_FTYPE_UINT_UINT_UINT:
24116 case UCHAR_FTYPE_UINT64_UINT_UINT:
24119 memory = ARRAY_SIZE (args);
24120 last_arg_constant = true;
24123 gcc_unreachable ();
24126 gcc_assert (nargs <= ARRAY_SIZE (args));
24128 if (klass == store)
24130 arg = CALL_EXPR_ARG (exp, 0);
24131 op = expand_normal (arg);
24132 gcc_assert (target == 0);
24133 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
24141 || GET_MODE (target) != tmode
24142 || ! (*insn_p->operand[0].predicate) (target, tmode))
24143 target = gen_reg_rtx (tmode);
24146 for (i = 0; i < nargs; i++)
24148 enum machine_mode mode = insn_p->operand[i + 1].mode;
24151 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
24152 op = expand_normal (arg);
24153 match = (*insn_p->operand[i + 1].predicate) (op, mode);
24155 if (last_arg_constant && (i + 1) == nargs)
24159 if (icode == CODE_FOR_lwp_lwpvalsi3
24160 || icode == CODE_FOR_lwp_lwpinssi3
24161 || icode == CODE_FOR_lwp_lwpvaldi3
24162 || icode == CODE_FOR_lwp_lwpinsdi3)
24163 error ("the last argument must be a 32-bit immediate");
24165 error ("the last argument must be an 8-bit immediate");
24173 /* This must be the memory operand. */
24174 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
24175 gcc_assert (GET_MODE (op) == mode
24176 || GET_MODE (op) == VOIDmode);
24180 /* This must be register. */
24181 if (VECTOR_MODE_P (mode))
24182 op = safe_vector_operand (op, mode);
24184 gcc_assert (GET_MODE (op) == mode
24185 || GET_MODE (op) == VOIDmode);
24186 op = copy_to_mode_reg (mode, op);
args[i].op = op;
24191 args[i].mode = mode;
24197 pat = GEN_FCN (icode) (target);
24200 pat = GEN_FCN (icode) (target, args[0].op);
24203 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
24206 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
24209 gcc_unreachable ();
24215 return klass == store ? 0 : target;
24218 /* Return the integer constant in ARG. Constrain it to be in the range
24219 of the subparts of VEC_TYPE; issue an error if not. */
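/* For example, a V4SF vector type has four subparts, so the selector
   must be a constant in the range 0..3; __builtin_ia32_vec_ext_v4sf
   with a selector of 4 is rejected here. */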
24222 get_element_number (tree vec_type, tree arg)
24224 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
24226 if (!host_integerp (arg, 1)
24227 || (elt = tree_low_cst (arg, 1), elt > max))
24229 error ("selector must be an integer constant in the range 0..%wi", max);
return 0;
24236 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24237 ix86_expand_vector_init. We DO have language-level syntax for this, in
24238 the form of (type){ init-list }. Except that since we can't place emms
24239 instructions from inside the compiler, we can't allow the use of MMX
24240 registers unless the user explicitly asks for it. So we do *not* define
24241 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
24242 we have builtins invoked by mmintrin.h that give us license to emit
24243 these sorts of instructions. */
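/* For example, mmintrin.h's _mm_set_pi32 expands to
   __builtin_ia32_vec_init_v2si, which reaches
   ix86_expand_vec_init_builtin below as IX86_BUILTIN_VEC_INIT_V2SI. */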
24246 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
24248 enum machine_mode tmode = TYPE_MODE (type);
24249 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
24250 int i, n_elt = GET_MODE_NUNITS (tmode);
24251 rtvec v = rtvec_alloc (n_elt);
24253 gcc_assert (VECTOR_MODE_P (tmode));
24254 gcc_assert (call_expr_nargs (exp) == n_elt);
24256 for (i = 0; i < n_elt; ++i)
24258 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
24259 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
24262 if (!target || !register_operand (target, tmode))
24263 target = gen_reg_rtx (tmode);
24265 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
return target;
24269 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24270 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
24271 had a language-level syntax for referencing vector elements. */
24274 ix86_expand_vec_ext_builtin (tree exp, rtx target)
24276 enum machine_mode tmode, mode0;
24281 arg0 = CALL_EXPR_ARG (exp, 0);
24282 arg1 = CALL_EXPR_ARG (exp, 1);
24284 op0 = expand_normal (arg0);
24285 elt = get_element_number (TREE_TYPE (arg0), arg1);
24287 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24288 mode0 = TYPE_MODE (TREE_TYPE (arg0));
24289 gcc_assert (VECTOR_MODE_P (mode0));
24291 op0 = force_reg (mode0, op0);
24293 if (optimize || !target || !register_operand (target, tmode))
24294 target = gen_reg_rtx (tmode);
24296 ix86_expand_vector_extract (true, target, op0, elt);
return target;
24301 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24302 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
24303 a language-level syntax for referencing vector elements. */
24306 ix86_expand_vec_set_builtin (tree exp)
24308 enum machine_mode tmode, mode1;
24309 tree arg0, arg1, arg2;
unsigned elt;
24311 rtx op0, op1, target;
24313 arg0 = CALL_EXPR_ARG (exp, 0);
24314 arg1 = CALL_EXPR_ARG (exp, 1);
24315 arg2 = CALL_EXPR_ARG (exp, 2);
24317 tmode = TYPE_MODE (TREE_TYPE (arg0));
24318 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24319 gcc_assert (VECTOR_MODE_P (tmode));
24321 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24322 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24323 elt = get_element_number (TREE_TYPE (arg0), arg2);
24325 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24326 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24328 op0 = force_reg (tmode, op0);
24329 op1 = force_reg (mode1, op1);
24331 /* OP0 is the source of these builtin functions and shouldn't be
24332 modified. Create a copy, use it and return it as target. */
24333 target = gen_reg_rtx (tmode);
24334 emit_move_insn (target, op0);
24335 ix86_expand_vector_set (true, target, op1, elt);
return target;
24340 /* Expand an expression EXP that calls a built-in function,
24341 with result going to TARGET if that's convenient
24342 (and in mode MODE if that's convenient).
24343 SUBTARGET may be used as the target for computing one of EXP's operands.
24344 IGNORE is nonzero if the value is to be ignored. */
24347 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24348 enum machine_mode mode ATTRIBUTE_UNUSED,
24349 int ignore ATTRIBUTE_UNUSED)
24351 const struct builtin_description *d;
size_t i;
24353 enum insn_code icode;
24354 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24355 tree arg0, arg1, arg2;
24356 rtx op0, op1, op2, pat;
24357 enum machine_mode mode0, mode1, mode2;
24358 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
24360 /* Determine whether the builtin function is available under the current ISA.
24361 Originally the builtin was not created if it wasn't applicable to the
24362 current ISA based on the command line switches. With function specific
24363 options, we need to check in the context of the function making the call
24364 whether it is supported. */
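/* For example, calling __builtin_ia32_addps from a function compiled
   without SSE enabled reaches the error below with an isa option
   string such as "-msse". */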
24365 if (ix86_builtins_isa[fcode].isa
24366 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
24368 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24369 NULL, NULL, false);
if (!opts)
24372 error ("%qE needs unknown isa option", fndecl);
24375 gcc_assert (opts != NULL);
24376 error ("%qE needs isa option %s", fndecl, opts);
24384 case IX86_BUILTIN_MASKMOVQ:
24385 case IX86_BUILTIN_MASKMOVDQU:
24386 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24387 ? CODE_FOR_mmx_maskmovq
24388 : CODE_FOR_sse2_maskmovdqu);
24389 /* Note the arg order is different from the operand order. */
24390 arg1 = CALL_EXPR_ARG (exp, 0);
24391 arg2 = CALL_EXPR_ARG (exp, 1);
24392 arg0 = CALL_EXPR_ARG (exp, 2);
24393 op0 = expand_normal (arg0);
24394 op1 = expand_normal (arg1);
24395 op2 = expand_normal (arg2);
24396 mode0 = insn_data[icode].operand[0].mode;
24397 mode1 = insn_data[icode].operand[1].mode;
24398 mode2 = insn_data[icode].operand[2].mode;
24400 op0 = force_reg (Pmode, op0);
24401 op0 = gen_rtx_MEM (mode1, op0);
24403 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24404 op0 = copy_to_mode_reg (mode0, op0);
24405 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24406 op1 = copy_to_mode_reg (mode1, op1);
24407 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24408 op2 = copy_to_mode_reg (mode2, op2);
24409 pat = GEN_FCN (icode) (op0, op1, op2);
if (! pat)
return 0;
emit_insn (pat);
return 0;
24415 case IX86_BUILTIN_LDMXCSR:
24416 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24417 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24418 emit_move_insn (target, op0);
24419 emit_insn (gen_sse_ldmxcsr (target));
return 0;
24422 case IX86_BUILTIN_STMXCSR:
24423 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24424 emit_insn (gen_sse_stmxcsr (target));
24425 return copy_to_mode_reg (SImode, target);
24427 case IX86_BUILTIN_CLFLUSH:
24428 arg0 = CALL_EXPR_ARG (exp, 0);
24429 op0 = expand_normal (arg0);
24430 icode = CODE_FOR_sse2_clflush;
24431 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24432 op0 = copy_to_mode_reg (Pmode, op0);
24434 emit_insn (gen_sse2_clflush (op0));
return 0;
24437 case IX86_BUILTIN_MONITOR:
24438 arg0 = CALL_EXPR_ARG (exp, 0);
24439 arg1 = CALL_EXPR_ARG (exp, 1);
24440 arg2 = CALL_EXPR_ARG (exp, 2);
24441 op0 = expand_normal (arg0);
24442 op1 = expand_normal (arg1);
24443 op2 = expand_normal (arg2);
if (!REG_P (op0))
24445 op0 = copy_to_mode_reg (Pmode, op0);
if (!REG_P (op1))
24447 op1 = copy_to_mode_reg (SImode, op1);
if (!REG_P (op2))
24449 op2 = copy_to_mode_reg (SImode, op2);
24450 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
return 0;
24453 case IX86_BUILTIN_MWAIT:
24454 arg0 = CALL_EXPR_ARG (exp, 0);
24455 arg1 = CALL_EXPR_ARG (exp, 1);
24456 op0 = expand_normal (arg0);
24457 op1 = expand_normal (arg1);
if (!REG_P (op0))
24459 op0 = copy_to_mode_reg (SImode, op0);
if (!REG_P (op1))
24461 op1 = copy_to_mode_reg (SImode, op1);
24462 emit_insn (gen_sse3_mwait (op0, op1));
return 0;
24465 case IX86_BUILTIN_VEC_INIT_V2SI:
24466 case IX86_BUILTIN_VEC_INIT_V4HI:
24467 case IX86_BUILTIN_VEC_INIT_V8QI:
24468 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24470 case IX86_BUILTIN_VEC_EXT_V2DF:
24471 case IX86_BUILTIN_VEC_EXT_V2DI:
24472 case IX86_BUILTIN_VEC_EXT_V4SF:
24473 case IX86_BUILTIN_VEC_EXT_V4SI:
24474 case IX86_BUILTIN_VEC_EXT_V8HI:
24475 case IX86_BUILTIN_VEC_EXT_V2SI:
24476 case IX86_BUILTIN_VEC_EXT_V4HI:
24477 case IX86_BUILTIN_VEC_EXT_V16QI:
24478 return ix86_expand_vec_ext_builtin (exp, target);
24480 case IX86_BUILTIN_VEC_SET_V2DI:
24481 case IX86_BUILTIN_VEC_SET_V4SF:
24482 case IX86_BUILTIN_VEC_SET_V4SI:
24483 case IX86_BUILTIN_VEC_SET_V8HI:
24484 case IX86_BUILTIN_VEC_SET_V4HI:
24485 case IX86_BUILTIN_VEC_SET_V16QI:
24486 return ix86_expand_vec_set_builtin (exp);
24488 case IX86_BUILTIN_VEC_PERM_V2DF:
24489 case IX86_BUILTIN_VEC_PERM_V4SF:
24490 case IX86_BUILTIN_VEC_PERM_V2DI:
24491 case IX86_BUILTIN_VEC_PERM_V4SI:
24492 case IX86_BUILTIN_VEC_PERM_V8HI:
24493 case IX86_BUILTIN_VEC_PERM_V16QI:
24494 case IX86_BUILTIN_VEC_PERM_V2DI_U:
24495 case IX86_BUILTIN_VEC_PERM_V4SI_U:
24496 case IX86_BUILTIN_VEC_PERM_V8HI_U:
24497 case IX86_BUILTIN_VEC_PERM_V16QI_U:
24498 case IX86_BUILTIN_VEC_PERM_V4DF:
24499 case IX86_BUILTIN_VEC_PERM_V8SF:
24500 return ix86_expand_vec_perm_builtin (exp);
24502 case IX86_BUILTIN_INFQ:
24503 case IX86_BUILTIN_HUGE_VALQ:
24505 REAL_VALUE_TYPE inf;
rtx tmp;

real_inf (&inf);
24509 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24511 tmp = validize_mem (force_const_mem (mode, tmp));
if (target == 0)
24514 target = gen_reg_rtx (mode);
24516 emit_move_insn (target, tmp);
return target;
24520 case IX86_BUILTIN_LLWPCB:
24521 arg0 = CALL_EXPR_ARG (exp, 0);
24522 op0 = expand_normal (arg0);
24523 icode = CODE_FOR_lwp_llwpcb;
24524 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24525 op0 = copy_to_mode_reg (Pmode, op0);
24526 emit_insn (gen_lwp_llwpcb (op0));
return 0;
24529 case IX86_BUILTIN_SLWPCB:
24530 icode = CODE_FOR_lwp_slwpcb;
if (!target
24532 || ! (*insn_data[icode].operand[0].predicate) (target, Pmode))
24533 target = gen_reg_rtx (Pmode);
24534 emit_insn (gen_lwp_slwpcb (target));
return target;
24541 for (i = 0, d = bdesc_special_args;
24542 i < ARRAY_SIZE (bdesc_special_args);
i++, d++)
24544 if (d->code == fcode)
24545 return ix86_expand_special_args_builtin (d, exp, target);
24547 for (i = 0, d = bdesc_args;
24548 i < ARRAY_SIZE (bdesc_args);
i++, d++)
24550 if (d->code == fcode)
24553 case IX86_BUILTIN_FABSQ:
24554 case IX86_BUILTIN_COPYSIGNQ:
if (!TARGET_SSE2)
24556 /* Emit a normal call if SSE2 isn't available. */
24557 return expand_call (exp, target, ignore);
24559 return ix86_expand_args_builtin (d, exp, target);
24562 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24563 if (d->code == fcode)
24564 return ix86_expand_sse_comi (d, exp, target);
24566 for (i = 0, d = bdesc_pcmpestr;
24567 i < ARRAY_SIZE (bdesc_pcmpestr);
i++, d++)
24569 if (d->code == fcode)
24570 return ix86_expand_sse_pcmpestr (d, exp, target);
24572 for (i = 0, d = bdesc_pcmpistr;
24573 i < ARRAY_SIZE (bdesc_pcmpistr);
i++, d++)
24575 if (d->code == fcode)
24576 return ix86_expand_sse_pcmpistr (d, exp, target);
24578 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
24579 if (d->code == fcode)
24580 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
24581 (enum ix86_builtin_func_type)
24582 d->flag, d->comparison);
24584 gcc_unreachable ();
24587 /* Returns a function decl for a vectorized version of the builtin function
24588 with builtin function code FN and the result vector type TYPE, or NULL_TREE
24589 if it is not available. */
24592 ix86_builtin_vectorized_function (tree fndecl, tree type_out,
tree type_in)
24595 enum machine_mode in_mode, out_mode;
int in_n, out_n;
24597 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
24599 if (TREE_CODE (type_out) != VECTOR_TYPE
24600 || TREE_CODE (type_in) != VECTOR_TYPE
24601 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
return NULL_TREE;
24604 out_mode = TYPE_MODE (TREE_TYPE (type_out));
24605 out_n = TYPE_VECTOR_SUBPARTS (type_out);
24606 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24607 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24611 case BUILT_IN_SQRT:
24612 if (out_mode == DFmode && out_n == 2
24613 && in_mode == DFmode && in_n == 2)
24614 return ix86_builtins[IX86_BUILTIN_SQRTPD];
24617 case BUILT_IN_SQRTF:
24618 if (out_mode == SFmode && out_n == 4
24619 && in_mode == SFmode && in_n == 4)
24620 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
24623 case BUILT_IN_LRINT:
24624 if (out_mode == SImode && out_n == 4
24625 && in_mode == DFmode && in_n == 2)
24626 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
24629 case BUILT_IN_LRINTF:
24630 if (out_mode == SImode && out_n == 4
24631 && in_mode == SFmode && in_n == 4)
24632 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
24635 case BUILT_IN_COPYSIGN:
24636 if (out_mode == DFmode && out_n == 2
24637 && in_mode == DFmode && in_n == 2)
24638 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
24641 case BUILT_IN_COPYSIGNF:
24642 if (out_mode == SFmode && out_n == 4
24643 && in_mode == SFmode && in_n == 4)
24644 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
24651 /* Dispatch to a handler for a vectorization library. */
24652 if (ix86_veclib_handler)
24653 return (*ix86_veclib_handler) ((enum built_in_function) fn, type_out,
type_in);
24659 /* Handler for an SVML-style interface to
24660 a library with vectorized intrinsics. */
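/* For example, BUILT_IN_SINF is mapped by the mangling below to the
   SVML entry point "vmlsSin4" and BUILT_IN_SIN to "vmldSin2". */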
24663 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
char name[20];
24666 tree fntype, new_fndecl, args;
unsigned arity;
const char *bname;
24669 enum machine_mode el_mode, in_mode;
int n, in_n;
24672 /* The SVML is suitable for unsafe math only. */
24673 if (!flag_unsafe_math_optimizations)
24676 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24677 n = TYPE_VECTOR_SUBPARTS (type_out);
24678 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24679 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24680 if (el_mode != in_mode
|| n != in_n)
return NULL_TREE;
24688 case BUILT_IN_LOG10:
24690 case BUILT_IN_TANH:
24692 case BUILT_IN_ATAN:
24693 case BUILT_IN_ATAN2:
24694 case BUILT_IN_ATANH:
24695 case BUILT_IN_CBRT:
24696 case BUILT_IN_SINH:
24698 case BUILT_IN_ASINH:
24699 case BUILT_IN_ASIN:
24700 case BUILT_IN_COSH:
24702 case BUILT_IN_ACOSH:
24703 case BUILT_IN_ACOS:
24704 if (el_mode != DFmode || n != 2)
return NULL_TREE;
24708 case BUILT_IN_EXPF:
24709 case BUILT_IN_LOGF:
24710 case BUILT_IN_LOG10F:
24711 case BUILT_IN_POWF:
24712 case BUILT_IN_TANHF:
24713 case BUILT_IN_TANF:
24714 case BUILT_IN_ATANF:
24715 case BUILT_IN_ATAN2F:
24716 case BUILT_IN_ATANHF:
24717 case BUILT_IN_CBRTF:
24718 case BUILT_IN_SINHF:
24719 case BUILT_IN_SINF:
24720 case BUILT_IN_ASINHF:
24721 case BUILT_IN_ASINF:
24722 case BUILT_IN_COSHF:
24723 case BUILT_IN_COSF:
24724 case BUILT_IN_ACOSHF:
24725 case BUILT_IN_ACOSF:
24726 if (el_mode != SFmode || n != 4)
return NULL_TREE;
24734 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24736 if (fn == BUILT_IN_LOGF)
24737 strcpy (name, "vmlsLn4");
24738 else if (fn == BUILT_IN_LOG)
24739 strcpy (name, "vmldLn2");
24742 sprintf (name, "vmls%s", bname+10);
24743 name[strlen (name)-1] = '4';
24746 sprintf (name, "vmld%s2", bname+10);
24748 /* Convert to uppercase. */
name[4] &= ~0x20;
24752 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24753 args = TREE_CHAIN (args))
arity++;

if (arity == 1)
24757 fntype = build_function_type_list (type_out, type_in, NULL);
else
24759 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24761 /* Build a function declaration for the vectorized function. */
24762 new_fndecl = build_decl (BUILTINS_LOCATION,
24763 FUNCTION_DECL, get_identifier (name), fntype);
24764 TREE_PUBLIC (new_fndecl) = 1;
24765 DECL_EXTERNAL (new_fndecl) = 1;
24766 DECL_IS_NOVOPS (new_fndecl) = 1;
24767 TREE_READONLY (new_fndecl) = 1;

return new_fndecl;
24772 /* Handler for an ACML-style interface to
24773 a library with vectorized intrinsics. */
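/* For example, BUILT_IN_SINF resolves to the ACML entry point
   "__vrs4_sinf" and BUILT_IN_SIN to "__vrd2_sin" under the
   "__vr.._" naming template used below. */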
24776 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
24778 char name[20] = "__vr.._";
24779 tree fntype, new_fndecl, args;
unsigned arity;
const char *bname;
24782 enum machine_mode el_mode, in_mode;
int n, in_n;
24785 /* The ACML is 64-bit only and suitable for unsafe math only, as
24786 it does not correctly support parts of IEEE with the required
24787 precision, such as denormals. */
if (!TARGET_64BIT
24789 || !flag_unsafe_math_optimizations)
return NULL_TREE;
24792 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24793 n = TYPE_VECTOR_SUBPARTS (type_out);
24794 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24795 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24796 if (el_mode != in_mode
|| n != in_n)
return NULL_TREE;
24806 case BUILT_IN_LOG2:
24807 case BUILT_IN_LOG10:
24810 if (el_mode != DFmode
|| n != 2)
return NULL_TREE;
24815 case BUILT_IN_SINF:
24816 case BUILT_IN_COSF:
24817 case BUILT_IN_EXPF:
24818 case BUILT_IN_POWF:
24819 case BUILT_IN_LOGF:
24820 case BUILT_IN_LOG2F:
24821 case BUILT_IN_LOG10F:
24824 if (el_mode != SFmode
|| n != 4)
return NULL_TREE;
24833 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24834 sprintf (name + 7, "%s", bname+10);
24837 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24838 args = TREE_CHAIN (args))
arity++;

if (arity == 1)
24842 fntype = build_function_type_list (type_out, type_in, NULL);
else
24844 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24846 /* Build a function declaration for the vectorized function. */
24847 new_fndecl = build_decl (BUILTINS_LOCATION,
24848 FUNCTION_DECL, get_identifier (name), fntype);
24849 TREE_PUBLIC (new_fndecl) = 1;
24850 DECL_EXTERNAL (new_fndecl) = 1;
24851 DECL_IS_NOVOPS (new_fndecl) = 1;
24852 TREE_READONLY (new_fndecl) = 1;

return new_fndecl;
24858 /* Returns a decl of a function that implements conversion of an integer vector
24859 into a floating-point vector, or vice-versa. DEST_TYPE and SRC_TYPE
24860 are the types involved when converting according to CODE.
24861 Return NULL_TREE if it is not available. */
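/* For example, a FLOAT_EXPR converting V4SI to V4SF yields
   IX86_BUILTIN_CVTDQ2PS (or IX86_BUILTIN_CVTUDQ2PS when the source
   is unsigned), matching the cvtdq2ps instruction. */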
24864 ix86_vectorize_builtin_conversion (unsigned int code,
24865 tree dest_type, tree src_type)
24873 switch (TYPE_MODE (src_type))
24876 switch (TYPE_MODE (dest_type))
24879 return (TYPE_UNSIGNED (src_type)
24880 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
24881 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
24883 return (TYPE_UNSIGNED (src_type)
24885 : ix86_builtins[IX86_BUILTIN_CVTDQ2PD256]);
24891 switch (TYPE_MODE (dest_type))
24894 return (TYPE_UNSIGNED (src_type)
24896 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
24905 case FIX_TRUNC_EXPR:
24906 switch (TYPE_MODE (dest_type))
24909 switch (TYPE_MODE (src_type))
24912 return (TYPE_UNSIGNED (dest_type)
24914 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ]);
24916 return (TYPE_UNSIGNED (dest_type)
24918 : ix86_builtins[IX86_BUILTIN_CVTTPD2DQ256]);
24925 switch (TYPE_MODE (src_type))
24928 return (TYPE_UNSIGNED (dest_type)
24930 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ256]);
24947 /* Returns a code for a target-specific builtin that implements the
24948 reciprocal of the function, or NULL_TREE if not available. */
24951 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
24952 bool sqrt ATTRIBUTE_UNUSED)
24954 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
24955 && flag_finite_math_only && !flag_trapping_math
24956 && flag_unsafe_math_optimizations))
return NULL_TREE;
24960 /* Machine dependent builtins. */
24963 /* Vectorized version of sqrt to rsqrt conversion. */
24964 case IX86_BUILTIN_SQRTPS_NR:
24965 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
24971 /* Normal builtins. */
24974 /* Sqrt to rsqrt conversion. */
24975 case BUILT_IN_SQRTF:
24976 return ix86_builtins[IX86_BUILTIN_RSQRTF];
24983 /* Helper for avx_vpermilps256_operand et al. This is also used by
24984 the expansion functions to turn the parallel back into a mask.
24985 The return value is 0 for no match and the imm8+1 for a match. */
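/* For example (illustrative): for V4SFmode and the parallel
   (2 3 0 1), the loop below builds 2 | 3<<2 | 0<<4 | 1<<6 = 0x4e,
   and the function returns 0x4f. */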
24988 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
24990 unsigned i, nelt = GET_MODE_NUNITS (mode);
unsigned mask = 0;
24992 unsigned char ipar[8];
24994 if (XVECLEN (par, 0) != (int) nelt)
return 0;
24997 /* Validate that all of the elements are constants, and not totally
24998 out of range. Copy the data into an integral array to make the
24999 subsequent checks easier. */
25000 for (i = 0; i < nelt; ++i)
25002 rtx er = XVECEXP (par, 0, i);
25003 unsigned HOST_WIDE_INT ei;
25005 if (!CONST_INT_P (er))
return 0;
ei = INTVAL (er);
if (ei >= nelt)
return 0;
ipar[i] = ei;
25016 /* In the 256-bit DFmode case, we can only move elements within
a 128-bit lane. */
25018 for (i = 0; i < 2; ++i)
25022 mask |= ipar[i] << i;
25024 for (i = 2; i < 4; ++i)
25028 mask |= (ipar[i] - 2) << i;
25033 /* In the 256-bit SFmode case, we have full freedom of movement
25034 within the low 128-bit lane, but the high 128-bit lane must
25035 mirror the exact same pattern. */
25036 for (i = 0; i < 4; ++i)
25037 if (ipar[i] + 4 != ipar[i + 4])
return 0;
25044 /* In the 128-bit case, we have full freedom in the placement of
25045 the elements from the source operand. */
25046 for (i = 0; i < nelt; ++i)
25047 mask |= ipar[i] << (i * (nelt / 2));
25051 gcc_unreachable ();
25054 /* Make sure success has a non-zero value by adding one. */
return mask + 1;
25058 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
25059 the expansion functions to turn the parallel back into a mask.
25060 The return value is 0 for no match and the imm8+1 for a match. */
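/* For example (illustrative): for V4DFmode and the parallel
   (0 1 4 5), the low half selects lane 0 of the first operand and
   the high half lane 0 of the second (elements 4-5), giving the
   immediate 0x20 and a return value of 0x21. */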
25063 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
25065 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
unsigned mask = 0;
25067 unsigned char ipar[8];
25069 if (XVECLEN (par, 0) != (int) nelt)
return 0;
25072 /* Validate that all of the elements are constants, and not totally
25073 out of range. Copy the data into an integral array to make the
25074 subsequent checks easier. */
25075 for (i = 0; i < nelt; ++i)
25077 rtx er = XVECEXP (par, 0, i);
25078 unsigned HOST_WIDE_INT ei;
25080 if (!CONST_INT_P (er))
return 0;
ei = INTVAL (er);
25083 if (ei >= 2 * nelt)
return 0;
ipar[i] = ei;
25088 /* Validate that the halves of the permute are halves. */
25089 for (i = 0; i < nelt2 - 1; ++i)
25090 if (ipar[i] + 1 != ipar[i + 1])
return 0;
25092 for (i = nelt2; i < nelt - 1; ++i)
25093 if (ipar[i] + 1 != ipar[i + 1])
return 0;
25096 /* Reconstruct the mask. */
25097 for (i = 0; i < 2; ++i)
25099 unsigned e = ipar[i * nelt2];
25103 mask |= e << (i * 4);
25106 /* Make sure success has a non-zero value by adding one. */
return mask + 1;
25111 /* Store OPERAND to memory after reload is completed. This means
25112 that we can't easily use assign_stack_local. */
25114 ix86_force_to_memory (enum machine_mode mode, rtx operand)
25118 gcc_assert (reload_completed);
25119 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
25121 result = gen_rtx_MEM (mode,
25122 gen_rtx_PLUS (Pmode,
stack_pointer_rtx,
25124 GEN_INT (-RED_ZONE_SIZE)));
25125 emit_move_insn (result, operand);
25127 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
25133 operand = gen_lowpart (DImode, operand);
emit_insn (
25137 gen_rtx_SET (VOIDmode,
25138 gen_rtx_MEM (DImode,
25139 gen_rtx_PRE_DEC (DImode,
25140 stack_pointer_rtx)),
operand));
25144 gcc_unreachable ();
25146 result = gen_rtx_MEM (mode, stack_pointer_rtx);
25155 split_di (&operand, 1, operands, operands + 1);
emit_insn (
25157 gen_rtx_SET (VOIDmode,
25158 gen_rtx_MEM (SImode,
25159 gen_rtx_PRE_DEC (Pmode,
25160 stack_pointer_rtx)),
operands[1]));
emit_insn (
25163 gen_rtx_SET (VOIDmode,
25164 gen_rtx_MEM (SImode,
25165 gen_rtx_PRE_DEC (Pmode,
25166 stack_pointer_rtx)),
operands[0]));
25171 /* Store HImodes as SImodes. */
25172 operand = gen_lowpart (SImode, operand);
emit_insn (
25176 gen_rtx_SET (VOIDmode,
25177 gen_rtx_MEM (GET_MODE (operand),
25178 gen_rtx_PRE_DEC (SImode,
25179 stack_pointer_rtx)),
operand));
25183 gcc_unreachable ();
25185 result = gen_rtx_MEM (mode, stack_pointer_rtx);

return result;
25190 /* Free the operand from memory. */
25192 ix86_free_from_memory (enum machine_mode mode)
25194 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
int size;

25198 if (mode == DImode || TARGET_64BIT)
size = 8;
else
size = 4;
25202 /* Use LEA to deallocate stack space. In peephole2 it will be converted
25203 to a pop or add instruction if registers are available. */
25204 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
25205 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
GEN_INT (size))));
25210 /* Implement TARGET_IRA_COVER_CLASSES. If -mfpmath=sse, we prefer
25211 SSE_REGS to FLOAT_REGS if their costs for a pseudo are the
same. */
25213 static const enum reg_class *
25214 i386_ira_cover_classes (void)
25216 static const enum reg_class sse_fpmath_classes[] = {
25217 GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
25219 static const enum reg_class no_sse_fpmath_classes[] = {
25220 GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
25223 return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
25226 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
25227 QImode must go into class Q_REGS.
25228 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
25229 movdf to do mem-to-mem moves through integer regs. */
25231 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
25233 enum machine_mode mode = GET_MODE (x);
25235 /* We're only allowed to return a subclass of CLASS. Many of the
25236 following checks fail for NO_REGS, so eliminate that early. */
25237 if (regclass == NO_REGS)
return NO_REGS;
25240 /* All classes can load zeros. */
25241 if (x == CONST0_RTX (mode))
return regclass;
25244 /* Force constants into memory if we are loading a (nonzero) constant into
25245 an MMX or SSE register. This is because there are no MMX/SSE instructions
25246 to load from a constant. */
if (CONSTANT_P (x)
25248 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
return NO_REGS;
25251 /* Prefer SSE regs only, if we can use them for math. */
25252 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
25253 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
25255 /* Floating-point constants need more complex checks. */
25256 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
25258 /* General regs can load everything. */
25259 if (reg_class_subset_p (regclass, GENERAL_REGS))
25262 /* Floats can load 0 and 1 plus some others. Note that we eliminated
25263 zero above. We only want to wind up preferring 80387 registers if
25264 we plan on doing computation with them. */
if (TARGET_80387
25266 && standard_80387_constant_p (x))
25268 /* Limit class to non-sse. */
25269 if (regclass == FLOAT_SSE_REGS)
return FLOAT_REGS;
25271 if (regclass == FP_TOP_SSE_REGS)
return FP_TOP_REG;
25273 if (regclass == FP_SECOND_SSE_REGS)
25274 return FP_SECOND_REG;
25275 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
return regclass;
25282 /* Generally when we see PLUS here, it's the function invariant
25283 (plus soft-fp const_int), which can only be computed into general
regs. */
25285 if (GET_CODE (x) == PLUS)
25286 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
25288 /* QImode constants are easy to load, but non-constant QImode data
25289 must go into Q_REGS. */
25290 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
25292 if (reg_class_subset_p (regclass, Q_REGS))
return regclass;
25294 if (reg_class_subset_p (Q_REGS, regclass))
return Q_REGS;
25302 /* Discourage putting floating-point values in SSE registers unless
25303 SSE math is being used, and likewise for the 387 registers. */
25305 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
25307 enum machine_mode mode = GET_MODE (x);
25309 /* Restrict the output reload class to the register bank that we are doing
25310 math on. If we would like not to return a subset of CLASS, reject this
25311 alternative: if reload cannot do this, it will still use its choice. */
25313 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
25314 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
25316 if (X87_FLOAT_MODE_P (mode))
25318 if (regclass == FP_TOP_SSE_REGS)
return FP_TOP_REG;
25320 else if (regclass == FP_SECOND_SSE_REGS)
25321 return FP_SECOND_REG;
25323 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;

return regclass;
25329 static enum reg_class
25330 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
25331 enum machine_mode mode,
25332 secondary_reload_info *sri ATTRIBUTE_UNUSED)
25334 /* QImode spills from non-QI registers require an
25335 intermediate register on 32-bit targets. */
25336 if (!in_p && mode == QImode && !TARGET_64BIT
25337 && (rclass == GENERAL_REGS
25338 || rclass == LEGACY_REGS
25339 || rclass == INDEX_REGS))
25348 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
25349 regno = true_regnum (x);
25351 /* Return Q_REGS if the operand is in memory. */
25359 /* If we are copying between general and FP registers, we need a memory
25360 location. The same is true for SSE and MMX registers.
25362 To optimize register_move_cost performance, an inline variant is provided.
25364 The macro can't work reliably when one of the CLASSES is a class containing
25365 registers from multiple units (SSE, MMX, integer). We avoid this by never
25366 combining those units in a single alternative in the machine description.
25367 Ensure that this constraint holds to avoid unexpected surprises.
25369 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
25370 enforce these sanity checks. */
25373 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25374 enum machine_mode mode, int strict)
25376 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
25377 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
25378 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
25379 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
25380 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
25381 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
25383 gcc_assert (!strict);
25387 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
25390 /* ??? This is a lie. We do have moves between mmx/general, and for
25391 mmx/sse2. But by saying we need secondary memory we discourage the
25392 register allocator from using the mmx registers unless needed. */
25393 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
25396 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25398 /* SSE1 doesn't have any direct moves from other classes. */
25402 /* If the target says that inter-unit moves are more expensive
25403 than moving through memory, then don't generate them. */
25404 if (!TARGET_INTER_UNIT_MOVES)
25407 /* Between SSE and general, we have moves no larger than word size. */
25408 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
25416 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25417 enum machine_mode mode, int strict)
25419 return inline_secondary_memory_needed (class1, class2, mode, strict);
25422 /* Return true if the registers in CLASS cannot represent the change from
25423 modes FROM to TO. */
25426 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
25427 enum reg_class regclass)
25432 /* x87 registers can't do subreg at all, as all values are reformatted
25433 to extended precision. */
25434 if (MAYBE_FLOAT_CLASS_P (regclass))
25437 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25439 /* Vector registers do not support QI or HImode loads. If we don't
25440 disallow a change to these modes, reload will assume it's ok to
25441 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25442 the vec_dupv4hi pattern. */
25443 if (GET_MODE_SIZE (from) < 4)
25446 /* Vector registers do not support subreg with nonzero offsets, which
25447 are otherwise valid for integer registers. Since we can't see
25448 whether we have a nonzero offset from here, prohibit all
25449 nonparadoxical subregs changing size. */
25450 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
25457 /* Return the cost of moving data of mode M between a
25458 register and memory. A value of 2 is the default; this cost is
25459 relative to those in `REGISTER_MOVE_COST'.
25461 This function is used extensively by register_move_cost, which is used to
25462 build tables at startup. Make it inline in this case.
25463 When IN is 2, return the maximum of the in and out move costs.
25465 If moving between registers and memory is more expensive than
25466 between two registers, you should define this macro to express the
25467 relative cost.
25469 Also model the increased moving costs of QImode registers in non
25470 Q_REGS classes. */
25473 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25477 if (FLOAT_CLASS_P (regclass))
25495 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25496 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25498 if (SSE_CLASS_P (regclass))
25501 switch (GET_MODE_SIZE (mode))
25516 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25517 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25519 if (MMX_CLASS_P (regclass))
25522 switch (GET_MODE_SIZE (mode))
25534 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25535 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25537 switch (GET_MODE_SIZE (mode))
25540 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25543 return ix86_cost->int_store[0];
25544 if (TARGET_PARTIAL_REG_DEPENDENCY
25545 && optimize_function_for_speed_p (cfun))
25546 cost = ix86_cost->movzbl_load;
25548 cost = ix86_cost->int_load[0];
25550 return MAX (cost, ix86_cost->int_store[0]);
25556 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25558 return ix86_cost->movzbl_load;
25560 return ix86_cost->int_store[0] + 4;
25565 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25566 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25568 /* Compute the number of 32-bit moves needed. TFmode is moved as XFmode. */
25569 if (mode == TFmode)
25572 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25574 cost = ix86_cost->int_load[2];
25576 cost = ix86_cost->int_store[2];
25577 return (cost * (((int) GET_MODE_SIZE (mode)
25578 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
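/* A minimal sketch of the formula above, assuming 4-byte words.  The
   helper below is hypothetical and for exposition only, not used by the
   compiler: the mode size is rounded up to whole words and one per-word
   cost is charged per word, so a 12-byte XFmode value costs three word
   moves.  */

static inline int
example_wide_move_cost (int per_word_cost, int mode_size)
{
  const int word_bytes = 4;	/* Assumed UNITS_PER_WORD.  */

  return per_word_cost * ((mode_size + word_bytes - 1) / word_bytes);
}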
25583 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
25585 return inline_memory_move_cost (mode, regclass, in);
25589 /* Return the cost of moving data from a register in class CLASS1 to
25590 one in class CLASS2.
25592 It is not required that the cost always equal 2 when FROM is the same as TO;
25593 on some machines it is expensive to move between registers if they are not
25594 general registers. */
25597 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25598 enum reg_class class2)
25600 /* In case we require secondary memory, compute the cost of the store
25601 followed by a load. In order to avoid bad register allocation choices,
25602 we need this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
25604 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25608 cost += inline_memory_move_cost (mode, class1, 2);
25609 cost += inline_memory_move_cost (mode, class2, 2);
25611 /* In the case of copying from a general purpose register we may emit
25612 multiple stores followed by a single load, causing a memory size
25613 mismatch stall. Count this as an arbitrarily high cost of 20. */
25614 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25617 /* In the case of FP/MMX moves, the registers actually overlap, and we
25618 have to switch modes in order to treat them differently. */
25619 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25620 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25626 /* Moves between SSE/MMX and integer unit are expensive. */
25627 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25628 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25630 /* ??? By keeping the returned value relatively high, we limit the number
25631 of moves between integer and MMX/SSE registers for all targets.
25632 Additionally, a high value prevents problems with x86_modes_tieable_p(),
25633 where integer modes in MMX/SSE registers are not tieable
25634 because of missing QImode and HImode moves to, from or between
25635 MMX/SSE registers. */
25636 return MAX (8, ix86_cost->mmxsse_to_integer);
25638 if (MAYBE_FLOAT_CLASS_P (class1))
25639 return ix86_cost->fp_move;
25640 if (MAYBE_SSE_CLASS_P (class1))
25641 return ix86_cost->sse_move;
25642 if (MAYBE_MMX_CLASS_P (class1))
25643 return ix86_cost->mmx_move;
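/* A sketch of the through-memory pricing above (hypothetical helper,
   for exposition only, not used by the compiler): one insn, plus a
   store on one side and a load on the other, plus 20 whenever the
   store side splits into more hard registers than the load side.  */

static inline int
example_through_memory_cost (int class1_mem_cost, int class2_mem_cost,
			     int class1_nregs, int class2_nregs)
{
  int cost = 1 + class1_mem_cost + class2_mem_cost;

  /* Several narrow stores feeding one wide load cause a size
     mismatch stall; count that as an extra 20.  */
  if (class1_nregs > class2_nregs)
    cost += 20;
  return cost;
}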
25647 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25650 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25652 /* Flags, and only flags, can hold CCmode values. */
25653 if (CC_REGNO_P (regno))
25654 return GET_MODE_CLASS (mode) == MODE_CC;
25655 if (GET_MODE_CLASS (mode) == MODE_CC
25656 || GET_MODE_CLASS (mode) == MODE_RANDOM
25657 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25659 if (FP_REGNO_P (regno))
25660 return VALID_FP_MODE_P (mode);
25661 if (SSE_REGNO_P (regno))
25663 /* We implement the move patterns for all vector modes into and
25664 out of SSE registers, even when no operation instructions
25665 are available. The OImode move is available only when AVX is
25666 enabled. */
25667 return ((TARGET_AVX && mode == OImode)
25668 || VALID_AVX256_REG_MODE (mode)
25669 || VALID_SSE_REG_MODE (mode)
25670 || VALID_SSE2_REG_MODE (mode)
25671 || VALID_MMX_REG_MODE (mode)
25672 || VALID_MMX_REG_MODE_3DNOW (mode));
25674 if (MMX_REGNO_P (regno))
25676 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25677 so if the register is available at all, then we can move data of
25678 the given mode into or out of it. */
25679 return (VALID_MMX_REG_MODE (mode)
25680 || VALID_MMX_REG_MODE_3DNOW (mode));
25683 if (mode == QImode)
25685 /* Take care with QImode values - they can be in non-QI regs,
25686 but then they do cause partial register stalls. */
25687 if (regno <= BX_REG || TARGET_64BIT)
25689 if (!TARGET_PARTIAL_REG_STALL)
25691 return reload_in_progress || reload_completed;
25693 /* We handle both integer and floats in the general purpose registers. */
25694 else if (VALID_INT_MODE_P (mode))
25696 else if (VALID_FP_MODE_P (mode))
25698 else if (VALID_DFP_MODE_P (mode))
25700 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25701 on to use that value in smaller contexts, this can easily force a
25702 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25703 supporting DImode, allow it. */
25704 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25710 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25711 tieable integer mode. */
25714 ix86_tieable_integer_mode_p (enum machine_mode mode)
25723 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25726 return TARGET_64BIT;
25733 /* Return true if MODE1 is accessible in a register that can hold MODE2
25734 without copying. That is, all register classes that can hold MODE2
25735 can also hold MODE1. */
25738 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25740 if (mode1 == mode2)
25743 if (ix86_tieable_integer_mode_p (mode1)
25744 && ix86_tieable_integer_mode_p (mode2))
25747 /* MODE2 being XFmode implies fp stack or general regs, which means we
25748 can tie any smaller floating point modes to it. Note that we do not
25749 tie this with TFmode. */
25750 if (mode2 == XFmode)
25751 return mode1 == SFmode || mode1 == DFmode;
25753 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
25754 that we can tie it with SFmode. */
25755 if (mode2 == DFmode)
25756 return mode1 == SFmode;
25758 /* If MODE2 is only appropriate for an SSE register, then tie with
25759 any other mode acceptable to SSE registers. */
25760 if (GET_MODE_SIZE (mode2) == 16
25761 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
25762 return (GET_MODE_SIZE (mode1) == 16
25763 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
25765 /* If MODE2 is appropriate for an MMX register, then tie
25766 with any other mode acceptable to MMX registers. */
25767 if (GET_MODE_SIZE (mode2) == 8
25768 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
25769 return (GET_MODE_SIZE (mode1) == 8
25770 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
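/* A reduced sketch of the tieability ladder above, keyed on sizes and
   units only (hypothetical helper, for exposition only): an SSE-only
   16-byte mode or an MMX 8-byte mode ties only with a same-size mode
   that the same unit holds; otherwise tieability falls back to the
   checks above.  */

static inline int
example_modes_tieable_p (int size1, int size2, int sse_only2, int mmx2)
{
  if (sse_only2)
    return size1 == 16;
  if (mmx2)
    return size1 == 8;
  return size1 == size2;
}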
25775 /* Compute a (partial) cost for rtx X. Return true if the complete
25776 cost has been computed, and false if subexpressions should be
25777 scanned. In either case, *TOTAL contains the cost result. */
25780 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
25782 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
25783 enum machine_mode mode = GET_MODE (x);
25784 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
25792 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
25794 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
25796 else if (flag_pic && SYMBOLIC_CONST (x)
25798 || (GET_CODE (x) != LABEL_REF
25799 && (GET_CODE (x) != SYMBOL_REF
25800 || !SYMBOL_REF_LOCAL_P (x)))))
25807 if (mode == VOIDmode)
25810 switch (standard_80387_constant_p (x))
25815 default: /* Other constants */
25820 /* Start with (MEM (SYMBOL_REF)), since that's where
25821 it'll probably end up. Add a penalty for size. */
25822 *total = (COSTS_N_INSNS (1)
25823 + (flag_pic != 0 && !TARGET_64BIT)
25824 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
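/* A worked example of the cost above, assuming COSTS_N_INSNS (N) is
   (N)*4 as elsewhere in this file (for exposition only): a DFmode
   constant under 32-bit PIC prices as 4 + 1 + 1 = 6, i.e. one load
   insn, one unit for the PIC base, and one unit of size penalty.  */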
25830 /* Zero extension is often completely free on x86_64, so make
25831 it as cheap as possible. */
25832 if (TARGET_64BIT && mode == DImode
25833 && GET_MODE (XEXP (x, 0)) == SImode)
25835 else if (TARGET_ZERO_EXTEND_WITH_AND)
25836 *total = cost->add;
25838 *total = cost->movzx;
25842 *total = cost->movsx;
25846 if (CONST_INT_P (XEXP (x, 1))
25847 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
25849 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25852 *total = cost->add;
25855 if ((value == 2 || value == 3)
25856 && cost->lea <= cost->shift_const)
25858 *total = cost->lea;
25868 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
25870 if (CONST_INT_P (XEXP (x, 1)))
25872 if (INTVAL (XEXP (x, 1)) > 32)
25873 *total = cost->shift_const + COSTS_N_INSNS (2);
25875 *total = cost->shift_const * 2;
25879 if (GET_CODE (XEXP (x, 1)) == AND)
25880 *total = cost->shift_var * 2;
25882 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
25887 if (CONST_INT_P (XEXP (x, 1)))
25888 *total = cost->shift_const;
25890 *total = cost->shift_var;
25895 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25897 /* ??? SSE scalar cost should be used here. */
25898 *total = cost->fmul;
25901 else if (X87_FLOAT_MODE_P (mode))
25903 *total = cost->fmul;
25906 else if (FLOAT_MODE_P (mode))
25908 /* ??? SSE vector cost should be used here. */
25909 *total = cost->fmul;
25914 rtx op0 = XEXP (x, 0);
25915 rtx op1 = XEXP (x, 1);
25917 if (CONST_INT_P (XEXP (x, 1)))
25919 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25920 for (nbits = 0; value != 0; value &= value - 1)
25924 /* This is arbitrary. */
25927 /* Compute costs correctly for widening multiplication. */
25928 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
25929 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
25930 == GET_MODE_SIZE (mode))
25932 int is_mulwiden = 0;
25933 enum machine_mode inner_mode = GET_MODE (op0);
25935 if (GET_CODE (op0) == GET_CODE (op1))
25936 is_mulwiden = 1, op1 = XEXP (op1, 0);
25937 else if (CONST_INT_P (op1))
25939 if (GET_CODE (op0) == SIGN_EXTEND)
25940 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
25943 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
25947 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
25950 *total = (cost->mult_init[MODE_INDEX (mode)]
25951 + nbits * cost->mult_bit
25952 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
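/* The NBITS loop above is Kernighan's population count: V &= V - 1
   clears the lowest set bit on each iteration.  A self-contained
   restatement, for exposition only (not used by the compiler):

     static int example_popcount (unsigned HOST_WIDE_INT v)
     {
       int nbits = 0;
       for (; v != 0; v &= v - 1)
	 nbits++;
       return nbits;
     }
 */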
25961 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25962 /* ??? SSE cost should be used here. */
25963 *total = cost->fdiv;
25964 else if (X87_FLOAT_MODE_P (mode))
25965 *total = cost->fdiv;
25966 else if (FLOAT_MODE_P (mode))
25967 /* ??? SSE vector cost should be used here. */
25968 *total = cost->fdiv;
25970 *total = cost->divide[MODE_INDEX (mode)];
25974 if (GET_MODE_CLASS (mode) == MODE_INT
25975 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
25977 if (GET_CODE (XEXP (x, 0)) == PLUS
25978 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
25979 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
25980 && CONSTANT_P (XEXP (x, 1)))
25982 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
25983 if (val == 2 || val == 4 || val == 8)
25985 *total = cost->lea;
25986 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25987 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
25988 outer_code, speed);
25989 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25993 else if (GET_CODE (XEXP (x, 0)) == MULT
25994 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
25996 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
25997 if (val == 2 || val == 4 || val == 8)
25999 *total = cost->lea;
26000 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
26001 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26005 else if (GET_CODE (XEXP (x, 0)) == PLUS)
26007 *total = cost->lea;
26008 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
26009 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
26010 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26017 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26019 /* ??? SSE cost should be used here. */
26020 *total = cost->fadd;
26023 else if (X87_FLOAT_MODE_P (mode))
26025 *total = cost->fadd;
26028 else if (FLOAT_MODE_P (mode))
26030 /* ??? SSE vector cost should be used here. */
26031 *total = cost->fadd;
26039 if (!TARGET_64BIT && mode == DImode)
26041 *total = (cost->add * 2
26042 + (rtx_cost (XEXP (x, 0), outer_code, speed)
26043 << (GET_MODE (XEXP (x, 0)) != DImode))
26044 + (rtx_cost (XEXP (x, 1), outer_code, speed)
26045 << (GET_MODE (XEXP (x, 1)) != DImode)));
26051 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26053 /* ??? SSE cost should be used here. */
26054 *total = cost->fchs;
26057 else if (X87_FLOAT_MODE_P (mode))
26059 *total = cost->fchs;
26062 else if (FLOAT_MODE_P (mode))
26064 /* ??? SSE vector cost should be used here. */
26065 *total = cost->fchs;
26071 if (!TARGET_64BIT && mode == DImode)
26072 *total = cost->add * 2;
26074 *total = cost->add;
26078 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
26079 && XEXP (XEXP (x, 0), 1) == const1_rtx
26080 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
26081 && XEXP (x, 1) == const0_rtx)
26083 /* This kind of construct is implemented using test[bwl].
26084 Treat it as if we had an AND. */
26085 *total = (cost->add
26086 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
26087 + rtx_cost (const1_rtx, outer_code, speed));
26093 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
26098 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26099 /* ??? SSE cost should be used here. */
26100 *total = cost->fabs;
26101 else if (X87_FLOAT_MODE_P (mode))
26102 *total = cost->fabs;
26103 else if (FLOAT_MODE_P (mode))
26104 /* ??? SSE vector cost should be used here. */
26105 *total = cost->fabs;
26109 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26110 /* ??? SSE cost should be used here. */
26111 *total = cost->fsqrt;
26112 else if (X87_FLOAT_MODE_P (mode))
26113 *total = cost->fsqrt;
26114 else if (FLOAT_MODE_P (mode))
26115 /* ??? SSE vector cost should be used here. */
26116 *total = cost->fsqrt;
26120 if (XINT (x, 1) == UNSPEC_TP)
26127 case VEC_DUPLICATE:
26128 /* ??? Assume all of these vector manipulation patterns are
26129 recognizable, in which case they all pretty much have the
26130 same cost. */
26131 *total = COSTS_N_INSNS (1);
26141 static int current_machopic_label_num;
26143 /* Given a symbol name and its associated stub, write out the
26144 definition of the stub. */
26147 machopic_output_stub (FILE *file, const char *symb, const char *stub)
26149 unsigned int length;
26150 char *binder_name, *symbol_name, lazy_ptr_name[32];
26151 int label = ++current_machopic_label_num;
26153 /* For 64-bit we shouldn't get here. */
26154 gcc_assert (!TARGET_64BIT);
26156 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
26157 symb = (*targetm.strip_name_encoding) (symb);
26159 length = strlen (stub);
26160 binder_name = XALLOCAVEC (char, length + 32);
26161 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
26163 length = strlen (symb);
26164 symbol_name = XALLOCAVEC (char, length + 32);
26165 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
26167 sprintf (lazy_ptr_name, "L%d$lz", label);
26170 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
26172 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
26174 fprintf (file, "%s:\n", stub);
26175 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26179 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
26180 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
26181 fprintf (file, "\tjmp\t*%%edx\n");
26184 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
26186 fprintf (file, "%s:\n", binder_name);
26190 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
26191 fputs ("\tpushl\t%eax\n", file);
26194 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
26196 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
26198 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
26199 fprintf (file, "%s:\n", lazy_ptr_name);
26200 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26201 fprintf (file, ASM_LONG "%s\n", binder_name);
26203 #endif /* TARGET_MACHO */
26205 /* Order the registers for register allocator. */
26208 x86_order_regs_for_local_alloc (void)
26213 /* First allocate the local general purpose registers. */
26214 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26215 if (GENERAL_REGNO_P (i) && call_used_regs[i])
26216 reg_alloc_order [pos++] = i;
26218 /* Global general purpose registers. */
26219 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26220 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
26221 reg_alloc_order [pos++] = i;
26223 /* x87 registers come first in case we are doing FP math
26224 using them. */
26225 if (!TARGET_SSE_MATH)
26226 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26227 reg_alloc_order [pos++] = i;
26229 /* SSE registers. */
26230 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
26231 reg_alloc_order [pos++] = i;
26232 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
26233 reg_alloc_order [pos++] = i;
26235 /* x87 registers. */
26236 if (TARGET_SSE_MATH)
26237 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26238 reg_alloc_order [pos++] = i;
26240 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
26241 reg_alloc_order [pos++] = i;
26243 /* Initialize the rest of the array, as we do not allocate some registers
26244 at all. */
26245 while (pos < FIRST_PSEUDO_REGISTER)
26246 reg_alloc_order [pos++] = 0;
26249 /* Handle an "ms_abi" or "sysv_abi" attribute; arguments as in
26250 struct attribute_spec.handler. */
26252 ix86_handle_abi_attribute (tree *node, tree name,
26253 tree args ATTRIBUTE_UNUSED,
26254 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26256 if (TREE_CODE (*node) != FUNCTION_TYPE
26257 && TREE_CODE (*node) != METHOD_TYPE
26258 && TREE_CODE (*node) != FIELD_DECL
26259 && TREE_CODE (*node) != TYPE_DECL)
26261 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26263 *no_add_attrs = true;
26268 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
26270 *no_add_attrs = true;
26274 /* The ms_abi and sysv_abi attributes are mutually exclusive. */
26275 if (is_attribute_p ("ms_abi", name))
26277 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
26279 error ("ms_abi and sysv_abi attributes are not compatible");
26284 else if (is_attribute_p ("sysv_abi", name))
26286 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
26288 error ("ms_abi and sysv_abi attributes are not compatible");
26297 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
26298 struct attribute_spec.handler. */
26300 ix86_handle_struct_attribute (tree *node, tree name,
26301 tree args ATTRIBUTE_UNUSED,
26302 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26305 if (DECL_P (*node))
26307 if (TREE_CODE (*node) == TYPE_DECL)
26308 type = &TREE_TYPE (*node);
26313 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
26314 || TREE_CODE (*type) == UNION_TYPE)))
26316 warning (OPT_Wattributes, "%qE attribute ignored",
26318 *no_add_attrs = true;
26321 else if ((is_attribute_p ("ms_struct", name)
26322 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
26323 || ((is_attribute_p ("gcc_struct", name)
26324 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
26326 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
26328 *no_add_attrs = true;
26335 ix86_handle_fndecl_attribute (tree *node, tree name,
26336 tree args ATTRIBUTE_UNUSED,
26337 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26339 if (TREE_CODE (*node) != FUNCTION_DECL)
26341 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26343 *no_add_attrs = true;
26349 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
26354 #ifndef HAVE_AS_IX86_SWAP
26355 sorry ("ms_hook_prologue attribute needs assembler swap suffix support");
26362 ix86_ms_bitfield_layout_p (const_tree record_type)
26364 return ((TARGET_MS_BITFIELD_LAYOUT
26365 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
26366 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
26369 /* Returns an expression indicating where the this parameter is
26370 located on entry to the FUNCTION. */
26373 x86_this_parameter (tree function)
26375 tree type = TREE_TYPE (function);
26376 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
26381 const int *parm_regs;
26383 if (ix86_function_type_abi (type) == MS_ABI)
26384 parm_regs = x86_64_ms_abi_int_parameter_registers;
26386 parm_regs = x86_64_int_parameter_registers;
26387 return gen_rtx_REG (DImode, parm_regs[aggr]);
26390 nregs = ix86_function_regparm (type, function);
26392 if (nregs > 0 && !stdarg_p (type))
26396 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
26397 regno = aggr ? DX_REG : CX_REG;
26398 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
26402 return gen_rtx_MEM (SImode,
26403 plus_constant (stack_pointer_rtx, 4));
26412 return gen_rtx_MEM (SImode,
26413 plus_constant (stack_pointer_rtx, 4));
26416 return gen_rtx_REG (SImode, regno);
26419 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
26422 /* Determine whether x86_output_mi_thunk can succeed. */
26425 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
26426 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
26427 HOST_WIDE_INT vcall_offset, const_tree function)
26429 /* 64-bit can handle anything. */
26433 /* For 32-bit, everything's fine if we have one free register. */
26434 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
26437 /* Need a free register for vcall_offset. */
26441 /* Need a free register for GOT references. */
26442 if (flag_pic && !(*targetm.binds_local_p) (function))
26445 /* Otherwise ok. */
26449 /* Output the assembler code for a thunk function. THUNK_DECL is the
26450 declaration for the thunk function itself, FUNCTION is the decl for
26451 the target function. DELTA is an immediate constant offset to be
26452 added to THIS. If VCALL_OFFSET is nonzero, the word at
26453 *(*this + vcall_offset) should be added to THIS. */
26456 x86_output_mi_thunk (FILE *file,
26457 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
26458 HOST_WIDE_INT vcall_offset, tree function)
26461 rtx this_param = x86_this_parameter (function);
26464 /* Make sure unwind info is emitted for the thunk if needed. */
26465 final_start_function (emit_barrier (), file, 1);
26467 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
26468 pull it in now and let DELTA benefit. */
26469 if (REG_P (this_param))
26470 this_reg = this_param;
26471 else if (vcall_offset)
26473 /* Put the this parameter into %eax. */
26474 xops[0] = this_param;
26475 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
26476 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26479 this_reg = NULL_RTX;
26481 /* Adjust the this parameter by a fixed constant. */
26484 xops[0] = GEN_INT (delta);
26485 xops[1] = this_reg ? this_reg : this_param;
26488 if (!x86_64_general_operand (xops[0], DImode))
26490 tmp = gen_rtx_REG (DImode, R10_REG);
26492 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
26494 xops[1] = this_param;
26496 if (x86_maybe_negate_const_int (&xops[0], DImode))
26497 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
26499 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
26501 else if (x86_maybe_negate_const_int (&xops[0], SImode))
26502 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
26504 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
26507 /* Adjust the this parameter by a value stored in the vtable. */
26511 tmp = gen_rtx_REG (DImode, R10_REG);
26514 int tmp_regno = CX_REG;
26515 if (lookup_attribute ("fastcall",
26516 TYPE_ATTRIBUTES (TREE_TYPE (function)))
26517 || lookup_attribute ("thiscall",
26518 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26519 tmp_regno = AX_REG;
26520 tmp = gen_rtx_REG (SImode, tmp_regno);
26523 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26525 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26527 /* Adjust the this parameter. */
26528 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26529 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26531 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26532 xops[0] = GEN_INT (vcall_offset);
26534 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26535 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26537 xops[1] = this_reg;
26538 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26541 /* If necessary, drop THIS back to its stack slot. */
26542 if (this_reg && this_reg != this_param)
26544 xops[0] = this_reg;
26545 xops[1] = this_param;
26546 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26549 xops[0] = XEXP (DECL_RTL (function), 0);
26552 if (!flag_pic || (*targetm.binds_local_p) (function))
26553 output_asm_insn ("jmp\t%P0", xops);
26554 /* All thunks should be in the same object as their target,
26555 and thus binds_local_p should be true. */
26556 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26557 gcc_unreachable ();
26560 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26561 tmp = gen_rtx_CONST (Pmode, tmp);
26562 tmp = gen_rtx_MEM (QImode, tmp);
26564 output_asm_insn ("jmp\t%A0", xops);
26569 if (!flag_pic || (*targetm.binds_local_p) (function))
26570 output_asm_insn ("jmp\t%P0", xops);
26575 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26576 tmp = (gen_rtx_SYMBOL_REF
26577 (Pmode,
26578 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26579 tmp = gen_rtx_MEM (QImode, tmp);
26581 output_asm_insn ("jmp\t%0", xops);
26584 #endif /* TARGET_MACHO */
26586 tmp = gen_rtx_REG (SImode, CX_REG);
26587 output_set_got (tmp, NULL_RTX);
26590 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26591 output_asm_insn ("jmp\t{*}%1", xops);
26594 final_end_function ();
26598 x86_file_start (void)
26600 default_file_start ();
26602 darwin_file_start ();
26604 if (X86_FILE_START_VERSION_DIRECTIVE)
26605 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26606 if (X86_FILE_START_FLTUSED)
26607 fputs ("\t.global\t__fltused\n", asm_out_file);
26608 if (ix86_asm_dialect == ASM_INTEL)
26609 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26613 x86_field_alignment (tree field, int computed)
26615 enum machine_mode mode;
26616 tree type = TREE_TYPE (field);
26618 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26620 mode = TYPE_MODE (strip_array_types (type));
26621 if (mode == DFmode || mode == DCmode
26622 || GET_MODE_CLASS (mode) == MODE_INT
26623 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26624 return MIN (32, computed);
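/* A sketch of the rule above (hypothetical helper, for exposition only,
   not used by the compiler): without -malign-double, 32-bit x86 caps
   the field alignment of 8-byte scalars such as double at 32 bits, as
   the traditional System V i386 ABI requires.  */

static inline int
example_i386_field_align (int computed_bits)
{
  return computed_bits < 32 ? computed_bits : 32;
}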
26628 /* Output assembler code to FILE to increment profiler label # LABELNO
26629 for profiling a function entry. */
26631 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26635 #ifndef NO_PROFILE_COUNTERS
26636 fprintf (file, "\tleaq\t" LPREFIX "P%d(%%rip),%%r11\n", labelno);
26639 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26640 fputs ("\tcall\t*" MCOUNT_NAME "@GOTPCREL(%rip)\n", file);
26642 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26646 #ifndef NO_PROFILE_COUNTERS
26647 fprintf (file, "\tleal\t" LPREFIX "P%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
26650 fputs ("\tcall\t*" MCOUNT_NAME "@GOT(%ebx)\n", file);
26654 #ifndef NO_PROFILE_COUNTERS
26655 fprintf (file, "\tmovl\t$" LPREFIX "P%d,%%" PROFILE_COUNT_REGISTER "\n",
26658 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26662 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26663 /* We don't have exact information about the insn sizes, but we may assume
26664 quite safely that we are informed about all 1 byte insns and memory
26665 address sizes. This is enough to eliminate unnecessary padding in
26666 99% of cases. */
26669 min_insn_size (rtx insn)
26673 if (!INSN_P (insn) || !active_insn_p (insn))
26676 /* Discard alignments we've emitted, and jump instructions. */
26677 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26678 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26680 if (JUMP_TABLE_DATA_P (insn))
26683 /* Important case - calls are always 5 bytes.
26684 It is common to have many calls in a row. */
26686 && symbolic_reference_mentioned_p (PATTERN (insn))
26687 && !SIBLING_CALL_P (insn))
26689 len = get_attr_length (insn);
26693 /* For normal instructions we rely on get_attr_length being exact,
26694 with a few exceptions. */
26695 if (!JUMP_P (insn))
26697 enum attr_type type = get_attr_type (insn);
26702 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
26703 || asm_noperands (PATTERN (insn)) >= 0)
26710 /* Otherwise trust get_attr_length. */
26714 l = get_attr_length_address (insn);
26715 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
26724 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a
26725 16 byte window. */
26728 ix86_avoid_jump_mispredicts (void)
26730 rtx insn, start = get_insns ();
26731 int nbytes = 0, njumps = 0;
26734 /* Look for all minimal intervals of instructions containing 4 jumps.
26735 The intervals are bounded by START and INSN. NBYTES is the total
26736 size of instructions in the interval including INSN and not including
26737 START. When NBYTES is smaller than 16 bytes, it is possible
26738 that the ends of START and INSN land in the same 16 byte page.
26740 The smallest offset in the page at which INSN can start is when START
26741 ends at offset 0; the offset of INSN is then NBYTES - sizeof (INSN).
26742 We add p2align to a 16 byte window with max skip 15 - NBYTES + sizeof (INSN). */
26744 for (insn = start; insn; insn = NEXT_INSN (insn))
26748 if (LABEL_P (insn))
26750 int align = label_to_alignment (insn);
26751 int max_skip = label_to_max_skip (insn);
26755 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
26756 already in the current 16 byte page, because otherwise
26757 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
26758 bytes to reach 16 byte boundary. */
26760 || (align <= 3 && max_skip != (1 << align) - 1))
26763 fprintf (dump_file, "Label %i with max_skip %i\n",
26764 INSN_UID (insn), max_skip);
26767 while (nbytes + max_skip >= 16)
26769 start = NEXT_INSN (start);
26770 if ((JUMP_P (start)
26771 && GET_CODE (PATTERN (start)) != ADDR_VEC
26772 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26774 njumps--, isjump = 1;
26777 nbytes -= min_insn_size (start);
26783 min_size = min_insn_size (insn);
26784 nbytes += min_size;
26786 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
26787 INSN_UID (insn), min_size);
26789 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26790 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26798 start = NEXT_INSN (start);
26799 if ((JUMP_P (start)
26800 && GET_CODE (PATTERN (start)) != ADDR_VEC
26801 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26803 njumps--, isjump = 1;
26806 nbytes -= min_insn_size (start);
26808 gcc_assert (njumps >= 0);
26810 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26811 INSN_UID (start), INSN_UID (insn), nbytes);
26813 if (njumps == 3 && isjump && nbytes < 16)
26815 int padsize = 15 - nbytes + min_insn_size (insn);
26818 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26819 INSN_UID (insn), padsize);
26820 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
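/* A worked example of the padding arithmetic above, for exposition
   only: with NBYTES == 12 and a 2-byte final jump, the pad is
   15 - 12 + 2 = 5 bytes, which pushes the fourth jump past the
   16 byte window holding the other three.  */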
26826 /* AMD Athlon works faster
26827 when RET is not the destination of a conditional jump or directly preceded
26828 by another jump instruction. We avoid the penalty by inserting a NOP just
26829 before the RET instruction in such cases. */
26831 ix86_pad_returns (void)
26836 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26838 basic_block bb = e->src;
26839 rtx ret = BB_END (bb);
26841 bool replace = false;
26843 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26844 || optimize_bb_for_size_p (bb))
26846 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26847 if (active_insn_p (prev) || LABEL_P (prev))
26849 if (prev && LABEL_P (prev))
26854 FOR_EACH_EDGE (e, ei, bb->preds)
26855 if (EDGE_FREQUENCY (e) && e->src->index >= 0
26856 && !(e->flags & EDGE_FALLTHRU))
26861 prev = prev_active_insn (ret);
26863 && ((JUMP_P (prev) && any_condjump_p (prev))
26866 /* Empty functions get a branch mispredict even when the jump destination
26867 is not visible to us. */
26868 if (!prev && !optimize_function_for_size_p (cfun))
26873 emit_jump_insn_before (gen_return_internal_long (), ret);
26879 /* Implement machine specific optimizations. We implement padding of returns
26880 for K8 CPUs and a pass to avoid 4 jumps in a single 16 byte window. */
26884 if (optimize && optimize_function_for_speed_p (cfun))
26886 if (TARGET_PAD_RETURNS)
26887 ix86_pad_returns ();
26888 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26889 if (TARGET_FOUR_JUMP_LIMIT)
26890 ix86_avoid_jump_mispredicts ();
26895 /* Return nonzero when a QImode register that must be represented via a
26896 REX prefix is used. */
26898 x86_extended_QIreg_mentioned_p (rtx insn)
26901 extract_insn_cached (insn);
26902 for (i = 0; i < recog_data.n_operands; i++)
26903 if (REG_P (recog_data.operand[i])
26904 && REGNO (recog_data.operand[i]) > BX_REG)
26909 /* Return nonzero when P points to a register encoded via a REX prefix.
26910 Called via for_each_rtx. */
26912 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
26914 unsigned int regno;
26917 regno = REGNO (*p);
26918 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
26921 /* Return true when INSN mentions a register that must be encoded using
26922 a REX prefix. */
26924 x86_extended_reg_mentioned_p (rtx insn)
26926 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
26927 extended_reg_mentioned_1, NULL);
26930 /* If profitable, negate (without causing overflow) integer constant
26931 of mode MODE at location LOC. Return true in this case. */
26933 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
26937 if (!CONST_INT_P (*loc))
26943 /* DImode x86_64 constants must fit in 32 bits. */
26944 gcc_assert (x86_64_immediate_operand (*loc, mode));
26955 gcc_unreachable ();
26958 /* Avoid overflows. */
26959 if (mode_signbit_p (mode, *loc))
26962 val = INTVAL (*loc);
26964 /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
26965 Exceptions: -128 encodes smaller than 128, so swap sign and op. */
26966 if ((val < 0 && val != -128)
26967 || val == 128)
26969 *loc = GEN_INT (-val);
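/* A self-contained restatement of the sign-swap rule above (names
   hypothetical, for exposition only):

     static int example_negate_immediate_p (long val)
     {
       return (val < 0 && val != -128) || val == 128;
     }

   Negating turns `addl $-4' into the prettier `subl $4'; 128 is also
   negated because -128 fits in an imm8 while +128 needs an imm32, and
   -128 itself is left alone for the same reason.  */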
26976 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
26977 optabs would emit if we didn't have TFmode patterns. */
26980 x86_emit_floatuns (rtx operands[2])
26982 rtx neglab, donelab, i0, i1, f0, in, out;
26983 enum machine_mode mode, inmode;
26985 inmode = GET_MODE (operands[1]);
26986 gcc_assert (inmode == SImode || inmode == DImode);
26989 in = force_reg (inmode, operands[1]);
26990 mode = GET_MODE (out);
26991 neglab = gen_label_rtx ();
26992 donelab = gen_label_rtx ();
26993 f0 = gen_reg_rtx (mode);
26995 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
26997 expand_float (out, in, 0);
26999 emit_jump_insn (gen_jump (donelab));
27002 emit_label (neglab);
27004 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
27006 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
27008 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
27010 expand_float (f0, i0, 0);
27012 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
27014 emit_label (donelab);
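/* A plain C sketch of the expansion above for the DImode case, for
   exposition only (not used by the compiler).  When the sign bit is
   set, halve the value while folding the low bit back in so rounding
   is unaffected, convert as signed, then double the result.  */

static inline double
example_floatuns_di (unsigned long long u)
{
  double f;

  if ((long long) u >= 0)
    return (double) (long long) u;	/* Fits as signed: direct.  */

  f = (double) (long long) ((u >> 1) | (u & 1));
  return f + f;				/* The PLUS (f0, f0) above.  */
}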
27017 /* AVX does not support 32-byte integer vector operations,
27018 thus the longest vector we are faced with is V16QImode. */
27019 #define MAX_VECT_LEN 16
27021 struct expand_vec_perm_d
27023 rtx target, op0, op1;
27024 unsigned char perm[MAX_VECT_LEN];
27025 enum machine_mode vmode;
27026 unsigned char nelt;
27030 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
27031 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
27033 /* Get a vector mode of the same size as the original but with elements
27034 twice as wide. This is only guaranteed to apply to integral vectors. */
27036 static inline enum machine_mode
27037 get_mode_wider_vector (enum machine_mode o)
27039 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
27040 enum machine_mode n = GET_MODE_WIDER_MODE (o);
27041 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
27042 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
27046 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27047 with all elements equal to VAR. Return true if successful. */
27050 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
27051 rtx target, rtx val)
27074 /* First attempt to recognize VAL as-is. */
27075 dup = gen_rtx_VEC_DUPLICATE (mode, val);
27076 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
27077 if (recog_memoized (insn) < 0)
27080 /* If that fails, force VAL into a register. */
27083 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
27084 seq = get_insns ();
27087 emit_insn_before (seq, insn);
27089 ok = recog_memoized (insn) >= 0;
27098 if (TARGET_SSE || TARGET_3DNOW_A)
27102 val = gen_lowpart (SImode, val);
27103 x = gen_rtx_TRUNCATE (HImode, val);
27104 x = gen_rtx_VEC_DUPLICATE (mode, x);
27105 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27118 struct expand_vec_perm_d dperm;
27122 memset (&dperm, 0, sizeof (dperm));
27123 dperm.target = target;
27124 dperm.vmode = mode;
27125 dperm.nelt = GET_MODE_NUNITS (mode);
27126 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
27128 /* Extend to SImode using a paradoxical SUBREG. */
27129 tmp1 = gen_reg_rtx (SImode);
27130 emit_move_insn (tmp1, gen_lowpart (SImode, val));
27132 /* Insert the SImode value as low element of a V4SImode vector. */
27133 tmp2 = gen_lowpart (V4SImode, dperm.op0);
27134 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
27136 ok = (expand_vec_perm_1 (&dperm)
27137 || expand_vec_perm_broadcast_1 (&dperm));
27149 /* Replicate the value once into the next wider mode and recurse. */
27151 enum machine_mode smode, wsmode, wvmode;
27154 smode = GET_MODE_INNER (mode);
27155 wvmode = get_mode_wider_vector (mode);
27156 wsmode = GET_MODE_INNER (wvmode);
27158 val = convert_modes (wsmode, smode, val, true);
27159 x = expand_simple_binop (wsmode, ASHIFT, val,
27160 GEN_INT (GET_MODE_BITSIZE (smode)),
27161 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27162 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
27164 x = gen_lowpart (wvmode, target);
27165 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
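/* The widening step above, shown as plain C for an HImode value
   (hypothetical helper, for exposition only):

     static unsigned int example_splat_hi_to_si (unsigned short val)
     {
       return ((unsigned int) val << 16) | val;
     }

   After the shift-and-or, the broadcast recurses with half as many,
   twice as wide elements.  */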
27173 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
27174 rtx x = gen_reg_rtx (hvmode);
27176 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
27179 x = gen_rtx_VEC_CONCAT (mode, x, x);
27180 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27189 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27190 whose ONE_VAR element is VAR, and other elements are zero. Return true
27194 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
27195 rtx target, rtx var, int one_var)
27197 enum machine_mode vsimode;
27200 bool use_vector_set = false;
27205 /* For SSE4.1, we normally use vector set. But if the second
27206 element is zero and inter-unit moves are OK, we use movq
27207 instead. */
27208 use_vector_set = (TARGET_64BIT
27210 && !(TARGET_INTER_UNIT_MOVES
27216 use_vector_set = TARGET_SSE4_1;
27219 use_vector_set = TARGET_SSE2;
27222 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
27229 use_vector_set = TARGET_AVX;
27232 /* Use ix86_expand_vector_set in 64bit mode only. */
27233 use_vector_set = TARGET_AVX && TARGET_64BIT;
27239 if (use_vector_set)
27241 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
27242 var = force_reg (GET_MODE_INNER (mode), var);
27243 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27259 var = force_reg (GET_MODE_INNER (mode), var);
27260 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
27261 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27266 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
27267 new_target = gen_reg_rtx (mode);
27269 new_target = target;
27270 var = force_reg (GET_MODE_INNER (mode), var);
27271 x = gen_rtx_VEC_DUPLICATE (mode, var);
27272 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
27273 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
27276 /* We need to shuffle the value to the correct position, so
27277 create a new pseudo to store the intermediate result. */
27279 /* With SSE2, we can use the integer shuffle insns. */
27280 if (mode != V4SFmode && TARGET_SSE2)
27282 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
27284 GEN_INT (one_var == 1 ? 0 : 1),
27285 GEN_INT (one_var == 2 ? 0 : 1),
27286 GEN_INT (one_var == 3 ? 0 : 1)));
27287 if (target != new_target)
27288 emit_move_insn (target, new_target);
27292 /* Otherwise convert the intermediate result to V4SFmode and
27293 use the SSE1 shuffle instructions. */
27294 if (mode != V4SFmode)
27296 tmp = gen_reg_rtx (V4SFmode);
27297 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
27302 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
27304 GEN_INT (one_var == 1 ? 0 : 1),
27305 GEN_INT (one_var == 2 ? 0+4 : 1+4),
27306 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
27308 if (mode != V4SFmode)
27309 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
27310 else if (tmp != target)
27311 emit_move_insn (target, tmp);
27313 else if (target != new_target)
27314 emit_move_insn (target, new_target);
27319 vsimode = V4SImode;
27325 vsimode = V2SImode;
27331 /* Zero extend the variable element to SImode and recurse. */
27332 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
27334 x = gen_reg_rtx (vsimode);
27335 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
27337 gcc_unreachable ();
27339 emit_move_insn (target, gen_lowpart (mode, x));
27347 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27348 consisting of the values in VALS. It is known that all elements
27349 except ONE_VAR are constants. Return true if successful. */
27352 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
27353 rtx target, rtx vals, int one_var)
27355 rtx var = XVECEXP (vals, 0, one_var);
27356 enum machine_mode wmode;
27359 const_vec = copy_rtx (vals);
27360 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
27361 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
27369 /* For the two element vectors, it's just as easy to use
27370 the general case. */
27374 /* Use ix86_expand_vector_set in 64bit mode only. */
27397 /* There's no way to set one QImode entry easily. Combine
27398 the variable value with its adjacent constant value, and
27399 promote to an HImode set. */
27400 x = XVECEXP (vals, 0, one_var ^ 1);
27403 var = convert_modes (HImode, QImode, var, true);
27404 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
27405 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27406 x = GEN_INT (INTVAL (x) & 0xff);
27410 var = convert_modes (HImode, QImode, var, true);
27411 x = gen_int_mode (INTVAL (x) << 8, HImode);
27413 if (x != const0_rtx)
27414 var = expand_simple_binop (HImode, IOR, var, x, var,
27415 1, OPTAB_LIB_WIDEN);
27417 x = gen_reg_rtx (wmode);
27418 emit_move_insn (x, gen_lowpart (wmode, const_vec));
27419 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
27421 emit_move_insn (target, gen_lowpart (mode, x));
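/* The byte-fusing trick above, shown as plain C (hypothetical helper,
   for exposition only).  On little-endian x86 the odd byte of a pair
   is the high half of its HImode element:

     static unsigned short example_fuse_pair (int odd, unsigned char var,
					      unsigned char neighbor)
     {
       return odd ? (var << 8) | neighbor : (neighbor << 8) | var;
     }
 */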
27428 emit_move_insn (target, const_vec);
27429 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27433 /* A subroutine of ix86_expand_vector_init_general. Use vector
27434 concatenate to handle the most general case: all values variable,
27435 and none identical. */
27438 ix86_expand_vector_init_concat (enum machine_mode mode,
27439 rtx target, rtx *ops, int n)
27441 enum machine_mode cmode, hmode = VOIDmode;
27442 rtx first[8], second[4];
27482 gcc_unreachable ();
27485 if (!register_operand (ops[1], cmode))
27486 ops[1] = force_reg (cmode, ops[1]);
27487 if (!register_operand (ops[0], cmode))
27488 ops[0] = force_reg (cmode, ops[0]);
27489 emit_insn (gen_rtx_SET (VOIDmode, target,
27490 gen_rtx_VEC_CONCAT (mode, ops[0],
27510 gcc_unreachable ();
27526 gcc_unreachable ();
27531 /* FIXME: We process inputs backward to help RA. PR 36222. */
27534 for (; i > 0; i -= 2, j--)
27536 first[j] = gen_reg_rtx (cmode);
27537 v = gen_rtvec (2, ops[i - 1], ops[i]);
27538 ix86_expand_vector_init (false, first[j],
27539 gen_rtx_PARALLEL (cmode, v));
27545 gcc_assert (hmode != VOIDmode);
27546 for (i = j = 0; i < n; i += 2, j++)
27548 second[j] = gen_reg_rtx (hmode);
27549 ix86_expand_vector_init_concat (hmode, second [j],
27553 ix86_expand_vector_init_concat (mode, target, second, n);
27556 ix86_expand_vector_init_concat (mode, target, first, n);
27560 gcc_unreachable ();
27564 /* A subroutine of ix86_expand_vector_init_general. Use vector
27565 interleave to handle the most general case: all values variable,
27566 and none identical. */
27569 ix86_expand_vector_init_interleave (enum machine_mode mode,
27570 rtx target, rtx *ops, int n)
27572 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
27575 rtx (*gen_load_even) (rtx, rtx, rtx);
27576 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
27577 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
27582 gen_load_even = gen_vec_setv8hi;
27583 gen_interleave_first_low = gen_vec_interleave_lowv4si;
27584 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27585 inner_mode = HImode;
27586 first_imode = V4SImode;
27587 second_imode = V2DImode;
27588 third_imode = VOIDmode;
27591 gen_load_even = gen_vec_setv16qi;
27592 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
27593 gen_interleave_second_low = gen_vec_interleave_lowv4si;
27594 inner_mode = QImode;
27595 first_imode = V8HImode;
27596 second_imode = V4SImode;
27597 third_imode = V2DImode;
27600 gcc_unreachable ();
27603 for (i = 0; i < n; i++)
27605 /* Extend the odd element to SImode using a paradoxical SUBREG. */
27606 op0 = gen_reg_rtx (SImode);
27607 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
27609 /* Insert the SImode value as low element of V4SImode vector. */
27610 op1 = gen_reg_rtx (V4SImode);
27611 op0 = gen_rtx_VEC_MERGE (V4SImode,
27612 gen_rtx_VEC_DUPLICATE (V4SImode,
27614 CONST0_RTX (V4SImode),
27616 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
27618 /* Cast the V4SImode vector back to a vector in the original mode. */
27619 op0 = gen_reg_rtx (mode);
27620 emit_move_insn (op0, gen_lowpart (mode, op1));
27622 /* Load even elements into the second position. */
27623 emit_insn ((*gen_load_even) (op0,
27624 force_reg (inner_mode,
27628 /* Cast vector to FIRST_IMODE vector. */
27629 ops[i] = gen_reg_rtx (first_imode);
27630 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27633 /* Interleave low FIRST_IMODE vectors. */
27634 for (i = j = 0; i < n; i += 2, j++)
27636 op0 = gen_reg_rtx (first_imode);
27637 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27639 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27640 ops[j] = gen_reg_rtx (second_imode);
27641 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27644 /* Interleave low SECOND_IMODE vectors. */
27645 switch (second_imode)
27648 for (i = j = 0; i < n / 2; i += 2, j++)
27650 op0 = gen_reg_rtx (second_imode);
27651 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
27654 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
27655 vector. */
27656 ops[j] = gen_reg_rtx (third_imode);
27657 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27659 second_imode = V2DImode;
27660 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27664 op0 = gen_reg_rtx (second_imode);
27665 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
27668 /* Cast the SECOND_IMODE vector back to a vector of the original
27669 mode. */
27670 emit_insn (gen_rtx_SET (VOIDmode, target,
27671 gen_lowpart (mode, op0)));
27675 gcc_unreachable ();
27679 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27680 all values variable, and none identical. */
27683 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27684 rtx target, rtx vals)
27686 rtx ops[32], op0, op1;
27687 enum machine_mode half_mode = VOIDmode;
27694 if (!mmx_ok && !TARGET_SSE)
27706 n = GET_MODE_NUNITS (mode);
27707 for (i = 0; i < n; i++)
27708 ops[i] = XVECEXP (vals, 0, i);
27709 ix86_expand_vector_init_concat (mode, target, ops, n);
27713 half_mode = V16QImode;
27717 half_mode = V8HImode;
27721 n = GET_MODE_NUNITS (mode);
27722 for (i = 0; i < n; i++)
27723 ops[i] = XVECEXP (vals, 0, i);
27724 op0 = gen_reg_rtx (half_mode);
27725 op1 = gen_reg_rtx (half_mode);
27726 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27728 ix86_expand_vector_init_interleave (half_mode, op1,
27729 &ops [n >> 1], n >> 2);
27730 emit_insn (gen_rtx_SET (VOIDmode, target,
27731 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27735 if (!TARGET_SSE4_1)
27743 /* Don't use ix86_expand_vector_init_interleave if we can't
27744 move from GPR to SSE register directly. */
27745 if (!TARGET_INTER_UNIT_MOVES)
27748 n = GET_MODE_NUNITS (mode);
27749 for (i = 0; i < n; i++)
27750 ops[i] = XVECEXP (vals, 0, i);
27751 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27759 gcc_unreachable ();
27763 int i, j, n_elts, n_words, n_elt_per_word;
27764 enum machine_mode inner_mode;
27765 rtx words[4], shift;
27767 inner_mode = GET_MODE_INNER (mode);
27768 n_elts = GET_MODE_NUNITS (mode);
27769 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27770 n_elt_per_word = n_elts / n_words;
27771 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
27773 for (i = 0; i < n_words; ++i)
27775 rtx word = NULL_RTX;
27777 for (j = 0; j < n_elt_per_word; ++j)
27779 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27780 elt = convert_modes (word_mode, inner_mode, elt, true);
27786 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27787 word, 1, OPTAB_LIB_WIDEN);
27788 word = expand_simple_binop (word_mode, IOR, word, elt,
27789 word, 1, OPTAB_LIB_WIDEN);
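/* The packing loop above, shown for one SImode word of a QImode vector
   (hypothetical helper, for exposition only).  Elements are visited
   from the highest index down, each shift making room for the next
   lower element, so element 0 lands in the low byte:

     static unsigned int example_pack_word (const unsigned char elt[4])
     {
       unsigned int word = 0;
       int j;
       for (j = 3; j >= 0; j--)
	 word = (word << 8) | elt[j];
       return word;
     }
 */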
27797 emit_move_insn (target, gen_lowpart (mode, words[0]));
27798 else if (n_words == 2)
27800 rtx tmp = gen_reg_rtx (mode);
27801 emit_clobber (tmp);
27802 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27803 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27804 emit_move_insn (target, tmp);
27806 else if (n_words == 4)
27808 rtx tmp = gen_reg_rtx (V4SImode);
27809 gcc_assert (word_mode == SImode);
27810 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27811 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27812 emit_move_insn (target, gen_lowpart (mode, tmp));
27815 gcc_unreachable ();
27819 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27820 instructions unless MMX_OK is true. */
27823 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27825 enum machine_mode mode = GET_MODE (target);
27826 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27827 int n_elts = GET_MODE_NUNITS (mode);
27828 int n_var = 0, one_var = -1;
27829 bool all_same = true, all_const_zero = true;
27833 for (i = 0; i < n_elts; ++i)
27835 x = XVECEXP (vals, 0, i);
27836 if (!(CONST_INT_P (x)
27837 || GET_CODE (x) == CONST_DOUBLE
27838 || GET_CODE (x) == CONST_FIXED))
27839 n_var++, one_var = i;
27840 else if (x != CONST0_RTX (inner_mode))
27841 all_const_zero = false;
27842 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27846 /* Constants are best loaded from the constant pool. */
27849 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27853 /* If all values are identical, broadcast the value. */
27855 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27856 XVECEXP (vals, 0, 0)))
27859 /* Values where only one field is non-constant are best loaded from
27860 the pool and overwritten via a move later. */
27864 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27865 XVECEXP (vals, 0, one_var),
27869 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27873 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
27877 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27879 enum machine_mode mode = GET_MODE (target);
27880 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27881 enum machine_mode half_mode;
27882 bool use_vec_merge = false;
27884 static rtx (*gen_extract[6][2]) (rtx, rtx)
27886 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27887 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27888 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27889 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27890 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27891 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27893 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27895 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27896 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27897 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27898 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27899 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27900 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27910 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
27911 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
27913 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
27915 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
27916 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27922 use_vec_merge = TARGET_SSE4_1;
27930 /* For the two element vectors, we implement a VEC_CONCAT with
27931 the extraction of the other element. */
27933 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
27934 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
27937 op0 = val, op1 = tmp;
27939 op0 = tmp, op1 = val;
27941 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
27942 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27947 use_vec_merge = TARGET_SSE4_1;
27954 use_vec_merge = true;
27958 /* tmp = target = A B C D */
27959 tmp = copy_to_reg (target);
27960 /* target = A A B B */
27961 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
27962 /* target = X A B B */
27963 ix86_expand_vector_set (false, target, val, 0);
27964 /* target = A X C D */
27965 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27966 const1_rtx, const0_rtx,
27967 GEN_INT (2+4), GEN_INT (3+4)));
27971 /* tmp = target = A B C D */
27972 tmp = copy_to_reg (target);
27973 /* tmp = X B C D */
27974 ix86_expand_vector_set (false, tmp, val, 0);
27975 /* target = A B X D */
27976 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27977 const0_rtx, const1_rtx,
27978 GEN_INT (0+4), GEN_INT (3+4)));
27982 /* tmp = target = A B C D */
27983 tmp = copy_to_reg (target);
27984 /* tmp = X B C D */
27985 ix86_expand_vector_set (false, tmp, val, 0);
27986 /* target = A B C X */
27987 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27988 const0_rtx, const1_rtx,
27989 GEN_INT (2+4), GEN_INT (0+4)));
27993 gcc_unreachable ();
27998 use_vec_merge = TARGET_SSE4_1;
28002 /* Element 0 handled by vec_merge below. */
28005 use_vec_merge = true;
28011 /* With SSE2, use integer shuffles to swap element 0 and ELT,
28012 store into element 0, then shuffle them back. */
28016 order[0] = GEN_INT (elt);
28017 order[1] = const1_rtx;
28018 order[2] = const2_rtx;
28019 order[3] = GEN_INT (3);
28020 order[elt] = const0_rtx;
28022 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
28023 order[1], order[2], order[3]));
28025 ix86_expand_vector_set (false, target, val, 0);
28027 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
28028 order[1], order[2], order[3]));
28032 /* For SSE1, we have to reuse the V4SF code. */
28033 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
28034 gen_lowpart (SFmode, val), elt);
28039 use_vec_merge = TARGET_SSE2;
28042 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28046 use_vec_merge = TARGET_SSE4_1;
28053 half_mode = V16QImode;
28059 half_mode = V8HImode;
28065 half_mode = V4SImode;
28071 half_mode = V2DImode;
28077 half_mode = V4SFmode;
28083 half_mode = V2DFmode;
28089 /* Compute offset. */
28093 gcc_assert (i <= 1);
28095 /* Extract the half. */
28096 tmp = gen_reg_rtx (half_mode);
28097 emit_insn ((*gen_extract[j][i]) (tmp, target));
28099 /* Put val in tmp at elt. */
28100 ix86_expand_vector_set (false, tmp, val, elt);
28103 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
28112 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
28113 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
28114 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28118 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28120 emit_move_insn (mem, target);
28122 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
28123 emit_move_insn (tmp, val);
28125 emit_move_insn (target, mem);
28130 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
28132 enum machine_mode mode = GET_MODE (vec);
28133 enum machine_mode inner_mode = GET_MODE_INNER (mode);
28134 bool use_vec_extr = false;
28147 use_vec_extr = true;
28151 use_vec_extr = TARGET_SSE4_1;
28163 tmp = gen_reg_rtx (mode);
28164 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
28165 GEN_INT (elt), GEN_INT (elt),
28166 GEN_INT (elt+4), GEN_INT (elt+4)));
28170 tmp = gen_reg_rtx (mode);
28171 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
28175 gcc_unreachable ();
28178 use_vec_extr = true;
28183 use_vec_extr = TARGET_SSE4_1;
28197 tmp = gen_reg_rtx (mode);
28198 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
28199 GEN_INT (elt), GEN_INT (elt),
28200 GEN_INT (elt), GEN_INT (elt)));
28204 tmp = gen_reg_rtx (mode);
28205 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
28209 gcc_unreachable ();
28212 use_vec_extr = true;
28217 /* For SSE1, we have to reuse the V4SF code. */
28218 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
28219 gen_lowpart (V4SFmode, vec), elt);
28225 use_vec_extr = TARGET_SSE2;
28228 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28232 use_vec_extr = TARGET_SSE4_1;
28236 /* ??? Could extract the appropriate HImode element and shift. */
28243 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
28244 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
28246 /* Let the rtl optimizers know about the zero extension performed. */
28247 if (inner_mode == QImode || inner_mode == HImode)
28249 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
28250 target = gen_lowpart (SImode, target);
28253 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28257 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28259 emit_move_insn (mem, vec);
28261 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
28262 emit_move_insn (target, tmp);
28266 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
28267 pattern to reduce; DEST is the destination; IN is the input vector. */
28270 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
28272 rtx tmp1, tmp2, tmp3;
28274 tmp1 = gen_reg_rtx (V4SFmode);
28275 tmp2 = gen_reg_rtx (V4SFmode);
28276 tmp3 = gen_reg_rtx (V4SFmode);
28278 emit_insn (gen_sse_movhlps (tmp1, in, in));
28279 emit_insn (fn (tmp2, tmp1, in));
28281 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
28282 const1_rtx, const1_rtx,
28283 GEN_INT (1+4), GEN_INT (1+4)));
28284 emit_insn (fn (dest, tmp2, tmp3));
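/* A rough scalar model (illustration only) of the V4SF reduction
   above, for an associative binary operation fn over in[0..3]:

     tmp1 = { in[2], in[3], in[2], in[3] }         movhlps
     tmp2 = fn (tmp1, in) elementwise              lane 0: fn (in[2], in[0])
                                                   lane 1: fn (in[3], in[1])
     tmp3 = splat of tmp2[1]                       shufps
     dest lane 0 = fn (tmp2[0], tmp3[0])
                 = fn (fn (in[2], in[0]), fn (in[3], in[1]))  */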
28287 /* Target hook for scalar_mode_supported_p. */
28289 ix86_scalar_mode_supported_p (enum machine_mode mode)
28291 if (DECIMAL_FLOAT_MODE_P (mode))
28292 return default_decimal_float_supported_p ();
28293 else if (mode == TFmode)
28296 return default_scalar_mode_supported_p (mode);
28299 /* Implements target hook vector_mode_supported_p. */
28301 ix86_vector_mode_supported_p (enum machine_mode mode)
28303 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
28305 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
28307 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
28309 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
28311 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
28316 /* Target hook for c_mode_for_suffix. */
28317 static enum machine_mode
28318 ix86_c_mode_for_suffix (char suffix)
28328 /* Worker function for TARGET_MD_ASM_CLOBBERS.
28330 We do this in the new i386 backend to maintain source compatibility
28331 with the old cc0-based compiler. */
28334 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
28335 tree inputs ATTRIBUTE_UNUSED,
28338 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
28340 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
28345 /* Implements the target vector targetm.asm.encode_section_info.  This
28346 is not used by NetWare.  */
28348 static void ATTRIBUTE_UNUSED
28349 ix86_encode_section_info (tree decl, rtx rtl, int first)
28351 default_encode_section_info (decl, rtl, first);
28353 if (TREE_CODE (decl) == VAR_DECL
28354 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
28355 && ix86_in_large_data_p (decl))
28356 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
28359 /* Worker function for REVERSE_CONDITION. */
28362 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
28364 return (mode != CCFPmode && mode != CCFPUmode
28365 ? reverse_condition (code)
28366 : reverse_condition_maybe_unordered (code));
28369 /* Output code to perform an x87 FP register move, from OPERANDS[1]
28373 output_387_reg_move (rtx insn, rtx *operands)
28375 if (REG_P (operands[0]))
28377 if (REG_P (operands[1])
28378 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28380 if (REGNO (operands[0]) == FIRST_STACK_REG)
28381 return output_387_ffreep (operands, 0);
28382 return "fstp\t%y0";
28384 if (STACK_TOP_P (operands[0]))
28385 return "fld%Z1\t%y1";
28388 else if (MEM_P (operands[0]))
28390 gcc_assert (REG_P (operands[1]));
28391 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28392 return "fstp%Z0\t%y0";
28395 /* There is no non-popping store to memory for XFmode.
28396 So if we need one, follow the store with a load. */
28397 if (GET_MODE (operands[0]) == XFmode)
28398 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
28400 return "fst%Z0\t%y0";
28407 /* Output code to perform a conditional jump to LABEL, if C2 flag in
28408 FP status register is set. */
28411 ix86_emit_fp_unordered_jump (rtx label)
28413 rtx reg = gen_reg_rtx (HImode);
28416 emit_insn (gen_x86_fnstsw_1 (reg));
28418 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
28420 emit_insn (gen_x86_sahf_1 (reg));
28422 temp = gen_rtx_REG (CCmode, FLAGS_REG);
28423 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
28427 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
28429 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
28430 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
28433 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
28434 gen_rtx_LABEL_REF (VOIDmode, label),
28436 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
28438 emit_jump_insn (temp);
28439 predict_jump (REG_BR_PROB_BASE * 10 / 100);
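/* The emitted sequence is roughly the following (a sketch in AT&T
   syntax; the exact insns depend on TARGET_SAHF and tuning):

     fnstsw  %ax              store the FP status word into AX
     sahf                     C2 (bit 2 of AH) lands in PF
     jp      label            jump if unordered

   or, without SAHF:

     fnstsw  %ax
     testb   $0x04, %ah       test the C2 bit directly
     jne     label                                                 */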
28442 /* Output code to perform a log1p XFmode calculation. */
28444 void ix86_emit_i387_log1p (rtx op0, rtx op1)
28446 rtx label1 = gen_label_rtx ();
28447 rtx label2 = gen_label_rtx ();
28449 rtx tmp = gen_reg_rtx (XFmode);
28450 rtx tmp2 = gen_reg_rtx (XFmode);
28453 emit_insn (gen_absxf2 (tmp, op1));
28454 test = gen_rtx_GE (VOIDmode, tmp,
28455 CONST_DOUBLE_FROM_REAL_VALUE (
28456 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
28458 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
28460 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28461 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
28462 emit_jump (label2);
28464 emit_label (label1);
28465 emit_move_insn (tmp, CONST1_RTX (XFmode));
28466 emit_insn (gen_addxf3 (tmp, op1, tmp));
28467 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28468 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
28470 emit_label (label2);
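/* Scalar model (an illustrative sketch, not from the sources) of the
   expansion above.  fyl2xp1 computes y * log2 (x + 1) but is only
   accurate for small |x|, hence the range test; fyl2x computes
   y * log2 (x); both get y = ln (2) preloaded via fldln2:

     if (fabs (x) >= 0.29289321881345247...)       1 - sqrt (2) / 2
       return M_LN2 * log2 (1.0 + x);              fyl2x path
     else
       return M_LN2 * log2 (x + 1);                fyl2xp1 path      */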
28473 /* Output code to perform a Newton-Raphson approximation of a single precision
28474 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm].  */
28476 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
28478 rtx x0, x1, e0, e1, two;
28480 x0 = gen_reg_rtx (mode);
28481 e0 = gen_reg_rtx (mode);
28482 e1 = gen_reg_rtx (mode);
28483 x1 = gen_reg_rtx (mode);
28485 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
28487 if (VECTOR_MODE_P (mode))
28488 two = ix86_build_const_vector (SFmode, true, two);
28490 two = force_reg (mode, two);
28492 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
28494 /* x0 = rcp(b) estimate */
28495 emit_insn (gen_rtx_SET (VOIDmode, x0,
28496 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
28499 emit_insn (gen_rtx_SET (VOIDmode, e0,
28500 gen_rtx_MULT (mode, x0, a)));
28502 emit_insn (gen_rtx_SET (VOIDmode, e1,
28503 gen_rtx_MULT (mode, x0, b)));
28505 emit_insn (gen_rtx_SET (VOIDmode, x1,
28506 gen_rtx_MINUS (mode, two, e1)));
28507 /* res = e0 * x1 */
28508 emit_insn (gen_rtx_SET (VOIDmode, res,
28509 gen_rtx_MULT (mode, e0, x1)));
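/* Scalar sketch of the sequence above (illustration; rcp () stands
   for the ~12-bit accurate rcpss/rcpps hardware estimate):

     float x0 = rcp (b);                x0 ~= 1 / b
     float e0 = x0 * a;
     float e1 = x0 * b;
     float x1 = 2.0f - e1;
     return e0 * x1;                    one Newton-Raphson refinement  */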
28512 /* Output code to perform a Newton-Raphson approximation of a
28513 single precision floating point [reciprocal] square root.  */
28515 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
28518 rtx x0, e0, e1, e2, e3, mthree, mhalf;
28521 x0 = gen_reg_rtx (mode);
28522 e0 = gen_reg_rtx (mode);
28523 e1 = gen_reg_rtx (mode);
28524 e2 = gen_reg_rtx (mode);
28525 e3 = gen_reg_rtx (mode);
28527 real_from_integer (&r, VOIDmode, -3, -1, 0);
28528 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28530 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
28531 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28533 if (VECTOR_MODE_P (mode))
28535 mthree = ix86_build_const_vector (SFmode, true, mthree);
28536 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
28539 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
28540 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
28542 /* x0 = rsqrt(a) estimate */
28543 emit_insn (gen_rtx_SET (VOIDmode, x0,
28544 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
28547 /* If a == 0.0, mask the infinite rsqrt (0.0) estimate to zero to prevent a NaN for sqrt (0.0).  */
28552 zero = gen_reg_rtx (mode);
28553 mask = gen_reg_rtx (mode);
28555 zero = force_reg (mode, CONST0_RTX(mode));
28556 emit_insn (gen_rtx_SET (VOIDmode, mask,
28557 gen_rtx_NE (mode, zero, a)));
28559 emit_insn (gen_rtx_SET (VOIDmode, x0,
28560 gen_rtx_AND (mode, x0, mask)));
28564 emit_insn (gen_rtx_SET (VOIDmode, e0,
28565 gen_rtx_MULT (mode, x0, a)));
28567 emit_insn (gen_rtx_SET (VOIDmode, e1,
28568 gen_rtx_MULT (mode, e0, x0)));
28571 mthree = force_reg (mode, mthree);
28572 emit_insn (gen_rtx_SET (VOIDmode, e2,
28573 gen_rtx_PLUS (mode, e1, mthree)));
28575 mhalf = force_reg (mode, mhalf);
28577 /* e3 = -.5 * x0 */
28578 emit_insn (gen_rtx_SET (VOIDmode, e3,
28579 gen_rtx_MULT (mode, x0, mhalf)));
28581 /* e3 = -.5 * e0 */
28582 emit_insn (gen_rtx_SET (VOIDmode, e3,
28583 gen_rtx_MULT (mode, e0, mhalf)));
28584 /* ret = e2 * e3 */
28585 emit_insn (gen_rtx_SET (VOIDmode, res,
28586 gen_rtx_MULT (mode, e2, e3)));
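/* Scalar sketch of the sequence above (illustration; rsqrt () stands
   for the rsqrtss/rsqrtps hardware estimate):

     float x0 = rsqrt (a);              masked to 0.0 first if a == 0.0
     float e0 = x0 * a;
     float e1 = e0 * x0;                a * x0 * x0
     float e2 = e1 - 3.0f;              mthree is -3.0
     float e3 = (recip ? x0 : e0) * -0.5f;
     return e2 * e3;                    one Newton-Raphson refinement  */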
28589 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
28591 static void ATTRIBUTE_UNUSED
28592 i386_solaris_elf_named_section (const char *name, unsigned int flags,
28595 /* With Binutils 2.15, the "@unwind" marker must be specified on
28596 every occurrence of the ".eh_frame" section, not just the first one.  */
28599 && strcmp (name, ".eh_frame") == 0)
28601 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28602 flags & SECTION_WRITE ? "aw" : "a");
28605 default_elf_asm_named_section (name, flags, decl);
28608 /* Return the mangling of TYPE if it is an extended fundamental type. */
28610 static const char *
28611 ix86_mangle_type (const_tree type)
28613 type = TYPE_MAIN_VARIANT (type);
28615 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28616 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28619 switch (TYPE_MODE (type))
28622 /* __float128 is "g". */
28625 /* "long double" or __float80 is "e". */
28632 /* For 32-bit code we can save PIC register setup by using
28633 the __stack_chk_fail_local hidden function instead of calling
28634 __stack_chk_fail directly.  64-bit code doesn't need to set up any PIC
28635 register, so it is better to call __stack_chk_fail directly. */
28638 ix86_stack_protect_fail (void)
28640 return TARGET_64BIT
28641 ? default_external_stack_protect_fail ()
28642 : default_hidden_stack_protect_fail ();
28645 /* Select a format to encode pointers in exception handling data. CODE
28646 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28647 true if the symbol may be affected by dynamic relocations.
28649 ??? All x86 object file formats are capable of representing this.
28650 After all, the relocation needed is the same as for the call insn.
28651 Whether or not a particular assembler allows us to enter such, I
28652 guess we'll have to see. */
28654 asm_preferred_eh_data_format (int code, int global)
28658 int type = DW_EH_PE_sdata8;
28660 || ix86_cmodel == CM_SMALL_PIC
28661 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28662 type = DW_EH_PE_sdata4;
28663 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28665 if (ix86_cmodel == CM_SMALL
28666 || (ix86_cmodel == CM_MEDIUM && code))
28667 return DW_EH_PE_udata4;
28668 return DW_EH_PE_absptr;
28671 /* Expand copysign from SIGN to the positive value ABS_VALUE
28672 storing in RESULT. If MASK is non-null, it shall be a mask to mask out
28675 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28677 enum machine_mode mode = GET_MODE (sign);
28678 rtx sgn = gen_reg_rtx (mode);
28679 if (mask == NULL_RTX)
28681 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28682 if (!VECTOR_MODE_P (mode))
28684 /* We need to generate a scalar mode mask in this case. */
28685 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28686 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28687 mask = gen_reg_rtx (mode);
28688 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28692 mask = gen_rtx_NOT (mode, mask);
28693 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28694 gen_rtx_AND (mode, mask, sign)));
28695 emit_insn (gen_rtx_SET (VOIDmode, result,
28696 gen_rtx_IOR (mode, abs_value, sgn)));
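/* Bit-level model of the copysign above for the scalar SFmode case
   (an illustration, viewing the values as raw bit patterns):

     uint32_t sgn    = sign_bits & 0x80000000u;    AND with signbit mask
     uint32_t result = abs_bits | sgn;             ABS_VALUE is known to
                                                   be non-negative      */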
28699 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28700 mask for masking out the sign-bit is stored in *SMASK, if that is
28703 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28705 enum machine_mode mode = GET_MODE (op0);
28708 xa = gen_reg_rtx (mode);
28709 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28710 if (!VECTOR_MODE_P (mode))
28712 /* We need to generate a scalar mode mask in this case. */
28713 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28714 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28715 mask = gen_reg_rtx (mode);
28716 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28718 emit_insn (gen_rtx_SET (VOIDmode, xa,
28719 gen_rtx_AND (mode, op0, mask)));
28727 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28728 swapping the operands if SWAP_OPERANDS is true. The expanded
28729 code is a forward jump to a newly created label in case the
28730 comparison is true. The generated label rtx is returned. */
28732 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28733 bool swap_operands)
28744 label = gen_label_rtx ();
28745 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28746 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28747 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28748 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28749 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28750 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28751 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28752 JUMP_LABEL (tmp) = label;
28757 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28758 using comparison code CODE. Operands are swapped for the comparison if
28759 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
28761 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28762 bool swap_operands)
28764 enum machine_mode mode = GET_MODE (op0);
28765 rtx mask = gen_reg_rtx (mode);
28774 if (mode == DFmode)
28775 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28776 gen_rtx_fmt_ee (code, mode, op0, op1)));
28778 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28779 gen_rtx_fmt_ee (code, mode, op0, op1)));
28784 /* Generate and return a rtx of mode MODE for 2**n where n is the number
28785 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
28787 ix86_gen_TWO52 (enum machine_mode mode)
28789 REAL_VALUE_TYPE TWO52r;
28792 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28793 TWO52 = const_double_from_real_value (TWO52r, mode);
28794 TWO52 = force_reg (mode, TWO52);
28799 /* Expand SSE sequence for computing lround from OP1 storing
28802 ix86_expand_lround (rtx op0, rtx op1)
28804 /* C code for the stuff we're doing below:
28805 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28808 enum machine_mode mode = GET_MODE (op1);
28809 const struct real_format *fmt;
28810 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28813 /* load nextafter (0.5, 0.0) */
28814 fmt = REAL_MODE_FORMAT (mode);
28815 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28816 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28818 /* adj = copysign (0.5, op1) */
28819 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28820 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28822 /* adj = op1 + adj */
28823 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28825 /* op0 = (imode)adj */
28826 expand_fix (op0, adj, 0);
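/* Note on the constant (an explanatory assumption about intent): using
   nextafter (0.5, 0.0) instead of 0.5 matters for the largest double
   X < 0.5, where X + 0.5 rounds up to exactly 1.0 and the conversion
   would then yield 1 instead of the correct 0; with pred_half the sum
   stays strictly below 1.0.  */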
28829 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing
28832 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28834 /* C code for the stuff we're doing below (for do_floor):
28836 xi -= (double)xi > op1 ? 1 : 0;
28839 enum machine_mode fmode = GET_MODE (op1);
28840 enum machine_mode imode = GET_MODE (op0);
28841 rtx ireg, freg, label, tmp;
28843 /* reg = (long)op1 */
28844 ireg = gen_reg_rtx (imode);
28845 expand_fix (ireg, op1, 0);
28847 /* freg = (double)reg */
28848 freg = gen_reg_rtx (fmode);
28849 expand_float (freg, ireg, 0);
28851 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28852 label = ix86_expand_sse_compare_and_jump (UNLE,
28853 freg, op1, !do_floor);
28854 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28855 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28856 emit_move_insn (ireg, tmp);
28858 emit_label (label);
28859 LABEL_NUSES (label) = 1;
28861 emit_move_insn (op0, ireg);
28864 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28865 result in OPERAND0. */
28867 ix86_expand_rint (rtx operand0, rtx operand1)
28869 /* C code for the stuff we're doing below:
28870 xa = fabs (operand1);
28871 if (!isless (xa, 2**52))
28873 xa = xa + 2**52 - 2**52;
28874 return copysign (xa, operand1);
28876 enum machine_mode mode = GET_MODE (operand0);
28877 rtx res, xa, label, TWO52, mask;
28879 res = gen_reg_rtx (mode);
28880 emit_move_insn (res, operand1);
28882 /* xa = abs (operand1) */
28883 xa = ix86_expand_sse_fabs (res, &mask);
28885 /* if (!isless (xa, TWO52)) goto label; */
28886 TWO52 = ix86_gen_TWO52 (mode);
28887 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28889 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28890 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28892 ix86_sse_copysign_to_positive (res, xa, res, mask);
28894 emit_label (label);
28895 LABEL_NUSES (label) = 1;
28897 emit_move_insn (operand0, res);
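/* Note on the xa + 2**52 - 2**52 trick used above (illustrative): once
   2**52 is added, the 52-bit double mantissa has no bits left below
   the units place, so the addition itself rounds xa to an integer in
   the current rounding mode; subtracting 2**52 recovers that integer
   exactly.  This is only valid for xa < 2**52, hence the guard (the
   SFmode case uses 2**23 analogously).  */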
28900 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28903 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28905 /* C code for the stuff we expand below.
28906 double xa = fabs (x), x2;
28907 if (!isless (xa, TWO52))
28909 xa = xa + TWO52 - TWO52;
28910 x2 = copysign (xa, x);
28919 enum machine_mode mode = GET_MODE (operand0);
28920 rtx xa, TWO52, tmp, label, one, res, mask;
28922 TWO52 = ix86_gen_TWO52 (mode);
28924 /* Temporary for holding the result, initialized to the input
28925 operand to ease control flow. */
28926 res = gen_reg_rtx (mode);
28927 emit_move_insn (res, operand1);
28929 /* xa = abs (operand1) */
28930 xa = ix86_expand_sse_fabs (res, &mask);
28932 /* if (!isless (xa, TWO52)) goto label; */
28933 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28935 /* xa = xa + TWO52 - TWO52; */
28936 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28937 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28939 /* xa = copysign (xa, operand1) */
28940 ix86_sse_copysign_to_positive (xa, xa, res, mask);
28942 /* generate 1.0 or -1.0 */
28943 one = force_reg (mode,
28944 const_double_from_real_value (do_floor
28945 ? dconst1 : dconstm1, mode));
28947 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28948 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28949 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28950 gen_rtx_AND (mode, one, tmp)));
28951 /* We always need to subtract here to preserve signed zero. */
28952 tmp = expand_simple_binop (mode, MINUS,
28953 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28954 emit_move_insn (res, tmp);
28956 emit_label (label);
28957 LABEL_NUSES (label) = 1;
28959 emit_move_insn (operand0, res);
28962 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28965 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
28967 /* C code for the stuff we expand below.
28968 double xa = fabs (x), x2;
28969 if (!isless (xa, TWO52))
28971 x2 = (double)(long)x;
28978 if (HONOR_SIGNED_ZEROS (mode))
28979 return copysign (x2, x);
28982 enum machine_mode mode = GET_MODE (operand0);
28983 rtx xa, xi, TWO52, tmp, label, one, res, mask;
28985 TWO52 = ix86_gen_TWO52 (mode);
28987 /* Temporary for holding the result, initialized to the input
28988 operand to ease control flow. */
28989 res = gen_reg_rtx (mode);
28990 emit_move_insn (res, operand1);
28992 /* xa = abs (operand1) */
28993 xa = ix86_expand_sse_fabs (res, &mask);
28995 /* if (!isless (xa, TWO52)) goto label; */
28996 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28998 /* xa = (double)(long)x */
28999 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29000 expand_fix (xi, res, 0);
29001 expand_float (xa, xi, 0);
29004 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
29006 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
29007 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
29008 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29009 gen_rtx_AND (mode, one, tmp)));
29010 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
29011 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29012 emit_move_insn (res, tmp);
29014 if (HONOR_SIGNED_ZEROS (mode))
29015 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
29017 emit_label (label);
29018 LABEL_NUSES (label) = 1;
29020 emit_move_insn (operand0, res);
29023 /* Expand SSE sequence for computing round from OPERAND1 storing
29024 into OPERAND0. Sequence that works without relying on DImode truncation
29025 via cvttsd2siq that is only available on 64-bit targets.  */
29027 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
29029 /* C code for the stuff we expand below.
29030 double xa = fabs (x), xa2, x2;
29031 if (!isless (xa, TWO52))
29033 Using the absolute value and copying back sign makes
29034 -0.0 -> -0.0 correct.
29035 xa2 = xa + TWO52 - TWO52;
29040 else if (dxa > 0.5)
29042 x2 = copysign (xa2, x);
29045 enum machine_mode mode = GET_MODE (operand0);
29046 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
29048 TWO52 = ix86_gen_TWO52 (mode);
29050 /* Temporary for holding the result, initialized to the input
29051 operand to ease control flow. */
29052 res = gen_reg_rtx (mode);
29053 emit_move_insn (res, operand1);
29055 /* xa = abs (operand1) */
29056 xa = ix86_expand_sse_fabs (res, &mask);
29058 /* if (!isless (xa, TWO52)) goto label; */
29059 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29061 /* xa2 = xa + TWO52 - TWO52; */
29062 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29063 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
29065 /* dxa = xa2 - xa; */
29066 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
29068 /* generate 0.5, 1.0 and -0.5 */
29069 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
29070 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
29071 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
29075 tmp = gen_reg_rtx (mode);
29076 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
29077 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
29078 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29079 gen_rtx_AND (mode, one, tmp)));
29080 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29081 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
29082 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
29083 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29084 gen_rtx_AND (mode, one, tmp)));
29085 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29087 /* res = copysign (xa2, operand1) */
29088 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
29090 emit_label (label);
29091 LABEL_NUSES (label) = 1;
29093 emit_move_insn (operand0, res);
29096 /* Expand SSE sequence for computing trunc from OPERAND1 storing
29099 ix86_expand_trunc (rtx operand0, rtx operand1)
29101 /* C code for SSE variant we expand below.
29102 double xa = fabs (x), x2;
29103 if (!isless (xa, TWO52))
29105 x2 = (double)(long)x;
29106 if (HONOR_SIGNED_ZEROS (mode))
29107 return copysign (x2, x);
29110 enum machine_mode mode = GET_MODE (operand0);
29111 rtx xa, xi, TWO52, label, res, mask;
29113 TWO52 = ix86_gen_TWO52 (mode);
29115 /* Temporary for holding the result, initialized to the input
29116 operand to ease control flow. */
29117 res = gen_reg_rtx (mode);
29118 emit_move_insn (res, operand1);
29120 /* xa = abs (operand1) */
29121 xa = ix86_expand_sse_fabs (res, &mask);
29123 /* if (!isless (xa, TWO52)) goto label; */
29124 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29126 /* x = (double)(long)x */
29127 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29128 expand_fix (xi, res, 0);
29129 expand_float (res, xi, 0);
29131 if (HONOR_SIGNED_ZEROS (mode))
29132 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
29134 emit_label (label);
29135 LABEL_NUSES (label) = 1;
29137 emit_move_insn (operand0, res);
29140 /* Expand SSE sequence for computing trunc from OPERAND1 storing
29143 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
29145 enum machine_mode mode = GET_MODE (operand0);
29146 rtx xa, mask, TWO52, label, one, res, smask, tmp;
29148 /* C code for SSE variant we expand below.
29149 double xa = fabs (x), x2;
29150 if (!isless (xa, TWO52))
29152 xa2 = xa + TWO52 - TWO52;
29156 x2 = copysign (xa2, x);
29160 TWO52 = ix86_gen_TWO52 (mode);
29162 /* Temporary for holding the result, initialized to the input
29163 operand to ease control flow. */
29164 res = gen_reg_rtx (mode);
29165 emit_move_insn (res, operand1);
29167 /* xa = abs (operand1) */
29168 xa = ix86_expand_sse_fabs (res, &smask);
29170 /* if (!isless (xa, TWO52)) goto label; */
29171 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29173 /* res = xa + TWO52 - TWO52; */
29174 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29175 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
29176 emit_move_insn (res, tmp);
29179 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
29181 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
29182 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
29183 emit_insn (gen_rtx_SET (VOIDmode, mask,
29184 gen_rtx_AND (mode, mask, one)));
29185 tmp = expand_simple_binop (mode, MINUS,
29186 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
29187 emit_move_insn (res, tmp);
29189 /* res = copysign (res, operand1) */
29190 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
29192 emit_label (label);
29193 LABEL_NUSES (label) = 1;
29195 emit_move_insn (operand0, res);
29198 /* Expand SSE sequence for computing round from OPERAND1 storing
29201 ix86_expand_round (rtx operand0, rtx operand1)
29203 /* C code for the stuff we're doing below:
29204 double xa = fabs (x);
29205 if (!isless (xa, TWO52))
29207 xa = (double)(long)(xa + nextafter (0.5, 0.0));
29208 return copysign (xa, x);
29210 enum machine_mode mode = GET_MODE (operand0);
29211 rtx res, TWO52, xa, label, xi, half, mask;
29212 const struct real_format *fmt;
29213 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
29215 /* Temporary for holding the result, initialized to the input
29216 operand to ease control flow. */
29217 res = gen_reg_rtx (mode);
29218 emit_move_insn (res, operand1);
29220 TWO52 = ix86_gen_TWO52 (mode);
29221 xa = ix86_expand_sse_fabs (res, &mask);
29222 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29224 /* load nextafter (0.5, 0.0) */
29225 fmt = REAL_MODE_FORMAT (mode);
29226 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
29227 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
29229 /* xa = xa + 0.5 */
29230 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
29231 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
29233 /* xa = (double)(int64_t)xa */
29234 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29235 expand_fix (xi, xa, 0);
29236 expand_float (xa, xi, 0);
29238 /* res = copysign (xa, operand1) */
29239 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
29241 emit_label (label);
29242 LABEL_NUSES (label) = 1;
29244 emit_move_insn (operand0, res);
29248 /* Table of valid machine attributes. */
29249 static const struct attribute_spec ix86_attribute_table[] =
29251 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
29252 /* Stdcall attribute says callee is responsible for popping arguments
29253 if they are not variable. */
29254 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29255 /* Fastcall attribute says callee is responsible for popping arguments
29256 if they are not variable. */
29257 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29258 /* Thiscall attribute says callee is responsible for popping arguments
29259 if they are not variable. */
29260 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29261 /* Cdecl attribute says the callee is a normal C declaration */
29262 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29263 /* Regparm attribute specifies how many integer arguments are to be
29264 passed in registers. */
29265 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
29266 /* Sseregparm attribute says we are using x86_64 calling conventions
29267 for FP arguments. */
29268 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29269 /* force_align_arg_pointer says this function realigns the stack at entry. */
29270 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
29271 false, true, true, ix86_handle_cconv_attribute },
29272 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29273 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
29274 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
29275 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
29277 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29278 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29279 #ifdef SUBTARGET_ATTRIBUTE_TABLE
29280 SUBTARGET_ATTRIBUTE_TABLE,
29282 /* ms_abi and sysv_abi calling convention function attributes. */
29283 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29284 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29285 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute },
29287 { NULL, 0, 0, false, false, false, NULL }
29290 /* Implement targetm.vectorize.builtin_vectorization_cost. */
29292 ix86_builtin_vectorization_cost (bool runtime_test)
29294 /* If the branch of the runtime test is taken, i.e. the vectorized
29295 version is skipped, this incurs a misprediction cost (because the
29296 vectorized version is expected to be the fall-through).  So we subtract
29297 the latency of a mispredicted branch from the costs that are incurred
29298 when the vectorized version is executed.
29300 TODO: The values in individual target tables have to be tuned or new
29301 fields may be needed.  E.g. on K8, the default branch path is the
29302 not-taken path.  If the taken path is predicted correctly, the minimum
29303 penalty of going down the taken path is 1 cycle.  If the taken path is
29304 not predicted correctly, then the minimum penalty is 10 cycles.  */
29308 return (-(ix86_cost->cond_taken_branch_cost));
29314 /* Implement targetm.vectorize.builtin_vec_perm. */
29317 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
29319 tree itype = TREE_TYPE (vec_type);
29320 bool u = TYPE_UNSIGNED (itype);
29321 enum machine_mode vmode = TYPE_MODE (vec_type);
29322 enum ix86_builtins fcode = fcode; /* Silence bogus warning. */
29323 bool ok = TARGET_SSE2;
29329 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
29332 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
29334 itype = ix86_get_builtin_type (IX86_BT_DI);
29339 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
29343 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
29345 itype = ix86_get_builtin_type (IX86_BT_SI);
29349 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
29352 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
29355 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
29358 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
29368 *mask_type = itype;
29369 return ix86_builtins[(int) fcode];
29372 /* Return a vector mode with twice as many elements as VMODE. */
29373 /* ??? Consider moving this to a table generated by genmodes.c. */
29375 static enum machine_mode
29376 doublesize_vector_mode (enum machine_mode vmode)
29380 case V2SFmode: return V4SFmode;
29381 case V1DImode: return V2DImode;
29382 case V2SImode: return V4SImode;
29383 case V4HImode: return V8HImode;
29384 case V8QImode: return V16QImode;
29386 case V2DFmode: return V4DFmode;
29387 case V4SFmode: return V8SFmode;
29388 case V2DImode: return V4DImode;
29389 case V4SImode: return V8SImode;
29390 case V8HImode: return V16HImode;
29391 case V16QImode: return V32QImode;
29393 case V4DFmode: return V8DFmode;
29394 case V8SFmode: return V16SFmode;
29395 case V4DImode: return V8DImode;
29396 case V8SImode: return V16SImode;
29397 case V16HImode: return V32HImode;
29398 case V32QImode: return V64QImode;
29401 gcc_unreachable ();
29405 /* Construct (set target (vec_select op0 (parallel perm))) and
29406 return true if that's a valid instruction in the active ISA. */
29409 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
29411 rtx rperm[MAX_VECT_LEN], x;
29414 for (i = 0; i < nelt; ++i)
29415 rperm[i] = GEN_INT (perm[i]);
29417 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
29418 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
29419 x = gen_rtx_SET (VOIDmode, target, x);
29422 if (recog_memoized (x) < 0)
29430 /* Similar, but generate a vec_concat from op0 and op1 as well. */
29433 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
29434 const unsigned char *perm, unsigned nelt)
29436 enum machine_mode v2mode;
29439 v2mode = doublesize_vector_mode (GET_MODE (op0));
29440 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
29441 return expand_vselect (target, x, perm, nelt);
29444 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29445 in terms of blendp[sd] / pblendw / pblendvb. */
29448 expand_vec_perm_blend (struct expand_vec_perm_d *d)
29450 enum machine_mode vmode = d->vmode;
29451 unsigned i, mask, nelt = d->nelt;
29452 rtx target, op0, op1, x;
29454 if (!TARGET_SSE4_1 || d->op0 == d->op1)
29456 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
29459 /* This is a blend, not a permute. Elements must stay in their
29460 respective lanes. */
29461 for (i = 0; i < nelt; ++i)
29463 unsigned e = d->perm[i];
29464 if (!(e == i || e == i + nelt))
29471 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
29472 decision should be extracted elsewhere, so that we only try that
29473 sequence once all budget==3 options have been tried. */
29475 /* For bytes, see if bytes move in pairs so we can use pblendw with
29476 an immediate argument, rather than pblendvb with a vector argument. */
29477 if (vmode == V16QImode)
29479 bool pblendw_ok = true;
29480 for (i = 0; i < 16 && pblendw_ok; i += 2)
29481 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
29485 rtx rperm[16], vperm;
29487 for (i = 0; i < nelt; ++i)
29488 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
29490 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29491 vperm = force_reg (V16QImode, vperm);
29493 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
29498 target = d->target;
29510 for (i = 0; i < nelt; ++i)
29511 mask |= (d->perm[i] >= nelt) << i;
29515 for (i = 0; i < 2; ++i)
29516 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
29520 for (i = 0; i < 4; ++i)
29521 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
29525 for (i = 0; i < 8; ++i)
29526 mask |= (d->perm[i * 2] >= 16) << i;
29530 target = gen_lowpart (vmode, target);
29531 op0 = gen_lowpart (vmode, op0);
29532 op1 = gen_lowpart (vmode, op1);
29536 gcc_unreachable ();
29539 /* This matches five different patterns with the different modes. */
29540 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
29541 x = gen_rtx_SET (VOIDmode, target, x);
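/* Mask example (illustration): for a V4SF blend with d->perm
   = { 0, 5, 2, 7 }, elements 1 and 3 are taken from op1, so mask
   = (1 << 1) | (1 << 3) = 0xa, and the emitted insn is effectively
   blendps $0xa.  */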
29547 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29548 in terms of the variable form of vpermilps.
29550 Note that we will have already failed the immediate input vpermilps,
29551 which requires that the high and low part shuffle be identical; the
29552 variable form doesn't require that. */
29555 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
29557 rtx rperm[8], vperm;
29560 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
29563 /* We can only permute within the 128-bit lane. */
29564 for (i = 0; i < 8; ++i)
29566 unsigned e = d->perm[i];
29567 if (i < 4 ? e >= 4 : e < 4)
29574 for (i = 0; i < 8; ++i)
29576 unsigned e = d->perm[i];
29578 /* Within each 128-bit lane, the elements of op0 are numbered
29579 from 0 and the elements of op1 are numbered from 4. */
29585 rperm[i] = GEN_INT (e);
29588 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
29589 vperm = force_reg (V8SImode, vperm);
29590 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
29595 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29596 in terms of pshufb or vpperm. */
29599 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
29601 unsigned i, nelt, eltsz;
29602 rtx rperm[16], vperm, target, op0, op1;
29604 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
29606 if (GET_MODE_SIZE (d->vmode) != 16)
29613 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29615 for (i = 0; i < nelt; ++i)
29617 unsigned j, e = d->perm[i];
29618 for (j = 0; j < eltsz; ++j)
29619 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
29622 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29623 vperm = force_reg (V16QImode, vperm);
29625 target = gen_lowpart (V16QImode, d->target);
29626 op0 = gen_lowpart (V16QImode, d->op0);
29627 if (d->op0 == d->op1)
29628 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
29631 op1 = gen_lowpart (V16QImode, d->op1);
29632 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
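/* Control-vector example (illustration): a V4SI permutation with
   d->perm = { 2, 1, 0, 3 } has eltsz = 4, so the V16QI control
   built above is

     { 8,9,10,11,  4,5,6,7,  0,1,2,3,  12,13,14,15 }

   i.e. each element index expands to the eltsz byte indices that
   compose it.  */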
29638 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
29639 in a single instruction. */
29642 expand_vec_perm_1 (struct expand_vec_perm_d *d)
29644 unsigned i, nelt = d->nelt;
29645 unsigned char perm2[MAX_VECT_LEN];
29647 /* Check plain VEC_SELECT first, because AVX has instructions that could
29648 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
29649 input where SEL+CONCAT may not. */
29650 if (d->op0 == d->op1)
29652 int mask = nelt - 1;
29654 for (i = 0; i < nelt; i++)
29655 perm2[i] = d->perm[i] & mask;
29657 if (expand_vselect (d->target, d->op0, perm2, nelt))
29660 /* There are plenty of patterns in sse.md that are written for
29661 SEL+CONCAT and are not replicated for a single op. Perhaps
29662 that should be changed, to avoid the nastiness here. */
29664 /* Recognize interleave style patterns, which means incrementing
29665 every other permutation operand. */
29666 for (i = 0; i < nelt; i += 2)
29668 perm2[i] = d->perm[i] & mask;
29669 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
29671 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29674 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
29677 for (i = 0; i < nelt; i += 4)
29679 perm2[i + 0] = d->perm[i + 0] & mask;
29680 perm2[i + 1] = d->perm[i + 1] & mask;
29681 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
29682 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
29685 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29690 /* Finally, try the fully general two operand permute. */
29691 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
29694 /* Recognize interleave style patterns with reversed operands. */
29695 if (d->op0 != d->op1)
29697 for (i = 0; i < nelt; ++i)
29699 unsigned e = d->perm[i];
29707 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
29711 /* Try the SSE4.1 blend variable merge instructions. */
29712 if (expand_vec_perm_blend (d))
29715 /* Try one of the AVX vpermil variable permutations. */
29716 if (expand_vec_perm_vpermil (d))
29719 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
29720 if (expand_vec_perm_pshufb (d))
29726 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29727 in terms of a pair of pshuflw + pshufhw instructions. */
29730 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
29732 unsigned char perm2[MAX_VECT_LEN];
29736 if (d->vmode != V8HImode || d->op0 != d->op1)
29739 /* The two permutations only operate in 64-bit lanes. */
29740 for (i = 0; i < 4; ++i)
29741 if (d->perm[i] >= 4)
29743 for (i = 4; i < 8; ++i)
29744 if (d->perm[i] < 4)
29750 /* Emit the pshuflw. */
29751 memcpy (perm2, d->perm, 4);
29752 for (i = 4; i < 8; ++i)
29754 ok = expand_vselect (d->target, d->op0, perm2, 8);
29757 /* Emit the pshufhw. */
29758 memcpy (perm2 + 4, d->perm + 4, 4);
29759 for (i = 0; i < 4; ++i)
29761 ok = expand_vselect (d->target, d->target, perm2, 8);
29767 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29768 the permutation using the SSSE3 palignr instruction. This succeeds
29769 when all of the elements in PERM fit within one vector and we merely
29770 need to shift them down so that a single vector permutation has a
29771 chance to succeed. */
29774 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
29776 unsigned i, nelt = d->nelt;
29781 /* Even with AVX, palignr only operates on 128-bit vectors. */
29782 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29785 min = nelt, max = 0;
29786 for (i = 0; i < nelt; ++i)
29788 unsigned e = d->perm[i];
29794 if (min == 0 || max - min >= nelt)
29797 /* Given that we have SSSE3, we know we'll be able to implement the
29798 single operand permutation after the palignr with pshufb. */
29802 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
29803 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
29804 gen_lowpart (TImode, d->op1),
29805 gen_lowpart (TImode, d->op0), shift));
29807 d->op0 = d->op1 = d->target;
29810 for (i = 0; i < nelt; ++i)
29812 unsigned e = d->perm[i] - min;
29818 /* Test for the degenerate case where the alignment by itself
29819 produces the desired permutation. */
29823 ok = expand_vec_perm_1 (d);
29829 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29830 a two vector permutation into a single vector permutation by using
29831 an interleave operation to merge the vectors. */
29834 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
29836 struct expand_vec_perm_d dremap, dfinal;
29837 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
29838 unsigned contents, h1, h2, h3, h4;
29839 unsigned char remap[2 * MAX_VECT_LEN];
29843 if (d->op0 == d->op1)
29846 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
29847 lanes. We can use similar techniques with the vperm2f128 instruction,
29848 but it requires slightly different logic. */
29849 if (GET_MODE_SIZE (d->vmode) != 16)
29852 /* Examine from whence the elements come. */
29854 for (i = 0; i < nelt; ++i)
29855 contents |= 1u << d->perm[i];
29857 /* Split the two input vectors into 4 halves. */
29858 h1 = (1u << nelt2) - 1;
29863 memset (remap, 0xff, sizeof (remap));
29866 /* If the elements are all from the low halves, use interleave low, and
29867 similarly for interleave high.  If the elements are from mismatched
29868 halves, we can use shufps for V4SF/V4SI or do a DImode shuffle.  */
29869 if ((contents & (h1 | h3)) == contents)
29871 for (i = 0; i < nelt2; ++i)
29874 remap[i + nelt] = i * 2 + 1;
29875 dremap.perm[i * 2] = i;
29876 dremap.perm[i * 2 + 1] = i + nelt;
29879 else if ((contents & (h2 | h4)) == contents)
29881 for (i = 0; i < nelt2; ++i)
29883 remap[i + nelt2] = i * 2;
29884 remap[i + nelt + nelt2] = i * 2 + 1;
29885 dremap.perm[i * 2] = i + nelt2;
29886 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
29889 else if ((contents & (h1 | h4)) == contents)
29891 for (i = 0; i < nelt2; ++i)
29894 remap[i + nelt + nelt2] = i + nelt2;
29895 dremap.perm[i] = i;
29896 dremap.perm[i + nelt2] = i + nelt + nelt2;
29900 dremap.vmode = V2DImode;
29902 dremap.perm[0] = 0;
29903 dremap.perm[1] = 3;
29906 else if ((contents & (h2 | h3)) == contents)
29908 for (i = 0; i < nelt2; ++i)
29910 remap[i + nelt2] = i;
29911 remap[i + nelt] = i + nelt2;
29912 dremap.perm[i] = i + nelt2;
29913 dremap.perm[i + nelt2] = i + nelt;
29917 dremap.vmode = V2DImode;
29919 dremap.perm[0] = 1;
29920 dremap.perm[1] = 2;
29926 /* Use the remapping array set up above to move the elements from their
29927 swizzled locations into their final destinations. */
29929 for (i = 0; i < nelt; ++i)
29931 unsigned e = remap[d->perm[i]];
29932 gcc_assert (e < nelt);
29933 dfinal.perm[i] = e;
29935 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
29936 dfinal.op1 = dfinal.op0;
29937 dremap.target = dfinal.op0;
29939 /* Test if the final remap can be done with a single insn. For V4SFmode or
29940 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
29942 ok = expand_vec_perm_1 (&dfinal);
29943 seq = get_insns ();
29949 if (dremap.vmode != dfinal.vmode)
29951 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
29952 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
29953 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
29956 ok = expand_vec_perm_1 (&dremap);
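/* Worked example (illustration): for V4SF with d->perm = { 0, 1, 4, 5 }
   every element comes from the two low halves (h1 | h3), so dremap
   becomes the interleave-low permutation { 0, 4, 1, 5 } and the remap
   array yields dfinal.perm = { 0, 2, 1, 3 }; an unpcklps followed by a
   single one-operand shuffle then produces { a0, a1, b0, b1 }.  */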
29963 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
29964 permutation with two pshufb insns and an ior. We should have already
29965 failed all two instruction sequences. */
29968 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
29970 rtx rperm[2][16], vperm, l, h, op, m128;
29971 unsigned int i, nelt, eltsz;
29973 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29975 gcc_assert (d->op0 != d->op1);
29978 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29980 /* Generate two permutation masks. If the required element is within
29981 the given vector it is shuffled into the proper lane. If the required
29982 element is in the other vector, force a zero into the lane by setting
29983 bit 7 in the permutation mask. */
29984 m128 = GEN_INT (-128);
29985 for (i = 0; i < nelt; ++i)
29987 unsigned j, e = d->perm[i];
29988 unsigned which = (e >= nelt);
29992 for (j = 0; j < eltsz; ++j)
29994 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
29995 rperm[1-which][i*eltsz + j] = m128;
29999 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
30000 vperm = force_reg (V16QImode, vperm);
30002 l = gen_reg_rtx (V16QImode);
30003 op = gen_lowpart (V16QImode, d->op0);
30004 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
30006 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
30007 vperm = force_reg (V16QImode, vperm);
30009 h = gen_reg_rtx (V16QImode);
30010 op = gen_lowpart (V16QImode, d->op1);
30011 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
30013 op = gen_lowpart (V16QImode, d->target);
30014 emit_insn (gen_iorv16qi3 (op, l, h));
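/* Two-mask example (illustration): to route byte 3 of op0 into lane 0
   and byte 18 (i.e. byte 2 of op1) into lane 1, the masks built above
   start { 3, 0x80, ... } for op0 and { 0x80, 2, ... } for op1; a set
   bit 7 makes pshufb write zero into that lane, so the final ior
   simply merges the two half-results.  */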
30019 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
30020 and extract-odd permutations. */
30023 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
30025 rtx t1, t2, t3, t4;
30030 t1 = gen_reg_rtx (V4DFmode);
30031 t2 = gen_reg_rtx (V4DFmode);
30033 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
30034 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
30035 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
30037 /* Now an unpck[lh]pd will produce the result required. */
30039 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
30041 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
30047 static const unsigned char perm1[8] = { 0, 2, 1, 3, 4, 6, 5, 7 };
30048 static const unsigned char perme[8] = { 0, 1, 8, 9, 4, 5, 12, 13 };
30049 static const unsigned char permo[8] = { 2, 3, 10, 11, 6, 7, 14, 15 };
30051 t1 = gen_reg_rtx (V8SFmode);
30052 t2 = gen_reg_rtx (V8SFmode);
30053 t3 = gen_reg_rtx (V8SFmode);
30054 t4 = gen_reg_rtx (V8SFmode);
30056 /* Shuffle within the 128-bit lanes to produce:
30057 { 0 2 1 3 4 6 5 7 } and { 8 a 9 b c e d f }. */
30058 expand_vselect (t1, d->op0, perm1, 8);
30059 expand_vselect (t2, d->op1, perm1, 8);
30061 /* Shuffle the lanes around to produce:
30062 { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }. */
30063 emit_insn (gen_avx_vperm2f128v8sf3 (t3, t1, t2, GEN_INT (0x20)));
30064 emit_insn (gen_avx_vperm2f128v8sf3 (t4, t1, t2, GEN_INT (0x31)));
30066 /* Now a vpermil2p will produce the result required. */
30067 /* ??? The vpermil2p requires a vector constant. Another option
30068 is a unpck[lh]ps to merge the two vectors to produce
30069 { 0 4 2 6 8 c a e } or { 1 5 3 7 9 d b f }. Then use another
30070 vpermilps to get the elements into the final order. */
30073 memcpy (d->perm, odd ? permo : perme, 8);
30074 expand_vec_perm_vpermil (d);
30082 /* These are always directly implementable by expand_vec_perm_1. */
30083 gcc_unreachable ();
30087 return expand_vec_perm_pshufb2 (d);
30090 /* We need 2*log2(N)-1 operations to achieve odd/even
30091 with interleave. */
30092 t1 = gen_reg_rtx (V8HImode);
30093 t2 = gen_reg_rtx (V8HImode);
30094 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
30095 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
30096 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
30097 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
30099 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
30101 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
30108 return expand_vec_perm_pshufb2 (d);
30111 t1 = gen_reg_rtx (V16QImode);
30112 t2 = gen_reg_rtx (V16QImode);
30113 t3 = gen_reg_rtx (V16QImode);
30114 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
30115 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
30116 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
30117 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
30118 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
30119 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
30121 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
30123 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
30129 gcc_unreachable ();
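/* Worked example (illustration) of the V8HI interleave sequence above,
   extracting the even elements of A = { a0..a7 } and B = { b0..b7 }:

     t1     = high (A, B)       = { a4 b4 a5 b5 a6 b6 a7 b7 }
     target = low  (A, B)       = { a0 b0 a1 b1 a2 b2 a3 b3 }
     t2     = high (target, t1) = { a2 a6 b2 b6 a3 a7 b3 b7 }
     target = low  (target, t1) = { a0 a4 b0 b4 a1 a5 b1 b5 }
     target = low  (target, t2) = { a0 a2 a4 a6 b0 b2 b4 b6 }

   i.e. 2*log2(8)-1 = 5 insns; the odd elements fall out of a final
   interleave-high instead.  */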
30135 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
30136 extract-even and extract-odd permutations. */
static bool
expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
{
  unsigned i, odd, nelt = d->nelt;

  odd = d->perm[0];
  if (odd != 0 && odd != 1)
    return false;

  for (i = 1; i < nelt; ++i)
    if (d->perm[i] != 2 * i + odd)
      return false;

  return expand_vec_perm_even_odd_1 (d, odd);
}
30154 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
30155 permutations. We assume that expand_vec_perm_1 has already failed. */
static bool
expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
{
  unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
  enum machine_mode vmode = d->vmode;
  unsigned char perm2[4];
  rtx op0 = d->op0;
  bool ok;

  switch (vmode)
    {
    case V4DFmode:
    case V8SFmode:
30170 /* These are special-cased in sse.md so that we can optionally
30171 use the vbroadcast instruction. They expand to two insns
30172 if the input happens to be in a register. */
      gcc_unreachable ();

    case V2DFmode:
    case V2DImode:
    case V4SFmode:
    case V4SImode:
      /* These are always implementable using standard shuffle patterns.  */
      gcc_unreachable ();

    case V8HImode:
    case V16QImode:
      /* These can be implemented via interleave.  We save one insn by
	 stopping once we have promoted to V4SImode and then using pshufd.  */
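      /* Interleaving OP0 with itself duplicates every element, and each
	 element of the result viewed in the next wider mode is one
	 original element repeated.  Choosing the low or high interleave
	 keeps ELT in the surviving half, so once we reach V4SImode each
	 SImode slot is filled with copies of a single original element
	 and one pshufd can replicate the slot holding ELT.  */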
      do
	{
	  optab otab = vec_interleave_low_optab;

	  if (elt >= nelt2)
	    {
	      otab = vec_interleave_high_optab;
	      elt -= nelt2;
	    }
	  nelt2 /= 2;
30197 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
30198 vmode = get_mode_wider_vector (vmode);
30199 op0 = gen_lowpart (vmode, op0);
	}
      while (vmode != V4SImode);
30203 memset (perm2, elt, 4);
      ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
      gcc_assert (ok);
      return true;

    default:
      gcc_unreachable ();
    }
}
30213 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
30214 broadcast permutations. */
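/* For example, broadcasting element 3 of a V4SImode register is the
   constant mask { 3, 3, 3, 3 } applied with both operands identical.  */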
static bool
expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
{
  unsigned i, elt, nelt = d->nelt;

  if (d->op0 != d->op1)
    return false;

  elt = d->perm[0];
  for (i = 1; i < nelt; ++i)
    if (d->perm[i] != elt)
      return false;

  return expand_vec_perm_broadcast_1 (d);
}
30232 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
30233 With all of the interface bits taken care of, perform the expansion
30234 in D and return true on success. */
static bool
ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
{
  /* Try a single instruction expansion.  */
  if (expand_vec_perm_1 (d))
    return true;

  /* Try sequences of two instructions.  */

  if (expand_vec_perm_pshuflw_pshufhw (d))
    return true;

  if (expand_vec_perm_palignr (d))
    return true;

  if (expand_vec_perm_interleave2 (d))
    return true;

  if (expand_vec_perm_broadcast (d))
    return true;

  /* Try sequences of three instructions.  */

  if (expand_vec_perm_pshufb2 (d))
    return true;

  /* ??? Look for narrow permutations whose element orderings would
     allow the promotion to a wider mode.  */

  /* ??? Look for sequences of interleave or a wider permute that place
     the data into the correct lanes for a half-vector shuffle like
     pshuf[lh]w or vpermilps.  */

  /* ??? Look for sequences of interleave that produce the desired results.
     The combinatorics of punpck[lh] get pretty ugly... */

  if (expand_vec_perm_even_odd (d))
    return true;

  return false;
}
30278 /* Extract the values from the vector CST into the permutation array in D.
30279 Return 0 on error, 1 if all values from the permutation come from the
30280 first vector, 2 if all values from the second vector, and 3 otherwise. */
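/* For example, with V4SImode operands the mask { 0, 5, 2, 7 } mixes
   elements of both inputs, so the value returned is 3; the mask
   { 4, 6, 5, 7 } uses only the second input, returns 2, and is folded
   below to the equivalent single-input form { 0, 2, 1, 3 }.  */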
static int
extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
{
  tree list = TREE_VECTOR_CST_ELTS (cst);
  unsigned i, nelt = d->nelt;
  int ret = 0;
30289 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
    {
      unsigned HOST_WIDE_INT e;

      if (!host_integerp (TREE_VALUE (list), 1))
	return 0;
      e = tree_low_cst (TREE_VALUE (list), 1);
      if (e >= 2 * nelt)
	return 0;

      ret |= (e < nelt ? 1 : 2);
      d->perm[i] = e;
    }
30302 gcc_assert (list == NULL);
  /* For all elements from second vector, fold the elements to first.  */
  if (ret == 2)
    for (i = 0; i < nelt; ++i)
      d->perm[i] -= nelt;

  return ret;
}
rtx
ix86_expand_vec_perm_builtin (tree exp)
{
  struct expand_vec_perm_d d;
  tree arg0, arg1, arg2;
30318 arg0 = CALL_EXPR_ARG (exp, 0);
30319 arg1 = CALL_EXPR_ARG (exp, 1);
30320 arg2 = CALL_EXPR_ARG (exp, 2);
30322 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
30323 d.nelt = GET_MODE_NUNITS (d.vmode);
30324 d.testing_p = false;
30325 gcc_assert (VECTOR_MODE_P (d.vmode));
  if (TREE_CODE (arg2) != VECTOR_CST)
    {
      error_at (EXPR_LOCATION (exp),
		"vector permutation requires vector constant");
      goto exit_error;
    }
  switch (extract_vec_perm_cst (&d, arg2))
    {
    default:
      gcc_unreachable ();

    case 0:
      error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
      goto exit_error;

    case 3:
      if (!operand_equal_p (arg0, arg1, 0))
	{
30346 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30347 d.op0 = force_reg (d.vmode, d.op0);
30348 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
	  d.op1 = force_reg (d.vmode, d.op1);
	  break;
	}
      /* The elements of PERM do not suggest that only the first operand
	 is used, but both operands are identical.  Allow easier matching
	 of the permutation by folding the permutation into the single
	 input vector.  */
      {
30358 unsigned i, nelt = d.nelt;
30359 for (i = 0; i < nelt; ++i)
	  if (d.perm[i] >= nelt)
	    d.perm[i] -= nelt;
      }
      /* FALLTHRU */

    case 1:
      d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
      d.op0 = force_reg (d.vmode, d.op0);
      d.op1 = d.op0;
      break;

    case 2:
      d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
      d.op0 = force_reg (d.vmode, d.op0);
      d.op1 = d.op0;
      break;
    }
30378 d.target = gen_reg_rtx (d.vmode);
  if (ix86_expand_vec_perm_builtin_1 (&d))
    return d.target;
  /* For compiler generated permutations, we should never get here, because
     the compiler should also be checking the ok hook.  But since this is a
     builtin the user has access to, don't abort.  */
  switch (d.nelt)
    {
    case 2:
      sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
      break;
    case 4:
      sorry ("vector permutation (%d %d %d %d)",
	     d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
      break;
    case 8:
      sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
	     d.perm[0], d.perm[1], d.perm[2], d.perm[3],
	     d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
      break;
    case 16:
      sorry ("vector permutation "
	     "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
	     d.perm[0], d.perm[1], d.perm[2], d.perm[3],
	     d.perm[4], d.perm[5], d.perm[6], d.perm[7],
	     d.perm[8], d.perm[9], d.perm[10], d.perm[11],
	     d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
      break;
    default:
      gcc_unreachable ();
    }
 exit_error:
  return CONST0_RTX (d.vmode);
}
30414 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
static bool
ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
{
  struct expand_vec_perm_d d;
  int vec_mask;
  bool ret, one_vec;
30423 d.vmode = TYPE_MODE (vec_type);
30424 d.nelt = GET_MODE_NUNITS (d.vmode);
30425 d.testing_p = true;
30427 /* Given sufficient ISA support we can just return true here
30428 for selected vector modes. */
  if (GET_MODE_SIZE (d.vmode) == 16)
    {
      /* All implementable with a single vpperm insn.  */
      if (TARGET_XOP)
	return true;
      /* All implementable with 2 pshufb + 1 ior.  */
      if (TARGET_SSSE3)
	return true;
      /* All implementable with shufpd or unpck[lh]pd.  */
      if (d.nelt == 2)
	return true;
    }
30442 vec_mask = extract_vec_perm_cst (&d, mask);
  /* This hook cannot be called in response to something that the
     user does (unlike the builtin expander), so we shouldn't ever see
     an error generated from the extract.  */
30447 gcc_assert (vec_mask > 0 && vec_mask <= 3);
30448 one_vec = (vec_mask != 3);
30450 /* Implementable with shufps or pshufd. */
  if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
    return true;
30454 /* Otherwise we have to go through the motions and see if we can
30455 figure out how to generate the requested permutation. */
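  /* The expansion is run for its boolean result only: d.testing_p is
     set, the operands are dummy registers numbered past the virtual
     registers so no real pseudos are consumed, and the RTL generated
     below is captured in a sequence that is then discarded.  */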
30456 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
30457 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
  if (!one_vec)
    d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
  start_sequence ();
  ret = ix86_expand_vec_perm_builtin_1 (&d);
  end_sequence ();

  return ret;
}
void
ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
{
  struct expand_vec_perm_d d;
  unsigned i, nelt;

  d.target = targ;
  d.op0 = op0;
  d.op1 = op1;
  d.vmode = GET_MODE (targ);
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = false;
30481 for (i = 0; i < nelt; ++i)
30482 d.perm[i] = i * 2 + odd;
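  /* E.g. for V8SFmode, ODD == 0 builds the mask { 0 2 4 6 8 10 12 14 }
     and ODD == 1 builds { 1 3 5 7 9 11 13 15 }, indexing into the
     concatenation of OP0 and OP1.  */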
30484 /* We'll either be able to implement the permutation directly... */
  if (expand_vec_perm_1 (&d))
    return;
30488 /* ... or we use the special-case patterns. */
  expand_vec_perm_even_odd_1 (&d, odd);
}
/* Return the calling-ABI-specific va_list type node appropriate
   for FNDECL.  */
static tree
ix86_fn_abi_va_list (tree fndecl)
{
  if (!TARGET_64BIT)
    return va_list_type_node;
30500 gcc_assert (fndecl != NULL_TREE);
  if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
    return ms_va_list_type_node;
  else
    return sysv_va_list_type_node;
}
/* Returns the canonical va_list type specified by TYPE.  If there
   is no valid TYPE provided, it returns NULL_TREE.  */
static tree
ix86_canonical_va_list_type (tree type)
{
  tree wtype, htype;
30516 /* Resolve references and pointers to va_list type. */
30517 if (INDIRECT_REF_P (type))
30518 type = TREE_TYPE (type);
  else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE (type)))
30520 type = TREE_TYPE (type);
  if (TARGET_64BIT)
    {
      wtype = va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
30529 /* If va_list is an array type, the argument may have decayed
30530 to a pointer type, e.g. by being passed to another function.
30531 In that case, unwrap both types so that we can compare the
30532 underlying records. */
30533 if (TREE_CODE (htype) == ARRAY_TYPE
30534 || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
30540 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30541 return va_list_type_node;
30542 wtype = sysv_va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
30547 /* If va_list is an array type, the argument may have decayed
30548 to a pointer type, e.g. by being passed to another function.
30549 In that case, unwrap both types so that we can compare the
30550 underlying records. */
30551 if (TREE_CODE (htype) == ARRAY_TYPE
30552 || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
30558 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30559 return sysv_va_list_type_node;
30560 wtype = ms_va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
30565 /* If va_list is an array type, the argument may have decayed
30566 to a pointer type, e.g. by being passed to another function.
30567 In that case, unwrap both types so that we can compare the
30568 underlying records. */
30569 if (TREE_CODE (htype) == ARRAY_TYPE
30570 || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
30576 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30577 return ms_va_list_type_node;
    }

  return std_canonical_va_list_type (type);
}
30583 /* Iterate through the target-specific builtin types for va_list.
30584 IDX denotes the iterator, *PTREE is set to the result type of
30585 the va_list builtin, and *PNAME to its internal type.
30586 Returns zero if there is no element for this index, otherwise
30587 IDX should be increased upon the next call.
30588 Note, do not iterate a base builtin's name like __builtin_va_list.
30589 Used from c_common_nodes_and_builtins. */
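/* For illustration, a caller can enumerate both types with a loop
   along these lines (use_va_list_type is a hypothetical consumer):

     const char *name;
     tree type;
     int i;

     for (i = 0; ix86_enum_va_list (i, &name, &type); ++i)
       use_va_list_type (name, type);

   On 64-bit targets this visits __builtin_ms_va_list and then
   __builtin_sysv_va_list; on 32-bit targets it visits nothing.  */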
static int
ix86_enum_va_list (int idx, const char **pname, tree *ptree)
{
  if (!TARGET_64BIT)
    return 0;

  switch (idx)
    {
    case 0:
      *ptree = ms_va_list_type_node;
      *pname = "__builtin_ms_va_list";
      return 1;

    case 1:
      *ptree = sysv_va_list_type_node;
      *pname = "__builtin_sysv_va_list";
      return 1;

    default:
      return 0;
    }
}
30611 /* Initialize the GCC target structure. */
30612 #undef TARGET_RETURN_IN_MEMORY
30613 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
30615 #undef TARGET_LEGITIMIZE_ADDRESS
30616 #define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address
30618 #undef TARGET_ATTRIBUTE_TABLE
30619 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
30620 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30621 # undef TARGET_MERGE_DECL_ATTRIBUTES
# define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif
30625 #undef TARGET_COMP_TYPE_ATTRIBUTES
30626 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
30628 #undef TARGET_INIT_BUILTINS
30629 #define TARGET_INIT_BUILTINS ix86_init_builtins
30630 #undef TARGET_BUILTIN_DECL
30631 #define TARGET_BUILTIN_DECL ix86_builtin_decl
30632 #undef TARGET_EXPAND_BUILTIN
30633 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
30635 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
30636 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
30637 ix86_builtin_vectorized_function
30639 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
30640 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
30642 #undef TARGET_BUILTIN_RECIPROCAL
30643 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
30645 #undef TARGET_ASM_FUNCTION_EPILOGUE
30646 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
30648 #undef TARGET_ENCODE_SECTION_INFO
30649 #ifndef SUBTARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
#endif
30655 #undef TARGET_ASM_OPEN_PAREN
30656 #define TARGET_ASM_OPEN_PAREN ""
30657 #undef TARGET_ASM_CLOSE_PAREN
30658 #define TARGET_ASM_CLOSE_PAREN ""
30660 #undef TARGET_ASM_BYTE_OP
30661 #define TARGET_ASM_BYTE_OP ASM_BYTE
30663 #undef TARGET_ASM_ALIGNED_HI_OP
30664 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
30665 #undef TARGET_ASM_ALIGNED_SI_OP
30666 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
#ifdef ASM_QUAD
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
#endif
30672 #undef TARGET_ASM_UNALIGNED_HI_OP
30673 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
30674 #undef TARGET_ASM_UNALIGNED_SI_OP
30675 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
30676 #undef TARGET_ASM_UNALIGNED_DI_OP
30677 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
30679 #undef TARGET_SCHED_ADJUST_COST
30680 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
30681 #undef TARGET_SCHED_ISSUE_RATE
30682 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
30683 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
30684 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
30685 ia32_multipass_dfa_lookahead
30687 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
30688 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
30694 #undef TARGET_CANNOT_FORCE_CONST_MEM
30695 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
30696 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
30697 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
30699 #undef TARGET_DELEGITIMIZE_ADDRESS
30700 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
30702 #undef TARGET_MS_BITFIELD_LAYOUT_P
30703 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
30709 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30710 #undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
#endif
30714 #undef TARGET_ASM_OUTPUT_MI_THUNK
30715 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
30716 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
30717 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
30719 #undef TARGET_ASM_FILE_START
30720 #define TARGET_ASM_FILE_START x86_file_start
30722 #undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT \
   | TARGET_SUBTARGET_DEFAULT \
   | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT \
   | MASK_FUSED_MADD)
30729 #undef TARGET_HANDLE_OPTION
30730 #define TARGET_HANDLE_OPTION ix86_handle_option
30732 #undef TARGET_RTX_COSTS
30733 #define TARGET_RTX_COSTS ix86_rtx_costs
30734 #undef TARGET_ADDRESS_COST
30735 #define TARGET_ADDRESS_COST ix86_address_cost
30737 #undef TARGET_FIXED_CONDITION_CODE_REGS
30738 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
30739 #undef TARGET_CC_MODES_COMPATIBLE
30740 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
30742 #undef TARGET_MACHINE_DEPENDENT_REORG
30743 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
30745 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
30746 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value
30748 #undef TARGET_BUILD_BUILTIN_VA_LIST
30749 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
30751 #undef TARGET_FN_ABI_VA_LIST
30752 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
30754 #undef TARGET_CANONICAL_VA_LIST_TYPE
30755 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
30757 #undef TARGET_EXPAND_BUILTIN_VA_START
30758 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
30760 #undef TARGET_MD_ASM_CLOBBERS
30761 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
30763 #undef TARGET_PROMOTE_PROTOTYPES
30764 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
30765 #undef TARGET_STRUCT_VALUE_RTX
30766 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
30767 #undef TARGET_SETUP_INCOMING_VARARGS
30768 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
30769 #undef TARGET_MUST_PASS_IN_STACK
30770 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
30771 #undef TARGET_PASS_BY_REFERENCE
30772 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
30773 #undef TARGET_INTERNAL_ARG_POINTER
30774 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
30775 #undef TARGET_UPDATE_STACK_BOUNDARY
30776 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
30777 #undef TARGET_GET_DRAP_RTX
30778 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
30779 #undef TARGET_STRICT_ARGUMENT_NAMING
30780 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
30781 #undef TARGET_STATIC_CHAIN
30782 #define TARGET_STATIC_CHAIN ix86_static_chain
30783 #undef TARGET_TRAMPOLINE_INIT
30784 #define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
30786 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
30787 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
30789 #undef TARGET_SCALAR_MODE_SUPPORTED_P
30790 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
30792 #undef TARGET_VECTOR_MODE_SUPPORTED_P
30793 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
30795 #undef TARGET_C_MODE_FOR_SUFFIX
30796 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
#endif
30803 #ifdef SUBTARGET_INSERT_ATTRIBUTES
30804 #undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif
30808 #undef TARGET_MANGLE_TYPE
30809 #define TARGET_MANGLE_TYPE ix86_mangle_type
30811 #undef TARGET_STACK_PROTECT_FAIL
30812 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
30814 #undef TARGET_FUNCTION_VALUE
30815 #define TARGET_FUNCTION_VALUE ix86_function_value
30817 #undef TARGET_FUNCTION_VALUE_REGNO_P
30818 #define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p
30820 #undef TARGET_SECONDARY_RELOAD
30821 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
30823 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
30824 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
30825 ix86_builtin_vectorization_cost
30826 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
30827 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
30828 ix86_vectorize_builtin_vec_perm
30829 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
30830 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
30831 ix86_vectorize_builtin_vec_perm_ok
30833 #undef TARGET_SET_CURRENT_FUNCTION
30834 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
30836 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
30837 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
30839 #undef TARGET_OPTION_SAVE
30840 #define TARGET_OPTION_SAVE ix86_function_specific_save
30842 #undef TARGET_OPTION_RESTORE
30843 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
30845 #undef TARGET_OPTION_PRINT
30846 #define TARGET_OPTION_PRINT ix86_function_specific_print
30848 #undef TARGET_CAN_INLINE_P
30849 #define TARGET_CAN_INLINE_P ix86_can_inline_p
30851 #undef TARGET_EXPAND_TO_RTL_HOOK
30852 #define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi
30854 #undef TARGET_LEGITIMATE_ADDRESS_P
30855 #define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p
30857 #undef TARGET_IRA_COVER_CLASSES
30858 #define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes
30860 #undef TARGET_FRAME_POINTER_REQUIRED
30861 #define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required
30863 #undef TARGET_CAN_ELIMINATE
30864 #define TARGET_CAN_ELIMINATE ix86_can_eliminate
30866 #undef TARGET_ASM_CODE_END
30867 #define TARGET_ASM_CODE_END ix86_code_end
30869 struct gcc_target targetm = TARGET_INITIALIZER;
30871 #include "gt-i386.h"