/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
24 #include "coretypes.h"
30 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
44 #include "basic-block.h"
47 #include "target-def.h"
48 #include "langhooks.h"
53 #include "tm-constrs.h"
57 #include "dwarf2out.h"
static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif
/* Return the index of the given mode in the multiply and divide cost
   tables.  */
#define MODE_INDEX(mode) \
  ((mode) == QImode ? 0 \
   : (mode) == HImode ? 1 \
   : (mode) == SImode ? 2 \
   : (mode) == DImode ? 3 \
   : 4)
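
/* Illustrative sketch (not part of the original file): MODE_INDEX selects a
   row in the five-entry multiply/divide cost arrays of struct
   processor_costs below (QI, HI, SI, DI, other).  The field name mult_init
   is assumed to match the processor_costs layout in i386.h.  */
#if 0
static int
example_mult_start_cost (const struct processor_costs *costs,
                         enum machine_mode mode)
{
  /* SImode maps to index 2; anything wider than DImode falls into the
     "other" slot at index 4.  */
  return costs->mult_init[MODE_INDEX (mode)];
}
#endif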
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)
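
/* Worked example: with COSTS_N_INSNS (N) defined as (N) * 4, a one-insn,
   two-byte addition scores COSTS_N_INSNS (1) == 4 == COSTS_N_BYTES (2), so
   the size-tuned table below and the cycle-tuned tables that follow share a
   single cost scale.  */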
#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
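
/* How the stringop tables below are read (a sketch; field names follow the
   stringop_algs definition in i386.h and should be checked there): the first
   member is the algorithm for blocks of unknown size; each following
   {max, alg} pair handles known sizes up to MAX bytes, with max == -1
   terminating the list and covering everything larger.  Thus
   DUMMY_STRINGOP_ALGS simply says "always libcall".  */
#if 0
static enum stringop_alg
example_pick_alg (const struct stringop_algs *algs, HOST_WIDE_INT nbytes)
{
  int i;
  /* Walk the size table until the block fits or the -1 sentinel is hit.  */
  for (i = 0; algs->size[i].max != -1 && nbytes > algs->size[i].max; i++)
    continue;
  return algs->size[i].alg;
}
#endif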
struct processor_costs ix86_size_cost = { /* costs for tuning for size */
  COSTS_N_BYTES (2), /* cost of an add instruction */
  COSTS_N_BYTES (3), /* cost of a lea instruction */
  COSTS_N_BYTES (2), /* variable shift costs */
  COSTS_N_BYTES (3), /* constant shift costs */
  {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
   COSTS_N_BYTES (3), /* HI */
   COSTS_N_BYTES (3), /* SI */
   COSTS_N_BYTES (3), /* DI */
   COSTS_N_BYTES (5)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
   COSTS_N_BYTES (3), /* HI */
   COSTS_N_BYTES (3), /* SI */
   COSTS_N_BYTES (3), /* DI */
   COSTS_N_BYTES (5)}, /* other */
  COSTS_N_BYTES (3), /* cost of movsx */
  COSTS_N_BYTES (3), /* cost of movzx */
  2, /* cost for loading QImode using movzbl */
  {2, 2, 2}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 2, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {2, 2, 2}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {2, 2, 2}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  3, /* cost of moving MMX register */
  {3, 3}, /* cost of loading MMX registers
             in SImode and DImode */
  {3, 3}, /* cost of storing MMX registers
             in SImode and DImode */
  3, /* cost of moving SSE register */
  {3, 3, 3}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {3, 3, 3}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  0, /* size of l1 cache */
  0, /* size of l2 cache */
  0, /* size of prefetch block */
  0, /* number of parallel prefetches */
  COSTS_N_BYTES (2), /* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2), /* cost of FMUL instruction.  */
  COSTS_N_BYTES (2), /* cost of FDIV instruction.  */
  COSTS_N_BYTES (2), /* cost of FABS instruction.  */
  COSTS_N_BYTES (2), /* cost of FCHS instruction.  */
  COSTS_N_BYTES (2), /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  1, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  1, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
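
/* Usage note (a sketch, not a quote of the option-handling code): when
   optimizing for size, option handling is expected to point ix86_cost,
   declared further below, at the table above, roughly

     if (optimize_size)
       ix86_cost = &ix86_size_cost;

   so the COSTS_N_BYTES entries are read through the same accessors as the
   cycle-based tables that follow.  */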
/* Processor costs (relative to an add) */
struct processor_costs i386_cost = { /* 386 specific costs */
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (3), /* variable shift costs */
  COSTS_N_INSNS (2), /* constant shift costs */
  {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
   COSTS_N_INSNS (6), /* HI */
   COSTS_N_INSNS (6), /* SI */
   COSTS_N_INSNS (6), /* DI */
   COSTS_N_INSNS (6)}, /* other */
  COSTS_N_INSNS (1), /* cost of multiply per each bit set */
  {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23), /* HI */
   COSTS_N_INSNS (23), /* SI */
   COSTS_N_INSNS (23), /* DI */
   COSTS_N_INSNS (23)}, /* other */
  COSTS_N_INSNS (3), /* cost of movsx */
  COSTS_N_INSNS (2), /* cost of movzx */
  15, /* "large" insn */
  4, /* cost for loading QImode using movzbl */
  {2, 4, 2}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 4, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {8, 8, 8}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {8, 8, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {4, 8}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 8}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 8, 16}, /* cost of loading SSE registers
                 in SImode, DImode and TImode */
  {4, 8, 16}, /* cost of storing SSE registers
                 in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  0, /* size of l1 cache */
  0, /* size of l2 cache */
  0, /* size of prefetch block */
  0, /* number of parallel prefetches */
  COSTS_N_INSNS (23), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (88), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (22), /* cost of FABS instruction.  */
  COSTS_N_INSNS (24), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (122), /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
struct processor_costs i486_cost = { /* 486 specific costs */
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (3), /* variable shift costs */
  COSTS_N_INSNS (2), /* constant shift costs */
  {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
   COSTS_N_INSNS (12), /* HI */
   COSTS_N_INSNS (12), /* SI */
   COSTS_N_INSNS (12), /* DI */
   COSTS_N_INSNS (12)}, /* other */
  1, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (40), /* HI */
   COSTS_N_INSNS (40), /* SI */
   COSTS_N_INSNS (40), /* DI */
   COSTS_N_INSNS (40)}, /* other */
  COSTS_N_INSNS (3), /* cost of movsx */
  COSTS_N_INSNS (2), /* cost of movzx */
  15, /* "large" insn */
  4, /* cost for loading QImode using movzbl */
  {2, 4, 2}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 4, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {8, 8, 8}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {8, 8, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {4, 8}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 8}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 8, 16}, /* cost of loading SSE registers
                 in SImode, DImode and TImode */
  {4, 8, 16}, /* cost of storing SSE registers
                 in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  4, /* size of l1 cache.  The 486 has an 8kB cache
        shared for code and data, so 4kB is
        not really precise.  */
  4, /* size of l2 cache */
  0, /* size of prefetch block */
  0, /* number of parallel prefetches */
  COSTS_N_INSNS (8), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (73), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3), /* cost of FABS instruction.  */
  COSTS_N_INSNS (3), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (83), /* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (4), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
   COSTS_N_INSNS (11), /* HI */
   COSTS_N_INSNS (11), /* SI */
   COSTS_N_INSNS (11), /* DI */
   COSTS_N_INSNS (11)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (25), /* HI */
   COSTS_N_INSNS (25), /* SI */
   COSTS_N_INSNS (25), /* DI */
   COSTS_N_INSNS (25)}, /* other */
  COSTS_N_INSNS (3), /* cost of movsx */
  COSTS_N_INSNS (2), /* cost of movzx */
  8, /* "large" insn */
  6, /* cost for loading QImode using movzbl */
  {2, 4, 2}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 4, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {2, 2, 6}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {4, 4, 6}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  8, /* cost of moving MMX register */
  {8, 8}, /* cost of loading MMX registers
             in SImode and DImode */
  {8, 8}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 8, 16}, /* cost of loading SSE registers
                 in SImode, DImode and TImode */
  {4, 8, 16}, /* cost of storing SSE registers
                 in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  8, /* size of l1 cache.  */
  8, /* size of l2 cache */
  0, /* size of prefetch block */
  0, /* number of parallel prefetches */
  COSTS_N_INSNS (3), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (39), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1), /* cost of FABS instruction.  */
  COSTS_N_INSNS (1), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (70), /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (4), /* SI */
   COSTS_N_INSNS (4), /* DI */
   COSTS_N_INSNS (4)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (17), /* HI */
   COSTS_N_INSNS (17), /* SI */
   COSTS_N_INSNS (17), /* DI */
   COSTS_N_INSNS (17)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  2, /* cost for loading QImode using movzbl */
  {4, 4, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 2, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {2, 2, 6}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {4, 4, 6}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {2, 2}, /* cost of loading MMX registers
             in SImode and DImode */
  {2, 2}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {2, 2, 8}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {2, 2, 8}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  8, /* size of l1 cache.  */
  256, /* size of l2 cache */
  32, /* size of prefetch block */
  6, /* number of parallel prefetches */
  COSTS_N_INSNS (3), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56), /* cost of FSQRT instruction.  */
  /* The PentiumPro has optimized rep instructions for blocks aligned by
     8 bytes (we ensure the alignment).  For small blocks the inline loop
     is still a noticeable win; for bigger blocks either rep movsl or
     rep movsb is the way to go.  Rep movsb apparently has a more expensive
     startup time on the CPU, but after 4K the difference is down in the
     noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
    {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
    {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (2), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (7), /* SI */
   COSTS_N_INSNS (7), /* DI */
   COSTS_N_INSNS (7)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23), /* HI */
   COSTS_N_INSNS (39), /* SI */
   COSTS_N_INSNS (39), /* DI */
   COSTS_N_INSNS (39)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  1, /* cost for loading QImode using movzbl */
  {1, 1, 1}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {1, 1, 1}, /* cost of storing integer registers */
  1, /* cost of reg,reg fld/fst */
  {1, 1, 1}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {4, 6, 6}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */

  1, /* cost of moving MMX register */
  {1, 1}, /* cost of loading MMX registers
             in SImode and DImode */
  {1, 1}, /* cost of storing MMX registers
             in SImode and DImode */
  1, /* cost of moving SSE register */
  {1, 1, 1}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {1, 1, 1}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  1, /* MMX or SSE register to integer */
  64, /* size of l1 cache.  */
  128, /* size of l2 cache.  */
  32, /* size of prefetch block */
  1, /* number of parallel prefetches */
  COSTS_N_INSNS (6), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (47), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1), /* cost of FABS instruction.  */
  COSTS_N_INSNS (1), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (54), /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (2), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (3), /* HI */
   COSTS_N_INSNS (3), /* SI */
   COSTS_N_INSNS (3), /* DI */
   COSTS_N_INSNS (3)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (18), /* HI */
   COSTS_N_INSNS (18), /* SI */
   COSTS_N_INSNS (18), /* DI */
   COSTS_N_INSNS (18)}, /* other */
  COSTS_N_INSNS (2), /* cost of movsx */
  COSTS_N_INSNS (2), /* cost of movzx */
  8, /* "large" insn */
  3, /* cost for loading QImode using movzbl */
  {4, 5, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 3, 2}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {6, 6, 6}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {4, 4, 4}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {2, 2}, /* cost of loading MMX registers
             in SImode and DImode */
  {2, 2}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {2, 2, 8}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {2, 2, 8}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  6, /* MMX or SSE register to integer */
  32, /* size of l1 cache.  */
  32, /* size of l2 cache.  Some models
         have integrated l2 cache, but
         optimizing for k6 is not important
         enough to worry about that.  */
  32, /* size of prefetch block */
  1, /* number of parallel prefetches */
  COSTS_N_INSNS (2), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56), /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (2), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
   COSTS_N_INSNS (5), /* HI */
   COSTS_N_INSNS (5), /* SI */
   COSTS_N_INSNS (5), /* DI */
   COSTS_N_INSNS (5)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26), /* HI */
   COSTS_N_INSNS (42), /* SI */
   COSTS_N_INSNS (74), /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  4, /* cost for loading QImode using movzbl */
  {3, 4, 3}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {3, 4, 3}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {4, 4, 12}, /* cost of loading fp registers
                 in SFmode, DFmode and XFmode */
  {6, 6, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {4, 4}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 4}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 4, 6}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {4, 4, 5}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  5, /* MMX or SSE register to integer */
  64, /* size of l1 cache.  */
  256, /* size of l2 cache.  */
  64, /* size of prefetch block */
  6, /* number of parallel prefetches */
  COSTS_N_INSNS (4), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (24), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35), /* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with the REP prefix (relative to
     loops) than K8 does.  Alignment becomes important after 8 bytes for
     memcpy and 128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (2), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (3), /* SI */
   COSTS_N_INSNS (4), /* DI */
   COSTS_N_INSNS (5)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26), /* HI */
   COSTS_N_INSNS (42), /* SI */
   COSTS_N_INSNS (74), /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  4, /* cost for loading QImode using movzbl */
  {3, 4, 3}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {3, 4, 3}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {4, 4, 12}, /* cost of loading fp registers
                 in SFmode, DFmode and XFmode */
  {6, 6, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {3, 3}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 4}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 3, 6}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {4, 4, 5}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  5, /* MMX or SSE register to integer */
  64, /* size of l1 cache.  */
  512, /* size of l2 cache.  */
  64, /* size of prefetch block */
  /* Newer AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it is probably not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100, /* number of parallel prefetches */
  COSTS_N_INSNS (4), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35), /* cost of FSQRT instruction.  */
  /* K8 has an optimized REP instruction for medium-sized blocks, but for
     very small blocks it is better to use a loop.  For large blocks, a
     libcall can do non-temporal accesses and beat inline code
     considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
    {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4, /* scalar_stmt_cost.  */
  2, /* scalar load_cost.  */
  2, /* scalar_store_cost.  */
  5, /* vec_stmt_cost.  */
  0, /* vec_to_scalar_cost.  */
  2, /* scalar_to_vec_cost.  */
  2, /* vec_align_load_cost.  */
  3, /* vec_unalign_load_cost.  */
  3, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  2, /* cond_not_taken_branch_cost.  */
};
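
/* Worked example of reading the K8 memcpy table above (32-bit variant,
   known block size): a 10-byte copy exceeds the {6, loop} bound but fits
   {14, unrolled_loop}, so it is expanded as an unrolled loop; a 100-byte
   copy falls through to the {-1, rep_prefix_4_byte} sentinel and uses the
   rep-prefixed move.  */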
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (2), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (3), /* SI */
   COSTS_N_INSNS (4), /* DI */
   COSTS_N_INSNS (5)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35), /* HI */
   COSTS_N_INSNS (51), /* SI */
   COSTS_N_INSNS (83), /* DI */
   COSTS_N_INSNS (83)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  4, /* cost for loading QImode using movzbl */
  {3, 4, 3}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {3, 4, 3}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {4, 4, 12}, /* cost of loading fp registers
                 in SFmode, DFmode and XFmode */
  {6, 6, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {3, 3}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 4}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 4, 3}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {4, 4, 5}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  /* On K8:
       MOVD reg64, xmmreg  Double  FSTORE 4
       MOVD reg32, xmmreg  Double  FSTORE 4
     On AMDFAM10:
       MOVD reg64, xmmreg  Double  FADD 3
       MOVD reg32, xmmreg  Double  FADD 3  */
  64, /* size of l1 cache.  */
  512, /* size of l2 cache.  */
  64, /* size of prefetch block */
  /* Newer AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it is probably not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100, /* number of parallel prefetches */
  COSTS_N_INSNS (4), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35), /* cost of FSQRT instruction.  */

  /* AMDFAM10 has an optimized REP instruction for medium-sized blocks, but
     for very small blocks it is better to use a loop.  For large blocks, a
     libcall can do non-temporal accesses and beat inline code
     considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
    {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4, /* scalar_stmt_cost.  */
  2, /* scalar load_cost.  */
  2, /* scalar_store_cost.  */
  6, /* vec_stmt_cost.  */
  0, /* vec_to_scalar_cost.  */
  2, /* scalar_to_vec_cost.  */
  2, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  2, /* vec_store_cost.  */
  2, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (3), /* cost of a lea instruction */
  COSTS_N_INSNS (4), /* variable shift costs */
  COSTS_N_INSNS (4), /* constant shift costs */
  {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
   COSTS_N_INSNS (15), /* HI */
   COSTS_N_INSNS (15), /* SI */
   COSTS_N_INSNS (15), /* DI */
   COSTS_N_INSNS (15)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (56), /* HI */
   COSTS_N_INSNS (56), /* SI */
   COSTS_N_INSNS (56), /* DI */
   COSTS_N_INSNS (56)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  16, /* "large" insn */
  2, /* cost for loading QImode using movzbl */
  {4, 5, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 3, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {2, 2, 6}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {4, 4, 6}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {2, 2}, /* cost of loading MMX registers
             in SImode and DImode */
  {2, 2}, /* cost of storing MMX registers
             in SImode and DImode */
  12, /* cost of moving SSE register */
  {12, 12, 12}, /* cost of loading SSE registers
                   in SImode, DImode and TImode */
  {2, 2, 8}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  10, /* MMX or SSE register to integer */
  8, /* size of l1 cache.  */
  256, /* size of l2 cache.  */
  64, /* size of prefetch block */
  6, /* number of parallel prefetches */
  COSTS_N_INSNS (5), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (43), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (43), /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
    {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
   COSTS_N_INSNS (10), /* HI */
   COSTS_N_INSNS (10), /* SI */
   COSTS_N_INSNS (10), /* DI */
   COSTS_N_INSNS (10)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (66), /* HI */
   COSTS_N_INSNS (66), /* SI */
   COSTS_N_INSNS (66), /* DI */
   COSTS_N_INSNS (66)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  16, /* "large" insn */
  4, /* cost for loading QImode using movzbl */
  {4, 4, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {4, 4, 4}, /* cost of storing integer registers */
  3, /* cost of reg,reg fld/fst */
  {12, 12, 12}, /* cost of loading fp registers
                   in SFmode, DFmode and XFmode */
  {4, 4, 4}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  6, /* cost of moving MMX register */
  {12, 12}, /* cost of loading MMX registers
               in SImode and DImode */
  {12, 12}, /* cost of storing MMX registers
               in SImode and DImode */
  6, /* cost of moving SSE register */
  {12, 12, 12}, /* cost of loading SSE registers
                   in SImode, DImode and TImode */
  {12, 12, 12}, /* cost of storing SSE registers
                   in SImode, DImode and TImode */
  8, /* MMX or SSE register to integer */
  8, /* size of l1 cache.  */
  1024, /* size of l2 cache.  */
  128, /* size of prefetch block */
  8, /* number of parallel prefetches */
  COSTS_N_INSNS (6), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (40), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3), /* cost of FABS instruction.  */
  COSTS_N_INSNS (3), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (44), /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
    {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
    {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
    {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
struct processor_costs core2_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (3), /* HI */
   COSTS_N_INSNS (3), /* SI */
   COSTS_N_INSNS (3), /* DI */
   COSTS_N_INSNS (3)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (22), /* HI */
   COSTS_N_INSNS (22), /* SI */
   COSTS_N_INSNS (22), /* DI */
   COSTS_N_INSNS (22)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  2, /* cost for loading QImode using movzbl */
  {6, 6, 6}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {4, 4, 4}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {6, 6, 6}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {4, 4, 4}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {6, 6}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 4}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {6, 6, 6}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {4, 4, 4}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  2, /* MMX or SSE register to integer */
  32, /* size of l1 cache.  */
  2048, /* size of l2 cache.  */
  128, /* size of prefetch block */
  8, /* number of parallel prefetches */
  3, /* Branch cost */
  COSTS_N_INSNS (3), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (32), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1), /* cost of FABS instruction.  */
  COSTS_N_INSNS (1), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (58), /* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
    {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
    {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
    {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
struct processor_costs atom_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (3), /* SI */
   COSTS_N_INSNS (4), /* DI */
   COSTS_N_INSNS (2)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26), /* HI */
   COSTS_N_INSNS (42), /* SI */
   COSTS_N_INSNS (74), /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  17, /* MOVE_RATIO */
  2, /* cost for loading QImode using movzbl */
  {4, 4, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {4, 4, 4}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {12, 12, 12}, /* cost of loading fp registers
                   in SFmode, DFmode and XFmode */
  {6, 6, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {8, 8}, /* cost of loading MMX registers
             in SImode and DImode */
  {8, 8}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {8, 8, 8}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {8, 8, 8}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  5, /* MMX or SSE register to integer */
  32, /* size of l1 cache.  */
  256, /* size of l2 cache.  */
  64, /* size of prefetch block */
  6, /* number of parallel prefetches */
  3, /* Branch cost */
  COSTS_N_INSNS (8), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8), /* cost of FABS instruction.  */
  COSTS_N_INSNS (8), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40), /* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
    {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
    {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
    {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
/* Generic64 should produce code tuned for Nocona and K8.  */

struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  /* On all chips taken into consideration, lea takes 2 cycles or more.
     With that cost, however, our current implementation of synth_mult
     uses unnecessary temporary registers, causing regressions on several
     SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (3), /* SI */
   COSTS_N_INSNS (4), /* DI */
   COSTS_N_INSNS (2)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26), /* HI */
   COSTS_N_INSNS (42), /* SI */
   COSTS_N_INSNS (74), /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  17, /* MOVE_RATIO */
  4, /* cost for loading QImode using movzbl */
  {4, 4, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {4, 4, 4}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {12, 12, 12}, /* cost of loading fp registers
                   in SFmode, DFmode and XFmode */
  {6, 6, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {8, 8}, /* cost of loading MMX registers
             in SImode and DImode */
  {8, 8}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {8, 8, 8}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {8, 8, 8}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  5, /* MMX or SSE register to integer */
  32, /* size of l1 cache.  */
  512, /* size of l2 cache.  */
  64, /* size of prefetch block */
  6, /* number of parallel prefetches */
  /* Benchmarks show large regressions on the K8 sixtrack benchmark when
     this value is increased to the perhaps more appropriate value of 5.  */
  3, /* Branch cost */
  COSTS_N_INSNS (8), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8), /* cost of FABS instruction.  */
  COSTS_N_INSNS (8), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40), /* cost of FSQRT instruction.  */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
/* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona
   and K8.  */

struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (3), /* SI */
   COSTS_N_INSNS (4), /* DI */
   COSTS_N_INSNS (2)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26), /* HI */
   COSTS_N_INSNS (42), /* SI */
   COSTS_N_INSNS (74), /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  17, /* MOVE_RATIO */
  4, /* cost for loading QImode using movzbl */
  {4, 4, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {4, 4, 4}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {12, 12, 12}, /* cost of loading fp registers
                   in SFmode, DFmode and XFmode */
  {6, 6, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {8, 8}, /* cost of loading MMX registers
             in SImode and DImode */
  {8, 8}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {8, 8, 8}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {8, 8, 8}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  5, /* MMX or SSE register to integer */
  32, /* size of l1 cache.  */
  256, /* size of l2 cache.  */
  64, /* size of prefetch block */
  6, /* number of parallel prefetches */
  3, /* Branch cost */
  COSTS_N_INSNS (8), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8), /* cost of FABS instruction.  */
  COSTS_N_INSNS (8), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40), /* cost of FSQRT instruction.  */
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1, /* scalar_stmt_cost.  */
  1, /* scalar load_cost.  */
  1, /* scalar_store_cost.  */
  1, /* vec_stmt_cost.  */
  1, /* vec_to_scalar_cost.  */
  1, /* scalar_to_vec_cost.  */
  1, /* vec_align_load_cost.  */
  2, /* vec_unalign_load_cost.  */
  1, /* vec_store_cost.  */
  3, /* cond_taken_branch_cost.  */
  1, /* cond_not_taken_branch_cost.  */
};
const struct processor_costs *ix86_cost = &pentium_cost;
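
/* Sketch of how the tables are consumed (a hypothetical helper; the real
   queries live in the rtx-cost hooks of this file, and the field names are
   assumed to match struct processor_costs in i386.h): */
#if 0
static int
example_shift_cost (bool constant_amount)
{
  /* Costs are relative to an add on the CPU selected by -mtune.  */
  return constant_amount ? ix86_cost->shift_const : ix86_cost->shift_var;
}
#endif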
/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
#define m_CORE2 (1<<PROCESSOR_CORE2)
#define m_ATOM (1<<PROCESSOR_ATOM)

#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
#define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10)

#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)

/* Generic instruction choice should be a common subset of the supported
   CPUs (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
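
/* Example of how these masks are meant to be tested (a sketch; the real
   loop lives in option handling): each table entry below lists every
   processor a feature applies to, and tuning for one processor reduces to
   a bitmask membership test.  */
#if 0
static bool
example_tune_has (unsigned int feature_mask, enum processor_type tune)
{
  unsigned int tune_mask = 1u << tune;
  return (feature_mask & tune_mask) != 0;
}
#endif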
/* Feature tests against the various tunings.  */
unsigned char ix86_tune_features[X86_TUNE_LAST];

/* Feature tests against the various tunings, used to create
   ix86_tune_features based on the processor mask.  */
static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
  /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
     negatively, so enabling it for Generic64 seems like a good code size
     tradeoff.  We can't enable it for 32bit generic because it does not
     work well with PPro base chips.  */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,

  /* X86_TUNE_PUSH_MEMORY */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_ZERO_EXTEND_WITH_AND */

  /* X86_TUNE_UNROLL_STRLEN */
  m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
  | m_CORE2 | m_GENERIC,

  /* X86_TUNE_DEEP_BRANCH_PREDICTION */
  m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,

  /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in the P4 based
     on simulation results.  But after the P4 was made, no performance
     benefit was observed with branch hints, and they also increase code
     size.  As a result, icc never generates branch hints.  */

  /* X86_TUNE_DOUBLE_WITH_ADD */

  /* X86_TUNE_USE_SAHF */
  m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
     partial dependencies.  */
  m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
  | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,

  /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
     register stalls on the Generic32 compilation setting as well.  However,
     in the current implementation the partial register stalls are not
     eliminated very well - they can be introduced via subregs synthesized
     by combine and can happen in caller/callee saving sequences.  Because
     this option pays back little on PPro based chips and is in conflict
     with the partial reg dependencies used by Athlon/P4 based chips, it is
     better to leave it off for generic32 for now.  */
  /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
  m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_HIMODE_FIOP */
  m_386 | m_486 | m_K6_GEODE,

  /* X86_TUNE_USE_SIMODE_FIOP */
  ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_MOV0 */

  /* X86_TUNE_USE_CLTD */
  ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx.  */

  /* X86_TUNE_SPLIT_LONG_MOVES */

  /* X86_TUNE_READ_MODIFY_WRITE */

  /* X86_TUNE_READ_MODIFY */

  /* X86_TUNE_PROMOTE_QIMODE */
  m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
  | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,

  /* X86_TUNE_FAST_PREFIX */
  ~(m_PENT | m_486 | m_386),

  /* X86_TUNE_SINGLE_STRINGOP */
  m_386 | m_PENT4 | m_NOCONA,

  /* X86_TUNE_QIMODE_MATH */

  /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
     register stalls.  Just like X86_TUNE_PARTIAL_REG_STALL, this option
     might be considered for Generic32 if our scheme for avoiding partial
     stalls were more effective.  */

  /* X86_TUNE_PROMOTE_QI_REGS */

  /* X86_TUNE_PROMOTE_HI_REGS */

  /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop.  */
  m_ATOM | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA
  | m_CORE2 | m_GENERIC,

  /* X86_TUNE_ADD_ESP_8 */
  m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_K6_GEODE | m_386
  | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SUB_ESP_4 */
  m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2
  | m_GENERIC,

  /* X86_TUNE_SUB_ESP_8 */
  m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_386 | m_486
  | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
  /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
     for DFmode copies.  */
  ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
    | m_GENERIC | m_GEODE),

  /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
  m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
  /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
     conflict here between PPro/Pentium4 based chips that treat 128bit
     SSE registers as single units and K8 based chips that divide SSE
     registers into two 64bit halves.  This knob promotes all store
     destinations to be 128bit so as to allow register renaming on 128bit
     SSE units, but it usually results in one extra microop on 64bit SSE
     units.  Experimental results show that disabling this option on P4
     brings over a 20% SPECfp regression, while enabling it on K8 brings
     roughly a 2.4% regression that can be partly masked by careful
     scheduling of moves.  */
  m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
  | m_AMDFAM10,
  /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */

  /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and
     dependencies are resolved on SSE register parts instead of whole
     registers, so we may maintain just the lower part of scalar values in
     proper format, leaving the upper part undefined.  */

  /* X86_TUNE_SSE_TYPELESS_STORES */

  /* X86_TUNE_SSE_LOAD0_BY_PXOR */
  m_PPRO | m_PENT4 | m_NOCONA,

  /* X86_TUNE_MEMORY_MISMATCH_STALL */
  m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_PROLOGUE_USING_MOVE */
  m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_EPILOGUE_USING_MOVE */
  m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SHIFT1 */

  /* X86_TUNE_USE_FFREEP */

  /* X86_TUNE_INTER_UNIT_MOVES */
  ~(m_AMD_MULTIPLE | m_GENERIC),

  /* X86_TUNE_INTER_UNIT_CONVERSIONS */

  /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
     than 4 branch instructions in the 16 byte window.  */
  m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
  | m_GENERIC,

  /* X86_TUNE_SCHEDULE */
  m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
  | m_GENERIC,

  /* X86_TUNE_USE_BT */
  m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_INCDEC */
  ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),

  /* X86_TUNE_PAD_RETURNS */
  m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,

  /* X86_TUNE_EXT_80387_CONSTANTS */
  m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
  | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SHORTEN_X87_SSE */

  /* X86_TUNE_AVOID_VECTOR_DECODE */

  /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for
     HImode and SImode multiply, but the 386 and 486 do HImode multiply
     faster.  */

  /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of a 32-bit constant and memory is
     a vector path on AMD machines.  */
  m_K8 | m_GENERIC64 | m_AMDFAM10,
1502 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
1504 m_K8 | m_GENERIC64 | m_AMDFAM10,
/* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
   than via MOV.  */
1510 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1511 but one byte longer. */
1514 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
1515 operand that cannot be represented using a modRM byte. The XOR
1516 replacement is long decoded, so this split helps here as well. */
/* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
   from FP to FP.  */
1521 m_AMDFAM10 | m_GENERIC,
1523 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1524 from integer to FP. */
1527 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1528 with a subsequent conditional jump instruction into a single
1529 compare-and-branch uop. */
1532 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
1533 will impact LEA instruction selection. */
1537 /* Feature tests against the various architecture variations. */
1538 unsigned char ix86_arch_features[X86_ARCH_LAST];
1540 /* Feature tests against the various architecture variations, used to create
1541 ix86_arch_features based on the processor mask. */
1542 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1543 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1544 ~(m_386 | m_486 | m_PENT | m_K6),
1546 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1549 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1552 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1555 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
static const unsigned int x86_accumulate_outgoing_args
  = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
    | m_GENERIC;
1563 static const unsigned int x86_arch_always_fancy_math_387
1564 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1565 | m_NOCONA | m_CORE2 | m_GENERIC;
1567 static enum stringop_alg stringop_alg = no_stringop;
/* In case the average insn count for single function invocation is
   lower than this constant, emit fast (but longer) prologue and
   epilogue code.  */
1572 #define FAST_PROLOGUE_INSN_COUNT 20
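
/* Illustrative sketch (hypothetical helper, not part of the original
   source): the prologue expander's use of this constant boils down to a
   simple threshold test on the function's estimated insn count.  */

static bool ATTRIBUTE_UNUSED
ix86_example_use_fast_prologue_p (int insn_count)
{
  return insn_count < FAST_PROLOGUE_INSN_COUNT;
}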
1574 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
1575 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1576 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1577 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1579 /* Array of the smallest class containing reg number REGNO, indexed by
1580 REGNO. Used by REGNO_REG_CLASS in i386.h. */
enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
{
1584 /* ax, dx, cx, bx */
1585 AREG, DREG, CREG, BREG,
1586 /* si, di, bp, sp */
1587 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1589 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
  FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
  /* arg pointer */
  NON_Q_REGS,
  /* flags, fpsr, fpcr, frame */
1594 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
  /* SSE registers */
  SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  SSE_REGS, SSE_REGS,
  /* MMX registers */
  MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
  MMX_REGS, MMX_REGS,
  /* REX registers */
1602 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1603 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1604 /* SSE REX registers */
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  SSE_REGS, SSE_REGS,
};
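
/* Usage sketch: REGNO_REG_CLASS simply indexes this table, so
   REGNO_REG_CLASS (1) is DREG (%edx forms its own class) while
   REGNO_REG_CLASS (7) is NON_Q_REGS, since %esp has no addressable
   low byte.  */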
1609 /* The "default" register map used in 32bit mode. */
int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
1613 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1614 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1615 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1616 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1617 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1618 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended SSE registers */
};
1622 /* The "default" register map used in 64bit mode. */
int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
{
1626 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1627 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1628 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1629 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1630 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
  8, 9, 10, 11, 12, 13, 14, 15,		/* extended integer registers */
  25, 26, 27, 28, 29, 30, 31, 32,	/* extended SSE registers */
};
1635 /* Define the register numbers to be used in Dwarf debugging information.
1636 The SVR4 reference port C compiler uses the following register numbers
1637 in its Dwarf output code:
1638 0 for %eax (gcc regno = 0)
1639 1 for %ecx (gcc regno = 2)
1640 2 for %edx (gcc regno = 1)
1641 3 for %ebx (gcc regno = 3)
1642 4 for %esp (gcc regno = 7)
1643 5 for %ebp (gcc regno = 6)
1644 6 for %esi (gcc regno = 4)
1645 7 for %edi (gcc regno = 5)
1646 The following three DWARF register numbers are never generated by
1647 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1648 believes these numbers have these meanings.
1649 8 for %eip (no gcc equivalent)
1650 9 for %eflags (gcc regno = 17)
1651 10 for %trapno (no gcc equivalent)
1652 It is not at all clear how we should number the FP stack registers
1653 for the x86 architecture. If the version of SDB on x86/svr4 were
1654 a bit less brain dead with respect to floating-point then we would
1655 have a precedent to follow with respect to DWARF register numbers
1656 for x86 FP registers, but the SDB on x86/svr4 is so completely
1657 broken with respect to FP registers that it is hardly worth thinking
1658 of it as something to strive for compatibility with.
1659 The version of x86/svr4 SDB I have at the moment does (partially)
1660 seem to believe that DWARF register number 11 is associated with
1661 the x86 register %st(0), but that's about all. Higher DWARF
1662 register numbers don't seem to be associated with anything in
1663 particular, and even for DWARF regno 11, SDB only seems to under-
1664 stand that it should say that a variable lives in %st(0) (when
1665 asked via an `=' command) if we said it was in DWARF regno 11,
1666 but SDB still prints garbage when asked for the value of the
1667 variable in question (via a `/' command).
1668 (Also note that the labels SDB prints for various FP stack regs
1669 when doing an `x' command are all wrong.)
1670 Note that these problems generally don't affect the native SVR4
1671 C compiler because it doesn't allow the use of -O with -g and
1672 because when it is *not* optimizing, it allocates a memory
1673 location for each floating-point variable, and the memory
1674 location is what gets described in the DWARF AT_location
1675 attribute for the variable in question.
1676 Regardless of the severe mental illness of the x86/svr4 SDB, we
1677 do something sensible here and we use the following DWARF
   register numbers.  Note that these are all stack-top-relative
   numbers:
1680 11 for %st(0) (gcc regno = 8)
1681 12 for %st(1) (gcc regno = 9)
1682 13 for %st(2) (gcc regno = 10)
1683 14 for %st(3) (gcc regno = 11)
1684 15 for %st(4) (gcc regno = 12)
1685 16 for %st(5) (gcc regno = 13)
1686 17 for %st(6) (gcc regno = 14)
	18 for %st(7) (gcc regno = 15)
*/
int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
1691 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1692 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1693 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1694 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1695 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1696 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended SSE registers */
};
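
/* Illustrative sketch (hypothetical helper, not part of the original
   source): translating a gcc hard register number to its SVR4 DWARF
   number is a plain table lookup; e.g. %ecx is gcc regno 2 but DWARF
   regno 1 under the SVR4 numbering above.  */

static int ATTRIBUTE_UNUSED
ix86_example_svr4_dwarf_regno (int regno)
{
  /* -1 marks registers that have no SVR4 DWARF number.  */
  return svr4_dbx_register_map[regno];
}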
1700 /* Test and compare insns in i386.md store the information needed to
1701 generate branch and scc insns here. */
1703 rtx ix86_compare_op0 = NULL_RTX;
1704 rtx ix86_compare_op1 = NULL_RTX;
1706 /* Define parameter passing and return registers. */
static int const x86_64_int_parameter_registers[6] =
{
  DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
};
static int const x86_64_ms_abi_int_parameter_registers[4] =
{
  CX_REG, DX_REG, R8_REG, R9_REG
};
static int const x86_64_int_return_registers[4] =
{
  AX_REG, DX_REG, DI_REG, SI_REG
};
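
/* Illustrative sketch (hypothetical helper, not part of the original
   source): the Nth integer argument lands in %rdi under the SysV ABI
   but in %rcx under the MS ABI, by a simple indexed lookup into the
   tables above.  */

static int ATTRIBUTE_UNUSED
ix86_example_nth_int_parameter_reg (enum calling_abi abi, int n)
{
  return (abi == MS_ABI
	  ? x86_64_ms_abi_int_parameter_registers[n]
	  : x86_64_int_parameter_registers[n]);
}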
1723 /* Define the structure for the machine field in struct function. */
1725 struct GTY(()) stack_local_entry {
  unsigned short mode;
  unsigned short n;
  rtx rtl;
  struct stack_local_entry *next;
};
1732 /* Structure describing stack frame layout.
1733 Stack grows downward:
   saved frame pointer if frame_pointer_needed
					      <- HARD_FRAME_POINTER
   [saved regs]

   [va_arg registers]
					      > to_allocate
					      <- FRAME_POINTER
   [frame]  */

struct ix86_frame
{
  int padding0;
  int nsseregs;
  int nregs;
  int padding1;
  int va_arg_size;
  HOST_WIDE_INT frame;
  int padding2;
  int outgoing_arguments_size;
  int red_zone_size;
1767 HOST_WIDE_INT to_allocate;
1768 /* The offsets relative to ARG_POINTER. */
1769 HOST_WIDE_INT frame_pointer_offset;
1770 HOST_WIDE_INT hard_frame_pointer_offset;
1771 HOST_WIDE_INT stack_pointer_offset;
1773 /* When save_regs_using_mov is set, emit prologue using
1774 move instead of push instructions. */
  bool save_regs_using_mov;
};
1778 /* Code model option. */
1779 enum cmodel ix86_cmodel;
1781 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1783 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1785 /* Which unit we are generating floating point math for. */
1786 enum fpmath_unit ix86_fpmath;
1788 /* Which cpu are we scheduling for. */
1789 enum attr_cpu ix86_schedule;
1791 /* Which cpu are we optimizing for. */
1792 enum processor_type ix86_tune;
1794 /* Which instruction set architecture to use. */
1795 enum processor_type ix86_arch;
/* True if the SSE prefetch instruction is not a NOP.  */
1798 int x86_prefetch_sse;
1800 /* ix86_regparm_string as a number */
1801 static int ix86_regparm;
1803 /* -mstackrealign option */
1804 extern int ix86_force_align_arg_pointer;
1805 static const char ix86_force_align_arg_pointer_string[]
1806 = "force_align_arg_pointer";
1808 static rtx (*ix86_gen_leave) (void);
1809 static rtx (*ix86_gen_pop1) (rtx);
1810 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1811 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1812 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
1813 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1814 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1815 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1817 /* Preferred alignment for stack boundary in bits. */
1818 unsigned int ix86_preferred_stack_boundary;
/* Alignment for incoming stack boundary in bits specified at
   command line.  */
static unsigned int ix86_user_incoming_stack_boundary;
1824 /* Default alignment for incoming stack boundary in bits. */
1825 static unsigned int ix86_default_incoming_stack_boundary;
1827 /* Alignment for incoming stack boundary in bits. */
1828 unsigned int ix86_incoming_stack_boundary;
/* The ABI used by the target.  */
1831 enum calling_abi ix86_abi;
1833 /* Values 1-5: see jump.c */
1834 int ix86_branch_cost;
/* Calling-ABI-specific va_list type nodes.  */
1837 static GTY(()) tree sysv_va_list_type_node;
1838 static GTY(()) tree ms_va_list_type_node;
1840 /* Variables which are this size or smaller are put in the data/bss
1841 or ldata/lbss sections. */
1843 int ix86_section_threshold = 65536;
1845 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1846 char internal_label_prefix[16];
1847 int internal_label_prefix_len;
/* Fence to use after loop using movnt.  */
tree x86_mfence;
/* Register class used for passing the given 64bit part of the argument.
   These represent classes as documented by the psABI, with the exception
   of the SSESF and SSEDF classes, which are basically the SSE class,
   except that gcc will use SFmode or DFmode moves instead of DImode
   moves to avoid reformatting penalties.

   Similarly we play games with the INTEGERSI_CLASS to use cheaper SImode
   moves whenever possible (the upper half then contains only padding).  */
1859 enum x86_64_reg_class
1862 X86_64_INTEGER_CLASS,
1863 X86_64_INTEGERSI_CLASS,
1870 X86_64_COMPLEX_X87_CLASS,
1874 #define MAX_CLASSES 4
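
/* Classification sketch (informal, assuming the 64-bit SysV psABI): a
   16-byte aggregate such as struct { double d; long l; } spans two
   eightbytes; the first classifies as X86_64_SSEDF_CLASS (passed in an
   XMM register via a DFmode move) and the second as
   X86_64_INTEGER_CLASS (passed in a general register).  At most
   MAX_CLASSES eightbytes are classified per argument.  */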
1876 /* Table of constants used by fldpi, fldln2, etc.... */
1877 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1878 static bool ext_80387_constants_init = 0;
1881 static struct machine_function * ix86_init_machine_status (void);
1882 static rtx ix86_function_value (const_tree, const_tree, bool);
1883 static bool ix86_function_value_regno_p (const unsigned int);
1884 static rtx ix86_static_chain (const_tree, bool);
1885 static int ix86_function_regparm (const_tree, const_tree);
1886 static void ix86_compute_frame_layout (struct ix86_frame *);
1887 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1889 static void ix86_add_new_builtins (int);
1890 static rtx ix86_expand_vec_perm_builtin (tree);
1892 enum ix86_function_specific_strings
1894 IX86_FUNCTION_SPECIFIC_ARCH,
1895 IX86_FUNCTION_SPECIFIC_TUNE,
1896 IX86_FUNCTION_SPECIFIC_FPMATH,
1897 IX86_FUNCTION_SPECIFIC_MAX
1900 static char *ix86_target_string (int, int, const char *, const char *,
1901 const char *, bool);
1902 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1903 static void ix86_function_specific_save (struct cl_target_option *);
1904 static void ix86_function_specific_restore (struct cl_target_option *);
1905 static void ix86_function_specific_print (FILE *, int,
1906 struct cl_target_option *);
1907 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
1908 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
1909 static bool ix86_can_inline_p (tree, tree);
1910 static void ix86_set_current_function (tree);
1911 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
1913 static enum calling_abi ix86_function_abi (const_tree);
1916 #ifndef SUBTARGET32_DEFAULT_CPU
1917 #define SUBTARGET32_DEFAULT_CPU "i386"
/* The svr4 ABI for the i386 says that records and unions are returned
   in memory.  */
1922 #ifndef DEFAULT_PCC_STRUCT_RETURN
1923 #define DEFAULT_PCC_STRUCT_RETURN 1
1926 /* Whether -mtune= or -march= were specified */
1927 static int ix86_tune_defaulted;
1928 static int ix86_arch_specified;
1930 /* Bit flags that specify the ISA we are compiling for. */
1931 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
1933 /* A mask of ix86_isa_flags that includes bit X if X
1934 was set or cleared on the command line. */
1935 static int ix86_isa_flags_explicit;
1937 /* Define a set of ISAs which are available when a given ISA is
1938 enabled. MMX and SSE ISAs are handled separately. */
1940 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
1941 #define OPTION_MASK_ISA_3DNOW_SET \
1942 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
1944 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
1945 #define OPTION_MASK_ISA_SSE2_SET \
1946 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
1947 #define OPTION_MASK_ISA_SSE3_SET \
1948 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
1949 #define OPTION_MASK_ISA_SSSE3_SET \
1950 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
1951 #define OPTION_MASK_ISA_SSE4_1_SET \
1952 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
1953 #define OPTION_MASK_ISA_SSE4_2_SET \
1954 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
1955 #define OPTION_MASK_ISA_AVX_SET \
1956 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
1957 #define OPTION_MASK_ISA_FMA_SET \
1958 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
/* SSE4 includes both SSE4.1 and SSE4.2.  -msse4 should be the same
   as -msse4.2.  */
1962 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
1964 #define OPTION_MASK_ISA_SSE4A_SET \
1965 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
1966 #define OPTION_MASK_ISA_FMA4_SET \
1967 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
1968 | OPTION_MASK_ISA_AVX_SET)
1969 #define OPTION_MASK_ISA_XOP_SET \
1970 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
1971 #define OPTION_MASK_ISA_LWP_SET \
/* AES and PCLMUL need SSE2 because they use xmm registers.  */
1975 #define OPTION_MASK_ISA_AES_SET \
1976 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
1977 #define OPTION_MASK_ISA_PCLMUL_SET \
1978 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
1980 #define OPTION_MASK_ISA_ABM_SET \
1981 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
1983 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
1984 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
1985 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
1986 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
1987 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
1989 /* Define a set of ISAs which aren't available when a given ISA is
1990 disabled. MMX and SSE ISAs are handled separately. */
1992 #define OPTION_MASK_ISA_MMX_UNSET \
1993 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
1994 #define OPTION_MASK_ISA_3DNOW_UNSET \
1995 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
1996 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
1998 #define OPTION_MASK_ISA_SSE_UNSET \
1999 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
2000 #define OPTION_MASK_ISA_SSE2_UNSET \
2001 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
2002 #define OPTION_MASK_ISA_SSE3_UNSET \
2003 (OPTION_MASK_ISA_SSE3 \
2004 | OPTION_MASK_ISA_SSSE3_UNSET \
   | OPTION_MASK_ISA_SSE4A_UNSET)
2006 #define OPTION_MASK_ISA_SSSE3_UNSET \
2007 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2008 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2009 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2010 #define OPTION_MASK_ISA_SSE4_2_UNSET \
  (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET)
2012 #define OPTION_MASK_ISA_AVX_UNSET \
2013 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2014 | OPTION_MASK_ISA_FMA4_UNSET)
2015 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
/* SSE4 includes both SSE4.1 and SSE4.2.  -mno-sse4 should be the same
   as -mno-sse4.1.  */
2019 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2021 #define OPTION_MASK_ISA_SSE4A_UNSET \
2022 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2024 #define OPTION_MASK_ISA_FMA4_UNSET \
2025 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2026 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2027 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2029 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2030 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2031 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2032 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2033 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2034 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2035 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2036 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
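
/* Illustrative sketch (hypothetical helper, not part of the original
   source): the *_SET and *_UNSET masks above form transitive closures
   over the ISA implications, so handling one option is just a pair of
   mask operations.  E.g. OPTION_MASK_ISA_SSE4_2_SET also contains the
   SSE4.1, SSSE3, SSE3, SSE2 and SSE bits, and
   OPTION_MASK_ISA_SSE4_2_UNSET also clears AVX and everything else
   that implies SSE4.2.  */

static void ATTRIBUTE_UNUSED
ix86_example_toggle_sse4_2 (int enable)
{
  if (enable)
    {
      /* -msse4.2: enable SSE4.2 plus everything it implies.  */
      ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
    }
  else
    {
      /* -mno-sse4.2: disable SSE4.2 plus everything that implies it.  */
      ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
    }
}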
2038 /* Vectorization library interface and handlers. */
2039 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
2040 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2041 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
/* Processor target table, indexed by processor number.  */
struct ptt
{
  const struct processor_costs *cost;	/* Processor costs */
2047 const int align_loop; /* Default alignments. */
2048 const int align_loop_max_skip;
2049 const int align_jump;
2050 const int align_jump_max_skip;
  const int align_func;
};
static const struct ptt processor_target_table[PROCESSOR_max] =
{
  {&i386_cost, 4, 3, 4, 3, 4},
2057 {&i486_cost, 16, 15, 16, 15, 16},
2058 {&pentium_cost, 16, 7, 16, 7, 16},
2059 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2060 {&geode_cost, 0, 0, 0, 0, 0},
2061 {&k6_cost, 32, 7, 32, 7, 32},
2062 {&athlon_cost, 16, 7, 16, 7, 16},
2063 {&pentium4_cost, 0, 0, 0, 0, 0},
2064 {&k8_cost, 16, 7, 16, 7, 16},
2065 {&nocona_cost, 0, 0, 0, 0, 0},
2066 {&core2_cost, 16, 10, 16, 10, 16},
2067 {&generic32_cost, 16, 7, 16, 7, 16},
2068 {&generic64_cost, 16, 10, 16, 10, 16},
2069 {&amdfam10_cost, 32, 24, 32, 7, 32},
  {&atom_cost, 16, 7, 16, 7, 16}
};
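
/* Usage sketch: processor_target_table[PROCESSOR_CORE2].align_loop is
   16 with an align_loop_max_skip of 10; the align_* defaulting code in
   override_options installs these values when no -falign-* option is
   given.  */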
2073 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2099 /* Implement TARGET_HANDLE_OPTION. */
2102 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
2109 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2110 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2114 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2115 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2122 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2123 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2127 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2128 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2138 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2139 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2143 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2144 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2151 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2152 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2156 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2157 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2164 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2165 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2169 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2170 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2177 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2178 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2182 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2183 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2190 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2191 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2195 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2196 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2203 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2204 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2208 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2209 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2216 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2217 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2221 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2222 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2229 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2230 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2234 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2235 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2240 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2241 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2245 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2246 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2252 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2253 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2257 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2258 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2265 ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2266 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2270 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2271 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2278 ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2279 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2283 ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2284 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2291 ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2292 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2296 ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2297 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2304 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2305 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2309 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2310 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2317 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2318 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2322 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2323 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2330 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2331 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2335 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2336 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2343 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2344 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2348 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2349 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2356 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2357 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2361 ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2362 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2369 ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2370 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2374 ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2375 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2382 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2383 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2387 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2388 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2395 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2396 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2400 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2401 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
2410 /* Return a string that documents the current -m options. The caller is
2411 responsible for freeing the string. */
2414 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2415 const char *fpmath, bool add_nl_p)
  struct ix86_target_opts
  {
    const char *option;		/* option string */
    int mask;			/* isa mask options */
  };
  /* This table is ordered so that options like -msse4.2 that imply
     preceding options will match those first.  */
  static struct ix86_target_opts isa_opts[] =
  {
    { "-m64", OPTION_MASK_ISA_64BIT },
2428 { "-mfma4", OPTION_MASK_ISA_FMA4 },
2429 { "-mfma", OPTION_MASK_ISA_FMA },
2430 { "-mxop", OPTION_MASK_ISA_XOP },
2431 { "-mlwp", OPTION_MASK_ISA_LWP },
2432 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2433 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2434 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2435 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2436 { "-msse3", OPTION_MASK_ISA_SSE3 },
2437 { "-msse2", OPTION_MASK_ISA_SSE2 },
2438 { "-msse", OPTION_MASK_ISA_SSE },
2439 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2440 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2441 { "-mmmx", OPTION_MASK_ISA_MMX },
2442 { "-mabm", OPTION_MASK_ISA_ABM },
2443 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2444 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2445 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2446 { "-maes", OPTION_MASK_ISA_AES },
2447 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
  };

  /* Flag options.  */
  static struct ix86_target_opts flag_opts[] =
  {
2453 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2454 { "-m80387", MASK_80387 },
2455 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2456 { "-malign-double", MASK_ALIGN_DOUBLE },
2457 { "-mcld", MASK_CLD },
2458 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2459 { "-mieee-fp", MASK_IEEE_FP },
2460 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2461 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2462 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2463 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2464 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2465 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2466 { "-mno-red-zone", MASK_NO_RED_ZONE },
2467 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2468 { "-mrecip", MASK_RECIP },
2469 { "-mrtd", MASK_RTD },
2470 { "-msseregparm", MASK_SSEREGPARM },
2471 { "-mstack-arg-probe", MASK_STACK_PROBE },
2472 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2475 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2478 char target_other[40];
2487 memset (opts, '\0', sizeof (opts));
2489 /* Add -march= option. */
2492 opts[num][0] = "-march=";
2493 opts[num++][1] = arch;
2496 /* Add -mtune= option. */
2499 opts[num][0] = "-mtune=";
2500 opts[num++][1] = tune;
2503 /* Pick out the options in isa options. */
2504 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2506 if ((isa & isa_opts[i].mask) != 0)
2508 opts[num++][0] = isa_opts[i].option;
2509 isa &= ~ isa_opts[i].mask;
2513 if (isa && add_nl_p)
2515 opts[num++][0] = isa_other;
2516 sprintf (isa_other, "(other isa: %#x)", isa);
2519 /* Add flag options. */
2520 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2522 if ((flags & flag_opts[i].mask) != 0)
2524 opts[num++][0] = flag_opts[i].option;
2525 flags &= ~ flag_opts[i].mask;
2529 if (flags && add_nl_p)
2531 opts[num++][0] = target_other;
      sprintf (target_other, "(other flags: %#x)", flags);
2535 /* Add -fpmath= option. */
2538 opts[num][0] = "-mfpmath=";
2539 opts[num++][1] = fpmath;
2546 gcc_assert (num < ARRAY_SIZE (opts));
2548 /* Size the string. */
2550 sep_len = (add_nl_p) ? 3 : 1;
2551 for (i = 0; i < num; i++)
2554 for (j = 0; j < 2; j++)
2556 len += strlen (opts[i][j]);
2559 /* Build the string. */
2560 ret = ptr = (char *) xmalloc (len);
2563 for (i = 0; i < num; i++)
2567 for (j = 0; j < 2; j++)
2568 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2575 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2583 for (j = 0; j < 2; j++)
2586 memcpy (ptr, opts[i][j], len2[j]);
2588 line_len += len2[j];
  gcc_assert (ret + len >= ptr);

  return ret;
}
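
/* Usage sketch (hypothetical argument values): a call such as

     ix86_target_string (OPTION_MASK_ISA_SSE2, MASK_80387,
			 "core2", "generic", "sse", false)

   returns a heap-allocated string along the lines of

     "-march=core2 -mtune=generic -msse2 -m80387 -mfpmath=sse"

   which the caller must free.  */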
/* Function that is callable from the debugger to print the current
   options.  */
static void
ix86_debug_options (void)
{
  char *opts = ix86_target_string (ix86_isa_flags, target_flags,
				   ix86_arch_string, ix86_tune_string,
				   ix86_fpmath_string, true);

  if (opts)
    {
      fprintf (stderr, "%s\n\n", opts);
      free (opts);
    }
  else
    fputs ("<no options>\n\n", stderr);
}
2618 /* Sometimes certain combinations of command options do not make
2619 sense on a particular target machine. You can define a macro
2620 `OVERRIDE_OPTIONS' to take account of this. This macro, if
   defined, is executed once just after all the command options have
   been parsed.
2624 Don't use this macro to turn on various extra optimizations for
2625 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2628 override_options (bool main_args_p)
2631 unsigned int ix86_arch_mask, ix86_tune_mask;
2632 const bool ix86_tune_specified = (ix86_tune_string != NULL);
2637 /* Comes from final.c -- no real reason to change it. */
2638 #define MAX_CODE_ALIGN 16
2646 PTA_PREFETCH_SSE = 1 << 4,
2648 PTA_3DNOW_A = 1 << 6,
2652 PTA_POPCNT = 1 << 10,
2654 PTA_SSE4A = 1 << 12,
2655 PTA_NO_SAHF = 1 << 13,
2656 PTA_SSE4_1 = 1 << 14,
2657 PTA_SSE4_2 = 1 << 15,
2659 PTA_PCLMUL = 1 << 17,
2662 PTA_MOVBE = 1 << 20,
2670 const char *const name; /* processor name or nickname. */
2671 const enum processor_type processor;
2672 const enum attr_cpu schedule;
2673 const unsigned /*enum pta_flags*/ flags;
2675 const processor_alias_table[] =
2677 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2678 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2679 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2680 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2681 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2682 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2683 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2684 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2685 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2686 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2687 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2688 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2689 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2691 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2693 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2694 PTA_MMX | PTA_SSE | PTA_SSE2},
2695 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2696 PTA_MMX |PTA_SSE | PTA_SSE2},
2697 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2698 PTA_MMX | PTA_SSE | PTA_SSE2},
2699 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2700 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2701 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2702 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2703 | PTA_CX16 | PTA_NO_SAHF},
2704 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2705 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2706 | PTA_SSSE3 | PTA_CX16},
2707 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2708 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2709 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2710 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2711 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A |PTA_PREFETCH_SSE},
2712 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2713 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2714 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2715 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2716 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2717 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2718 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2719 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2720 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2721 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2722 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2723 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2724 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2725 {"x86-64", PROCESSOR_K8, CPU_K8,
2726 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2727 {"k8", PROCESSOR_K8, CPU_K8,
2728 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2729 | PTA_SSE2 | PTA_NO_SAHF},
2730 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2731 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2732 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2733 {"opteron", PROCESSOR_K8, CPU_K8,
2734 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2735 | PTA_SSE2 | PTA_NO_SAHF},
2736 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2737 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2738 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2739 {"athlon64", PROCESSOR_K8, CPU_K8,
2740 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2741 | PTA_SSE2 | PTA_NO_SAHF},
2742 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2743 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2744 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2745 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2746 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2747 | PTA_SSE2 | PTA_NO_SAHF},
2748 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2749 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2750 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2751 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2752 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2753 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2754 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2755 0 /* flags are only used for -march switch. */ },
2756 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2757 PTA_64BIT /* flags are only used for -march switch. */ },
2760 int const pta_size = ARRAY_SIZE (processor_alias_table);
2762 /* Set up prefix/suffix so the error messages refer to either the command
2763 line argument, or the attribute(target). */
2772 prefix = "option(\"";
2777 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2778 SUBTARGET_OVERRIDE_OPTIONS;
2781 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2782 SUBSUBTARGET_OVERRIDE_OPTIONS;
2785 /* -fPIC is the default for x86_64. */
2786 if (TARGET_MACHO && TARGET_64BIT)
2789 /* Set the default values for switches whose default depends on TARGET_64BIT
2790 in case they weren't overwritten by command line options. */
2793 /* Mach-O doesn't support omitting the frame pointer for now. */
2794 if (flag_omit_frame_pointer == 2)
2795 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2796 if (flag_asynchronous_unwind_tables == 2)
2797 flag_asynchronous_unwind_tables = 1;
2798 if (flag_pcc_struct_return == 2)
2799 flag_pcc_struct_return = 0;
2803 if (flag_omit_frame_pointer == 2)
2804 flag_omit_frame_pointer = 0;
2805 if (flag_asynchronous_unwind_tables == 2)
2806 flag_asynchronous_unwind_tables = 0;
2807 if (flag_pcc_struct_return == 2)
2808 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2811 /* Need to check -mtune=generic first. */
2812 if (ix86_tune_string)
2814 if (!strcmp (ix86_tune_string, "generic")
2815 || !strcmp (ix86_tune_string, "i686")
2816 /* As special support for cross compilers we read -mtune=native
2817 as -mtune=generic. With native compilers we won't see the
2818 -mtune=native, as it was changed by the driver. */
2819 || !strcmp (ix86_tune_string, "native"))
2822 ix86_tune_string = "generic64";
2824 ix86_tune_string = "generic32";
2826 /* If this call is for setting the option attribute, allow the
2827 generic32/generic64 that was previously set. */
2828 else if (!main_args_p
2829 && (!strcmp (ix86_tune_string, "generic32")
2830 || !strcmp (ix86_tune_string, "generic64")))
2832 else if (!strncmp (ix86_tune_string, "generic", 7))
2833 error ("bad value (%s) for %stune=%s %s",
2834 ix86_tune_string, prefix, suffix, sw);
2835 else if (!strcmp (ix86_tune_string, "x86-64"))
2836 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2837 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2838 prefix, suffix, prefix, suffix, prefix, suffix);
2842 if (ix86_arch_string)
2843 ix86_tune_string = ix86_arch_string;
2844 if (!ix86_tune_string)
2846 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2847 ix86_tune_defaulted = 1;
2850 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2851 need to use a sensible tune option. */
2852 if (!strcmp (ix86_tune_string, "generic")
2853 || !strcmp (ix86_tune_string, "x86-64")
2854 || !strcmp (ix86_tune_string, "i686"))
2857 ix86_tune_string = "generic64";
2859 ix86_tune_string = "generic32";
2863 if (ix86_stringop_string)
2865 if (!strcmp (ix86_stringop_string, "rep_byte"))
2866 stringop_alg = rep_prefix_1_byte;
2867 else if (!strcmp (ix86_stringop_string, "libcall"))
2868 stringop_alg = libcall;
2869 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2870 stringop_alg = rep_prefix_4_byte;
2871 else if (!strcmp (ix86_stringop_string, "rep_8byte")
2873 /* rep; movq isn't available in 32-bit code. */
2874 stringop_alg = rep_prefix_8_byte;
2875 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2876 stringop_alg = loop_1_byte;
2877 else if (!strcmp (ix86_stringop_string, "loop"))
2878 stringop_alg = loop;
2879 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2880 stringop_alg = unrolled_loop;
2882 error ("bad value (%s) for %sstringop-strategy=%s %s",
2883 ix86_stringop_string, prefix, suffix, sw);
2886 if (!ix86_arch_string)
2887 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
  else
    ix86_arch_specified = 1;
2891 /* Validate -mabi= value. */
2892 if (ix86_abi_string)
2894 if (strcmp (ix86_abi_string, "sysv") == 0)
2895 ix86_abi = SYSV_ABI;
2896 else if (strcmp (ix86_abi_string, "ms") == 0)
2899 error ("unknown ABI (%s) for %sabi=%s %s",
2900 ix86_abi_string, prefix, suffix, sw);
    }
  else
    ix86_abi = DEFAULT_ABI;
2905 if (ix86_cmodel_string != 0)
2907 if (!strcmp (ix86_cmodel_string, "small"))
2908 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2909 else if (!strcmp (ix86_cmodel_string, "medium"))
2910 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
2911 else if (!strcmp (ix86_cmodel_string, "large"))
2912 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
2914 error ("code model %s does not support PIC mode", ix86_cmodel_string);
2915 else if (!strcmp (ix86_cmodel_string, "32"))
2916 ix86_cmodel = CM_32;
2917 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
2918 ix86_cmodel = CM_KERNEL;
2920 error ("bad value (%s) for %scmodel=%s %s",
2921 ix86_cmodel_string, prefix, suffix, sw);
    }
  else
    {
      /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
2926 use of rip-relative addressing. This eliminates fixups that
2927 would otherwise be needed if this object is to be placed in a
2928 DLL, and is essentially just as efficient as direct addressing. */
2929 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
2930 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
2931 else if (TARGET_64BIT)
2932 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
      else
	ix86_cmodel = CM_32;
    }
2936 if (ix86_asm_string != 0)
2939 && !strcmp (ix86_asm_string, "intel"))
2940 ix86_asm_dialect = ASM_INTEL;
2941 else if (!strcmp (ix86_asm_string, "att"))
2942 ix86_asm_dialect = ASM_ATT;
2944 error ("bad value (%s) for %sasm=%s %s",
2945 ix86_asm_string, prefix, suffix, sw);
2947 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
2948 error ("code model %qs not supported in the %s bit mode",
2949 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
2950 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
2951 sorry ("%i-bit mode not compiled in",
2952 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
2954 for (i = 0; i < pta_size; i++)
2955 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
      {
	ix86_schedule = processor_alias_table[i].schedule;
2958 ix86_arch = processor_alias_table[i].processor;
2959 /* Default cpu tuning to the architecture. */
2960 ix86_tune = ix86_arch;
2962 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2963 error ("CPU you selected does not support x86-64 "
2966 if (processor_alias_table[i].flags & PTA_MMX
2967 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
2968 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
2969 if (processor_alias_table[i].flags & PTA_3DNOW
2970 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
2971 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
2972 if (processor_alias_table[i].flags & PTA_3DNOW_A
2973 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
2974 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
2975 if (processor_alias_table[i].flags & PTA_SSE
2976 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
2977 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
2978 if (processor_alias_table[i].flags & PTA_SSE2
2979 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2980 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
2981 if (processor_alias_table[i].flags & PTA_SSE3
2982 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
2983 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
2984 if (processor_alias_table[i].flags & PTA_SSSE3
2985 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
2986 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
2987 if (processor_alias_table[i].flags & PTA_SSE4_1
2988 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
2989 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
2990 if (processor_alias_table[i].flags & PTA_SSE4_2
2991 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
2992 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
2993 if (processor_alias_table[i].flags & PTA_AVX
2994 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
2995 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
2996 if (processor_alias_table[i].flags & PTA_FMA
2997 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
2998 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
2999 if (processor_alias_table[i].flags & PTA_SSE4A
3000 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3001 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3002 if (processor_alias_table[i].flags & PTA_FMA4
3003 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3004 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3005 if (processor_alias_table[i].flags & PTA_XOP
3006 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3007 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3008 if (processor_alias_table[i].flags & PTA_LWP
3009 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3010 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3011 if (processor_alias_table[i].flags & PTA_ABM
3012 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3013 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3014 if (processor_alias_table[i].flags & PTA_CX16
3015 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3016 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3017 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3018 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3019 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3020 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3021 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3022 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3023 if (processor_alias_table[i].flags & PTA_MOVBE
3024 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3025 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3026 if (processor_alias_table[i].flags & PTA_AES
3027 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3028 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3029 if (processor_alias_table[i].flags & PTA_PCLMUL
3030 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3031 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3032 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
	  x86_prefetch_sse = true;

	break;
      }
3038 if (!strcmp (ix86_arch_string, "generic"))
3039 error ("generic CPU can be used only for %stune=%s %s",
3040 prefix, suffix, sw);
3041 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3042 error ("bad value (%s) for %sarch=%s %s",
3043 ix86_arch_string, prefix, suffix, sw);
3045 ix86_arch_mask = 1u << ix86_arch;
3046 for (i = 0; i < X86_ARCH_LAST; ++i)
3047 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3049 for (i = 0; i < pta_size; i++)
3050 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
      {
	ix86_schedule = processor_alias_table[i].schedule;
3053 ix86_tune = processor_alias_table[i].processor;
3054 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3056 if (ix86_tune_defaulted)
3058 ix86_tune_string = "x86-64";
3059 for (i = 0; i < pta_size; i++)
3060 if (! strcmp (ix86_tune_string,
3061 processor_alias_table[i].name))
3063 ix86_schedule = processor_alias_table[i].schedule;
3064 ix86_tune = processor_alias_table[i].processor;
3067 error ("CPU you selected does not support x86-64 "
3070 /* Intel CPUs have always interpreted SSE prefetch instructions as
3071 NOPs; so, we can enable SSE prefetch instructions even when
3072 -mtune (rather than -march) points us to a processor that has them.
3073 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3074 higher processors. */
	if (TARGET_CMOVE
	    && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
	  x86_prefetch_sse = true;

	break;
      }
3081 if (ix86_tune_specified && i == pta_size)
3082 error ("bad value (%s) for %stune=%s %s",
3083 ix86_tune_string, prefix, suffix, sw);
3085 ix86_tune_mask = 1u << ix86_tune;
3086 for (i = 0; i < X86_TUNE_LAST; ++i)
3087 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3090 ix86_cost = &ix86_size_cost;
3092 ix86_cost = processor_target_table[ix86_tune].cost;
3094 /* Arrange to set up i386_stack_locals for all functions. */
3095 init_machine_status = ix86_init_machine_status;
3097 /* Validate -mregparm= value. */
3098 if (ix86_regparm_string)
3101 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3102 i = atoi (ix86_regparm_string);
3103 if (i < 0 || i > REGPARM_MAX)
3104 error ("%sregparm=%d%s is not between 0 and %d",
3105 prefix, i, suffix, REGPARM_MAX);
3110 ix86_regparm = REGPARM_MAX;
3112 /* If the user has provided any of the -malign-* options,
3113 warn and use that value only if -falign-* is not set.
3114 Remove this code in GCC 3.2 or later. */
3115 if (ix86_align_loops_string)
3117 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3118 prefix, suffix, suffix);
3119 if (align_loops == 0)
3121 i = atoi (ix86_align_loops_string);
3122 if (i < 0 || i > MAX_CODE_ALIGN)
3123 error ("%salign-loops=%d%s is not between 0 and %d",
3124 prefix, i, suffix, MAX_CODE_ALIGN);
3126 align_loops = 1 << i;
3130 if (ix86_align_jumps_string)
3132 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3133 prefix, suffix, suffix);
3134 if (align_jumps == 0)
3136 i = atoi (ix86_align_jumps_string);
3137 if (i < 0 || i > MAX_CODE_ALIGN)
3138 error ("%salign-loops=%d%s is not between 0 and %d",
3139 prefix, i, suffix, MAX_CODE_ALIGN);
3141 align_jumps = 1 << i;
3145 if (ix86_align_funcs_string)
3147 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3148 prefix, suffix, suffix);
3149 if (align_functions == 0)
3151 i = atoi (ix86_align_funcs_string);
3152 if (i < 0 || i > MAX_CODE_ALIGN)
3153 error ("%salign-loops=%d%s is not between 0 and %d",
3154 prefix, i, suffix, MAX_CODE_ALIGN);
3156 align_functions = 1 << i;
3160 /* Default align_* from the processor table. */
3161 if (align_loops == 0)
3163 align_loops = processor_target_table[ix86_tune].align_loop;
3164 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3166 if (align_jumps == 0)
3168 align_jumps = processor_target_table[ix86_tune].align_jump;
3169 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3171 if (align_functions == 0)
3173 align_functions = processor_target_table[ix86_tune].align_func;
3176 /* Validate -mbranch-cost= value, or provide default. */
3177 ix86_branch_cost = ix86_cost->branch_cost;
3178 if (ix86_branch_cost_string)
3180 i = atoi (ix86_branch_cost_string);
3182 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3184 ix86_branch_cost = i;
3186 if (ix86_section_threshold_string)
3188 i = atoi (ix86_section_threshold_string);
3190 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3192 ix86_section_threshold = i;
3195 if (ix86_tls_dialect_string)
3197 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3198 ix86_tls_dialect = TLS_DIALECT_GNU;
3199 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3200 ix86_tls_dialect = TLS_DIALECT_GNU2;
3202 error ("bad value (%s) for %stls-dialect=%s %s",
3203 ix86_tls_dialect_string, prefix, suffix, sw);
3206 if (ix87_precision_string)
3208 i = atoi (ix87_precision_string);
3209 if (i != 32 && i != 64 && i != 80)
3210 error ("pc%d is not valid precision setting (32, 64 or 80)", i);
3215 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3217 /* Enable by default the SSE and MMX builtins. Do allow the user to
3218 explicitly disable any of these. In particular, disabling SSE and
3219 MMX for kernel code is extremely useful. */
3220 if (!ix86_arch_specified)
3222 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3223 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3226 warning (0, "%srtd%s is ignored in 64bit mode", prefix, suffix);
3230 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3232 if (!ix86_arch_specified)
3234 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
      /* The i386 ABI does not specify a red zone.  It still makes sense to
	 use it when the programmer takes care to keep the stack from being
	 destroyed.  */
3238 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3239 target_flags |= MASK_NO_RED_ZONE;
3242 /* Keep nonleaf frame pointers. */
3243 if (flag_omit_frame_pointer)
3244 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3245 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3246 flag_omit_frame_pointer = 1;
3248 /* If we're doing fast math, we don't care about comparison order
3249 wrt NaNs. This lets us use a shorter comparison sequence. */
3250 if (flag_finite_math_only)
3251 target_flags &= ~MASK_IEEE_FP;
3253 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3254 since the insns won't need emulation. */
3255 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3256 target_flags &= ~MASK_NO_FANCY_MATH_387;
3258 /* Likewise, if the target doesn't have a 387, or we've specified
3259 software floating point, don't use 387 inline intrinsics. */
  if (!TARGET_80387)
    target_flags |= MASK_NO_FANCY_MATH_387;
3263 /* Turn on MMX builtins for -msse. */
  if (TARGET_SSE)
    {
      ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
      x86_prefetch_sse = true;
    }
3270 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3271 if (TARGET_SSE4_2 || TARGET_ABM)
3272 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3274 /* Validate -mpreferred-stack-boundary= value or default it to
3275 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3276 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3277 if (ix86_preferred_stack_boundary_string)
3279 i = atoi (ix86_preferred_stack_boundary_string);
3280 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3281 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3282 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3284 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
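      /* Worked example: -mpreferred-stack-boundary=4 yields
	 (1 << 4) * BITS_PER_UNIT = 16 * 8 = 128 bits, i.e. the 16-byte
	 stack alignment the 64-bit ABI requires.  */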
3287 /* Set the default value for -mstackrealign. */
3288 if (ix86_force_align_arg_pointer == -1)
3289 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3291 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3293 /* Validate -mincoming-stack-boundary= value or default it to
3294 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3295 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3296 if (ix86_incoming_stack_boundary_string)
3298 i = atoi (ix86_incoming_stack_boundary_string);
3299 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3300 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3301 i, TARGET_64BIT ? 4 : 2);
3304 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3305 ix86_incoming_stack_boundary
3306 = ix86_user_incoming_stack_boundary;
3310 /* Accept -msseregparm only if at least SSE support is enabled. */
  if (TARGET_SSEREGPARM
      && ! TARGET_SSE)
    error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3315 ix86_fpmath = TARGET_FPMATH_DEFAULT;
  if (ix86_fpmath_string != 0)
    {
      if (! strcmp (ix86_fpmath_string, "387"))
3319 ix86_fpmath = FPMATH_387;
3320 else if (! strcmp (ix86_fpmath_string, "sse"))
3324 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3325 ix86_fpmath = FPMATH_387;
3328 ix86_fpmath = FPMATH_SSE;
3330 else if (! strcmp (ix86_fpmath_string, "387,sse")
3331 || ! strcmp (ix86_fpmath_string, "387+sse")
3332 || ! strcmp (ix86_fpmath_string, "sse,387")
3333 || ! strcmp (ix86_fpmath_string, "sse+387")
3334 || ! strcmp (ix86_fpmath_string, "both"))
3338 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3339 ix86_fpmath = FPMATH_387;
3341 else if (!TARGET_80387)
3343 warning (0, "387 instruction set disabled, using SSE arithmetics");
3344 ix86_fpmath = FPMATH_SSE;
3347 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3350 error ("bad value (%s) for %sfpmath=%s %s",
3351 ix86_fpmath_string, prefix, suffix, sw);
3354 /* If the i387 is disabled, then do not return values in it. */
  if (!TARGET_80387)
    target_flags &= ~MASK_FLOAT_RETURNS;
3358 /* Use external vectorized library in vectorizing intrinsics. */
3359 if (ix86_veclibabi_string)
3361 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3362 ix86_veclib_handler = ix86_veclibabi_svml;
3363 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3364 ix86_veclib_handler = ix86_veclibabi_acml;
3366 error ("unknown vectorization library ABI type (%s) for "
3367 "%sveclibabi=%s %s", ix86_veclibabi_string,
3368 prefix, suffix, sw);
3371 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3372 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
      && !optimize_size)
    target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3376 /* ??? Unwind info is not correct around the CFG unless either a frame
3377 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3378 unwind info generation to be aware of the CFG and propagating states
3380 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3381 || flag_exceptions || flag_non_call_exceptions)
3382 && flag_omit_frame_pointer
      && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
    {
3385 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3386 warning (0, "unwind tables currently require either a frame pointer "
3387 "or %saccumulate-outgoing-args%s for correctness",
		 prefix, suffix);
      target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
    }
3392 /* If stack probes are required, the space used for large function
3393 arguments on the stack must also be probed, so enable
3394 -maccumulate-outgoing-args so this happens in the prologue. */
3395 if (TARGET_STACK_PROBE
      && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
    {
3398 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3399 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3400 "for correctness", prefix, suffix);
      target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
    }
  /* For sane SSE instruction set generation we need the fcomi
     instruction.  It is safe to enable all CMOVE instructions.  */
3409 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3412 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3413 p = strchr (internal_label_prefix, 'X');
3414 internal_label_prefix_len = p - internal_label_prefix;
3418 /* When scheduling description is not available, disable scheduler pass
3419 so it won't slow down the compilation and make x87 code slower. */
3420 if (!TARGET_SCHEDULE)
3421 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3423 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3424 set_param_value ("simultaneous-prefetches",
3425 ix86_cost->simultaneous_prefetches);
3426 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3427 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3428 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3429 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3430 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3431 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
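/* Illustrative note (not part of the original source): the params set above
   stay user-overridable on the command line; a hypothetical invocation,
   with made-up values, would be:

     gcc -O2 -mtune=generic --param l1-cache-line-size=64 \
         --param l2-cache-size=512 foo.c  */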
3433 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3434 can be optimized to ap = __builtin_next_arg (0). */
3436 targetm.expand_builtin_va_start = NULL;
3440 ix86_gen_leave = gen_leave_rex64;
3441 ix86_gen_pop1 = gen_popdi1;
3442 ix86_gen_add3 = gen_adddi3;
3443 ix86_gen_sub3 = gen_subdi3;
3444 ix86_gen_sub3_carry = gen_subdi3_carry;
3445 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3446 ix86_gen_monitor = gen_sse3_monitor64;
3447 ix86_gen_andsp = gen_anddi3;
3451 ix86_gen_leave = gen_leave;
3452 ix86_gen_pop1 = gen_popsi1;
3453 ix86_gen_add3 = gen_addsi3;
3454 ix86_gen_sub3 = gen_subsi3;
3455 ix86_gen_sub3_carry = gen_subsi3_carry;
3456 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3457 ix86_gen_monitor = gen_sse3_monitor;
3458 ix86_gen_andsp = gen_andsi3;
3462 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3464 target_flags |= MASK_CLD & ~target_flags_explicit;
3467 /* Save the initial options in case the user uses function-specific options. */
3469 target_option_default_node = target_option_current_node
3470 = build_target_option_node ();
3473 /* Update register usage after having seen the compiler flags. */
3476 ix86_conditional_register_usage (void)
3481 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3483 if (fixed_regs[i] > 1)
3484 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3485 if (call_used_regs[i] > 1)
3486 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3489 /* The PIC register, if it exists, is fixed. */
3490 j = PIC_OFFSET_TABLE_REGNUM;
3491 if (j != INVALID_REGNUM)
3492 fixed_regs[j] = call_used_regs[j] = 1;
3494 /* The MS_ABI changes the set of call-used registers. */
3495 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
3497 call_used_regs[SI_REG] = 0;
3498 call_used_regs[DI_REG] = 0;
3499 call_used_regs[XMM6_REG] = 0;
3500 call_used_regs[XMM7_REG] = 0;
3501 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3502 call_used_regs[i] = 0;
3505 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3506 other call-clobbered regs for 64-bit. */
3509 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3511 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3512 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3513 && call_used_regs[i])
3514 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3517 /* If MMX is disabled, squash the registers. */
3519 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3520 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3521 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3523 /* If SSE is disabled, squash the registers. */
3525 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3526 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3527 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3529 /* If the FPU is disabled, squash the registers. */
3530 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3531 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3532 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3533 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3535 /* If 32-bit, squash the 64-bit registers. */
3538 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3540 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3546 /* Save the current options */
3549 ix86_function_specific_save (struct cl_target_option *ptr)
3551 ptr->arch = ix86_arch;
3552 ptr->schedule = ix86_schedule;
3553 ptr->tune = ix86_tune;
3554 ptr->fpmath = ix86_fpmath;
3555 ptr->branch_cost = ix86_branch_cost;
3556 ptr->tune_defaulted = ix86_tune_defaulted;
3557 ptr->arch_specified = ix86_arch_specified;
3558 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3559 ptr->target_flags_explicit = target_flags_explicit;
3561 /* The fields are char but the variables are not; make sure the
3562 values fit in the fields. */
3563 gcc_assert (ptr->arch == ix86_arch);
3564 gcc_assert (ptr->schedule == ix86_schedule);
3565 gcc_assert (ptr->tune == ix86_tune);
3566 gcc_assert (ptr->fpmath == ix86_fpmath);
3567 gcc_assert (ptr->branch_cost == ix86_branch_cost);
3570 /* Restore the current options */
3573 ix86_function_specific_restore (struct cl_target_option *ptr)
3575 enum processor_type old_tune = ix86_tune;
3576 enum processor_type old_arch = ix86_arch;
3577 unsigned int ix86_arch_mask, ix86_tune_mask;
3580 ix86_arch = (enum processor_type) ptr->arch;
3581 ix86_schedule = (enum attr_cpu) ptr->schedule;
3582 ix86_tune = (enum processor_type) ptr->tune;
3583 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
3584 ix86_branch_cost = ptr->branch_cost;
3585 ix86_tune_defaulted = ptr->tune_defaulted;
3586 ix86_arch_specified = ptr->arch_specified;
3587 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3588 target_flags_explicit = ptr->target_flags_explicit;
3590 /* Recreate the arch feature tests if the arch changed */
3591 if (old_arch != ix86_arch)
3593 ix86_arch_mask = 1u << ix86_arch;
3594 for (i = 0; i < X86_ARCH_LAST; ++i)
3595 ix86_arch_features[i]
3596 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3599 /* Recreate the tune optimization tests */
3600 if (old_tune != ix86_tune)
3602 ix86_tune_mask = 1u << ix86_tune;
3603 for (i = 0; i < X86_TUNE_LAST; ++i)
3604 ix86_tune_features[i]
3605 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3609 /* Print the current options */
3612 ix86_function_specific_print (FILE *file, int indent,
3613 struct cl_target_option *ptr)
3616 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3617 NULL, NULL, NULL, false);
3619 fprintf (file, "%*sarch = %d (%s)\n",
3622 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3623 ? cpu_names[ptr->arch]
3626 fprintf (file, "%*stune = %d (%s)\n",
3629 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3630 ? cpu_names[ptr->tune]
3633 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3634 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3635 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3636 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3640 fprintf (file, "%*s%s\n", indent, "", target_string);
3641 free (target_string);
3646 /* Inner function to process the attribute((target(...))); take an argument and
3647 set the current options from the argument. If we have a list, recursively go over the list. */
3651 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3656 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3657 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3658 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3659 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3674 enum ix86_opt_type type;
3679 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3680 IX86_ATTR_ISA ("abm", OPT_mabm),
3681 IX86_ATTR_ISA ("aes", OPT_maes),
3682 IX86_ATTR_ISA ("avx", OPT_mavx),
3683 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3684 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3685 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3686 IX86_ATTR_ISA ("sse", OPT_msse),
3687 IX86_ATTR_ISA ("sse2", OPT_msse2),
3688 IX86_ATTR_ISA ("sse3", OPT_msse3),
3689 IX86_ATTR_ISA ("sse4", OPT_msse4),
3690 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3691 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3692 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3693 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3694 IX86_ATTR_ISA ("fma4", OPT_mfma4),
3695 IX86_ATTR_ISA ("xop", OPT_mxop),
3696 IX86_ATTR_ISA ("lwp", OPT_mlwp),
3698 /* string options */
3699 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3700 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3701 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3704 IX86_ATTR_YES ("cld",
3708 IX86_ATTR_NO ("fancy-math-387",
3709 OPT_mfancy_math_387,
3710 MASK_NO_FANCY_MATH_387),
3712 IX86_ATTR_YES ("ieee-fp",
3716 IX86_ATTR_YES ("inline-all-stringops",
3717 OPT_minline_all_stringops,
3718 MASK_INLINE_ALL_STRINGOPS),
3720 IX86_ATTR_YES ("inline-stringops-dynamically",
3721 OPT_minline_stringops_dynamically,
3722 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3724 IX86_ATTR_NO ("align-stringops",
3725 OPT_mno_align_stringops,
3726 MASK_NO_ALIGN_STRINGOPS),
3728 IX86_ATTR_YES ("recip",
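/* Illustrative sketch (not part of the original source): the table above is
   what makes strings like the following valid inside attribute((target(...))).
   Hypothetical user code, with made-up function names:  */
#if 0
__attribute__((target ("sse4.2,no-3dnow")))        /* ISA on/off switches */
int hyp_crc32_loop (const char *p, int n);

__attribute__((target ("arch=core2,fpmath=sse")))  /* string-valued options */
int hyp_hot_kernel (int x);
#endif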
3734 /* If this is a list, recurse to get the options. */
3735 if (TREE_CODE (args) == TREE_LIST)
3739 for (; args; args = TREE_CHAIN (args))
3740 if (TREE_VALUE (args)
3741 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3747 else if (TREE_CODE (args) != STRING_CST)
3750 /* Handle multiple arguments separated by commas. */
3751 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3753 while (next_optstr && *next_optstr != '\0')
3755 char *p = next_optstr;
3757 char *comma = strchr (next_optstr, ',');
3758 const char *opt_string;
3759 size_t len, opt_len;
3764 enum ix86_opt_type type = ix86_opt_unknown;
3770 len = comma - next_optstr;
3771 next_optstr = comma + 1;
3779 /* Recognize no-xxx. */
3780 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3789 /* Find the option. */
3792 for (i = 0; i < ARRAY_SIZE (attrs); i++)
3794 type = attrs[i].type;
3795 opt_len = attrs[i].len;
3796 if (ch == attrs[i].string[0]
3797 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3798 && memcmp (p, attrs[i].string, opt_len) == 0)
3801 mask = attrs[i].mask;
3802 opt_string = attrs[i].string;
3807 /* Process the option. */
3810 error ("attribute(target(\"%s\")) is unknown", orig_p);
3814 else if (type == ix86_opt_isa)
3815 ix86_handle_option (opt, p, opt_set_p);
3817 else if (type == ix86_opt_yes || type == ix86_opt_no)
3819 if (type == ix86_opt_no)
3820 opt_set_p = !opt_set_p;
3823 target_flags |= mask;
3825 target_flags &= ~mask;
3828 else if (type == ix86_opt_str)
3832 error ("option(\"%s\") was already specified", opt_string);
3836 p_strings[opt] = xstrdup (p + opt_len);
3846 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3849 ix86_valid_target_attribute_tree (tree args)
3851 const char *orig_arch_string = ix86_arch_string;
3852 const char *orig_tune_string = ix86_tune_string;
3853 const char *orig_fpmath_string = ix86_fpmath_string;
3854 int orig_tune_defaulted = ix86_tune_defaulted;
3855 int orig_arch_specified = ix86_arch_specified;
3856 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3859 struct cl_target_option *def
3860 = TREE_TARGET_OPTION (target_option_default_node);
3862 /* Process each of the options on the chain. */
3863 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3866 /* If the changed options are different from the default, rerun override_options,
3867 and then save the options away. The string options are attribute options,
3868 and will be undone when we copy the save structure. */
3869 if (ix86_isa_flags != def->ix86_isa_flags
3870 || target_flags != def->target_flags
3871 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3872 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3873 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3875 /* If we are using the default tune= or arch=, undo the string assigned,
3876 and use the default. */
3877 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3878 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3879 else if (!orig_arch_specified)
3880 ix86_arch_string = NULL;
3882 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3883 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3884 else if (orig_tune_defaulted)
3885 ix86_tune_string = NULL;
3887 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
3888 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3889 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3890 else if (!TARGET_64BIT && TARGET_SSE)
3891 ix86_fpmath_string = "sse,387";
3893 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3894 override_options (false);
3896 /* Add any builtin functions with the new ISA, if any. */
3897 ix86_add_new_builtins (ix86_isa_flags);
3899 /* Save the current options unless we are validating options for #pragma. */
3901 t = build_target_option_node ();
3903 ix86_arch_string = orig_arch_string;
3904 ix86_tune_string = orig_tune_string;
3905 ix86_fpmath_string = orig_fpmath_string;
3907 /* Free up memory allocated to hold the strings */
3908 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
3909 if (option_strings[i])
3910 free (option_strings[i]);
3916 /* Hook to validate attribute((target("string"))). */
3919 ix86_valid_target_attribute_p (tree fndecl,
3920 tree ARG_UNUSED (name),
3922 int ARG_UNUSED (flags))
3924 struct cl_target_option cur_target;
3926 tree old_optimize = build_optimization_node ();
3927 tree new_target, new_optimize;
3928 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
3930 /* If the function changed the optimization levels as well as setting target
3931 options, start with the optimizations specified. */
3932 if (func_optimize && func_optimize != old_optimize)
3933 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
3935 /* The target attributes may also change some optimization flags, so update
3936 the optimization options if necessary. */
3937 cl_target_option_save (&cur_target);
3938 new_target = ix86_valid_target_attribute_tree (args);
3939 new_optimize = build_optimization_node ();
3946 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
3948 if (old_optimize != new_optimize)
3949 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
3952 cl_target_option_restore (&cur_target);
3954 if (old_optimize != new_optimize)
3955 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
3961 /* Hook to determine if one function can safely inline another. */
3964 ix86_can_inline_p (tree caller, tree callee)
3967 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
3968 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
3970 /* If callee has no option attributes, then it is ok to inline. */
3974 /* If the caller has no option attributes but the callee does, then it is not ok to inline. */
3976 else if (!caller_tree)
3981 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
3982 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
3984 /* The callee's ISA options should be a subset of the caller's, i.e. an SSE4
3985 function can inline an SSE2 function, but an SSE2 function can't inline an SSE4 function. */
3987 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
3988 != callee_opts->ix86_isa_flags)
3991 /* See if we have the same non-isa options. */
3992 else if (caller_opts->target_flags != callee_opts->target_flags)
3995 /* See if arch, tune, etc. are the same. */
3996 else if (caller_opts->arch != callee_opts->arch)
3999 else if (caller_opts->tune != callee_opts->tune)
4002 else if (caller_opts->fpmath != callee_opts->fpmath)
4005 else if (caller_opts->branch_cost != callee_opts->branch_cost)
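/* Illustrative sketch (not part of the original source): under the subset
   rule above, hyp_caller may inline hyp_callee, but not the other way
   around.  Hypothetical user code:  */
#if 0
__attribute__((target ("sse2")))
static inline int hyp_callee (int x) { return x + 1; }

__attribute__((target ("sse4.2")))
int hyp_caller (int x) { return hyp_callee (x); }  /* OK: SSE2 is a subset
                                                      of the SSE4.2 ISA set */
#endif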
4016 /* Remember the last target of ix86_set_current_function. */
4017 static GTY(()) tree ix86_previous_fndecl;
4019 /* Establish appropriate back-end context for processing the function
4020 FNDECL. The argument might be NULL to indicate processing at top
4021 level, outside of any function scope. */
4023 ix86_set_current_function (tree fndecl)
4025 /* Only change the context if the function changes. This hook is called
4026 several times in the course of compiling a function, and we don't want to
4027 slow things down too much or call target_reinit when it isn't safe. */
4028 if (fndecl && fndecl != ix86_previous_fndecl)
4030 tree old_tree = (ix86_previous_fndecl
4031 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4034 tree new_tree = (fndecl
4035 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4038 ix86_previous_fndecl = fndecl;
4039 if (old_tree == new_tree)
4044 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
4050 struct cl_target_option *def
4051 = TREE_TARGET_OPTION (target_option_current_node);
4053 cl_target_option_restore (def);
4060 /* Return true if this goes in large data/bss. */
4063 ix86_in_large_data_p (tree exp)
4065 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4068 /* Functions are never large data. */
4069 if (TREE_CODE (exp) == FUNCTION_DECL)
4072 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4074 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4075 if (strcmp (section, ".ldata") == 0
4076 || strcmp (section, ".lbss") == 0)
4082 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4084 /* If this is an incomplete type with size 0, then we can't put it
4085 in data because it might be too big when completed. */
4086 if (!size || size > ix86_section_threshold)
4093 /* Switch to the appropriate section for output of DECL.
4094 DECL is either a `VAR_DECL' node or a constant of some sort.
4095 RELOC indicates whether forming the initial value of DECL requires
4096 link-time relocations. */
4098 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4102 x86_64_elf_select_section (tree decl, int reloc,
4103 unsigned HOST_WIDE_INT align)
4105 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4106 && ix86_in_large_data_p (decl))
4108 const char *sname = NULL;
4109 unsigned int flags = SECTION_WRITE;
4110 switch (categorize_decl_for_section (decl, reloc))
4115 case SECCAT_DATA_REL:
4116 sname = ".ldata.rel";
4118 case SECCAT_DATA_REL_LOCAL:
4119 sname = ".ldata.rel.local";
4121 case SECCAT_DATA_REL_RO:
4122 sname = ".ldata.rel.ro";
4124 case SECCAT_DATA_REL_RO_LOCAL:
4125 sname = ".ldata.rel.ro.local";
4129 flags |= SECTION_BSS;
4132 case SECCAT_RODATA_MERGE_STR:
4133 case SECCAT_RODATA_MERGE_STR_INIT:
4134 case SECCAT_RODATA_MERGE_CONST:
4138 case SECCAT_SRODATA:
4145 /* We don't split these for the medium model. Place them into
4146 default sections and hope for the best. */
4148 case SECCAT_EMUTLS_VAR:
4149 case SECCAT_EMUTLS_TMPL:
4154 /* We might get called with string constants, but get_named_section
4155 doesn't like them as they are not DECLs. Also, we need to set
4156 flags in that case. */
4158 return get_section (sname, flags, NULL);
4159 return get_named_section (decl, sname, reloc);
4162 return default_elf_select_section (decl, reloc, align);
4165 /* Build up a unique section name, expressed as a
4166 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4167 RELOC indicates whether the initial value of EXP requires
4168 link-time relocations. */
4170 static void ATTRIBUTE_UNUSED
4171 x86_64_elf_unique_section (tree decl, int reloc)
4173 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4174 && ix86_in_large_data_p (decl))
4176 const char *prefix = NULL;
4177 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4178 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4180 switch (categorize_decl_for_section (decl, reloc))
4183 case SECCAT_DATA_REL:
4184 case SECCAT_DATA_REL_LOCAL:
4185 case SECCAT_DATA_REL_RO:
4186 case SECCAT_DATA_REL_RO_LOCAL:
4187 prefix = one_only ? ".ld" : ".ldata";
4190 prefix = one_only ? ".lb" : ".lbss";
4193 case SECCAT_RODATA_MERGE_STR:
4194 case SECCAT_RODATA_MERGE_STR_INIT:
4195 case SECCAT_RODATA_MERGE_CONST:
4196 prefix = one_only ? ".lr" : ".lrodata";
4198 case SECCAT_SRODATA:
4205 /* We don't split these for the medium model. Place them into
4206 default sections and hope for the best. */
4208 case SECCAT_EMUTLS_VAR:
4209 prefix = targetm.emutls.var_section;
4211 case SECCAT_EMUTLS_TMPL:
4212 prefix = targetm.emutls.tmpl_section;
4217 const char *name, *linkonce;
4220 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4221 name = targetm.strip_name_encoding (name);
4223 /* If we're using one_only, then there needs to be a .gnu.linkonce
4224 prefix to the section name. */
4225 linkonce = one_only ? ".gnu.linkonce" : "";
4227 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4229 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4233 default_unique_section (decl, reloc);
4236 #ifdef COMMON_ASM_OP
4237 /* This says how to output assembler code to declare an
4238 uninitialized external linkage data object.
4240 For medium model x86-64 we need to use the .largecomm directive for large objects. */
4243 x86_elf_aligned_common (FILE *file,
4244 const char *name, unsigned HOST_WIDE_INT size,
4247 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4248 && size > (unsigned int)ix86_section_threshold)
4249 fputs (".largecomm\t", file);
4251 fputs (COMMON_ASM_OP, file);
4252 assemble_name (file, name);
4253 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4254 size, align / BITS_PER_UNIT);
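/* Illustrative sketch (not part of the original source): with
   -mcmodel=medium, a common object larger than -mlarge-data-threshold
   would go through the .largecomm path above.  Hypothetical user code:  */
#if 0
double hyp_big_table[1 << 20];  /* 8 MB, above the threshold: .largecomm */
char hyp_small_buf[16];         /* below the threshold: plain COMMON_ASM_OP */
#endif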
4258 /* Utility function for targets to use in implementing
4259 ASM_OUTPUT_ALIGNED_BSS. */
4262 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4263 const char *name, unsigned HOST_WIDE_INT size,
4266 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4267 && size > (unsigned int)ix86_section_threshold)
4268 switch_to_section (get_named_section (decl, ".lbss", 0));
4270 switch_to_section (bss_section);
4271 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4272 #ifdef ASM_DECLARE_OBJECT_NAME
4273 last_assemble_variable_decl = decl;
4274 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4276 /* The standard thing is to just output a label for the object. */
4277 ASM_OUTPUT_LABEL (file, name);
4278 #endif /* ASM_DECLARE_OBJECT_NAME */
4279 ASM_OUTPUT_SKIP (file, size ? size : 1);
4283 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4285 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4286 make the problem of too few registers even worse. */
4287 #ifdef INSN_SCHEDULING
4289 flag_schedule_insns = 0;
4293 /* The Darwin libraries never set errno, so we might as well
4294 avoid calling them when that's the only reason we would. */
4295 flag_errno_math = 0;
4297 /* The default values of these switches depend on TARGET_64BIT, which is
4298 not known at this moment. Mark these values with 2 and let the user
4299 override them. If there is no command line option specifying them,
4300 we will set the defaults in override_options. */
4302 flag_omit_frame_pointer = 2;
4303 flag_pcc_struct_return = 2;
4304 flag_asynchronous_unwind_tables = 2;
4305 flag_vect_cost_model = 1;
4306 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4307 SUBTARGET_OPTIMIZATION_OPTIONS;
4311 /* Decide whether we can make a sibling call to a function. DECL is the
4312 declaration of the function being targeted by the call and EXP is the
4313 CALL_EXPR representing the call. */
4316 ix86_function_ok_for_sibcall (tree decl, tree exp)
4318 tree type, decl_or_type;
4321 /* If we are generating position-independent code, we cannot sibcall
4322 optimize any indirect call, or a direct call to a global function,
4323 as the PLT requires %ebx be live. */
4324 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4327 /* If we need to align the outgoing stack, then sibcalling would
4328 unalign the stack, which may break the called function. */
4329 if (ix86_minimum_incoming_stack_boundary (true)
4330 < PREFERRED_STACK_BOUNDARY)
4335 decl_or_type = decl;
4336 type = TREE_TYPE (decl);
4340 /* We're looking at the CALL_EXPR, we need the type of the function. */
4341 type = CALL_EXPR_FN (exp); /* pointer expression */
4342 type = TREE_TYPE (type); /* pointer type */
4343 type = TREE_TYPE (type); /* function type */
4344 decl_or_type = type;
4347 /* Check that the return value locations are the same. Like
4348 if we are returning floats on the 80387 register stack, we cannot
4349 make a sibcall from a function that doesn't return a float to a
4350 function that does or, conversely, from a function that does return
4351 a float to a function that doesn't; the necessary stack adjustment
4352 would not be executed. This is also the place we notice
4353 differences in the return value ABI. Note that it is ok for one
4354 of the functions to have void return type as long as the return
4355 value of the other is passed in a register. */
4356 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4357 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4359 if (STACK_REG_P (a) || STACK_REG_P (b))
4361 if (!rtx_equal_p (a, b))
4364 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4366 else if (!rtx_equal_p (a, b))
4371 /* The SYSV ABI has more call-clobbered registers;
4372 disallow sibcalls from MS to SYSV. */
4373 if (cfun->machine->call_abi == MS_ABI
4374 && ix86_function_type_abi (type) == SYSV_ABI)
4379 /* If this call is indirect, we'll need to be able to use a
4380 call-clobbered register for the address of the target function.
4381 Make sure that all such registers are not used for passing
4382 parameters. Note that DLLIMPORT functions are indirect. */
4384 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4386 if (ix86_function_regparm (type, NULL) >= 3)
4388 /* ??? Need to count the actual number of registers to be used,
4389 not the possible number of registers. Fix later. */
4395 /* Otherwise okay. That also includes certain types of indirect calls. */
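/* Illustrative sketch (not part of the original source): on 32-bit with
   -fpic, the tail call to a global function below is rejected by the first
   check above (the PLT needs %ebx live), while the call to a local function
   can still become a sibcall.  Hypothetical code:  */
#if 0
extern int hyp_global (int);
static int hyp_local (int x) { return x + 1; }

int hyp_tail1 (int x) { return hyp_global (x); }  /* no sibcall under -fpic */
int hyp_tail2 (int x) { return hyp_local (x); }   /* sibcall possible */
#endif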
4399 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
4400 and "sseregparm" calling convention attributes;
4401 arguments as in struct attribute_spec.handler. */
4404 ix86_handle_cconv_attribute (tree *node, tree name,
4406 int flags ATTRIBUTE_UNUSED,
4409 if (TREE_CODE (*node) != FUNCTION_TYPE
4410 && TREE_CODE (*node) != METHOD_TYPE
4411 && TREE_CODE (*node) != FIELD_DECL
4412 && TREE_CODE (*node) != TYPE_DECL)
4414 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4416 *no_add_attrs = true;
4420 /* Can combine regparm with all attributes but fastcall. */
4421 if (is_attribute_p ("regparm", name))
4425 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4427 error ("fastcall and regparm attributes are not compatible");
4430 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4432 error ("regparam and thiscall attributes are not compatible");
4435 cst = TREE_VALUE (args);
4436 if (TREE_CODE (cst) != INTEGER_CST)
4438 warning (OPT_Wattributes,
4439 "%qE attribute requires an integer constant argument",
4441 *no_add_attrs = true;
4443 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4445 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4447 *no_add_attrs = true;
4455 /* Do not warn when emulating the MS ABI. */
4456 if ((TREE_CODE (*node) != FUNCTION_TYPE
4457 && TREE_CODE (*node) != METHOD_TYPE)
4458 || ix86_function_type_abi (*node) != MS_ABI)
4459 warning (OPT_Wattributes, "%qE attribute ignored",
4461 *no_add_attrs = true;
4465 /* Can combine fastcall only with sseregparm; cdecl, stdcall, regparm and thiscall all conflict with it. */
4466 if (is_attribute_p ("fastcall", name))
4468 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4470 error ("fastcall and cdecl attributes are not compatible");
4472 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4474 error ("fastcall and stdcall attributes are not compatible");
4476 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4478 error ("fastcall and regparm attributes are not compatible");
4480 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4482 error ("fastcall and thiscall attributes are not compatible");
4486 /* Can combine stdcall with regparm and sseregparm; cdecl, fastcall and thiscall conflict with it. */
4488 else if (is_attribute_p ("stdcall", name))
4490 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4492 error ("stdcall and cdecl attributes are not compatible");
4494 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4496 error ("stdcall and fastcall attributes are not compatible");
4498 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4500 error ("stdcall and thiscall attributes are not compatible");
4504 /* Can combine cdecl with regparm and sseregparm. */
4505 else if (is_attribute_p ("cdecl", name))
4507 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4509 error ("stdcall and cdecl attributes are not compatible");
4511 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4513 error ("fastcall and cdecl attributes are not compatible");
4515 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4517 error ("cdecl and thiscall attributes are not compatible");
4520 else if (is_attribute_p ("thiscall", name))
4522 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
4523 warning (OPT_Wattributes, "%qE attribute is used for non-class method",
4525 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4527 error ("stdcall and thiscall attributes are not compatible");
4529 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4531 error ("fastcall and thiscall attributes are not compatible");
4533 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4535 error ("cdecl and thiscall attributes are not compatible");
4539 /* Can combine sseregparm with all attributes. */
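/* Illustrative sketch (not part of the original source): combinations
   accepted and rejected by the checks above, assuming a 32-bit target.
   Hypothetical declarations:  */
#if 0
int __attribute__((stdcall, sseregparm)) hyp_ok (float);       /* accepted */
int __attribute__((cdecl, regparm (2))) hyp_ok2 (int, int);    /* accepted */
int __attribute__((fastcall, regparm (2))) hyp_bad (int, int); /* error: not
                                                                  compatible */
#endif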
4544 /* Return 0 if the attributes for two types are incompatible, 1 if they
4545 are compatible, and 2 if they are nearly compatible (which causes a
4546 warning to be generated). */
4549 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4551 /* Check for mismatch of non-default calling convention. */
4552 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4554 if (TREE_CODE (type1) != FUNCTION_TYPE
4555 && TREE_CODE (type1) != METHOD_TYPE)
4558 /* Check for mismatched fastcall/regparm types. */
4559 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4560 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4561 || (ix86_function_regparm (type1, NULL)
4562 != ix86_function_regparm (type2, NULL)))
4565 /* Check for mismatched sseregparm types. */
4566 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4567 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4570 /* Check for mismatched thiscall types. */
4571 if (!lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type1))
4572 != !lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type2)))
4575 /* Check for mismatched return types (cdecl vs stdcall). */
4576 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4577 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4583 /* Return the regparm value for a function with the indicated TYPE and DECL.
4584 DECL may be NULL when the function is called indirectly
4585 or when considering a libcall. */
4588 ix86_function_regparm (const_tree type, const_tree decl)
4594 return (ix86_function_type_abi (type) == SYSV_ABI
4595 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4597 regparm = ix86_regparm;
4598 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4601 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4605 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4608 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
4611 /* Use register calling convention for local functions when possible. */
4613 && TREE_CODE (decl) == FUNCTION_DECL
4617 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4618 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
4621 int local_regparm, globals = 0, regno;
4623 /* Make sure no regparm register is taken by a
4624 fixed register variable. */
4625 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4626 if (fixed_regs[local_regparm])
4629 /* We don't want to use regparm(3) for nested functions as
4630 these use a static chain pointer in the third argument. */
4631 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
4634 /* Each fixed register usage increases register pressure,
4635 so fewer registers should be used for argument passing.
4636 This functionality can be overridden by an explicit regparm value. */
4638 for (regno = 0; regno <= DI_REG; regno++)
4639 if (fixed_regs[regno])
4643 = globals < local_regparm ? local_regparm - globals : 0;
4645 if (local_regparm > regparm)
4646 regparm = local_regparm;
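/* Illustrative sketch (not part of the original source): when optimizing,
   a static, non-nested function such as the one below is eligible for the
   local regparm promotion above and can receive up to three integer
   arguments in registers without an explicit attribute.  Hypothetical:  */
#if 0
static int hyp_local_sum (int a, int b, int c)  /* candidate for regparm(3) */
{
  return a + b + c;
}
#endif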
4653 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4654 DFmode (2) arguments in SSE registers for a function with the
4655 indicated TYPE and DECL. DECL may be NULL when calling function
4656 indirectly or considering a libcall. Otherwise return 0. */
4659 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4661 gcc_assert (!TARGET_64BIT);
4663 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4664 by the sseregparm attribute. */
4665 if (TARGET_SSEREGPARM
4666 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4673 error ("Calling %qD with attribute sseregparm without "
4674 "SSE/SSE2 enabled", decl);
4676 error ("Calling %qT with attribute sseregparm without "
4677 "SSE/SSE2 enabled", type);
4685 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4686 (and DFmode for SSE2) arguments in SSE registers. */
4687 if (decl && TARGET_SSE_MATH && optimize && !profile_flag)
4689 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4690 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4692 return TARGET_SSE2 ? 2 : 1;
4698 /* Return true if EAX is live at the start of the function. Used by
4699 ix86_expand_prologue to determine if we need special help before
4700 calling allocate_stack_worker. */
4703 ix86_eax_live_at_start_p (void)
4705 /* Cheat. Don't bother working forward from ix86_function_regparm
4706 to the function type to whether an actual argument is located in
4707 eax. Instead just look at cfg info, which is still close enough
4708 to correct at this point. This gives false positives for broken
4709 functions that might use uninitialized data that happens to be
4710 allocated in eax, but who cares? */
4711 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4714 /* Value is the number of bytes of arguments automatically
4715 popped when returning from a subroutine call.
4716 FUNDECL is the declaration node of the function (as a tree),
4717 FUNTYPE is the data type of the function (as a tree),
4718 or for a library call it is an identifier node for the subroutine name.
4719 SIZE is the number of bytes of arguments passed on the stack.
4721 On the 80386, the RTD insn may be used to pop them if the number
4722 of args is fixed, but if the number is variable then the caller
4723 must pop them all. RTD can't be used for library calls now
4724 because the library is compiled with the Unix compiler.
4725 Use of RTD is a selectable option, since it is incompatible with
4726 standard Unix calling sequences. If the option is not selected,
4727 the caller must always pop the args.
4729 The attribute stdcall is equivalent to RTD on a per module basis. */
4732 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4736 /* None of the 64-bit ABIs pop arguments. */
4740 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4742 /* Cdecl functions override -mrtd, and never pop the stack. */
4743 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4745 /* Stdcall, fastcall and thiscall functions will pop the stack if not variable args. */
4747 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4748 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype))
4749 || lookup_attribute ("thiscall", TYPE_ATTRIBUTES (funtype)))
4752 if (rtd && ! stdarg_p (funtype))
4756 /* Lose any fake structure return argument if it is passed on the stack. */
4757 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4758 && !KEEP_AGGREGATE_RETURN_POINTER)
4760 int nregs = ix86_function_regparm (funtype, fundecl);
4762 return GET_MODE_SIZE (Pmode);
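/* Illustrative sketch (not part of the original source): the rules above
   in user terms, on a 32-bit target.  Hypothetical declarations:  */
#if 0
int __attribute__((stdcall)) hyp_f (int a, int b);  /* callee pops 8 bytes */
int hyp_g (int a, ...);             /* varargs: the caller always pops */
#endif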
4768 /* Argument support functions. */
4770 /* Return true when register may be used to pass function parameters. */
4772 ix86_function_arg_regno_p (int regno)
4775 const int *parm_regs;
4780 return (regno < REGPARM_MAX
4781 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4783 return (regno < REGPARM_MAX
4784 || (TARGET_MMX && MMX_REGNO_P (regno)
4785 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4786 || (TARGET_SSE && SSE_REGNO_P (regno)
4787 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4792 if (SSE_REGNO_P (regno) && TARGET_SSE)
4797 if (TARGET_SSE && SSE_REGNO_P (regno)
4798 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4802 /* TODO: The function should depend on current function ABI but
4803 builtins.c would need updating then. Therefore we use the default ABI. */
4806 /* RAX is used as hidden argument to va_arg functions. */
4807 if (ix86_abi == SYSV_ABI && regno == AX_REG)
4810 if (ix86_abi == MS_ABI)
4811 parm_regs = x86_64_ms_abi_int_parameter_registers;
4813 parm_regs = x86_64_int_parameter_registers;
4814 for (i = 0; i < (ix86_abi == MS_ABI
4815 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
4816 if (regno == parm_regs[i])
4821 /* Return true if we do not know how to pass TYPE solely in registers. */
4824 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4826 if (must_pass_in_stack_var_size_or_pad (mode, type))
4829 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4830 The layout_type routine is crafty and tries to trick us into passing
4831 currently unsupported vector types on the stack by using TImode. */
4832 return (!TARGET_64BIT && mode == TImode
4833 && type && TREE_CODE (type) != VECTOR_TYPE);
4836 /* Return the size, in bytes, of the area reserved for arguments passed
4837 in registers for the function represented by FNDECL, depending on the ABI used. */
4840 ix86_reg_parm_stack_space (const_tree fndecl)
4842 enum calling_abi call_abi = SYSV_ABI;
4843 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
4844 call_abi = ix86_function_abi (fndecl);
4846 call_abi = ix86_function_type_abi (fndecl);
4847 if (call_abi == MS_ABI)
4852 /* Return SYSV_ABI or MS_ABI, depending on FNTYPE, specifying the call ABI used. */
4855 ix86_function_type_abi (const_tree fntype)
4857 if (TARGET_64BIT && fntype != NULL)
4859 enum calling_abi abi = ix86_abi;
4860 if (abi == SYSV_ABI)
4862 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
4865 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
4873 ix86_function_ms_hook_prologue (const_tree fntype)
4877 if (lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fntype)))
4879 if (decl_function_context (fntype) != NULL_TREE)
4881 error_at (DECL_SOURCE_LOCATION (fntype),
4882 "ms_hook_prologue is not compatible with nested function");
4891 static enum calling_abi
4892 ix86_function_abi (const_tree fndecl)
4896 return ix86_function_type_abi (TREE_TYPE (fndecl));
4899 /* Return SYSV_ABI or MS_ABI, depending on CFUN, specifying the call ABI used. */
4902 ix86_cfun_abi (void)
4904 if (! cfun || ! TARGET_64BIT)
4906 return cfun->machine->call_abi;
4910 extern void init_regs (void);
4912 /* Implementation of the call ABI switching target hook. The call
4913 register sets specific to FNDECL are set up. See also CONDITIONAL_REGISTER_USAGE
4914 for more details. */
4916 ix86_call_abi_override (const_tree fndecl)
4918 if (fndecl == NULL_TREE)
4919 cfun->machine->call_abi = ix86_abi;
4921 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
4924 /* The MS and SYSV ABIs have different sets of call-used registers. Avoid expensive
4925 re-initialization via init_regs each time we switch function context, since
4926 this is needed only during RTL expansion. */
4928 ix86_maybe_switch_abi (void)
4931 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
4935 /* Initialize a variable CUM of type CUMULATIVE_ARGS
4936 for a call to a function whose data type is FNTYPE.
4937 For a library call, FNTYPE is 0. */
4940 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
4941 tree fntype, /* tree ptr for function decl */
4942 rtx libname, /* SYMBOL_REF of library name or 0 */
4945 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
4946 memset (cum, 0, sizeof (*cum));
4949 cum->call_abi = ix86_function_abi (fndecl);
4951 cum->call_abi = ix86_function_type_abi (fntype);
4952 /* Set up the number of registers to use for passing arguments. */
4954 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
4955 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
4956 "or subtarget optimization implying it");
4957 cum->nregs = ix86_regparm;
4960 if (cum->call_abi != ix86_abi)
4961 cum->nregs = (ix86_abi != SYSV_ABI
4962 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4966 cum->sse_nregs = SSE_REGPARM_MAX;
4969 if (cum->call_abi != ix86_abi)
4970 cum->sse_nregs = (ix86_abi != SYSV_ABI
4971 ? X86_64_SSE_REGPARM_MAX
4972 : X86_64_MS_SSE_REGPARM_MAX);
4976 cum->mmx_nregs = MMX_REGPARM_MAX;
4977 cum->warn_avx = true;
4978 cum->warn_sse = true;
4979 cum->warn_mmx = true;
4981 /* Because the type might mismatch between caller and callee, we need to
4982 use the actual type of the function for local calls.
4983 FIXME: cgraph_analyze can be told to actually record if function uses
4984 va_start so for local functions maybe_vaarg can be made aggressive
4986 FIXME: once the type system is fixed, we won't need this code anymore. */
4988 fntype = TREE_TYPE (fndecl);
4989 cum->maybe_vaarg = (fntype
4990 ? (!prototype_p (fntype) || stdarg_p (fntype))
4995 /* If there are variable arguments, then we won't pass anything
4996 in registers in 32-bit mode. */
4997 if (stdarg_p (fntype))
5008 /* Use ecx and edx registers if function has fastcall attribute,
5009 else look for regparm information. */
5012 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
5015 cum->fastcall = 1; /* Same first register as in fastcall. */
5017 else if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
5023 cum->nregs = ix86_function_regparm (fntype, fndecl);
5026 /* Set up the number of SSE registers used for passing SFmode
5027 and DFmode arguments. Warn for mismatching ABI. */
5028 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
5032 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
5033 But in the case of vector types, it is some vector mode.
5035 When we have only some of our vector isa extensions enabled, then there
5036 are some modes for which vector_mode_supported_p is false. For these
5037 modes, the generic vector support in gcc will choose some non-vector mode
5038 in order to implement the type. By computing the natural mode, we'll
5039 select the proper ABI location for the operand and not depend on whatever
5040 the middle-end decides to do with these vector types.
5042 The middle-end can't deal with vector types > 16 bytes. In this
5043 case, we return the original mode and warn about an ABI change if CUM isn't NULL. */
5046 static enum machine_mode
5047 type_natural_mode (const_tree type, CUMULATIVE_ARGS *cum)
5049 enum machine_mode mode = TYPE_MODE (type);
5051 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
5053 HOST_WIDE_INT size = int_size_in_bytes (type);
5054 if ((size == 8 || size == 16 || size == 32)
5055 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5056 && TYPE_VECTOR_SUBPARTS (type) > 1)
5058 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5060 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5061 mode = MIN_MODE_VECTOR_FLOAT;
5063 mode = MIN_MODE_VECTOR_INT;
5065 /* Get the mode which has this inner mode and number of units. */
5066 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5067 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5068 && GET_MODE_INNER (mode) == innermode)
5070 if (size == 32 && !TARGET_AVX)
5072 static bool warnedavx;
5079 warning (0, "AVX vector argument without AVX "
5080 "enabled changes the ABI");
5082 return TYPE_MODE (type);
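/* Illustrative sketch (not part of the original source): generic vector
   types and the natural modes this function would pick for them.
   Hypothetical typedefs:  */
#if 0
typedef int hyp_v4si __attribute__((vector_size (16)));   /* V4SImode */
typedef float hyp_v8sf __attribute__((vector_size (32))); /* V8SFmode with
   AVX; without it, the warning above fires and TYPE_MODE is kept */
#endif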
5095 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5096 this may not agree with the mode that the type system has chosen for the
5097 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5098 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5101 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5106 if (orig_mode != BLKmode)
5107 tmp = gen_rtx_REG (orig_mode, regno);
5110 tmp = gen_rtx_REG (mode, regno);
5111 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5112 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
5118 /* x86-64 register passing implementation. See the x86-64 ABI for details. The goal
5119 of this code is to classify each eightbyte of the incoming argument by register
5120 class and assign registers accordingly. */
5122 /* Return the union class of CLASS1 and CLASS2.
5123 See the x86-64 PS ABI for details. */
5125 static enum x86_64_reg_class
5126 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5128 /* Rule #1: If both classes are equal, this is the resulting class. */
5129 if (class1 == class2)
5132 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is the other class. */
5134 if (class1 == X86_64_NO_CLASS)
5136 if (class2 == X86_64_NO_CLASS)
5139 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5140 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5141 return X86_64_MEMORY_CLASS;
5143 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5144 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5145 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5146 return X86_64_INTEGERSI_CLASS;
5147 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5148 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5149 return X86_64_INTEGER_CLASS;
5151 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class, MEMORY is used. */
5153 if (class1 == X86_64_X87_CLASS
5154 || class1 == X86_64_X87UP_CLASS
5155 || class1 == X86_64_COMPLEX_X87_CLASS
5156 || class2 == X86_64_X87_CLASS
5157 || class2 == X86_64_X87UP_CLASS
5158 || class2 == X86_64_COMPLEX_X87_CLASS)
5159 return X86_64_MEMORY_CLASS;
5161 /* Rule #6: Otherwise class SSE is used. */
5162 return X86_64_SSE_CLASS;
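/* Illustrative worked example (not part of the original source): for the
   hypothetical

     struct hyp_s { int i; float f; };

   the int classifies as INTEGERSI and the float (at offset 4) as SSE;
   rule #4 merges them into INTEGER, so the whole struct is passed in one
   general register.  */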
5165 /* Classify the argument of type TYPE and mode MODE.
5166 CLASSES will be filled by the register class used to pass each word
5167 of the operand. The number of words is returned. In case the parameter
5168 should be passed in memory, 0 is returned. As a special case for zero
5169 sized containers, classes[0] will be NO_CLASS and 1 is returned.
5171 BIT_OFFSET is used internally for handling records and specifies the
5172 offset in bits modulo 256 to avoid overflow cases.
5174 See the x86-64 PS ABI for details.
5178 classify_argument (enum machine_mode mode, const_tree type,
5179 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5181 HOST_WIDE_INT bytes =
5182 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5183 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5185 /* Variable sized entities are always passed/returned in memory. */
5189 if (mode != VOIDmode
5190 && targetm.calls.must_pass_in_stack (mode, type))
5193 if (type && AGGREGATE_TYPE_P (type))
5197 enum x86_64_reg_class subclasses[MAX_CLASSES];
5199 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5203 for (i = 0; i < words; i++)
5204 classes[i] = X86_64_NO_CLASS;
5206 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
5207 signal the memory class, so handle it as a special case. */
5210 classes[0] = X86_64_NO_CLASS;
5214 /* Classify each field of record and merge classes. */
5215 switch (TREE_CODE (type))
5218 /* And now merge the fields of structure. */
5219 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5221 if (TREE_CODE (field) == FIELD_DECL)
5225 if (TREE_TYPE (field) == error_mark_node)
5228 /* Bitfields are always classified as integer. Handle them
5229 early, since later code would consider them to be
5230 misaligned integers. */
5231 if (DECL_BIT_FIELD (field))
5233 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5234 i < ((int_bit_position (field) + (bit_offset % 64))
5235 + tree_low_cst (DECL_SIZE (field), 0)
5238 merge_classes (X86_64_INTEGER_CLASS,
5245 type = TREE_TYPE (field);
5247 /* Flexible array member is ignored. */
5248 if (TYPE_MODE (type) == BLKmode
5249 && TREE_CODE (type) == ARRAY_TYPE
5250 && TYPE_SIZE (type) == NULL_TREE
5251 && TYPE_DOMAIN (type) != NULL_TREE
5252 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5257 if (!warned && warn_psabi)
5260 inform (input_location,
5261 "The ABI of passing struct with"
5262 " a flexible array member has"
5263 " changed in GCC 4.4");
5267 num = classify_argument (TYPE_MODE (type), type,
5269 (int_bit_position (field)
5270 + bit_offset) % 256);
5273 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5274 for (i = 0; i < num && (i + pos) < words; i++)
5276 merge_classes (subclasses[i], classes[i + pos]);
5283 /* Arrays are handled as small records. */
5286 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5287 TREE_TYPE (type), subclasses, bit_offset);
5291 /* The partial classes are now full classes. */
5292 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5293 subclasses[0] = X86_64_SSE_CLASS;
5294 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5295 && !((bit_offset % 64) == 0 && bytes == 4))
5296 subclasses[0] = X86_64_INTEGER_CLASS;
5298 for (i = 0; i < words; i++)
5299 classes[i] = subclasses[i % num];
5304 case QUAL_UNION_TYPE:
5305 /* Unions are similar to RECORD_TYPE but the offset is always 0. */
5307 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5309 if (TREE_CODE (field) == FIELD_DECL)
5313 if (TREE_TYPE (field) == error_mark_node)
5316 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5317 TREE_TYPE (field), subclasses,
5321 for (i = 0; i < num; i++)
5322 classes[i] = merge_classes (subclasses[i], classes[i]);
5333 /* When size > 16 bytes, if the first one isn't
5334 X86_64_SSE_CLASS or any other ones aren't
5335 X86_64_SSEUP_CLASS, everything should be passed in memory. */
5337 if (classes[0] != X86_64_SSE_CLASS)
5340 for (i = 1; i < words; i++)
5341 if (classes[i] != X86_64_SSEUP_CLASS)
5345 /* Final merger cleanup. */
5346 for (i = 0; i < words; i++)
5348 /* If one class is MEMORY, everything should be passed in memory. */
5350 if (classes[i] == X86_64_MEMORY_CLASS)
5353 /* The X86_64_SSEUP_CLASS should be always preceded by
5354 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5355 if (classes[i] == X86_64_SSEUP_CLASS
5356 && classes[i - 1] != X86_64_SSE_CLASS
5357 && classes[i - 1] != X86_64_SSEUP_CLASS)
5359 /* The first one should never be X86_64_SSEUP_CLASS. */
5360 gcc_assert (i != 0);
5361 classes[i] = X86_64_SSE_CLASS;
5364 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5365 everything should be passed in memory. */
5366 if (classes[i] == X86_64_X87UP_CLASS
5367 && (classes[i - 1] != X86_64_X87_CLASS))
5371 /* The first one should never be X86_64_X87UP_CLASS. */
5372 gcc_assert (i != 0);
5373 if (!warned && warn_psabi)
5376 inform (input_location,
5377 "The ABI of passing union with long double"
5378 " has changed in GCC 4.4");
5386 /* Compute the alignment needed. We align all types to natural boundaries with
5387 the exception of XFmode, which is aligned to 64 bits. */
5388 if (mode != VOIDmode && mode != BLKmode)
5390 int mode_alignment = GET_MODE_BITSIZE (mode);
5393 mode_alignment = 128;
5394 else if (mode == XCmode)
5395 mode_alignment = 256;
5396 if (COMPLEX_MODE_P (mode))
5397 mode_alignment /= 2;
5398 /* Misaligned fields are always returned in memory. */
5399 if (bit_offset % mode_alignment)
5403 /* for V1xx modes, just use the base mode */
5404 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
5405 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5406 mode = GET_MODE_INNER (mode);
5408 /* Classification of atomic types. */
5413 classes[0] = X86_64_SSE_CLASS;
5416 classes[0] = X86_64_SSE_CLASS;
5417 classes[1] = X86_64_SSEUP_CLASS;
5427 int size = (bit_offset % 64) + (int) GET_MODE_BITSIZE (mode);
5431 classes[0] = X86_64_INTEGERSI_CLASS;
5434 else if (size <= 64)
5436 classes[0] = X86_64_INTEGER_CLASS;
5439 else if (size <= 64+32)
5441 classes[0] = X86_64_INTEGER_CLASS;
5442 classes[1] = X86_64_INTEGERSI_CLASS;
5445 else if (size <= 64+64)
5447 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5455 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5459 /* OImode shouldn't be used directly. */
5464 if (!(bit_offset % 64))
5465 classes[0] = X86_64_SSESF_CLASS;
5467 classes[0] = X86_64_SSE_CLASS;
5470 classes[0] = X86_64_SSEDF_CLASS;
5473 classes[0] = X86_64_X87_CLASS;
5474 classes[1] = X86_64_X87UP_CLASS;
5477 classes[0] = X86_64_SSE_CLASS;
5478 classes[1] = X86_64_SSEUP_CLASS;
5481 classes[0] = X86_64_SSE_CLASS;
5482 if (!(bit_offset % 64))
5488 if (!warned && warn_psabi)
5491 inform (input_location,
5492 "The ABI of passing structure with complex float"
5493 " member has changed in GCC 4.4");
5495 classes[1] = X86_64_SSESF_CLASS;
5499 classes[0] = X86_64_SSEDF_CLASS;
5500 classes[1] = X86_64_SSEDF_CLASS;
5503 classes[0] = X86_64_COMPLEX_X87_CLASS;
5506 /* This mode is larger than 16 bytes. */
5514 classes[0] = X86_64_SSE_CLASS;
5515 classes[1] = X86_64_SSEUP_CLASS;
5516 classes[2] = X86_64_SSEUP_CLASS;
5517 classes[3] = X86_64_SSEUP_CLASS;
5525 classes[0] = X86_64_SSE_CLASS;
5526 classes[1] = X86_64_SSEUP_CLASS;
5534 classes[0] = X86_64_SSE_CLASS;
5540 gcc_assert (VECTOR_MODE_P (mode));
5545 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5547 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5548 classes[0] = X86_64_INTEGERSI_CLASS;
5550 classes[0] = X86_64_INTEGER_CLASS;
5551 classes[1] = X86_64_INTEGER_CLASS;
5552 return 1 + (bytes > 8);
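/* Illustrative worked example (not part of the original source): the
   hypothetical

     struct hyp_p { double d; long l; };

   classifies as two eightbytes, SSEDF then INTEGER, so as a first argument
   it would travel in one SSE register and one general register
   (e.g. %xmm0 and %rdi).  */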
5556 /* Examine the argument and return the number of registers required in each
5557 class. Return 0 iff the parameter should be passed in memory. */
5559 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5560 int *int_nregs, int *sse_nregs)
5562 enum x86_64_reg_class regclass[MAX_CLASSES];
5563 int n = classify_argument (mode, type, regclass, 0);
5569 for (n--; n >= 0; n--)
5570 switch (regclass[n])
5572 case X86_64_INTEGER_CLASS:
5573 case X86_64_INTEGERSI_CLASS:
5576 case X86_64_SSE_CLASS:
5577 case X86_64_SSESF_CLASS:
5578 case X86_64_SSEDF_CLASS:
5581 case X86_64_NO_CLASS:
5582 case X86_64_SSEUP_CLASS:
5584 case X86_64_X87_CLASS:
5585 case X86_64_X87UP_CLASS:
5589 case X86_64_COMPLEX_X87_CLASS:
5590 return in_return ? 2 : 0;
5591 case X86_64_MEMORY_CLASS:
5597 /* Construct container for the argument used by GCC interface. See
5598 FUNCTION_ARG for the detailed description. */
5601 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5602 const_tree type, int in_return, int nintregs, int nsseregs,
5603 const int *intreg, int sse_regno)
5605 /* The following variables hold the static issued_error state. */
5606 static bool issued_sse_arg_error;
5607 static bool issued_sse_ret_error;
5608 static bool issued_x87_ret_error;
5610 enum machine_mode tmpmode;
5612 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5613 enum x86_64_reg_class regclass[MAX_CLASSES];
5617 int needed_sseregs, needed_intregs;
5618 rtx exp[MAX_CLASSES];
5621 n = classify_argument (mode, type, regclass, 0);
5624 if (!examine_argument (mode, type, in_return, &needed_intregs,
5627 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5630 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5631 some less clueful developer tries to use floating-point anyway. */
5632 if (needed_sseregs && !TARGET_SSE)
5636 if (!issued_sse_ret_error)
5638 error ("SSE register return with SSE disabled");
5639 issued_sse_ret_error = true;
5642 else if (!issued_sse_arg_error)
5644 error ("SSE register argument with SSE disabled");
5645 issued_sse_arg_error = true;
5650 /* Likewise, error if the ABI requires us to return values in the
5651 x87 registers and the user specified -mno-80387. */
5652 if (!TARGET_80387 && in_return)
5653 for (i = 0; i < n; i++)
5654 if (regclass[i] == X86_64_X87_CLASS
5655 || regclass[i] == X86_64_X87UP_CLASS
5656 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5658 if (!issued_x87_ret_error)
5660 error ("x87 register return with x87 disabled");
5661 issued_x87_ret_error = true;
5666 /* First construct the simple cases. Avoid SCmode, since we want to use a
5667 single register to pass this type. */
5668 if (n == 1 && mode != SCmode)
5669 switch (regclass[0])
5671 case X86_64_INTEGER_CLASS:
5672 case X86_64_INTEGERSI_CLASS:
5673 return gen_rtx_REG (mode, intreg[0]);
5674 case X86_64_SSE_CLASS:
5675 case X86_64_SSESF_CLASS:
5676 case X86_64_SSEDF_CLASS:
5677 if (mode != BLKmode)
5678 return gen_reg_or_parallel (mode, orig_mode,
5679 SSE_REGNO (sse_regno));
5681 case X86_64_X87_CLASS:
5682 case X86_64_COMPLEX_X87_CLASS:
5683 return gen_rtx_REG (mode, FIRST_STACK_REG);
5684 case X86_64_NO_CLASS:
5685 /* Zero sized array, struct or class. */
5690 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5691 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5692 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5694 && regclass[0] == X86_64_SSE_CLASS
5695 && regclass[1] == X86_64_SSEUP_CLASS
5696 && regclass[2] == X86_64_SSEUP_CLASS
5697 && regclass[3] == X86_64_SSEUP_CLASS
5699 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5702 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5703 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5704 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5705 && regclass[1] == X86_64_INTEGER_CLASS
5706 && (mode == CDImode || mode == TImode || mode == TFmode)
5707 && intreg[0] + 1 == intreg[1])
5708 return gen_rtx_REG (mode, intreg[0]);
5710 /* Otherwise figure out the entries of the PARALLEL. */
5711 for (i = 0; i < n; i++)
5715 switch (regclass[i])
5717 case X86_64_NO_CLASS:
5719 case X86_64_INTEGER_CLASS:
5720 case X86_64_INTEGERSI_CLASS:
5721 /* Merge TImodes on aligned occasions here too. */
5722 if (i * 8 + 8 > bytes)
5723 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5724 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5728 /* We've requested 24 bytes we don't have a mode for. Use DImode. */
5729 if (tmpmode == BLKmode)
5731 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5732 gen_rtx_REG (tmpmode, *intreg),
5736 case X86_64_SSESF_CLASS:
5737 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5738 gen_rtx_REG (SFmode,
5739 SSE_REGNO (sse_regno)),
5743 case X86_64_SSEDF_CLASS:
5744 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5745 gen_rtx_REG (DFmode,
5746 SSE_REGNO (sse_regno)),
5750 case X86_64_SSE_CLASS:
5758 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
5768 && regclass[1] == X86_64_SSEUP_CLASS
5769 && regclass[2] == X86_64_SSEUP_CLASS
5770 && regclass[3] == X86_64_SSEUP_CLASS);
5777 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5778 gen_rtx_REG (tmpmode,
5779 SSE_REGNO (sse_regno)),
5788 /* Empty aligned struct, union or class. */
5792 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5793 for (i = 0; i < nexps; i++)
5794 XVECEXP (ret, 0, i) = exp [i];
5798 /* Update the data in CUM to advance over an argument of mode MODE
5799 and data type TYPE. (TYPE is null for libcalls where that information
5800 may not be available.) */
5803 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5804 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5820 cum->words += words;
5821 cum->nregs -= words;
5822 cum->regno += words;
5824 if (cum->nregs <= 0)
5832 /* OImode shouldn't be used directly. */
5836 if (cum->float_in_sse < 2)
5839 if (cum->float_in_sse < 1)
5856 if (!type || !AGGREGATE_TYPE_P (type))
5858 cum->sse_words += words;
5859 cum->sse_nregs -= 1;
5860 cum->sse_regno += 1;
5861 if (cum->sse_nregs <= 0)
5875 if (!type || !AGGREGATE_TYPE_P (type))
5877 cum->mmx_words += words;
5878 cum->mmx_nregs -= 1;
5879 cum->mmx_regno += 1;
5880 if (cum->mmx_nregs <= 0)
5891 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5892 tree type, HOST_WIDE_INT words, int named)
5894 int int_nregs, sse_nregs;
5896 /* Unnamed 256bit vector mode parameters are passed on stack. */
5897 if (!named && VALID_AVX256_REG_MODE (mode))
5900 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
5901 cum->words += words;
5902 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
5904 cum->nregs -= int_nregs;
5905 cum->sse_nregs -= sse_nregs;
5906 cum->regno += int_nregs;
5907 cum->sse_regno += sse_nregs;
5910 cum->words += words;
5914 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
5915 HOST_WIDE_INT words)
5917 /* Otherwise, this should be passed indirectly. */
5918 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
5920 cum->words += words;
5929 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5930 tree type, int named)
5932 HOST_WIDE_INT bytes, words;
5934 if (mode == BLKmode)
5935 bytes = int_size_in_bytes (type);
5937 bytes = GET_MODE_SIZE (mode);
5938 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5941 mode = type_natural_mode (type, NULL);
5943 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
5944 function_arg_advance_ms_64 (cum, bytes, words);
5945 else if (TARGET_64BIT)
5946 function_arg_advance_64 (cum, mode, type, words, named);
5948 function_arg_advance_32 (cum, mode, type, bytes, words);
5951 /* Define where to put the arguments to a function.
5952 Value is zero to push the argument on the stack,
5953 or a hard register in which to store the argument.
5955 MODE is the argument's machine mode.
5956 TYPE is the data type of the argument (as a tree).
5957 This is null for libcalls where that information may
5959 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5960 the preceding args and about the function being called.
5961 NAMED is nonzero if this argument is a named parameter
5962 (otherwise it is an extra parameter matching an ellipsis). */
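/* A sketch of the 32-bit conventions dispatched below: with
   -mregparm=3 the first three integral arguments go in EAX, EDX and
   ECX; with fastcall only the first two DWORD-or-smaller non-aggregate
   arguments go in ECX and EDX; anything that does not fit the active
   convention is pushed on the stack.  */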
5965 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5966 enum machine_mode orig_mode, tree type,
5967 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5969 static bool warnedsse, warnedmmx;
5971 /* Avoid the AL settings for the Unix64 ABI. */
5972 if (mode == VOIDmode)
5988 if (words <= cum->nregs)
5990 int regno = cum->regno;
5992 /* Fastcall allocates the first two DWORD (SImode) or
5993 smaller arguments to ECX and EDX if it isn't an aggregate type. */
5999 || (type && AGGREGATE_TYPE_P (type)))
6002 /* ECX, not EAX, is the first allocated register. */
6003 if (regno == AX_REG)
6006 return gen_rtx_REG (mode, regno);
6011 if (cum->float_in_sse < 2)
6014 if (cum->float_in_sse < 1)
6018 /* In 32bit, we pass TImode in xmm registers. */
6025 if (!type || !AGGREGATE_TYPE_P (type))
6027 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
6030 warning (0, "SSE vector argument without SSE enabled "
6034 return gen_reg_or_parallel (mode, orig_mode,
6035 cum->sse_regno + FIRST_SSE_REG);
6040 /* OImode shouldn't be used directly. */
6049 if (!type || !AGGREGATE_TYPE_P (type))
6052 return gen_reg_or_parallel (mode, orig_mode,
6053 cum->sse_regno + FIRST_SSE_REG);
6063 if (!type || !AGGREGATE_TYPE_P (type))
6065 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6068 warning (0, "MMX vector argument without MMX enabled "
6072 return gen_reg_or_parallel (mode, orig_mode,
6073 cum->mmx_regno + FIRST_MMX_REG);
6082 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6083 enum machine_mode orig_mode, tree type, int named)
6085 /* Handle a hidden AL argument containing number of registers
6086 for varargs x86-64 functions. */
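/* Sketch: for a varargs call such as printf ("%f", x) the caller loads
   AL with an upper bound on the number of vector registers actually
   used, here 1, so the callee's prologue can skip saving SSE registers
   it will never read.  */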
6087 if (mode == VOIDmode)
6088 return GEN_INT (cum->maybe_vaarg
6089 ? (cum->sse_nregs < 0
6090 ? (cum->call_abi == ix86_abi
6092 : (ix86_abi != SYSV_ABI
6093 ? X86_64_SSE_REGPARM_MAX
6094 : X86_64_MS_SSE_REGPARM_MAX))
6109 /* Unnamed 256bit vector mode parameters are passed on stack. */
6115 return construct_container (mode, orig_mode, type, 0, cum->nregs,
6117 &x86_64_int_parameter_registers [cum->regno],
6122 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6123 enum machine_mode orig_mode, int named,
6124 HOST_WIDE_INT bytes)
6128 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
6129 We use the value -2 to specify that the current function call is MSABI. */
6130 if (mode == VOIDmode)
6131 return GEN_INT (-2);
6133 /* If we've run out of registers, it goes on the stack. */
6134 if (cum->nregs == 0)
6137 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6139 /* Only floating point modes are passed in anything but integer regs. */
6140 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6143 regno = cum->regno + FIRST_SSE_REG;
6148 /* Unnamed floating parameters are passed in both the
6149 SSE and integer registers. */
6150 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6151 t2 = gen_rtx_REG (mode, regno);
6152 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6153 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6154 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
6157 /* Handle aggregate types passed in registers. */
6158 if (orig_mode == BLKmode)
6160 if (bytes > 0 && bytes <= 8)
6161 mode = (bytes > 4 ? DImode : SImode);
6162 if (mode == BLKmode)
6166 return gen_reg_or_parallel (mode, orig_mode, regno);
6170 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
6171 tree type, int named)
6173 enum machine_mode mode = omode;
6174 HOST_WIDE_INT bytes, words;
6176 if (mode == BLKmode)
6177 bytes = int_size_in_bytes (type);
6179 bytes = GET_MODE_SIZE (mode);
6180 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6182 /* To simplify the code below, represent vector types with a vector mode
6183 even if MMX/SSE are not active. */
6184 if (type && TREE_CODE (type) == VECTOR_TYPE)
6185 mode = type_natural_mode (type, cum);
6187 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6188 return function_arg_ms_64 (cum, mode, omode, named, bytes);
6189 else if (TARGET_64BIT)
6190 return function_arg_64 (cum, mode, omode, type, named);
6192 return function_arg_32 (cum, mode, omode, type, bytes, words);
6195 /* A C expression that indicates when an argument must be passed by
6196 reference. If nonzero for an argument, a copy of that argument is
6197 made in memory and a pointer to the argument is passed instead of
6198 the argument itself. The pointer is passed in whatever way is
6199 appropriate for passing a pointer to that type. */
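/* Example (Windows x64 convention, handled below): a 3-byte
   struct { char c[3]; } is passed by reference, while a 4-byte
   struct { int i; } is passed by value in a register, since only
   aggregates of size 1, 2, 4 or 8 bytes are passed directly.  */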
6202 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6203 enum machine_mode mode ATTRIBUTE_UNUSED,
6204 const_tree type, bool named ATTRIBUTE_UNUSED)
6206 /* See Windows x64 Software Convention. */
6207 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6209 int msize = (int) GET_MODE_SIZE (mode);
6212 /* Arrays are passed by reference. */
6213 if (TREE_CODE (type) == ARRAY_TYPE)
6216 if (AGGREGATE_TYPE_P (type))
6218 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6219 are passed by reference. */
6220 msize = int_size_in_bytes (type);
6224 /* __m128 is passed by reference. */
6226 case 1: case 2: case 4: case 8:
6232 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
6238 /* Return true when TYPE should be 128bit aligned for the 32bit
argument passing ABI. */
6241 contains_aligned_value_p (tree type)
6243 enum machine_mode mode = TYPE_MODE (type);
6244 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6248 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6250 if (TYPE_ALIGN (type) < 128)
6253 if (AGGREGATE_TYPE_P (type))
6255 /* Walk the aggregates recursively. */
6256 switch (TREE_CODE (type))
6260 case QUAL_UNION_TYPE:
6264 /* Walk all the structure fields. */
6265 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6267 if (TREE_CODE (field) == FIELD_DECL
6268 && contains_aligned_value_p (TREE_TYPE (field)))
6275 /* Just for use if some languages pass arrays by value. */
6276 if (contains_aligned_value_p (TREE_TYPE (type)))
6287 /* Gives the alignment boundary, in bits, of an argument with the
6288 specified mode and type. */
6291 ix86_function_arg_boundary (enum machine_mode mode, tree type)
6296 /* Since canonical type is used for call, we convert it to
6297 canonical type if needed. */
6298 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
6299 type = TYPE_CANONICAL (type);
6300 align = TYPE_ALIGN (type);
6303 align = GET_MODE_ALIGNMENT (mode);
6304 if (align < PARM_BOUNDARY)
6305 align = PARM_BOUNDARY;
6306 /* In 32bit, only _Decimal128 and __float128 are aligned to their
6307 natural boundaries. */
6308 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
6310 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
6311 make an exception for SSE modes since these require 128bit alignment.
6314 The handling here differs from field_alignment. ICC aligns MMX
6315 arguments to 4 byte boundaries, while structure fields are aligned
6316 to 8 byte boundaries. */
6319 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6320 align = PARM_BOUNDARY;
6324 if (!contains_aligned_value_p (type))
6325 align = PARM_BOUNDARY;
6328 if (align > BIGGEST_ALIGNMENT)
6329 align = BIGGEST_ALIGNMENT;
6333 /* Return true if N is a possible register number of function value. */
6336 ix86_function_value_regno_p (const unsigned int regno)
6343 case FIRST_FLOAT_REG:
6344 /* TODO: The function should depend on current function ABI but
6345 builtins.c would need updating then. Therefore we use the default ABI. */
6347 if (TARGET_64BIT && ix86_abi == MS_ABI)
6349 return TARGET_FLOAT_RETURNS_IN_80387;
6355 if (TARGET_MACHO || TARGET_64BIT)
6363 /* Define how to find the value returned by a function.
6364 VALTYPE is the data type of the value (as a tree).
6365 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6366 otherwise, FUNC is 0. */
6369 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6370 const_tree fntype, const_tree fn)
6374 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
6375 we normally prevent this case when mmx is not available. However
6376 some ABIs may require the result to be returned like DImode. */
6377 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6378 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
6380 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
6381 we prevent this case when sse is not available. However some ABIs
6382 may require the result to be returned like integer TImode. */
6383 else if (mode == TImode
6384 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6385 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6387 /* 32-byte vector modes in %ymm0. */
6388 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6389 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6391 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6392 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6393 regno = FIRST_FLOAT_REG;
6395 /* Most things go in %eax. */
6398 /* Override FP return register with %xmm0 for local functions when
6399 SSE math is enabled or for functions with sseregparm attribute. */
6400 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6402 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6403 if ((sse_level >= 1 && mode == SFmode)
6404 || (sse_level == 2 && mode == DFmode))
6405 regno = FIRST_SSE_REG;
6408 /* OImode shouldn't be used directly. */
6409 gcc_assert (mode != OImode);
6411 return gen_rtx_REG (orig_mode, regno);
6415 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6420 /* Handle libcalls, which don't provide a type node. */
6421 if (valtype == NULL)
6433 return gen_rtx_REG (mode, FIRST_SSE_REG);
6436 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6440 return gen_rtx_REG (mode, AX_REG);
6444 ret = construct_container (mode, orig_mode, valtype, 1,
6445 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6446 x86_64_int_return_registers, 0);
6448 /* For zero sized structures, construct_container returns NULL, but we
6449 need to keep the rest of the compiler happy by returning a meaningful value. */
6451 ret = gen_rtx_REG (orig_mode, AX_REG);
6457 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6459 unsigned int regno = AX_REG;
6463 switch (GET_MODE_SIZE (mode))
6466 if((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6467 && !COMPLEX_MODE_P (mode))
6468 regno = FIRST_SSE_REG;
6472 if (mode == SFmode || mode == DFmode)
6473 regno = FIRST_SSE_REG;
6479 return gen_rtx_REG (orig_mode, regno);
6483 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6484 enum machine_mode orig_mode, enum machine_mode mode)
6486 const_tree fn, fntype;
6489 if (fntype_or_decl && DECL_P (fntype_or_decl))
6490 fn = fntype_or_decl;
6491 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6493 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6494 return function_value_ms_64 (orig_mode, mode);
6495 else if (TARGET_64BIT)
6496 return function_value_64 (orig_mode, mode, valtype);
6498 return function_value_32 (orig_mode, mode, fntype, fn);
6502 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6503 bool outgoing ATTRIBUTE_UNUSED)
6505 enum machine_mode mode, orig_mode;
6507 orig_mode = TYPE_MODE (valtype);
6508 mode = type_natural_mode (valtype, NULL);
6509 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6513 ix86_libcall_value (enum machine_mode mode)
6515 return ix86_function_value_1 (NULL, NULL, mode, mode);
6518 /* Return true iff type is returned in memory. */
6520 static int ATTRIBUTE_UNUSED
6521 return_in_memory_32 (const_tree type, enum machine_mode mode)
6525 if (mode == BLKmode)
6528 size = int_size_in_bytes (type);
6530 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6533 if (VECTOR_MODE_P (mode) || mode == TImode)
6535 /* User-created vectors small enough to fit in EAX. */
6539 /* MMX/3dNow values are returned in MM0,
6540 except when it doesn't exist. */
6542 return (TARGET_MMX ? 0 : 1);
6544 /* SSE values are returned in XMM0, except when it doesn't exist. */
6546 return (TARGET_SSE ? 0 : 1);
6548 /* AVX values are returned in YMM0, except when it doesn't exist. */
6550 return TARGET_AVX ? 0 : 1;
6559 /* OImode shouldn't be used directly. */
6560 gcc_assert (mode != OImode);
6565 static int ATTRIBUTE_UNUSED
6566 return_in_memory_64 (const_tree type, enum machine_mode mode)
6568 int needed_intregs, needed_sseregs;
6569 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6572 static int ATTRIBUTE_UNUSED
6573 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6575 HOST_WIDE_INT size = int_size_in_bytes (type);
6577 /* __m128 is returned in xmm0. */
6578 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6579 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6582 /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes. */
6583 return (size != 1 && size != 2 && size != 4 && size != 8);
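/* E.g. under this convention a 16-byte __m128 comes back in XMM0, an
   8-byte struct in RAX, and a 12-byte struct through a hidden pointer
   supplied by the caller.  */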
6587 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6589 #ifdef SUBTARGET_RETURN_IN_MEMORY
6590 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6592 const enum machine_mode mode = type_natural_mode (type, NULL);
6596 if (ix86_function_type_abi (fntype) == MS_ABI)
6597 return return_in_memory_ms_64 (type, mode);
6599 return return_in_memory_64 (type, mode);
6602 return return_in_memory_32 (type, mode);
6606 /* Return true iff TYPE is returned in memory. This version is used
6607 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6608 but differs notably in that when MMX is available, 8-byte vectors
6609 are returned in memory, rather than in MMX registers. */
6612 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6615 enum machine_mode mode = type_natural_mode (type, NULL);
6618 return return_in_memory_64 (type, mode);
6620 if (mode == BLKmode)
6623 size = int_size_in_bytes (type);
6625 if (VECTOR_MODE_P (mode))
6627 /* Return in memory only if MMX registers *are* available. This
6628 seems backwards, but it is consistent with the existing
Solaris x86 ABI. */
6635 else if (mode == TImode)
6637 else if (mode == XFmode)
6643 /* When returning SSE vector types, we have a choice of either
6644 (1) being abi incompatible with a -march switch, or
6645 (2) generating an error.
6646 Given no good solution, I think the safest thing is one warning.
6647 The user won't be able to use -Werror, but....
6649 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6650 called in response to actually generating a caller or callee that
6651 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6652 via aggregate_value_p for general type probing from tree-ssa. */
6655 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6657 static bool warnedsse, warnedmmx;
6659 if (!TARGET_64BIT && type)
6661 /* Look at the return type of the function, not the function type. */
6662 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6664 if (!TARGET_SSE && !warnedsse)
6667 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6670 warning (0, "SSE vector return without SSE enabled "
6675 if (!TARGET_MMX && !warnedmmx)
6677 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6680 warning (0, "MMX vector return without MMX enabled "
6690 /* Create the va_list data type. */
6692 /* Returns the calling convention specific va_list data type.
6693 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6696 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6698 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6700 /* For i386 we use a plain pointer to the argument area. */
6701 if (!TARGET_64BIT || abi == MS_ABI)
6702 return build_pointer_type (char_type_node);
6704 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6705 type_decl = build_decl (BUILTINS_LOCATION,
6706 TYPE_DECL, get_identifier ("__va_list_tag"), record);
6708 f_gpr = build_decl (BUILTINS_LOCATION,
6709 FIELD_DECL, get_identifier ("gp_offset"),
6710 unsigned_type_node);
6711 f_fpr = build_decl (BUILTINS_LOCATION,
6712 FIELD_DECL, get_identifier ("fp_offset"),
6713 unsigned_type_node);
6714 f_ovf = build_decl (BUILTINS_LOCATION,
6715 FIELD_DECL, get_identifier ("overflow_arg_area"),
6717 f_sav = build_decl (BUILTINS_LOCATION,
6718 FIELD_DECL, get_identifier ("reg_save_area"),
6721 va_list_gpr_counter_field = f_gpr;
6722 va_list_fpr_counter_field = f_fpr;
6724 DECL_FIELD_CONTEXT (f_gpr) = record;
6725 DECL_FIELD_CONTEXT (f_fpr) = record;
6726 DECL_FIELD_CONTEXT (f_ovf) = record;
6727 DECL_FIELD_CONTEXT (f_sav) = record;
6729 TREE_CHAIN (record) = type_decl;
6730 TYPE_NAME (record) = type_decl;
6731 TYPE_FIELDS (record) = f_gpr;
6732 TREE_CHAIN (f_gpr) = f_fpr;
6733 TREE_CHAIN (f_fpr) = f_ovf;
6734 TREE_CHAIN (f_ovf) = f_sav;
6736 layout_type (record);
6738 /* The correct type is an array type of one element. */
6739 return build_array_type (record, build_index_type (size_zero_node));
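/* In C terms the type built above is, roughly:

     typedef struct __va_list_tag {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __va_list_tag;
     typedef __va_list_tag va_list[1];

   which matches the va_list mandated by the SysV x86-64 ABI.  */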
6742 /* Setup the builtin va_list data type and for 64-bit the additional
6743 calling convention specific va_list data types. */
6746 ix86_build_builtin_va_list (void)
6748 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
6750 /* Initialize abi specific va_list builtin types. */
6754 if (ix86_abi == MS_ABI)
6756 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6757 if (TREE_CODE (t) != RECORD_TYPE)
6758 t = build_variant_type_copy (t);
6759 sysv_va_list_type_node = t;
6764 if (TREE_CODE (t) != RECORD_TYPE)
6765 t = build_variant_type_copy (t);
6766 sysv_va_list_type_node = t;
6768 if (ix86_abi != MS_ABI)
6770 t = ix86_build_builtin_va_list_abi (MS_ABI);
6771 if (TREE_CODE (t) != RECORD_TYPE)
6772 t = build_variant_type_copy (t);
6773 ms_va_list_type_node = t;
6778 if (TREE_CODE (t) != RECORD_TYPE)
6779 t = build_variant_type_copy (t);
6780 ms_va_list_type_node = t;
6787 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6790 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6798 int regparm = ix86_regparm;
6800 if (cum->call_abi != ix86_abi)
6801 regparm = (ix86_abi != SYSV_ABI
6802 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
6804 /* GPR size of varargs save area. */
6805 if (cfun->va_list_gpr_size)
6806 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6808 ix86_varargs_gpr_size = 0;
6810 /* FPR size of varargs save area. We don't need it if we don't pass
6811 anything in SSE registers. */
6812 if (cum->sse_nregs && cfun->va_list_fpr_size)
6813 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6815 ix86_varargs_fpr_size = 0;
6817 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6820 save_area = frame_pointer_rtx;
6821 set = get_varargs_alias_set ();
6823 for (i = cum->regno;
6825 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6828 mem = gen_rtx_MEM (Pmode,
6829 plus_constant (save_area, i * UNITS_PER_WORD));
6830 MEM_NOTRAP_P (mem) = 1;
6831 set_mem_alias_set (mem, set);
6832 emit_move_insn (mem, gen_rtx_REG (Pmode,
6833 x86_64_int_parameter_registers[i]));
6836 if (ix86_varargs_fpr_size)
6838 /* Now emit code to save SSE registers. The AX parameter contains the
6839 number of SSE parameter registers used to call this function. We use
6840 the sse_prologue_save insn template, which produces a computed jump
6841 across the SSE saves. We need some preparation work to get this working. */
6843 label = gen_label_rtx ();
6845 nsse_reg = gen_reg_rtx (Pmode);
6846 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6848 /* Compute the address of the memory block we save into. We always use a
6849 pointer pointing 127 bytes past the first byte to store; a one byte
6850 displacement covers [-128, 127], which keeps each save instruction
6851 limited to 4 bytes (5 bytes for AVX). */
6852 tmp_reg = gen_reg_rtx (Pmode);
6853 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6854 plus_constant (save_area,
6855 ix86_varargs_gpr_size + 127)));
6856 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6857 MEM_NOTRAP_P (mem) = 1;
6858 set_mem_alias_set (mem, set);
6859 set_mem_align (mem, 64);
6861 /* And finally do the dirty job! */
6862 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6863 GEN_INT (cum->sse_regno), label,
6864 gen_reg_rtx (Pmode)));
6869 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6871 alias_set_type set = get_varargs_alias_set ();
6874 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
6878 mem = gen_rtx_MEM (Pmode,
6879 plus_constant (virtual_incoming_args_rtx,
6880 i * UNITS_PER_WORD));
6881 MEM_NOTRAP_P (mem) = 1;
6882 set_mem_alias_set (mem, set);
6884 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6885 emit_move_insn (mem, reg);
6890 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6891 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6894 CUMULATIVE_ARGS next_cum;
6897 /* This argument doesn't appear to be used anymore. Which is good,
6898 because the old code here didn't suppress rtl generation. */
6899 gcc_assert (!no_rtl);
6904 fntype = TREE_TYPE (current_function_decl);
6906 /* For varargs, we do not want to skip the dummy va_dcl argument.
6907 For stdargs, we do want to skip the last named argument. */
6909 if (stdarg_p (fntype))
6910 function_arg_advance (&next_cum, mode, type, 1);
6912 if (cum->call_abi == MS_ABI)
6913 setup_incoming_varargs_ms_64 (&next_cum);
6915 setup_incoming_varargs_64 (&next_cum);
6918 /* Check whether TYPE is a va_list of kind char *. */
6921 is_va_list_char_pointer (tree type)
6925 /* For 32-bit it is always true. */
6928 canonic = ix86_canonical_va_list_type (type);
6929 return (canonic == ms_va_list_type_node
6930 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
6933 /* Implement va_start. */
6936 ix86_va_start (tree valist, rtx nextarg)
6938 HOST_WIDE_INT words, n_gpr, n_fpr;
6939 tree f_gpr, f_fpr, f_ovf, f_sav;
6940 tree gpr, fpr, ovf, sav, t;
6943 /* Only 64bit target needs something special. */
6944 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6946 std_expand_builtin_va_start (valist, nextarg);
6950 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6951 f_fpr = TREE_CHAIN (f_gpr);
6952 f_ovf = TREE_CHAIN (f_fpr);
6953 f_sav = TREE_CHAIN (f_ovf);
6955 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
6956 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6957 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6958 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6959 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6961 /* Count number of gp and fp argument registers used. */
6962 words = crtl->args.info.words;
6963 n_gpr = crtl->args.info.regno;
6964 n_fpr = crtl->args.info.sse_regno;
6966 if (cfun->va_list_gpr_size)
6968 type = TREE_TYPE (gpr);
6969 t = build2 (MODIFY_EXPR, type,
6970 gpr, build_int_cst (type, n_gpr * 8));
6971 TREE_SIDE_EFFECTS (t) = 1;
6972 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6975 if (TARGET_SSE && cfun->va_list_fpr_size)
6977 type = TREE_TYPE (fpr);
6978 t = build2 (MODIFY_EXPR, type, fpr,
6979 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
6980 TREE_SIDE_EFFECTS (t) = 1;
6981 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6984 /* Find the overflow area. */
6985 type = TREE_TYPE (ovf);
6986 t = make_tree (type, crtl->args.internal_arg_pointer);
6988 t = build2 (POINTER_PLUS_EXPR, type, t,
6989 size_int (words * UNITS_PER_WORD));
6990 t = build2 (MODIFY_EXPR, type, ovf, t);
6991 TREE_SIDE_EFFECTS (t) = 1;
6992 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6994 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
6996 /* Find the register save area.
6997 The prologue of the function saves it right above the stack frame. */
6998 type = TREE_TYPE (sav);
6999 t = make_tree (type, frame_pointer_rtx);
7000 if (!ix86_varargs_gpr_size)
7001 t = build2 (POINTER_PLUS_EXPR, type, t,
7002 size_int (-8 * X86_64_REGPARM_MAX));
7003 t = build2 (MODIFY_EXPR, type, sav, t);
7004 TREE_SIDE_EFFECTS (t) = 1;
7005 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
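/* A sketch of the state established above: gp_offset is 8 * n_gpr
   (each GP slot in the save area is 8 bytes), fp_offset is
   8 * X86_64_REGPARM_MAX + 16 * n_fpr (the six GP slots precede the
   16-byte SSE slots, so it starts at 48), overflow_arg_area points just
   past the named stack arguments, and reg_save_area points at the save
   area laid down by the prologue.  */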
7009 /* Implement va_arg. */
7012 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7015 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
7016 tree f_gpr, f_fpr, f_ovf, f_sav;
7017 tree gpr, fpr, ovf, sav, t;
7019 tree lab_false, lab_over = NULL_TREE;
7024 enum machine_mode nat_mode;
7025 unsigned int arg_boundary;
7027 /* Only 64bit target needs something special. */
7028 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7029 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7031 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7032 f_fpr = TREE_CHAIN (f_gpr);
7033 f_ovf = TREE_CHAIN (f_fpr);
7034 f_sav = TREE_CHAIN (f_ovf);
7036 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7037 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7038 valist = build_va_arg_indirect_ref (valist);
7039 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7040 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7041 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7043 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7045 type = build_pointer_type (type);
7046 size = int_size_in_bytes (type);
7047 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7049 nat_mode = type_natural_mode (type, NULL);
7058 /* Unnamed 256bit vector mode parameters are passed on stack. */
7059 if (ix86_cfun_abi () == SYSV_ABI)
7066 container = construct_container (nat_mode, TYPE_MODE (type),
7067 type, 0, X86_64_REGPARM_MAX,
7068 X86_64_SSE_REGPARM_MAX, intreg,
7073 /* Pull the value out of the saved registers. */
7075 addr = create_tmp_var (ptr_type_node, "addr");
7079 int needed_intregs, needed_sseregs;
7081 tree int_addr, sse_addr;
7083 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7084 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7086 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7088 need_temp = (!REG_P (container)
7089 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7090 || TYPE_ALIGN (type) > 128));
7092 /* In case we are passing a structure, verify that it is a consecutive
7093 block in the register save area. If not, we need to do moves. */
7094 if (!need_temp && !REG_P (container))
7096 /* Verify that all registers are strictly consecutive. */
7097 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7101 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7103 rtx slot = XVECEXP (container, 0, i);
7104 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7105 || INTVAL (XEXP (slot, 1)) != i * 16)
7113 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7115 rtx slot = XVECEXP (container, 0, i);
7116 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7117 || INTVAL (XEXP (slot, 1)) != i * 8)
7129 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7130 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7133 /* First ensure that we fit completely in registers. */
7136 t = build_int_cst (TREE_TYPE (gpr),
7137 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7138 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7139 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7140 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7141 gimplify_and_add (t, pre_p);
7145 t = build_int_cst (TREE_TYPE (fpr),
7146 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7147 + X86_64_REGPARM_MAX * 8);
7148 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7149 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7150 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7151 gimplify_and_add (t, pre_p);
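/* The two GE_EXPR tests above implement the register-exhaustion check:
   e.g. with needed_intregs == 2 we jump to lab_false (the stack
   overflow-area path) once gp_offset >= (6 - 2 + 1) * 8 == 40, i.e.
   when fewer than two 8-byte GP slots remain in the save area.  */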
7154 /* Compute index to start of area used for integer regs. */
7157 /* int_addr = gpr + sav; */
7158 t = fold_convert (sizetype, gpr);
7159 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7160 gimplify_assign (int_addr, t, pre_p);
7164 /* sse_addr = fpr + sav; */
7165 t = fold_convert (sizetype, fpr);
7166 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7167 gimplify_assign (sse_addr, t, pre_p);
7172 tree temp = create_tmp_var (type, "va_arg_tmp");
7175 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
7176 gimplify_assign (addr, t, pre_p);
7178 for (i = 0; i < XVECLEN (container, 0); i++)
7180 rtx slot = XVECEXP (container, 0, i);
7181 rtx reg = XEXP (slot, 0);
7182 enum machine_mode mode = GET_MODE (reg);
7183 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
7184 tree addr_type = build_pointer_type (piece_type);
7185 tree daddr_type = build_pointer_type_for_mode (piece_type,
7189 tree dest_addr, dest;
7191 if (SSE_REGNO_P (REGNO (reg)))
7193 src_addr = sse_addr;
7194 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
7198 src_addr = int_addr;
7199 src_offset = REGNO (reg) * 8;
7201 src_addr = fold_convert (addr_type, src_addr);
7202 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
7203 size_int (src_offset));
7204 src = build_va_arg_indirect_ref (src_addr);
7206 dest_addr = fold_convert (daddr_type, addr);
7207 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
7208 size_int (INTVAL (XEXP (slot, 1))));
7209 dest = build_va_arg_indirect_ref (dest_addr);
7211 gimplify_assign (dest, src, pre_p);
7217 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
7218 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7219 gimplify_assign (gpr, t, pre_p);
7224 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7225 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7226 gimplify_assign (fpr, t, pre_p);
7229 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7231 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7234 /* ... otherwise out of the overflow area. */
7236 /* When the caller aligns a parameter on the stack, a parameter
7237 alignment beyond MAX_SUPPORTED_STACK_ALIGNMENT is in fact
7238 aligned at MAX_SUPPORTED_STACK_ALIGNMENT. Match the callee
7239 here with the caller. */
7240 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
7241 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7242 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7244 /* Care for on-stack alignment if needed. */
7245 if (arg_boundary <= 64
7246 || integer_zerop (TYPE_SIZE (type)))
7250 HOST_WIDE_INT align = arg_boundary / 8;
7251 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
7252 size_int (align - 1));
7253 t = fold_convert (sizetype, t);
7254 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7256 t = fold_convert (TREE_TYPE (ovf), t);
7257 if (crtl->stack_alignment_needed < arg_boundary)
7258 crtl->stack_alignment_needed = arg_boundary;
7260 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7261 gimplify_assign (addr, t, pre_p);
7263 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
7264 size_int (rsize * UNITS_PER_WORD));
7265 gimplify_assign (unshare_expr (ovf), t, pre_p);
7268 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7270 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
7271 addr = fold_convert (ptrtype, addr);
7274 addr = build_va_arg_indirect_ref (addr);
7275 return build_va_arg_indirect_ref (addr);
7278 /* Return nonzero if OPNUM's MEM should be matched
7279 in movabs* patterns. */
7282 ix86_check_movabs (rtx insn, int opnum)
7286 set = PATTERN (insn);
7287 if (GET_CODE (set) == PARALLEL)
7288 set = XVECEXP (set, 0, 0);
7289 gcc_assert (GET_CODE (set) == SET);
7290 mem = XEXP (set, opnum);
7291 while (GET_CODE (mem) == SUBREG)
7292 mem = SUBREG_REG (mem);
7293 gcc_assert (MEM_P (mem));
7294 return (volatile_ok || !MEM_VOLATILE_P (mem));
7297 /* Initialize the table of extra 80387 mathematical constants. */
7300 init_ext_80387_constants (void)
7302 static const char * cst[5] =
7304 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7305 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7306 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7307 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7308 "3.1415926535897932385128089594061862044", /* 4: fldpi */
7312 for (i = 0; i < 5; i++)
7314 real_from_string (&ext_80387_constants_table[i], cst[i]);
7315 /* Ensure each constant is rounded to XFmode precision. */
7316 real_convert (&ext_80387_constants_table[i],
7317 XFmode, &ext_80387_constants_table[i]);
7320 ext_80387_constants_init = 1;
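/* These five values are exactly the constants the x87 can materialize
   with a single opcode: fldlg2 pushes log10(2), fldln2 pushes ln(2),
   fldl2e pushes log2(e), fldl2t pushes log2(10) and fldpi pushes pi.  */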
7323 /* Return true if the constant is something that can be loaded with
7324 a special instruction. */
7327 standard_80387_constant_p (rtx x)
7329 enum machine_mode mode = GET_MODE (x);
7333 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7336 if (x == CONST0_RTX (mode))
7338 if (x == CONST1_RTX (mode))
7341 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7343 /* For XFmode constants, try to find a special 80387 instruction when
7344 optimizing for size or on those CPUs that benefit from them. */
7346 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7350 if (! ext_80387_constants_init)
7351 init_ext_80387_constants ();
7353 for (i = 0; i < 5; i++)
7354 if (real_identical (&r, &ext_80387_constants_table[i]))
7358 /* A load of the constant -0.0 or -1.0 will be split into an
7359 fldz;fchs or fld1;fchs sequence. */
7360 if (real_isnegzero (&r))
7362 if (real_identical (&r, &dconstm1))
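/* A sketch of the encoding returned above, as consumed by
   standard_80387_constant_opcode and standard_80387_constant_rtx:
   1 selects fldz, 2 fld1, 3..7 the ext_80387_constants_table entries
   (fldlg2, fldln2, fldl2e, fldl2t, fldpi), and 8/9 the negated
   fldz;fchs and fld1;fchs sequences.  */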
7368 /* Return the opcode of the special instruction to be used to load
the constant X. */
7372 standard_80387_constant_opcode (rtx x)
7374 switch (standard_80387_constant_p (x))
7398 /* Return the CONST_DOUBLE representing the 80387 constant that is
7399 loaded by the specified special instruction. The argument IDX
7400 matches the return value from standard_80387_constant_p. */
7403 standard_80387_constant_rtx (int idx)
7407 if (! ext_80387_constants_init)
7408 init_ext_80387_constants ();
7424 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
7428 /* Return 1 if X is all 0s and 2 if x is all 1s
7429 in supported SSE vector mode. */
7432 standard_sse_constant_p (rtx x)
7434 enum machine_mode mode = GET_MODE (x);
7436 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7438 if (vector_all_ones_operand (x, mode))
7454 /* Return the opcode of the special instruction to be used to load
the constant X. */
7458 standard_sse_constant_opcode (rtx insn, rtx x)
7460 switch (standard_sse_constant_p (x))
7463 switch (get_attr_mode (insn))
7466 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7468 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7470 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7472 return "vxorps\t%x0, %x0, %x0";
7474 return "vxorpd\t%x0, %x0, %x0";
7476 return "vpxor\t%x0, %x0, %x0";
7481 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
7488 /* Returns 1 if OP contains a symbol reference. */
7491 symbolic_reference_mentioned_p (rtx op)
7496 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7499 fmt = GET_RTX_FORMAT (GET_CODE (op));
7500 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7506 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7507 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7511 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7518 /* Return 1 if it is appropriate to emit `ret' instructions in the
7519 body of a function. Do this only if the epilogue is simple, needing a
7520 couple of insns. Prior to reloading, we can't tell how many registers
7521 must be saved, so return 0 then. Return 0 if there is no frame
7522 marker to de-allocate. */
7525 ix86_can_use_return_insn_p (void)
7527 struct ix86_frame frame;
7529 if (! reload_completed || frame_pointer_needed)
7532 /* Don't allow more than 32k to be popped, since that's all we can do
7533 with one instruction. */
7534 if (crtl->args.pops_args
7535 && crtl->args.size >= 32768)
7538 ix86_compute_frame_layout (&frame);
7539 return frame.to_allocate == 0 && frame.padding0 == 0
7540 && (frame.nregs + frame.nsseregs) == 0;
7543 /* Value should be nonzero if functions must have frame pointers.
7544 Zero means the frame pointer need not be set up (and parms may
7545 be accessed via the stack pointer) in functions that seem suitable. */
7548 ix86_frame_pointer_required (void)
7550 /* If we accessed previous frames, then the generated code expects
7551 to be able to access the saved ebp value in our frame. */
7552 if (cfun->machine->accesses_prev_frame)
7555 /* Several x86 OSes need a frame pointer for other reasons,
7556 usually pertaining to setjmp. */
7557 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7560 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7561 the frame pointer by default. Turn it back on now if we've not
7562 got a leaf function. */
7563 if (TARGET_OMIT_LEAF_FRAME_POINTER
7564 && (!current_function_is_leaf
7565 || ix86_current_function_calls_tls_descriptor))
7574 /* Record that the current function accesses previous call frames. */
7577 ix86_setup_frame_addresses (void)
7579 cfun->machine->accesses_prev_frame = 1;
7582 #ifndef USE_HIDDEN_LINKONCE
7583 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7584 # define USE_HIDDEN_LINKONCE 1
7586 # define USE_HIDDEN_LINKONCE 0
7590 static int pic_labels_used;
7592 /* Fills in the label name that should be used for a pc thunk for
7593 the given register. */
7596 get_pc_thunk_name (char name[32], unsigned int regno)
7598 gcc_assert (!TARGET_64BIT);
7600 if (USE_HIDDEN_LINKONCE)
7601 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7603 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
7607 /* This function generates code for -fpic that loads %ebx with
7608 the return address of the caller and then returns. */
7611 ix86_code_end (void)
7616 for (regno = 0; regno < 8; ++regno)
7621 if (! ((pic_labels_used >> regno) & 1))
7624 get_pc_thunk_name (name, regno);
7626 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
7627 get_identifier (name),
7628 build_function_type (void_type_node, void_list_node));
7629 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
7630 NULL_TREE, void_type_node);
7631 TREE_PUBLIC (decl) = 1;
7632 TREE_STATIC (decl) = 1;
7637 switch_to_section (darwin_sections[text_coal_section]);
7638 fputs ("\t.weak_definition\t", asm_out_file);
7639 assemble_name (asm_out_file, name);
7640 fputs ("\n\t.private_extern\t", asm_out_file);
7641 assemble_name (asm_out_file, name);
7642 fputs ("\n", asm_out_file);
7643 ASM_OUTPUT_LABEL (asm_out_file, name);
7644 DECL_WEAK (decl) = 1;
7648 if (USE_HIDDEN_LINKONCE)
7650 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
7652 (*targetm.asm_out.unique_section) (decl, 0);
7653 switch_to_section (get_named_section (decl, NULL, 0));
7655 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7656 fputs ("\t.hidden\t", asm_out_file);
7657 assemble_name (asm_out_file, name);
7658 putc ('\n', asm_out_file);
7659 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7663 switch_to_section (text_section);
7664 ASM_OUTPUT_LABEL (asm_out_file, name);
7667 DECL_INITIAL (decl) = make_node (BLOCK);
7668 current_function_decl = decl;
7669 init_function_start (decl);
7670 first_function_block_is_cold = false;
7671 /* Make sure unwind info is emitted for the thunk if needed. */
7672 final_start_function (emit_barrier (), asm_out_file, 1);
7674 xops[0] = gen_rtx_REG (Pmode, regno);
7675 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7676 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7677 output_asm_insn ("ret", xops);
7678 final_end_function ();
7679 init_insn_lengths ();
7680 free_after_compilation (cfun);
7682 current_function_decl = NULL;
7686 /* Emit code for the SET_GOT patterns. */
7689 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7695 if (TARGET_VXWORKS_RTP && flag_pic)
7697 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7698 xops[2] = gen_rtx_MEM (Pmode,
7699 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7700 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7702 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7703 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7704 an unadorned address. */
7705 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7706 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7707 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7711 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7713 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7715 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7718 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7721 output_asm_insn ("call\t%a2", xops);
7722 #ifdef DWARF2_UNWIND_INFO
7723 /* The call to the next label acts as a push. */
7724 if (dwarf2out_do_frame ())
7728 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7729 gen_rtx_PLUS (Pmode,
7732 RTX_FRAME_RELATED_P (insn) = 1;
7733 dwarf2out_frame_debug (insn, true);
7740 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7741 is what will be referenced by the Mach-O PIC subsystem. */
7743 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7746 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7747 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7751 output_asm_insn ("pop%z0\t%0", xops);
7752 #ifdef DWARF2_UNWIND_INFO
7753 /* The pop is a pop and clobbers dest, but doesn't restore it
7754 for unwind info purposes. */
7755 if (dwarf2out_do_frame ())
7759 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
7760 dwarf2out_frame_debug (insn, true);
7761 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7762 gen_rtx_PLUS (Pmode,
7765 RTX_FRAME_RELATED_P (insn) = 1;
7766 dwarf2out_frame_debug (insn, true);
7775 get_pc_thunk_name (name, REGNO (dest));
7776 pic_labels_used |= 1 << REGNO (dest);
7778 #ifdef DWARF2_UNWIND_INFO
7779 /* Ensure all queued register saves are flushed before the
call. */
7781 if (dwarf2out_do_frame ())
7785 insn = emit_barrier ();
7787 dwarf2out_frame_debug (insn, false);
7790 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7791 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7792 output_asm_insn ("call\t%X2", xops);
7793 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7794 is what will be referenced by the Mach-O PIC subsystem. */
7797 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7799 targetm.asm_out.internal_label (asm_out_file, "L",
7800 CODE_LABEL_NUMBER (label));
7807 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7808 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7810 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
7815 /* Generate a "push" pattern for input ARG. */
7820 if (ix86_cfa_state->reg == stack_pointer_rtx)
7821 ix86_cfa_state->offset += UNITS_PER_WORD;
7823 return gen_rtx_SET (VOIDmode,
7825 gen_rtx_PRE_DEC (Pmode,
7826 stack_pointer_rtx)),
7830 /* Return >= 0 if there is an unused call-clobbered register available
7831 for the entire function. */
7834 ix86_select_alt_pic_regnum (void)
7836 if (current_function_is_leaf && !crtl->profile
7837 && !ix86_current_function_calls_tls_descriptor)
7840 /* Can't use the same register for both PIC and DRAP. */
7842 drap = REGNO (crtl->drap_reg);
7845 for (i = 2; i >= 0; --i)
7846 if (i != drap && !df_regs_ever_live_p (i))
7850 return INVALID_REGNUM;
7853 /* Return 1 if we need to save REGNO. */
7855 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7857 if (pic_offset_table_rtx
7858 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7859 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7861 || crtl->calls_eh_return
7862 || crtl->uses_const_pool))
7864 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7869 if (crtl->calls_eh_return && maybe_eh_return)
7874 unsigned test = EH_RETURN_DATA_REGNO (i);
7875 if (test == INVALID_REGNUM)
7882 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
7885 return (df_regs_ever_live_p (regno)
7886 && !call_used_regs[regno]
7887 && !fixed_regs[regno]
7888 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
7891 /* Return the number of saved general purpose registers. */
7894 ix86_nsaved_regs (void)
7899 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7900 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7905 /* Return the number of saved SSE registers. */
7908 ix86_nsaved_sseregs (void)
7913 if (ix86_cfun_abi () != MS_ABI)
7915 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7916 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7921 /* Given FROM and TO register numbers, say whether this elimination is
7922 allowed. If stack alignment is needed, we can only replace argument
7923 pointer with hard frame pointer, or replace frame pointer with stack
7924 pointer. Otherwise, frame pointer elimination is automatically
7925 handled and all other eliminations are valid. */
7928 ix86_can_eliminate (const int from, const int to)
7930 if (stack_realign_fp)
7931 return ((from == ARG_POINTER_REGNUM
7932 && to == HARD_FRAME_POINTER_REGNUM)
7933 || (from == FRAME_POINTER_REGNUM
7934 && to == STACK_POINTER_REGNUM));
7936 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
7939 /* Return the offset between two registers, one to be eliminated, and the other
7940 its replacement, at the start of a routine. */
7943 ix86_initial_elimination_offset (int from, int to)
7945 struct ix86_frame frame;
7946 ix86_compute_frame_layout (&frame);
7948 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7949 return frame.hard_frame_pointer_offset;
7950 else if (from == FRAME_POINTER_REGNUM
7951 && to == HARD_FRAME_POINTER_REGNUM)
7952 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
7955 gcc_assert (to == STACK_POINTER_REGNUM);
7957 if (from == ARG_POINTER_REGNUM)
7958 return frame.stack_pointer_offset;
7960 gcc_assert (from == FRAME_POINTER_REGNUM);
7961 return frame.stack_pointer_offset - frame.frame_pointer_offset;
7965 /* In a dynamically-aligned function, we can't know the offset from
7966 stack pointer to frame pointer, so we must ensure that setjmp
7967 eliminates fp against the hard fp (%ebp) rather than trying to
7968 index from %esp up to the top of the frame across a gap that is
7969 of unknown (at compile-time) size. */
7971 ix86_builtin_setjmp_frame_value (void)
7973 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
7976 /* Fill the structure ix86_frame describing the frame of the currently
compiled function. */
7979 ix86_compute_frame_layout (struct ix86_frame *frame)
7981 unsigned int stack_alignment_needed;
7982 HOST_WIDE_INT offset;
7983 unsigned int preferred_alignment;
7984 HOST_WIDE_INT size = get_frame_size ();
7986 frame->nregs = ix86_nsaved_regs ();
7987 frame->nsseregs = ix86_nsaved_sseregs ();
7989 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
7990 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
7992 /* The MS ABI seems to require stack alignment to always be 16, except
for function prologues and leaf functions. */
7994 if (ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
7996 preferred_alignment = 16;
7997 stack_alignment_needed = 16;
7998 crtl->preferred_stack_boundary = 128;
7999 crtl->stack_alignment_needed = 128;
8002 gcc_assert (!size || stack_alignment_needed);
8003 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
8004 gcc_assert (preferred_alignment <= stack_alignment_needed);
8006 /* During reload iteration the number of registers saved can change.
8007 Recompute the value as needed. Do not recompute when the number of
8008 registers didn't change, as reload does multiple calls to the function
8009 and does not expect the decision to change within a single iteration. */
8010 if (!optimize_function_for_size_p (cfun)
8011 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
8013 int count = frame->nregs;
8014 struct cgraph_node *node = cgraph_node (current_function_decl);
8016 cfun->machine->use_fast_prologue_epilogue_nregs = count;
8017 /* The fast prologue uses move instead of push to save registers. This
8018 is significantly longer, but also executes faster, as modern hardware
8019 can execute the moves in parallel but can't do that for push/pop.
8021 Be careful about choosing which prologue to emit: when the function
8022 takes many instructions to execute, we may want the slow version as
8023 well, as when the function is known to be outside a hot spot (this is
8024 known with feedback only). Weight the size of the function by the
8025 number of registers to save, as it is cheap to use one or two push
8026 instructions but very slow to use many of them. */
8028 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
8029 if (node->frequency < NODE_FREQUENCY_NORMAL
8030 || (flag_branch_probabilities
8031 && node->frequency < NODE_FREQUENCY_HOT))
8032 cfun->machine->use_fast_prologue_epilogue = false;
8034 cfun->machine->use_fast_prologue_epilogue
8035 = !expensive_function_p (count);
8037 if (TARGET_PROLOGUE_USING_MOVE
8038 && cfun->machine->use_fast_prologue_epilogue)
8039 frame->save_regs_using_mov = true;
8041 frame->save_regs_using_mov = false;
8043 /* Skip return address. */
8044 offset = UNITS_PER_WORD;
8046 /* Skip pushed static chain. */
8047 if (ix86_static_chain_on_stack)
8048 offset += UNITS_PER_WORD;
8050 /* Skip saved base pointer. */
8051 if (frame_pointer_needed)
8052 offset += UNITS_PER_WORD;
8054 frame->hard_frame_pointer_offset = offset;
8056 /* Align the offset, because the realigned frame starts from here. */
8058 if (stack_realign_fp)
8059 offset = (offset + stack_alignment_needed -1) & -stack_alignment_needed;
8061 /* Register save area */
8062 offset += frame->nregs * UNITS_PER_WORD;
8064 /* Align SSE reg save area. */
8065 if (frame->nsseregs)
8066 frame->padding0 = ((offset + 16 - 1) & -16) - offset;
8068 frame->padding0 = 0;
8070 /* SSE register save area. */
8071 offset += frame->padding0 + frame->nsseregs * 16;
8074 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
8075 offset += frame->va_arg_size;
8077 /* Align start of frame for local function. */
8078 frame->padding1 = ((offset + stack_alignment_needed - 1)
8079 & -stack_alignment_needed) - offset;
8081 offset += frame->padding1;
8083 /* Frame pointer points here. */
8084 frame->frame_pointer_offset = offset;
8088 /* Add the outgoing arguments area. This can be skipped if we eliminated
8089 all the function calls as dead code.
8090 Skipping is however impossible when the function calls alloca. The
8091 alloca expander assumes that the last crtl->outgoing_args_size bytes
8092 of the stack frame are unused. */
8093 if (ACCUMULATE_OUTGOING_ARGS
8094 && (!current_function_is_leaf || cfun->calls_alloca
8095 || ix86_current_function_calls_tls_descriptor))
8097 offset += crtl->outgoing_args_size;
8098 frame->outgoing_arguments_size = crtl->outgoing_args_size;
8101 frame->outgoing_arguments_size = 0;
8103 /* Align the stack boundary. Only needed if we're calling another
function or using alloca. */
8105 if (!current_function_is_leaf || cfun->calls_alloca
8106 || ix86_current_function_calls_tls_descriptor)
8107 frame->padding2 = ((offset + preferred_alignment - 1)
8108 & -preferred_alignment) - offset;
8110 frame->padding2 = 0;
8112 offset += frame->padding2;
8114 /* We've reached end of stack frame. */
8115 frame->stack_pointer_offset = offset;
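/* A rough picture of the layout computed above, from higher to lower
   addresses:

     return address
     [pushed static chain]
     [saved frame pointer]        <- hard_frame_pointer_offset
     saved GP registers
     [padding0] saved SSE registers
     va_arg register save area
     [padding1]                   <- frame_pointer_offset
     local variables (get_frame_size ())
     outgoing arguments
     [padding2]                   <- stack_pointer_offset  */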
8117 /* The size the prologue needs to allocate. */
8118 frame->to_allocate =
8119 (size + frame->padding1 + frame->padding2
8120 + frame->outgoing_arguments_size + frame->va_arg_size);
8122 if ((!frame->to_allocate && frame->nregs <= 1)
8123 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
8124 frame->save_regs_using_mov = false;
8126 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8127 && current_function_sp_is_unchanging
8128 && current_function_is_leaf
8129 && !ix86_current_function_calls_tls_descriptor)
8131 frame->red_zone_size = frame->to_allocate;
8132 if (frame->save_regs_using_mov)
8133 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
8134 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
8135 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
8136 }
8137 else
8138 frame->red_zone_size = 0;
8139 frame->to_allocate -= frame->red_zone_size;
8140 frame->stack_pointer_offset -= frame->red_zone_size;
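/* A sketch of the frame layout computed above, growing downward from the
   incoming stack pointer (bracketed areas exist only when the
   corresponding feature is in use; the arrows are the offsets set above):
	return address
	[pushed static chain]
	[saved frame pointer]		<- hard_frame_pointer_offset
	integer register save area	(nregs * UNITS_PER_WORD)
	[padding0 + SSE save area]	(nsseregs * 16, 16-byte aligned)
	va_arg register save area
	padding1
	local stack frame		<- frame_pointer_offset
	outgoing arguments
	padding2			<- stack_pointer_offset
   When the red zone is usable, red_zone_size bytes are carved off the
   bottom of to_allocate rather than allocated explicitly.  */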
8143 /* Emit code to save registers in the prologue. */
8146 ix86_emit_save_regs (void)
8151 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
8152 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8154 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
8155 RTX_FRAME_RELATED_P (insn) = 1;
8159 /* Emit code to save integer registers using MOV insns.  The first
8160 register is stored at POINTER + OFFSET.  */
8162 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8167 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8168 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8170 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
8172 gen_rtx_REG (Pmode, regno));
8173 RTX_FRAME_RELATED_P (insn) = 1;
8174 offset += UNITS_PER_WORD;
8178 /* Emit code to save SSE registers using MOV insns.  The first register
8179 is stored at POINTER + OFFSET.  */
8181 ix86_emit_save_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8187 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8188 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8190 mem = adjust_address (gen_rtx_MEM (TImode, pointer), TImode, offset);
8191 set_mem_align (mem, 128);
8192 insn = emit_move_insn (mem, gen_rtx_REG (TImode, regno));
8193 RTX_FRAME_RELATED_P (insn) = 1;
8198 static GTY(()) rtx queued_cfa_restores;
8200 /* Add a REG_CFA_RESTORE REG note to INSN, or queue it until the next stack
8201 manipulation insn.  Don't add it if the previously
8202 saved value will be left untouched within the stack red-zone till return,
8203 as unwinders can find the same value in the register and
8204 on the stack.  */
8206 static void
8207 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT red_offset)
8210 && !TARGET_64BIT_MS_ABI
8211 && red_offset + RED_ZONE_SIZE >= 0
8212 && crtl->args.pops_args < 65536)
8217 add_reg_note (insn, REG_CFA_RESTORE, reg);
8218 RTX_FRAME_RELATED_P (insn) = 1;
8222 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
8225 /* Add queued REG_CFA_RESTORE notes if any to INSN. */
8228 ix86_add_queued_cfa_restore_notes (rtx insn)
8231 if (!queued_cfa_restores)
8233 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
8235 XEXP (last, 1) = REG_NOTES (insn);
8236 REG_NOTES (insn) = queued_cfa_restores;
8237 queued_cfa_restores = NULL_RTX;
8238 RTX_FRAME_RELATED_P (insn) = 1;
8241 /* Expand prologue or epilogue stack adjustment.
8242 The pattern exists to put a dependency on all ebp-based memory accesses.
8243 STYLE should be negative if instructions should be marked as frame related,
8244 zero if the %r11 register is live and cannot be freely used, and positive
8245 otherwise.  */
8247 static void
8248 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
8249 int style, bool set_cfa)
8254 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
8255 else if (x86_64_immediate_operand (offset, DImode))
8256 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
8260 /* r11 is used by indirect sibcall return as well, set before the
8261 epilogue and used after the epilogue.  At the moment, an indirect
8262 sibcall shouldn't be used together with huge frame sizes in one
8263 function because of the frame_size check in sibcall.c.  */
8265 r11 = gen_rtx_REG (DImode, R11_REG);
8266 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
8268 RTX_FRAME_RELATED_P (insn) = 1;
8269 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
8274 ix86_add_queued_cfa_restore_notes (insn);
8280 gcc_assert (ix86_cfa_state->reg == src);
8281 ix86_cfa_state->offset += INTVAL (offset);
8282 ix86_cfa_state->reg = dest;
8284 r = gen_rtx_PLUS (Pmode, src, offset);
8285 r = gen_rtx_SET (VOIDmode, dest, r);
8286 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
8287 RTX_FRAME_RELATED_P (insn) = 1;
8290 RTX_FRAME_RELATED_P (insn) = 1;
8293 /* Find an available register to be used as a dynamic realign argument
8294 pointer register.  Such a register will be written in the prologue and
8295 used at the beginning of the body, so it must not be
8296 1. a parameter passing register.
8297 2. the GOT pointer.
8298 We reuse the static-chain register if it is available.  Otherwise, we
8299 use DI for i386 and R13 for x86-64.  We chose R13 since it has a
8300 longer instruction encoding.
8302 Return: the regno of the chosen register.  */
8304 static unsigned int
8305 find_drap_reg (void)
8307 tree decl = cfun->decl;
8311 /* Use R13 for a nested function or a function that needs a static
8312 chain.  Since a function with a tail call may use any caller-saved
8313 register in the epilogue, the DRAP must not use a caller-saved
8314 register in that case.  */
8315 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8322 /* Use DI for a nested function or a function that needs a static
8323 chain.  Since a function with a tail call may use any caller-saved
8324 register in the epilogue, the DRAP must not use a caller-saved
8325 register in that case.  */
8326 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8329 /* Reuse the static chain register if it isn't used for parameter
8330 passing.  */
8331 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
8332 && !lookup_attribute ("fastcall",
8333 TYPE_ATTRIBUTES (TREE_TYPE (decl)))
8334 && !lookup_attribute ("thiscall",
8335 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
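/* In short, given the constraints above, the 32-bit path prefers %ecx
   (the static chain register) when it is free for the purpose, falling
   back to %edi, while the 64-bit path picks %r13 whenever a static chain
   or a tail call is involved, as the comments above describe.  */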
8342 /* Return the minimum incoming stack alignment.  */
8344 static unsigned int
8345 ix86_minimum_incoming_stack_boundary (bool sibcall)
8347 unsigned int incoming_stack_boundary;
8349 /* Prefer the one specified at command line. */
8350 if (ix86_user_incoming_stack_boundary)
8351 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
8352 /* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack boundary
8353 when -mstackrealign is used, this is not used for a sibcall check, and
8354 the estimated stack alignment is 128 bits.  */
8357 && ix86_force_align_arg_pointer
8358 && crtl->stack_alignment_estimated == 128)
8359 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8361 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
8363 /* The incoming stack alignment can be changed on individual functions
8364 via the force_align_arg_pointer attribute.  We use the smallest
8365 incoming stack boundary.  */
8366 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
8367 && lookup_attribute (ix86_force_align_arg_pointer_string,
8368 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
8369 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8371 /* The incoming stack frame has to be aligned at least at
8372 parm_stack_boundary. */
8373 if (incoming_stack_boundary < crtl->parm_stack_boundary)
8374 incoming_stack_boundary = crtl->parm_stack_boundary;
8376 /* The stack at the entry of main is aligned by the runtime.  We use the
8377 smallest incoming stack boundary.  */
8378 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
8379 && DECL_NAME (current_function_decl)
8380 && MAIN_NAME_P (DECL_NAME (current_function_decl))
8381 && DECL_FILE_SCOPE_P (current_function_decl))
8382 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
8384 return incoming_stack_boundary;
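/* For a concrete feel, assuming the usual ia32/x86-64 definitions of the
   boundary macros: a 64-bit function normally reports 128 bits here,
   matching the psABI rule that %rsp is 16-byte aligned at calls, while a
   32-bit function carrying the force_align_arg_pointer attribute only
   assumes word alignment on entry and realigns itself in the prologue.  */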
8387 /* Update incoming stack boundary and estimated stack alignment. */
8390 ix86_update_stack_boundary (void)
8392 ix86_incoming_stack_boundary
8393 = ix86_minimum_incoming_stack_boundary (false);
8395 /* x86_64 varargs needs 16-byte stack alignment for the register save
8396 area.  */
8397 if (TARGET_64BIT
8398 && cfun->stdarg
8399 && crtl->stack_alignment_estimated < 128)
8400 crtl->stack_alignment_estimated = 128;
8403 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
8404 needed or an rtx for DRAP otherwise. */
8407 ix86_get_drap_rtx (void)
8409 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
8410 crtl->need_drap = true;
8412 if (stack_realign_drap)
8414 /* Assign DRAP to vDRAP and return vDRAP.  */
8415 unsigned int regno = find_drap_reg ();
8420 arg_ptr = gen_rtx_REG (Pmode, regno);
8421 crtl->drap_reg = arg_ptr;
8424 drap_vreg = copy_to_reg (arg_ptr);
8428 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
8431 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
8432 RTX_FRAME_RELATED_P (insn) = 1;
8440 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
8443 ix86_internal_arg_pointer (void)
8445 return virtual_incoming_args_rtx;
8448 /* Finalize the stack_realign_needed flag, which guides the prologue/epilogue
8449 to be generated in the correct form.  */
8450 static void
8451 ix86_finalize_stack_realign_flags (void)
8453 /* Check whether stack realignment is really needed after reload, and
8454 store the result in cfun.  */
8455 unsigned int incoming_stack_boundary
8456 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
8457 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
8458 unsigned int stack_realign = (incoming_stack_boundary
8459 < (current_function_is_leaf
8460 ? crtl->max_used_stack_slot_alignment
8461 : crtl->stack_alignment_needed));
8463 if (crtl->stack_realign_finalized)
8465 /* After stack_realign_needed is finalized, we can no longer
8466 change it.  */
8467 gcc_assert (crtl->stack_realign_needed == stack_realign);
8471 crtl->stack_realign_needed = stack_realign;
8472 crtl->stack_realign_finalized = true;
8476 /* Expand the prologue into a bunch of separate insns. */
8479 ix86_expand_prologue (void)
8483 struct ix86_frame frame;
8484 HOST_WIDE_INT allocate;
8485 int gen_frame_pointer = frame_pointer_needed;
8487 ix86_finalize_stack_realign_flags ();
8489 /* DRAP should not coexist with stack_realign_fp */
8490 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
8492 /* Initialize CFA state for before the prologue. */
8493 ix86_cfa_state->reg = stack_pointer_rtx;
8494 ix86_cfa_state->offset = INCOMING_FRAME_SP_OFFSET;
8496 ix86_compute_frame_layout (&frame);
8498 if (ix86_function_ms_hook_prologue (current_function_decl))
8502 /* Make sure the function starts with
8503 8b ff movl.s %edi,%edi
8504 55       push   %ebp
8505 8b ec    movl.s %esp,%ebp
8507 This matches the hookable function prologue in Win32 API
8508 functions in Microsoft Windows XP Service Pack 2 and newer.
8509 Wine uses this to enable Windows apps to hook the Win32 API
8510 functions provided by Wine. */
8511 insn = emit_insn (gen_vswapmov (gen_rtx_REG (SImode, DI_REG),
8512 gen_rtx_REG (SImode, DI_REG)));
8513 push = emit_insn (gen_push (hard_frame_pointer_rtx));
8514 mov = emit_insn (gen_vswapmov (hard_frame_pointer_rtx,
8515 stack_pointer_rtx));
8517 if (frame_pointer_needed && !(crtl->drap_reg
8518 && crtl->stack_realign_needed))
8520 /* The push %ebp and movl.s %esp, %ebp already set up
8521 the frame pointer. No need to do this again. */
8522 gen_frame_pointer = 0;
8523 RTX_FRAME_RELATED_P (push) = 1;
8524 RTX_FRAME_RELATED_P (mov) = 1;
8525 if (ix86_cfa_state->reg == stack_pointer_rtx)
8526 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8529 /* If the frame pointer is not needed, pop %ebp again. This
8530 could be optimized for cases where ebp needs to be backed up
8531 for some other reason. If stack realignment is needed, pop
8532 the base pointer again, align the stack, and later regenerate
8533 the frame pointer setup. The frame pointer generated by the
8534 hook prologue is not aligned, so it can't be used. */
8535 insn = emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8538 /* The first insn of a function that accepts its static chain on the
8539 stack is to push the register that would be filled in by a direct
8540 call. This insn will be skipped by the trampoline. */
8541 if (ix86_static_chain_on_stack)
8545 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
8546 emit_insn (gen_blockage ());
8548 /* We don't want to interpret this push insn as a register save,
8549 only as a stack adjustment. The real copy of the register as
8550 a save will be done later, if needed. */
8551 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
8552 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8553 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8554 RTX_FRAME_RELATED_P (insn) = 1;
8557 /* Emit prologue code to adjust the stack alignment and set up DRAP, in
8558 case DRAP is needed and stack realignment is really needed after reload.  */
8559 if (crtl->drap_reg && crtl->stack_realign_needed)
8562 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8563 int param_ptr_offset = UNITS_PER_WORD;
8565 if (ix86_static_chain_on_stack)
8566 param_ptr_offset += UNITS_PER_WORD;
8567 if (!call_used_regs[REGNO (crtl->drap_reg)])
8568 param_ptr_offset += UNITS_PER_WORD;
8570 gcc_assert (stack_realign_drap);
8572 /* Grab the argument pointer. */
8573 x = plus_constant (stack_pointer_rtx, param_ptr_offset);
8576 /* Only need to push the parameter pointer reg if it is caller
8577 saved.  */
8578 if (!call_used_regs[REGNO (crtl->drap_reg)])
8580 /* Push arg pointer reg */
8581 insn = emit_insn (gen_push (y));
8582 RTX_FRAME_RELATED_P (insn) = 1;
8585 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
8586 RTX_FRAME_RELATED_P (insn) = 1;
8587 ix86_cfa_state->reg = crtl->drap_reg;
8589 /* Align the stack. */
8590 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8592 GEN_INT (-align_bytes)));
8593 RTX_FRAME_RELATED_P (insn) = 1;
8595 /* Replicate the return address on the stack so that the return
8596 address can be reached via the (argp - 1) slot.  This is needed
8597 to implement the macro RETURN_ADDR_RTX and the intrinsic function
8598 expand_builtin_return_addr, etc.  */
8600 x = gen_frame_mem (Pmode,
8601 plus_constant (x, -UNITS_PER_WORD));
8602 insn = emit_insn (gen_push (x));
8603 RTX_FRAME_RELATED_P (insn) = 1;
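/* For illustration only: with a 16-byte alignment requirement the DRAP
   setup built above typically assembles to something like
	lea	0x4(%esp), %ecx		; grab the argument pointer
	and	$-16, %esp		; align the stack
	push	-0x4(%ecx)		; replicate the return address
   with the chosen DRAP register pushed first when it is call-saved.  */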
8606 /* Note: AT&T enter does NOT have reversed args. Enter is probably
8607 slower on all targets. Also sdb doesn't like it. */
8609 if (gen_frame_pointer)
8611 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
8612 RTX_FRAME_RELATED_P (insn) = 1;
8614 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8615 RTX_FRAME_RELATED_P (insn) = 1;
8617 if (ix86_cfa_state->reg == stack_pointer_rtx)
8618 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8621 if (stack_realign_fp)
8623 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8624 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
8626 /* Align the stack. */
8627 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8629 GEN_INT (-align_bytes)));
8630 RTX_FRAME_RELATED_P (insn) = 1;
8633 allocate = frame.to_allocate + frame.nsseregs * 16 + frame.padding0;
8635 if (!frame.save_regs_using_mov)
8636 ix86_emit_save_regs ();
8637 else
8638 allocate += frame.nregs * UNITS_PER_WORD;
8640 /* When using the red zone we may start saving registers before allocating
8641 the stack frame, saving one cycle of the prologue.  However, avoid doing
8642 this if we are going to have to probe the stack, since at least on x86_64
8643 the stack probe can turn into a call that clobbers
8644 a red zone location.  */
8645 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
8646 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
8647 ix86_emit_save_regs_using_mov ((frame_pointer_needed
8648 && !crtl->stack_realign_needed)
8649 ? hard_frame_pointer_rtx
8650 : stack_pointer_rtx,
8651 -frame.nregs * UNITS_PER_WORD);
8653 if (allocate == 0)
8654 ;
8655 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
8656 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8657 GEN_INT (-allocate), -1,
8658 ix86_cfa_state->reg == stack_pointer_rtx);
8661 rtx eax = gen_rtx_REG (Pmode, AX_REG);
8665 if (cfun->machine->call_abi == MS_ABI)
8668 eax_live = ix86_eax_live_at_start_p ();
8672 emit_insn (gen_push (eax));
8673 allocate -= UNITS_PER_WORD;
8676 emit_move_insn (eax, GEN_INT (allocate));
8678 if (TARGET_64BIT)
8679 insn = gen_allocate_stack_worker_64 (eax, eax);
8680 else
8681 insn = gen_allocate_stack_worker_32 (eax, eax);
8682 insn = emit_insn (insn);
8684 if (ix86_cfa_state->reg == stack_pointer_rtx)
8686 ix86_cfa_state->offset += allocate;
8687 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
8688 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8689 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8690 RTX_FRAME_RELATED_P (insn) = 1;
8695 if (frame_pointer_needed)
8696 t = plus_constant (hard_frame_pointer_rtx,
8699 - frame.nregs * UNITS_PER_WORD);
8700 else
8701 t = plus_constant (stack_pointer_rtx, allocate);
8702 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8706 if (frame.save_regs_using_mov
8707 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8708 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8710 if (!frame_pointer_needed
8711 || !(frame.to_allocate + frame.padding0)
8712 || crtl->stack_realign_needed)
8713 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8714 frame.to_allocate
8715 + frame.nsseregs * 16 + frame.padding0);
8716 else
8717 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8718 -frame.nregs * UNITS_PER_WORD);
8720 if (!frame_pointer_needed
8721 || !(frame.to_allocate + frame.padding0)
8722 || crtl->stack_realign_needed)
8723 ix86_emit_save_sse_regs_using_mov (stack_pointer_rtx,
8724 frame.to_allocate);
8725 else
8726 ix86_emit_save_sse_regs_using_mov (hard_frame_pointer_rtx,
8727 - frame.nregs * UNITS_PER_WORD
8728 - frame.nsseregs * 16
8731 pic_reg_used = false;
8732 if (pic_offset_table_rtx
8733 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8736 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8738 if (alt_pic_reg_used != INVALID_REGNUM)
8739 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8740 else
8741 pic_reg_used = true;
8748 if (ix86_cmodel == CM_LARGE_PIC)
8750 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8751 rtx label = gen_label_rtx ();
8753 LABEL_PRESERVE_P (label) = 1;
8754 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8755 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8756 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8757 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8758 pic_offset_table_rtx, tmp_reg));
8759 }
8760 else
8761 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8762 }
8763 else
8764 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
8767 /* In the pic_reg_used case, make sure that the GOT load isn't deleted
8768 when mcount needs it.  The blockage to avoid call movement across the
8769 mcount call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
8770 note.  */
8771 if (crtl->profile && pic_reg_used)
8772 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8774 if (crtl->drap_reg && !crtl->stack_realign_needed)
8776 /* vDRAP is set up, but after reload it turns out stack realignment
8777 isn't necessary; here we emit the prologue to set up DRAP
8778 without the stack realignment adjustment.  */
8780 int drap_bp_offset = UNITS_PER_WORD * 2;
8782 if (ix86_static_chain_on_stack)
8783 drap_bp_offset += UNITS_PER_WORD;
8784 x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8785 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8788 /* Prevent instructions from being scheduled into the register save push
8789 sequence when access to the red zone area is done through the frame pointer.
8790 The offset between the frame pointer and the stack pointer is calculated
8791 relative to the value of the stack pointer at the end of the function
8792 prologue, and moving instructions that access the red zone area via the
8793 frame pointer inside the push sequence violates this assumption.  */
8794 if (frame_pointer_needed && frame.red_zone_size)
8795 emit_insn (gen_memory_blockage ());
8797 /* Emit cld instruction if stringops are used in the function. */
8798 if (TARGET_CLD && ix86_current_function_needs_cld)
8799 emit_insn (gen_cld ());
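/* Putting the pieces above together, a plain ia32 frame-pointer prologue
   emitted by this function is, schematically:
	push	%ebp
	mov	%esp, %ebp
	push	%ebx			; register saves (pushes or movs)
	sub	$N, %esp		; N == allocate, as computed above
   with the mov-based variant storing call-saved registers into the
   already-allocated frame instead of pushing them.  */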
8802 /* Emit code to restore REG using a POP insn. */
8805 ix86_emit_restore_reg_using_pop (rtx reg, HOST_WIDE_INT red_offset)
8807 rtx insn = emit_insn (ix86_gen_pop1 (reg));
8809 if (ix86_cfa_state->reg == crtl->drap_reg
8810 && REGNO (reg) == REGNO (crtl->drap_reg))
8812 /* Previously we'd represented the CFA as an expression
8813 like *(%ebp - 8). We've just popped that value from
8814 the stack, which means we need to reset the CFA to
8815 the drap register. This will remain until we restore
8816 the stack pointer. */
8817 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8818 RTX_FRAME_RELATED_P (insn) = 1;
8822 if (ix86_cfa_state->reg == stack_pointer_rtx)
8824 ix86_cfa_state->offset -= UNITS_PER_WORD;
8825 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8826 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
8827 RTX_FRAME_RELATED_P (insn) = 1;
8830 /* When the frame pointer is the CFA, and we pop it, we are
8831 swapping back to the stack pointer as the CFA. This happens
8832 for stack frames that don't allocate other data, so we assume
8833 the stack pointer is now pointing at the return address, i.e.
8834 the function entry state, which makes the offset be 1 word. */
8835 else if (ix86_cfa_state->reg == hard_frame_pointer_rtx
8836 && reg == hard_frame_pointer_rtx)
8838 ix86_cfa_state->reg = stack_pointer_rtx;
8839 ix86_cfa_state->offset -= UNITS_PER_WORD;
8841 add_reg_note (insn, REG_CFA_DEF_CFA,
8842 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8843 GEN_INT (ix86_cfa_state->offset)));
8844 RTX_FRAME_RELATED_P (insn) = 1;
8847 ix86_add_cfa_restore_note (insn, reg, red_offset);
8850 /* Emit code to restore saved registers using POP insns. */
8853 ix86_emit_restore_regs_using_pop (HOST_WIDE_INT red_offset)
8857 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8858 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
8860 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno),
8862 red_offset += UNITS_PER_WORD;
8866 /* Emit code and notes for the LEAVE instruction. */
8869 ix86_emit_leave (HOST_WIDE_INT red_offset)
8871 rtx insn = emit_insn (ix86_gen_leave ());
8873 ix86_add_queued_cfa_restore_notes (insn);
8875 if (ix86_cfa_state->reg == hard_frame_pointer_rtx)
8877 ix86_cfa_state->reg = stack_pointer_rtx;
8878 ix86_cfa_state->offset -= UNITS_PER_WORD;
8880 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8881 copy_rtx (XVECEXP (PATTERN (insn), 0, 0)));
8882 RTX_FRAME_RELATED_P (insn) = 1;
8883 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx, red_offset);
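/* leave is equivalent to "movl %ebp, %esp; popl %ebp", which is why the
   CFA switches back to the stack pointer here, one word above the
   restored frame pointer slot.  */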
8887 /* Emit code to restore saved registers using MOV insns. First register
8888 is restored from POINTER + OFFSET. */
8890 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8891 HOST_WIDE_INT red_offset,
8892 int maybe_eh_return)
8895 rtx base_address = gen_rtx_MEM (Pmode, pointer);
8898 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8899 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8901 rtx reg = gen_rtx_REG (Pmode, regno);
8903 /* Ensure that adjust_address won't be forced to produce pointer
8904 out of range allowed by x86-64 instruction set. */
8905 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8909 r11 = gen_rtx_REG (DImode, R11_REG);
8910 emit_move_insn (r11, GEN_INT (offset));
8911 emit_insn (gen_adddi3 (r11, r11, pointer));
8912 base_address = gen_rtx_MEM (Pmode, r11);
8915 insn = emit_move_insn (reg,
8916 adjust_address (base_address, Pmode, offset));
8917 offset += UNITS_PER_WORD;
8919 if (ix86_cfa_state->reg == crtl->drap_reg
8920 && regno == REGNO (crtl->drap_reg))
8922 /* Previously we'd represented the CFA as an expression
8923 like *(%ebp - 8).  We've just reloaded that value from
8924 the stack, which means we need to reset the CFA to
8925 the drap register.  This will remain until we restore
8926 the stack pointer.  */
8927 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8928 RTX_FRAME_RELATED_P (insn) = 1;
8931 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8933 red_offset += UNITS_PER_WORD;
8937 /* Emit code to restore saved registers using MOV insns. First register
8938 is restored from POINTER + OFFSET. */
8940 ix86_emit_restore_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8941 HOST_WIDE_INT red_offset,
8942 int maybe_eh_return)
8945 rtx base_address = gen_rtx_MEM (TImode, pointer);
8948 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8949 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8951 rtx reg = gen_rtx_REG (TImode, regno);
8953 /* Ensure that adjust_address won't be forced to produce pointer
8954 out of range allowed by x86-64 instruction set. */
8955 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8959 r11 = gen_rtx_REG (DImode, R11_REG);
8960 emit_move_insn (r11, GEN_INT (offset));
8961 emit_insn (gen_adddi3 (r11, r11, pointer));
8962 base_address = gen_rtx_MEM (TImode, r11);
8965 mem = adjust_address (base_address, TImode, offset);
8966 set_mem_align (mem, 128);
8967 emit_move_insn (reg, mem);
8970 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8976 /* Restore function stack, frame, and registers. */
8979 ix86_expand_epilogue (int style)
8982 struct ix86_frame frame;
8983 HOST_WIDE_INT offset, red_offset;
8984 struct machine_cfa_state cfa_state_save = *ix86_cfa_state;
8987 ix86_finalize_stack_realign_flags ();
8989 /* When stack is realigned, SP must be valid. */
8990 sp_valid = (!frame_pointer_needed
8991 || current_function_sp_is_unchanging
8992 || stack_realign_fp);
8994 ix86_compute_frame_layout (&frame);
8996 /* See the comment about red zone and frame
8997 pointer usage in ix86_expand_prologue. */
8998 if (frame_pointer_needed && frame.red_zone_size)
8999 emit_insn (gen_memory_blockage ());
9001 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
9002 gcc_assert (!using_drap || ix86_cfa_state->reg == crtl->drap_reg);
9004 /* Calculate start of saved registers relative to ebp. Special care
9005 must be taken for the normal return case of a function using
9006 eh_return: the eax and edx registers are marked as saved, but not
9007 restored along this path. */
9008 offset = frame.nregs;
9009 if (crtl->calls_eh_return && style != 2)
9010 offset -= 2;
9011 offset *= -UNITS_PER_WORD;
9012 offset -= frame.nsseregs * 16 + frame.padding0;
9014 /* Calculate start of saved registers relative to esp on entry of the
9015 function. When realigning stack, this needs to be the most negative
9016 value possible at runtime. */
9017 red_offset = offset;
9018 if (using_drap)
9019 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
9020 + UNITS_PER_WORD;
9021 else if (stack_realign_fp)
9022 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
9023 - UNITS_PER_WORD;
9024 if (ix86_static_chain_on_stack)
9025 red_offset -= UNITS_PER_WORD;
9026 if (frame_pointer_needed)
9027 red_offset -= UNITS_PER_WORD;
9029 /* If we're only restoring one register and sp is not valid then
9030 use a move instruction to restore the register, since it's
9031 less work than reloading sp and popping the register.
9033 The default code results in a stack adjustment using an add/lea
9034 instruction, while this code results in a LEAVE instruction (or discrete
9035 equivalent), so it is profitable in some other cases as well, especially
9036 when there are no registers to restore.  We also use this code when
9037 TARGET_USE_LEAVE and there is exactly one register to pop.  This
9038 heuristic may need some tuning in future.  */
9039 if ((!sp_valid && (frame.nregs + frame.nsseregs) <= 1)
9040 || (TARGET_EPILOGUE_USING_MOVE
9041 && cfun->machine->use_fast_prologue_epilogue
9042 && ((frame.nregs + frame.nsseregs) > 1
9043 || (frame.to_allocate + frame.padding0) != 0))
9044 || (frame_pointer_needed && !(frame.nregs + frame.nsseregs)
9045 && (frame.to_allocate + frame.padding0) != 0)
9046 || (frame_pointer_needed && TARGET_USE_LEAVE
9047 && cfun->machine->use_fast_prologue_epilogue
9048 && (frame.nregs + frame.nsseregs) == 1)
9049 || crtl->calls_eh_return)
9051 /* Restore registers.  We can use ebp or esp to address the memory
9052 locations.  If both are available, default to ebp, since offsets
9053 are known to be small.  The only exception is esp pointing directly
9054 to the end of the block of saved registers, where we may simplify
9055 the addressing mode.
9057 If we are realigning the stack with bp and sp, the register restores
9058 can't be addressed by bp; sp must be used instead.  */
9060 if (!frame_pointer_needed
9061 || (sp_valid && !(frame.to_allocate + frame.padding0))
9062 || stack_realign_fp)
9064 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9065 frame.to_allocate, red_offset,
9067 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
9069 + frame.nsseregs * 16
9072 + frame.nsseregs * 16
9073 + frame.padding0, style == 2);
9077 ix86_emit_restore_sse_regs_using_mov (hard_frame_pointer_rtx,
9080 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
9082 + frame.nsseregs * 16
9085 + frame.nsseregs * 16
9086 + frame.padding0, style == 2);
9089 red_offset -= offset;
9091 /* eh_return epilogues need %ecx added to the stack pointer. */
9094 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
9096 /* Stack align doesn't work with eh_return. */
9097 gcc_assert (!crtl->stack_realign_needed);
9098 /* Neither does regparm nested functions. */
9099 gcc_assert (!ix86_static_chain_on_stack);
9101 if (frame_pointer_needed)
9103 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
9104 tmp = plus_constant (tmp, UNITS_PER_WORD);
9105 tmp = emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
9107 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
9108 tmp = emit_move_insn (hard_frame_pointer_rtx, tmp);
9110 /* Note that we use SA as a temporary CFA, as the return
9111 address is at the proper place relative to it. We
9112 pretend this happens at the FP restore insn because
9113 prior to this insn the FP would be stored at the wrong
9114 offset relative to SA, and after this insn we have no
9115 other reasonable register to use for the CFA. We don't
9116 bother resetting the CFA to the SP for the duration of
9117 the return insn.  */
9118 add_reg_note (tmp, REG_CFA_DEF_CFA,
9119 plus_constant (sa, UNITS_PER_WORD));
9120 ix86_add_queued_cfa_restore_notes (tmp);
9121 add_reg_note (tmp, REG_CFA_RESTORE, hard_frame_pointer_rtx);
9122 RTX_FRAME_RELATED_P (tmp) = 1;
9123 ix86_cfa_state->reg = sa;
9124 ix86_cfa_state->offset = UNITS_PER_WORD;
9126 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
9127 const0_rtx, style, false);
9131 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
9132 tmp = plus_constant (tmp, (frame.to_allocate
9133 + frame.nregs * UNITS_PER_WORD
9134 + frame.nsseregs * 16
9136 tmp = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
9137 ix86_add_queued_cfa_restore_notes (tmp);
9139 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9140 if (ix86_cfa_state->offset != UNITS_PER_WORD)
9142 ix86_cfa_state->offset = UNITS_PER_WORD;
9143 add_reg_note (tmp, REG_CFA_DEF_CFA,
9144 plus_constant (stack_pointer_rtx,
9146 RTX_FRAME_RELATED_P (tmp) = 1;
9150 else if (!frame_pointer_needed)
9151 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9152 GEN_INT (frame.to_allocate
9153 + frame.nregs * UNITS_PER_WORD
9154 + frame.nsseregs * 16
9156 style, !using_drap);
9157 /* If not an i386, mov & pop is faster than "leave". */
9158 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
9159 || !cfun->machine->use_fast_prologue_epilogue)
9160 ix86_emit_leave (red_offset);
9163 pro_epilogue_adjust_stack (stack_pointer_rtx,
9164 hard_frame_pointer_rtx,
9165 const0_rtx, style, !using_drap);
9167 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx, red_offset);
9172 /* The first step is to deallocate the stack frame so that we can
9173 pop the registers.
9175 If we realign the stack with the frame pointer, then the stack
9176 pointer can't be recovered via lea $offset(%bp), %sp, because
9177 there is a padding area between bp and sp for the realignment.
9178 "add $to_allocate, %sp" must be used instead.  */
9181 gcc_assert (frame_pointer_needed);
9182 gcc_assert (!stack_realign_fp);
9183 pro_epilogue_adjust_stack (stack_pointer_rtx,
9184 hard_frame_pointer_rtx,
9185 GEN_INT (offset), style, false);
9186 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9189 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9190 GEN_INT (frame.nsseregs * 16
9194 else if (frame.to_allocate || frame.padding0 || frame.nsseregs)
9196 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9197 frame.to_allocate, red_offset,
9199 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9200 GEN_INT (frame.to_allocate
9201 + frame.nsseregs * 16
9202 + frame.padding0), style,
9203 !using_drap && !frame_pointer_needed);
9206 ix86_emit_restore_regs_using_pop (red_offset + frame.nsseregs * 16
9208 red_offset -= offset;
9210 if (frame_pointer_needed)
9212 /* Leave results in shorter dependency chains on CPUs that are
9213 able to grok it fast. */
9214 if (TARGET_USE_LEAVE)
9215 ix86_emit_leave (red_offset);
9218 /* If stack realignment really happened, recovering the stack
9219 pointer from the hard frame pointer is a must if we are not
9220 using leave.  */
9221 if (stack_realign_fp)
9222 pro_epilogue_adjust_stack (stack_pointer_rtx,
9223 hard_frame_pointer_rtx,
9224 const0_rtx, style, !using_drap);
9225 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx,
9233 int param_ptr_offset = UNITS_PER_WORD;
9236 gcc_assert (stack_realign_drap);
9238 if (ix86_static_chain_on_stack)
9239 param_ptr_offset += UNITS_PER_WORD;
9240 if (!call_used_regs[REGNO (crtl->drap_reg)])
9241 param_ptr_offset += UNITS_PER_WORD;
9243 insn = emit_insn ((*ix86_gen_add3) (stack_pointer_rtx,
9245 GEN_INT (-param_ptr_offset)));
9247 ix86_cfa_state->reg = stack_pointer_rtx;
9248 ix86_cfa_state->offset = param_ptr_offset;
9250 add_reg_note (insn, REG_CFA_DEF_CFA,
9251 gen_rtx_PLUS (Pmode, ix86_cfa_state->reg,
9252 GEN_INT (ix86_cfa_state->offset)));
9253 RTX_FRAME_RELATED_P (insn) = 1;
9255 if (!call_used_regs[REGNO (crtl->drap_reg)])
9256 ix86_emit_restore_reg_using_pop (crtl->drap_reg, -UNITS_PER_WORD);
9259 /* Remove the saved static chain from the stack. The use of ECX is
9260 merely as a scratch register, not as the actual static chain. */
9261 if (ix86_static_chain_on_stack)
9265 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9266 ix86_cfa_state->offset += UNITS_PER_WORD;
9268 r = gen_rtx_REG (Pmode, CX_REG);
9269 insn = emit_insn (ix86_gen_pop1 (r));
9271 r = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
9272 r = gen_rtx_SET (VOIDmode, stack_pointer_rtx, r);
9273 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9274 RTX_FRAME_RELATED_P (insn) = 1;
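/* The pop above is merely a compact way to add UNITS_PER_WORD to the
   stack pointer while discarding the saved static chain; the value
   loaded into %ecx is dead, as the comment before it notes.  */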
9277 /* Sibcall epilogues don't want a return instruction. */
9280 *ix86_cfa_state = cfa_state_save;
9284 if (crtl->args.pops_args && crtl->args.size)
9286 rtx popc = GEN_INT (crtl->args.pops_args);
9288 /* i386 can only pop 64K bytes.  If asked to pop more, pop the return
9289 address, do an explicit add, and jump indirectly to the caller.  */
9291 if (crtl->args.pops_args >= 65536)
9293 rtx ecx = gen_rtx_REG (SImode, CX_REG);
9296 /* There is no "pascal" calling convention in any 64bit ABI. */
9297 gcc_assert (!TARGET_64BIT);
9299 insn = emit_insn (gen_popsi1 (ecx));
9300 ix86_cfa_state->offset -= UNITS_PER_WORD;
9302 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9303 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
9304 add_reg_note (insn, REG_CFA_REGISTER,
9305 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
9306 RTX_FRAME_RELATED_P (insn) = 1;
9308 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9310 emit_jump_insn (gen_return_indirect_internal (ecx));
9313 emit_jump_insn (gen_return_pop_internal (popc));
9316 emit_jump_insn (gen_return_internal ());
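/* So the epilogue exits through one of three shapes: a plain "ret", a
   "ret $N" popping up to 64K bytes of arguments, or, for larger N, the
   "popl %ecx; addl $N, %esp; jmp *%ecx" sequence built above.  */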
9318 /* Restore the state back to the state from the prologue,
9319 so that it's correct for the next epilogue. */
9320 *ix86_cfa_state = cfa_state_save;
9323 /* Reset from the function's potential modifications. */
9326 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9327 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9329 if (pic_offset_table_rtx)
9330 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
9332 /* Mach-O doesn't support labels at the end of objects, so if
9333 it looks like we might want one, insert a NOP. */
9335 rtx insn = get_last_insn ();
9338 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
9339 insn = PREV_INSN (insn);
9343 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
9344 fputs ("\tnop\n", file);
9350 /* Extract the parts of an RTL expression that is a valid memory address
9351 for an instruction.  Return 0 if the structure of the address is
9352 grossly off.  Return -1 if the address contains ASHIFT, so it is not
9353 strictly valid, but is still used for computing the length of an lea
9354 instruction.  */
9355 static int
9356 ix86_decompose_address (rtx addr, struct ix86_address *out)
9358 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
9359 rtx base_reg, index_reg;
9360 HOST_WIDE_INT scale = 1;
9361 rtx scale_rtx = NULL_RTX;
9364 enum ix86_address_seg seg = SEG_DEFAULT;
9366 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
9368 else if (GET_CODE (addr) == PLUS)
9378 addends[n++] = XEXP (op, 1);
9381 while (GET_CODE (op) == PLUS);
9386 for (i = n; i >= 0; --i)
9389 switch (GET_CODE (op))
9394 index = XEXP (op, 0);
9395 scale_rtx = XEXP (op, 1);
9401 index = XEXP (op, 0);
9403 if (!CONST_INT_P (tmp))
9405 scale = INTVAL (tmp);
9406 if ((unsigned HOST_WIDE_INT) scale > 3)
9412 if (XINT (op, 1) == UNSPEC_TP
9413 && TARGET_TLS_DIRECT_SEG_REFS
9414 && seg == SEG_DEFAULT)
9415 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
9444 else if (GET_CODE (addr) == MULT)
9446 index = XEXP (addr, 0); /* index*scale */
9447 scale_rtx = XEXP (addr, 1);
9449 else if (GET_CODE (addr) == ASHIFT)
9451 /* We're called for lea too, which implements ashift on occasion. */
9452 index = XEXP (addr, 0);
9453 tmp = XEXP (addr, 1);
9454 if (!CONST_INT_P (tmp))
9456 scale = INTVAL (tmp);
9457 if ((unsigned HOST_WIDE_INT) scale > 3)
9463 disp = addr; /* displacement */
9465 /* Extract the integral value of scale. */
9468 if (!CONST_INT_P (scale_rtx))
9470 scale = INTVAL (scale_rtx);
9473 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
9474 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
9476 /* Avoid useless 0 displacement. */
9477 if (disp == const0_rtx && (base || index))
9480 /* Allow the arg pointer and the stack pointer as index if there is no scaling.  */
9481 if (base_reg && index_reg && scale == 1
9482 && (index_reg == arg_pointer_rtx
9483 || index_reg == frame_pointer_rtx
9484 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
9487 tmp = base, base = index, index = tmp;
9488 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
9491 /* Special case: %ebp cannot be encoded as a base without a displacement.
9492 Similarly %r13.  */
9494 if (!disp && base_reg
9495 && (base_reg == hard_frame_pointer_rtx
9496 || base_reg == frame_pointer_rtx
9497 || base_reg == arg_pointer_rtx
9498 || (REG_P (base_reg)
9499 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
9500 || REGNO (base_reg) == R13_REG))))
9503 /* Special case: on K6, [%esi] makes the instruction vector decoded.
9504 Avoid this by transforming to [%esi+0].
9505 Reload calls address legitimization without cfun defined, so we need
9506 to test cfun for being non-NULL. */
9507 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
9508 && base_reg && !index_reg && !disp
9510 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
9513 /* Special case: encode reg+reg instead of reg*2. */
9514 if (!base && index && scale == 2)
9515 base = index, base_reg = index_reg, scale = 1;
9517 /* Special case: scaling cannot be encoded without base or displacement. */
9518 if (!base && !disp && index && scale != 1)
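/* As a worked example: the canonical RTL for the ia32 address
   12(%eax,%ebx,4), i.e.
	(plus (plus (mult (reg %ebx) (const_int 4))
		    (reg %eax))
	      (const_int 12))
   decomposes to base = %eax, index = %ebx, scale = 4, disp = 12.  */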
9530 /* Return the cost of the memory address x.
9531 For i386, it is better to use a complex address than let gcc copy
9532 the address into a reg and make a new pseudo.  But not if the address
9533 requires two regs - that would mean more pseudos with longer
9534 lifetimes.  */
9535 static int
9536 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
9538 struct ix86_address parts;
9540 int ok = ix86_decompose_address (x, &parts);
9544 if (parts.base && GET_CODE (parts.base) == SUBREG)
9545 parts.base = SUBREG_REG (parts.base);
9546 if (parts.index && GET_CODE (parts.index) == SUBREG)
9547 parts.index = SUBREG_REG (parts.index);
9549 /* Attempt to minimize number of registers in the address. */
9551 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
9553 && (!REG_P (parts.index)
9554 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
9558 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
9560 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
9561 && parts.base != parts.index)
9564 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
9565 since its predecode logic can't detect the length of instructions
9566 and it degenerates to vector decoded.  Increase the cost of such
9567 addresses here.  The penalty is minimally 2 cycles.  It may be worthwhile
9568 to split such addresses or even refuse such addresses at all.
9570 Following addressing modes are affected:
9571 [base+scale*index]
9572 [scale*index+disp]
9573 [base+index]
9575 The first and last case may be avoidable by explicitly coding the zero in
9576 the memory address, but I don't have an AMD-K6 machine handy to check this
9577 theory.  */
9580 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
9581 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
9582 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
9588 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
9589 this is used to form addresses to local data when -fPIC is in
9590 effect.  */
9592 static bool
9593 darwin_local_data_pic (rtx disp)
9595 return (GET_CODE (disp) == UNSPEC
9596 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
9599 /* Determine if a given RTX is a valid constant. We already know this
9600 satisfies CONSTANT_P. */
9603 legitimate_constant_p (rtx x)
9605 switch (GET_CODE (x))
9610 if (GET_CODE (x) == PLUS)
9612 if (!CONST_INT_P (XEXP (x, 1)))
9617 if (TARGET_MACHO && darwin_local_data_pic (x))
9620 /* Only some unspecs are valid as "constants". */
9621 if (GET_CODE (x) == UNSPEC)
9622 switch (XINT (x, 1))
9627 return TARGET_64BIT;
9630 x = XVECEXP (x, 0, 0);
9631 return (GET_CODE (x) == SYMBOL_REF
9632 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9634 x = XVECEXP (x, 0, 0);
9635 return (GET_CODE (x) == SYMBOL_REF
9636 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
9641 /* We must have drilled down to a symbol. */
9642 if (GET_CODE (x) == LABEL_REF)
9644 if (GET_CODE (x) != SYMBOL_REF)
9649 /* TLS symbols are never valid. */
9650 if (SYMBOL_REF_TLS_MODEL (x))
9653 /* DLLIMPORT symbols are never valid. */
9654 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
9655 && SYMBOL_REF_DLLIMPORT_P (x))
9660 if (GET_MODE (x) == TImode
9661 && x != CONST0_RTX (TImode)
9667 if (!standard_sse_constant_p (x))
9674 /* Otherwise we handle everything else in the move patterns. */
9678 /* Determine if it's legal to put X into the constant pool. This
9679 is not possible for the address of thread-local symbols, which
9680 is checked above. */
9683 ix86_cannot_force_const_mem (rtx x)
9685 /* We can always put integral constants and vectors in memory. */
9686 switch (GET_CODE (x))
9696 return !legitimate_constant_p (x);
9700 /* Nonzero if the constant value X is a legitimate general operand
9701 when generating PIC code. It is given that flag_pic is on and
9702 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
9705 legitimate_pic_operand_p (rtx x)
9709 switch (GET_CODE (x))
9712 inner = XEXP (x, 0);
9713 if (GET_CODE (inner) == PLUS
9714 && CONST_INT_P (XEXP (inner, 1)))
9715 inner = XEXP (inner, 0);
9717 /* Only some unspecs are valid as "constants". */
9718 if (GET_CODE (inner) == UNSPEC)
9719 switch (XINT (inner, 1))
9724 return TARGET_64BIT;
9726 x = XVECEXP (inner, 0, 0);
9727 return (GET_CODE (x) == SYMBOL_REF
9728 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9729 case UNSPEC_MACHOPIC_OFFSET:
9730 return legitimate_pic_address_disp_p (x);
9738 return legitimate_pic_address_disp_p (x);
9745 /* Determine if a given CONST RTX is a valid memory displacement
9746 in PIC mode.  */
9748 bool
9749 legitimate_pic_address_disp_p (rtx disp)
9753 /* In 64bit mode we can allow direct addresses of symbols and labels
9754 when they are not dynamic symbols. */
9757 rtx op0 = disp, op1;
9759 switch (GET_CODE (disp))
9765 if (GET_CODE (XEXP (disp, 0)) != PLUS)
9767 op0 = XEXP (XEXP (disp, 0), 0);
9768 op1 = XEXP (XEXP (disp, 0), 1);
9769 if (!CONST_INT_P (op1)
9770 || INTVAL (op1) >= 16*1024*1024
9771 || INTVAL (op1) < -16*1024*1024)
9773 if (GET_CODE (op0) == LABEL_REF)
9775 if (GET_CODE (op0) != SYMBOL_REF)
9780 /* TLS references should always be enclosed in UNSPEC. */
9781 if (SYMBOL_REF_TLS_MODEL (op0))
9783 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
9784 && ix86_cmodel != CM_LARGE_PIC)
9792 if (GET_CODE (disp) != CONST)
9794 disp = XEXP (disp, 0);
9798 /* It is unsafe to allow PLUS expressions; this limits the allowed
9799 distance of GOT table references.  We should not need these anyway.  */
9800 if (GET_CODE (disp) != UNSPEC
9801 || (XINT (disp, 1) != UNSPEC_GOTPCREL
9802 && XINT (disp, 1) != UNSPEC_GOTOFF
9803 && XINT (disp, 1) != UNSPEC_PLTOFF))
9806 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
9807 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
9813 if (GET_CODE (disp) == PLUS)
9815 if (!CONST_INT_P (XEXP (disp, 1)))
9817 disp = XEXP (disp, 0);
9821 if (TARGET_MACHO && darwin_local_data_pic (disp))
9824 if (GET_CODE (disp) != UNSPEC)
9827 switch (XINT (disp, 1))
9832 /* We need to check for both symbols and labels because VxWorks loads
9833 text labels with @GOT rather than @GOTOFF.  See gotoff_operand for
9834 details.  */
9835 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9836 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
9838 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
9839 While the ABI also specifies a 32-bit relocation, we don't produce it in
9840 the small PIC model at all.  */
9841 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9842 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
9844 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
9846 case UNSPEC_GOTTPOFF:
9847 case UNSPEC_GOTNTPOFF:
9848 case UNSPEC_INDNTPOFF:
9851 disp = XVECEXP (disp, 0, 0);
9852 return (GET_CODE (disp) == SYMBOL_REF
9853 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
9855 disp = XVECEXP (disp, 0, 0);
9856 return (GET_CODE (disp) == SYMBOL_REF
9857 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
9859 disp = XVECEXP (disp, 0, 0);
9860 return (GET_CODE (disp) == SYMBOL_REF
9861 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
9867 /* Recognizes RTL expressions that are valid memory addresses for an
9868 instruction. The MODE argument is the machine mode for the MEM
9869 expression that wants to use this address.
9871 It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS should
9872 convert common non-canonical forms to canonical form so that they will
9873 be recognized.  */
9875 static bool
9876 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
9877 rtx addr, bool strict)
9879 struct ix86_address parts;
9880 rtx base, index, disp;
9881 HOST_WIDE_INT scale;
9883 if (ix86_decompose_address (addr, &parts) <= 0)
9884 /* Decomposition failed. */
9888 index = parts.index;
9890 scale = parts.scale;
9892 /* Validate base register.
9894 Don't allow SUBREG's that span more than a word here. It can lead to spill
9895 failures when the base is one word out of a two word structure, which is
9896 represented internally as a DImode int. */
9904 else if (GET_CODE (base) == SUBREG
9905 && REG_P (SUBREG_REG (base))
9906 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
9908 reg = SUBREG_REG (base);
9910 /* Base is not a register. */
9913 if (GET_MODE (base) != Pmode)
9914 /* Base is not in Pmode. */
9917 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
9918 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
9919 /* Base is not valid. */
9923 /* Validate index register.
9925 Don't allow SUBREG's that span more than a word here -- same as above. */
9933 else if (GET_CODE (index) == SUBREG
9934 && REG_P (SUBREG_REG (index))
9935 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
9937 reg = SUBREG_REG (index);
9939 /* Index is not a register. */
9942 if (GET_MODE (index) != Pmode)
9943 /* Index is not in Pmode. */
9946 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
9947 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
9948 /* Index is not valid. */
9952 /* Validate scale factor. */
9956 /* Scale without index. */
9959 if (scale != 2 && scale != 4 && scale != 8)
9960 /* Scale is not a valid multiplier. */
9964 /* Validate displacement. */
9967 if (GET_CODE (disp) == CONST
9968 && GET_CODE (XEXP (disp, 0)) == UNSPEC
9969 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
9970 switch (XINT (XEXP (disp, 0), 1))
9972 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
9973 used.  While the ABI also specifies 32-bit relocations, we don't produce
9974 them at all and use IP-relative addressing instead.  */
9977 gcc_assert (flag_pic);
9979 goto is_legitimate_pic;
9981 /* 64bit address unspec. */
9984 case UNSPEC_GOTPCREL:
9985 gcc_assert (flag_pic);
9986 goto is_legitimate_pic;
9988 case UNSPEC_GOTTPOFF:
9989 case UNSPEC_GOTNTPOFF:
9990 case UNSPEC_INDNTPOFF:
9996 /* Invalid address unspec. */
10000 else if (SYMBOLIC_CONST (disp)
10004 && MACHOPIC_INDIRECT
10005 && !machopic_operand_p (disp)
10011 if (TARGET_64BIT && (index || base))
10013 /* foo@dtpoff(%rX) is ok. */
10014 if (GET_CODE (disp) != CONST
10015 || GET_CODE (XEXP (disp, 0)) != PLUS
10016 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
10017 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
10018 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
10019 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
10020 /* Non-constant pic memory reference. */
10023 else if (! legitimate_pic_address_disp_p (disp))
10024 /* Displacement is an invalid pic construct. */
10027 /* This code used to verify that a symbolic pic displacement
10028 includes the pic_offset_table_rtx register.
10030 While this is a good idea, unfortunately these constructs may
10031 be created by the "adds using lea" optimization for incorrect
10032 code like:
10034 int a;
10035 int foo (int i)
10036 {
10037 return *(&a + i);
10038 }
10040 This code is nonsensical, but results in addressing the
10041 GOT table with a pic_offset_table_rtx base.  We can't
10042 just refuse it easily, since it gets matched by the
10043 "addsi3" pattern, which later gets split to an lea when the
10044 output register differs from the input.  While this
10045 could be handled by a separate addsi pattern for this case
10046 that never results in an lea, disabling this test seems to be
10047 the easier and correct fix for the crash.  */
10049 else if (GET_CODE (disp) != LABEL_REF
10050 && !CONST_INT_P (disp)
10051 && (GET_CODE (disp) != CONST
10052 || !legitimate_constant_p (disp))
10053 && (GET_CODE (disp) != SYMBOL_REF
10054 || !legitimate_constant_p (disp)))
10055 /* Displacement is not constant. */
10057 else if (TARGET_64BIT
10058 && !x86_64_immediate_operand (disp, VOIDmode))
10059 /* Displacement is out of range. */
10063 /* Everything looks valid. */
10067 /* Determine if a given RTX is a valid constant address. */
10070 constant_address_p (rtx x)
10072 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
10075 /* Return a unique alias set for the GOT. */
10077 static alias_set_type
10078 ix86_GOT_alias_set (void)
10080 static alias_set_type set = -1;
10082 set = new_alias_set ();
10086 /* Return a legitimate reference for ORIG (an address) using the
10087 register REG. If REG is 0, a new pseudo is generated.
10089 There are two types of references that must be handled:
10091 1. Global data references must load the address from the GOT, via
10092 the PIC reg.  An insn is emitted to do this load, and the reg is
10093 used in the address.
10095 2. Static data references, constant pool addresses, and code labels
10096 compute the address as an offset from the GOT, whose base is in
10097 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
10098 differentiate them from global data objects. The returned
10099 address is the PIC reg + an unspec constant.
10101 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
10102 reg also appears in the address. */
10105 legitimize_pic_address (rtx orig, rtx reg)
10108 rtx new_rtx = orig;
10112 if (TARGET_MACHO && !TARGET_64BIT)
10115 reg = gen_reg_rtx (Pmode);
10116 /* Use the generic Mach-O PIC machinery. */
10117 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
10121 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
10123 else if (TARGET_64BIT
10124 && ix86_cmodel != CM_SMALL_PIC
10125 && gotoff_operand (addr, Pmode))
10128 /* This symbol may be referenced via a displacement from the PIC
10129 base address (@GOTOFF). */
10131 if (reload_in_progress)
10132 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10133 if (GET_CODE (addr) == CONST)
10134 addr = XEXP (addr, 0);
10135 if (GET_CODE (addr) == PLUS)
10137 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10139 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10142 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10143 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10145 tmpreg = gen_reg_rtx (Pmode);
10148 emit_move_insn (tmpreg, new_rtx);
10152 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
10153 tmpreg, 1, OPTAB_DIRECT);
10156 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
10158 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
10160 /* This symbol may be referenced via a displacement from the PIC
10161 base address (@GOTOFF). */
10163 if (reload_in_progress)
10164 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10165 if (GET_CODE (addr) == CONST)
10166 addr = XEXP (addr, 0);
10167 if (GET_CODE (addr) == PLUS)
10169 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10171 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10174 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10175 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10176 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10180 emit_move_insn (reg, new_rtx);
10184 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
10185 /* We can't use @GOTOFF for text labels on VxWorks;
10186 see gotoff_operand. */
10187 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
10189 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10191 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
10192 return legitimize_dllimport_symbol (addr, true);
10193 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
10194 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
10195 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
10197 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
10198 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
10202 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
10204 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
10205 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10206 new_rtx = gen_const_mem (Pmode, new_rtx);
10207 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10210 reg = gen_reg_rtx (Pmode);
10211 /* Use gen_movsi directly; otherwise the address is loaded
10212 into a register for CSE.  We don't want to CSE these addresses;
10213 instead we CSE addresses from the GOT table, so skip this.  */
10214 emit_insn (gen_movsi (reg, new_rtx));
10219 /* This symbol must be referenced via a load from the
10220 Global Offset Table (@GOT). */
10222 if (reload_in_progress)
10223 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10224 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
10225 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10227 new_rtx = force_reg (Pmode, new_rtx);
10228 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10229 new_rtx = gen_const_mem (Pmode, new_rtx);
10230 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10233 reg = gen_reg_rtx (Pmode);
10234 emit_move_insn (reg, new_rtx);
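/* For illustration: on ia32 the GOT load built above typically assembles
   to "movl sym@GOT(%ebx), %reg", while the 64-bit small-PIC path earlier
   uses the RIP-relative "movq sym@GOTPCREL(%rip), %reg" form.  */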
10240 if (CONST_INT_P (addr)
10241 && !x86_64_immediate_operand (addr, VOIDmode))
10245 emit_move_insn (reg, addr);
10249 new_rtx = force_reg (Pmode, addr);
10251 else if (GET_CODE (addr) == CONST)
10253 addr = XEXP (addr, 0);
10255 /* We must match stuff we generate before. Assume the only
10256 unspecs that can get here are ours. Not that we could do
10257 anything with them anyway.... */
10258 if (GET_CODE (addr) == UNSPEC
10259 || (GET_CODE (addr) == PLUS
10260 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
10262 gcc_assert (GET_CODE (addr) == PLUS);
10264 if (GET_CODE (addr) == PLUS)
10266 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
10268 /* Check first to see if this is a constant offset from a @GOTOFF
10269 symbol reference. */
10270 if (gotoff_operand (op0, Pmode)
10271 && CONST_INT_P (op1))
10275 if (reload_in_progress)
10276 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10277 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
10279 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
10280 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10281 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10285 emit_move_insn (reg, new_rtx);
10291 if (INTVAL (op1) < -16*1024*1024
10292 || INTVAL (op1) >= 16*1024*1024)
10294 if (!x86_64_immediate_operand (op1, Pmode))
10295 op1 = force_reg (Pmode, op1);
10296 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
10302 base = legitimize_pic_address (XEXP (addr, 0), reg);
10303 new_rtx = legitimize_pic_address (XEXP (addr, 1),
10304 base == reg ? NULL_RTX : reg);
10306 if (CONST_INT_P (new_rtx))
10307 new_rtx = plus_constant (base, INTVAL (new_rtx));
10310 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
10312 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
10313 new_rtx = XEXP (new_rtx, 1);
10315 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
10323 /* Load the thread pointer. If TO_REG is true, force it into a register. */
10326 get_thread_pointer (int to_reg)
10330 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
10334 reg = gen_reg_rtx (Pmode);
10335 insn = gen_rtx_SET (VOIDmode, reg, tp);
10336 insn = emit_insn (insn);
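/* Illustrative sketch (not taken from this file): on GNU/Linux targets
   the UNSPEC_TP pattern built above is ultimately emitted as a
   segment-register load of the thread pointer, e.g.

	movl	%gs:0, %eax	# 32-bit
	movq	%fs:0, %rax	# 64-bit

   The exact sequence is defined by the target's tls patterns.  */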
10341 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
10342 false if we expect this to be used for a memory address and true if
10343 we expect to load the address into a register. */
10346 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
10348 rtx dest, base, off, pic, tp;
10353 case TLS_MODEL_GLOBAL_DYNAMIC:
10354 dest = gen_reg_rtx (Pmode);
10355 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10357 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10359 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
10362 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
10363 insns = get_insns ();
10366 RTL_CONST_CALL_P (insns) = 1;
10367 emit_libcall_block (insns, dest, rax, x);
10369 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10370 emit_insn (gen_tls_global_dynamic_64 (dest, x));
10372 emit_insn (gen_tls_global_dynamic_32 (dest, x));
10374 if (TARGET_GNU2_TLS)
10376 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
10378 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10382 case TLS_MODEL_LOCAL_DYNAMIC:
10383 base = gen_reg_rtx (Pmode);
10384 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10386 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10388 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
10391 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
10392 insns = get_insns ();
10395 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
10396 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
10397 RTL_CONST_CALL_P (insns) = 1;
10398 emit_libcall_block (insns, base, rax, note);
10400 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10401 emit_insn (gen_tls_local_dynamic_base_64 (base));
10403 emit_insn (gen_tls_local_dynamic_base_32 (base));
10405 if (TARGET_GNU2_TLS)
10407 rtx x = ix86_tls_module_base ();
10409 set_unique_reg_note (get_last_insn (), REG_EQUIV,
10410 gen_rtx_MINUS (Pmode, x, tp));
10413 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
10414 off = gen_rtx_CONST (Pmode, off);
10416 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
10418 if (TARGET_GNU2_TLS)
10420 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
10422 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10427 case TLS_MODEL_INITIAL_EXEC:
10431 type = UNSPEC_GOTNTPOFF;
10435 if (reload_in_progress)
10436 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10437 pic = pic_offset_table_rtx;
10438 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
10440 else if (!TARGET_ANY_GNU_TLS)
10442 pic = gen_reg_rtx (Pmode);
10443 emit_insn (gen_set_got (pic));
10444 type = UNSPEC_GOTTPOFF;
10449 type = UNSPEC_INDNTPOFF;
10452 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
10453 off = gen_rtx_CONST (Pmode, off);
10455 off = gen_rtx_PLUS (Pmode, pic, off);
10456 off = gen_const_mem (Pmode, off);
10457 set_mem_alias_set (off, ix86_GOT_alias_set ());
10459 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10461 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10462 off = force_reg (Pmode, off);
10463 return gen_rtx_PLUS (Pmode, base, off);
10467 base = get_thread_pointer (true);
10468 dest = gen_reg_rtx (Pmode);
10469 emit_insn (gen_subsi3 (dest, base, off));
10473 case TLS_MODEL_LOCAL_EXEC:
10474 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
10475 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10476 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
10477 off = gen_rtx_CONST (Pmode, off);
10479 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10481 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10482 return gen_rtx_PLUS (Pmode, base, off);
10486 base = get_thread_pointer (true);
10487 dest = gen_reg_rtx (Pmode);
10488 emit_insn (gen_subsi3 (dest, base, off));
10493 gcc_unreachable ();
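/* For reference, the cases above correspond to the usual GNU/Linux TLS
   access sequences.  Illustrative only; the precise instructions,
   relocations, and padding bytes are dictated by the md patterns and
   the psABI:

     global dynamic (64-bit, padding bytes omitted):
	leaq	x@tlsgd(%rip), %rdi
	call	__tls_get_addr@plt

     initial exec (64-bit):
	movq	x@gottpoff(%rip), %rax
	movq	%fs:(%rax), %rax

     local exec (64-bit):
	movq	%fs:0, %rax
	leaq	x@tpoff(%rax), %rax  */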
10499 /* Create or return the unique __imp_DECL dllimport symbol corresponding
to symbol DECL. */
10502 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
10503 htab_t dllimport_map;
10506 get_dllimport_decl (tree decl)
10508 struct tree_map *h, in;
10511 const char *prefix;
10512 size_t namelen, prefixlen;
10517 if (!dllimport_map)
10518 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
10520 in.hash = htab_hash_pointer (decl);
10521 in.base.from = decl;
10522 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
10523 h = (struct tree_map *) *loc;
10527 *loc = h = GGC_NEW (struct tree_map);
10529 h->base.from = decl;
10530 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
10531 VAR_DECL, NULL, ptr_type_node);
10532 DECL_ARTIFICIAL (to) = 1;
10533 DECL_IGNORED_P (to) = 1;
10534 DECL_EXTERNAL (to) = 1;
10535 TREE_READONLY (to) = 1;
10537 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10538 name = targetm.strip_name_encoding (name);
10539 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
10540 ? "*__imp_" : "*__imp__";
10541 namelen = strlen (name);
10542 prefixlen = strlen (prefix);
10543 imp_name = (char *) alloca (namelen + prefixlen + 1);
10544 memcpy (imp_name, prefix, prefixlen);
10545 memcpy (imp_name + prefixlen, name, namelen + 1);
10547 name = ggc_alloc_string (imp_name, namelen + prefixlen);
10548 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
10549 SET_SYMBOL_REF_DECL (rtl, to);
10550 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
10552 rtl = gen_const_mem (Pmode, rtl);
10553 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
10555 SET_DECL_RTL (to, rtl);
10556 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
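/* For example, a decl whose assembler name is "foo" gets the import
   symbol "*__imp__foo" here, or "*__imp_foo" when there is no user
   label prefix or the name carries the fastcall prefix; this matches
   the import-table entries created by the PE linker.  */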
10561 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
10562 true if we require the result be a register. */
10565 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
10570 gcc_assert (SYMBOL_REF_DECL (symbol));
10571 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
10573 x = DECL_RTL (imp_decl);
10575 x = force_reg (Pmode, x);
10579 /* Try machine-dependent ways of modifying an illegitimate address
10580 to be legitimate. If we find one, return the new, valid address.
10581 This macro is used in only one place: `memory_address' in explow.c.
10583 OLDX is the address as it was before break_out_memory_refs was called.
10584 In some cases it is useful to look at this to decide what needs to be done.
10586 It is always safe for this macro to do nothing. It exists to recognize
10587 opportunities to optimize the output.
10589 For the 80386, we handle X+REG by loading X into a register R and
10590 using R+REG. R will go in a general reg and indexing will be used.
10591 However, if REG is a broken-out memory address or multiplication,
10592 nothing needs to be done because REG can certainly go in a general reg.
10594 When -fpic is used, special handling is needed for symbolic references.
10595 See comments by legitimize_pic_address in i386.c for details. */
10598 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
10599 enum machine_mode mode)
10604 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
10606 return legitimize_tls_address (x, (enum tls_model) log, false);
10607 if (GET_CODE (x) == CONST
10608 && GET_CODE (XEXP (x, 0)) == PLUS
10609 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10610 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
10612 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
10613 (enum tls_model) log, false);
10614 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10617 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10619 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
10620 return legitimize_dllimport_symbol (x, true);
10621 if (GET_CODE (x) == CONST
10622 && GET_CODE (XEXP (x, 0)) == PLUS
10623 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10624 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
10626 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
10627 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10631 if (flag_pic && SYMBOLIC_CONST (x))
10632 return legitimize_pic_address (x, 0);
10634 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
10635 if (GET_CODE (x) == ASHIFT
10636 && CONST_INT_P (XEXP (x, 1))
10637 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
10640 log = INTVAL (XEXP (x, 1));
10641 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
10642 GEN_INT (1 << log));
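/* For instance, (ashift (reg) (const_int 3)) becomes
   (mult (reg) (const_int 8)), which matches the scaled-index part of
   the 386 addressing modes.  */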
10645 if (GET_CODE (x) == PLUS)
10647 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
10649 if (GET_CODE (XEXP (x, 0)) == ASHIFT
10650 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10651 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
10654 log = INTVAL (XEXP (XEXP (x, 0), 1));
10655 XEXP (x, 0) = gen_rtx_MULT (Pmode,
10656 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
10657 GEN_INT (1 << log));
10660 if (GET_CODE (XEXP (x, 1)) == ASHIFT
10661 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10662 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
10665 log = INTVAL (XEXP (XEXP (x, 1), 1));
10666 XEXP (x, 1) = gen_rtx_MULT (Pmode,
10667 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
10668 GEN_INT (1 << log));
10671 /* Put multiply first if it isn't already. */
10672 if (GET_CODE (XEXP (x, 1)) == MULT)
10674 rtx tmp = XEXP (x, 0);
10675 XEXP (x, 0) = XEXP (x, 1);
10680 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
10681 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
10682 created by virtual register instantiation, register elimination, and
10683 similar optimizations. */
10684 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
10687 x = gen_rtx_PLUS (Pmode,
10688 gen_rtx_PLUS (Pmode, XEXP (x, 0),
10689 XEXP (XEXP (x, 1), 0)),
10690 XEXP (XEXP (x, 1), 1));
10694 /* Canonicalize (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
10695 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
10696 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
10697 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10698 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
10699 && CONSTANT_P (XEXP (x, 1)))
10702 rtx other = NULL_RTX;
10704 if (CONST_INT_P (XEXP (x, 1)))
10706 constant = XEXP (x, 1);
10707 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
10709 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
10711 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
10712 other = XEXP (x, 1);
10720 x = gen_rtx_PLUS (Pmode,
10721 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
10722 XEXP (XEXP (XEXP (x, 0), 1), 0)),
10723 plus_constant (other, INTVAL (constant)));
10727 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10730 if (GET_CODE (XEXP (x, 0)) == MULT)
10733 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
10736 if (GET_CODE (XEXP (x, 1)) == MULT)
10739 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
10743 && REG_P (XEXP (x, 1))
10744 && REG_P (XEXP (x, 0)))
10747 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
10750 x = legitimize_pic_address (x, 0);
10753 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10756 if (REG_P (XEXP (x, 0)))
10758 rtx temp = gen_reg_rtx (Pmode);
10759 rtx val = force_operand (XEXP (x, 1), temp);
10761 emit_move_insn (temp, val);
10763 XEXP (x, 1) = temp;
10767 else if (REG_P (XEXP (x, 1)))
10769 rtx temp = gen_reg_rtx (Pmode);
10770 rtx val = force_operand (XEXP (x, 0), temp);
10772 emit_move_insn (temp, val);
10774 XEXP (x, 0) = temp;
10782 /* Print an integer constant expression in assembler syntax. Addition
10783 and subtraction are the only arithmetic that may appear in these
10784 expressions. FILE is the stdio stream to write to, X is the rtx, and
10785 CODE is the operand print code from the output string. */
10788 output_pic_addr_const (FILE *file, rtx x, int code)
10792 switch (GET_CODE (x))
10795 gcc_assert (flag_pic);
10800 if (! TARGET_MACHO || TARGET_64BIT)
10801 output_addr_const (file, x);
10804 const char *name = XSTR (x, 0);
10806 /* Mark the decl as referenced so that cgraph will
10807 output the function. */
10808 if (SYMBOL_REF_DECL (x))
10809 mark_decl_referenced (SYMBOL_REF_DECL (x));
10812 if (MACHOPIC_INDIRECT
10813 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
10814 name = machopic_indirection_name (x, /*stub_p=*/true);
10816 assemble_name (file, name);
10818 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
10819 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
10820 fputs ("@PLT", file);
10827 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
10828 assemble_name (asm_out_file, buf);
10832 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
10836 /* This used to output parentheses around the expression,
10837 but that does not work on the 386 (either ATT or BSD assembler). */
10838 output_pic_addr_const (file, XEXP (x, 0), code);
10842 if (GET_MODE (x) == VOIDmode)
10844 /* We can use %d if the number is <32 bits and positive. */
10845 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
10846 fprintf (file, "0x%lx%08lx",
10847 (unsigned long) CONST_DOUBLE_HIGH (x),
10848 (unsigned long) CONST_DOUBLE_LOW (x));
10850 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
10853 /* We can't handle floating point constants;
10854 PRINT_OPERAND must handle them. */
10855 output_operand_lossage ("floating constant misused");
10859 /* Some assemblers need integer constants to appear first. */
10860 if (CONST_INT_P (XEXP (x, 0)))
10862 output_pic_addr_const (file, XEXP (x, 0), code);
10864 output_pic_addr_const (file, XEXP (x, 1), code);
10868 gcc_assert (CONST_INT_P (XEXP (x, 1)));
10869 output_pic_addr_const (file, XEXP (x, 1), code);
10871 output_pic_addr_const (file, XEXP (x, 0), code);
10877 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
10878 output_pic_addr_const (file, XEXP (x, 0), code);
10880 output_pic_addr_const (file, XEXP (x, 1), code);
10882 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
10886 gcc_assert (XVECLEN (x, 0) == 1);
10887 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
10888 switch (XINT (x, 1))
10891 fputs ("@GOT", file);
10893 case UNSPEC_GOTOFF:
10894 fputs ("@GOTOFF", file);
10896 case UNSPEC_PLTOFF:
10897 fputs ("@PLTOFF", file);
10899 case UNSPEC_GOTPCREL:
10900 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10901 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
10903 case UNSPEC_GOTTPOFF:
10904 /* FIXME: This might be @TPOFF in Sun ld too. */
10905 fputs ("@gottpoff", file);
10908 fputs ("@tpoff", file);
10910 case UNSPEC_NTPOFF:
10912 fputs ("@tpoff", file);
10914 fputs ("@ntpoff", file);
10916 case UNSPEC_DTPOFF:
10917 fputs ("@dtpoff", file);
10919 case UNSPEC_GOTNTPOFF:
10921 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10922 "@gottpoff(%rip)": "@gottpoff[rip]", file);
10924 fputs ("@gotntpoff", file);
10926 case UNSPEC_INDNTPOFF:
10927 fputs ("@indntpoff", file);
10930 case UNSPEC_MACHOPIC_OFFSET:
10932 machopic_output_function_base_name (file);
10936 output_operand_lossage ("invalid UNSPEC as operand");
10942 output_operand_lossage ("invalid expression as operand");
10946 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10947 We need to emit DTP-relative relocations. */
10949 static void ATTRIBUTE_UNUSED
10950 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
10952 fputs (ASM_LONG, file);
10953 output_addr_const (file, x);
10954 fputs ("@dtpoff", file);
10960 fputs (", 0", file);
10963 gcc_unreachable ();
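/* E.g., for SIZE == 4 this emits

	.long	x@dtpoff

   assuming ASM_LONG expands to ".long"; the ", 0" above pads the
   8-byte case out with a zero upper half.  */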
10967 /* Return true if X is a representation of the PIC register. This copes
10968 with calls from ix86_find_base_term, where the register might have
10969 been replaced by a cselib value. */
10972 ix86_pic_register_p (rtx x)
10974 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
10975 return (pic_offset_table_rtx
10976 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
10978 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
10981 /* In the name of slightly smaller debug output, and to cater to
10982 general assembler lossage, recognize PIC+GOTOFF and turn it back
10983 into a direct symbol reference.
10985 On Darwin, this is necessary to avoid a crash, because Darwin
10986 has a different PIC label for each routine but the DWARF debugging
10987 information is not associated with any particular routine, so it's
10988 necessary to remove references to the PIC label from RTL stored by
10989 the DWARF output code. */
10992 ix86_delegitimize_address (rtx x)
10994 rtx orig_x = delegitimize_mem_from_attrs (x);
10995 /* addend is NULL or some rtx if x is something+GOTOFF where
10996 something doesn't include the PIC register. */
10997 rtx addend = NULL_RTX;
10998 /* reg_addend is NULL or a multiple of some register. */
10999 rtx reg_addend = NULL_RTX;
11000 /* const_addend is NULL or a const_int. */
11001 rtx const_addend = NULL_RTX;
11002 /* This is the result, or NULL. */
11003 rtx result = NULL_RTX;
11012 if (GET_CODE (x) != CONST
11013 || GET_CODE (XEXP (x, 0)) != UNSPEC
11014 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
11015 || !MEM_P (orig_x))
11017 return XVECEXP (XEXP (x, 0), 0, 0);
11020 if (GET_CODE (x) != PLUS
11021 || GET_CODE (XEXP (x, 1)) != CONST)
11024 if (ix86_pic_register_p (XEXP (x, 0)))
11025 /* %ebx + GOT/GOTOFF */
11027 else if (GET_CODE (XEXP (x, 0)) == PLUS)
11029 /* %ebx + %reg * scale + GOT/GOTOFF */
11030 reg_addend = XEXP (x, 0);
11031 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
11032 reg_addend = XEXP (reg_addend, 1);
11033 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
11034 reg_addend = XEXP (reg_addend, 0);
11037 reg_addend = NULL_RTX;
11038 addend = XEXP (x, 0);
11042 addend = XEXP (x, 0);
11044 x = XEXP (XEXP (x, 1), 0);
11045 if (GET_CODE (x) == PLUS
11046 && CONST_INT_P (XEXP (x, 1)))
11048 const_addend = XEXP (x, 1);
11052 if (GET_CODE (x) == UNSPEC
11053 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
11054 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
11055 result = XVECEXP (x, 0, 0);
11057 if (TARGET_MACHO && darwin_local_data_pic (x)
11058 && !MEM_P (orig_x))
11059 result = XVECEXP (x, 0, 0);
11065 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
11067 result = gen_rtx_PLUS (Pmode, reg_addend, result);
11070 /* If the rest of original X doesn't involve the PIC register, add
11071 addend and subtract pic_offset_table_rtx. This can happen e.g.
11073 leal (%ebx, %ecx, 4), %ecx
11075 movl foo@GOTOFF(%ecx), %edx
11076 in which case we return (%ecx - %ebx) + foo. */
11077 if (pic_offset_table_rtx)
11078 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
11079 pic_offset_table_rtx),
11087 /* If X is a machine specific address (i.e. a symbol or label being
11088 referenced as a displacement from the GOT implemented using an
11089 UNSPEC), then return the base term. Otherwise return X. */
11092 ix86_find_base_term (rtx x)
11098 if (GET_CODE (x) != CONST)
11100 term = XEXP (x, 0);
11101 if (GET_CODE (term) == PLUS
11102 && (CONST_INT_P (XEXP (term, 1))
11103 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
11104 term = XEXP (term, 0);
11105 if (GET_CODE (term) != UNSPEC
11106 || XINT (term, 1) != UNSPEC_GOTPCREL)
11109 return XVECEXP (term, 0, 0);
11112 return ix86_delegitimize_address (x);
11116 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
11117 int fp, FILE *file)
11119 const char *suffix;
11121 if (mode == CCFPmode || mode == CCFPUmode)
11123 code = ix86_fp_compare_code_to_integer (code);
11127 code = reverse_condition (code);
11178 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
11182 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
11183 Those same assemblers have the same but opposite lossage on cmov. */
11184 if (mode == CCmode)
11185 suffix = fp ? "nbe" : "a";
11186 else if (mode == CCCmode)
11189 gcc_unreachable ();
11205 gcc_unreachable ();
11209 gcc_assert (mode == CCmode || mode == CCCmode);
11226 gcc_unreachable ();
11230 /* ??? As above. */
11231 gcc_assert (mode == CCmode || mode == CCCmode);
11232 suffix = fp ? "nb" : "ae";
11235 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
11239 /* ??? As above. */
11240 if (mode == CCmode)
11242 else if (mode == CCCmode)
11243 suffix = fp ? "nb" : "ae";
11245 gcc_unreachable ();
11248 suffix = fp ? "u" : "p";
11251 suffix = fp ? "nu" : "np";
11254 gcc_unreachable ();
11256 fputs (suffix, file);
11259 /* Print the name of register X to FILE based on its machine mode and number.
11260 If CODE is 'w', pretend the mode is HImode.
11261 If CODE is 'b', pretend the mode is QImode.
11262 If CODE is 'k', pretend the mode is SImode.
11263 If CODE is 'q', pretend the mode is DImode.
11264 If CODE is 'x', pretend the mode is V4SFmode.
11265 If CODE is 't', pretend the mode is V8SFmode.
11266 If CODE is 'h', pretend the reg is the 'high' byte register.
11267 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op.
11268 If CODE is 'd', duplicate the operand for AVX instruction.
11272 print_reg (rtx x, int code, FILE *file)
11275 bool duplicated = code == 'd' && TARGET_AVX;
11277 gcc_assert (x == pc_rtx
11278 || (REGNO (x) != ARG_POINTER_REGNUM
11279 && REGNO (x) != FRAME_POINTER_REGNUM
11280 && REGNO (x) != FLAGS_REG
11281 && REGNO (x) != FPSR_REG
11282 && REGNO (x) != FPCR_REG));
11284 if (ASSEMBLER_DIALECT == ASM_ATT)
11289 gcc_assert (TARGET_64BIT);
11290 fputs ("rip", file);
11294 if (code == 'w' || MMX_REG_P (x))
11296 else if (code == 'b')
11298 else if (code == 'k')
11300 else if (code == 'q')
11302 else if (code == 'y')
11304 else if (code == 'h')
11306 else if (code == 'x')
11308 else if (code == 't')
11311 code = GET_MODE_SIZE (GET_MODE (x));
11313 /* Irritatingly, AMD extended registers use a different naming
11314 convention from the normal registers. */
11315 if (REX_INT_REG_P (x))
11317 gcc_assert (TARGET_64BIT);
11321 error ("extended registers have no high halves");
11324 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
11327 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
11330 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
11333 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
11336 error ("unsupported operand size for extended register");
11346 if (STACK_TOP_P (x))
11355 if (! ANY_FP_REG_P (x))
11356 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
11361 reg = hi_reg_name[REGNO (x)];
11364 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
11366 reg = qi_reg_name[REGNO (x)];
11369 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
11371 reg = qi_high_reg_name[REGNO (x)];
11376 gcc_assert (!duplicated);
11378 fputs (hi_reg_name[REGNO (x)] + 1, file);
11383 gcc_unreachable ();
11389 if (ASSEMBLER_DIALECT == ASM_ATT)
11390 fprintf (file, ", %%%s", reg);
11392 fprintf (file, ", %s", reg);
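/* Thus, given hard register 0 (ax) and code 'k', print_reg writes
   "eax" ("%eax" in AT&T syntax); with code 'b' it writes "al", and so
   on per the table above.  */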
11396 /* Locate some local-dynamic symbol still in use by this function
11397 so that we can print its name in some tls_local_dynamic_base
pattern. */
11401 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11405 if (GET_CODE (x) == SYMBOL_REF
11406 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11408 cfun->machine->some_ld_name = XSTR (x, 0);
11415 static const char *
11416 get_some_local_dynamic_name (void)
11420 if (cfun->machine->some_ld_name)
11421 return cfun->machine->some_ld_name;
11423 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11424 if (NONDEBUG_INSN_P (insn)
11425 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
11426 return cfun->machine->some_ld_name;
11431 /* Meaning of CODE:
11432 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
11433 C -- print opcode suffix for set/cmov insn.
11434 c -- like C, but print reversed condition
11435 F,f -- likewise, but for floating-point.
11436 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
11438 R -- print the prefix for register names.
11439 z -- print the opcode suffix for the size of the current operand.
11440 Z -- likewise, with special suffixes for x87 instructions.
11441 * -- print a star (in certain assembler syntax)
11442 A -- print an absolute memory reference.
11443 w -- print the operand as if it's a "word" (HImode) even if it isn't.
11444 s -- print a shift double count, followed by the assembler's argument
11446 b -- print the QImode name of the register for the indicated operand.
11447 %b0 would print %al if operands[0] is reg 0.
11448 w -- likewise, print the HImode name of the register.
11449 k -- likewise, print the SImode name of the register.
11450 q -- likewise, print the DImode name of the register.
11451 x -- likewise, print the V4SFmode name of the register.
11452 t -- likewise, print the V8SFmode name of the register.
11453 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
11454 y -- print "st(0)" instead of "st" as a register.
11455 d -- print duplicated register operand for AVX instruction.
11456 D -- print condition for SSE cmp instruction.
11457 P -- if PIC, print an @PLT suffix.
11458 X -- don't print any sort of PIC '@' suffix for a symbol.
11459 & -- print some in-use local-dynamic symbol name.
11460 H -- print a memory address offset by 8; used for sse high-parts
11461 Y -- print condition for XOP pcom* instruction.
11462 + -- print a branch hint as 'cs' or 'ds' prefix
11463 ; -- print a semicolon (after prefixes due to a bug in older gas).
11467 print_operand (FILE *file, rtx x, int code)
11474 if (ASSEMBLER_DIALECT == ASM_ATT)
11480 const char *name = get_some_local_dynamic_name ();
11482 output_operand_lossage ("'%%&' used without any "
11483 "local dynamic TLS references");
11485 assemble_name (file, name);
11490 switch (ASSEMBLER_DIALECT)
11497 /* Intel syntax. For absolute addresses, registers should not
11498 be surrounded by braces. */
11502 PRINT_OPERAND (file, x, 0);
11509 gcc_unreachable ();
11512 PRINT_OPERAND (file, x, 0);
11517 if (ASSEMBLER_DIALECT == ASM_ATT)
11522 if (ASSEMBLER_DIALECT == ASM_ATT)
11527 if (ASSEMBLER_DIALECT == ASM_ATT)
11532 if (ASSEMBLER_DIALECT == ASM_ATT)
11537 if (ASSEMBLER_DIALECT == ASM_ATT)
11542 if (ASSEMBLER_DIALECT == ASM_ATT)
11547 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11549 /* Opcodes don't get size suffixes if using Intel opcodes. */
11550 if (ASSEMBLER_DIALECT == ASM_INTEL)
11553 switch (GET_MODE_SIZE (GET_MODE (x)))
11572 output_operand_lossage
11573 ("invalid operand size for operand code '%c'", code);
11578 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11580 warning (0, "non-integer operand used with operand code '%c'", code);
11584 /* 387 opcodes don't get size suffixes if using Intel opcodes. */
11585 if (ASSEMBLER_DIALECT == ASM_INTEL)
11588 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11590 switch (GET_MODE_SIZE (GET_MODE (x)))
11593 #ifdef HAVE_AS_IX86_FILDS
11603 #ifdef HAVE_AS_IX86_FILDQ
11606 fputs ("ll", file);
11614 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11616 /* 387 opcodes don't get size suffixes
11617 if the operands are registers. */
11618 if (STACK_REG_P (x))
11621 switch (GET_MODE_SIZE (GET_MODE (x)))
11642 output_operand_lossage
11643 ("invalid operand type used with operand code '%c'", code);
11647 output_operand_lossage
11648 ("invalid operand size for operand code '%c'", code);
11665 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
11667 PRINT_OPERAND (file, x, 0);
11668 fputs (", ", file);
11673 /* A little bit of braindamage here. The SSE compare instructions
11674 use completely different names for the comparisons than the
11675 fp conditional moves do. */
11678 switch (GET_CODE (x))
11681 fputs ("eq", file);
11684 fputs ("eq_us", file);
11687 fputs ("lt", file);
11690 fputs ("nge", file);
11693 fputs ("le", file);
11696 fputs ("ngt", file);
11699 fputs ("unord", file);
11702 fputs ("neq", file);
11705 fputs ("neq_oq", file);
11708 fputs ("ge", file);
11711 fputs ("nlt", file);
11714 fputs ("gt", file);
11717 fputs ("nle", file);
11720 fputs ("ord", file);
11723 output_operand_lossage ("operand is not a condition code, "
11724 "invalid operand code 'D'");
11730 switch (GET_CODE (x))
11734 fputs ("eq", file);
11738 fputs ("lt", file);
11742 fputs ("le", file);
11745 fputs ("unord", file);
11749 fputs ("neq", file);
11753 fputs ("nlt", file);
11757 fputs ("nle", file);
11760 fputs ("ord", file);
11763 output_operand_lossage ("operand is not a condition code, "
11764 "invalid operand code 'D'");
11770 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11771 if (ASSEMBLER_DIALECT == ASM_ATT)
11773 switch (GET_MODE (x))
11775 case HImode: putc ('w', file); break;
11777 case SFmode: putc ('l', file); break;
11779 case DFmode: putc ('q', file); break;
11780 default: gcc_unreachable ();
11787 if (!COMPARISON_P (x))
11789 output_operand_lossage ("operand is neither a constant nor a "
11790 "condition code, invalid operand code "
11794 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
11797 if (!COMPARISON_P (x))
11799 output_operand_lossage ("operand is neither a constant nor a "
11800 "condition code, invalid operand code "
11804 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11805 if (ASSEMBLER_DIALECT == ASM_ATT)
11808 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
11811 /* Like above, but reverse condition */
11813 /* Check to see if argument to %c is really a constant
11814 and not a condition code which needs to be reversed. */
11815 if (!COMPARISON_P (x))
11817 output_operand_lossage ("operand is neither a constant nor a "
11818 "condition code, invalid operand "
11822 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
11825 if (!COMPARISON_P (x))
11827 output_operand_lossage ("operand is neither a constant nor a "
11828 "condition code, invalid operand "
11832 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11833 if (ASSEMBLER_DIALECT == ASM_ATT)
11836 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
11840 /* It doesn't actually matter what mode we use here, as we're
11841 only going to use this for printing. */
11842 x = adjust_address_nv (x, DImode, 8);
11850 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
11853 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
11856 int pred_val = INTVAL (XEXP (x, 0));
11858 if (pred_val < REG_BR_PROB_BASE * 45 / 100
11859 || pred_val > REG_BR_PROB_BASE * 55 / 100)
11861 int taken = pred_val > REG_BR_PROB_BASE / 2;
11862 int cputaken = final_forward_branch_p (current_output_insn) == 0;
11864 /* Emit hints only in the case where the default branch prediction
11865 heuristics would fail. */
11866 if (taken != cputaken)
11868 /* We use 3e (DS) prefix for taken branches and
11869 2e (CS) prefix for not taken branches. */
11871 fputs ("ds ; ", file);
11873 fputs ("cs ; ", file);
11881 switch (GET_CODE (x))
11884 fputs ("neq", file);
11887 fputs ("eq", file);
11891 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
11895 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
11899 fputs ("le", file);
11903 fputs ("lt", file);
11906 fputs ("unord", file);
11909 fputs ("ord", file);
11912 fputs ("ueq", file);
11915 fputs ("nlt", file);
11918 fputs ("nle", file);
11921 fputs ("ule", file);
11924 fputs ("ult", file);
11927 fputs ("une", file);
11930 output_operand_lossage ("operand is not a condition code, "
11931 "invalid operand code 'Y'");
11938 fputs (" ; ", file);
11945 output_operand_lossage ("invalid operand code '%c'", code);
11950 print_reg (x, code, file);
11952 else if (MEM_P (x))
11954 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
11955 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
11956 && GET_MODE (x) != BLKmode)
11959 switch (GET_MODE_SIZE (GET_MODE (x)))
11961 case 1: size = "BYTE"; break;
11962 case 2: size = "WORD"; break;
11963 case 4: size = "DWORD"; break;
11964 case 8: size = "QWORD"; break;
11965 case 12: size = "TBYTE"; break;
11967 if (GET_MODE (x) == XFmode)
11972 case 32: size = "YMMWORD"; break;
11974 gcc_unreachable ();
11977 /* Check for explicit size override (codes 'b', 'w' and 'k') */
11980 else if (code == 'w')
11982 else if (code == 'k')
11985 fputs (size, file);
11986 fputs (" PTR ", file);
11990 /* Avoid (%rip) for call operands. */
11991 if (CONSTANT_ADDRESS_P (x) && code == 'P'
11992 && !CONST_INT_P (x))
11993 output_addr_const (file, x);
11994 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
11995 output_operand_lossage ("invalid constraints for operand");
11997 output_address (x);
12000 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
12005 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
12006 REAL_VALUE_TO_TARGET_SINGLE (r, l);
12008 if (ASSEMBLER_DIALECT == ASM_ATT)
12010 fprintf (file, "%#08lx", (long unsigned int) l);
12013 /* These float cases don't actually occur as immediate operands. */
12014 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
12018 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12019 fputs (dstr, file);
12022 else if (GET_CODE (x) == CONST_DOUBLE
12023 && GET_MODE (x) == XFmode)
12027 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12028 fputs (dstr, file);
12033 /* We have patterns that allow zero sets of memory, for instance.
12034 In 64-bit mode, we should probably support all 8-byte vectors,
12035 since we can in fact encode that into an immediate. */
12036 if (GET_CODE (x) == CONST_VECTOR)
12038 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
12044 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
12046 if (ASSEMBLER_DIALECT == ASM_ATT)
12049 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
12050 || GET_CODE (x) == LABEL_REF)
12052 if (ASSEMBLER_DIALECT == ASM_ATT)
12055 fputs ("OFFSET FLAT:", file);
12058 if (CONST_INT_P (x))
12059 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
12061 output_pic_addr_const (file, x, code);
12063 output_addr_const (file, x);
12067 /* Print a memory operand whose address is ADDR. */
12070 print_operand_address (FILE *file, rtx addr)
12072 struct ix86_address parts;
12073 rtx base, index, disp;
12075 int ok = ix86_decompose_address (addr, &parts);
12080 index = parts.index;
12082 scale = parts.scale;
12090 if (ASSEMBLER_DIALECT == ASM_ATT)
12092 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
12095 gcc_unreachable ();
12098 /* Use one byte shorter RIP relative addressing for 64bit mode. */
12099 if (TARGET_64BIT && !base && !index)
12103 if (GET_CODE (disp) == CONST
12104 && GET_CODE (XEXP (disp, 0)) == PLUS
12105 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12106 symbol = XEXP (XEXP (disp, 0), 0);
12108 if (GET_CODE (symbol) == LABEL_REF
12109 || (GET_CODE (symbol) == SYMBOL_REF
12110 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
12113 if (!base && !index)
12115 /* Displacement only requires special attention. */
12117 if (CONST_INT_P (disp))
12119 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
12120 fputs ("ds:", file);
12121 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
12124 output_pic_addr_const (file, disp, 0);
12126 output_addr_const (file, disp);
12130 if (ASSEMBLER_DIALECT == ASM_ATT)
12135 output_pic_addr_const (file, disp, 0);
12136 else if (GET_CODE (disp) == LABEL_REF)
12137 output_asm_label (disp);
12139 output_addr_const (file, disp);
12144 print_reg (base, 0, file);
12148 print_reg (index, 0, file);
12150 fprintf (file, ",%d", scale);
12156 rtx offset = NULL_RTX;
12160 /* Pull out the offset of a symbol; print any symbol itself. */
12161 if (GET_CODE (disp) == CONST
12162 && GET_CODE (XEXP (disp, 0)) == PLUS
12163 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12165 offset = XEXP (XEXP (disp, 0), 1);
12166 disp = gen_rtx_CONST (VOIDmode,
12167 XEXP (XEXP (disp, 0), 0));
12171 output_pic_addr_const (file, disp, 0);
12172 else if (GET_CODE (disp) == LABEL_REF)
12173 output_asm_label (disp);
12174 else if (CONST_INT_P (disp))
12177 output_addr_const (file, disp);
12183 print_reg (base, 0, file);
12186 if (INTVAL (offset) >= 0)
12188 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12192 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12199 print_reg (index, 0, file);
12201 fprintf (file, "*%d", scale);
12209 output_addr_const_extra (FILE *file, rtx x)
12213 if (GET_CODE (x) != UNSPEC)
12216 op = XVECEXP (x, 0, 0);
12217 switch (XINT (x, 1))
12219 case UNSPEC_GOTTPOFF:
12220 output_addr_const (file, op);
12221 /* FIXME: This might be @TPOFF in Sun ld. */
12222 fputs ("@gottpoff", file);
12225 output_addr_const (file, op);
12226 fputs ("@tpoff", file);
12228 case UNSPEC_NTPOFF:
12229 output_addr_const (file, op);
12231 fputs ("@tpoff", file);
12233 fputs ("@ntpoff", file);
12235 case UNSPEC_DTPOFF:
12236 output_addr_const (file, op);
12237 fputs ("@dtpoff", file);
12239 case UNSPEC_GOTNTPOFF:
12240 output_addr_const (file, op);
12242 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12243 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
12245 fputs ("@gotntpoff", file);
12247 case UNSPEC_INDNTPOFF:
12248 output_addr_const (file, op);
12249 fputs ("@indntpoff", file);
12252 case UNSPEC_MACHOPIC_OFFSET:
12253 output_addr_const (file, op);
12255 machopic_output_function_base_name (file);
12266 /* Split one or more DImode RTL references into pairs of SImode
12267 references. The RTL can be REG, offsettable MEM, integer constant, or
12268 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
12269 split and "num" is its length. lo_half and hi_half are output arrays
12270 that parallel "operands". */
12273 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12277 rtx op = operands[num];
12279 /* simplify_subreg refuses to split volatile memory addresses,
12280 but we still have to handle them. */
12283 lo_half[num] = adjust_address (op, SImode, 0);
12284 hi_half[num] = adjust_address (op, SImode, 4);
12288 lo_half[num] = simplify_gen_subreg (SImode, op,
12289 GET_MODE (op) == VOIDmode
12290 ? DImode : GET_MODE (op), 0);
12291 hi_half[num] = simplify_gen_subreg (SImode, op,
12292 GET_MODE (op) == VOIDmode
12293 ? DImode : GET_MODE (op), 4);
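/* For example, an offsettable DImode MEM at address A splits into
   SImode MEMs at A and A + 4 (the low and high halves on this
   little-endian target), while register and constant operands split
   into their two SImode halves via simplify_gen_subreg.  */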
12297 /* Split one or more TImode RTL references into pairs of DImode
12298 references. The RTL can be REG, offsettable MEM, integer constant, or
12299 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
12300 split and "num" is its length. lo_half and hi_half are output arrays
12301 that parallel "operands". */
12304 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12308 rtx op = operands[num];
12310 /* simplify_subreg refuses to split volatile memory addresses, but we
12311 still have to handle them. */
12314 lo_half[num] = adjust_address (op, DImode, 0);
12315 hi_half[num] = adjust_address (op, DImode, 8);
12319 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
12320 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
12325 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
12326 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
12327 is the expression of the binary operation. The output may either be
12328 emitted here, or returned to the caller, like all output_* functions.
12330 There is no guarantee that the operands are the same mode, as they
12331 might be within FLOAT or FLOAT_EXTEND expressions. */
12333 #ifndef SYSV386_COMPAT
12334 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
12335 wants to fix the assemblers because that causes incompatibility
12336 with gcc. No-one wants to fix gcc because that causes
12337 incompatibility with assemblers... You can use the option of
12338 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
12339 #define SYSV386_COMPAT 1
12343 output_387_binary_op (rtx insn, rtx *operands)
12345 static char buf[40];
12348 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
12350 #ifdef ENABLE_CHECKING
12351 /* Even if we do not want to check the inputs, this documents input
12352 constraints. Which helps in understanding the following code. */
12353 if (STACK_REG_P (operands[0])
12354 && ((REG_P (operands[1])
12355 && REGNO (operands[0]) == REGNO (operands[1])
12356 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
12357 || (REG_P (operands[2])
12358 && REGNO (operands[0]) == REGNO (operands[2])
12359 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
12360 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
12363 gcc_assert (is_sse);
12366 switch (GET_CODE (operands[3]))
12369 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12370 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12378 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12379 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12387 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12388 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12396 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12397 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12405 gcc_unreachable ();
12412 strcpy (buf, ssep);
12413 if (GET_MODE (operands[0]) == SFmode)
12414 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
12416 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
12420 strcpy (buf, ssep + 1);
12421 if (GET_MODE (operands[0]) == SFmode)
12422 strcat (buf, "ss\t{%2, %0|%0, %2}");
12424 strcat (buf, "sd\t{%2, %0|%0, %2}");
12430 switch (GET_CODE (operands[3]))
12434 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
12436 rtx temp = operands[2];
12437 operands[2] = operands[1];
12438 operands[1] = temp;
12441 /* We know operands[0] == operands[1]. */
12443 if (MEM_P (operands[2]))
12449 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12451 if (STACK_TOP_P (operands[0]))
12452 /* How is it that we are storing to a dead operand[2]?
12453 Well, presumably operands[1] is dead too. We can't
12454 store the result to st(0) as st(0) gets popped on this
12455 instruction. Instead store to operands[2] (which I
12456 think has to be st(1)). st(1) will be popped later.
12457 gcc <= 2.8.1 didn't have this check and generated
12458 assembly code that the Unixware assembler rejected. */
12459 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12461 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12465 if (STACK_TOP_P (operands[0]))
12466 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12468 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12473 if (MEM_P (operands[1]))
12479 if (MEM_P (operands[2]))
12485 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12488 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
12489 derived assemblers, confusingly reverse the direction of
12490 the operation for fsub{r} and fdiv{r} when the
12491 destination register is not st(0). The Intel assembler
12492 doesn't have this brain damage. Read !SYSV386_COMPAT to
12493 figure out what the hardware really does. */
12494 if (STACK_TOP_P (operands[0]))
12495 p = "{p\t%0, %2|rp\t%2, %0}";
12497 p = "{rp\t%2, %0|p\t%0, %2}";
12499 if (STACK_TOP_P (operands[0]))
12500 /* As above for fmul/fadd, we can't store to st(0). */
12501 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12503 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12508 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
12511 if (STACK_TOP_P (operands[0]))
12512 p = "{rp\t%0, %1|p\t%1, %0}";
12514 p = "{p\t%1, %0|rp\t%0, %1}";
12516 if (STACK_TOP_P (operands[0]))
12517 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
12519 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
12524 if (STACK_TOP_P (operands[0]))
12526 if (STACK_TOP_P (operands[1]))
12527 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12529 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
12532 else if (STACK_TOP_P (operands[1]))
12535 p = "{\t%1, %0|r\t%0, %1}";
12537 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
12543 p = "{r\t%2, %0|\t%0, %2}";
12545 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12551 gcc_unreachable ();
12558 /* Return needed mode for entity in optimize_mode_switching pass. */
12561 ix86_mode_needed (int entity, rtx insn)
12563 enum attr_i387_cw mode;
12565 /* The mode UNINITIALIZED is used to store the control word after a
12566 function call or ASM pattern. The mode ANY specifies that the function
12567 has no requirements on the control word and makes no changes in the
12568 bits we are interested in. */
12571 || (NONJUMP_INSN_P (insn)
12572 && (asm_noperands (PATTERN (insn)) >= 0
12573 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
12574 return I387_CW_UNINITIALIZED;
12576 if (recog_memoized (insn) < 0)
12577 return I387_CW_ANY;
12579 mode = get_attr_i387_cw (insn);
12584 if (mode == I387_CW_TRUNC)
12589 if (mode == I387_CW_FLOOR)
12594 if (mode == I387_CW_CEIL)
12599 if (mode == I387_CW_MASK_PM)
12604 gcc_unreachable ();
12607 return I387_CW_ANY;
12610 /* Output code to initialize control word copies used by trunc?f?i and
12611 rounding patterns. CURRENT_MODE is set to the current control word,
12612 while NEW_MODE is set to the new control word. */
12615 emit_i387_cw_initialization (int mode)
12617 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
12620 enum ix86_stack_slot slot;
12622 rtx reg = gen_reg_rtx (HImode);
12624 emit_insn (gen_x86_fnstcw_1 (stored_mode));
12625 emit_move_insn (reg, copy_rtx (stored_mode));
12627 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
12628 || optimize_function_for_size_p (cfun))
12632 case I387_CW_TRUNC:
12633 /* round toward zero (truncate) */
12634 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
12635 slot = SLOT_CW_TRUNC;
12638 case I387_CW_FLOOR:
12639 /* round down toward -oo */
12640 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12641 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
12642 slot = SLOT_CW_FLOOR;
12646 /* round up toward +oo */
12647 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12648 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
12649 slot = SLOT_CW_CEIL;
12652 case I387_CW_MASK_PM:
12653 /* mask precision exception for nearbyint() */
12654 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12655 slot = SLOT_CW_MASK_PM;
12659 gcc_unreachable ();
12666 case I387_CW_TRUNC:
12667 /* round toward zero (truncate) */
12668 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
12669 slot = SLOT_CW_TRUNC;
12672 case I387_CW_FLOOR:
12673 /* round down toward -oo */
12674 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
12675 slot = SLOT_CW_FLOOR;
12679 /* round up toward +oo */
12680 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
12681 slot = SLOT_CW_CEIL;
12684 case I387_CW_MASK_PM:
12685 /* mask precision exception for nearbyint() */
12686 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12687 slot = SLOT_CW_MASK_PM;
12691 gcc_unreachable ();
12695 gcc_assert (slot < MAX_386_STACK_LOCALS);
12697 new_mode = assign_386_stack_local (HImode, slot);
12698 emit_move_insn (new_mode, reg);
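/* The bit twiddling above targets the x87 control word layout: bits
   10 and 11 form the rounding-control field (00 = to nearest,
   01 = down, 10 = up, 11 = truncate, hence the 0x0c00/0x0400/0x0800
   masks), and bit 5 (0x0020) is the precision-exception mask used for
   nearbyint().  */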
12701 /* Output code for INSN to convert a float to a signed int. OPERANDS
12702 are the insn operands. The output may be [HSD]Imode and the input
12703 operand may be [SDX]Fmode. */
12706 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
12708 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12709 int dimode_p = GET_MODE (operands[0]) == DImode;
12710 int round_mode = get_attr_i387_cw (insn);
12712 /* Jump through a hoop or two for DImode, since the hardware has no
12713 non-popping instruction. We used to do this a different way, but
12714 that was somewhat fragile and broke with post-reload splitters. */
12715 if ((dimode_p || fisttp) && !stack_top_dies)
12716 output_asm_insn ("fld\t%y1", operands);
12718 gcc_assert (STACK_TOP_P (operands[1]));
12719 gcc_assert (MEM_P (operands[0]));
12720 gcc_assert (GET_MODE (operands[1]) != TFmode);
12723 output_asm_insn ("fisttp%Z0\t%0", operands);
12726 if (round_mode != I387_CW_ANY)
12727 output_asm_insn ("fldcw\t%3", operands);
12728 if (stack_top_dies || dimode_p)
12729 output_asm_insn ("fistp%Z0\t%0", operands);
12731 output_asm_insn ("fist%Z0\t%0", operands);
12732 if (round_mode != I387_CW_ANY)
12733 output_asm_insn ("fldcw\t%2", operands);
12739 /* Output code for x87 ffreep insn. The OPNO argument, which may only
12740 have the values zero or one, indicates the ffreep insn's operand
12741 from the OPERANDS array. */
12743 static const char *
12744 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
12746 if (TARGET_USE_FFREEP)
12747 #ifdef HAVE_AS_IX86_FFREEP
12748 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
12751 static char retval[32];
12752 int regno = REGNO (operands[opno]);
12754 gcc_assert (FP_REGNO_P (regno));
12756 regno -= FIRST_STACK_REG;
12758 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
12763 return opno ? "fstp\t%y1" : "fstp\t%y0";
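/* The hand-assembled fallback above emits the raw opcode: e.g. for
   st(0) it produces ASM_SHORT "0xc0df", whose little-endian bytes
   0xdf 0xc0 are the encoding of "ffreep %st(0)".  */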
12767 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
12768 should be used. UNORDERED_P is true when fucom should be used. */
12771 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
12773 int stack_top_dies;
12774 rtx cmp_op0, cmp_op1;
12775 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
12779 cmp_op0 = operands[0];
12780 cmp_op1 = operands[1];
12784 cmp_op0 = operands[1];
12785 cmp_op1 = operands[2];
12790 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
12791 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
12792 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
12793 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
12795 if (GET_MODE (operands[0]) == SFmode)
12797 return &ucomiss[TARGET_AVX ? 0 : 1];
12799 return &comiss[TARGET_AVX ? 0 : 1];
12802 return &ucomisd[TARGET_AVX ? 0 : 1];
12804 return &comisd[TARGET_AVX ? 0 : 1];
12807 gcc_assert (STACK_TOP_P (cmp_op0));
12809 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12811 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
12813 if (stack_top_dies)
12815 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
12816 return output_387_ffreep (operands, 1);
12819 return "ftst\n\tfnstsw\t%0";
12822 if (STACK_REG_P (cmp_op1)
12824 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
12825 && REGNO (cmp_op1) != FIRST_STACK_REG)
12827 /* If the top of the 387 stack dies, and the other operand
12828 is also a stack register that dies, then this must be a
12829 `fcompp' float compare. */
12833 /* There is no double popping fcomi variant. Fortunately,
12834 eflags is immune from the fstp's cc clobbering. */
12836 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
12838 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
12839 return output_387_ffreep (operands, 0);
12844 return "fucompp\n\tfnstsw\t%0";
12846 return "fcompp\n\tfnstsw\t%0";
12851 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
12853 static const char * const alt[16] =
12855 "fcom%Z2\t%y2\n\tfnstsw\t%0",
12856 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
12857 "fucom%Z2\t%y2\n\tfnstsw\t%0",
12858 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
12860 "ficom%Z2\t%y2\n\tfnstsw\t%0",
12861 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
12865 "fcomi\t{%y1, %0|%0, %y1}",
12866 "fcomip\t{%y1, %0|%0, %y1}",
12867 "fucomi\t{%y1, %0|%0, %y1}",
12868 "fucomip\t{%y1, %0|%0, %y1}",
12879 mask = eflags_p << 3;
12880 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
12881 mask |= unordered_p << 1;
12882 mask |= stack_top_dies;
12884 gcc_assert (mask < 16);
12893 ix86_output_addr_vec_elt (FILE *file, int value)
12895 const char *directive = ASM_LONG;
12899 directive = ASM_QUAD;
12901 gcc_assert (!TARGET_64BIT);
12904 fprintf (file, "%s" LPREFIX "%d\n", directive, value);
12908 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
12910 const char *directive = ASM_LONG;
12913 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
12914 directive = ASM_QUAD;
12916 gcc_assert (!TARGET_64BIT);
12918 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
12919 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
12920 fprintf (file, "%s" LPREFIX "%d-" LPREFIX "%d\n",
12921 directive, value, rel);
12922 else if (HAVE_AS_GOTOFF_IN_DATA)
12923 fprintf (file, ASM_LONG LPREFIX "%d@GOTOFF\n", value);
12925 else if (TARGET_MACHO)
12927 fprintf (file, ASM_LONG LPREFIX "%d-", value);
12928 machopic_output_function_base_name (file);
12933 asm_fprintf (file, ASM_LONG "%U%s+[.-" LPREFIX "%d]\n",
12934 GOT_SYMBOL_NAME, value);
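/* Illustrative output, assuming LPREFIX is ".L":

	.long	.L7-.L3		# 64-bit or VxWorks RTP
	.long	.L7@GOTOFF	# assemblers with @GOTOFF in data  */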
12937 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
12941 ix86_expand_clear (rtx dest)
12945 /* We play register width games, which are only valid after reload. */
12946 gcc_assert (reload_completed);
12948 /* Avoid HImode and its attendant prefix byte. */
12949 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
12950 dest = gen_rtx_REG (SImode, REGNO (dest));
12951 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
12953 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
12954 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
12956 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12957 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
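/* The PARALLEL built here clobbers the flags and matches the xor
   pattern, i.e. the RTL behind "xorl %eax, %eax" for (reg:SI ax);
   the bare SET by itself is the "movl $0, %eax" form used when
   TARGET_USE_MOV0 prefers it.  */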
12963 /* X is an unchanging MEM. If it is a constant pool reference, return
12964 the constant pool rtx, else NULL. */
12967 maybe_get_pool_constant (rtx x)
12969 x = ix86_delegitimize_address (XEXP (x, 0));
12971 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
12972 return get_pool_constant (x);
12978 ix86_expand_move (enum machine_mode mode, rtx operands[])
12981 enum tls_model model;
12986 if (GET_CODE (op1) == SYMBOL_REF)
12988 model = SYMBOL_REF_TLS_MODEL (op1);
12991 op1 = legitimize_tls_address (op1, model, true);
12992 op1 = force_operand (op1, op0);
12996 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
12997 && SYMBOL_REF_DLLIMPORT_P (op1))
12998 op1 = legitimize_dllimport_symbol (op1, false);
13000 else if (GET_CODE (op1) == CONST
13001 && GET_CODE (XEXP (op1, 0)) == PLUS
13002 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
13004 rtx addend = XEXP (XEXP (op1, 0), 1);
13005 rtx symbol = XEXP (XEXP (op1, 0), 0);
13008 model = SYMBOL_REF_TLS_MODEL (symbol);
13010 tmp = legitimize_tls_address (symbol, model, true);
13011 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13012 && SYMBOL_REF_DLLIMPORT_P (symbol))
13013 tmp = legitimize_dllimport_symbol (symbol, true);
13017 tmp = force_operand (tmp, NULL);
13018 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
13019 op0, 1, OPTAB_DIRECT);
13025 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
13027 if (TARGET_MACHO && !TARGET_64BIT)
13032 rtx temp = ((reload_in_progress
13033 || ((op0 && REG_P (op0))
13035 ? op0 : gen_reg_rtx (Pmode));
13036 op1 = machopic_indirect_data_reference (op1, temp);
13037 op1 = machopic_legitimize_pic_address (op1, mode,
13038 temp == op1 ? 0 : temp);
13040 else if (MACHOPIC_INDIRECT)
13041 op1 = machopic_indirect_data_reference (op1, 0);
13049 op1 = force_reg (Pmode, op1);
13050 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
13052 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
13053 op1 = legitimize_pic_address (op1, reg);
13062 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
13063 || !push_operand (op0, mode))
13065 op1 = force_reg (mode, op1);
13067 if (push_operand (op0, mode)
13068 && ! general_no_elim_operand (op1, mode))
13069 op1 = copy_to_mode_reg (mode, op1);
13071 /* Force large constants in 64-bit compilation into a register
13072 to get them CSEed. */
13073 if (can_create_pseudo_p ()
13074 && (mode == DImode) && TARGET_64BIT
13075 && immediate_operand (op1, mode)
13076 && !x86_64_zext_immediate_operand (op1, VOIDmode)
13077 && !register_operand (op0, mode)
13079 op1 = copy_to_mode_reg (mode, op1);
13081 if (can_create_pseudo_p ()
13082 && FLOAT_MODE_P (mode)
13083 && GET_CODE (op1) == CONST_DOUBLE)
13085 /* If we are loading a floating point constant to a register,
13086 force the value to memory now, since we'll get better code
13087 out of the back end. */
13089 op1 = validize_mem (force_const_mem (mode, op1));
13090 if (!register_operand (op0, mode))
13092 rtx temp = gen_reg_rtx (mode);
13093 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
13094 emit_move_insn (op0, temp);
13100 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13104 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
13106 rtx op0 = operands[0], op1 = operands[1];
13107 unsigned int align = GET_MODE_ALIGNMENT (mode);
13109 /* Force constants other than zero into memory. We do not know how
13110 the instructions used to build constants modify the upper 64 bits
13111 of the register; once we have that information we may be able
13112 to handle some of them more efficiently. */
13113 if (can_create_pseudo_p ()
13114 && register_operand (op0, mode)
13115 && (CONSTANT_P (op1)
13116 || (GET_CODE (op1) == SUBREG
13117 && CONSTANT_P (SUBREG_REG (op1))))
13118 && !standard_sse_constant_p (op1))
13119 op1 = validize_mem (force_const_mem (mode, op1));
13121 /* We need to check memory alignment for SSE mode since attributes
13122 can make operands unaligned. */
13123 if (can_create_pseudo_p ()
13124 && SSE_REG_MODE_P (mode)
13125 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
13126 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
13130 /* ix86_expand_vector_move_misalign() does not like constants ... */
13131 if (CONSTANT_P (op1)
13132 || (GET_CODE (op1) == SUBREG
13133 && CONSTANT_P (SUBREG_REG (op1))))
13134 op1 = validize_mem (force_const_mem (mode, op1));
13136 /* ... nor both arguments in memory. */
13137 if (!register_operand (op0, mode)
13138 && !register_operand (op1, mode))
13139 op1 = force_reg (mode, op1);
13141 tmp[0] = op0; tmp[1] = op1;
13142 ix86_expand_vector_move_misalign (mode, tmp);
13146 /* Make operand1 a register if it isn't already. */
13147 if (can_create_pseudo_p ()
13148 && !register_operand (op0, mode)
13149 && !register_operand (op1, mode))
13151 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
13155 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13158 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
13159 straight to ix86_expand_vector_move. */
13160 /* Code generation for scalar reg-reg moves of single and double precision data:
13161 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
13165 if (x86_sse_partial_reg_dependency == true)
13170 Code generation for scalar loads of double precision data:
13171 if (x86_sse_split_regs == true)
13172 movlpd mem, reg (gas syntax)
13176 Code generation for unaligned packed loads of single precision data
13177 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
13178 if (x86_sse_unaligned_move_optimal)
13181 if (x86_sse_partial_reg_dependency == true)
13193 Code generation for unaligned packed loads of double precision data
13194 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
13195 if (x86_sse_unaligned_move_optimal)
13198 if (x86_sse_split_regs == true)
13211 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
13220 switch (GET_MODE_CLASS (mode))
13222 case MODE_VECTOR_INT:
13224 switch (GET_MODE_SIZE (mode))
13227 op0 = gen_lowpart (V16QImode, op0);
13228 op1 = gen_lowpart (V16QImode, op1);
13229 emit_insn (gen_avx_movdqu (op0, op1));
13232 op0 = gen_lowpart (V32QImode, op0);
13233 op1 = gen_lowpart (V32QImode, op1);
13234 emit_insn (gen_avx_movdqu256 (op0, op1));
13237 gcc_unreachable ();
13240 case MODE_VECTOR_FLOAT:
13241 op0 = gen_lowpart (mode, op0);
13242 op1 = gen_lowpart (mode, op1);
13247 emit_insn (gen_avx_movups (op0, op1));
13250 emit_insn (gen_avx_movups256 (op0, op1));
13253 emit_insn (gen_avx_movupd (op0, op1));
13256 emit_insn (gen_avx_movupd256 (op0, op1));
13259 gcc_unreachable ();
13264 gcc_unreachable ();
13272 /* If we're optimizing for size, movups is the smallest. */
13273 if (optimize_insn_for_size_p ())
13275 op0 = gen_lowpart (V4SFmode, op0);
13276 op1 = gen_lowpart (V4SFmode, op1);
13277 emit_insn (gen_sse_movups (op0, op1));
13281 /* ??? If we have typed data, then it would appear that using
13282 movdqu is the only way to get unaligned data loaded with
13283 an integer type. */
13284 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13286 op0 = gen_lowpart (V16QImode, op0);
13287 op1 = gen_lowpart (V16QImode, op1);
13288 emit_insn (gen_sse2_movdqu (op0, op1));
13292 if (TARGET_SSE2 && mode == V2DFmode)
13296 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13298 op0 = gen_lowpart (V2DFmode, op0);
13299 op1 = gen_lowpart (V2DFmode, op1);
13300 emit_insn (gen_sse2_movupd (op0, op1));
13304 /* When SSE registers are split into halves, we can avoid
13305 writing to the top half twice. */
13306 if (TARGET_SSE_SPLIT_REGS)
13308 emit_clobber (op0);
13313 /* ??? Not sure about the best option for the Intel chips.
13314 The following would seem to satisfy; the register is
13315 entirely cleared, breaking the dependency chain. We
13316 then store to the upper half, with a dependency depth
13317 of one. A rumor has it that Intel recommends two movsd
13318 followed by an unpacklpd, but this is unconfirmed. And
13319 given that the dependency depth of the unpacklpd would
13320 still be one, I'm not sure why this would be better. */
13321 zero = CONST0_RTX (V2DFmode);
13324 m = adjust_address (op1, DFmode, 0);
13325 emit_insn (gen_sse2_loadlpd (op0, zero, m));
13326 m = adjust_address (op1, DFmode, 8);
13327 emit_insn (gen_sse2_loadhpd (op0, op0, m));
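/* A hedged sketch of the load just emitted (AT&T syntax; the insns
   actually chosen depend on the sse2_loadlpd/loadhpd alternatives):

     movsd  mem,   %xmm0    ; low half loaded, upper half zeroed
     movhpd mem+8, %xmm0    ; upper half loaded, dependency depth 1  */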
13331 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13333 op0 = gen_lowpart (V4SFmode, op0);
13334 op1 = gen_lowpart (V4SFmode, op1);
13335 emit_insn (gen_sse_movups (op0, op1));
13339 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
13340 emit_move_insn (op0, CONST0_RTX (mode));
13342 emit_clobber (op0);
13344 if (mode != V4SFmode)
13345 op0 = gen_lowpart (V4SFmode, op0);
13346 m = adjust_address (op1, V2SFmode, 0);
13347 emit_insn (gen_sse_loadlps (op0, op0, m));
13348 m = adjust_address (op1, V2SFmode, 8);
13349 emit_insn (gen_sse_loadhps (op0, op0, m));
13352 else if (MEM_P (op0))
13354 /* If we're optimizing for size, movups is the smallest. */
13355 if (optimize_insn_for_size_p ())
13357 op0 = gen_lowpart (V4SFmode, op0);
13358 op1 = gen_lowpart (V4SFmode, op1);
13359 emit_insn (gen_sse_movups (op0, op1));
13363 /* ??? Similar to above, only less clear because of quote
13364 typeless stores unquote. */
13365 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
13366 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13368 op0 = gen_lowpart (V16QImode, op0);
13369 op1 = gen_lowpart (V16QImode, op1);
13370 emit_insn (gen_sse2_movdqu (op0, op1));
13374 if (TARGET_SSE2 && mode == V2DFmode)
13376 m = adjust_address (op0, DFmode, 0);
13377 emit_insn (gen_sse2_storelpd (m, op1));
13378 m = adjust_address (op0, DFmode, 8);
13379 emit_insn (gen_sse2_storehpd (m, op1));
13383 if (mode != V4SFmode)
13384 op1 = gen_lowpart (V4SFmode, op1);
13385 m = adjust_address (op0, V2SFmode, 0);
13386 emit_insn (gen_sse_storelps (m, op1));
13387 m = adjust_address (op0, V2SFmode, 8);
13388 emit_insn (gen_sse_storehps (m, op1));
13392 gcc_unreachable ();
13395 /* Expand a push in MODE. This is some mode for which we do not support
13396 proper push instructions, at least from the registers that we expect
13397 the value to live in. */
13400 ix86_expand_push (enum machine_mode mode, rtx x)
13404 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
13405 GEN_INT (-GET_MODE_SIZE (mode)),
13406 stack_pointer_rtx, 1, OPTAB_DIRECT);
13407 if (tmp != stack_pointer_rtx)
13408 emit_move_insn (stack_pointer_rtx, tmp);
13410 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
13412 /* When we push an operand onto the stack, it has to be aligned at least
13413 at the function argument boundary. However since we don't have
13414 the argument type, we can't determine the actual argument
13415 boundary. */
13416 emit_move_insn (tmp, x);
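/* A minimal sketch of the effect, assuming an 8-byte MODE on ia32
   (illustrative only):

     subl $8, %esp          ; the expand_simple_binop above
     mov  X,  (%esp)        ; the emit_move_insn just above

   i.e. adjust the stack pointer first, then store through a MEM based
   on the new %esp.  */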
13419 /* Helper function of ix86_fixup_binary_operands to canonicalize
13420 operand order. Returns true if the operands should be swapped. */
13423 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
13426 rtx dst = operands[0];
13427 rtx src1 = operands[1];
13428 rtx src2 = operands[2];
13430 /* If the operation is not commutative, we can't do anything. */
13431 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
13434 /* Highest priority is that src1 should match dst. */
13435 if (rtx_equal_p (dst, src1))
13437 if (rtx_equal_p (dst, src2))
13440 /* Next highest priority is that immediate constants come second. */
13441 if (immediate_operand (src2, mode))
13443 if (immediate_operand (src1, mode))
13446 /* Lowest priority is that memory references should come second. */
13456 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
13457 destination to use for the operation. If different from the true
13458 destination in operands[0], a copy operation will be required. */
13461 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
13464 rtx dst = operands[0];
13465 rtx src1 = operands[1];
13466 rtx src2 = operands[2];
13468 /* Canonicalize operand order. */
13469 if (ix86_swap_binary_operands_p (code, mode, operands))
13473 /* It is invalid to swap operands of different modes. */
13474 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
13481 /* Both source operands cannot be in memory. */
13482 if (MEM_P (src1) && MEM_P (src2))
13484 /* Optimization: Only read from memory once. */
13485 if (rtx_equal_p (src1, src2))
13487 src2 = force_reg (mode, src2);
13491 src2 = force_reg (mode, src2);
13494 /* If the destination is memory, and we do not have matching source
13495 operands, do things in registers. */
13496 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13497 dst = gen_reg_rtx (mode);
13499 /* Source 1 cannot be a constant. */
13500 if (CONSTANT_P (src1))
13501 src1 = force_reg (mode, src1);
13503 /* Source 1 cannot be a non-matching memory. */
13504 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13505 src1 = force_reg (mode, src1);
13507 operands[1] = src1;
13508 operands[2] = src2;
13512 /* Similarly, but assume that the destination has already been
13513 set up properly. */
13516 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
13517 enum machine_mode mode, rtx operands[])
13519 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
13520 gcc_assert (dst == operands[0]);
13523 /* Attempt to expand a binary operator. Make the expansion closer to the
13524 actual machine than just general_operand, which would allow 3 separate
13525 memory references (one output, two input) in a single insn. */
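/* For example (a sketch, not lifted from the md files): expanding a
   SImode PLUS emits

     (parallel [(set (reg:SI dst)
                     (plus:SI (reg:SI src1) (reg:SI src2)))
                (clobber (reg:CC FLAGS_REG))])

   with the flags clobber omitted while reload_in_progress, as handled
   below.  */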
13528 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
13531 rtx src1, src2, dst, op, clob;
13533 dst = ix86_fixup_binary_operands (code, mode, operands);
13534 src1 = operands[1];
13535 src2 = operands[2];
13537 /* Emit the instruction. */
13539 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
13540 if (reload_in_progress)
13542 /* Reload doesn't know about the flags register, and doesn't know that
13543 it doesn't want to clobber it. We can only do this with PLUS. */
13544 gcc_assert (code == PLUS);
13549 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13550 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13553 /* Fix up the destination if needed. */
13554 if (dst != operands[0])
13555 emit_move_insn (operands[0], dst);
13558 /* Return TRUE or FALSE depending on whether the binary operator meets the
13559 appropriate constraints. */
13562 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
13565 rtx dst = operands[0];
13566 rtx src1 = operands[1];
13567 rtx src2 = operands[2];
13569 /* Both source operands cannot be in memory. */
13570 if (MEM_P (src1) && MEM_P (src2))
13573 /* Canonicalize operand order for commutative operators. */
13574 if (ix86_swap_binary_operands_p (code, mode, operands))
13581 /* If the destination is memory, we must have a matching source operand. */
13582 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13585 /* Source 1 cannot be a constant. */
13586 if (CONSTANT_P (src1))
13589 /* Source 1 cannot be a non-matching memory. */
13590 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13596 /* Attempt to expand a unary operator. Make the expansion closer to the
13597 actual machine than just general_operand, which would allow 2 separate
13598 memory references (one output, one input) in a single insn. */
13601 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
13604 int matching_memory;
13605 rtx src, dst, op, clob;
13610 /* If the destination is memory, and we do not have matching source
13611 operands, do things in registers. */
13612 matching_memory = 0;
13615 if (rtx_equal_p (dst, src))
13616 matching_memory = 1;
13618 dst = gen_reg_rtx (mode);
13621 /* When source operand is memory, destination must match. */
13622 if (MEM_P (src) && !matching_memory)
13623 src = force_reg (mode, src);
13625 /* Emit the instruction. */
13627 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
13628 if (reload_in_progress || code == NOT)
13630 /* Reload doesn't know about the flags register, and doesn't know that
13631 it doesn't want to clobber it. */
13632 gcc_assert (code == NOT);
13637 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13638 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13641 /* Fix up the destination if needed. */
13642 if (dst != operands[0])
13643 emit_move_insn (operands[0], dst);
13646 #define LEA_SEARCH_THRESHOLD 12
13648 /* Search backward for non-agu definition of register number REGNO1
13649 or register number REGNO2 in INSN's basic block until
13650 1. Pass LEA_SEARCH_THRESHOLD instructions, or
13651 2. Reach BB boundary, or
13652 3. Reach agu definition.
13653 Returns the distance between the non-agu definition point and INSN.
13654 If no definition point, returns -1. */
13657 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
13660 basic_block bb = BLOCK_FOR_INSN (insn);
13663 enum attr_type insn_type;
13665 if (insn != BB_HEAD (bb))
13667 rtx prev = PREV_INSN (insn);
13668 while (prev && distance < LEA_SEARCH_THRESHOLD)
13670 if (NONDEBUG_INSN_P (prev))
13673 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13674 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13675 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13676 && (regno1 == DF_REF_REGNO (*def_rec)
13677 || regno2 == DF_REF_REGNO (*def_rec)))
13679 insn_type = get_attr_type (prev);
13680 if (insn_type != TYPE_LEA)
13684 if (prev == BB_HEAD (bb))
13686 prev = PREV_INSN (prev);
13690 if (distance < LEA_SEARCH_THRESHOLD)
13694 bool simple_loop = false;
13696 FOR_EACH_EDGE (e, ei, bb->preds)
13699 simple_loop = true;
13705 rtx prev = BB_END (bb);
13708 && distance < LEA_SEARCH_THRESHOLD)
13710 if (NONDEBUG_INSN_P (prev))
13713 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13714 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13715 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13716 && (regno1 == DF_REF_REGNO (*def_rec)
13717 || regno2 == DF_REF_REGNO (*def_rec)))
13719 insn_type = get_attr_type (prev);
13720 if (insn_type != TYPE_LEA)
13724 prev = PREV_INSN (prev);
13732 /* get_attr_type may modify recog data. We want to make sure
13733 that recog data is valid for instruction INSN, on which
13734 distance_non_agu_define is called. INSN is unchanged here. */
13735 extract_insn_cached (insn);
13739 /* Return the distance between INSN and the next insn that uses
13740 register number REGNO0 in a memory address. Return -1 if no such
13741 use is found within LEA_SEARCH_THRESHOLD or if REGNO0 is set. */
13744 distance_agu_use (unsigned int regno0, rtx insn)
13746 basic_block bb = BLOCK_FOR_INSN (insn);
13751 if (insn != BB_END (bb))
13753 rtx next = NEXT_INSN (insn);
13754 while (next && distance < LEA_SEARCH_THRESHOLD)
13756 if (NONDEBUG_INSN_P (next))
13760 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13761 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13762 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13763 && regno0 == DF_REF_REGNO (*use_rec))
13765 /* Return DISTANCE if OP0 is used in memory
13766 address in NEXT. */
13770 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13771 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13772 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13773 && regno0 == DF_REF_REGNO (*def_rec))
13775 /* Return -1 if OP0 is set in NEXT. */
13779 if (next == BB_END (bb))
13781 next = NEXT_INSN (next);
13785 if (distance < LEA_SEARCH_THRESHOLD)
13789 bool simple_loop = false;
13791 FOR_EACH_EDGE (e, ei, bb->succs)
13794 simple_loop = true;
13800 rtx next = BB_HEAD (bb);
13803 && distance < LEA_SEARCH_THRESHOLD)
13805 if (NONDEBUG_INSN_P (next))
13809 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13810 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13811 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13812 && regno0 == DF_REF_REGNO (*use_rec))
13814 /* Return DISTANCE if OP0 is used in memory
13815 address in NEXT. */
13819 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13820 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13821 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13822 && regno0 == DF_REF_REGNO (*def_rec))
13824 /* Return -1 if OP0 is set in NEXT. */
13829 next = NEXT_INSN (next);
13837 /* Define this macro to tune LEA priority vs ADD; it takes effect when
13838 there is a dilemma of choosing LEA or ADD.
13839 Negative value: ADD is preferred over LEA
13841 Positive value: LEA is preferred over ADD. */
13842 #define IX86_LEA_PRIORITY 2
13844 /* Return true if it is ok to optimize an ADD operation to a LEA
13845 operation to avoid flag register consumption. For processors
13846 like ATOM, if the destination register of LEA holds an actual
13847 address which will be used soon, LEA is better; otherwise ADD
13848 is better. */
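/* Illustrative example (an assumption about typical Atom-tuned output,
   not taken verbatim from this file): if the result feeds an address
   soon, e.g.

     lea (%ecx,%edx), %eax
     mov (%eax), %ebx

   the LEA executes on the AGU and its result is already where the load
   wants it; if the result is only consumed by the ALU, a plain `add'
   is preferred instead.  */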
13851 ix86_lea_for_add_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13852 rtx insn, rtx operands[])
13854 unsigned int regno0 = true_regnum (operands[0]);
13855 unsigned int regno1 = true_regnum (operands[1]);
13856 unsigned int regno2;
13858 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
13859 return regno0 != regno1;
13861 regno2 = true_regnum (operands[2]);
13863 /* If a = b + c, (a!=b && a!=c), must use lea form. */
13864 if (regno0 != regno1 && regno0 != regno2)
13868 int dist_define, dist_use;
13869 dist_define = distance_non_agu_define (regno1, regno2, insn);
13870 if (dist_define <= 0)
13873 /* If this insn has both a backward non-agu dependence and a forward
13874 agu dependence, the one with the shorter distance takes effect. */
13875 dist_use = distance_agu_use (regno0, insn);
13877 || (dist_define + IX86_LEA_PRIORITY) < dist_use)
13884 /* Return true if destination reg of SET_BODY is shift count of
13888 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
13894 /* Retrieve destination of SET_BODY. */
13895 switch (GET_CODE (set_body))
13898 set_dest = SET_DEST (set_body);
13899 if (!set_dest || !REG_P (set_dest))
13903 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
13904 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
13912 /* Retrieve shift count of USE_BODY. */
13913 switch (GET_CODE (use_body))
13916 shift_rtx = XEXP (use_body, 1);
13919 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
13920 if (ix86_dep_by_shift_count_body (set_body,
13921 XVECEXP (use_body, 0, i)))
13929 && (GET_CODE (shift_rtx) == ASHIFT
13930 || GET_CODE (shift_rtx) == LSHIFTRT
13931 || GET_CODE (shift_rtx) == ASHIFTRT
13932 || GET_CODE (shift_rtx) == ROTATE
13933 || GET_CODE (shift_rtx) == ROTATERT))
13935 rtx shift_count = XEXP (shift_rtx, 1);
13937 /* Return true if shift count is dest of SET_BODY. */
13938 if (REG_P (shift_count)
13939 && true_regnum (set_dest) == true_regnum (shift_count))
13946 /* Return true if destination reg of SET_INSN is shift count of
13950 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
13952 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
13953 PATTERN (use_insn));
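/* A hedged example of the dependence being detected: with

     SET_INSN:  mov %eax, %ecx     ; defines %ecx
     USE_INSN:  shl %cl,  %ebx     ; shift count reads %ecx

   the destination of SET_INSN is the shift count of USE_INSN, so this
   returns true.  */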
13956 /* Return TRUE or FALSE depending on whether the unary operator meets the
13957 appropriate constraints. */
13960 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13961 enum machine_mode mode ATTRIBUTE_UNUSED,
13962 rtx operands[2] ATTRIBUTE_UNUSED)
13964 /* If one of operands is memory, source and destination must match. */
13965 if ((MEM_P (operands[0])
13966 || MEM_P (operands[1]))
13967 && ! rtx_equal_p (operands[0], operands[1]))
13972 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
13973 are ok, keeping in mind the possible movddup alternative. */
13976 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
13978 if (MEM_P (operands[0]))
13979 return rtx_equal_p (operands[0], operands[1 + high]);
13980 if (MEM_P (operands[1]) && MEM_P (operands[2]))
13981 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
13985 /* Post-reload splitter for converting an SF or DFmode value in an
13986 SSE register into an unsigned SImode. */
13989 ix86_split_convert_uns_si_sse (rtx operands[])
13991 enum machine_mode vecmode;
13992 rtx value, large, zero_or_two31, input, two31, x;
13994 large = operands[1];
13995 zero_or_two31 = operands[2];
13996 input = operands[3];
13997 two31 = operands[4];
13998 vecmode = GET_MODE (large);
13999 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
14001 /* Load up the value into the low element. We must ensure that the other
14002 elements are valid floats -- zero is the easiest such value. */
14005 if (vecmode == V4SFmode)
14006 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
14008 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
14012 input = gen_rtx_REG (vecmode, REGNO (input));
14013 emit_move_insn (value, CONST0_RTX (vecmode));
14014 if (vecmode == V4SFmode)
14015 emit_insn (gen_sse_movss (value, value, input));
14017 emit_insn (gen_sse2_movsd (value, value, input));
14020 emit_move_insn (large, two31);
14021 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
14023 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
14024 emit_insn (gen_rtx_SET (VOIDmode, large, x));
14026 x = gen_rtx_AND (vecmode, zero_or_two31, large);
14027 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
14029 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
14030 emit_insn (gen_rtx_SET (VOIDmode, value, x));
14032 large = gen_rtx_REG (V4SImode, REGNO (large));
14033 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
14035 x = gen_rtx_REG (V4SImode, REGNO (value));
14036 if (vecmode == V4SFmode)
14037 emit_insn (gen_sse2_cvttps2dq (x, value));
14039 emit_insn (gen_sse2_cvttpd2dq (x, value));
14042 emit_insn (gen_xorv4si3 (value, value, large));
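/* In C terms, a sketch of what this split computes for input X:

     if (X >= 0x1p31)
       result = (int) (X - 0x1p31) ^ 0x80000000;
     else
       result = (int) X;

   the 2**31 bias is removed before the truncating cvtt conversion, and
   the sign bit is patched back in by the final xor.  */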
14045 /* Convert an unsigned DImode value into a DFmode, using only SSE.
14046 Expects the 64-bit DImode to be supplied in a pair of integral
14047 registers. Requires SSE2; will use SSE3 if available. For x86_32,
14048 -mfpmath=sse, !optimize_size only. */
14051 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
14053 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
14054 rtx int_xmm, fp_xmm;
14055 rtx biases, exponents;
14058 int_xmm = gen_reg_rtx (V4SImode);
14059 if (TARGET_INTER_UNIT_MOVES)
14060 emit_insn (gen_movdi_to_sse (int_xmm, input));
14061 else if (TARGET_SSE_SPLIT_REGS)
14063 emit_clobber (int_xmm);
14064 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
14068 x = gen_reg_rtx (V2DImode);
14069 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
14070 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
14073 x = gen_rtx_CONST_VECTOR (V4SImode,
14074 gen_rtvec (4, GEN_INT (0x43300000UL),
14075 GEN_INT (0x45300000UL),
14076 const0_rtx, const0_rtx));
14077 exponents = validize_mem (force_const_mem (V4SImode, x));
14079 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
14080 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
14082 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
14083 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
14084 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
14085 (0x1.0p84 + double(fp_value_hi_xmm)).
14086 Note these exponents differ by 32. */
14088 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
14090 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
14091 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
14092 real_ldexp (&bias_lo_rvt, &dconst1, 52);
14093 real_ldexp (&bias_hi_rvt, &dconst1, 84);
14094 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
14095 x = const_double_from_real_value (bias_hi_rvt, DFmode);
14096 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
14097 biases = validize_mem (force_const_mem (V2DFmode, biases));
14098 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
14100 /* Add the upper and lower DFmode values together. */
14102 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
14105 x = copy_to_mode_reg (V2DFmode, fp_xmm);
14106 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
14107 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
14110 ix86_expand_vector_extract (false, target, fp_xmm, 0);
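/* A worked example (illustrative, not from the original comments): for
   input 0x0000000200000005, lo = 5 and hi = 2, so the two doubles are
   (0x1.0p52 + 5) and (0x1.0p84 + 2*0x1.0p32); subtracting the biases
   and summing the halves yields 5 + 2*2**32, exactly the input.  */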
14113 /* Not used, but eases macroization of patterns. */
14115 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
14116 rtx input ATTRIBUTE_UNUSED)
14118 gcc_unreachable ();
14121 /* Convert an unsigned SImode value into a DFmode. Only currently used
14122 for SSE, but applicable anywhere. */
14125 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
14127 REAL_VALUE_TYPE TWO31r;
14130 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
14131 NULL, 1, OPTAB_DIRECT);
14133 fp = gen_reg_rtx (DFmode);
14134 emit_insn (gen_floatsidf2 (fp, x));
14136 real_ldexp (&TWO31r, &dconst1, 31);
14137 x = const_double_from_real_value (TWO31r, DFmode);
14139 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
14141 emit_move_insn (target, x);
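/* Sketch: adding -2**31 wraps the unsigned input into signed range, so
   the computation is effectively

     (double) (int) (X - 0x80000000u) + 0x1.0p31

   e.g. X = 0xC0000000 gives (double) 0x40000000 + 2**31 = 3 * 2**30.  */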
14144 /* Convert a signed DImode value into a DFmode. Only used for SSE in
14145 32-bit mode; otherwise we have a direct convert instruction. */
14148 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
14150 REAL_VALUE_TYPE TWO32r;
14151 rtx fp_lo, fp_hi, x;
14153 fp_lo = gen_reg_rtx (DFmode);
14154 fp_hi = gen_reg_rtx (DFmode);
14156 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
14158 real_ldexp (&TWO32r, &dconst1, 32);
14159 x = const_double_from_real_value (TWO32r, DFmode);
14160 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
14162 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
14164 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
14167 emit_move_insn (target, x);
14170 /* Convert an unsigned SImode value into a SFmode, using only SSE.
14171 For x86_32, -mfpmath=sse, !optimize_size only. */
14173 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
14175 REAL_VALUE_TYPE ONE16r;
14176 rtx fp_hi, fp_lo, int_hi, int_lo, x;
14178 real_ldexp (&ONE16r, &dconst1, 16);
14179 x = const_double_from_real_value (ONE16r, SFmode);
14180 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
14181 NULL, 0, OPTAB_DIRECT);
14182 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
14183 NULL, 0, OPTAB_DIRECT);
14184 fp_hi = gen_reg_rtx (SFmode);
14185 fp_lo = gen_reg_rtx (SFmode);
14186 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
14187 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
14188 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
14190 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
14192 if (!rtx_equal_p (target, fp_hi))
14193 emit_move_insn (target, fp_hi);
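/* Sketch of the decomposition used above:

     (float) (X & 0xffff) + (float) (X >> 16) * 0x1.0p16

   each half has at most 16 significant bits, so the conversions and the
   scaling are exact; only the final addition can round.  */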
14196 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
14197 then replicate the value for all elements of the vector
14198 register. */
14201 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
14208 v = gen_rtvec (4, value, value, value, value);
14209 return gen_rtx_CONST_VECTOR (V4SImode, v);
14213 v = gen_rtvec (2, value, value);
14214 return gen_rtx_CONST_VECTOR (V2DImode, v);
14218 v = gen_rtvec (4, value, value, value, value);
14220 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
14221 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
14222 return gen_rtx_CONST_VECTOR (V4SFmode, v);
14226 v = gen_rtvec (2, value, value);
14228 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
14229 return gen_rtx_CONST_VECTOR (V2DFmode, v);
14232 gcc_unreachable ();
14236 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
14237 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
14238 for an SSE register. If VECT is true, then replicate the mask for
14239 all elements of the vector register. If INVERT is true, then create
14240 a mask excluding the sign bit. */
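/* For example (implied by the mode cases below): for DFmode the mask is
   0x8000000000000000 (just the sign bit), or 0x7fffffffffffffff when
   INVERT; with VECT the element is replicated into a V2DFmode vector.  */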
14243 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
14245 enum machine_mode vec_mode, imode;
14246 HOST_WIDE_INT hi, lo;
14251 /* Find the sign bit, sign extended to 2*HWI. */
14257 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
14258 lo = 0x80000000, hi = lo < 0;
14264 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
14265 if (HOST_BITS_PER_WIDE_INT >= 64)
14266 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
14268 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14273 vec_mode = VOIDmode;
14274 if (HOST_BITS_PER_WIDE_INT >= 64)
14277 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
14284 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14288 lo = ~lo, hi = ~hi;
14294 mask = immed_double_const (lo, hi, imode);
14296 vec = gen_rtvec (2, v, mask);
14297 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
14298 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
14305 gcc_unreachable ();
14309 lo = ~lo, hi = ~hi;
14311 /* Force this value into the low part of a fp vector constant. */
14312 mask = immed_double_const (lo, hi, imode);
14313 mask = gen_lowpart (mode, mask);
14315 if (vec_mode == VOIDmode)
14316 return force_reg (mode, mask);
14318 v = ix86_build_const_vector (mode, vect, mask);
14319 return force_reg (vec_mode, v);
14322 /* Generate code for floating point ABS or NEG. */
14325 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
14328 rtx mask, set, use, clob, dst, src;
14329 bool use_sse = false;
14330 bool vector_mode = VECTOR_MODE_P (mode);
14331 enum machine_mode elt_mode = mode;
14335 elt_mode = GET_MODE_INNER (mode);
14338 else if (mode == TFmode)
14340 else if (TARGET_SSE_MATH)
14341 use_sse = SSE_FLOAT_MODE_P (mode);
14343 /* NEG and ABS performed with SSE use bitwise mask operations.
14344 Create the appropriate mask now. */
14346 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
14355 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
14356 set = gen_rtx_SET (VOIDmode, dst, set);
14361 set = gen_rtx_fmt_e (code, mode, src);
14362 set = gen_rtx_SET (VOIDmode, dst, set);
14365 use = gen_rtx_USE (VOIDmode, mask);
14366 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
14367 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14368 gen_rtvec (3, set, use, clob)));
14375 /* Expand a copysign operation. Special case operand 0 being a constant. */
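/* The underlying identity, as a sketch: with MASK having only sign bits
   set,

     copysign (x, y) = (x & ~MASK) | (y & MASK)

   which is what the copysign*3_const/_var splitters below implement
   with AND/IOR.  */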
14378 ix86_expand_copysign (rtx operands[])
14380 enum machine_mode mode;
14381 rtx dest, op0, op1, mask, nmask;
14383 dest = operands[0];
14387 mode = GET_MODE (dest);
14389 if (GET_CODE (op0) == CONST_DOUBLE)
14391 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
14393 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
14394 op0 = simplify_unary_operation (ABS, mode, op0, mode);
14396 if (mode == SFmode || mode == DFmode)
14398 enum machine_mode vmode;
14400 vmode = mode == SFmode ? V4SFmode : V2DFmode;
14402 if (op0 == CONST0_RTX (mode))
14403 op0 = CONST0_RTX (vmode);
14406 rtx v = ix86_build_const_vector (mode, false, op0);
14408 op0 = force_reg (vmode, v);
14411 else if (op0 != CONST0_RTX (mode))
14412 op0 = force_reg (mode, op0);
14414 mask = ix86_build_signbit_mask (mode, 0, 0);
14416 if (mode == SFmode)
14417 copysign_insn = gen_copysignsf3_const;
14418 else if (mode == DFmode)
14419 copysign_insn = gen_copysigndf3_const;
14421 copysign_insn = gen_copysigntf3_const;
14423 emit_insn (copysign_insn (dest, op0, op1, mask));
14427 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
14429 nmask = ix86_build_signbit_mask (mode, 0, 1);
14430 mask = ix86_build_signbit_mask (mode, 0, 0);
14432 if (mode == SFmode)
14433 copysign_insn = gen_copysignsf3_var;
14434 else if (mode == DFmode)
14435 copysign_insn = gen_copysigndf3_var;
14437 copysign_insn = gen_copysigntf3_var;
14439 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
14443 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
14444 be a constant, and so has already been expanded into a vector constant. */
14447 ix86_split_copysign_const (rtx operands[])
14449 enum machine_mode mode, vmode;
14450 rtx dest, op0, mask, x;
14452 dest = operands[0];
14454 mask = operands[3];
14456 mode = GET_MODE (dest);
14457 vmode = GET_MODE (mask);
14459 dest = simplify_gen_subreg (vmode, dest, mode, 0);
14460 x = gen_rtx_AND (vmode, dest, mask);
14461 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14463 if (op0 != CONST0_RTX (vmode))
14465 x = gen_rtx_IOR (vmode, dest, op0);
14466 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14470 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
14471 so we have to do two masks. */
14474 ix86_split_copysign_var (rtx operands[])
14476 enum machine_mode mode, vmode;
14477 rtx dest, scratch, op0, op1, mask, nmask, x;
14479 dest = operands[0];
14480 scratch = operands[1];
14483 nmask = operands[4];
14484 mask = operands[5];
14486 mode = GET_MODE (dest);
14487 vmode = GET_MODE (mask);
14489 if (rtx_equal_p (op0, op1))
14491 /* Shouldn't happen often (it's useless, obviously), but when it does
14492 we'd generate incorrect code if we continue below. */
14493 emit_move_insn (dest, op0);
14497 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
14499 gcc_assert (REGNO (op1) == REGNO (scratch));
14501 x = gen_rtx_AND (vmode, scratch, mask);
14502 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14505 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14506 x = gen_rtx_NOT (vmode, dest);
14507 x = gen_rtx_AND (vmode, x, op0);
14508 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14512 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
14514 x = gen_rtx_AND (vmode, scratch, mask);
14516 else /* alternative 2,4 */
14518 gcc_assert (REGNO (mask) == REGNO (scratch));
14519 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
14520 x = gen_rtx_AND (vmode, scratch, op1);
14522 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14524 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
14526 dest = simplify_gen_subreg (vmode, op0, mode, 0);
14527 x = gen_rtx_AND (vmode, dest, nmask);
14529 else /* alternative 3,4 */
14531 gcc_assert (REGNO (nmask) == REGNO (dest));
14533 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14534 x = gen_rtx_AND (vmode, dest, op0);
14536 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14539 x = gen_rtx_IOR (vmode, dest, scratch);
14540 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14543 /* Return TRUE or FALSE depending on whether the first SET in INSN
14544 has source and destination with matching CC modes, and whether the
14545 CC mode is at least as constrained as REQ_MODE. */
14548 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
14551 enum machine_mode set_mode;
14553 set = PATTERN (insn);
14554 if (GET_CODE (set) == PARALLEL)
14555 set = XVECEXP (set, 0, 0);
14556 gcc_assert (GET_CODE (set) == SET);
14557 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
14559 set_mode = GET_MODE (SET_DEST (set));
14563 if (req_mode != CCNOmode
14564 && (req_mode != CCmode
14565 || XEXP (SET_SRC (set), 1) != const0_rtx))
14569 if (req_mode == CCGCmode)
14573 if (req_mode == CCGOCmode || req_mode == CCNOmode)
14577 if (req_mode == CCZmode)
14588 gcc_unreachable ();
14591 return (GET_MODE (SET_SRC (set)) == set_mode);
14594 /* Generate insn patterns to do an integer compare of OPERANDS. */
14597 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
14599 enum machine_mode cmpmode;
14602 cmpmode = SELECT_CC_MODE (code, op0, op1);
14603 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
14605 /* This is very simple, but making the interface the same as in the
14606 FP case makes the rest of the code easier. */
14607 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
14608 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
14610 /* Return the test that should be put into the flags user, i.e.
14611 the bcc, scc, or cmov instruction. */
14612 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
14615 /* Figure out whether to use ordered or unordered fp comparisons.
14616 Return the appropriate mode to use. */
14619 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
14621 /* ??? In order to make all comparisons reversible, we do all comparisons
14622 non-trapping when compiling for IEEE. Once gcc is able to distinguish
14623 all forms of trapping and nontrapping comparisons, we can make inequality
14624 comparisons trapping again, since it results in better code when using
14625 FCOM based compares. */
14626 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
14630 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
14632 enum machine_mode mode = GET_MODE (op0);
14634 if (SCALAR_FLOAT_MODE_P (mode))
14636 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14637 return ix86_fp_compare_mode (code);
14642 /* Only zero flag is needed. */
14643 case EQ: /* ZF=0 */
14644 case NE: /* ZF!=0 */
14646 /* Codes needing carry flag. */
14647 case GEU: /* CF=0 */
14648 case LTU: /* CF=1 */
14649 /* Detect overflow checks. They need just the carry flag. */
14650 if (GET_CODE (op0) == PLUS
14651 && rtx_equal_p (op1, XEXP (op0, 0)))
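/* i.e. a compare of the form (ltu (plus a b) a), the idiom for testing
   unsigned overflow of a + b; only the carry flag is needed.  */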
14655 case GTU: /* CF=0 & ZF=0 */
14656 case LEU: /* CF=1 | ZF=1 */
14657 /* Detect overflow checks. They need just the carry flag. */
14658 if (GET_CODE (op0) == MINUS
14659 && rtx_equal_p (op1, XEXP (op0, 0)))
14663 /* Codes possibly doable only with sign flag when
14664 comparing against zero. */
14665 case GE: /* SF=OF or SF=0 */
14666 case LT: /* SF<>OF or SF=1 */
14667 if (op1 == const0_rtx)
14670 /* For other cases Carry flag is not required. */
14672 /* Codes doable only with sign flag when comparing
14673 against zero, but we miss the jump instruction for it,
14674 so we need to use relational tests against overflow,
14675 which thus needs to be zero.
14676 case GT: /* ZF=0 & SF=OF */
14677 case LE: /* ZF=1 | SF<>OF */
14678 if (op1 == const0_rtx)
14682 /* The strcmp pattern does (use flags), and combine may ask us for a proper
14683 mode. */
14687 gcc_unreachable ();
14691 /* Return the fixed registers used for condition codes. */
14694 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
14701 /* If two condition code modes are compatible, return a condition code
14702 mode which is compatible with both. Otherwise, return
14705 static enum machine_mode
14706 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
14711 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
14714 if ((m1 == CCGCmode && m2 == CCGOCmode)
14715 || (m1 == CCGOCmode && m2 == CCGCmode))
14721 gcc_unreachable ();
14751 /* These are only compatible with themselves, which we already
14752 checked above. */
14758 /* Return a comparison we can do that is equivalent to
14759 swap_condition (code), apart possibly from orderedness.
14760 But never change orderedness if TARGET_IEEE_FP, returning
14761 UNKNOWN in that case if necessary. */
14763 static enum rtx_code
14764 ix86_fp_swap_condition (enum rtx_code code)
14768 case GT: /* GTU - CF=0 & ZF=0 */
14769 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
14770 case GE: /* GEU - CF=0 */
14771 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
14772 case UNLT: /* LTU - CF=1 */
14773 return TARGET_IEEE_FP ? UNKNOWN : GT;
14774 case UNLE: /* LEU - CF=1 | ZF=1 */
14775 return TARGET_IEEE_FP ? UNKNOWN : GE;
14777 return swap_condition (code);
14781 /* Return the cost of comparison CODE using the best strategy for performance.
14782 All following functions use the number of instructions as a cost metric.
14783 In the future this should be tweaked to compute bytes for optimize_size and
14784 take into account the performance of various instructions on various CPUs. */
14787 ix86_fp_comparison_cost (enum rtx_code code)
14791 /* The cost of code using bit-twiddling on %ah. */
14808 arith_cost = TARGET_IEEE_FP ? 5 : 4;
14812 arith_cost = TARGET_IEEE_FP ? 6 : 4;
14815 gcc_unreachable ();
14818 switch (ix86_fp_comparison_strategy (code))
14820 case IX86_FPCMP_COMI:
14821 return arith_cost > 4 ? 3 : 2;
14822 case IX86_FPCMP_SAHF:
14823 return arith_cost > 4 ? 4 : 3;
14829 /* Return the strategy to use for a floating-point comparison. We assume that
14830 fcomi is always preferable where available, since that is also true when looking at size
14831 (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test). */
14833 enum ix86_fpcmp_strategy
14834 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
14836 /* Do fcomi/sahf based test when profitable. */
14839 return IX86_FPCMP_COMI;
14841 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
14842 return IX86_FPCMP_SAHF;
14844 return IX86_FPCMP_ARITH;
14847 /* Swap, force into registers, or otherwise massage the two operands
14848 to a fp comparison. The operands are updated in place; the new
14849 comparison code is returned. */
14851 static enum rtx_code
14852 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
14854 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
14855 rtx op0 = *pop0, op1 = *pop1;
14856 enum machine_mode op_mode = GET_MODE (op0);
14857 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
14859 /* All of the unordered compare instructions only work on registers.
14860 The same is true of the fcomi compare instructions. The XFmode
14861 compare instructions require registers except when comparing
14862 against zero or when converting operand 1 from fixed point to
14863 floating point. */
14866 && (fpcmp_mode == CCFPUmode
14867 || (op_mode == XFmode
14868 && ! (standard_80387_constant_p (op0) == 1
14869 || standard_80387_constant_p (op1) == 1)
14870 && GET_CODE (op1) != FLOAT)
14871 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
14873 op0 = force_reg (op_mode, op0);
14874 op1 = force_reg (op_mode, op1);
14878 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
14879 things around if they appear profitable, otherwise force op0
14880 into a register. */
14882 if (standard_80387_constant_p (op0) == 0
14884 && ! (standard_80387_constant_p (op1) == 0
14887 enum rtx_code new_code = ix86_fp_swap_condition (code);
14888 if (new_code != UNKNOWN)
14891 tmp = op0, op0 = op1, op1 = tmp;
14897 op0 = force_reg (op_mode, op0);
14899 if (CONSTANT_P (op1))
14901 int tmp = standard_80387_constant_p (op1);
14903 op1 = validize_mem (force_const_mem (op_mode, op1));
14907 op1 = force_reg (op_mode, op1);
14910 op1 = force_reg (op_mode, op1);
14914 /* Try to rearrange the comparison to make it cheaper. */
14915 if (ix86_fp_comparison_cost (code)
14916 > ix86_fp_comparison_cost (swap_condition (code))
14917 && (REG_P (op1) || can_create_pseudo_p ()))
14920 tmp = op0, op0 = op1, op1 = tmp;
14921 code = swap_condition (code);
14923 op0 = force_reg (op_mode, op0);
14931 /* Convert comparison codes we use to represent FP comparison to integer
14932 code that will result in a proper branch. Return UNKNOWN if no such code
14933 is available. */
14936 ix86_fp_compare_code_to_integer (enum rtx_code code)
14965 /* Generate insn patterns to do a floating point compare of OPERANDS. */
14968 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
14970 enum machine_mode fpcmp_mode, intcmp_mode;
14973 fpcmp_mode = ix86_fp_compare_mode (code);
14974 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
14976 /* Do fcomi/sahf based test when profitable. */
14977 switch (ix86_fp_comparison_strategy (code))
14979 case IX86_FPCMP_COMI:
14980 intcmp_mode = fpcmp_mode;
14981 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14982 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14987 case IX86_FPCMP_SAHF:
14988 intcmp_mode = fpcmp_mode;
14989 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14990 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14994 scratch = gen_reg_rtx (HImode);
14995 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
14996 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
14999 case IX86_FPCMP_ARITH:
15000 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
15001 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15002 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
15004 scratch = gen_reg_rtx (HImode);
15005 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
15007 /* In the unordered case, we have to check C2 for NaNs, which
15008 doesn't happen to work out to anything nice combination-wise.
15009 So do some bit twiddling on the value we've got in AH to come
15010 up with an appropriate set of condition codes. */
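/* As a reminder (standard x87 status-word layout, stated here as an
   aid rather than taken from the original comments): after fnstsw the
   condition bits land in AH as C0 = 0x01, C2 = 0x04 and C3 = 0x40,
   hence the 0x45 = C0|C2|C3 masks used below.  */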
15012 intcmp_mode = CCNOmode;
15017 if (code == GT || !TARGET_IEEE_FP)
15019 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15024 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15025 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15026 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
15027 intcmp_mode = CCmode;
15033 if (code == LT && TARGET_IEEE_FP)
15035 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15036 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
15037 intcmp_mode = CCmode;
15042 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
15048 if (code == GE || !TARGET_IEEE_FP)
15050 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
15055 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15056 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
15062 if (code == LE && TARGET_IEEE_FP)
15064 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15065 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15066 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15067 intcmp_mode = CCmode;
15072 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15078 if (code == EQ && TARGET_IEEE_FP)
15080 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15081 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15082 intcmp_mode = CCmode;
15087 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15093 if (code == NE && TARGET_IEEE_FP)
15095 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15096 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
15102 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15108 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15112 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15117 gcc_unreachable ();
15125 /* Return the test that should be put into the flags user, i.e.
15126 the bcc, scc, or cmov instruction. */
15127 return gen_rtx_fmt_ee (code, VOIDmode,
15128 gen_rtx_REG (intcmp_mode, FLAGS_REG),
15133 ix86_expand_compare (enum rtx_code code)
15136 op0 = ix86_compare_op0;
15137 op1 = ix86_compare_op1;
15139 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC)
15140 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_op0, ix86_compare_op1);
15142 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
15144 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
15145 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15148 ret = ix86_expand_int_compare (code, op0, op1);
15154 ix86_expand_branch (enum rtx_code code, rtx label)
15158 switch (GET_MODE (ix86_compare_op0))
15167 tmp = ix86_expand_compare (code);
15168 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
15169 gen_rtx_LABEL_REF (VOIDmode, label),
15171 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
15178 /* Expand DImode branch into multiple compare+branch. */
15180 rtx lo[2], hi[2], label2;
15181 enum rtx_code code1, code2, code3;
15182 enum machine_mode submode;
15184 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
15186 tmp = ix86_compare_op0;
15187 ix86_compare_op0 = ix86_compare_op1;
15188 ix86_compare_op1 = tmp;
15189 code = swap_condition (code);
15191 if (GET_MODE (ix86_compare_op0) == DImode)
15193 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
15194 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
15199 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
15200 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
15204 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
15205 avoid two branches. This costs one extra insn, so disable when
15206 optimizing for size. */
15208 if ((code == EQ || code == NE)
15209 && (!optimize_insn_for_size_p ()
15210 || hi[1] == const0_rtx || lo[1] == const0_rtx))
15215 if (hi[1] != const0_rtx)
15216 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
15217 NULL_RTX, 0, OPTAB_WIDEN);
15220 if (lo[1] != const0_rtx)
15221 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
15222 NULL_RTX, 0, OPTAB_WIDEN);
15224 tmp = expand_binop (submode, ior_optab, xor1, xor0,
15225 NULL_RTX, 0, OPTAB_WIDEN);
15227 ix86_compare_op0 = tmp;
15228 ix86_compare_op1 = const0_rtx;
15229 ix86_expand_branch (code, label);
15233 /* Otherwise, if we are doing a less-than or greater-or-equal-than
15234 comparison, op1 is a constant, and the low word is zero, then we can
15235 just examine the high word. Similarly for low word -1 and
15236 less-or-equal-than or greater-than. */
15238 if (CONST_INT_P (hi[1]))
15241 case LT: case LTU: case GE: case GEU:
15242 if (lo[1] == const0_rtx)
15244 ix86_compare_op0 = hi[0];
15245 ix86_compare_op1 = hi[1];
15246 ix86_expand_branch (code, label);
15250 case LE: case LEU: case GT: case GTU:
15251 if (lo[1] == constm1_rtx)
15253 ix86_compare_op0 = hi[0];
15254 ix86_compare_op1 = hi[1];
15255 ix86_expand_branch (code, label);
15263 /* Otherwise, we need two or three jumps. */
15265 label2 = gen_label_rtx ();
15268 code2 = swap_condition (code);
15269 code3 = unsigned_condition (code);
15273 case LT: case GT: case LTU: case GTU:
15276 case LE: code1 = LT; code2 = GT; break;
15277 case GE: code1 = GT; code2 = LT; break;
15278 case LEU: code1 = LTU; code2 = GTU; break;
15279 case GEU: code1 = GTU; code2 = LTU; break;
15281 case EQ: code1 = UNKNOWN; code2 = NE; break;
15282 case NE: code2 = UNKNOWN; break;
15285 gcc_unreachable ();
15290 * if (hi(a) < hi(b)) goto true;
15291 * if (hi(a) > hi(b)) goto false;
15292 * if (lo(a) < lo(b)) goto true;
15296 ix86_compare_op0 = hi[0];
15297 ix86_compare_op1 = hi[1];
15299 if (code1 != UNKNOWN)
15300 ix86_expand_branch (code1, label);
15301 if (code2 != UNKNOWN)
15302 ix86_expand_branch (code2, label2);
15304 ix86_compare_op0 = lo[0];
15305 ix86_compare_op1 = lo[1];
15306 ix86_expand_branch (code3, label);
15308 if (code2 != UNKNOWN)
15309 emit_label (label2);
15314 /* If we have already emitted a compare insn, go straight to simple.
15315 ix86_expand_compare won't emit anything if ix86_compare_emitted
15316 is non-NULL. */
15317 gcc_assert (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC);
15322 /* Split branch based on floating point condition. */
15324 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
15325 rtx target1, rtx target2, rtx tmp, rtx pushed)
15330 if (target2 != pc_rtx)
15333 code = reverse_condition_maybe_unordered (code);
15338 condition = ix86_expand_fp_compare (code, op1, op2,
15341 /* Remove pushed operand from stack. */
15343 ix86_free_from_memory (GET_MODE (pushed));
15345 i = emit_jump_insn (gen_rtx_SET
15347 gen_rtx_IF_THEN_ELSE (VOIDmode,
15348 condition, target1, target2)));
15349 if (split_branch_probability >= 0)
15350 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
15354 ix86_expand_setcc (enum rtx_code code, rtx dest)
15358 gcc_assert (GET_MODE (dest) == QImode);
15360 ret = ix86_expand_compare (code);
15361 PUT_MODE (ret, QImode);
15362 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
15365 /* Expand comparison setting or clearing carry flag. Return true when
15366 successful and set *POP for the operation. */
15368 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
15370 enum machine_mode mode =
15371 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
15373 /* Do not handle DImode compares that go through the special path. */
15374 if (mode == (TARGET_64BIT ? TImode : DImode))
15377 if (SCALAR_FLOAT_MODE_P (mode))
15379 rtx compare_op, compare_seq;
15381 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
15383 /* Shortcut: the following common codes never translate
15384 into carry flag compares. */
15385 if (code == EQ || code == NE || code == UNEQ || code == LTGT
15386 || code == ORDERED || code == UNORDERED)
15389 /* These comparisons require the zero flag; swap operands so they won't need it. */
15390 if ((code == GT || code == UNLE || code == LE || code == UNGT)
15391 && !TARGET_IEEE_FP)
15396 code = swap_condition (code);
15399 /* Try to expand the comparison and verify that we end up with
15400 a carry-flag-based comparison. This fails to be true only when
15401 we decide to expand the comparison using arithmetic, which is not
15402 a common scenario. */
15404 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15405 compare_seq = get_insns ();
15408 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15409 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15410 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
15412 code = GET_CODE (compare_op);
15414 if (code != LTU && code != GEU)
15417 emit_insn (compare_seq);
15422 if (!INTEGRAL_MODE_P (mode))
15431 /* Convert a==0 into (unsigned)a<1. */
15434 if (op1 != const0_rtx)
15437 code = (code == EQ ? LTU : GEU);
15440 /* Convert a>b into b<a or a>=b-1. */
15443 if (CONST_INT_P (op1))
15445 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
15446 /* Bail out on overflow. We still can swap operands but that
15447 would force loading the constant into a register. */
15448 if (op1 == const0_rtx
15449 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
15451 code = (code == GTU ? GEU : LTU);
15458 code = (code == GTU ? LTU : GEU);
15462 /* Convert a>=0 into (unsigned)a<0x80000000. */
15465 if (mode == DImode || op1 != const0_rtx)
15467 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15468 code = (code == LT ? GEU : LTU);
15472 if (mode == DImode || op1 != constm1_rtx)
15474 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15475 code = (code == LE ? GEU : LTU);
15481 /* Swapping operands may cause a constant to appear as the first operand. */
15482 if (!nonimmediate_operand (op0, VOIDmode))
15484 if (!can_create_pseudo_p ())
15486 op0 = force_reg (mode, op0);
15488 ix86_compare_op0 = op0;
15489 ix86_compare_op1 = op1;
15490 *pop = ix86_expand_compare (code);
15491 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
15496 ix86_expand_int_movcc (rtx operands[])
15498 enum rtx_code code = GET_CODE (operands[1]), compare_code;
15499 rtx compare_seq, compare_op;
15500 enum machine_mode mode = GET_MODE (operands[0]);
15501 bool sign_bit_compare_p = false;
15504 ix86_compare_op0 = XEXP (operands[1], 0);
15505 ix86_compare_op1 = XEXP (operands[1], 1);
15506 compare_op = ix86_expand_compare (code);
15507 compare_seq = get_insns ();
15510 compare_code = GET_CODE (compare_op);
15512 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
15513 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
15514 sign_bit_compare_p = true;
15516 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
15517 HImode insns, we'd be swallowed in word prefix ops. */
15519 if ((mode != HImode || TARGET_FAST_PREFIX)
15520 && (mode != (TARGET_64BIT ? TImode : DImode))
15521 && CONST_INT_P (operands[2])
15522 && CONST_INT_P (operands[3]))
15524 rtx out = operands[0];
15525 HOST_WIDE_INT ct = INTVAL (operands[2]);
15526 HOST_WIDE_INT cf = INTVAL (operands[3]);
15527 HOST_WIDE_INT diff;
15530 /* Sign bit compares are better done using shifts than we do by using
15531 sbb. */
15532 if (sign_bit_compare_p
15533 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15534 ix86_compare_op1, &compare_op))
15536 /* Detect overlap between destination and compare sources. */
15539 if (!sign_bit_compare_p)
15542 bool fpcmp = false;
15544 compare_code = GET_CODE (compare_op);
15546 flags = XEXP (compare_op, 0);
15548 if (GET_MODE (flags) == CCFPmode
15549 || GET_MODE (flags) == CCFPUmode)
15553 = ix86_fp_compare_code_to_integer (compare_code);
15556 /* To simplify the rest of the code, restrict to the GEU case. */
15557 if (compare_code == LTU)
15559 HOST_WIDE_INT tmp = ct;
15562 compare_code = reverse_condition (compare_code);
15563 code = reverse_condition (code);
15568 PUT_CODE (compare_op,
15569 reverse_condition_maybe_unordered
15570 (GET_CODE (compare_op)));
15572 PUT_CODE (compare_op,
15573 reverse_condition (GET_CODE (compare_op)));
15577 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
15578 || reg_overlap_mentioned_p (out, ix86_compare_op1))
15579 tmp = gen_reg_rtx (mode);
15581 if (mode == DImode)
15582 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
15584 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
15585 flags, compare_op));
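/* x86_mov[ds]icc_0_m1 materializes 0 or -1 from the carry flag;
   conceptually this is `sbb %reg, %reg' (a sketch of the expected
   pattern, assuming the usual i386.md definition).  */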
15589 if (code == GT || code == GE)
15590 code = reverse_condition (code);
15593 HOST_WIDE_INT tmp = ct;
15598 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
15599 ix86_compare_op1, VOIDmode, 0, -1);
15612 tmp = expand_simple_binop (mode, PLUS,
15614 copy_rtx (tmp), 1, OPTAB_DIRECT);
15625 tmp = expand_simple_binop (mode, IOR,
15627 copy_rtx (tmp), 1, OPTAB_DIRECT);
15629 else if (diff == -1 && ct)
15639 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15641 tmp = expand_simple_binop (mode, PLUS,
15642 copy_rtx (tmp), GEN_INT (cf),
15643 copy_rtx (tmp), 1, OPTAB_DIRECT);
15651 * andl cf - ct, dest
15661 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15664 tmp = expand_simple_binop (mode, AND,
15666 gen_int_mode (cf - ct, mode),
15667 copy_rtx (tmp), 1, OPTAB_DIRECT);
15669 tmp = expand_simple_binop (mode, PLUS,
15670 copy_rtx (tmp), GEN_INT (ct),
15671 copy_rtx (tmp), 1, OPTAB_DIRECT);
15674 if (!rtx_equal_p (tmp, out))
15675 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
15677 return 1; /* DONE */
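  /* Concretely (32-bit case): "cmpl b, a" sets the carry flag iff
     a <u b, and the sbb-based pattern above then yields a mask m of
     -1 or 0.  The arithmetic that follows maps m onto the two
     constants, in the general case via

	dest = (m & (cf - ct)) + ct;

     which is cf when m is -1 and ct when m is 0.  */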
15682 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15685 tmp = ct, ct = cf, cf = tmp;
15688 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15690 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15692 /* We may be reversing an unordered compare to a normal compare, which
15693 is not valid in general (we may convert a non-trapping condition
15694 into a trapping one); however on i386 we currently emit all
15695 comparisons unordered. */
15696 compare_code = reverse_condition_maybe_unordered (compare_code);
15697 code = reverse_condition_maybe_unordered (code);
15701 compare_code = reverse_condition (compare_code);
15702 code = reverse_condition (code);
15706 compare_code = UNKNOWN;
15707 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
15708 && CONST_INT_P (ix86_compare_op1))
15710 if (ix86_compare_op1 == const0_rtx
15711 && (code == LT || code == GE))
15712 compare_code = code;
15713 else if (ix86_compare_op1 == constm1_rtx)
15717 else if (code == GT)
15722 /* Optimize dest = (op0 < 0) ? -1 : cf. */
15723 if (compare_code != UNKNOWN
15724 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
15725 && (cf == -1 || ct == -1))
15727 /* If the lea code below could be used, only optimize
15728 if it results in a 2-insn sequence. */
15730 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
15731 || diff == 3 || diff == 5 || diff == 9)
15732 || (compare_code == LT && ct == -1)
15733 || (compare_code == GE && cf == -1))
15736 * notl op1 (if necessary)
15744 code = reverse_condition (code);
15747 out = emit_store_flag (out, code, ix86_compare_op0,
15748 ix86_compare_op1, VOIDmode, 0, -1);
15750 out = expand_simple_binop (mode, IOR,
15752 out, 1, OPTAB_DIRECT);
15753 if (out != operands[0])
15754 emit_move_insn (operands[0], out);
15756 return 1; /* DONE */
15761 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
15762 || diff == 3 || diff == 5 || diff == 9)
15763 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
15765 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
15771 * lea cf(dest*(ct-cf)),dest
15775 * This also catches the degenerate setcc-only case.
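	 *
	 * For instance, with ct = 5 and cf = 2 we have diff = 3; the
	 * setcc leaves 0/1 in dest and a single
	 * "lea 2(dest,dest,2), dest" then produces 2 or 5 directly.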
15781 out = emit_store_flag (out, code, ix86_compare_op0,
15782 ix86_compare_op1, VOIDmode, 0, 1);
15785 /* On x86_64 the lea instruction operates on Pmode, so we need
15786 to get the arithmetic done in the proper mode to match. */
15788 tmp = copy_rtx (out);
15792 out1 = copy_rtx (out);
15793 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
15797 tmp = gen_rtx_PLUS (mode, tmp, out1);
15803 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
15806 if (!rtx_equal_p (tmp, out))
15809 out = force_operand (tmp, copy_rtx (out));
15811 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
15813 if (!rtx_equal_p (out, operands[0]))
15814 emit_move_insn (operands[0], copy_rtx (out));
15816 return 1; /* DONE */
15820 * General case: Jumpful:
15821 * xorl dest,dest cmpl op1, op2
15822 * cmpl op1, op2 movl ct, dest
15823 * setcc dest jcc 1f
15824 * decl dest movl cf, dest
15825 * andl (cf-ct),dest 1:
15828 * Size 20. Size 14.
15830 * This is reasonably steep, but branch mispredict costs are
15831 * high on modern cpus, so consider failing only if optimizing
15835 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15836 && BRANCH_COST (optimize_insn_for_speed_p (),
15841 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15846 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15848 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15850 /* We may be reversing an unordered compare to a normal compare,
15851 which is not valid in general (we may convert a non-trapping
15852 condition into a trapping one); however on i386 we currently
15853 emit all comparisons unordered. */
15854 code = reverse_condition_maybe_unordered (code);
15858 code = reverse_condition (code);
15859 if (compare_code != UNKNOWN)
15860 compare_code = reverse_condition (compare_code);
15864 if (compare_code != UNKNOWN)
15866 /* notl op1 (if needed)
15871 For x < 0 (resp. x <= -1) there will be no notl,
15872 so if possible swap the constants to get rid of the
15873 complement.
15874 True/false will be -1/0 while code below (store flag
15875 followed by decrement) is 0/-1, so the constants need
15876 to be exchanged once more. */
15878 if (compare_code == GE || !cf)
15880 code = reverse_condition (code);
15885 HOST_WIDE_INT tmp = cf;
15890 out = emit_store_flag (out, code, ix86_compare_op0,
15891 ix86_compare_op1, VOIDmode, 0, -1);
15895 out = emit_store_flag (out, code, ix86_compare_op0,
15896 ix86_compare_op1, VOIDmode, 0, 1);
15898 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
15899 copy_rtx (out), 1, OPTAB_DIRECT);
15902 out = expand_simple_binop (mode, AND, copy_rtx (out),
15903 gen_int_mode (cf - ct, mode),
15904 copy_rtx (out), 1, OPTAB_DIRECT);
15906 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
15907 copy_rtx (out), 1, OPTAB_DIRECT);
15908 if (!rtx_equal_p (out, operands[0]))
15909 emit_move_insn (operands[0], copy_rtx (out));
15911 return 1; /* DONE */
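  /* Following the sequence just emitted with, say, ct = 12 and
     cf = 7: setcc leaves 0/1 in out, the add of -1 turns that into
     -1/0, the and with (cf - ct) = -5 gives -5/0, and the final add
     of ct yields 7 or 12.  */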
15915 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15917 /* Try a few more things with specific constants and a variable. */
15920 rtx var, orig_out, out, tmp;
15922 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
15923 return 0; /* FAIL */
15925 /* If one of the two operands is an interesting constant, load the
15926 constants by recursing into the code above and mask the variable in with a logical operation. */
15928 if (CONST_INT_P (operands[2]))
15931 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
15932 operands[3] = constm1_rtx, op = and_optab;
15933 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
15934 operands[3] = const0_rtx, op = ior_optab;
15936 return 0; /* FAIL */
15938 else if (CONST_INT_P (operands[3]))
15941 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
15942 operands[2] = constm1_rtx, op = and_optab;
15943 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
15944 operands[2] = const0_rtx, op = ior_optab;
15946 return 0; /* FAIL */
15949 return 0; /* FAIL */
15951 orig_out = operands[0];
15952 tmp = gen_reg_rtx (mode);
15955 /* Recurse to get the constant loaded. */
15956 if (ix86_expand_int_movcc (operands) == 0)
15957 return 0; /* FAIL */
15959 /* Mask in the interesting variable. */
15960 out = expand_binop (mode, op, var, tmp, orig_out, 0,
15962 if (!rtx_equal_p (out, orig_out))
15963 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
15965 return 1; /* DONE */
15969 * For comparison with above,
15979 if (! nonimmediate_operand (operands[2], mode))
15980 operands[2] = force_reg (mode, operands[2]);
15981 if (! nonimmediate_operand (operands[3], mode))
15982 operands[3] = force_reg (mode, operands[3]);
15984 if (! register_operand (operands[2], VOIDmode)
15986 || ! register_operand (operands[3], VOIDmode)))
15987 operands[2] = force_reg (mode, operands[2]);
15990 && ! register_operand (operands[3], VOIDmode))
15991 operands[3] = force_reg (mode, operands[3]);
15993 emit_insn (compare_seq);
15994 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15995 gen_rtx_IF_THEN_ELSE (mode,
15996 compare_op, operands[2],
15999 return 1; /* DONE */
16002 /* Swap, force into registers, or otherwise massage the two operands
16003 to an sse comparison with a mask result. Thus we differ a bit from
16004 ix86_prepare_fp_compare_args which expects to produce a flags result.
16006 The DEST operand exists to help determine whether to commute commutative
16007 operators. The POP0/POP1 operands are updated in place. The new
16008 comparison code is returned, or UNKNOWN if not implementable. */
16010 static enum rtx_code
16011 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
16012 rtx *pop0, rtx *pop1)
16020 /* We have no LTGT as an operator. We could implement it with
16021 NE & ORDERED, but this requires an extra temporary. It's
16022 not clear that it's worth it. */
16029 /* These are supported directly. */
16036 /* For commutative operators, try to canonicalize the destination
16037 operand to be first in the comparison - this helps reload to
16038 avoid extra moves. */
16039 if (!dest || !rtx_equal_p (dest, *pop1))
16047 /* These are not supported directly. Swap the comparison operands
16048 to transform into something that is supported. */
16052 code = swap_condition (code);
16056 gcc_unreachable ();
16062 /* Detect conditional moves that exactly match min/max operational
16063 semantics. Note that this is IEEE safe, as long as we don't
16064 interchange the operands.
16066 Returns FALSE if this conditional move doesn't match a MIN/MAX,
16067 and TRUE if the operation is successful and instructions are emitted. */
16070 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
16071 rtx cmp_op1, rtx if_true, rtx if_false)
16073 enum machine_mode mode;
16079 else if (code == UNGE)
16082 if_true = if_false;
16088 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
16090 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
16095 mode = GET_MODE (dest);
16097 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
16098 but MODE may be a vector mode and thus not appropriate. */
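  /* The unspec used below is what keeps this IEEE-safe: the sse
     min/max instructions are not commutative (minps computes
     a < b ? a : b, returning the second operand when either input is
     a NaN), so the operand order picked above must not be
     interchanged by later passes.  */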
16099 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
16101 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
16104 if_true = force_reg (mode, if_true);
16105 v = gen_rtvec (2, if_true, if_false);
16106 tmp = gen_rtx_UNSPEC (mode, v, u);
16110 code = is_min ? SMIN : SMAX;
16111 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
16114 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
16118 /* Expand an sse vector comparison. Return the register with the result. */
16121 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
16122 rtx op_true, rtx op_false)
16124 enum machine_mode mode = GET_MODE (dest);
16127 cmp_op0 = force_reg (mode, cmp_op0);
16128 if (!nonimmediate_operand (cmp_op1, mode))
16129 cmp_op1 = force_reg (mode, cmp_op1);
16132 || reg_overlap_mentioned_p (dest, op_true)
16133 || reg_overlap_mentioned_p (dest, op_false))
16134 dest = gen_reg_rtx (mode);
16136 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
16137 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16142 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
16143 operations. This is used for both scalar and vector conditional moves. */
16146 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
16148 enum machine_mode mode = GET_MODE (dest);
16151 if (op_false == CONST0_RTX (mode))
16153 op_true = force_reg (mode, op_true);
16154 x = gen_rtx_AND (mode, cmp, op_true);
16155 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16157 else if (op_true == CONST0_RTX (mode))
16159 op_false = force_reg (mode, op_false);
16160 x = gen_rtx_NOT (mode, cmp);
16161 x = gen_rtx_AND (mode, x, op_false);
16162 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16164 else if (TARGET_XOP)
16166 rtx pcmov = gen_rtx_SET (mode, dest,
16167 gen_rtx_IF_THEN_ELSE (mode, cmp,
16174 op_true = force_reg (mode, op_true);
16175 op_false = force_reg (mode, op_false);
16177 t2 = gen_reg_rtx (mode);
16179 t3 = gen_reg_rtx (mode);
16183 x = gen_rtx_AND (mode, op_true, cmp);
16184 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
16186 x = gen_rtx_NOT (mode, cmp);
16187 x = gen_rtx_AND (mode, x, op_false);
16188 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
16190 x = gen_rtx_IOR (mode, t3, t2);
16191 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
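/* Written out as C, the general three-instruction case above computes

     dest = (cmp & op_true) | (~cmp & op_false);

   which picks op_true wherever the comparison produced all-ones and
   op_false wherever it produced all-zeros.  */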
16195 /* Expand a floating-point conditional move. Return true if successful. */
16198 ix86_expand_fp_movcc (rtx operands[])
16200 enum machine_mode mode = GET_MODE (operands[0]);
16201 enum rtx_code code = GET_CODE (operands[1]);
16202 rtx tmp, compare_op;
16204 ix86_compare_op0 = XEXP (operands[1], 0);
16205 ix86_compare_op1 = XEXP (operands[1], 1);
16206 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
16208 enum machine_mode cmode;
16210 /* Since we have no cmove for sse registers, don't force bad register
16211 allocation just to gain access to it. Deny movcc when the
16212 comparison mode doesn't match the move mode. */
16213 cmode = GET_MODE (ix86_compare_op0);
16214 if (cmode == VOIDmode)
16215 cmode = GET_MODE (ix86_compare_op1);
16219 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16221 &ix86_compare_op1);
16222 if (code == UNKNOWN)
16225 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
16226 ix86_compare_op1, operands[2],
16230 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
16231 ix86_compare_op1, operands[2], operands[3]);
16232 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
16236 /* The floating point conditional move instructions don't directly
16237 support conditions resulting from a signed integer comparison. */
16239 compare_op = ix86_expand_compare (code);
16240 if (!fcmov_comparison_operator (compare_op, VOIDmode))
16242 tmp = gen_reg_rtx (QImode);
16243 ix86_expand_setcc (code, tmp);
16245 ix86_compare_op0 = tmp;
16246 ix86_compare_op1 = const0_rtx;
16247 compare_op = ix86_expand_compare (code);
16250 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16251 gen_rtx_IF_THEN_ELSE (mode, compare_op,
16252 operands[2], operands[3])));
16257 /* Expand a floating-point vector conditional move; a vcond operation
16258 rather than a movcc operation. */
16261 ix86_expand_fp_vcond (rtx operands[])
16263 enum rtx_code code = GET_CODE (operands[3]);
16266 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16267 &operands[4], &operands[5]);
16268 if (code == UNKNOWN)
16271 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
16272 operands[5], operands[1], operands[2]))
16275 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
16276 operands[1], operands[2]);
16277 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
16281 /* Expand a signed/unsigned integral vector conditional move. */
16284 ix86_expand_int_vcond (rtx operands[])
16286 enum machine_mode mode = GET_MODE (operands[0]);
16287 enum rtx_code code = GET_CODE (operands[3]);
16288 bool negate = false;
16291 cop0 = operands[4];
16292 cop1 = operands[5];
16294 /* XOP supports all of the comparisons on all vector int types. */
16297 /* Canonicalize the comparison to EQ, GT, GTU. */
16308 code = reverse_condition (code);
16314 code = reverse_condition (code);
16320 code = swap_condition (code);
16321 x = cop0, cop0 = cop1, cop1 = x;
16325 gcc_unreachable ();
16328 /* Only SSE4.1/SSE4.2 supports V2DImode. */
16329 if (mode == V2DImode)
16334 /* SSE4.1 supports EQ. */
16335 if (!TARGET_SSE4_1)
16341 /* SSE4.2 supports GT/GTU. */
16342 if (!TARGET_SSE4_2)
16347 gcc_unreachable ();
16351 /* Unsigned parallel compare is not supported by the hardware.
16352 Play some tricks to turn this into a signed comparison
16356 cop0 = force_reg (mode, cop0);
16364 rtx (*gen_sub3) (rtx, rtx, rtx);
16366 /* Subtract (-(INT MAX) - 1) from both operands to make
16368 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
16370 gen_sub3 = (mode == V4SImode
16371 ? gen_subv4si3 : gen_subv2di3);
16372 t1 = gen_reg_rtx (mode);
16373 emit_insn (gen_sub3 (t1, cop0, mask));
16375 t2 = gen_reg_rtx (mode);
16376 emit_insn (gen_sub3 (t2, cop1, mask));
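	  /* For V4SImode, for instance, the mask is four copies of
	     0x80000000; subtracting it merely flips each element's
	     sign bit, so x >u y turns into
	     (x ^ 0x80000000) >s (y ^ 0x80000000) -- biasing both
	     operands by INT_MIN maps unsigned order onto signed
	     order.  */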
16386 /* Perform a parallel unsigned saturating subtraction. */
16387 x = gen_reg_rtx (mode);
16388 emit_insn (gen_rtx_SET (VOIDmode, x,
16389 gen_rtx_US_MINUS (mode, cop0, cop1)));
16392 cop1 = CONST0_RTX (mode);
16398 gcc_unreachable ();
16403 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
16404 operands[1+negate], operands[2-negate]);
16406 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
16407 operands[2-negate]);
16411 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
16412 true if we should do zero extension, else sign extension. HIGH_P is
16413 true if we want the N/2 high elements, else the low elements. */
16416 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16418 enum machine_mode imode = GET_MODE (operands[1]);
16419 rtx (*unpack)(rtx, rtx, rtx);
16426 unpack = gen_vec_interleave_highv16qi;
16428 unpack = gen_vec_interleave_lowv16qi;
16432 unpack = gen_vec_interleave_highv8hi;
16434 unpack = gen_vec_interleave_lowv8hi;
16438 unpack = gen_vec_interleave_highv4si;
16440 unpack = gen_vec_interleave_lowv4si;
16443 gcc_unreachable ();
16446 dest = gen_lowpart (imode, operands[0]);
16449 se = force_reg (imode, CONST0_RTX (imode));
16451 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
16452 operands[1], pc_rtx, pc_rtx);
16454 emit_insn (unpack (dest, operands[1], se));
16457 /* This function performs the same task as ix86_expand_sse_unpack,
16458 but with SSE4.1 instructions. */
16461 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16463 enum machine_mode imode = GET_MODE (operands[1]);
16464 rtx (*unpack)(rtx, rtx);
16471 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
16473 unpack = gen_sse4_1_extendv8qiv8hi2;
16477 unpack = gen_sse4_1_zero_extendv4hiv4si2;
16479 unpack = gen_sse4_1_extendv4hiv4si2;
16483 unpack = gen_sse4_1_zero_extendv2siv2di2;
16485 unpack = gen_sse4_1_extendv2siv2di2;
16488 gcc_unreachable ();
16491 dest = operands[0];
16494 /* Shift the higher 8 bytes into the lower 8 bytes. */
16495 src = gen_reg_rtx (imode);
16496 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, src),
16497 gen_lowpart (V1TImode, operands[1]),
16503 emit_insn (unpack (dest, src));
16506 /* Expand conditional increment or decrement using adc/sbb instructions.
16507 The default case using setcc followed by the conditional move can be
16508 done by generic code. */
16510 ix86_expand_int_addcc (rtx operands[])
16512 enum rtx_code code = GET_CODE (operands[1]);
16514 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
16516 rtx val = const0_rtx;
16517 bool fpcmp = false;
16518 enum machine_mode mode;
16520 ix86_compare_op0 = XEXP (operands[1], 0);
16521 ix86_compare_op1 = XEXP (operands[1], 1);
16522 if (operands[3] != const1_rtx
16523 && operands[3] != constm1_rtx)
16525 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
16526 ix86_compare_op1, &compare_op))
16528 code = GET_CODE (compare_op);
16530 flags = XEXP (compare_op, 0);
16532 if (GET_MODE (flags) == CCFPmode
16533 || GET_MODE (flags) == CCFPUmode)
16536 code = ix86_fp_compare_code_to_integer (code);
16543 PUT_CODE (compare_op,
16544 reverse_condition_maybe_unordered
16545 (GET_CODE (compare_op)));
16547 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
16550 mode = GET_MODE (operands[0]);
16552 /* Construct either adc or sbb insn. */
16553 if ((code == LTU) == (operands[3] == constm1_rtx))
16558 insn = gen_subqi3_carry;
16561 insn = gen_subhi3_carry;
16564 insn = gen_subsi3_carry;
16567 insn = gen_subdi3_carry;
16570 gcc_unreachable ();
16578 insn = gen_addqi3_carry;
16581 insn = gen_addhi3_carry;
16584 insn = gen_addsi3_carry;
16587 insn = gen_adddi3_carry;
16590 gcc_unreachable ();
16593 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
16595 return 1; /* DONE */
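/* For example, on a 32-bit target "x += (a < b);" with unsigned
   operands can come out of this path as

	cmpl	%esi, %edi	; carry set iff a < b
	adcl	$0, %eax	; x += carry

   (with a in %edi, b in %esi and x in %eax); the comparison's carry
   flag feeds straight into the add-with-carry, with no setcc and no
   branch.  */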
16599 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
16600 works for floating point parameters and non-offsettable memories.
16601 For pushes, it returns just stack offsets; the values will be saved
16602 in the right order. Up to four parts are generated.
16605 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
16610 size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
16612 size = (GET_MODE_SIZE (mode) + 4) / 8;
16614 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
16615 gcc_assert (size >= 2 && size <= 4);
16617 /* Optimize constant pool references to immediates. This is used by fp
16618 moves, which force all constants to memory to allow combining. */
16619 if (MEM_P (operand) && MEM_READONLY_P (operand))
16621 rtx tmp = maybe_get_pool_constant (operand);
16626 if (MEM_P (operand) && !offsettable_memref_p (operand))
16628 /* The only non-offsettable memories we handle are pushes. */
16629 int ok = push_operand (operand, VOIDmode);
16633 operand = copy_rtx (operand);
16634 PUT_MODE (operand, Pmode);
16635 parts[0] = parts[1] = parts[2] = parts[3] = operand;
16639 if (GET_CODE (operand) == CONST_VECTOR)
16641 enum machine_mode imode = int_mode_for_mode (mode);
16642 /* Caution: if we looked through a constant pool memory above,
16643 the operand may actually have a different mode now. That's
16644 ok, since we want to pun this all the way back to an integer. */
16645 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
16646 gcc_assert (operand != NULL);
16652 if (mode == DImode)
16653 split_di (&operand, 1, &parts[0], &parts[1]);
16658 if (REG_P (operand))
16660 gcc_assert (reload_completed);
16661 for (i = 0; i < size; i++)
16662 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
16664 else if (offsettable_memref_p (operand))
16666 operand = adjust_address (operand, SImode, 0);
16667 parts[0] = operand;
16668 for (i = 1; i < size; i++)
16669 parts[i] = adjust_address (operand, SImode, 4 * i);
16671 else if (GET_CODE (operand) == CONST_DOUBLE)
16676 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16680 real_to_target (l, &r, mode);
16681 parts[3] = gen_int_mode (l[3], SImode);
16682 parts[2] = gen_int_mode (l[2], SImode);
16685 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
16686 parts[2] = gen_int_mode (l[2], SImode);
16689 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
16692 gcc_unreachable ();
16694 parts[1] = gen_int_mode (l[1], SImode);
16695 parts[0] = gen_int_mode (l[0], SImode);
16698 gcc_unreachable ();
16703 if (mode == TImode)
16704 split_ti (&operand, 1, &parts[0], &parts[1]);
16705 if (mode == XFmode || mode == TFmode)
16707 enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
16708 if (REG_P (operand))
16710 gcc_assert (reload_completed);
16711 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
16712 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
16714 else if (offsettable_memref_p (operand))
16716 operand = adjust_address (operand, DImode, 0);
16717 parts[0] = operand;
16718 parts[1] = adjust_address (operand, upper_mode, 8);
16720 else if (GET_CODE (operand) == CONST_DOUBLE)
16725 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16726 real_to_target (l, &r, mode);
16728 /* Do not use a shift by 32, to avoid a warning on 32-bit systems. */
16729 if (HOST_BITS_PER_WIDE_INT >= 64)
16732 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
16733 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
16736 parts[0] = immed_double_const (l[0], l[1], DImode);
16738 if (upper_mode == SImode)
16739 parts[1] = gen_int_mode (l[2], SImode);
16740 else if (HOST_BITS_PER_WIDE_INT >= 64)
16743 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
16744 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
16747 parts[1] = immed_double_const (l[2], l[3], DImode);
16750 gcc_unreachable ();
16757 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
16758 Return false when normal moves are needed; true when all required
16759 insns have been emitted. Operands 2-5 receive the destination parts,
16760 in the correct order; operands 6-9 receive the source parts. */
16763 ix86_split_long_move (rtx operands[])
16768 int collisions = 0;
16769 enum machine_mode mode = GET_MODE (operands[0]);
16770 bool collisionparts[4];
16772 /* The DFmode expanders may ask us to move a double.
16773 For a 64-bit target this is a single move. By hiding the fact
16774 here we simplify the i386.md splitters. */
16775 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
16777 /* Optimize constant pool references to immediates. This is used by
16778 fp moves, which force all constants to memory to allow combining. */
16780 if (MEM_P (operands[1])
16781 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
16782 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
16783 operands[1] = get_pool_constant (XEXP (operands[1], 0));
16784 if (push_operand (operands[0], VOIDmode))
16786 operands[0] = copy_rtx (operands[0]);
16787 PUT_MODE (operands[0], Pmode);
16790 operands[0] = gen_lowpart (DImode, operands[0]);
16791 operands[1] = gen_lowpart (DImode, operands[1]);
16792 emit_move_insn (operands[0], operands[1]);
16796 /* The only non-offsettable memory we handle is a push. */
16797 if (push_operand (operands[0], VOIDmode))
16800 gcc_assert (!MEM_P (operands[0])
16801 || offsettable_memref_p (operands[0]));
16803 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
16804 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
16806 /* When emitting a push, take care with source operands on the stack. */
16807 if (push && MEM_P (operands[1])
16808 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
16810 rtx src_base = XEXP (part[1][nparts - 1], 0);
16812 /* Compensate for the stack decrement by 4. */
16813 if (!TARGET_64BIT && nparts == 3
16814 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
16815 src_base = plus_constant (src_base, 4);
16817 /* src_base refers to the stack pointer and is
16818 automatically decreased by the emitted pushes. */
16819 for (i = 0; i < nparts; i++)
16820 part[1][i] = change_address (part[1][i],
16821 GET_MODE (part[1][i]), src_base);
16824 /* We need to do the copy in the right order in case an address register
16825 of the source overlaps the destination. */
16826 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
16830 for (i = 0; i < nparts; i++)
16833 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
16834 if (collisionparts[i])
16838 /* Collision in the middle part can be handled by reordering. */
16839 if (collisions == 1 && nparts == 3 && collisionparts [1])
16841 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16842 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16844 else if (collisions == 1
16846 && (collisionparts [1] || collisionparts [2]))
16848 if (collisionparts [1])
16850 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16851 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16855 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
16856 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
16860 /* If there are more collisions, we can't handle them by reordering.
16861 Do an lea to the last part and use only one colliding move. */
16862 else if (collisions > 1)
16868 base = part[0][nparts - 1];
16870 /* Handle the case when the last part isn't valid for lea.
16871 This happens in 64-bit mode when storing the 12-byte XFmode. */
16872 if (GET_MODE (base) != Pmode)
16873 base = gen_rtx_REG (Pmode, REGNO (base));
16875 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
16876 part[1][0] = replace_equiv_address (part[1][0], base);
16877 for (i = 1; i < nparts; i++)
16879 tmp = plus_constant (base, UNITS_PER_WORD * i);
16880 part[1][i] = replace_equiv_address (part[1][i], tmp);
16891 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
16892 emit_insn (gen_addsi3 (stack_pointer_rtx,
16893 stack_pointer_rtx, GEN_INT (-4)));
16894 emit_move_insn (part[0][2], part[1][2]);
16896 else if (nparts == 4)
16898 emit_move_insn (part[0][3], part[1][3]);
16899 emit_move_insn (part[0][2], part[1][2]);
16904 /* In 64-bit mode we don't have a 32-bit push available. If this is a
16905 register, that is OK - we will just use the larger counterpart. We also
16906 retype memories - these come from an attempt to avoid the REX prefix on
16907 moving the second half of a TFmode value. */
16908 if (GET_MODE (part[1][1]) == SImode)
16910 switch (GET_CODE (part[1][1]))
16913 part[1][1] = adjust_address (part[1][1], DImode, 0);
16917 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
16921 gcc_unreachable ();
16924 if (GET_MODE (part[1][0]) == SImode)
16925 part[1][0] = part[1][1];
16928 emit_move_insn (part[0][1], part[1][1]);
16929 emit_move_insn (part[0][0], part[1][0]);
16933 /* Choose the correct order so as not to overwrite the source before it is copied. */
16934 if ((REG_P (part[0][0])
16935 && REG_P (part[1][1])
16936 && (REGNO (part[0][0]) == REGNO (part[1][1])
16938 && REGNO (part[0][0]) == REGNO (part[1][2]))
16940 && REGNO (part[0][0]) == REGNO (part[1][3]))))
16942 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
16944 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
16946 operands[2 + i] = part[0][j];
16947 operands[6 + i] = part[1][j];
16952 for (i = 0; i < nparts; i++)
16954 operands[2 + i] = part[0][i];
16955 operands[6 + i] = part[1][i];
16959 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
16960 if (optimize_insn_for_size_p ())
16962 for (j = 0; j < nparts - 1; j++)
16963 if (CONST_INT_P (operands[6 + j])
16964 && operands[6 + j] != const0_rtx
16965 && REG_P (operands[2 + j]))
16966 for (i = j; i < nparts - 1; i++)
16967 if (CONST_INT_P (operands[7 + i])
16968 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
16969 operands[7 + i] = operands[2 + j];
16972 for (i = 0; i < nparts; i++)
16973 emit_move_insn (operands[2 + i], operands[6 + i]);
16978 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
16979 left shift by a constant, either using a single shift or
16980 a sequence of add instructions. */
16983 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
16987 emit_insn ((mode == DImode
16989 : gen_adddi3) (operand, operand, operand));
16991 else if (!optimize_insn_for_size_p ()
16992 && count * ix86_cost->add <= ix86_cost->shift_const)
16995 for (i = 0; i < count; i++)
16997 emit_insn ((mode == DImode
16999 : gen_adddi3) (operand, operand, operand));
17003 emit_insn ((mode == DImode
17005 : gen_ashldi3) (operand, operand, GEN_INT (count)));
17009 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
17011 rtx low[2], high[2];
17013 const int single_width = mode == DImode ? 32 : 64;
17015 if (CONST_INT_P (operands[2]))
17017 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17018 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17020 if (count >= single_width)
17022 emit_move_insn (high[0], low[1]);
17023 emit_move_insn (low[0], const0_rtx);
17025 if (count > single_width)
17026 ix86_expand_ashl_const (high[0], count - single_width, mode);
17030 if (!rtx_equal_p (operands[0], operands[1]))
17031 emit_move_insn (operands[0], operands[1]);
17032 emit_insn ((mode == DImode
17034 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
17035 ix86_expand_ashl_const (low[0], count, mode);
17040 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17042 if (operands[1] == const1_rtx)
17044 /* Assuming we've chosen QImode-capable registers, 1 << N
17045 can be done with two 32/64-bit shifts, no branches, no cmoves. */
17046 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
17048 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
17050 ix86_expand_clear (low[0]);
17051 ix86_expand_clear (high[0]);
17052 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
17054 d = gen_lowpart (QImode, low[0]);
17055 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17056 s = gen_rtx_EQ (QImode, flags, const0_rtx);
17057 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17059 d = gen_lowpart (QImode, high[0]);
17060 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17061 s = gen_rtx_NE (QImode, flags, const0_rtx);
17062 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17065 /* Otherwise, we can get the same results by manually performing
17066 a bit extract operation on bit 5/6, and then performing the two
17067 shifts. The two methods of getting 0/1 into low/high are exactly
17068 the same size. Avoiding the shift in the bit extract case helps
17069 pentium4 a bit; no one else seems to care much either way. */
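	  /* Concretely, for a 64-bit shift on a 32-bit target the
	     code below copies the shift count, extracts bit 5
	     (count & 32) into high, sets low = high ^ 1, and then
	     shifts both halves by the count (mod 32): for
	     count >= 32 the single set bit belongs in the high word,
	     otherwise in the low word.  */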
17074 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
17075 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
17077 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
17078 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
17080 emit_insn ((mode == DImode
17082 : gen_lshrdi3) (high[0], high[0],
17083 GEN_INT (mode == DImode ? 5 : 6)));
17084 emit_insn ((mode == DImode
17086 : gen_anddi3) (high[0], high[0], const1_rtx));
17087 emit_move_insn (low[0], high[0]);
17088 emit_insn ((mode == DImode
17090 : gen_xordi3) (low[0], low[0], const1_rtx));
17093 emit_insn ((mode == DImode
17095 : gen_ashldi3) (low[0], low[0], operands[2]));
17096 emit_insn ((mode == DImode
17098 : gen_ashldi3) (high[0], high[0], operands[2]));
17102 if (operands[1] == constm1_rtx)
17104 /* For -1 << N, we can avoid the shld instruction, because we
17105 know that we're shifting 0...31/63 ones into a -1. */
17106 emit_move_insn (low[0], constm1_rtx);
17107 if (optimize_insn_for_size_p ())
17108 emit_move_insn (high[0], low[0]);
17110 emit_move_insn (high[0], constm1_rtx);
17114 if (!rtx_equal_p (operands[0], operands[1]))
17115 emit_move_insn (operands[0], operands[1]);
17117 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17118 emit_insn ((mode == DImode
17120 : gen_x86_64_shld) (high[0], low[0], operands[2]));
17123 emit_insn ((mode == DImode
17125 : gen_ashldi3) (low[0], low[0], operands[2]));
17127 if (TARGET_CMOVE && scratch)
17129 ix86_expand_clear (scratch);
17130 emit_insn ((mode == DImode
17131 ? gen_x86_shiftsi_adj_1
17132 : gen_x86_shiftdi_adj_1) (high[0], low[0], operands[2],
17136 emit_insn ((mode == DImode
17137 ? gen_x86_shiftsi_adj_2
17138 : gen_x86_shiftdi_adj_2) (high[0], low[0], operands[2]));
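/* For a variable count the sequence just emitted is, in outline
   (32-bit case):

	shldl	%cl, low, high
	sall	%cl, low
	testb	$32, %cl
	; if the count was 32..63:  high = low, low = 0
	; (via cmovs when available, otherwise a conditional branch)

   since the hardware shifts only look at the low 5 (or 6) bits of
   the count.  */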
17142 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
17144 rtx low[2], high[2];
17146 const int single_width = mode == DImode ? 32 : 64;
17148 if (CONST_INT_P (operands[2]))
17150 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17151 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17153 if (count == single_width * 2 - 1)
17155 emit_move_insn (high[0], high[1]);
17156 emit_insn ((mode == DImode
17158 : gen_ashrdi3) (high[0], high[0],
17159 GEN_INT (single_width - 1)));
17160 emit_move_insn (low[0], high[0]);
17163 else if (count >= single_width)
17165 emit_move_insn (low[0], high[1]);
17166 emit_move_insn (high[0], low[0]);
17167 emit_insn ((mode == DImode
17169 : gen_ashrdi3) (high[0], high[0],
17170 GEN_INT (single_width - 1)));
17171 if (count > single_width)
17172 emit_insn ((mode == DImode
17174 : gen_ashrdi3) (low[0], low[0],
17175 GEN_INT (count - single_width)));
17179 if (!rtx_equal_p (operands[0], operands[1]))
17180 emit_move_insn (operands[0], operands[1]);
17181 emit_insn ((mode == DImode
17183 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17184 emit_insn ((mode == DImode
17186 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
17191 if (!rtx_equal_p (operands[0], operands[1]))
17192 emit_move_insn (operands[0], operands[1]);
17194 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17196 emit_insn ((mode == DImode
17198 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17199 emit_insn ((mode == DImode
17201 : gen_ashrdi3) (high[0], high[0], operands[2]));
17203 if (TARGET_CMOVE && scratch)
17205 emit_move_insn (scratch, high[0]);
17206 emit_insn ((mode == DImode
17208 : gen_ashrdi3) (scratch, scratch,
17209 GEN_INT (single_width - 1)));
17210 emit_insn ((mode == DImode
17211 ? gen_x86_shiftsi_adj_1
17212 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17216 emit_insn ((mode == DImode
17217 ? gen_x86_shiftsi_adj_3
17218 : gen_x86_shiftdi_adj_3) (low[0], high[0], operands[2]));
17223 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
17225 rtx low[2], high[2];
17227 const int single_width = mode == DImode ? 32 : 64;
17229 if (CONST_INT_P (operands[2]))
17231 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17232 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17234 if (count >= single_width)
17236 emit_move_insn (low[0], high[1]);
17237 ix86_expand_clear (high[0]);
17239 if (count > single_width)
17240 emit_insn ((mode == DImode
17242 : gen_lshrdi3) (low[0], low[0],
17243 GEN_INT (count - single_width)));
17247 if (!rtx_equal_p (operands[0], operands[1]))
17248 emit_move_insn (operands[0], operands[1]);
17249 emit_insn ((mode == DImode
17251 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17252 emit_insn ((mode == DImode
17254 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
17259 if (!rtx_equal_p (operands[0], operands[1]))
17260 emit_move_insn (operands[0], operands[1]);
17262 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17264 emit_insn ((mode == DImode
17266 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17267 emit_insn ((mode == DImode
17269 : gen_lshrdi3) (high[0], high[0], operands[2]));
17271 /* Heh. By reversing the arguments, we can reuse this pattern. */
17272 if (TARGET_CMOVE && scratch)
17274 ix86_expand_clear (scratch);
17275 emit_insn ((mode == DImode
17276 ? gen_x86_shiftsi_adj_1
17277 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17281 emit_insn ((mode == DImode
17282 ? gen_x86_shiftsi_adj_2
17283 : gen_x86_shiftdi_adj_2) (low[0], high[0], operands[2]));
17287 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
17289 predict_jump (int prob)
17291 rtx insn = get_last_insn ();
17292 gcc_assert (JUMP_P (insn));
17293 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
17296 /* Helper function for the string operations below. Test whether VARIABLE
17297 is aligned to VALUE bytes. If so, jump to the label. */
17299 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
17301 rtx label = gen_label_rtx ();
17302 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
17303 if (GET_MODE (variable) == DImode)
17304 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
17306 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
17307 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
17310 predict_jump (REG_BR_PROB_BASE * 50 / 100);
17312 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17316 /* Decrease COUNTREG by VALUE. */
17318 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
17320 if (GET_MODE (countreg) == DImode)
17321 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
17323 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
17326 /* Zero-extend a possibly-SImode EXP to a Pmode register. */
17328 ix86_zero_extend_to_Pmode (rtx exp)
17331 if (GET_MODE (exp) == VOIDmode)
17332 return force_reg (Pmode, exp);
17333 if (GET_MODE (exp) == Pmode)
17334 return copy_to_mode_reg (Pmode, exp);
17335 r = gen_reg_rtx (Pmode);
17336 emit_insn (gen_zero_extendsidi2 (r, exp));
17340 /* Divide COUNTREG by SCALE. */
17342 scale_counter (rtx countreg, int scale)
17348 if (CONST_INT_P (countreg))
17349 return GEN_INT (INTVAL (countreg) / scale);
17350 gcc_assert (REG_P (countreg));
17352 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
17353 GEN_INT (exact_log2 (scale)),
17354 NULL, 1, OPTAB_DIRECT);
17358 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
17359 DImode for constant loop counts. */
17361 static enum machine_mode
17362 counter_mode (rtx count_exp)
17364 if (GET_MODE (count_exp) != VOIDmode)
17365 return GET_MODE (count_exp);
17366 if (!CONST_INT_P (count_exp))
17368 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
17373 /* When SRCPTR is non-NULL, output a simple loop to move memory
17374 pointed to by SRCPTR to DESTPTR via chunks of MODE, unrolled UNROLL times;
17375 the overall size is COUNT, specified in bytes. When SRCPTR is NULL, output the
17376 equivalent loop to set memory to VALUE (assumed to be in MODE).
17378 The size is rounded down to a whole number of chunks moved at once.
17379 SRCMEM and DESTMEM provide MEM rtxes to feed proper aliasing info. */
17383 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
17384 rtx destptr, rtx srcptr, rtx value,
17385 rtx count, enum machine_mode mode, int unroll,
17388 rtx out_label, top_label, iter, tmp;
17389 enum machine_mode iter_mode = counter_mode (count);
17390 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
17391 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
17397 top_label = gen_label_rtx ();
17398 out_label = gen_label_rtx ();
17399 iter = gen_reg_rtx (iter_mode);
17401 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
17402 NULL, 1, OPTAB_DIRECT);
17403 /* Those two should combine. */
17404 if (piece_size == const1_rtx)
17406 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
17408 predict_jump (REG_BR_PROB_BASE * 10 / 100);
17410 emit_move_insn (iter, const0_rtx);
17412 emit_label (top_label);
17414 tmp = convert_modes (Pmode, iter_mode, iter, true);
17415 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
17416 destmem = change_address (destmem, mode, x_addr);
17420 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
17421 srcmem = change_address (srcmem, mode, y_addr);
17423 /* When unrolling for chips that reorder memory reads and writes,
17424 we can save registers by using a single temporary.
17425 Also, using 4 temporaries is overkill in 32-bit mode. */
17426 if (!TARGET_64BIT && 0)
17428 for (i = 0; i < unroll; i++)
17433 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17435 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17437 emit_move_insn (destmem, srcmem);
17443 gcc_assert (unroll <= 4);
17444 for (i = 0; i < unroll; i++)
17446 tmpreg[i] = gen_reg_rtx (mode);
17450 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17452 emit_move_insn (tmpreg[i], srcmem);
17454 for (i = 0; i < unroll; i++)
17459 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17461 emit_move_insn (destmem, tmpreg[i]);
17466 for (i = 0; i < unroll; i++)
17470 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17471 emit_move_insn (destmem, value);
17474 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
17475 true, OPTAB_LIB_WIDEN);
17477 emit_move_insn (iter, tmp);
17479 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
17481 if (expected_size != -1)
17483 expected_size /= GET_MODE_SIZE (mode) * unroll;
17484 if (expected_size == 0)
17486 else if (expected_size > REG_BR_PROB_BASE)
17487 predict_jump (REG_BR_PROB_BASE - 1);
17489 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
17492 predict_jump (REG_BR_PROB_BASE * 80 / 100);
17493 iter = ix86_zero_extend_to_Pmode (iter);
17494 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
17495 true, OPTAB_LIB_WIDEN);
17496 if (tmp != destptr)
17497 emit_move_insn (destptr, tmp);
17500 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
17501 true, OPTAB_LIB_WIDEN);
17503 emit_move_insn (srcptr, tmp);
17505 emit_label (out_label);
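/* In outline, the loop emitted above has the shape

     size = count & ~(piece_size * unroll - 1);
     iter = 0;
   top:
     copy (or store) UNROLL chunks of MODE at destptr + iter;
     iter += piece_size * unroll;
     if (iter < size) goto top;
     destptr += iter;  (and srcptr += iter, when copying)

   so the pointers are left past the processed bytes and the epilogue
   only has to handle the remainder.  */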
17508 /* Output a "rep; mov" instruction.
17509 Arguments have the same meaning as for the previous function. */
17511 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
17512 rtx destptr, rtx srcptr,
17514 enum machine_mode mode)
17520 /* If the size is known, it is shorter to use rep movs. */
17521 if (mode == QImode && CONST_INT_P (count)
17522 && !(INTVAL (count) & 3))
17525 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17526 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17527 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
17528 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
17529 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17530 if (mode != QImode)
17532 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17533 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17534 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17535 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
17536 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17537 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
17541 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17542 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
17544 if (CONST_INT_P (count))
17546 count = GEN_INT (INTVAL (count)
17547 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17548 destmem = shallow_copy_rtx (destmem);
17549 srcmem = shallow_copy_rtx (srcmem);
17550 set_mem_size (destmem, count);
17551 set_mem_size (srcmem, count);
17555 if (MEM_SIZE (destmem))
17556 set_mem_size (destmem, NULL_RTX);
17557 if (MEM_SIZE (srcmem))
17558 set_mem_size (srcmem, NULL_RTX);
17560 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
17564 /* Output a "rep; stos" instruction.
17565 Arguments have the same meaning as for the previous function. */
17567 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
17568 rtx count, enum machine_mode mode,
17574 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17575 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17576 value = force_reg (mode, gen_lowpart (mode, value));
17577 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17578 if (mode != QImode)
17580 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17581 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17582 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17585 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17586 if (orig_value == const0_rtx && CONST_INT_P (count))
17588 count = GEN_INT (INTVAL (count)
17589 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17590 destmem = shallow_copy_rtx (destmem);
17591 set_mem_size (destmem, count);
17593 else if (MEM_SIZE (destmem))
17594 set_mem_size (destmem, NULL_RTX);
17595 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
17599 emit_strmov (rtx destmem, rtx srcmem,
17600 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
17602 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
17603 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
17604 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17607 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
17609 expand_movmem_epilogue (rtx destmem, rtx srcmem,
17610 rtx destptr, rtx srcptr, rtx count, int max_size)
17613 if (CONST_INT_P (count))
17615 HOST_WIDE_INT countval = INTVAL (count);
17618 if ((countval & 0x10) && max_size > 16)
17622 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17623 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
17626 gcc_unreachable ();
17629 if ((countval & 0x08) && max_size > 8)
17632 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17635 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17636 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
17640 if ((countval & 0x04) && max_size > 4)
17642 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17645 if ((countval & 0x02) && max_size > 2)
17647 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
17650 if ((countval & 0x01) && max_size > 1)
17652 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
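      /* For a known remainder of, say, 13 bytes (binary 1101) this
	 path emits one 8-byte move (done as two 4-byte moves on
	 32-bit targets), one 4-byte move and one 1-byte move, walking
	 the set bits of the count from high to low.  */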
17659 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
17660 count, 1, OPTAB_DIRECT);
17661 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
17662 count, QImode, 1, 4);
17666 /* When there are stringops, we can cheaply increase dest and src pointers.
17667 Otherwise we save code size by maintaining an offset (zero is readily
17668 available from the preceding rep operation) and using x86 addressing modes.
17670 if (TARGET_SINGLE_STRINGOP)
17674 rtx label = ix86_expand_aligntest (count, 4, true);
17675 src = change_address (srcmem, SImode, srcptr);
17676 dest = change_address (destmem, SImode, destptr);
17677 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17678 emit_label (label);
17679 LABEL_NUSES (label) = 1;
17683 rtx label = ix86_expand_aligntest (count, 2, true);
17684 src = change_address (srcmem, HImode, srcptr);
17685 dest = change_address (destmem, HImode, destptr);
17686 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17687 emit_label (label);
17688 LABEL_NUSES (label) = 1;
17692 rtx label = ix86_expand_aligntest (count, 1, true);
17693 src = change_address (srcmem, QImode, srcptr);
17694 dest = change_address (destmem, QImode, destptr);
17695 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17696 emit_label (label);
17697 LABEL_NUSES (label) = 1;
17702 rtx offset = force_reg (Pmode, const0_rtx);
17707 rtx label = ix86_expand_aligntest (count, 4, true);
17708 src = change_address (srcmem, SImode, srcptr);
17709 dest = change_address (destmem, SImode, destptr);
17710 emit_move_insn (dest, src);
17711 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
17712 true, OPTAB_LIB_WIDEN);
17714 emit_move_insn (offset, tmp);
17715 emit_label (label);
17716 LABEL_NUSES (label) = 1;
17720 rtx label = ix86_expand_aligntest (count, 2, true);
17721 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17722 src = change_address (srcmem, HImode, tmp);
17723 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17724 dest = change_address (destmem, HImode, tmp);
17725 emit_move_insn (dest, src);
17726 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
17727 true, OPTAB_LIB_WIDEN);
17729 emit_move_insn (offset, tmp);
17730 emit_label (label);
17731 LABEL_NUSES (label) = 1;
17735 rtx label = ix86_expand_aligntest (count, 1, true);
17736 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17737 src = change_address (srcmem, QImode, tmp);
17738 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17739 dest = change_address (destmem, QImode, tmp);
17740 emit_move_insn (dest, src);
17741 emit_label (label);
17742 LABEL_NUSES (label) = 1;
17747 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17749 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
17750 rtx count, int max_size)
17753 expand_simple_binop (counter_mode (count), AND, count,
17754 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
17755 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
17756 gen_lowpart (QImode, value), count, QImode,
17760 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17762 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
17766 if (CONST_INT_P (count))
17768 HOST_WIDE_INT countval = INTVAL (count);
17771 if ((countval & 0x10) && max_size > 16)
17775 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17776 emit_insn (gen_strset (destptr, dest, value));
17777 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
17778 emit_insn (gen_strset (destptr, dest, value));
17781 gcc_unreachable ();
17784 if ((countval & 0x08) && max_size > 8)
17788 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17789 emit_insn (gen_strset (destptr, dest, value));
17793 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17794 emit_insn (gen_strset (destptr, dest, value));
17795 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
17796 emit_insn (gen_strset (destptr, dest, value));
17800 if ((countval & 0x04) && max_size > 4)
17802 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17803 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17806 if ((countval & 0x02) && max_size > 2)
17808 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
17809 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17812 if ((countval & 0x01) && max_size > 1)
17814 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
17815 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17822 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
17827 rtx label = ix86_expand_aligntest (count, 16, true);
17830 dest = change_address (destmem, DImode, destptr);
17831 emit_insn (gen_strset (destptr, dest, value));
17832 emit_insn (gen_strset (destptr, dest, value));
17836 dest = change_address (destmem, SImode, destptr);
17837 emit_insn (gen_strset (destptr, dest, value));
17838 emit_insn (gen_strset (destptr, dest, value));
17839 emit_insn (gen_strset (destptr, dest, value));
17840 emit_insn (gen_strset (destptr, dest, value));
17842 emit_label (label);
17843 LABEL_NUSES (label) = 1;
17847 rtx label = ix86_expand_aligntest (count, 8, true);
17850 dest = change_address (destmem, DImode, destptr);
17851 emit_insn (gen_strset (destptr, dest, value));
17855 dest = change_address (destmem, SImode, destptr);
17856 emit_insn (gen_strset (destptr, dest, value));
17857 emit_insn (gen_strset (destptr, dest, value));
17859 emit_label (label);
17860 LABEL_NUSES (label) = 1;
17864 rtx label = ix86_expand_aligntest (count, 4, true);
17865 dest = change_address (destmem, SImode, destptr);
17866 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17867 emit_label (label);
17868 LABEL_NUSES (label) = 1;
17872 rtx label = ix86_expand_aligntest (count, 2, true);
17873 dest = change_address (destmem, HImode, destptr);
17874 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17875 emit_label (label);
17876 LABEL_NUSES (label) = 1;
17880 rtx label = ix86_expand_aligntest (count, 1, true);
17881 dest = change_address (destmem, QImode, destptr);
17882 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17883 emit_label (label);
17884 LABEL_NUSES (label) = 1;
17888 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN,
17889 to DESIRED_ALIGNMENT. */
17891 expand_movmem_prologue (rtx destmem, rtx srcmem,
17892 rtx destptr, rtx srcptr, rtx count,
17893 int align, int desired_alignment)
17895 if (align <= 1 && desired_alignment > 1)
17897 rtx label = ix86_expand_aligntest (destptr, 1, false);
17898 srcmem = change_address (srcmem, QImode, srcptr);
17899 destmem = change_address (destmem, QImode, destptr);
17900 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17901 ix86_adjust_counter (count, 1);
17902 emit_label (label);
17903 LABEL_NUSES (label) = 1;
17905 if (align <= 2 && desired_alignment > 2)
17907 rtx label = ix86_expand_aligntest (destptr, 2, false);
17908 srcmem = change_address (srcmem, HImode, srcptr);
17909 destmem = change_address (destmem, HImode, destptr);
17910 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17911 ix86_adjust_counter (count, 2);
17912 emit_label (label);
17913 LABEL_NUSES (label) = 1;
17915 if (align <= 4 && desired_alignment > 4)
17917 rtx label = ix86_expand_aligntest (destptr, 4, false);
17918 srcmem = change_address (srcmem, SImode, srcptr);
17919 destmem = change_address (destmem, SImode, destptr);
17920 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17921 ix86_adjust_counter (count, 4);
17922 emit_label (label);
17923 LABEL_NUSES (label) = 1;
17925 gcc_assert (desired_alignment <= 8);
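/* For instance, with desired_alignment == 8 and destptr % 8 == 3, the
   tests above execute the 1-byte copy (address bit 0 set), skip the
   2-byte copy (bit 1 is then clear), and execute the 4-byte copy,
   moving 5 bytes in total before the aligned code takes over.  */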
17928 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
17929 ALIGN_BYTES is how many bytes need to be copied. */
17931 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
17932 int desired_align, int align_bytes)
17935 rtx src_size, dst_size;
17937 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
17938 if (src_align_bytes >= 0)
17939 src_align_bytes = desired_align - src_align_bytes;
17940 src_size = MEM_SIZE (src);
17941 dst_size = MEM_SIZE (dst);
17942 if (align_bytes & 1)
17944 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
17945 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
17947 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17949 if (align_bytes & 2)
17951 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
17952 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
17953 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
17954 set_mem_align (dst, 2 * BITS_PER_UNIT);
17955 if (src_align_bytes >= 0
17956 && (src_align_bytes & 1) == (align_bytes & 1)
17957 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
17958 set_mem_align (src, 2 * BITS_PER_UNIT);
17960 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17962 if (align_bytes & 4)
17964 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
17965 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
17966 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
17967 set_mem_align (dst, 4 * BITS_PER_UNIT);
17968 if (src_align_bytes >= 0)
17970 unsigned int src_align = 0;
17971 if ((src_align_bytes & 3) == (align_bytes & 3))
17972   src_align = 4;
17973 else if ((src_align_bytes & 1) == (align_bytes & 1))
17974   src_align = 2;
17975 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17976 set_mem_align (src, src_align * BITS_PER_UNIT);
17978 off = 4;
17979 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17981 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
17982 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
17983 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
17984 set_mem_align (dst, desired_align * BITS_PER_UNIT);
17985 if (src_align_bytes >= 0)
17987 unsigned int src_align = 0;
17988 if ((src_align_bytes & 7) == (align_bytes & 7))
17989   src_align = 8;
17990 else if ((src_align_bytes & 3) == (align_bytes & 3))
17991   src_align = 4;
17992 else if ((src_align_bytes & 1) == (align_bytes & 1))
17993   src_align = 2;
17994 if (src_align > (unsigned int) desired_align)
17995 src_align = desired_align;
17996 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17997 set_mem_align (src, src_align * BITS_PER_UNIT);
17999 if (dst_size)
18000   set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18001 if (src_size)
18002   set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
18007 /* Store enough into DEST to align DEST, known to be aligned by ALIGN,
18008    to DESIRED_ALIGNMENT.  */
18010 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
18011 int align, int desired_alignment)
18013 if (align <= 1 && desired_alignment > 1)
18015 rtx label = ix86_expand_aligntest (destptr, 1, false);
18016 destmem = change_address (destmem, QImode, destptr);
18017 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
18018 ix86_adjust_counter (count, 1);
18019 emit_label (label);
18020 LABEL_NUSES (label) = 1;
18022 if (align <= 2 && desired_alignment > 2)
18024 rtx label = ix86_expand_aligntest (destptr, 2, false);
18025 destmem = change_address (destmem, HImode, destptr);
18026 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
18027 ix86_adjust_counter (count, 2);
18028 emit_label (label);
18029 LABEL_NUSES (label) = 1;
18031 if (align <= 4 && desired_alignment > 4)
18033 rtx label = ix86_expand_aligntest (destptr, 4, false);
18034 destmem = change_address (destmem, SImode, destptr);
18035 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
18036 ix86_adjust_counter (count, 4);
18037 emit_label (label);
18038 LABEL_NUSES (label) = 1;
18040 gcc_assert (desired_alignment <= 8);
18043 /* Store enough into DST to align DST, known to be aligned by ALIGN, to
18044    DESIRED_ALIGN.  ALIGN_BYTES is how many bytes need to be stored.  */
18046 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
18047 int desired_align, int align_bytes)
18049 int off = 0;
18050 rtx dst_size = MEM_SIZE (dst);
18051 if (align_bytes & 1)
18053 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
18054 off = 1;
18055 emit_insn (gen_strset (destreg, dst,
18056 gen_lowpart (QImode, value)));
18058 if (align_bytes & 2)
18060 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
18061 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
18062 set_mem_align (dst, 2 * BITS_PER_UNIT);
18063 off = 2;
18064 emit_insn (gen_strset (destreg, dst,
18065 gen_lowpart (HImode, value)));
18067 if (align_bytes & 4)
18069 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
18070 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
18071 set_mem_align (dst, 4 * BITS_PER_UNIT);
18072 off = 4;
18073 emit_insn (gen_strset (destreg, dst,
18074 gen_lowpart (SImode, value)));
18076 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
18077 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
18078 set_mem_align (dst, desired_align * BITS_PER_UNIT);
18079 if (dst_size)
18080   set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18084 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
18085 static enum stringop_alg
18086 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
18087 int *dynamic_check)
18089 const struct stringop_algs * algs;
18090 bool optimize_for_speed;
18091 /* Algorithms using the rep prefix want at least edi and ecx;
18092 additionally, memset wants eax and memcpy wants esi. Don't
18093 consider such algorithms if the user has appropriated those
18094 registers for their own purposes. */
18095 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
18096                            || (memset
18097                                ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
18099 #define ALG_USABLE_P(alg) (rep_prefix_usable \
18100 || (alg != rep_prefix_1_byte \
18101 && alg != rep_prefix_4_byte \
18102 && alg != rep_prefix_8_byte))
18103 const struct processor_costs *cost;
18105 /* Even if the string operation call is cold, we still might spend a lot
18106 of time processing large blocks. */
18107 if (optimize_function_for_size_p (cfun)
18108 || (optimize_insn_for_size_p ()
18109 && expected_size != -1 && expected_size < 256))
18110 optimize_for_speed = false;
18111 else
18112   optimize_for_speed = true;
18114 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
18116 *dynamic_check = -1;
18117 if (memset)
18118   algs = &cost->memset[TARGET_64BIT != 0];
18119 else
18120   algs = &cost->memcpy[TARGET_64BIT != 0];
18121 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
18122 return stringop_alg;
18123 /* rep; movq or rep; movl is the smallest variant. */
18124 else if (!optimize_for_speed)
18126 if (!count || (count & 3))
18127 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
18129 return rep_prefix_usable ? rep_prefix_4_byte : loop;
18131 /* Very tiny blocks are best handled via the loop; REP is expensive to set up.  */
18133 else if (expected_size != -1 && expected_size < 4)
18134 return loop_1_byte;
18135 else if (expected_size != -1)
18138 enum stringop_alg alg = libcall;
18139 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18141 /* We get here if the algorithms that were not libcall-based
18142 were rep-prefix based and we are unable to use rep prefixes
18143 based on global register usage. Break out of the loop and
18144 use the heuristic below. */
18145 if (algs->size[i].max == 0)
18146   break;
18147 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
18149 enum stringop_alg candidate = algs->size[i].alg;
18151 if (candidate != libcall && ALG_USABLE_P (candidate))
18153 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking the
18154    last non-libcall inline algorithm.  */
18155 if (TARGET_INLINE_ALL_STRINGOPS)
18157 /* When the current size is best copied by a libcall, but we
18158    are still forced to inline, run the heuristic below that
18159    will pick code for medium-sized blocks.  */
18160 if (alg != libcall)
18164 else if (ALG_USABLE_P (candidate))
18168 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
18170 /* When asked to inline the call anyway, try to pick a meaningful choice.
18171    We look for the maximal size of a block that is faster to copy by hand
18172    and take blocks of at most that size, guessing that the average size
18173    will be roughly half of the block.
18175    If this turns out to be bad, we might simply specify the preferred
18176    choice in ix86_costs.  */
18177 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18178 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
18181 enum stringop_alg alg;
18183 bool any_alg_usable_p = true;
18185 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18187 enum stringop_alg candidate = algs->size[i].alg;
18188 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
18190 if (candidate != libcall && candidate
18191 && ALG_USABLE_P (candidate))
18192 max = algs->size[i].max;
18194 /* If there aren't any usable algorithms, then recursing on
18195 smaller sizes isn't going to find anything. Just return the
18196 simple byte-at-a-time copy loop. */
18197 if (!any_alg_usable_p)
18199 /* Pick something reasonable. */
18200 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18201 *dynamic_check = 128;
18202 return loop_1_byte;
18206 alg = decide_alg (count, max / 2, memset, dynamic_check);
18207 gcc_assert (*dynamic_check == -1);
18208 gcc_assert (alg != libcall);
18209 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18210 *dynamic_check = max;
18213 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
18214 #undef ALG_USABLE_P
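/* Illustrative sketch (not part of GCC): the core of the table walk in
   decide_alg, reduced to plain C.  Each cost-table entry maps a block-size
   ceiling to an algorithm; the first entry whose MAX covers the expected
   size wins, with -1 meaning "no limit".  Invented names; compiled out.  */
#if 0
enum alg_sketch { ALG_LIBCALL, ALG_LOOP_1_BYTE, ALG_LOOP, ALG_REP_BYTE };
struct alg_entry { int max; enum alg_sketch alg; };

static enum alg_sketch
pick_alg (const struct alg_entry *table, int n, int expected_size)
{
  int i;
  for (i = 0; i < n; i++)
    if (table[i].max == -1 || table[i].max >= expected_size)
      return table[i].alg;
  return ALG_LIBCALL;	/* nothing matched: fall back to a library call */
}
#endif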
18217 /* Decide on alignment. We know that the operand is already aligned to ALIGN
18218 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
18220 decide_alignment (int align,
18221 enum stringop_alg alg,
18222 int expected_size)
18224 int desired_align = 0;
18228 gcc_unreachable ();
18230 case unrolled_loop:
18231 desired_align = GET_MODE_SIZE (Pmode);
18233 case rep_prefix_8_byte:
18236 case rep_prefix_4_byte:
18237 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
18238    copying a whole cache line at once.  */
18239 if (TARGET_PENTIUMPRO)
18244 case rep_prefix_1_byte:
18245 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
18246    copying a whole cache line at once.  */
18247 if (TARGET_PENTIUMPRO)
18261 if (desired_align < align)
18262 desired_align = align;
18263 if (expected_size != -1 && expected_size < 4)
18264 desired_align = align;
18265 return desired_align;
18268 /* Return the smallest power of 2 greater than VAL. */
18270 smallest_pow2_greater_than (int val)
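/* A minimal sketch of such a helper (an assumption, not necessarily the
   exact implementation); compiled out:  */
#if 0
static int
smallest_pow2_greater_than_sketch (int val)
{
  int ret = 1;
  while (ret <= val)	/* strictly greater than VAL */
    ret <<= 1;
  return ret;		/* e.g. 4 -> 8, 5 -> 8, 0 -> 1 */
}
#endif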
18278 /* Expand string move (memcpy) operation. Use i386 string operations when
18279 profitable. expand_setmem contains similar code. The code depends upon
18280 architecture, block size and alignment, but always has the same
18281 overall structure:
18283 1) Prologue guard: Conditional that jumps up to epilogues for small
18284 blocks that can be handled by epilogue alone. This is faster but
18285 also needed for correctness, since the prologue assumes the block is
18286 larger than the desired alignment.
18288 An optional dynamic check for size, and a libcall for large
18289 blocks, is emitted here too, with -minline-stringops-dynamically.
18291 2) Prologue: copy the first few bytes in order to get the destination
18292    aligned to DESIRED_ALIGN.  It is emitted only when ALIGN is less than
18293    DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
18294    We emit either a jump tree on power-of-two-sized blocks, or a byte loop.
18296 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
18297    with the specified algorithm.
18299 4) Epilogue: code copying the tail of the block that is too small to be
18300    handled by the main body (or up to the size guarded by the prologue guard).  */
18303 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
18304 rtx expected_align_exp, rtx expected_size_exp)
18310 rtx jump_around_label = NULL;
18311 HOST_WIDE_INT align = 1;
18312 unsigned HOST_WIDE_INT count = 0;
18313 HOST_WIDE_INT expected_size = -1;
18314 int size_needed = 0, epilogue_size_needed;
18315 int desired_align = 0, align_bytes = 0;
18316 enum stringop_alg alg;
18318 bool need_zero_guard = false;
18320 if (CONST_INT_P (align_exp))
18321 align = INTVAL (align_exp);
18322 /* i386 can do misaligned access at a reasonably increased cost.  */
18323 if (CONST_INT_P (expected_align_exp)
18324 && INTVAL (expected_align_exp) > align)
18325 align = INTVAL (expected_align_exp);
18326 /* ALIGN is the minimum of destination and source alignment, but we care here
18327 just about destination alignment. */
18328 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
18329 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
18331 if (CONST_INT_P (count_exp))
18332 count = expected_size = INTVAL (count_exp);
18333 if (CONST_INT_P (expected_size_exp) && count == 0)
18334 expected_size = INTVAL (expected_size_exp);
18336 /* Make sure we don't need to care about overflow later on. */
18337 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18340 /* Step 0: Decide on preferred algorithm, desired alignment and
18341 size of chunks to be copied by main loop. */
18343 alg = decide_alg (count, expected_size, false, &dynamic_check);
18344 desired_align = decide_alignment (align, alg, expected_size);
18346 if (!TARGET_ALIGN_STRINGOPS)
18347 align = desired_align;
18349 if (alg == libcall)
18351 gcc_assert (alg != no_stringop);
18353 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
18354 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18355 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
18360 gcc_unreachable ();
18362 need_zero_guard = true;
18363 size_needed = GET_MODE_SIZE (Pmode);
18365 case unrolled_loop:
18366 need_zero_guard = true;
18367 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
18369 case rep_prefix_8_byte:
18372 case rep_prefix_4_byte:
18375 case rep_prefix_1_byte:
18379 need_zero_guard = true;
18384 epilogue_size_needed = size_needed;
18386 /* Step 1: Prologue guard. */
18388 /* Alignment code needs count to be in a register.  */
18389 if (CONST_INT_P (count_exp) && desired_align > align)
18391 if (INTVAL (count_exp) > desired_align
18392 && INTVAL (count_exp) > size_needed)
18394 align_bytes
18395   = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18396 if (align_bytes <= 0)
18397   align_bytes = 0;
18398 else
18399   align_bytes = desired_align - align_bytes;
18401 if (align_bytes == 0)
18402 count_exp = force_reg (counter_mode (count_exp), count_exp);
18404 gcc_assert (desired_align >= 1 && align >= 1);
18406 /* Ensure that the alignment prologue won't copy past the end of the block.  */
18407 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18409 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18410 /* The epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18411    Make sure it is a power of 2.  */
18412 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18416 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18418 /* If the main algorithm works on QImode, no epilogue is needed.
18419    For small sizes just don't align anything.  */
18420 if (size_needed == 1)
18421 desired_align = align;
18428 label = gen_label_rtx ();
18429 emit_cmp_and_jump_insns (count_exp,
18430 GEN_INT (epilogue_size_needed),
18431 LTU, 0, counter_mode (count_exp), 1, label);
18432 if (expected_size == -1 || expected_size < epilogue_size_needed)
18433 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18435 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18439 /* Emit code to decide at runtime whether a library call or inline code
18440    should be used.  */
18441 if (dynamic_check != -1)
18443 if (CONST_INT_P (count_exp))
18445 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
18447 emit_block_move_via_libcall (dst, src, count_exp, false);
18448 count_exp = const0_rtx;
18454 rtx hot_label = gen_label_rtx ();
18455 jump_around_label = gen_label_rtx ();
18456 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18457 LEU, 0, GET_MODE (count_exp), 1, hot_label);
18458 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18459 emit_block_move_via_libcall (dst, src, count_exp, false);
18460 emit_jump (jump_around_label);
18461 emit_label (hot_label);
18465 /* Step 2: Alignment prologue. */
18467 if (desired_align > align)
18469 if (align_bytes == 0)
18471 /* Except for the first move in the epilogue, we no longer know the
18472    constant offset in aliasing info.  It doesn't seem worth the pain
18473    to maintain it for the first move, so throw the info away early.  */
18475 src = change_address (src, BLKmode, srcreg);
18476 dst = change_address (dst, BLKmode, destreg);
18477 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
18482 /* If we know how many bytes need to be stored before dst is
18483 sufficiently aligned, maintain aliasing info accurately. */
18484 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
18485 desired_align, align_bytes);
18486 count_exp = plus_constant (count_exp, -align_bytes);
18487 count -= align_bytes;
18489 if (need_zero_guard
18490 && (count < (unsigned HOST_WIDE_INT) size_needed
18491 || (align_bytes == 0
18492 && count < ((unsigned HOST_WIDE_INT) size_needed
18493 + desired_align - align))))
18495 /* It is possible that we copied enough so that the main loop will not
18496    execute.  */
18497 gcc_assert (size_needed > 1);
18498 if (label == NULL_RTX)
18499 label = gen_label_rtx ();
18500 emit_cmp_and_jump_insns (count_exp,
18501 GEN_INT (size_needed),
18502 LTU, 0, counter_mode (count_exp), 1, label);
18503 if (expected_size == -1
18504 || expected_size < (desired_align - align) / 2 + size_needed)
18505 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18507 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18510 if (label && size_needed == 1)
18512 emit_label (label);
18513 LABEL_NUSES (label) = 1;
18515 epilogue_size_needed = 1;
18517 else if (label == NULL_RTX)
18518 epilogue_size_needed = size_needed;
18520 /* Step 3: Main loop. */
18526 gcc_unreachable ();
18528 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18529 count_exp, QImode, 1, expected_size);
18532 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18533 count_exp, Pmode, 1, expected_size);
18535 case unrolled_loop:
18536 /* Unroll only by a factor of 2 in 32-bit mode, since we don't have enough
18537    registers for 4 temporaries anyway.  */
18538 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18539 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
18542 case rep_prefix_8_byte:
18543 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18546 case rep_prefix_4_byte:
18547 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18550 case rep_prefix_1_byte:
18551 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18555 /* Properly adjust the offsets of the src and dest memory for aliasing.  */
18556 if (CONST_INT_P (count_exp))
18558 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
18559 (count / size_needed) * size_needed);
18560 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18561 (count / size_needed) * size_needed);
18565 src = change_address (src, BLKmode, srcreg);
18566 dst = change_address (dst, BLKmode, destreg);
18569 /* Step 4: Epilogue to copy the remaining bytes. */
18573 /* When the main loop is done, COUNT_EXP might hold the original count,
18574    while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18575    The epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18576    bytes.  Compensate if needed.  */
18578 if (size_needed < epilogue_size_needed)
18581 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18582 GEN_INT (size_needed - 1), count_exp, 1,
18584 if (tmp != count_exp)
18585 emit_move_insn (count_exp, tmp);
18587 emit_label (label);
18588 LABEL_NUSES (label) = 1;
18591 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18592 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
18593 epilogue_size_needed);
18594 if (jump_around_label)
18595 emit_label (jump_around_label);
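/* Illustrative sketch (not part of GCC): the four-step shape described in
   the comment before ix86_expand_movmem, as a plain-C memcpy with a
   word-sized main body.  Invented names; compiled out.  */
#if 0
#include <stdint.h>
#include <string.h>
static void
movmem_shape (unsigned char *dst, const unsigned char *src, size_t count)
{
  /* 1) Prologue guard: small blocks go straight to the epilogue, which
        also keeps the prologue from copying past the end of the block.  */
  if (count >= 2 * sizeof (unsigned long))
    {
      /* 2) Prologue: peel bytes until DST is word-aligned.  */
      while ((uintptr_t) dst % sizeof (unsigned long))
        { *dst++ = *src++; count--; }
      /* 3) Main body: copy in word-sized chunks.  */
      while (count >= sizeof (unsigned long))
        {
          memcpy (dst, src, sizeof (unsigned long));
          dst += sizeof (unsigned long);
          src += sizeof (unsigned long);
          count -= sizeof (unsigned long);
        }
    }
  /* 4) Epilogue: tail too small for the main body.  */
  while (count--)
    *dst++ = *src++;
}
#endif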
18599 /* Helper function for memset.  For a QImode value 0xXY produce
18600    0xXYXYXYXY of the width specified by MODE.  This is essentially
18601    a multiplication by 0x01010101, but we can do slightly better than
18602    synth_mult by unwinding the sequence by hand on CPUs with slow multiply.  */
18605 promote_duplicated_reg (enum machine_mode mode, rtx val)
18607 enum machine_mode valmode = GET_MODE (val);
18609 int nops = mode == DImode ? 3 : 2;
18611 gcc_assert (mode == SImode || mode == DImode);
18612 if (val == const0_rtx)
18613 return copy_to_mode_reg (mode, const0_rtx);
18614 if (CONST_INT_P (val))
18616 HOST_WIDE_INT v = INTVAL (val) & 255;
18620 if (mode == DImode)
18621 v |= (v << 16) << 16;
18622 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
18625 if (valmode == VOIDmode)
18627 if (valmode != QImode)
18628 val = gen_lowpart (QImode, val);
18629 if (mode == QImode)
18631 if (!TARGET_PARTIAL_REG_STALL)
18633 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
18634 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
18635 <= (ix86_cost->shift_const + ix86_cost->add) * nops
18636 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
18638 rtx reg = convert_modes (mode, QImode, val, true);
18639 tmp = promote_duplicated_reg (mode, const1_rtx);
18640 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
18645 rtx reg = convert_modes (mode, QImode, val, true);
18647 if (!TARGET_PARTIAL_REG_STALL)
18648 if (mode == SImode)
18649 emit_insn (gen_movsi_insv_1 (reg, reg));
18651 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
18654 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
18655 NULL, 1, OPTAB_DIRECT);
18657 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18659 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
18660 NULL, 1, OPTAB_DIRECT);
18661 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18662 if (mode == SImode)
18664 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
18665 NULL, 1, OPTAB_DIRECT);
18666 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
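/* Illustrative sketch (not part of GCC): the shift-and-IOR replication
   performed above, in plain C.  Two steps build 0xXYXYXYXY from 0xXY; a
   third step extends it to 64 bits for the DImode case.  Compiled out.  */
#if 0
static unsigned long long
replicate_byte (unsigned char byte)
{
  unsigned long long v = byte;
  v |= v << 8;	/* 0x00XY      -> 0xXYXY */
  v |= v << 16;	/* 0xXYXY      -> 0xXYXYXYXY */
  v |= v << 32;	/* 0xXYXYXYXY  -> 0xXYXYXYXYXYXYXYXY */
  return v;	/* same as multiplying by 0x0101010101010101 */
}
#endif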
18671 /* Duplicate the value VAL using promote_duplicated_reg into the maximal
18672    size needed by the main loop copying SIZE_NEEDED chunks and by the
18673    prologue raising the alignment from ALIGN to DESIRED_ALIGN.  */
18675 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
18680 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
18681 promoted_val = promote_duplicated_reg (DImode, val);
18682 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
18683 promoted_val = promote_duplicated_reg (SImode, val);
18684 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
18685 promoted_val = promote_duplicated_reg (HImode, val);
18687 promoted_val = val;
18689 return promoted_val;
18692 /* Expand string set operation (memset).  Use i386 string operations when
18693    profitable.  See the expand_movmem comment for an explanation of the
18694    individual steps performed.  */
18696 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
18697 rtx expected_align_exp, rtx expected_size_exp)
18702 rtx jump_around_label = NULL;
18703 HOST_WIDE_INT align = 1;
18704 unsigned HOST_WIDE_INT count = 0;
18705 HOST_WIDE_INT expected_size = -1;
18706 int size_needed = 0, epilogue_size_needed;
18707 int desired_align = 0, align_bytes = 0;
18708 enum stringop_alg alg;
18709 rtx promoted_val = NULL;
18710 bool force_loopy_epilogue = false;
18712 bool need_zero_guard = false;
18714 if (CONST_INT_P (align_exp))
18715 align = INTVAL (align_exp);
18716 /* i386 can do misaligned access at a reasonably increased cost.  */
18717 if (CONST_INT_P (expected_align_exp)
18718 && INTVAL (expected_align_exp) > align)
18719 align = INTVAL (expected_align_exp);
18720 if (CONST_INT_P (count_exp))
18721 count = expected_size = INTVAL (count_exp);
18722 if (CONST_INT_P (expected_size_exp) && count == 0)
18723 expected_size = INTVAL (expected_size_exp);
18725 /* Make sure we don't need to care about overflow later on. */
18726 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18729 /* Step 0: Decide on preferred algorithm, desired alignment and
18730 size of chunks to be copied by main loop. */
18732 alg = decide_alg (count, expected_size, true, &dynamic_check);
18733 desired_align = decide_alignment (align, alg, expected_size);
18735 if (!TARGET_ALIGN_STRINGOPS)
18736 align = desired_align;
18738 if (alg == libcall)
18740 gcc_assert (alg != no_stringop);
18742 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
18743 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18748 gcc_unreachable ();
18750 need_zero_guard = true;
18751 size_needed = GET_MODE_SIZE (Pmode);
18753 case unrolled_loop:
18754 need_zero_guard = true;
18755 size_needed = GET_MODE_SIZE (Pmode) * 4;
18757 case rep_prefix_8_byte:
18760 case rep_prefix_4_byte:
18763 case rep_prefix_1_byte:
18767 need_zero_guard = true;
18771 epilogue_size_needed = size_needed;
18773 /* Step 1: Prologue guard. */
18775 /* Alignment code needs count to be in a register.  */
18776 if (CONST_INT_P (count_exp) && desired_align > align)
18778 if (INTVAL (count_exp) > desired_align
18779 && INTVAL (count_exp) > size_needed)
18781 align_bytes
18782   = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18783 if (align_bytes <= 0)
18784   align_bytes = 0;
18785 else
18786   align_bytes = desired_align - align_bytes;
18788 if (align_bytes == 0)
18790 enum machine_mode mode = SImode;
18791 if (TARGET_64BIT && (count & ~0xffffffff))
18792   mode = DImode;
18793 count_exp = force_reg (mode, count_exp);
18796 /* Do the cheap promotion to allow better CSE across the
18797    main loop and epilogue (i.e. one load of the big constant in
18798    front of all the code).  */
18799 if (CONST_INT_P (val_exp))
18800 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18801 desired_align, align);
18802 /* Ensure that the alignment prologue won't copy past the end of the block.  */
18803 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18805 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18806 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18807 Make sure it is a power of 2.  */
18808 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18810 /* To improve performance on small blocks, we jump around the VAL
18811    promoting code.  This means that if the promoted VAL is not constant,
18812    we might not use it in the epilogue and have to use the byte loop
18813    variant instead.  */
18814 if (epilogue_size_needed > 2 && !promoted_val)
18815 force_loopy_epilogue = true;
18818 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18820 /* If the main algorithm works on QImode, no epilogue is needed.
18821    For small sizes just don't align anything.  */
18822 if (size_needed == 1)
18823 desired_align = align;
18830 label = gen_label_rtx ();
18831 emit_cmp_and_jump_insns (count_exp,
18832 GEN_INT (epilogue_size_needed),
18833 LTU, 0, counter_mode (count_exp), 1, label);
18834 if (expected_size == -1 || expected_size <= epilogue_size_needed)
18835 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18837 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18840 if (dynamic_check != -1)
18842 rtx hot_label = gen_label_rtx ();
18843 jump_around_label = gen_label_rtx ();
18844 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18845 LEU, 0, counter_mode (count_exp), 1, hot_label);
18846 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18847 set_storage_via_libcall (dst, count_exp, val_exp, false);
18848 emit_jump (jump_around_label);
18849 emit_label (hot_label);
18852 /* Step 2: Alignment prologue. */
18854 /* Do the expensive promotion once we have branched off the small blocks.  */
18856 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18857 desired_align, align);
18858 gcc_assert (desired_align >= 1 && align >= 1);
18860 if (desired_align > align)
18862 if (align_bytes == 0)
18864 /* Except for the first move in the epilogue, we no longer know the
18865    constant offset in aliasing info.  It doesn't seem worth the pain
18866    to maintain it for the first move, so throw the info away early.  */
18868 dst = change_address (dst, BLKmode, destreg);
18869 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
18874 /* If we know how many bytes need to be stored before dst is
18875 sufficiently aligned, maintain aliasing info accurately. */
18876 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
18877 desired_align, align_bytes);
18878 count_exp = plus_constant (count_exp, -align_bytes);
18879 count -= align_bytes;
18881 if (need_zero_guard
18882 && (count < (unsigned HOST_WIDE_INT) size_needed
18883 || (align_bytes == 0
18884 && count < ((unsigned HOST_WIDE_INT) size_needed
18885 + desired_align - align))))
18887 /* It is possible that we copied enough so that the main loop will not
18888    execute.  */
18889 gcc_assert (size_needed > 1);
18890 if (label == NULL_RTX)
18891 label = gen_label_rtx ();
18892 emit_cmp_and_jump_insns (count_exp,
18893 GEN_INT (size_needed),
18894 LTU, 0, counter_mode (count_exp), 1, label);
18895 if (expected_size == -1
18896 || expected_size < (desired_align - align) / 2 + size_needed)
18897 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18899 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18902 if (label && size_needed == 1)
18904 emit_label (label);
18905 LABEL_NUSES (label) = 1;
18907 promoted_val = val_exp;
18908 epilogue_size_needed = 1;
18910 else if (label == NULL_RTX)
18911 epilogue_size_needed = size_needed;
18913 /* Step 3: Main loop. */
18919 gcc_unreachable ();
18921 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18922 count_exp, QImode, 1, expected_size);
18925 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18926 count_exp, Pmode, 1, expected_size);
18928 case unrolled_loop:
18929 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18930 count_exp, Pmode, 4, expected_size);
18932 case rep_prefix_8_byte:
18933 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18936 case rep_prefix_4_byte:
18937 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18940 case rep_prefix_1_byte:
18941 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18945 /* Properly adjust the offset of the destination memory for aliasing.  */
18946 if (CONST_INT_P (count_exp))
18947 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18948 (count / size_needed) * size_needed);
18950 dst = change_address (dst, BLKmode, destreg);
18952 /* Step 4: Epilogue to copy the remaining bytes. */
18956 /* When the main loop is done, COUNT_EXP might hold the original count,
18957    while we want to set only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18958    The epilogue code will actually set COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18959    bytes.  Compensate if needed.  */
18961 if (size_needed < epilogue_size_needed)
18964 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18965 GEN_INT (size_needed - 1), count_exp, 1,
18967 if (tmp != count_exp)
18968 emit_move_insn (count_exp, tmp);
18970 emit_label (label);
18971 LABEL_NUSES (label) = 1;
18974 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18976 if (force_loopy_epilogue)
18977 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
18978 epilogue_size_needed);
18980 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
18981 epilogue_size_needed);
18983 if (jump_around_label)
18984 emit_label (jump_around_label);
18988 /* Expand the appropriate insns for doing strlen if not just doing
18989    repnz; scasb
18991 out = result, initialized with the start address
18992 align_rtx = alignment of the address.
18993 scratch = scratch register, initialized with the start address when
18994 not aligned, otherwise undefined
18996 This is just the body. It needs the initializations mentioned above and
18997 some address computing at the end. These things are done in i386.md. */
19000 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
19004 rtx align_2_label = NULL_RTX;
19005 rtx align_3_label = NULL_RTX;
19006 rtx align_4_label = gen_label_rtx ();
19007 rtx end_0_label = gen_label_rtx ();
19009 rtx tmpreg = gen_reg_rtx (SImode);
19010 rtx scratch = gen_reg_rtx (SImode);
19014 if (CONST_INT_P (align_rtx))
19015 align = INTVAL (align_rtx);
19017 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
19019 /* Is there a known alignment and is it less than 4? */
19022 rtx scratch1 = gen_reg_rtx (Pmode);
19023 emit_move_insn (scratch1, out);
19024 /* Is there a known alignment and is it not 2? */
19027 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
19028 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
19030 /* Leave just the 3 lower bits. */
19031 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
19032 NULL_RTX, 0, OPTAB_WIDEN);
19034 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19035 Pmode, 1, align_4_label);
19036 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
19037 Pmode, 1, align_2_label);
19038 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
19039 Pmode, 1, align_3_label);
19043 /* Since the alignment is 2, we have to check 2 or 0 bytes;
19044    check whether it is aligned to a 4-byte boundary.  */
19046 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
19047 NULL_RTX, 0, OPTAB_WIDEN);
19049 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19050 Pmode, 1, align_4_label);
19053 mem = change_address (src, QImode, out);
19055 /* Now compare the bytes. */
19057 /* Compare the first n unaligned bytes on a byte-by-byte basis.  */
19058 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
19059 QImode, 1, end_0_label);
19061 /* Increment the address. */
19062 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19064 /* Not needed with an alignment of 2.  */
19067 emit_label (align_2_label);
19069 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19072 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19074 emit_label (align_3_label);
19077 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19080 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19083 /* Generate a loop to check 4 bytes at a time.  It is not a good idea to
19084    align this loop; it only makes programs huge and does not help convergence.  */
19086 emit_label (align_4_label);
19088 mem = change_address (src, SImode, out);
19089 emit_move_insn (scratch, mem);
19090 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
19092 /* This formula yields a nonzero result iff one of the bytes is zero.
19093    This saves three branches inside the loop and many cycles.  */
19095 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
19096 emit_insn (gen_one_cmplsi2 (scratch, scratch));
19097 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
19098 emit_insn (gen_andsi3 (tmpreg, tmpreg,
19099 gen_int_mode (0x80808080, SImode)));
19100 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
19105 rtx reg = gen_reg_rtx (SImode);
19106 rtx reg2 = gen_reg_rtx (Pmode);
19107 emit_move_insn (reg, tmpreg);
19108 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
19110 /* If zero is not in the first two bytes, move two bytes forward. */
19111 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19112 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19113 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19114 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
19115 gen_rtx_IF_THEN_ELSE (SImode, tmp,
19118 /* Emit the lea manually to avoid clobbering the flags.  */
19119 emit_insn (gen_rtx_SET (SImode, reg2,
19120 gen_rtx_PLUS (Pmode, out, const2_rtx)));
19122 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19123 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19124 emit_insn (gen_rtx_SET (VOIDmode, out,
19125 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
19131 rtx end_2_label = gen_label_rtx ();
19132 /* Is zero in the first two bytes? */
19134 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19135 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19136 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
19137 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
19138 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
19140 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
19141 JUMP_LABEL (tmp) = end_2_label;
19143 /* Not in the first two. Move two bytes forward. */
19144 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
19145 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
19147 emit_label (end_2_label);
19151 /* Avoid a branch when fixing up the byte.  */
19152 tmpreg = gen_lowpart (QImode, tmpreg);
19153 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
19154 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
19155 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
19156 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), tmp, cmp));
19158 emit_label (end_0_label);
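/* Illustrative sketch (not part of GCC): the zero-byte test used by the
   loop above, in plain C.  (v - 0x01010101) & ~v & 0x80808080 is nonzero
   iff some byte of V is zero, which lets the loop test four bytes per
   branch.  Invented names; compiled out.  */
#if 0
#include <stdint.h>
#include <string.h>
static int
has_zero_byte (unsigned int v)
{
  return ((v - 0x01010101u) & ~v & 0x80808080u) != 0;
}

static size_t
strlen_by_words (const char *s)
{
  const char *p = s;
  while ((uintptr_t) p % 4)		/* prologue: reach 4-byte alignment */
    if (*p == 0)
      return (size_t) (p - s);
    else
      p++;
  for (;;)				/* aligned main loop, 4 bytes a step */
    {
      unsigned int v;
      memcpy (&v, p, 4);
      if (has_zero_byte (v))
        break;
      p += 4;
    }
  while (*p)				/* epilogue: locate the exact byte */
    p++;
  return (size_t) (p - s);
}
#endif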
19161 /* Expand strlen. */
19164 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
19166 rtx addr, scratch1, scratch2, scratch3, scratch4;
19168 /* The generic case of the strlen expander is long.  Avoid expanding it
19169    unless TARGET_INLINE_ALL_STRINGOPS.  */
19171 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19172 && !TARGET_INLINE_ALL_STRINGOPS
19173 && !optimize_insn_for_size_p ()
19174 && (!CONST_INT_P (align) || INTVAL (align) < 4))
19177 addr = force_reg (Pmode, XEXP (src, 0));
19178 scratch1 = gen_reg_rtx (Pmode);
19180 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19181 && !optimize_insn_for_size_p ())
19183 /* It seems that some optimizers do not combine a call like
19184    foo (strlen (bar), strlen (bar));
19185    when the move and the subtraction are done here.  The length is
19186    calculated just once when these instructions are done inside
19187    output_strlen_unroll ().  But since &bar[strlen (bar)] is often used,
19188    and this uses one fewer register for the lifetime of
19189    output_strlen_unroll (), this is better.  */
19191 emit_move_insn (out, addr);
19193 ix86_expand_strlensi_unroll_1 (out, src, align);
19195 /* strlensi_unroll_1 returns the address of the zero at the end of
19196 the string, like memchr(), so compute the length by subtracting
19197 the start address. */
19198 emit_insn ((*ix86_gen_sub3) (out, out, addr));
19204 /* Can't use this if the user has appropriated eax, ecx, or edi. */
19205 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
19208 scratch2 = gen_reg_rtx (Pmode);
19209 scratch3 = gen_reg_rtx (Pmode);
19210 scratch4 = force_reg (Pmode, constm1_rtx);
19212 emit_move_insn (scratch3, addr);
19213 eoschar = force_reg (QImode, eoschar);
19215 src = replace_equiv_address_nv (src, scratch3);
19217 /* If .md starts supporting :P, this can be done in .md. */
19218 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
19219 scratch4), UNSPEC_SCAS);
19220 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
19221 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
19222 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
19227 /* For a given symbol (function), construct code to compute the address
19228    of its PLT entry in the large x86-64 PIC model.  */
19230 construct_plt_address (rtx symbol)
19232 rtx tmp = gen_reg_rtx (Pmode);
19233 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
19235 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
19236 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
19238 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
19239 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
19244 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
19246 rtx pop, int sibcall)
19248 rtx use = NULL, call;
19250 if (pop == const0_rtx)
19251   pop = NULL;
19252 gcc_assert (!TARGET_64BIT || !pop);
19254 if (TARGET_MACHO && !TARGET_64BIT)
19257 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
19258 fnaddr = machopic_indirect_call_target (fnaddr);
19263 /* Static functions and indirect calls don't need the pic register. */
19264 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
19265 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19266 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
19267 use_reg (&use, pic_offset_table_rtx);
19270 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
19272 rtx al = gen_rtx_REG (QImode, AX_REG);
19273 emit_move_insn (al, callarg2);
19274 use_reg (&use, al);
19277 if (ix86_cmodel == CM_LARGE_PIC
19279 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19280 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
19281 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
19282 if (sibcall
19283     ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
19284 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
19286 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
19287 fnaddr = gen_rtx_MEM (QImode, fnaddr);
19290 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
19291 if (retval)
19292   call = gen_rtx_SET (VOIDmode, retval, call);
19295 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
19296 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
19297 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
19299 if (TARGET_64BIT
19300     && ix86_cfun_abi () == MS_ABI
19301 && (!callarg2 || INTVAL (callarg2) != -2))
19303 /* We need to represent that the SI and DI registers are clobbered
19304    by SYSV calls.  */
19305 static int clobbered_registers[] = {
19306 XMM6_REG, XMM7_REG, XMM8_REG,
19307 XMM9_REG, XMM10_REG, XMM11_REG,
19308 XMM12_REG, XMM13_REG, XMM14_REG,
19309 XMM15_REG, SI_REG, DI_REG
19312 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
19313 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
19314 UNSPEC_MS_TO_SYSV_CALL);
19318 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
19319 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
19322 (SSE_REGNO_P (clobbered_registers[i])
19324 clobbered_registers[i]));
19326 call = gen_rtx_PARALLEL (VOIDmode,
19327 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
19331 call = emit_call_insn (call);
19333 CALL_INSN_FUNCTION_USAGE (call) = use;
19337 /* Clear stack slot assignments remembered from previous functions.
19338    This is called from INIT_EXPANDERS once before RTL is emitted for each
19339    function.  */
19341 static struct machine_function *
19342 ix86_init_machine_status (void)
19344 struct machine_function *f;
19346 f = GGC_CNEW (struct machine_function);
19347 f->use_fast_prologue_epilogue_nregs = -1;
19348 f->tls_descriptor_call_expanded_p = 0;
19349 f->call_abi = ix86_abi;
19354 /* Return a MEM corresponding to a stack slot with mode MODE.
19355 Allocate a new slot if necessary.
19357 The RTL for a function can have several slots available: N is
19358 which slot to use. */
19361 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
19363 struct stack_local_entry *s;
19365 gcc_assert (n < MAX_386_STACK_LOCALS);
19367 /* Virtual slot is valid only before vregs are instantiated. */
19368 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
19370 for (s = ix86_stack_locals; s; s = s->next)
19371 if (s->mode == mode && s->n == n)
19372 return copy_rtx (s->rtl);
19374 s = (struct stack_local_entry *)
19375 ggc_alloc (sizeof (struct stack_local_entry));
19378 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
19380 s->next = ix86_stack_locals;
19381 ix86_stack_locals = s;
19385 /* Construct the SYMBOL_REF for the tls_get_addr function. */
19387 static GTY(()) rtx ix86_tls_symbol;
19389 ix86_tls_get_addr (void)
19392 if (!ix86_tls_symbol)
19394 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
19395 (TARGET_ANY_GNU_TLS
19396  && !TARGET_64BIT)
19397 ? "___tls_get_addr"
19398 : "__tls_get_addr");
19401 return ix86_tls_symbol;
19404 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
19406 static GTY(()) rtx ix86_tls_module_base_symbol;
19408 ix86_tls_module_base (void)
19411 if (!ix86_tls_module_base_symbol)
19413 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
19414 "_TLS_MODULE_BASE_");
19415 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
19416 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
19419 return ix86_tls_module_base_symbol;
19422 /* Calculate the length of the memory address in the instruction
19423 encoding. Does not include the one-byte modrm, opcode, or prefix. */
19426 memory_address_length (rtx addr)
19428 struct ix86_address parts;
19429 rtx base, index, disp;
19433 if (GET_CODE (addr) == PRE_DEC
19434 || GET_CODE (addr) == POST_INC
19435 || GET_CODE (addr) == PRE_MODIFY
19436 || GET_CODE (addr) == POST_MODIFY)
19439 ok = ix86_decompose_address (addr, &parts);
19442 if (parts.base && GET_CODE (parts.base) == SUBREG)
19443 parts.base = SUBREG_REG (parts.base);
19444 if (parts.index && GET_CODE (parts.index) == SUBREG)
19445 parts.index = SUBREG_REG (parts.index);
19448 index = parts.index;
19452 /* Rule of thumb:
19453    - esp as the base always wants an index,
19454 - ebp as the base always wants a displacement,
19455 - r12 as the base always wants an index,
19456 - r13 as the base always wants a displacement. */
19458 /* Register Indirect. */
19459 if (base && !index && !disp)
19461 /* esp (for its index) and ebp (for its displacement) need
19462 the two-byte modrm form.  Similarly for r12 and r13 in 64-bit
19463 mode.  */
19465 && (addr == arg_pointer_rtx
19466 || addr == frame_pointer_rtx
19467 || REGNO (addr) == SP_REG
19468 || REGNO (addr) == BP_REG
19469 || REGNO (addr) == R12_REG
19470 || REGNO (addr) == R13_REG))
19474 /* Direct Addressing.  In 64-bit mode mod 00 r/m 5
19475    is not disp32, but disp32(%rip), so for disp32 a
19476    SIB byte is needed, unless print_operand_address
19477    optimizes it into disp32(%rip) or (%rip) is implied
19478    by UNSPEC.  */
19479 else if (disp && !base && !index)
19486 if (GET_CODE (disp) == CONST)
19487 symbol = XEXP (disp, 0);
19488 if (GET_CODE (symbol) == PLUS
19489 && CONST_INT_P (XEXP (symbol, 1)))
19490 symbol = XEXP (symbol, 0);
19492 if (GET_CODE (symbol) != LABEL_REF
19493 && (GET_CODE (symbol) != SYMBOL_REF
19494 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
19495 && (GET_CODE (symbol) != UNSPEC
19496 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
19497 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
19504 /* Find the length of the displacement constant. */
19507 if (base && satisfies_constraint_K (disp))
19512 /* ebp always wants a displacement. Similarly r13. */
19513 else if (base && REG_P (base)
19514 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
19517 /* An index requires the two-byte modrm form.... */
19519 /* ...like esp (or r12), which always wants an index. */
19520 || base == arg_pointer_rtx
19521 || base == frame_pointer_rtx
19522 || (base && REG_P (base)
19523 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
19524 len += 1;
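/* Illustrative sketch (not part of GCC): how the extra encoding bytes add
   up under the rules above.  Like memory_address_length, the baseline
   modrm/opcode/prefix bytes are excluded; only the SIB byte and the
   disp8/disp32 field are counted.  Invented helper; compiled out.  */
#if 0
static int
extra_address_bytes (int has_base, int has_index, long disp,
                     int base_is_sp_or_r12, int base_is_bp_or_r13)
{
  int len = 0;
  if (has_index || base_is_sp_or_r12)
    len += 1;				/* SIB byte */
  if (disp != 0 || base_is_bp_or_r13 || !has_base)
    len += (has_base && disp >= -128 && disp <= 127) ? 1 : 4;
  return len;				/* disp8 or disp32 */
}
#endif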
19540 /* Compute the default value for the "length_immediate" attribute.  When
19541    SHORTFORM is set, expect that the insn has an 8-bit immediate alternative.  */
19543 ix86_attr_length_immediate_default (rtx insn, int shortform)
19547 extract_insn_cached (insn);
19548 for (i = recog_data.n_operands - 1; i >= 0; --i)
19549 if (CONSTANT_P (recog_data.operand[i]))
19551 enum attr_mode mode = get_attr_mode (insn);
19554 if (shortform && CONST_INT_P (recog_data.operand[i]))
19556 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
19563 ival = trunc_int_for_mode (ival, HImode);
19566 ival = trunc_int_for_mode (ival, SImode);
19571 if (IN_RANGE (ival, -128, 127))
19588 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values.  */
19593 fatal_insn ("unknown insn mode", insn);
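/* Illustrative sketch (not part of GCC): the decision above in plain C --
   with a shortform alternative, an immediate fitting a signed byte costs
   one byte; otherwise the operand width is used, capped at 4 because
   DImode immediates are encoded as 32-bit sign-extended values.
   Compiled out.  */
#if 0
static int
imm_bytes (long long ival, int mode_bytes, int shortform)
{
  if (shortform && ival >= -128 && ival <= 127)
    return 1;				 /* 8-bit immediate alternative */
  return mode_bytes > 4 ? 4 : mode_bytes; /* 1, 2, 4, or sign-extended 32 */
}
#endif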
19598 /* Compute default value for "length_address" attribute. */
19600 ix86_attr_length_address_default (rtx insn)
19604 if (get_attr_type (insn) == TYPE_LEA)
19606 rtx set = PATTERN (insn), addr;
19608 if (GET_CODE (set) == PARALLEL)
19609 set = XVECEXP (set, 0, 0);
19611 gcc_assert (GET_CODE (set) == SET);
19613 addr = SET_SRC (set);
19614 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
19616 if (GET_CODE (addr) == ZERO_EXTEND)
19617 addr = XEXP (addr, 0);
19618 if (GET_CODE (addr) == SUBREG)
19619 addr = SUBREG_REG (addr);
19622 return memory_address_length (addr);
19625 extract_insn_cached (insn);
19626 for (i = recog_data.n_operands - 1; i >= 0; --i)
19627 if (MEM_P (recog_data.operand[i]))
19629 constrain_operands_cached (reload_completed);
19630 if (which_alternative != -1)
19632 const char *constraints = recog_data.constraints[i];
19633 int alt = which_alternative;
19635 while (*constraints == '=' || *constraints == '+')
19638 while (*constraints++ != ',')
19640 /* Skip ignored operands. */
19641 if (*constraints == 'X')
19644 return memory_address_length (XEXP (recog_data.operand[i], 0));
19649 /* Compute the default value for the "length_vex" attribute.  It includes
19650    the 2- or 3-byte VEX prefix and 1 opcode byte.  */
19653 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
19658 /* Only the 0f opcode can use the 2-byte VEX prefix; the VEX W bit
19659    needs the 3-byte VEX prefix.  */
19660 if (!has_0f_opcode || has_vex_w)
19661   return 3 + 1;
19663 /* We can always use the 2-byte VEX prefix in 32-bit mode.  */
19664 if (!TARGET_64BIT)
19665   return 2 + 1;
19667 extract_insn_cached (insn);
19669 for (i = recog_data.n_operands - 1; i >= 0; --i)
19670 if (REG_P (recog_data.operand[i]))
19672 /* The REX.W bit needs the 3-byte VEX prefix.  */
19673 if (GET_MODE (recog_data.operand[i]) == DImode
19674     && GENERAL_REG_P (recog_data.operand[i]))
19675   return 3 + 1;
19679 /* The REX.X or REX.B bits need the 3-byte VEX prefix.  */
19680 if (MEM_P (recog_data.operand[i])
19681     && x86_extended_reg_mentioned_p (recog_data.operand[i]))
19682   return 3 + 1;
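/* Illustrative sketch (not part of GCC): the prefix-size decision above.
   The result counts the VEX prefix plus the one opcode byte.  Invented
   names; compiled out.  */
#if 0
static int
vex_prefix_and_opcode_len (int has_0f_opcode, int has_vex_w,
                           int uses_dimode_gpr, int mem_uses_extended_reg)
{
  /* Only the 0f escape can use the 2-byte form; VEX.W, REX.W-style DImode
     GPR operands and REX.X/REX.B extended registers in a memory address
     all force the 3-byte form.  */
  if (!has_0f_opcode || has_vex_w
      || uses_dimode_gpr || mem_uses_extended_reg)
    return 3 + 1;
  return 2 + 1;
}
#endif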
19688 /* Return the maximum number of instructions a CPU can issue.  */
19691 ix86_issue_rate (void)
19695 case PROCESSOR_PENTIUM:
19696 case PROCESSOR_ATOM:
19700 case PROCESSOR_PENTIUMPRO:
19701 case PROCESSOR_PENTIUM4:
19702 case PROCESSOR_ATHLON:
19704 case PROCESSOR_AMDFAM10:
19705 case PROCESSOR_NOCONA:
19706 case PROCESSOR_GENERIC32:
19707 case PROCESSOR_GENERIC64:
19710 case PROCESSOR_CORE2:
19718 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads the flags
19719    set by DEP_INSN and nothing else that DEP_INSN sets.  */
19722 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
19726 /* Simplify the test for uninteresting insns. */
19727 if (insn_type != TYPE_SETCC
19728 && insn_type != TYPE_ICMOV
19729 && insn_type != TYPE_FCMOV
19730 && insn_type != TYPE_IBR)
19733 if ((set = single_set (dep_insn)) != 0)
19735 set = SET_DEST (set);
19738 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
19739 && XVECLEN (PATTERN (dep_insn), 0) == 2
19740 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
19741 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
19743 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
19744 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
19749 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
19752 /* This test is true if the dependent insn reads the flags but
19753 not any other potentially set register. */
19754 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
19757 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
19763 /* Return true iff USE_INSN has a memory address with operands set by
19764    SET_INSN.  */
19767 ix86_agi_dependent (rtx set_insn, rtx use_insn)
19770 extract_insn_cached (use_insn);
19771 for (i = recog_data.n_operands - 1; i >= 0; --i)
19772 if (MEM_P (recog_data.operand[i]))
19774 rtx addr = XEXP (recog_data.operand[i], 0);
19775 return modified_in_p (addr, set_insn) != 0;
19781 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
19783 enum attr_type insn_type, dep_insn_type;
19784 enum attr_memory memory;
19786 int dep_insn_code_number;
19788 /* Anti and output dependencies have zero cost on all CPUs. */
19789 if (REG_NOTE_KIND (link) != 0)
19792 dep_insn_code_number = recog_memoized (dep_insn);
19794 /* If we can't recognize the insns, we can't really do anything. */
19795 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
19798 insn_type = get_attr_type (insn);
19799 dep_insn_type = get_attr_type (dep_insn);
19803 case PROCESSOR_PENTIUM:
19804 /* Address Generation Interlock adds a cycle of latency. */
19805 if (insn_type == TYPE_LEA)
19807 rtx addr = PATTERN (insn);
19809 if (GET_CODE (addr) == PARALLEL)
19810 addr = XVECEXP (addr, 0, 0);
19812 gcc_assert (GET_CODE (addr) == SET);
19814 addr = SET_SRC (addr);
19815 if (modified_in_p (addr, dep_insn))
19818 else if (ix86_agi_dependent (dep_insn, insn))
19821 /* ??? Compares pair with jump/setcc. */
19822 if (ix86_flags_dependent (insn, dep_insn, insn_type))
19825 /* Floating point stores require the value to be ready one cycle earlier.  */
19826 if (insn_type == TYPE_FMOV
19827 && get_attr_memory (insn) == MEMORY_STORE
19828 && !ix86_agi_dependent (dep_insn, insn))
19832 case PROCESSOR_PENTIUMPRO:
19833 memory = get_attr_memory (insn);
19835 /* INT->FP conversion is expensive. */
19836 if (get_attr_fp_int_src (dep_insn))
19839 /* There is one extra cycle of latency between an FP op and a store.  */
19840 if (insn_type == TYPE_FMOV
19841 && (set = single_set (dep_insn)) != NULL_RTX
19842 && (set2 = single_set (insn)) != NULL_RTX
19843 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
19844 && MEM_P (SET_DEST (set2)))
19847 /* Show the reorder buffer's ability to hide the latency of a load by
19848    executing it in parallel with the previous instruction when the
19849    previous instruction is not needed to compute the address.  */
19850 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19851 && !ix86_agi_dependent (dep_insn, insn))
19853 /* Claim moves to take one cycle, as the core can issue one load
19854    at a time and the next load can start a cycle later.  */
19855 if (dep_insn_type == TYPE_IMOV
19856 || dep_insn_type == TYPE_FMOV)
19864 memory = get_attr_memory (insn);
19866 /* The esp dependency is resolved before the instruction is really
19867    finished.  */
19868 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
19869 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
19872 /* INT->FP conversion is expensive. */
19873 if (get_attr_fp_int_src (dep_insn))
19876 /* Show the reorder buffer's ability to hide the latency of a load by
19877    executing it in parallel with the previous instruction when the
19878    previous instruction is not needed to compute the address.  */
19879 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19880 && !ix86_agi_dependent (dep_insn, insn))
19882 /* Claim moves to take one cycle, as the core can issue one load
19883    at a time and the next load can start a cycle later.  */
19884 if (dep_insn_type == TYPE_IMOV
19885 || dep_insn_type == TYPE_FMOV)
19894 case PROCESSOR_ATHLON:
19896 case PROCESSOR_AMDFAM10:
19897 case PROCESSOR_ATOM:
19898 case PROCESSOR_GENERIC32:
19899 case PROCESSOR_GENERIC64:
19900 memory = get_attr_memory (insn);
19902 /* Show the reorder buffer's ability to hide the latency of a load by
19903    executing it in parallel with the previous instruction when the
19904    previous instruction is not needed to compute the address.  */
19905 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19906 && !ix86_agi_dependent (dep_insn, insn))
19908 enum attr_unit unit = get_attr_unit (insn);
19911 /* Because of the difference between the length of the integer and
19912    floating unit pipeline preparation stages, the memory operands
19913    for floating point are cheaper.
19915    ??? For Athlon the difference is most probably 2.  */
19916 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
19919 loadcost = TARGET_ATHLON ? 2 : 0;
19921 if (cost >= loadcost)
19934 /* How many alternative schedules to try.  This should be as wide as the
19935    scheduling freedom in the DFA, but no wider.  Making this value too
19936    large results in extra work for the scheduler.  */
19939 ia32_multipass_dfa_lookahead (void)
19943 case PROCESSOR_PENTIUM:
19946 case PROCESSOR_PENTIUMPRO:
19956 /* Compute the alignment given to a constant that is being placed in memory.
19957    EXP is the constant and ALIGN is the alignment that the object would
19958    ordinarily have.
19959    The value of this function is used instead of that alignment to align
19960    the object.  */
19963 ix86_constant_alignment (tree exp, int align)
19965 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
19966 || TREE_CODE (exp) == INTEGER_CST)
19968 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
19970 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
19973 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
19974 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
19975 return BITS_PER_WORD;
19980 /* Compute the alignment for a static variable.
19981 TYPE is the data type, and ALIGN is the alignment that
19982 the object would ordinarily have. The value of this function is used
19983 instead of that alignment to align the object. */
19986 ix86_data_alignment (tree type, int align)
19988 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
19990 if (AGGREGATE_TYPE_P (type)
19991 && TYPE_SIZE (type)
19992 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19993 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
19994 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
19995 && align < max_align)
19998 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
19999    to a 16-byte boundary.  */
20002 if (AGGREGATE_TYPE_P (type)
20003 && TYPE_SIZE (type)
20004 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20005 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
20006 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20010 if (TREE_CODE (type) == ARRAY_TYPE)
20012 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20014 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20017 else if (TREE_CODE (type) == COMPLEX_TYPE)
20020 if (TYPE_MODE (type) == DCmode && align < 64)
20022 if ((TYPE_MODE (type) == XCmode
20023 || TYPE_MODE (type) == TCmode) && align < 128)
20026 else if ((TREE_CODE (type) == RECORD_TYPE
20027 || TREE_CODE (type) == UNION_TYPE
20028 || TREE_CODE (type) == QUAL_UNION_TYPE)
20029 && TYPE_FIELDS (type))
20031 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20033 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20036 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20037 || TREE_CODE (type) == INTEGER_TYPE)
20039 if (TYPE_MODE (type) == DFmode && align < 64)
20041 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
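/* Illustrative sketch (not part of GCC): the x86-64 ABI rule applied
   above -- aggregates of 16 bytes or more get their alignment raised to
   16 bytes (128 bits) so aligned SSE accesses are possible.  Invented
   helper; compiled out.  */
#if 0
static int
abi_aggregate_align_bits (unsigned long size_bytes, int natural_align_bits)
{
  if (size_bytes >= 16 && natural_align_bits < 128)
    return 128;			/* raise to a 16-byte boundary */
  return natural_align_bits;
}
#endif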
20048 /* Compute the alignment for a local variable or a stack slot. EXP is
20049 the data type or decl itself, MODE is the widest mode available and
20050 ALIGN is the alignment that the object would ordinarily have. The
20051 value of this macro is used instead of that alignment to align the object. */
20055 ix86_local_alignment (tree exp, enum machine_mode mode,
20056 unsigned int align)
20060 if (exp && DECL_P (exp))
20062 type = TREE_TYPE (exp);
20071 /* Don't do dynamic stack realignment for long long objects with
20072 -mpreferred-stack-boundary=2. */
20075 && ix86_preferred_stack_boundary < 64
20076 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
20077 && (!type || !TYPE_USER_ALIGN (type))
20078 && (!decl || !DECL_USER_ALIGN (decl)))
20081 /* If TYPE is NULL, we are allocating a stack slot for caller-save
20082 register in MODE. We will return the largest alignment of XF and DF. */
20086 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
20087 align = GET_MODE_ALIGNMENT (DFmode);
20091 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
20092 to a 16-byte boundary. The exact wording is:
20094 An array uses the same alignment as its elements, except that a local or
20095 global array variable of length at least 16 bytes or
20096 a C99 variable-length array variable always has alignment of at least 16 bytes.
20098 This was added to allow use of aligned SSE instructions on arrays. This
20099 rule is meant for static storage (where the compiler cannot do the analysis
20100 by itself). We follow it for automatic variables only when convenient:
20101 we fully control everything in the function being compiled, and functions
20102 from other units cannot rely on the alignment.
20104 Exclude the va_list type. It is the common case of a local array where
20105 we cannot benefit from the alignment. */
20106 if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
20109 if (AGGREGATE_TYPE_P (type)
20110 && (TYPE_MAIN_VARIANT (type)
20111 != TYPE_MAIN_VARIANT (va_list_type_node))
20112 && TYPE_SIZE (type)
20113 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20114 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
20115 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20118 if (TREE_CODE (type) == ARRAY_TYPE)
20120 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20122 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20125 else if (TREE_CODE (type) == COMPLEX_TYPE)
20127 if (TYPE_MODE (type) == DCmode && align < 64)
20129 if ((TYPE_MODE (type) == XCmode
20130 || TYPE_MODE (type) == TCmode) && align < 128)
20133 else if ((TREE_CODE (type) == RECORD_TYPE
20134 || TREE_CODE (type) == UNION_TYPE
20135 || TREE_CODE (type) == QUAL_UNION_TYPE)
20136 && TYPE_FIELDS (type))
20138 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20140 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20143 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20144 || TREE_CODE (type) == INTEGER_TYPE)
20147 if (TYPE_MODE (type) == DFmode && align < 64)
20149 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20155 /* Compute the minimum required alignment for dynamic stack realignment
20156 purposes for a local variable, parameter or a stack slot. EXP is
20157 the data type or decl itself, MODE is its mode and ALIGN is the
20158 alignment that the object would ordinarily have. */
20161 ix86_minimum_alignment (tree exp, enum machine_mode mode,
20162 unsigned int align)
20166 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
20169 if (exp && DECL_P (exp))
20171 type = TREE_TYPE (exp);
20180 /* Don't do dynamic stack realignment for long long objects with
20181 -mpreferred-stack-boundary=2. */
20182 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
20183 && (!type || !TYPE_USER_ALIGN (type))
20184 && (!decl || !DECL_USER_ALIGN (decl)))
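/* Sketch, for illustration (not part of the original code): with
   -mpreferred-stack-boundary=2 the incoming stack is only guaranteed
   4-byte alignment, so for a local

     long long t;

   this hook reports a 32-bit minimum instead of the natural 64-bit
   one, avoiding a dynamic realignment of the whole frame just for
   that object.  */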
20190 /* Find a location for the static chain incoming to a nested function.
20191 This is a register, unless all free registers are used by arguments. */
20194 ix86_static_chain (const_tree fndecl, bool incoming_p)
20198 if (!DECL_STATIC_CHAIN (fndecl))
20203 /* We always use R10 in 64-bit mode. */
20209 /* By default in 32-bit mode we use ECX to pass the static chain. */
20212 fntype = TREE_TYPE (fndecl);
20213 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
20215 /* Fastcall functions use ecx/edx for arguments, which leaves
20216 us with EAX for the static chain. */
20219 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
20221 /* Thiscall functions use ecx for arguments, which leaves
20222 us with EAX for the static chain. */
20225 else if (ix86_function_regparm (fntype, fndecl) == 3)
20227 /* For regparm 3, we have no free call-clobbered registers in
20228 which to store the static chain. In order to implement this,
20229 we have the trampoline push the static chain to the stack.
20230 However, we can't push a value below the return address when
20231 we call the nested function directly, so we have to use an
20232 alternate entry point. For this we use ESI, and have the
20233 alternate entry point push ESI, so that things appear the
20234 same once we're executing the nested function. */
20237 if (fndecl == current_function_decl)
20238 ix86_static_chain_on_stack = true;
20239 return gen_frame_mem (SImode,
20240 plus_constant (arg_pointer_rtx, -8));
20246 return gen_rtx_REG (Pmode, regno);
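/* Usage sketch (illustrative, not part of GCC): for a GNU C nested
   function such as

     void outer (void)
     {
       int x = 42;
       void inner (void) { x++; }
       inner ();
     }

   the static chain is the pointer to outer's frame through which
   inner reaches x.  By the rules above it travels in %r10 in 64-bit
   mode and normally in %ecx in 32-bit mode, falling back to %eax for
   fastcall/thiscall and to a stack slot for regparm(3).  */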
20249 /* Emit RTL insns to initialize the variable parts of a trampoline.
20250 FNDECL is the decl of the target address; M_TRAMP is a MEM for
20251 the trampoline, and CHAIN_VALUE is an RTX for the static chain
20252 to be passed to the target function. */
20255 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
20259 fnaddr = XEXP (DECL_RTL (fndecl), 0);
20266 /* Depending on the static chain location, either load a register
20267 with a constant, or push the constant to the stack. All of the
20268 instructions are the same size. */
20269 chain = ix86_static_chain (fndecl, true);
20272 if (REGNO (chain) == CX_REG)
20274 else if (REGNO (chain) == AX_REG)
20277 gcc_unreachable ();
20282 mem = adjust_address (m_tramp, QImode, 0);
20283 emit_move_insn (mem, gen_int_mode (opcode, QImode));
20285 mem = adjust_address (m_tramp, SImode, 1);
20286 emit_move_insn (mem, chain_value);
20288 /* Compute offset from the end of the jmp to the target function.
20289 In the case in which the trampoline stores the static chain on
20290 the stack, we need to skip the first insn which pushes the
20291 (call-saved) register static chain; this push is 1 byte. */
20292 disp = expand_binop (SImode, sub_optab, fnaddr,
20293 plus_constant (XEXP (m_tramp, 0),
20294 MEM_P (chain) ? 9 : 10),
20295 NULL_RTX, 1, OPTAB_DIRECT);
20297 mem = adjust_address (m_tramp, QImode, 5);
20298 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
20300 mem = adjust_address (m_tramp, SImode, 6);
20301 emit_move_insn (mem, disp);
20307 /* Load the function address to r11. Try to load address using
20308 the shorter movl instead of movabs. We may want to support
20309 movq for kernel mode, but the kernel does not use trampolines at the moment. */
20311 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
20313 fnaddr = copy_to_mode_reg (DImode, fnaddr);
20315 mem = adjust_address (m_tramp, HImode, offset);
20316 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
20318 mem = adjust_address (m_tramp, SImode, offset + 2);
20319 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
20324 mem = adjust_address (m_tramp, HImode, offset);
20325 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
20327 mem = adjust_address (m_tramp, DImode, offset + 2);
20328 emit_move_insn (mem, fnaddr);
20332 /* Load static chain using movabs to r10. */
20333 mem = adjust_address (m_tramp, HImode, offset);
20334 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
20336 mem = adjust_address (m_tramp, DImode, offset + 2);
20337 emit_move_insn (mem, chain_value);
20340 /* Jump to r11; the last (unused) byte is a nop, only there to
20341 pad the write out to a single 32-bit store. */
20342 mem = adjust_address (m_tramp, SImode, offset);
20343 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
20346 gcc_assert (offset <= TRAMPOLINE_SIZE);
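/* Byte layout of the trampolines emitted above, for illustration
   (immediates little-endian; the first 64-bit form assumes the
   zero-extended movl was usable):

     32-bit:  b9|b8 imm32     movl  $chain, %ecx|%eax
                              (68 imm32, pushl $chain, when the chain
                               is passed on the stack)
              e9 rel32        jmp   fnaddr
     64-bit:  41 bb imm32     movl  $fnaddr, %r11d
              49 ba imm64     movabs $chain, %r10
              49 ff e3 90     rex.WB jmpq *%r11; nop  */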
20349 #ifdef ENABLE_EXECUTE_STACK
20350 #ifdef CHECK_EXECUTE_STACK_ENABLED
20351 if (CHECK_EXECUTE_STACK_ENABLED)
20353 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
20354 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
20358 /* The following file contains several enumerations and data structures
20359 built from the definitions in i386-builtin-types.def. */
20361 #include "i386-builtin-types.inc"
20363 /* Table for the ix86 builtin non-function types. */
20364 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
20366 /* Retrieve an element from the above table, building some of
20367 the types lazily. */
20370 ix86_get_builtin_type (enum ix86_builtin_type tcode)
20372 unsigned int index;
20375 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
20377 type = ix86_builtin_type_tab[(int) tcode];
20381 gcc_assert (tcode > IX86_BT_LAST_PRIM);
20382 if (tcode <= IX86_BT_LAST_VECT)
20384 enum machine_mode mode;
20386 index = tcode - IX86_BT_LAST_PRIM - 1;
20387 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
20388 mode = ix86_builtin_type_vect_mode[index];
20390 type = build_vector_type_for_mode (itype, mode);
20396 index = tcode - IX86_BT_LAST_VECT - 1;
20397 if (tcode <= IX86_BT_LAST_PTR)
20398 quals = TYPE_UNQUALIFIED;
20400 quals = TYPE_QUAL_CONST;
20402 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
20403 if (quals != TYPE_UNQUALIFIED)
20404 itype = build_qualified_type (itype, quals);
20406 type = build_pointer_type (itype);
20409 ix86_builtin_type_tab[(int) tcode] = type;
20413 /* Table for the ix86 builtin function types. */
20414 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
20416 /* Retrieve an element from the above table, building some of
20417 the types lazily. */
20420 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
20424 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
20426 type = ix86_builtin_func_type_tab[(int) tcode];
20430 if (tcode <= IX86_BT_LAST_FUNC)
20432 unsigned start = ix86_builtin_func_start[(int) tcode];
20433 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
20434 tree rtype, atype, args = void_list_node;
20437 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
20438 for (i = after - 1; i > start; --i)
20440 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
20441 args = tree_cons (NULL, atype, args);
20444 type = build_function_type (rtype, args);
20448 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
20449 enum ix86_builtin_func_type icode;
20451 icode = ix86_builtin_func_alias_base[index];
20452 type = ix86_get_builtin_func_type (icode);
20455 ix86_builtin_func_type_tab[(int) tcode] = type;
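/* For illustration, with hypothetical table contents: if the slots for
   a V4SF_FTYPE_V4SF_V4SF signature held { V4SF, V4SF, V4SF }, the
   backwards walk above would build

     tree_cons (NULL, V4SF, tree_cons (NULL, V4SF, void_list_node))

   and hand it to build_function_type, yielding the function type
   V4SF (*) (V4SF, V4SF).  */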
20460 /* Codes for all the SSE/MMX builtins. */
20463 IX86_BUILTIN_ADDPS,
20464 IX86_BUILTIN_ADDSS,
20465 IX86_BUILTIN_DIVPS,
20466 IX86_BUILTIN_DIVSS,
20467 IX86_BUILTIN_MULPS,
20468 IX86_BUILTIN_MULSS,
20469 IX86_BUILTIN_SUBPS,
20470 IX86_BUILTIN_SUBSS,
20472 IX86_BUILTIN_CMPEQPS,
20473 IX86_BUILTIN_CMPLTPS,
20474 IX86_BUILTIN_CMPLEPS,
20475 IX86_BUILTIN_CMPGTPS,
20476 IX86_BUILTIN_CMPGEPS,
20477 IX86_BUILTIN_CMPNEQPS,
20478 IX86_BUILTIN_CMPNLTPS,
20479 IX86_BUILTIN_CMPNLEPS,
20480 IX86_BUILTIN_CMPNGTPS,
20481 IX86_BUILTIN_CMPNGEPS,
20482 IX86_BUILTIN_CMPORDPS,
20483 IX86_BUILTIN_CMPUNORDPS,
20484 IX86_BUILTIN_CMPEQSS,
20485 IX86_BUILTIN_CMPLTSS,
20486 IX86_BUILTIN_CMPLESS,
20487 IX86_BUILTIN_CMPNEQSS,
20488 IX86_BUILTIN_CMPNLTSS,
20489 IX86_BUILTIN_CMPNLESS,
20490 IX86_BUILTIN_CMPNGTSS,
20491 IX86_BUILTIN_CMPNGESS,
20492 IX86_BUILTIN_CMPORDSS,
20493 IX86_BUILTIN_CMPUNORDSS,
20495 IX86_BUILTIN_COMIEQSS,
20496 IX86_BUILTIN_COMILTSS,
20497 IX86_BUILTIN_COMILESS,
20498 IX86_BUILTIN_COMIGTSS,
20499 IX86_BUILTIN_COMIGESS,
20500 IX86_BUILTIN_COMINEQSS,
20501 IX86_BUILTIN_UCOMIEQSS,
20502 IX86_BUILTIN_UCOMILTSS,
20503 IX86_BUILTIN_UCOMILESS,
20504 IX86_BUILTIN_UCOMIGTSS,
20505 IX86_BUILTIN_UCOMIGESS,
20506 IX86_BUILTIN_UCOMINEQSS,
20508 IX86_BUILTIN_CVTPI2PS,
20509 IX86_BUILTIN_CVTPS2PI,
20510 IX86_BUILTIN_CVTSI2SS,
20511 IX86_BUILTIN_CVTSI642SS,
20512 IX86_BUILTIN_CVTSS2SI,
20513 IX86_BUILTIN_CVTSS2SI64,
20514 IX86_BUILTIN_CVTTPS2PI,
20515 IX86_BUILTIN_CVTTSS2SI,
20516 IX86_BUILTIN_CVTTSS2SI64,
20518 IX86_BUILTIN_MAXPS,
20519 IX86_BUILTIN_MAXSS,
20520 IX86_BUILTIN_MINPS,
20521 IX86_BUILTIN_MINSS,
20523 IX86_BUILTIN_LOADUPS,
20524 IX86_BUILTIN_STOREUPS,
20525 IX86_BUILTIN_MOVSS,
20527 IX86_BUILTIN_MOVHLPS,
20528 IX86_BUILTIN_MOVLHPS,
20529 IX86_BUILTIN_LOADHPS,
20530 IX86_BUILTIN_LOADLPS,
20531 IX86_BUILTIN_STOREHPS,
20532 IX86_BUILTIN_STORELPS,
20534 IX86_BUILTIN_MASKMOVQ,
20535 IX86_BUILTIN_MOVMSKPS,
20536 IX86_BUILTIN_PMOVMSKB,
20538 IX86_BUILTIN_MOVNTPS,
20539 IX86_BUILTIN_MOVNTQ,
20541 IX86_BUILTIN_LOADDQU,
20542 IX86_BUILTIN_STOREDQU,
20544 IX86_BUILTIN_PACKSSWB,
20545 IX86_BUILTIN_PACKSSDW,
20546 IX86_BUILTIN_PACKUSWB,
20548 IX86_BUILTIN_PADDB,
20549 IX86_BUILTIN_PADDW,
20550 IX86_BUILTIN_PADDD,
20551 IX86_BUILTIN_PADDQ,
20552 IX86_BUILTIN_PADDSB,
20553 IX86_BUILTIN_PADDSW,
20554 IX86_BUILTIN_PADDUSB,
20555 IX86_BUILTIN_PADDUSW,
20556 IX86_BUILTIN_PSUBB,
20557 IX86_BUILTIN_PSUBW,
20558 IX86_BUILTIN_PSUBD,
20559 IX86_BUILTIN_PSUBQ,
20560 IX86_BUILTIN_PSUBSB,
20561 IX86_BUILTIN_PSUBSW,
20562 IX86_BUILTIN_PSUBUSB,
20563 IX86_BUILTIN_PSUBUSW,
20566 IX86_BUILTIN_PANDN,
20570 IX86_BUILTIN_PAVGB,
20571 IX86_BUILTIN_PAVGW,
20573 IX86_BUILTIN_PCMPEQB,
20574 IX86_BUILTIN_PCMPEQW,
20575 IX86_BUILTIN_PCMPEQD,
20576 IX86_BUILTIN_PCMPGTB,
20577 IX86_BUILTIN_PCMPGTW,
20578 IX86_BUILTIN_PCMPGTD,
20580 IX86_BUILTIN_PMADDWD,
20582 IX86_BUILTIN_PMAXSW,
20583 IX86_BUILTIN_PMAXUB,
20584 IX86_BUILTIN_PMINSW,
20585 IX86_BUILTIN_PMINUB,
20587 IX86_BUILTIN_PMULHUW,
20588 IX86_BUILTIN_PMULHW,
20589 IX86_BUILTIN_PMULLW,
20591 IX86_BUILTIN_PSADBW,
20592 IX86_BUILTIN_PSHUFW,
20594 IX86_BUILTIN_PSLLW,
20595 IX86_BUILTIN_PSLLD,
20596 IX86_BUILTIN_PSLLQ,
20597 IX86_BUILTIN_PSRAW,
20598 IX86_BUILTIN_PSRAD,
20599 IX86_BUILTIN_PSRLW,
20600 IX86_BUILTIN_PSRLD,
20601 IX86_BUILTIN_PSRLQ,
20602 IX86_BUILTIN_PSLLWI,
20603 IX86_BUILTIN_PSLLDI,
20604 IX86_BUILTIN_PSLLQI,
20605 IX86_BUILTIN_PSRAWI,
20606 IX86_BUILTIN_PSRADI,
20607 IX86_BUILTIN_PSRLWI,
20608 IX86_BUILTIN_PSRLDI,
20609 IX86_BUILTIN_PSRLQI,
20611 IX86_BUILTIN_PUNPCKHBW,
20612 IX86_BUILTIN_PUNPCKHWD,
20613 IX86_BUILTIN_PUNPCKHDQ,
20614 IX86_BUILTIN_PUNPCKLBW,
20615 IX86_BUILTIN_PUNPCKLWD,
20616 IX86_BUILTIN_PUNPCKLDQ,
20618 IX86_BUILTIN_SHUFPS,
20620 IX86_BUILTIN_RCPPS,
20621 IX86_BUILTIN_RCPSS,
20622 IX86_BUILTIN_RSQRTPS,
20623 IX86_BUILTIN_RSQRTPS_NR,
20624 IX86_BUILTIN_RSQRTSS,
20625 IX86_BUILTIN_RSQRTF,
20626 IX86_BUILTIN_SQRTPS,
20627 IX86_BUILTIN_SQRTPS_NR,
20628 IX86_BUILTIN_SQRTSS,
20630 IX86_BUILTIN_UNPCKHPS,
20631 IX86_BUILTIN_UNPCKLPS,
20633 IX86_BUILTIN_ANDPS,
20634 IX86_BUILTIN_ANDNPS,
20636 IX86_BUILTIN_XORPS,
20639 IX86_BUILTIN_LDMXCSR,
20640 IX86_BUILTIN_STMXCSR,
20641 IX86_BUILTIN_SFENCE,
20643 /* 3DNow! Original */
20644 IX86_BUILTIN_FEMMS,
20645 IX86_BUILTIN_PAVGUSB,
20646 IX86_BUILTIN_PF2ID,
20647 IX86_BUILTIN_PFACC,
20648 IX86_BUILTIN_PFADD,
20649 IX86_BUILTIN_PFCMPEQ,
20650 IX86_BUILTIN_PFCMPGE,
20651 IX86_BUILTIN_PFCMPGT,
20652 IX86_BUILTIN_PFMAX,
20653 IX86_BUILTIN_PFMIN,
20654 IX86_BUILTIN_PFMUL,
20655 IX86_BUILTIN_PFRCP,
20656 IX86_BUILTIN_PFRCPIT1,
20657 IX86_BUILTIN_PFRCPIT2,
20658 IX86_BUILTIN_PFRSQIT1,
20659 IX86_BUILTIN_PFRSQRT,
20660 IX86_BUILTIN_PFSUB,
20661 IX86_BUILTIN_PFSUBR,
20662 IX86_BUILTIN_PI2FD,
20663 IX86_BUILTIN_PMULHRW,
20665 /* 3DNow! Athlon Extensions */
20666 IX86_BUILTIN_PF2IW,
20667 IX86_BUILTIN_PFNACC,
20668 IX86_BUILTIN_PFPNACC,
20669 IX86_BUILTIN_PI2FW,
20670 IX86_BUILTIN_PSWAPDSI,
20671 IX86_BUILTIN_PSWAPDSF,
20674 IX86_BUILTIN_ADDPD,
20675 IX86_BUILTIN_ADDSD,
20676 IX86_BUILTIN_DIVPD,
20677 IX86_BUILTIN_DIVSD,
20678 IX86_BUILTIN_MULPD,
20679 IX86_BUILTIN_MULSD,
20680 IX86_BUILTIN_SUBPD,
20681 IX86_BUILTIN_SUBSD,
20683 IX86_BUILTIN_CMPEQPD,
20684 IX86_BUILTIN_CMPLTPD,
20685 IX86_BUILTIN_CMPLEPD,
20686 IX86_BUILTIN_CMPGTPD,
20687 IX86_BUILTIN_CMPGEPD,
20688 IX86_BUILTIN_CMPNEQPD,
20689 IX86_BUILTIN_CMPNLTPD,
20690 IX86_BUILTIN_CMPNLEPD,
20691 IX86_BUILTIN_CMPNGTPD,
20692 IX86_BUILTIN_CMPNGEPD,
20693 IX86_BUILTIN_CMPORDPD,
20694 IX86_BUILTIN_CMPUNORDPD,
20695 IX86_BUILTIN_CMPEQSD,
20696 IX86_BUILTIN_CMPLTSD,
20697 IX86_BUILTIN_CMPLESD,
20698 IX86_BUILTIN_CMPNEQSD,
20699 IX86_BUILTIN_CMPNLTSD,
20700 IX86_BUILTIN_CMPNLESD,
20701 IX86_BUILTIN_CMPORDSD,
20702 IX86_BUILTIN_CMPUNORDSD,
20704 IX86_BUILTIN_COMIEQSD,
20705 IX86_BUILTIN_COMILTSD,
20706 IX86_BUILTIN_COMILESD,
20707 IX86_BUILTIN_COMIGTSD,
20708 IX86_BUILTIN_COMIGESD,
20709 IX86_BUILTIN_COMINEQSD,
20710 IX86_BUILTIN_UCOMIEQSD,
20711 IX86_BUILTIN_UCOMILTSD,
20712 IX86_BUILTIN_UCOMILESD,
20713 IX86_BUILTIN_UCOMIGTSD,
20714 IX86_BUILTIN_UCOMIGESD,
20715 IX86_BUILTIN_UCOMINEQSD,
20717 IX86_BUILTIN_MAXPD,
20718 IX86_BUILTIN_MAXSD,
20719 IX86_BUILTIN_MINPD,
20720 IX86_BUILTIN_MINSD,
20722 IX86_BUILTIN_ANDPD,
20723 IX86_BUILTIN_ANDNPD,
20725 IX86_BUILTIN_XORPD,
20727 IX86_BUILTIN_SQRTPD,
20728 IX86_BUILTIN_SQRTSD,
20730 IX86_BUILTIN_UNPCKHPD,
20731 IX86_BUILTIN_UNPCKLPD,
20733 IX86_BUILTIN_SHUFPD,
20735 IX86_BUILTIN_LOADUPD,
20736 IX86_BUILTIN_STOREUPD,
20737 IX86_BUILTIN_MOVSD,
20739 IX86_BUILTIN_LOADHPD,
20740 IX86_BUILTIN_LOADLPD,
20742 IX86_BUILTIN_CVTDQ2PD,
20743 IX86_BUILTIN_CVTDQ2PS,
20745 IX86_BUILTIN_CVTPD2DQ,
20746 IX86_BUILTIN_CVTPD2PI,
20747 IX86_BUILTIN_CVTPD2PS,
20748 IX86_BUILTIN_CVTTPD2DQ,
20749 IX86_BUILTIN_CVTTPD2PI,
20751 IX86_BUILTIN_CVTPI2PD,
20752 IX86_BUILTIN_CVTSI2SD,
20753 IX86_BUILTIN_CVTSI642SD,
20755 IX86_BUILTIN_CVTSD2SI,
20756 IX86_BUILTIN_CVTSD2SI64,
20757 IX86_BUILTIN_CVTSD2SS,
20758 IX86_BUILTIN_CVTSS2SD,
20759 IX86_BUILTIN_CVTTSD2SI,
20760 IX86_BUILTIN_CVTTSD2SI64,
20762 IX86_BUILTIN_CVTPS2DQ,
20763 IX86_BUILTIN_CVTPS2PD,
20764 IX86_BUILTIN_CVTTPS2DQ,
20766 IX86_BUILTIN_MOVNTI,
20767 IX86_BUILTIN_MOVNTPD,
20768 IX86_BUILTIN_MOVNTDQ,
20770 IX86_BUILTIN_MOVQ128,
20773 IX86_BUILTIN_MASKMOVDQU,
20774 IX86_BUILTIN_MOVMSKPD,
20775 IX86_BUILTIN_PMOVMSKB128,
20777 IX86_BUILTIN_PACKSSWB128,
20778 IX86_BUILTIN_PACKSSDW128,
20779 IX86_BUILTIN_PACKUSWB128,
20781 IX86_BUILTIN_PADDB128,
20782 IX86_BUILTIN_PADDW128,
20783 IX86_BUILTIN_PADDD128,
20784 IX86_BUILTIN_PADDQ128,
20785 IX86_BUILTIN_PADDSB128,
20786 IX86_BUILTIN_PADDSW128,
20787 IX86_BUILTIN_PADDUSB128,
20788 IX86_BUILTIN_PADDUSW128,
20789 IX86_BUILTIN_PSUBB128,
20790 IX86_BUILTIN_PSUBW128,
20791 IX86_BUILTIN_PSUBD128,
20792 IX86_BUILTIN_PSUBQ128,
20793 IX86_BUILTIN_PSUBSB128,
20794 IX86_BUILTIN_PSUBSW128,
20795 IX86_BUILTIN_PSUBUSB128,
20796 IX86_BUILTIN_PSUBUSW128,
20798 IX86_BUILTIN_PAND128,
20799 IX86_BUILTIN_PANDN128,
20800 IX86_BUILTIN_POR128,
20801 IX86_BUILTIN_PXOR128,
20803 IX86_BUILTIN_PAVGB128,
20804 IX86_BUILTIN_PAVGW128,
20806 IX86_BUILTIN_PCMPEQB128,
20807 IX86_BUILTIN_PCMPEQW128,
20808 IX86_BUILTIN_PCMPEQD128,
20809 IX86_BUILTIN_PCMPGTB128,
20810 IX86_BUILTIN_PCMPGTW128,
20811 IX86_BUILTIN_PCMPGTD128,
20813 IX86_BUILTIN_PMADDWD128,
20815 IX86_BUILTIN_PMAXSW128,
20816 IX86_BUILTIN_PMAXUB128,
20817 IX86_BUILTIN_PMINSW128,
20818 IX86_BUILTIN_PMINUB128,
20820 IX86_BUILTIN_PMULUDQ,
20821 IX86_BUILTIN_PMULUDQ128,
20822 IX86_BUILTIN_PMULHUW128,
20823 IX86_BUILTIN_PMULHW128,
20824 IX86_BUILTIN_PMULLW128,
20826 IX86_BUILTIN_PSADBW128,
20827 IX86_BUILTIN_PSHUFHW,
20828 IX86_BUILTIN_PSHUFLW,
20829 IX86_BUILTIN_PSHUFD,
20831 IX86_BUILTIN_PSLLDQI128,
20832 IX86_BUILTIN_PSLLWI128,
20833 IX86_BUILTIN_PSLLDI128,
20834 IX86_BUILTIN_PSLLQI128,
20835 IX86_BUILTIN_PSRAWI128,
20836 IX86_BUILTIN_PSRADI128,
20837 IX86_BUILTIN_PSRLDQI128,
20838 IX86_BUILTIN_PSRLWI128,
20839 IX86_BUILTIN_PSRLDI128,
20840 IX86_BUILTIN_PSRLQI128,
20842 IX86_BUILTIN_PSLLDQ128,
20843 IX86_BUILTIN_PSLLW128,
20844 IX86_BUILTIN_PSLLD128,
20845 IX86_BUILTIN_PSLLQ128,
20846 IX86_BUILTIN_PSRAW128,
20847 IX86_BUILTIN_PSRAD128,
20848 IX86_BUILTIN_PSRLW128,
20849 IX86_BUILTIN_PSRLD128,
20850 IX86_BUILTIN_PSRLQ128,
20852 IX86_BUILTIN_PUNPCKHBW128,
20853 IX86_BUILTIN_PUNPCKHWD128,
20854 IX86_BUILTIN_PUNPCKHDQ128,
20855 IX86_BUILTIN_PUNPCKHQDQ128,
20856 IX86_BUILTIN_PUNPCKLBW128,
20857 IX86_BUILTIN_PUNPCKLWD128,
20858 IX86_BUILTIN_PUNPCKLDQ128,
20859 IX86_BUILTIN_PUNPCKLQDQ128,
20861 IX86_BUILTIN_CLFLUSH,
20862 IX86_BUILTIN_MFENCE,
20863 IX86_BUILTIN_LFENCE,
20865 IX86_BUILTIN_BSRSI,
20866 IX86_BUILTIN_BSRDI,
20867 IX86_BUILTIN_RDPMC,
20868 IX86_BUILTIN_RDTSC,
20869 IX86_BUILTIN_RDTSCP,
20870 IX86_BUILTIN_ROLQI,
20871 IX86_BUILTIN_ROLHI,
20872 IX86_BUILTIN_RORQI,
20873 IX86_BUILTIN_RORHI,
20876 IX86_BUILTIN_ADDSUBPS,
20877 IX86_BUILTIN_HADDPS,
20878 IX86_BUILTIN_HSUBPS,
20879 IX86_BUILTIN_MOVSHDUP,
20880 IX86_BUILTIN_MOVSLDUP,
20881 IX86_BUILTIN_ADDSUBPD,
20882 IX86_BUILTIN_HADDPD,
20883 IX86_BUILTIN_HSUBPD,
20884 IX86_BUILTIN_LDDQU,
20886 IX86_BUILTIN_MONITOR,
20887 IX86_BUILTIN_MWAIT,
20890 IX86_BUILTIN_PHADDW,
20891 IX86_BUILTIN_PHADDD,
20892 IX86_BUILTIN_PHADDSW,
20893 IX86_BUILTIN_PHSUBW,
20894 IX86_BUILTIN_PHSUBD,
20895 IX86_BUILTIN_PHSUBSW,
20896 IX86_BUILTIN_PMADDUBSW,
20897 IX86_BUILTIN_PMULHRSW,
20898 IX86_BUILTIN_PSHUFB,
20899 IX86_BUILTIN_PSIGNB,
20900 IX86_BUILTIN_PSIGNW,
20901 IX86_BUILTIN_PSIGND,
20902 IX86_BUILTIN_PALIGNR,
20903 IX86_BUILTIN_PABSB,
20904 IX86_BUILTIN_PABSW,
20905 IX86_BUILTIN_PABSD,
20907 IX86_BUILTIN_PHADDW128,
20908 IX86_BUILTIN_PHADDD128,
20909 IX86_BUILTIN_PHADDSW128,
20910 IX86_BUILTIN_PHSUBW128,
20911 IX86_BUILTIN_PHSUBD128,
20912 IX86_BUILTIN_PHSUBSW128,
20913 IX86_BUILTIN_PMADDUBSW128,
20914 IX86_BUILTIN_PMULHRSW128,
20915 IX86_BUILTIN_PSHUFB128,
20916 IX86_BUILTIN_PSIGNB128,
20917 IX86_BUILTIN_PSIGNW128,
20918 IX86_BUILTIN_PSIGND128,
20919 IX86_BUILTIN_PALIGNR128,
20920 IX86_BUILTIN_PABSB128,
20921 IX86_BUILTIN_PABSW128,
20922 IX86_BUILTIN_PABSD128,
20924 /* AMDFAM10 - SSE4A New Instructions. */
20925 IX86_BUILTIN_MOVNTSD,
20926 IX86_BUILTIN_MOVNTSS,
20927 IX86_BUILTIN_EXTRQI,
20928 IX86_BUILTIN_EXTRQ,
20929 IX86_BUILTIN_INSERTQI,
20930 IX86_BUILTIN_INSERTQ,
20933 IX86_BUILTIN_BLENDPD,
20934 IX86_BUILTIN_BLENDPS,
20935 IX86_BUILTIN_BLENDVPD,
20936 IX86_BUILTIN_BLENDVPS,
20937 IX86_BUILTIN_PBLENDVB128,
20938 IX86_BUILTIN_PBLENDW128,
20943 IX86_BUILTIN_INSERTPS128,
20945 IX86_BUILTIN_MOVNTDQA,
20946 IX86_BUILTIN_MPSADBW128,
20947 IX86_BUILTIN_PACKUSDW128,
20948 IX86_BUILTIN_PCMPEQQ,
20949 IX86_BUILTIN_PHMINPOSUW128,
20951 IX86_BUILTIN_PMAXSB128,
20952 IX86_BUILTIN_PMAXSD128,
20953 IX86_BUILTIN_PMAXUD128,
20954 IX86_BUILTIN_PMAXUW128,
20956 IX86_BUILTIN_PMINSB128,
20957 IX86_BUILTIN_PMINSD128,
20958 IX86_BUILTIN_PMINUD128,
20959 IX86_BUILTIN_PMINUW128,
20961 IX86_BUILTIN_PMOVSXBW128,
20962 IX86_BUILTIN_PMOVSXBD128,
20963 IX86_BUILTIN_PMOVSXBQ128,
20964 IX86_BUILTIN_PMOVSXWD128,
20965 IX86_BUILTIN_PMOVSXWQ128,
20966 IX86_BUILTIN_PMOVSXDQ128,
20968 IX86_BUILTIN_PMOVZXBW128,
20969 IX86_BUILTIN_PMOVZXBD128,
20970 IX86_BUILTIN_PMOVZXBQ128,
20971 IX86_BUILTIN_PMOVZXWD128,
20972 IX86_BUILTIN_PMOVZXWQ128,
20973 IX86_BUILTIN_PMOVZXDQ128,
20975 IX86_BUILTIN_PMULDQ128,
20976 IX86_BUILTIN_PMULLD128,
20978 IX86_BUILTIN_ROUNDPD,
20979 IX86_BUILTIN_ROUNDPS,
20980 IX86_BUILTIN_ROUNDSD,
20981 IX86_BUILTIN_ROUNDSS,
20983 IX86_BUILTIN_PTESTZ,
20984 IX86_BUILTIN_PTESTC,
20985 IX86_BUILTIN_PTESTNZC,
20987 IX86_BUILTIN_VEC_INIT_V2SI,
20988 IX86_BUILTIN_VEC_INIT_V4HI,
20989 IX86_BUILTIN_VEC_INIT_V8QI,
20990 IX86_BUILTIN_VEC_EXT_V2DF,
20991 IX86_BUILTIN_VEC_EXT_V2DI,
20992 IX86_BUILTIN_VEC_EXT_V4SF,
20993 IX86_BUILTIN_VEC_EXT_V4SI,
20994 IX86_BUILTIN_VEC_EXT_V8HI,
20995 IX86_BUILTIN_VEC_EXT_V2SI,
20996 IX86_BUILTIN_VEC_EXT_V4HI,
20997 IX86_BUILTIN_VEC_EXT_V16QI,
20998 IX86_BUILTIN_VEC_SET_V2DI,
20999 IX86_BUILTIN_VEC_SET_V4SF,
21000 IX86_BUILTIN_VEC_SET_V4SI,
21001 IX86_BUILTIN_VEC_SET_V8HI,
21002 IX86_BUILTIN_VEC_SET_V4HI,
21003 IX86_BUILTIN_VEC_SET_V16QI,
21005 IX86_BUILTIN_VEC_PACK_SFIX,
21008 IX86_BUILTIN_CRC32QI,
21009 IX86_BUILTIN_CRC32HI,
21010 IX86_BUILTIN_CRC32SI,
21011 IX86_BUILTIN_CRC32DI,
21013 IX86_BUILTIN_PCMPESTRI128,
21014 IX86_BUILTIN_PCMPESTRM128,
21015 IX86_BUILTIN_PCMPESTRA128,
21016 IX86_BUILTIN_PCMPESTRC128,
21017 IX86_BUILTIN_PCMPESTRO128,
21018 IX86_BUILTIN_PCMPESTRS128,
21019 IX86_BUILTIN_PCMPESTRZ128,
21020 IX86_BUILTIN_PCMPISTRI128,
21021 IX86_BUILTIN_PCMPISTRM128,
21022 IX86_BUILTIN_PCMPISTRA128,
21023 IX86_BUILTIN_PCMPISTRC128,
21024 IX86_BUILTIN_PCMPISTRO128,
21025 IX86_BUILTIN_PCMPISTRS128,
21026 IX86_BUILTIN_PCMPISTRZ128,
21028 IX86_BUILTIN_PCMPGTQ,
21030 /* AES instructions */
21031 IX86_BUILTIN_AESENC128,
21032 IX86_BUILTIN_AESENCLAST128,
21033 IX86_BUILTIN_AESDEC128,
21034 IX86_BUILTIN_AESDECLAST128,
21035 IX86_BUILTIN_AESIMC128,
21036 IX86_BUILTIN_AESKEYGENASSIST128,
21038 /* PCLMUL instruction */
21039 IX86_BUILTIN_PCLMULQDQ128,
21042 IX86_BUILTIN_ADDPD256,
21043 IX86_BUILTIN_ADDPS256,
21044 IX86_BUILTIN_ADDSUBPD256,
21045 IX86_BUILTIN_ADDSUBPS256,
21046 IX86_BUILTIN_ANDPD256,
21047 IX86_BUILTIN_ANDPS256,
21048 IX86_BUILTIN_ANDNPD256,
21049 IX86_BUILTIN_ANDNPS256,
21050 IX86_BUILTIN_BLENDPD256,
21051 IX86_BUILTIN_BLENDPS256,
21052 IX86_BUILTIN_BLENDVPD256,
21053 IX86_BUILTIN_BLENDVPS256,
21054 IX86_BUILTIN_DIVPD256,
21055 IX86_BUILTIN_DIVPS256,
21056 IX86_BUILTIN_DPPS256,
21057 IX86_BUILTIN_HADDPD256,
21058 IX86_BUILTIN_HADDPS256,
21059 IX86_BUILTIN_HSUBPD256,
21060 IX86_BUILTIN_HSUBPS256,
21061 IX86_BUILTIN_MAXPD256,
21062 IX86_BUILTIN_MAXPS256,
21063 IX86_BUILTIN_MINPD256,
21064 IX86_BUILTIN_MINPS256,
21065 IX86_BUILTIN_MULPD256,
21066 IX86_BUILTIN_MULPS256,
21067 IX86_BUILTIN_ORPD256,
21068 IX86_BUILTIN_ORPS256,
21069 IX86_BUILTIN_SHUFPD256,
21070 IX86_BUILTIN_SHUFPS256,
21071 IX86_BUILTIN_SUBPD256,
21072 IX86_BUILTIN_SUBPS256,
21073 IX86_BUILTIN_XORPD256,
21074 IX86_BUILTIN_XORPS256,
21075 IX86_BUILTIN_CMPSD,
21076 IX86_BUILTIN_CMPSS,
21077 IX86_BUILTIN_CMPPD,
21078 IX86_BUILTIN_CMPPS,
21079 IX86_BUILTIN_CMPPD256,
21080 IX86_BUILTIN_CMPPS256,
21081 IX86_BUILTIN_CVTDQ2PD256,
21082 IX86_BUILTIN_CVTDQ2PS256,
21083 IX86_BUILTIN_CVTPD2PS256,
21084 IX86_BUILTIN_CVTPS2DQ256,
21085 IX86_BUILTIN_CVTPS2PD256,
21086 IX86_BUILTIN_CVTTPD2DQ256,
21087 IX86_BUILTIN_CVTPD2DQ256,
21088 IX86_BUILTIN_CVTTPS2DQ256,
21089 IX86_BUILTIN_EXTRACTF128PD256,
21090 IX86_BUILTIN_EXTRACTF128PS256,
21091 IX86_BUILTIN_EXTRACTF128SI256,
21092 IX86_BUILTIN_VZEROALL,
21093 IX86_BUILTIN_VZEROUPPER,
21094 IX86_BUILTIN_VPERMILVARPD,
21095 IX86_BUILTIN_VPERMILVARPS,
21096 IX86_BUILTIN_VPERMILVARPD256,
21097 IX86_BUILTIN_VPERMILVARPS256,
21098 IX86_BUILTIN_VPERMILPD,
21099 IX86_BUILTIN_VPERMILPS,
21100 IX86_BUILTIN_VPERMILPD256,
21101 IX86_BUILTIN_VPERMILPS256,
21102 IX86_BUILTIN_VPERMIL2PD,
21103 IX86_BUILTIN_VPERMIL2PS,
21104 IX86_BUILTIN_VPERMIL2PD256,
21105 IX86_BUILTIN_VPERMIL2PS256,
21106 IX86_BUILTIN_VPERM2F128PD256,
21107 IX86_BUILTIN_VPERM2F128PS256,
21108 IX86_BUILTIN_VPERM2F128SI256,
21109 IX86_BUILTIN_VBROADCASTSS,
21110 IX86_BUILTIN_VBROADCASTSD256,
21111 IX86_BUILTIN_VBROADCASTSS256,
21112 IX86_BUILTIN_VBROADCASTPD256,
21113 IX86_BUILTIN_VBROADCASTPS256,
21114 IX86_BUILTIN_VINSERTF128PD256,
21115 IX86_BUILTIN_VINSERTF128PS256,
21116 IX86_BUILTIN_VINSERTF128SI256,
21117 IX86_BUILTIN_LOADUPD256,
21118 IX86_BUILTIN_LOADUPS256,
21119 IX86_BUILTIN_STOREUPD256,
21120 IX86_BUILTIN_STOREUPS256,
21121 IX86_BUILTIN_LDDQU256,
21122 IX86_BUILTIN_MOVNTDQ256,
21123 IX86_BUILTIN_MOVNTPD256,
21124 IX86_BUILTIN_MOVNTPS256,
21125 IX86_BUILTIN_LOADDQU256,
21126 IX86_BUILTIN_STOREDQU256,
21127 IX86_BUILTIN_MASKLOADPD,
21128 IX86_BUILTIN_MASKLOADPS,
21129 IX86_BUILTIN_MASKSTOREPD,
21130 IX86_BUILTIN_MASKSTOREPS,
21131 IX86_BUILTIN_MASKLOADPD256,
21132 IX86_BUILTIN_MASKLOADPS256,
21133 IX86_BUILTIN_MASKSTOREPD256,
21134 IX86_BUILTIN_MASKSTOREPS256,
21135 IX86_BUILTIN_MOVSHDUP256,
21136 IX86_BUILTIN_MOVSLDUP256,
21137 IX86_BUILTIN_MOVDDUP256,
21139 IX86_BUILTIN_SQRTPD256,
21140 IX86_BUILTIN_SQRTPS256,
21141 IX86_BUILTIN_SQRTPS_NR256,
21142 IX86_BUILTIN_RSQRTPS256,
21143 IX86_BUILTIN_RSQRTPS_NR256,
21145 IX86_BUILTIN_RCPPS256,
21147 IX86_BUILTIN_ROUNDPD256,
21148 IX86_BUILTIN_ROUNDPS256,
21150 IX86_BUILTIN_UNPCKHPD256,
21151 IX86_BUILTIN_UNPCKLPD256,
21152 IX86_BUILTIN_UNPCKHPS256,
21153 IX86_BUILTIN_UNPCKLPS256,
21155 IX86_BUILTIN_SI256_SI,
21156 IX86_BUILTIN_PS256_PS,
21157 IX86_BUILTIN_PD256_PD,
21158 IX86_BUILTIN_SI_SI256,
21159 IX86_BUILTIN_PS_PS256,
21160 IX86_BUILTIN_PD_PD256,
21162 IX86_BUILTIN_VTESTZPD,
21163 IX86_BUILTIN_VTESTCPD,
21164 IX86_BUILTIN_VTESTNZCPD,
21165 IX86_BUILTIN_VTESTZPS,
21166 IX86_BUILTIN_VTESTCPS,
21167 IX86_BUILTIN_VTESTNZCPS,
21168 IX86_BUILTIN_VTESTZPD256,
21169 IX86_BUILTIN_VTESTCPD256,
21170 IX86_BUILTIN_VTESTNZCPD256,
21171 IX86_BUILTIN_VTESTZPS256,
21172 IX86_BUILTIN_VTESTCPS256,
21173 IX86_BUILTIN_VTESTNZCPS256,
21174 IX86_BUILTIN_PTESTZ256,
21175 IX86_BUILTIN_PTESTC256,
21176 IX86_BUILTIN_PTESTNZC256,
21178 IX86_BUILTIN_MOVMSKPD256,
21179 IX86_BUILTIN_MOVMSKPS256,
21181 /* TFmode support builtins. */
21183 IX86_BUILTIN_HUGE_VALQ,
21184 IX86_BUILTIN_FABSQ,
21185 IX86_BUILTIN_COPYSIGNQ,
21187 /* Vectorizer support builtins. */
21188 IX86_BUILTIN_CPYSGNPS,
21189 IX86_BUILTIN_CPYSGNPD,
21191 IX86_BUILTIN_CVTUDQ2PS,
21193 IX86_BUILTIN_VEC_PERM_V2DF,
21194 IX86_BUILTIN_VEC_PERM_V4SF,
21195 IX86_BUILTIN_VEC_PERM_V2DI,
21196 IX86_BUILTIN_VEC_PERM_V4SI,
21197 IX86_BUILTIN_VEC_PERM_V8HI,
21198 IX86_BUILTIN_VEC_PERM_V16QI,
21199 IX86_BUILTIN_VEC_PERM_V2DI_U,
21200 IX86_BUILTIN_VEC_PERM_V4SI_U,
21201 IX86_BUILTIN_VEC_PERM_V8HI_U,
21202 IX86_BUILTIN_VEC_PERM_V16QI_U,
21203 IX86_BUILTIN_VEC_PERM_V4DF,
21204 IX86_BUILTIN_VEC_PERM_V8SF,
21206 /* FMA4 and XOP instructions. */
21207 IX86_BUILTIN_VFMADDSS,
21208 IX86_BUILTIN_VFMADDSD,
21209 IX86_BUILTIN_VFMADDPS,
21210 IX86_BUILTIN_VFMADDPD,
21211 IX86_BUILTIN_VFMSUBSS,
21212 IX86_BUILTIN_VFMSUBSD,
21213 IX86_BUILTIN_VFMSUBPS,
21214 IX86_BUILTIN_VFMSUBPD,
21215 IX86_BUILTIN_VFMADDSUBPS,
21216 IX86_BUILTIN_VFMADDSUBPD,
21217 IX86_BUILTIN_VFMSUBADDPS,
21218 IX86_BUILTIN_VFMSUBADDPD,
21219 IX86_BUILTIN_VFNMADDSS,
21220 IX86_BUILTIN_VFNMADDSD,
21221 IX86_BUILTIN_VFNMADDPS,
21222 IX86_BUILTIN_VFNMADDPD,
21223 IX86_BUILTIN_VFNMSUBSS,
21224 IX86_BUILTIN_VFNMSUBSD,
21225 IX86_BUILTIN_VFNMSUBPS,
21226 IX86_BUILTIN_VFNMSUBPD,
21227 IX86_BUILTIN_VFMADDPS256,
21228 IX86_BUILTIN_VFMADDPD256,
21229 IX86_BUILTIN_VFMSUBPS256,
21230 IX86_BUILTIN_VFMSUBPD256,
21231 IX86_BUILTIN_VFMADDSUBPS256,
21232 IX86_BUILTIN_VFMADDSUBPD256,
21233 IX86_BUILTIN_VFMSUBADDPS256,
21234 IX86_BUILTIN_VFMSUBADDPD256,
21235 IX86_BUILTIN_VFNMADDPS256,
21236 IX86_BUILTIN_VFNMADDPD256,
21237 IX86_BUILTIN_VFNMSUBPS256,
21238 IX86_BUILTIN_VFNMSUBPD256,
21240 IX86_BUILTIN_VPCMOV,
21241 IX86_BUILTIN_VPCMOV_V2DI,
21242 IX86_BUILTIN_VPCMOV_V4SI,
21243 IX86_BUILTIN_VPCMOV_V8HI,
21244 IX86_BUILTIN_VPCMOV_V16QI,
21245 IX86_BUILTIN_VPCMOV_V4SF,
21246 IX86_BUILTIN_VPCMOV_V2DF,
21247 IX86_BUILTIN_VPCMOV256,
21248 IX86_BUILTIN_VPCMOV_V4DI256,
21249 IX86_BUILTIN_VPCMOV_V8SI256,
21250 IX86_BUILTIN_VPCMOV_V16HI256,
21251 IX86_BUILTIN_VPCMOV_V32QI256,
21252 IX86_BUILTIN_VPCMOV_V8SF256,
21253 IX86_BUILTIN_VPCMOV_V4DF256,
21255 IX86_BUILTIN_VPPERM,
21257 IX86_BUILTIN_VPMACSSWW,
21258 IX86_BUILTIN_VPMACSWW,
21259 IX86_BUILTIN_VPMACSSWD,
21260 IX86_BUILTIN_VPMACSWD,
21261 IX86_BUILTIN_VPMACSSDD,
21262 IX86_BUILTIN_VPMACSDD,
21263 IX86_BUILTIN_VPMACSSDQL,
21264 IX86_BUILTIN_VPMACSSDQH,
21265 IX86_BUILTIN_VPMACSDQL,
21266 IX86_BUILTIN_VPMACSDQH,
21267 IX86_BUILTIN_VPMADCSSWD,
21268 IX86_BUILTIN_VPMADCSWD,
21270 IX86_BUILTIN_VPHADDBW,
21271 IX86_BUILTIN_VPHADDBD,
21272 IX86_BUILTIN_VPHADDBQ,
21273 IX86_BUILTIN_VPHADDWD,
21274 IX86_BUILTIN_VPHADDWQ,
21275 IX86_BUILTIN_VPHADDDQ,
21276 IX86_BUILTIN_VPHADDUBW,
21277 IX86_BUILTIN_VPHADDUBD,
21278 IX86_BUILTIN_VPHADDUBQ,
21279 IX86_BUILTIN_VPHADDUWD,
21280 IX86_BUILTIN_VPHADDUWQ,
21281 IX86_BUILTIN_VPHADDUDQ,
21282 IX86_BUILTIN_VPHSUBBW,
21283 IX86_BUILTIN_VPHSUBWD,
21284 IX86_BUILTIN_VPHSUBDQ,
21286 IX86_BUILTIN_VPROTB,
21287 IX86_BUILTIN_VPROTW,
21288 IX86_BUILTIN_VPROTD,
21289 IX86_BUILTIN_VPROTQ,
21290 IX86_BUILTIN_VPROTB_IMM,
21291 IX86_BUILTIN_VPROTW_IMM,
21292 IX86_BUILTIN_VPROTD_IMM,
21293 IX86_BUILTIN_VPROTQ_IMM,
21295 IX86_BUILTIN_VPSHLB,
21296 IX86_BUILTIN_VPSHLW,
21297 IX86_BUILTIN_VPSHLD,
21298 IX86_BUILTIN_VPSHLQ,
21299 IX86_BUILTIN_VPSHAB,
21300 IX86_BUILTIN_VPSHAW,
21301 IX86_BUILTIN_VPSHAD,
21302 IX86_BUILTIN_VPSHAQ,
21304 IX86_BUILTIN_VFRCZSS,
21305 IX86_BUILTIN_VFRCZSD,
21306 IX86_BUILTIN_VFRCZPS,
21307 IX86_BUILTIN_VFRCZPD,
21308 IX86_BUILTIN_VFRCZPS256,
21309 IX86_BUILTIN_VFRCZPD256,
21311 IX86_BUILTIN_VPCOMEQUB,
21312 IX86_BUILTIN_VPCOMNEUB,
21313 IX86_BUILTIN_VPCOMLTUB,
21314 IX86_BUILTIN_VPCOMLEUB,
21315 IX86_BUILTIN_VPCOMGTUB,
21316 IX86_BUILTIN_VPCOMGEUB,
21317 IX86_BUILTIN_VPCOMFALSEUB,
21318 IX86_BUILTIN_VPCOMTRUEUB,
21320 IX86_BUILTIN_VPCOMEQUW,
21321 IX86_BUILTIN_VPCOMNEUW,
21322 IX86_BUILTIN_VPCOMLTUW,
21323 IX86_BUILTIN_VPCOMLEUW,
21324 IX86_BUILTIN_VPCOMGTUW,
21325 IX86_BUILTIN_VPCOMGEUW,
21326 IX86_BUILTIN_VPCOMFALSEUW,
21327 IX86_BUILTIN_VPCOMTRUEUW,
21329 IX86_BUILTIN_VPCOMEQUD,
21330 IX86_BUILTIN_VPCOMNEUD,
21331 IX86_BUILTIN_VPCOMLTUD,
21332 IX86_BUILTIN_VPCOMLEUD,
21333 IX86_BUILTIN_VPCOMGTUD,
21334 IX86_BUILTIN_VPCOMGEUD,
21335 IX86_BUILTIN_VPCOMFALSEUD,
21336 IX86_BUILTIN_VPCOMTRUEUD,
21338 IX86_BUILTIN_VPCOMEQUQ,
21339 IX86_BUILTIN_VPCOMNEUQ,
21340 IX86_BUILTIN_VPCOMLTUQ,
21341 IX86_BUILTIN_VPCOMLEUQ,
21342 IX86_BUILTIN_VPCOMGTUQ,
21343 IX86_BUILTIN_VPCOMGEUQ,
21344 IX86_BUILTIN_VPCOMFALSEUQ,
21345 IX86_BUILTIN_VPCOMTRUEUQ,
21347 IX86_BUILTIN_VPCOMEQB,
21348 IX86_BUILTIN_VPCOMNEB,
21349 IX86_BUILTIN_VPCOMLTB,
21350 IX86_BUILTIN_VPCOMLEB,
21351 IX86_BUILTIN_VPCOMGTB,
21352 IX86_BUILTIN_VPCOMGEB,
21353 IX86_BUILTIN_VPCOMFALSEB,
21354 IX86_BUILTIN_VPCOMTRUEB,
21356 IX86_BUILTIN_VPCOMEQW,
21357 IX86_BUILTIN_VPCOMNEW,
21358 IX86_BUILTIN_VPCOMLTW,
21359 IX86_BUILTIN_VPCOMLEW,
21360 IX86_BUILTIN_VPCOMGTW,
21361 IX86_BUILTIN_VPCOMGEW,
21362 IX86_BUILTIN_VPCOMFALSEW,
21363 IX86_BUILTIN_VPCOMTRUEW,
21365 IX86_BUILTIN_VPCOMEQD,
21366 IX86_BUILTIN_VPCOMNED,
21367 IX86_BUILTIN_VPCOMLTD,
21368 IX86_BUILTIN_VPCOMLED,
21369 IX86_BUILTIN_VPCOMGTD,
21370 IX86_BUILTIN_VPCOMGED,
21371 IX86_BUILTIN_VPCOMFALSED,
21372 IX86_BUILTIN_VPCOMTRUED,
21374 IX86_BUILTIN_VPCOMEQQ,
21375 IX86_BUILTIN_VPCOMNEQ,
21376 IX86_BUILTIN_VPCOMLTQ,
21377 IX86_BUILTIN_VPCOMLEQ,
21378 IX86_BUILTIN_VPCOMGTQ,
21379 IX86_BUILTIN_VPCOMGEQ,
21380 IX86_BUILTIN_VPCOMFALSEQ,
21381 IX86_BUILTIN_VPCOMTRUEQ,
21383 /* LWP instructions. */
21384 IX86_BUILTIN_LLWPCB,
21385 IX86_BUILTIN_SLWPCB,
21386 IX86_BUILTIN_LWPVAL32,
21387 IX86_BUILTIN_LWPVAL64,
21388 IX86_BUILTIN_LWPINS32,
21389 IX86_BUILTIN_LWPINS64,
21396 /* Table for the ix86 builtin decls. */
21397 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
21399 /* Table of all of the builtin functions that are possible with different ISAs,
21400 but are waiting to be built until a function is declared to use that ISA. */
21402 struct builtin_isa {
21403 const char *name; /* function name */
21404 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
21405 int isa; /* isa_flags this builtin is defined for */
21406 bool const_p; /* true if the declaration is constant */
21407 bool set_and_not_built_p; /* true if the decl has been recorded but not yet built */
21410 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
21413 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Save the MASK
21414 of which isa_flags to use in the ix86_builtins_isa array. Stores the
21415 function decl in the ix86_builtins array. Returns the function decl or
21416 NULL_TREE, if the builtin was not added.
21418 If the front end has a special hook for builtin functions, delay adding
21419 builtin functions that aren't in the current ISA until the ISA is changed
21420 with function specific optimization. Doing so can save about 300K for the
21421 default compiler. When the builtin is expanded, check at that time whether it is valid.
21424 If the front end doesn't have a special hook, record all builtins, even if
21425 they aren't part of the current ISA, in case the user uses
21426 function specific options for a different ISA, so that we don't get scope
21427 errors if a builtin is added in the middle of a function scope. */
21430 def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
21431 enum ix86_builtins code)
21433 tree decl = NULL_TREE;
21435 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
21437 ix86_builtins_isa[(int) code].isa = mask;
21440 || (mask & ix86_isa_flags) != 0
21441 || (lang_hooks.builtin_function
21442 == lang_hooks.builtin_function_ext_scope))
21445 tree type = ix86_get_builtin_func_type (tcode);
21446 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
21448 ix86_builtins[(int) code] = decl;
21449 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
21453 ix86_builtins[(int) code] = NULL_TREE;
21454 ix86_builtins_isa[(int) code].tcode = tcode;
21455 ix86_builtins_isa[(int) code].name = name;
21456 ix86_builtins_isa[(int) code].const_p = false;
21457 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
21464 /* Like def_builtin, but also marks the function decl "const". */
21467 def_builtin_const (int mask, const char *name,
21468 enum ix86_builtin_func_type tcode, enum ix86_builtins code)
21470 tree decl = def_builtin (mask, name, tcode, code);
21472 TREE_READONLY (decl) = 1;
21474 ix86_builtins_isa[(int) code].const_p = true;
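/* Hypothetical usage sketch (the real calls appear later in this file,
   driven by the bdesc_* tables):

     def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_paddw128",
			V8HI_FTYPE_V8HI_V8HI, IX86_BUILTIN_PADDW128);

   With SSE2 disabled and a front end that supports extended-scope
   builtins, this only records the request in ix86_builtins_isa; the
   decl itself is built by ix86_add_new_builtins once SSE2 is enabled
   via a target attribute or pragma.  */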
21479 /* Add any new builtin functions for a given ISA that may not have been
21480 declared. This saves a bit of space compared to adding all of the
21481 declarations to the tree, even if we didn't use them. */
21484 ix86_add_new_builtins (int isa)
21488 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
21490 if ((ix86_builtins_isa[i].isa & isa) != 0
21491 && ix86_builtins_isa[i].set_and_not_built_p)
21495 /* Don't define the builtin again. */
21496 ix86_builtins_isa[i].set_and_not_built_p = false;
21498 type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
21499 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
21500 type, i, BUILT_IN_MD, NULL,
21503 ix86_builtins[i] = decl;
21504 if (ix86_builtins_isa[i].const_p)
21505 TREE_READONLY (decl) = 1;
21510 /* Bits for builtin_description.flag. */
21512 /* Set when we don't support the comparison natively, and should
21513 swap_comparison in order to support it. */
21514 #define BUILTIN_DESC_SWAP_OPERANDS 1
21516 struct builtin_description
21518 const unsigned int mask;
21519 const enum insn_code icode;
21520 const char *const name;
21521 const enum ix86_builtins code;
21522 const enum rtx_code comparison;
21526 static const struct builtin_description bdesc_comi[] =
21528 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
21529 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
21530 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
21531 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
21532 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
21533 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
21534 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
21535 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
21536 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
21537 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
21538 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
21539 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
21540 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
21541 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
21542 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
21543 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
21544 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
21545 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
21546 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
21547 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
21548 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
21549 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
21550 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
21551 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
21554 static const struct builtin_description bdesc_pcmpestr[] =
21557 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
21558 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
21559 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
21560 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
21561 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
21562 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
21563 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
21566 static const struct builtin_description bdesc_pcmpistr[] =
21569 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
21570 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
21571 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
21572 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
21573 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
21574 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
21575 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
21578 /* Special builtins with variable number of arguments. */
21579 static const struct builtin_description bdesc_special_args[] =
21581 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
21582 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
21585 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21588 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21591 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21592 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21593 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21595 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21596 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21597 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21598 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21600 /* SSE or 3DNow!A */
21601 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21602 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
21605 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21606 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21607 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21608 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
21609 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21610 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
21611 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
21612 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
21613 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21615 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21616 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21619 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21622 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
21625 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21626 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21629 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
21630 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
21632 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21633 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21634 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21635 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
21636 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
21638 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21639 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21640 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21641 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21642 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21643 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
21644 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21646 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
21647 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21648 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21650 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
21651 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
21652 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
21653 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
21654 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
21655 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
21656 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
21657 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
21659 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
21660 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
21661 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
21662 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
21663 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
21664 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
21668 /* Builtins with variable number of arguments. */
21669 static const struct builtin_description bdesc_args[] =
21671 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
21672 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
21673 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
21674 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21675 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21676 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21677 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21680 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
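  /* 3DNow! */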
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },

  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
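  /* 3DNow!A */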
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
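  /* SSE */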
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },

  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  /* SSE MMX or 3Dnow!A */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
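  /* SSE2 */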
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
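  /* SSE2 MMX */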
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
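  /* SSE3 */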
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
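  /* SSSE3 */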
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
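  /* SSE4.1 */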
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
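  /* SSE4.1 round and ptest patterns, gated by OPTION_MASK_ISA_ROUND.  */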
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
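  /* SSE4.2 */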
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
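  /* SSE4A */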
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
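  /* AES */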
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
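  /* PCLMUL */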
  { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
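  /* AVX */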
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22223 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
22224 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
22225 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
22226 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
22227 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
22228 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
22229 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22230 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
22231 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22232 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22233 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22234 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22235 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22236 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
22237 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
22238 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22239 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22240 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22241 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
22242 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
22243 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },
22245 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22246 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22247 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22249 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22250 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22251 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22252 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22253 { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22255 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22257 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22258 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22260 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22261 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22262 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22263 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22265 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
22266 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
22267 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
22268 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si_si256, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
22269 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps_ps256, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
22270 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd_pd256, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
22272 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22273 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22274 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22275 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22276 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22277 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22278 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22279 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22280 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22281 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22282 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22283 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22284 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22285 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22286 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22288 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
22289 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
22291 { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
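/* Illustrative note (not from the original source): once registered by
   ix86_init_mmx_sse_builtins below, each row above becomes a user-callable
   builtin whose C prototype is selected by the *_FTYPE_* tag.  For example,
   the IX86_BUILTIN_ADDPD256 row, compiled with -mavx, supports user code
   such as:

     typedef double v4df __attribute__ ((vector_size (32)));

     v4df
     add256 (v4df a, v4df b)
     {
       return __builtin_ia32_addpd256 (a, b);
     }

   which expands through CODE_FOR_addv4df3 to a single vaddpd.  */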
/* FMA4 and XOP.  */
#define MULTI_ARG_4_DF2_DI_I	V2DF_FTYPE_V2DF_V2DF_V2DI_INT
#define MULTI_ARG_4_DF2_DI_I1	V4DF_FTYPE_V4DF_V4DF_V4DI_INT
#define MULTI_ARG_4_SF2_SI_I	V4SF_FTYPE_V4SF_V4SF_V4SI_INT
#define MULTI_ARG_4_SF2_SI_I1	V8SF_FTYPE_V8SF_V8SF_V8SI_INT
#define MULTI_ARG_3_SF		V4SF_FTYPE_V4SF_V4SF_V4SF
#define MULTI_ARG_3_DF		V2DF_FTYPE_V2DF_V2DF_V2DF
#define MULTI_ARG_3_SF2		V8SF_FTYPE_V8SF_V8SF_V8SF
#define MULTI_ARG_3_DF2		V4DF_FTYPE_V4DF_V4DF_V4DF
#define MULTI_ARG_3_DI		V2DI_FTYPE_V2DI_V2DI_V2DI
#define MULTI_ARG_3_SI		V4SI_FTYPE_V4SI_V4SI_V4SI
#define MULTI_ARG_3_SI_DI	V4SI_FTYPE_V4SI_V4SI_V2DI
#define MULTI_ARG_3_HI		V8HI_FTYPE_V8HI_V8HI_V8HI
#define MULTI_ARG_3_HI_SI	V8HI_FTYPE_V8HI_V8HI_V4SI
#define MULTI_ARG_3_QI		V16QI_FTYPE_V16QI_V16QI_V16QI
#define MULTI_ARG_3_DI2		V4DI_FTYPE_V4DI_V4DI_V4DI
#define MULTI_ARG_3_SI2		V8SI_FTYPE_V8SI_V8SI_V8SI
#define MULTI_ARG_3_HI2		V16HI_FTYPE_V16HI_V16HI_V16HI
#define MULTI_ARG_3_QI2		V32QI_FTYPE_V32QI_V32QI_V32QI
#define MULTI_ARG_2_SF		V4SF_FTYPE_V4SF_V4SF
#define MULTI_ARG_2_DF		V2DF_FTYPE_V2DF_V2DF
#define MULTI_ARG_2_DI		V2DI_FTYPE_V2DI_V2DI
#define MULTI_ARG_2_SI		V4SI_FTYPE_V4SI_V4SI
#define MULTI_ARG_2_HI		V8HI_FTYPE_V8HI_V8HI
#define MULTI_ARG_2_QI		V16QI_FTYPE_V16QI_V16QI
#define MULTI_ARG_2_DI_IMM	V2DI_FTYPE_V2DI_SI
#define MULTI_ARG_2_SI_IMM	V4SI_FTYPE_V4SI_SI
#define MULTI_ARG_2_HI_IMM	V8HI_FTYPE_V8HI_SI
#define MULTI_ARG_2_QI_IMM	V16QI_FTYPE_V16QI_SI
#define MULTI_ARG_2_DI_CMP	V2DI_FTYPE_V2DI_V2DI_CMP
#define MULTI_ARG_2_SI_CMP	V4SI_FTYPE_V4SI_V4SI_CMP
#define MULTI_ARG_2_HI_CMP	V8HI_FTYPE_V8HI_V8HI_CMP
#define MULTI_ARG_2_QI_CMP	V16QI_FTYPE_V16QI_V16QI_CMP
#define MULTI_ARG_2_SF_TF	V4SF_FTYPE_V4SF_V4SF_TF
#define MULTI_ARG_2_DF_TF	V2DF_FTYPE_V2DF_V2DF_TF
#define MULTI_ARG_2_DI_TF	V2DI_FTYPE_V2DI_V2DI_TF
#define MULTI_ARG_2_SI_TF	V4SI_FTYPE_V4SI_V4SI_TF
#define MULTI_ARG_2_HI_TF	V8HI_FTYPE_V8HI_V8HI_TF
#define MULTI_ARG_2_QI_TF	V16QI_FTYPE_V16QI_V16QI_TF
#define MULTI_ARG_1_SF		V4SF_FTYPE_V4SF
#define MULTI_ARG_1_DF		V2DF_FTYPE_V2DF
#define MULTI_ARG_1_SF2		V8SF_FTYPE_V8SF
#define MULTI_ARG_1_DF2		V4DF_FTYPE_V4DF
#define MULTI_ARG_1_DI		V2DI_FTYPE_V2DI
#define MULTI_ARG_1_SI		V4SI_FTYPE_V4SI
#define MULTI_ARG_1_HI		V8HI_FTYPE_V8HI
#define MULTI_ARG_1_QI		V16QI_FTYPE_V16QI
#define MULTI_ARG_1_SI_DI	V2DI_FTYPE_V4SI
#define MULTI_ARG_1_HI_DI	V2DI_FTYPE_V8HI
#define MULTI_ARG_1_HI_SI	V4SI_FTYPE_V8HI
#define MULTI_ARG_1_QI_DI	V2DI_FTYPE_V16QI
#define MULTI_ARG_1_QI_SI	V4SI_FTYPE_V16QI
#define MULTI_ARG_1_QI_HI	V8HI_FTYPE_V16QI
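/* A reading aid (not from the original source): each MULTI_ARG_* alias
   above is a shorter spelling of an ix86_builtin_func_type.  For instance
   MULTI_ARG_3_SF is V4SF_FTYPE_V4SF_V4SF_V4SF, i.e. at the C level
   approximately:

     typedef float v4sf __attribute__ ((vector_size (16)));
     v4sf builtin_fn (v4sf, v4sf, v4sf);

   The _CMP and _TF variants additionally direct
   ix86_expand_multi_arg_builtin below to pass a comparison or trueness
   code, and the _IMM variants require the last argument to be an
   immediate.  */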
static const struct builtin_description bdesc_multi_arg[] =
{
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv4sf4, "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv2df4, "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4sf4, "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv2df4, "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv4sf4, "__builtin_ia32_vfmsubss", IX86_BUILTIN_VFMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv2df4, "__builtin_ia32_vfmsubsd", IX86_BUILTIN_VFMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4sf4, "__builtin_ia32_vfmsubps", IX86_BUILTIN_VFMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv2df4, "__builtin_ia32_vfmsubpd", IX86_BUILTIN_VFMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv4sf4, "__builtin_ia32_vfnmaddss", IX86_BUILTIN_VFNMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv2df4, "__builtin_ia32_vfnmaddsd", IX86_BUILTIN_VFNMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4sf4, "__builtin_ia32_vfnmaddps", IX86_BUILTIN_VFNMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv2df4, "__builtin_ia32_vfnmaddpd", IX86_BUILTIN_VFNMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv4sf4, "__builtin_ia32_vfnmsubss", IX86_BUILTIN_VFNMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv2df4, "__builtin_ia32_vfnmsubsd", IX86_BUILTIN_VFNMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4sf4, "__builtin_ia32_vfnmsubps", IX86_BUILTIN_VFNMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv2df4, "__builtin_ia32_vfnmsubpd", IX86_BUILTIN_VFNMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4sf4, "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv2df4, "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4sf4, "__builtin_ia32_vfmsubaddps", IX86_BUILTIN_VFMSUBADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv2df4, "__builtin_ia32_vfmsubaddpd", IX86_BUILTIN_VFMSUBADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv8sf4256, "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4df4256, "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv8sf4256, "__builtin_ia32_vfmsubps256", IX86_BUILTIN_VFMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4df4256, "__builtin_ia32_vfmsubpd256", IX86_BUILTIN_VFMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv8sf4256, "__builtin_ia32_vfnmaddps256", IX86_BUILTIN_VFNMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4df4256, "__builtin_ia32_vfnmaddpd256", IX86_BUILTIN_VFNMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv8sf4256, "__builtin_ia32_vfnmsubps256", IX86_BUILTIN_VFNMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4df4256, "__builtin_ia32_vfnmsubpd256", IX86_BUILTIN_VFNMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv8sf4, "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4df4, "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv8sf4, "__builtin_ia32_vfmsubaddps256", IX86_BUILTIN_VFMSUBADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4df4, "__builtin_ia32_vfmsubaddpd256", IX86_BUILTIN_VFMSUBADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi", IX86_BUILTIN_VPCMOV_V16QI, UNKNOWN, (int)MULTI_ARG_3_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2256, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2256, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub", IX86_BUILTIN_VPCOMFALSEUB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw", IX86_BUILTIN_VPCOMFALSEUW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud", IX86_BUILTIN_VPCOMFALSEUD, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq", IX86_BUILTIN_VPCOMFALSEUQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
};
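/* Illustrative example (not from the original source): the vpcom rows
   above share one insn pattern per mode and encode the predicate in the
   comparison column, so with -mxop:

     typedef char v16qi __attribute__ ((vector_size (16)));

     v16qi
     cmplt (v16qi a, v16qi b)
     {
       return __builtin_ia32_vpcomltb (a, b);
     }

   expands through CODE_FOR_xop_maskcmpv16qi3 with the LT code taken from
   the table row.  */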
/* Set up all the MMX/SSE builtins, even builtins for instructions that are not
   in the current target ISA to allow the user to compile particular modules
   with different target specific options that differ from the command line
   options.  */
static void
ix86_init_mmx_sse_builtins (void)
{
  const struct builtin_description * d;
  enum ix86_builtin_func_type ftype;
  size_t i;

  /* Add all special builtins with variable number of operands.  */
  for (i = 0, d = bdesc_special_args;
       i < ARRAY_SIZE (bdesc_special_args);
       i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin (d->mask, d->name, ftype, d->code);
    }

  /* Add all builtins with variable number of operands.  */
  for (i = 0, d = bdesc_args;
       i < ARRAY_SIZE (bdesc_args);
       i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpestr[im] insns.  */
  for (i = 0, d = bdesc_pcmpestr;
       i < ARRAY_SIZE (bdesc_pcmpestr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPESTRM128)
	ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
      else
	ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpistr[im] insns.  */
  for (i = 0, d = bdesc_pcmpistr;
       i < ARRAY_SIZE (bdesc_pcmpistr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPISTRM128)
	ftype = V16QI_FTYPE_V16QI_V16QI_INT;
      else
	ftype = INT_FTYPE_V16QI_V16QI_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* comi/ucomi insns.  */
  for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
    {
      if (d->mask == OPTION_MASK_ISA_SSE2)
	ftype = INT_FTYPE_V2DF_V2DF;
      else
	ftype = INT_FTYPE_V4SF_V4SF;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* SSE */
  def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
	       VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
  def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
	       UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);

  /* SSE or 3DNow!A */
  def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
	       "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
	       IX86_BUILTIN_MASKMOVQ);

  /* SSE2 */
  def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
	       VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);

  def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
	       VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
  x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
			    VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);

  /* SSE3 */
  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
	       VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
	       VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);
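  /* Illustrative example (not from the original source): the two builtins
     just registered back the _mm_monitor/_mm_mwait intrinsics; with -msse3
     a wait loop can be written directly as:

       void
       wait_for_store (void const *p)
       {
	 __builtin_ia32_monitor (p, 0, 0);
	 __builtin_ia32_mwait (0, 0);
       }
  */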
  /* AES */
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
		     V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
		     V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);

  /* PCLMUL */
  def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
		     V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);

  /* MMX access to the vec_init patterns.  */
  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
		     V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
		     V4HI_FTYPE_HI_HI_HI_HI,
		     IX86_BUILTIN_VEC_INIT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
		     V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
		     IX86_BUILTIN_VEC_INIT_V8QI);
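  /* Illustrative example (not from the original source): with -mmmx the
     vec_init builtins registered above build an MMX vector from scalars:

       typedef int v2si __attribute__ ((vector_size (8)));

       v2si
       make_v2si (void)
       {
	 return __builtin_ia32_vec_init_v2si (1, 2);
       }
  */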
  /* Access to the vec_extract patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
		     DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
		     DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
  def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
		     FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
		     SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
		     HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
		     "__builtin_ia32_vec_ext_v4hi",
		     HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
		     SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
		     QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);

  /* Access to the vec_set patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
		     "__builtin_ia32_vec_set_v2di",
		     V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
		     V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
		     V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
		     V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
		     "__builtin_ia32_vec_set_v4hi",
		     V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
		     V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
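  /* Illustrative example (not from the original source): the vec_set
     builtins take the vector, the new element and a constant lane index;
     with -msse4.1:

       typedef int v4si __attribute__ ((vector_size (16)));

       v4si
       set_lane2 (v4si v, int x)
       {
	 return __builtin_ia32_vec_set_v4si (v, x, 2);
       }
  */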
  /* Add FMA4 multi-arg argument instructions */
  for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }
}
/* Internal method for ix86_init_builtins.  */

static void
ix86_init_builtins_va_builtins_abi (void)
{
  tree ms_va_ref, sysv_va_ref;
  tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
  tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
  tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
  tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;

  if (!TARGET_64BIT)
    return;
  fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
  fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
  ms_va_ref = build_reference_type (ms_va_list_type_node);
  sysv_va_ref =
    build_pointer_type (TREE_TYPE (sysv_va_list_type_node));

  fnvoid_va_end_ms =
    build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_start_ms =
    build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_end_sysv =
    build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
  fnvoid_va_start_sysv =
    build_varargs_function_type_list (void_type_node, sysv_va_ref,
				      NULL_TREE);
  fnvoid_va_copy_ms =
    build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
			      NULL_TREE);
  fnvoid_va_copy_sysv =
    build_function_type_list (void_type_node, sysv_va_ref,
			      sysv_va_ref, NULL_TREE);

  add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
			BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
			BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
			BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
			BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
			BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
			BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
}
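/* Illustrative sketch (not from the original source, x86-64 only): the
   builtins registered above back varargs handling for functions whose ABI
   differs from the default; the exact user-level spelling of va_arg here
   is an assumption, roughly:

     int __attribute__ ((ms_abi))
     sum (int n, ...)
     {
       __builtin_ms_va_list ap;
       int i, s = 0;

       __builtin_ms_va_start (ap, n);
       for (i = 0; i < n; i++)
	 s += __builtin_va_arg (ap, int);
       __builtin_ms_va_end (ap);
       return s;
     }
*/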
static void
ix86_init_builtin_types (void)
{
  tree float128_type_node, float80_type_node;

  /* The __float80 type.  */
  float80_type_node = long_double_type_node;
  if (TYPE_MODE (float80_type_node) != XFmode)
    {
      /* The __float80 type.  */
      float80_type_node = make_node (REAL_TYPE);
      TYPE_PRECISION (float80_type_node) = 80;
      layout_type (float80_type_node);
    }
  (*lang_hooks.types.register_builtin_type) (float80_type_node, "__float80");

  /* The __float128 type.  */
  float128_type_node = make_node (REAL_TYPE);
  TYPE_PRECISION (float128_type_node) = 128;
  layout_type (float128_type_node);
  (*lang_hooks.types.register_builtin_type) (float128_type_node, "__float128");

  /* This macro is built by i386-builtin-types.awk.  */
  DEFINE_BUILTIN_PRIMITIVE_TYPES;
}
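/* Illustrative example (not from the original source): after the
   registrations above, user code can name the extended float types
   directly; the literal suffixes assume the usual i386 conventions:

     __float80  ext  = 1.0w;	/* XFmode, 80-bit extended precision */
     __float128 quad = 1.0q;	/* TFmode, 128-bit precision */
*/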
static void
ix86_init_builtins (void)
{
  tree t;

  ix86_init_builtin_types ();

  /* TFmode support builtins.  */
  def_builtin_const (0, "__builtin_infq",
		     FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
  def_builtin_const (0, "__builtin_huge_valq",
		     FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);

  /* We will expand them to normal call if SSE2 isn't available since
     they are used by libgcc.  */
  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
  t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
			    BUILT_IN_MD, "__fabstf2", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;

  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
  t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
			    BUILT_IN_MD, "__copysigntf3", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
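  /* Illustrative example (not from the original source): because
     __builtin_fabsq carries the "__fabstf2" library name, the same user
     code works with or without SSE2; without it the call simply goes out
     to libgcc:

       __float128
       absq (__float128 x)
       {
	 return __builtin_fabsq (x);
       }
  */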
  ix86_init_mmx_sse_builtins ();

  if (TARGET_64BIT)
    ix86_init_builtins_va_builtins_abi ();
}
/* Return the ix86 builtin for CODE.  */

static tree
ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= IX86_BUILTIN_MAX)
    return error_mark_node;

  return ix86_builtins[code];
}
/* Errors in the source file can cause expand_expr to return const0_rtx
   where we expect a vector.  To avoid crashing, use one of the vector
   clear instructions.  */

static rtx
safe_vector_operand (rtx x, enum machine_mode mode)
{
  if (x == const0_rtx)
    x = CONST0_RTX (mode);
  return x;
}
/* Subroutine of ix86_expand_builtin to take care of binop insns.  */

static rtx
ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (GET_MODE (op1) == SImode && mode1 == TImode)
    {
      rtx x = gen_reg_rtx (V4SImode);
      emit_insn (gen_sse2_loadd (x, op1));
      op1 = gen_lowpart (TImode, x);
    }

  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns.  */

static rtx
ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
			       enum ix86_builtin_func_type m_type,
			       enum rtx_code sub_code)
{
  rtx pat;
  unsigned int i, nargs;
  bool comparison_p = false;
  bool tf_p = false;
  bool last_arg_constant = false;
  int num_memory = 0;
  struct {
    rtx op;
    enum machine_mode mode;
  } args[4];

  enum machine_mode tmode = insn_data[icode].operand[0].mode;

  switch (m_type)
    {
    case MULTI_ARG_4_DF2_DI_I:
    case MULTI_ARG_4_DF2_DI_I1:
    case MULTI_ARG_4_SF2_SI_I:
    case MULTI_ARG_4_SF2_SI_I1:
      nargs = 4;
      last_arg_constant = true;
      break;

    case MULTI_ARG_3_SF:
    case MULTI_ARG_3_DF:
    case MULTI_ARG_3_SF2:
    case MULTI_ARG_3_DF2:
    case MULTI_ARG_3_DI:
    case MULTI_ARG_3_SI:
    case MULTI_ARG_3_SI_DI:
    case MULTI_ARG_3_HI:
    case MULTI_ARG_3_HI_SI:
    case MULTI_ARG_3_QI:
    case MULTI_ARG_3_DI2:
    case MULTI_ARG_3_SI2:
    case MULTI_ARG_3_HI2:
    case MULTI_ARG_3_QI2:
      nargs = 3;
      break;

    case MULTI_ARG_2_SF:
    case MULTI_ARG_2_DF:
    case MULTI_ARG_2_DI:
    case MULTI_ARG_2_SI:
    case MULTI_ARG_2_HI:
    case MULTI_ARG_2_QI:
      nargs = 2;
      break;

    case MULTI_ARG_2_DI_IMM:
    case MULTI_ARG_2_SI_IMM:
    case MULTI_ARG_2_HI_IMM:
    case MULTI_ARG_2_QI_IMM:
      nargs = 2;
      last_arg_constant = true;
      break;

    case MULTI_ARG_1_SF:
    case MULTI_ARG_1_DF:
    case MULTI_ARG_1_SF2:
    case MULTI_ARG_1_DF2:
    case MULTI_ARG_1_DI:
    case MULTI_ARG_1_SI:
    case MULTI_ARG_1_HI:
    case MULTI_ARG_1_QI:
    case MULTI_ARG_1_SI_DI:
    case MULTI_ARG_1_HI_DI:
    case MULTI_ARG_1_HI_SI:
    case MULTI_ARG_1_QI_DI:
    case MULTI_ARG_1_QI_SI:
    case MULTI_ARG_1_QI_HI:
      nargs = 1;
      break;

    case MULTI_ARG_2_DI_CMP:
    case MULTI_ARG_2_SI_CMP:
    case MULTI_ARG_2_HI_CMP:
    case MULTI_ARG_2_QI_CMP:
      nargs = 2;
      comparison_p = true;
      break;

    case MULTI_ARG_2_SF_TF:
    case MULTI_ARG_2_DF_TF:
    case MULTI_ARG_2_DI_TF:
    case MULTI_ARG_2_SI_TF:
    case MULTI_ARG_2_HI_TF:
    case MULTI_ARG_2_QI_TF:
      nargs = 2;
      tf_p = true;
      break;

    default:
      gcc_unreachable ();
    }

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  gcc_assert (nargs <= 4);

  for (i = 0; i < nargs; i++)
    {
      tree arg = CALL_EXPR_ARG (exp, i);
      rtx op = expand_normal (arg);
      int adjust = (comparison_p) ? 1 : 0;
      enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;

      if (last_arg_constant && i == nargs-1)
	{
	  if (!CONST_INT_P (op))
	    {
	      error ("last argument must be an immediate");
	      return gen_reg_rtx (tmode);
	    }
	}
      else
	{
	  if (VECTOR_MODE_P (mode))
	    op = safe_vector_operand (op, mode);

	  /* If we aren't optimizing, only allow one memory operand to be
	     generated.  */
	  if (memory_operand (op, mode))
	    num_memory++;

	  gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);

	  if (optimize
	      || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
	      || num_memory > 1)
	    op = force_reg (mode, op);
	}

      args[i].op = op;
      args[i].mode = mode;
    }

  switch (nargs)
    {
    case 1:
      pat = GEN_FCN (icode) (target, args[0].op);
      break;

    case 2:
      if (tf_p)
	pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
			       GEN_INT ((int)sub_code));
      else if (! comparison_p)
	pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
      else
	{
	  rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
				       args[0].op,
				       args[1].op);

	  pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
	}
      break;

    case 3:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
      break;

    case 4:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op, args[3].op);
      break;

    default:
      gcc_unreachable ();
    }

  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_args_builtin to take care of scalar unop
   insns with vec_merge.  */

static rtx
ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
				    rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op1, op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);

  if ((optimize && !register_operand (op0, mode0))
      || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  op1 = op0;
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
    op1 = copy_to_mode_reg (mode0, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
23128 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
23131 ix86_expand_sse_compare (const struct builtin_description *d,
23132 tree exp, rtx target, bool swap)
23135 tree arg0 = CALL_EXPR_ARG (exp, 0);
23136 tree arg1 = CALL_EXPR_ARG (exp, 1);
23137 rtx op0 = expand_normal (arg0);
23138 rtx op1 = expand_normal (arg1);
23140 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
23141 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
23142 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
23143 enum rtx_code comparison = d->comparison;
23145 if (VECTOR_MODE_P (mode0))
23146 op0 = safe_vector_operand (op0, mode0);
23147 if (VECTOR_MODE_P (mode1))
23148 op1 = safe_vector_operand (op1, mode1);
23150 /* Swap operands if we have a comparison that isn't available in hardware.  */
23154 rtx tmp = gen_reg_rtx (mode1);
23155 emit_move_insn (tmp, op1);
23160 if (optimize || !target
23161 || GET_MODE (target) != tmode
23162 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
23163 target = gen_reg_rtx (tmode);
23165 if ((optimize && !register_operand (op0, mode0))
23166 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
23167 op0 = copy_to_mode_reg (mode0, op0);
23168 if ((optimize && !register_operand (op1, mode1))
23169 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
23170 op1 = copy_to_mode_reg (mode1, op1);
23172 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
23173 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
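/* Illustrative user-level sketch (compiled separately): the compare expander
   above produces per-element masks of all-ones/all-zeros, and the swap path
   covers predicates with no direct encoding -- e.g. cmpgt is emitted as
   cmplt with the operands exchanged.  */
#include <xmmintrin.h>

__m128
max_elements (__m128 a, __m128 b)
{
  __m128 gt = _mm_cmpgt_ps (a, b);	/* cmpltps with swapped operands */
  return _mm_or_ps (_mm_and_ps (gt, a),		/* take a where a > b */
		    _mm_andnot_ps (gt, b));	/* otherwise take b */
}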
23180 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
23183 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
23187 tree arg0 = CALL_EXPR_ARG (exp, 0);
23188 tree arg1 = CALL_EXPR_ARG (exp, 1);
23189 rtx op0 = expand_normal (arg0);
23190 rtx op1 = expand_normal (arg1);
23191 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23192 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23193 enum rtx_code comparison = d->comparison;
23195 if (VECTOR_MODE_P (mode0))
23196 op0 = safe_vector_operand (op0, mode0);
23197 if (VECTOR_MODE_P (mode1))
23198 op1 = safe_vector_operand (op1, mode1);
23200 /* Swap operands if we have a comparison that isn't available in hardware.  */
23202 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23209 target = gen_reg_rtx (SImode);
23210 emit_move_insn (target, const0_rtx);
23211 target = gen_rtx_SUBREG (QImode, target, 0);
23213 if ((optimize && !register_operand (op0, mode0))
23214 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23215 op0 = copy_to_mode_reg (mode0, op0);
23216 if ((optimize && !register_operand (op1, mode1))
23217 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23218 op1 = copy_to_mode_reg (mode1, op1);
23220 pat = GEN_FCN (d->icode) (op0, op1);
23224 emit_insn (gen_rtx_SET (VOIDmode,
23225 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23226 gen_rtx_fmt_ee (comparison, QImode,
23230 return SUBREG_REG (target);
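/* Illustrative user-level sketch (compiled separately): comi builtins
   compare the low elements and return an int materialized from the flags,
   which is what the SImode-zero / QImode STRICT_LOW_PART sequence above
   implements.  */
#include <xmmintrin.h>

int
low_elements_equal (__m128 a, __m128 b)
{
  return _mm_comieq_ss (a, b);	/* 1 if a[0] == b[0], else 0 */
}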
23233 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
23236 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
23240 tree arg0 = CALL_EXPR_ARG (exp, 0);
23241 tree arg1 = CALL_EXPR_ARG (exp, 1);
23242 rtx op0 = expand_normal (arg0);
23243 rtx op1 = expand_normal (arg1);
23244 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23245 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23246 enum rtx_code comparison = d->comparison;
23248 if (VECTOR_MODE_P (mode0))
23249 op0 = safe_vector_operand (op0, mode0);
23250 if (VECTOR_MODE_P (mode1))
23251 op1 = safe_vector_operand (op1, mode1);
23253 target = gen_reg_rtx (SImode);
23254 emit_move_insn (target, const0_rtx);
23255 target = gen_rtx_SUBREG (QImode, target, 0);
23257 if ((optimize && !register_operand (op0, mode0))
23258 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23259 op0 = copy_to_mode_reg (mode0, op0);
23260 if ((optimize && !register_operand (op1, mode1))
23261 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23262 op1 = copy_to_mode_reg (mode1, op1);
23264 pat = GEN_FCN (d->icode) (op0, op1);
23268 emit_insn (gen_rtx_SET (VOIDmode,
23269 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23270 gen_rtx_fmt_ee (comparison, QImode,
23274 return SUBREG_REG (target);
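/* Illustrative user-level sketch (compiled separately): ptest builtins
   likewise turn a single flag bit into an int result.  */
#include <smmintrin.h>

int
no_common_bits (__m128i a, __m128i b)
{
  return _mm_testz_si128 (a, b);	/* 1 iff (a & b) == 0, i.e. ZF set */
}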
23277 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
23280 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23281 tree exp, rtx target)
23284 tree arg0 = CALL_EXPR_ARG (exp, 0);
23285 tree arg1 = CALL_EXPR_ARG (exp, 1);
23286 tree arg2 = CALL_EXPR_ARG (exp, 2);
23287 tree arg3 = CALL_EXPR_ARG (exp, 3);
23288 tree arg4 = CALL_EXPR_ARG (exp, 4);
23289 rtx scratch0, scratch1;
23290 rtx op0 = expand_normal (arg0);
23291 rtx op1 = expand_normal (arg1);
23292 rtx op2 = expand_normal (arg2);
23293 rtx op3 = expand_normal (arg3);
23294 rtx op4 = expand_normal (arg4);
23295 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23297 tmode0 = insn_data[d->icode].operand[0].mode;
23298 tmode1 = insn_data[d->icode].operand[1].mode;
23299 modev2 = insn_data[d->icode].operand[2].mode;
23300 modei3 = insn_data[d->icode].operand[3].mode;
23301 modev4 = insn_data[d->icode].operand[4].mode;
23302 modei5 = insn_data[d->icode].operand[5].mode;
23303 modeimm = insn_data[d->icode].operand[6].mode;
23305 if (VECTOR_MODE_P (modev2))
23306 op0 = safe_vector_operand (op0, modev2);
23307 if (VECTOR_MODE_P (modev4))
23308 op2 = safe_vector_operand (op2, modev4);
23310 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23311 op0 = copy_to_mode_reg (modev2, op0);
23312 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
23313 op1 = copy_to_mode_reg (modei3, op1);
23314 if ((optimize && !register_operand (op2, modev4))
23315 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
23316 op2 = copy_to_mode_reg (modev4, op2);
23317 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
23318 op3 = copy_to_mode_reg (modei5, op3);
23320 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
23322 error ("the fifth argument must be a 8-bit immediate");
23326 if (d->code == IX86_BUILTIN_PCMPESTRI128)
23328 if (optimize || !target
23329 || GET_MODE (target) != tmode0
23330 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23331 target = gen_reg_rtx (tmode0);
23333 scratch1 = gen_reg_rtx (tmode1);
23335 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
23337 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
23339 if (optimize || !target
23340 || GET_MODE (target) != tmode1
23341 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23342 target = gen_reg_rtx (tmode1);
23344 scratch0 = gen_reg_rtx (tmode0);
23346 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
23350 gcc_assert (d->flag);
23352 scratch0 = gen_reg_rtx (tmode0);
23353 scratch1 = gen_reg_rtx (tmode1);
23355 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
23365 target = gen_reg_rtx (SImode);
23366 emit_move_insn (target, const0_rtx);
23367 target = gen_rtx_SUBREG (QImode, target, 0);
23370 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23371 gen_rtx_fmt_ee (EQ, QImode,
23372 gen_rtx_REG ((enum machine_mode) d->flag,
23375 return SUBREG_REG (target);
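/* Illustrative user-level sketch (compiled separately): pcmpestri takes two
   vectors with explicit lengths plus the 8-bit control immediate validated
   above; the ...I form returns an index, the ...M form (pcmpestrm) returns
   a mask in xmm0.  */
#include <nmmintrin.h>

int
find_any_of (__m128i set, int set_len, __m128i text, int text_len)
{
  /* Index of the first byte of TEXT matching any byte of SET.  */
  return _mm_cmpestri (set, set_len, text, text_len,
		       _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY
		       | _SIDD_LEAST_SIGNIFICANT);
}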
23382 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
23385 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
23386 tree exp, rtx target)
23389 tree arg0 = CALL_EXPR_ARG (exp, 0);
23390 tree arg1 = CALL_EXPR_ARG (exp, 1);
23391 tree arg2 = CALL_EXPR_ARG (exp, 2);
23392 rtx scratch0, scratch1;
23393 rtx op0 = expand_normal (arg0);
23394 rtx op1 = expand_normal (arg1);
23395 rtx op2 = expand_normal (arg2);
23396 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
23398 tmode0 = insn_data[d->icode].operand[0].mode;
23399 tmode1 = insn_data[d->icode].operand[1].mode;
23400 modev2 = insn_data[d->icode].operand[2].mode;
23401 modev3 = insn_data[d->icode].operand[3].mode;
23402 modeimm = insn_data[d->icode].operand[4].mode;
23404 if (VECTOR_MODE_P (modev2))
23405 op0 = safe_vector_operand (op0, modev2);
23406 if (VECTOR_MODE_P (modev3))
23407 op1 = safe_vector_operand (op1, modev3);
23409 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23410 op0 = copy_to_mode_reg (modev2, op0);
23411 if ((optimize && !register_operand (op1, modev3))
23412 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
23413 op1 = copy_to_mode_reg (modev3, op1);
23415 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
23417 error ("the third argument must be a 8-bit immediate");
23421 if (d->code == IX86_BUILTIN_PCMPISTRI128)
23423 if (optimize || !target
23424 || GET_MODE (target) != tmode0
23425 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23426 target = gen_reg_rtx (tmode0);
23428 scratch1 = gen_reg_rtx (tmode1);
23430 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
23432 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
23434 if (optimize || !target
23435 || GET_MODE (target) != tmode1
23436 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23437 target = gen_reg_rtx (tmode1);
23439 scratch0 = gen_reg_rtx (tmode0);
23441 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
23445 gcc_assert (d->flag);
23447 scratch0 = gen_reg_rtx (tmode0);
23448 scratch1 = gen_reg_rtx (tmode1);
23450 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
23460 target = gen_reg_rtx (SImode);
23461 emit_move_insn (target, const0_rtx);
23462 target = gen_rtx_SUBREG (QImode, target, 0);
23465 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23466 gen_rtx_fmt_ee (EQ, QImode,
23467 gen_rtx_REG ((enum machine_mode) d->flag,
23470 return SUBREG_REG (target);
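/* Illustrative user-level sketch (compiled separately): the implicit-length
   variant scans up to the first zero byte instead of taking explicit
   lengths, so it needs only three arguments.  */
#include <nmmintrin.h>

int
find_any_of_z (__m128i set, __m128i text)
{
  return _mm_cmpistri (set, text,
		       _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY
		       | _SIDD_LEAST_SIGNIFICANT);
}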
23476 /* Subroutine of ix86_expand_builtin to take care of insns with
23477 a variable number of operands.  */
23480 ix86_expand_args_builtin (const struct builtin_description *d,
23481 tree exp, rtx target)
23483 rtx pat, real_target;
23484 unsigned int i, nargs;
23485 unsigned int nargs_constant = 0;
23486 int num_memory = 0;
23490 enum machine_mode mode;
23492 bool last_arg_count = false;
23493 enum insn_code icode = d->icode;
23494 const struct insn_data *insn_p = &insn_data[icode];
23495 enum machine_mode tmode = insn_p->operand[0].mode;
23496 enum machine_mode rmode = VOIDmode;
23498 enum rtx_code comparison = d->comparison;
23500 switch ((enum ix86_builtin_func_type) d->flag)
23502 case INT_FTYPE_V8SF_V8SF_PTEST:
23503 case INT_FTYPE_V4DI_V4DI_PTEST:
23504 case INT_FTYPE_V4DF_V4DF_PTEST:
23505 case INT_FTYPE_V4SF_V4SF_PTEST:
23506 case INT_FTYPE_V2DI_V2DI_PTEST:
23507 case INT_FTYPE_V2DF_V2DF_PTEST:
23508 return ix86_expand_sse_ptest (d, exp, target);
23509 case FLOAT128_FTYPE_FLOAT128:
23510 case FLOAT_FTYPE_FLOAT:
23511 case INT_FTYPE_INT:
23512 case UINT64_FTYPE_INT:
23513 case UINT16_FTYPE_UINT16:
23514 case INT64_FTYPE_INT64:
23515 case INT64_FTYPE_V4SF:
23516 case INT64_FTYPE_V2DF:
23517 case INT_FTYPE_V16QI:
23518 case INT_FTYPE_V8QI:
23519 case INT_FTYPE_V8SF:
23520 case INT_FTYPE_V4DF:
23521 case INT_FTYPE_V4SF:
23522 case INT_FTYPE_V2DF:
23523 case V16QI_FTYPE_V16QI:
23524 case V8SI_FTYPE_V8SF:
23525 case V8SI_FTYPE_V4SI:
23526 case V8HI_FTYPE_V8HI:
23527 case V8HI_FTYPE_V16QI:
23528 case V8QI_FTYPE_V8QI:
23529 case V8SF_FTYPE_V8SF:
23530 case V8SF_FTYPE_V8SI:
23531 case V8SF_FTYPE_V4SF:
23532 case V4SI_FTYPE_V4SI:
23533 case V4SI_FTYPE_V16QI:
23534 case V4SI_FTYPE_V4SF:
23535 case V4SI_FTYPE_V8SI:
23536 case V4SI_FTYPE_V8HI:
23537 case V4SI_FTYPE_V4DF:
23538 case V4SI_FTYPE_V2DF:
23539 case V4HI_FTYPE_V4HI:
23540 case V4DF_FTYPE_V4DF:
23541 case V4DF_FTYPE_V4SI:
23542 case V4DF_FTYPE_V4SF:
23543 case V4DF_FTYPE_V2DF:
23544 case V4SF_FTYPE_V4SF:
23545 case V4SF_FTYPE_V4SI:
23546 case V4SF_FTYPE_V8SF:
23547 case V4SF_FTYPE_V4DF:
23548 case V4SF_FTYPE_V2DF:
23549 case V2DI_FTYPE_V2DI:
23550 case V2DI_FTYPE_V16QI:
23551 case V2DI_FTYPE_V8HI:
23552 case V2DI_FTYPE_V4SI:
23553 case V2DF_FTYPE_V2DF:
23554 case V2DF_FTYPE_V4SI:
23555 case V2DF_FTYPE_V4DF:
23556 case V2DF_FTYPE_V4SF:
23557 case V2DF_FTYPE_V2SI:
23558 case V2SI_FTYPE_V2SI:
23559 case V2SI_FTYPE_V4SF:
23560 case V2SI_FTYPE_V2SF:
23561 case V2SI_FTYPE_V2DF:
23562 case V2SF_FTYPE_V2SF:
23563 case V2SF_FTYPE_V2SI:
23566 case V4SF_FTYPE_V4SF_VEC_MERGE:
23567 case V2DF_FTYPE_V2DF_VEC_MERGE:
23568 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
23569 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
23570 case V16QI_FTYPE_V16QI_V16QI:
23571 case V16QI_FTYPE_V8HI_V8HI:
23572 case V8QI_FTYPE_V8QI_V8QI:
23573 case V8QI_FTYPE_V4HI_V4HI:
23574 case V8HI_FTYPE_V8HI_V8HI:
23575 case V8HI_FTYPE_V16QI_V16QI:
23576 case V8HI_FTYPE_V4SI_V4SI:
23577 case V8SF_FTYPE_V8SF_V8SF:
23578 case V8SF_FTYPE_V8SF_V8SI:
23579 case V4SI_FTYPE_V4SI_V4SI:
23580 case V4SI_FTYPE_V8HI_V8HI:
23581 case V4SI_FTYPE_V4SF_V4SF:
23582 case V4SI_FTYPE_V2DF_V2DF:
23583 case V4HI_FTYPE_V4HI_V4HI:
23584 case V4HI_FTYPE_V8QI_V8QI:
23585 case V4HI_FTYPE_V2SI_V2SI:
23586 case V4DF_FTYPE_V4DF_V4DF:
23587 case V4DF_FTYPE_V4DF_V4DI:
23588 case V4SF_FTYPE_V4SF_V4SF:
23589 case V4SF_FTYPE_V4SF_V4SI:
23590 case V4SF_FTYPE_V4SF_V2SI:
23591 case V4SF_FTYPE_V4SF_V2DF:
23592 case V4SF_FTYPE_V4SF_DI:
23593 case V4SF_FTYPE_V4SF_SI:
23594 case V2DI_FTYPE_V2DI_V2DI:
23595 case V2DI_FTYPE_V16QI_V16QI:
23596 case V2DI_FTYPE_V4SI_V4SI:
23597 case V2DI_FTYPE_V2DI_V16QI:
23598 case V2DI_FTYPE_V2DF_V2DF:
23599 case V2SI_FTYPE_V2SI_V2SI:
23600 case V2SI_FTYPE_V4HI_V4HI:
23601 case V2SI_FTYPE_V2SF_V2SF:
23602 case V2DF_FTYPE_V2DF_V2DF:
23603 case V2DF_FTYPE_V2DF_V4SF:
23604 case V2DF_FTYPE_V2DF_V2DI:
23605 case V2DF_FTYPE_V2DF_DI:
23606 case V2DF_FTYPE_V2DF_SI:
23607 case V2SF_FTYPE_V2SF_V2SF:
23608 case V1DI_FTYPE_V1DI_V1DI:
23609 case V1DI_FTYPE_V8QI_V8QI:
23610 case V1DI_FTYPE_V2SI_V2SI:
23611 if (comparison == UNKNOWN)
23612 return ix86_expand_binop_builtin (icode, exp, target);
23615 case V4SF_FTYPE_V4SF_V4SF_SWAP:
23616 case V2DF_FTYPE_V2DF_V2DF_SWAP:
23617 gcc_assert (comparison != UNKNOWN);
23621 case V8HI_FTYPE_V8HI_V8HI_COUNT:
23622 case V8HI_FTYPE_V8HI_SI_COUNT:
23623 case V4SI_FTYPE_V4SI_V4SI_COUNT:
23624 case V4SI_FTYPE_V4SI_SI_COUNT:
23625 case V4HI_FTYPE_V4HI_V4HI_COUNT:
23626 case V4HI_FTYPE_V4HI_SI_COUNT:
23627 case V2DI_FTYPE_V2DI_V2DI_COUNT:
23628 case V2DI_FTYPE_V2DI_SI_COUNT:
23629 case V2SI_FTYPE_V2SI_V2SI_COUNT:
23630 case V2SI_FTYPE_V2SI_SI_COUNT:
23631 case V1DI_FTYPE_V1DI_V1DI_COUNT:
23632 case V1DI_FTYPE_V1DI_SI_COUNT:
23634 last_arg_count = true;
23636 case UINT64_FTYPE_UINT64_UINT64:
23637 case UINT_FTYPE_UINT_UINT:
23638 case UINT_FTYPE_UINT_USHORT:
23639 case UINT_FTYPE_UINT_UCHAR:
23640 case UINT16_FTYPE_UINT16_INT:
23641 case UINT8_FTYPE_UINT8_INT:
23644 case V2DI_FTYPE_V2DI_INT_CONVERT:
23647 nargs_constant = 1;
23649 case V8HI_FTYPE_V8HI_INT:
23650 case V8SF_FTYPE_V8SF_INT:
23651 case V4SI_FTYPE_V4SI_INT:
23652 case V4SI_FTYPE_V8SI_INT:
23653 case V4HI_FTYPE_V4HI_INT:
23654 case V4DF_FTYPE_V4DF_INT:
23655 case V4SF_FTYPE_V4SF_INT:
23656 case V4SF_FTYPE_V8SF_INT:
23657 case V2DI_FTYPE_V2DI_INT:
23658 case V2DF_FTYPE_V2DF_INT:
23659 case V2DF_FTYPE_V4DF_INT:
23661 nargs_constant = 1;
23663 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23664 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23665 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23666 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23667 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23670 case V16QI_FTYPE_V16QI_V16QI_INT:
23671 case V8HI_FTYPE_V8HI_V8HI_INT:
23672 case V8SI_FTYPE_V8SI_V8SI_INT:
23673 case V8SI_FTYPE_V8SI_V4SI_INT:
23674 case V8SF_FTYPE_V8SF_V8SF_INT:
23675 case V8SF_FTYPE_V8SF_V4SF_INT:
23676 case V4SI_FTYPE_V4SI_V4SI_INT:
23677 case V4DF_FTYPE_V4DF_V4DF_INT:
23678 case V4DF_FTYPE_V4DF_V2DF_INT:
23679 case V4SF_FTYPE_V4SF_V4SF_INT:
23680 case V2DI_FTYPE_V2DI_V2DI_INT:
23681 case V2DF_FTYPE_V2DF_V2DF_INT:
23683 nargs_constant = 1;
23685 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
23688 nargs_constant = 1;
23690 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
23693 nargs_constant = 1;
23695 case V2DI_FTYPE_V2DI_UINT_UINT:
23697 nargs_constant = 2;
23699 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
23700 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
23701 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
23702 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
23704 nargs_constant = 1;
23706 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23708 nargs_constant = 2;
23711 gcc_unreachable ();
23714 gcc_assert (nargs <= ARRAY_SIZE (args));
23716 if (comparison != UNKNOWN)
23718 gcc_assert (nargs == 2);
23719 return ix86_expand_sse_compare (d, exp, target, swap);
23722 if (rmode == VOIDmode || rmode == tmode)
23726 || GET_MODE (target) != tmode
23727 || ! (*insn_p->operand[0].predicate) (target, tmode))
23728 target = gen_reg_rtx (tmode);
23729 real_target = target;
23733 target = gen_reg_rtx (rmode);
23734 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
23737 for (i = 0; i < nargs; i++)
23739 tree arg = CALL_EXPR_ARG (exp, i);
23740 rtx op = expand_normal (arg);
23741 enum machine_mode mode = insn_p->operand[i + 1].mode;
23742 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
23744 if (last_arg_count && (i + 1) == nargs)
23746 /* SIMD shift insns take either an 8-bit immediate or a
23747 register as the count.  But the builtin functions take an int as
23748 the count; if it doesn't match, we put it in a register.  */
23751 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
23752 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
23753 op = copy_to_reg (op);
23756 else if ((nargs - i) <= nargs_constant)
23761 case CODE_FOR_sse4_1_roundpd:
23762 case CODE_FOR_sse4_1_roundps:
23763 case CODE_FOR_sse4_1_roundsd:
23764 case CODE_FOR_sse4_1_roundss:
23765 case CODE_FOR_sse4_1_blendps:
23766 case CODE_FOR_avx_blendpd256:
23767 case CODE_FOR_avx_vpermilv4df:
23768 case CODE_FOR_avx_roundpd256:
23769 case CODE_FOR_avx_roundps256:
23770 error ("the last argument must be a 4-bit immediate");
23773 case CODE_FOR_sse4_1_blendpd:
23774 case CODE_FOR_avx_vpermilv2df:
23775 case CODE_FOR_xop_vpermil2v2df3:
23776 case CODE_FOR_xop_vpermil2v4sf3:
23777 case CODE_FOR_xop_vpermil2v4df3:
23778 case CODE_FOR_xop_vpermil2v8sf3:
23779 error ("the last argument must be a 2-bit immediate");
23782 case CODE_FOR_avx_vextractf128v4df:
23783 case CODE_FOR_avx_vextractf128v8sf:
23784 case CODE_FOR_avx_vextractf128v8si:
23785 case CODE_FOR_avx_vinsertf128v4df:
23786 case CODE_FOR_avx_vinsertf128v8sf:
23787 case CODE_FOR_avx_vinsertf128v8si:
23788 error ("the last argument must be a 1-bit immediate");
23791 case CODE_FOR_avx_cmpsdv2df3:
23792 case CODE_FOR_avx_cmpssv4sf3:
23793 case CODE_FOR_avx_cmppdv2df3:
23794 case CODE_FOR_avx_cmppsv4sf3:
23795 case CODE_FOR_avx_cmppdv4df3:
23796 case CODE_FOR_avx_cmppsv8sf3:
23797 error ("the last argument must be a 5-bit immediate");
23801 switch (nargs_constant)
23804 if ((nargs - i) == nargs_constant)
23806 error ("the next to last argument must be an 8-bit immediate");
23810 error ("the last argument must be an 8-bit immediate");
23813 gcc_unreachable ();
23820 if (VECTOR_MODE_P (mode))
23821 op = safe_vector_operand (op, mode);
23823 /* If we aren't optimizing, only allow one memory operand to be generated.  */
23825 if (memory_operand (op, mode))
23828 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
23830 if (optimize || !match || num_memory > 1)
23831 op = copy_to_mode_reg (mode, op);
23835 op = copy_to_reg (op);
23836 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
23841 args[i].mode = mode;
23847 pat = GEN_FCN (icode) (real_target, args[0].op);
23850 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
23853 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23857 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23858 args[2].op, args[3].op);
23861 gcc_unreachable ();
23871 /* Subroutine of ix86_expand_builtin to take care of special insns
23872 with a variable number of operands.  */
23875 ix86_expand_special_args_builtin (const struct builtin_description *d,
23876 tree exp, rtx target)
23880 unsigned int i, nargs, arg_adjust, memory;
23884 enum machine_mode mode;
23886 enum insn_code icode = d->icode;
23887 bool last_arg_constant = false;
23888 const struct insn_data *insn_p = &insn_data[icode];
23889 enum machine_mode tmode = insn_p->operand[0].mode;
23890 enum { load, store } klass;
23892 switch ((enum ix86_builtin_func_type) d->flag)
23894 case VOID_FTYPE_VOID:
23895 emit_insn (GEN_FCN (icode) (target));
23897 case UINT64_FTYPE_VOID:
23902 case UINT64_FTYPE_PUNSIGNED:
23903 case V2DI_FTYPE_PV2DI:
23904 case V32QI_FTYPE_PCCHAR:
23905 case V16QI_FTYPE_PCCHAR:
23906 case V8SF_FTYPE_PCV4SF:
23907 case V8SF_FTYPE_PCFLOAT:
23908 case V4SF_FTYPE_PCFLOAT:
23909 case V4DF_FTYPE_PCV2DF:
23910 case V4DF_FTYPE_PCDOUBLE:
23911 case V2DF_FTYPE_PCDOUBLE:
23912 case VOID_FTYPE_PVOID:
23917 case VOID_FTYPE_PV2SF_V4SF:
23918 case VOID_FTYPE_PV4DI_V4DI:
23919 case VOID_FTYPE_PV2DI_V2DI:
23920 case VOID_FTYPE_PCHAR_V32QI:
23921 case VOID_FTYPE_PCHAR_V16QI:
23922 case VOID_FTYPE_PFLOAT_V8SF:
23923 case VOID_FTYPE_PFLOAT_V4SF:
23924 case VOID_FTYPE_PDOUBLE_V4DF:
23925 case VOID_FTYPE_PDOUBLE_V2DF:
23926 case VOID_FTYPE_PULONGLONG_ULONGLONG:
23927 case VOID_FTYPE_PINT_INT:
23930 /* Reserve memory operand for target. */
23931 memory = ARRAY_SIZE (args);
23933 case V4SF_FTYPE_V4SF_PCV2SF:
23934 case V2DF_FTYPE_V2DF_PCDOUBLE:
23939 case V8SF_FTYPE_PCV8SF_V8SF:
23940 case V4DF_FTYPE_PCV4DF_V4DF:
23941 case V4SF_FTYPE_PCV4SF_V4SF:
23942 case V2DF_FTYPE_PCV2DF_V2DF:
23947 case VOID_FTYPE_PV8SF_V8SF_V8SF:
23948 case VOID_FTYPE_PV4DF_V4DF_V4DF:
23949 case VOID_FTYPE_PV4SF_V4SF_V4SF:
23950 case VOID_FTYPE_PV2DF_V2DF_V2DF:
23953 /* Reserve memory operand for target. */
23954 memory = ARRAY_SIZE (args);
23956 case VOID_FTYPE_UINT_UINT_UINT:
23957 case VOID_FTYPE_UINT64_UINT_UINT:
23958 case UCHAR_FTYPE_UINT_UINT_UINT:
23959 case UCHAR_FTYPE_UINT64_UINT_UINT:
23962 memory = ARRAY_SIZE (args);
23963 last_arg_constant = true;
23966 gcc_unreachable ();
23969 gcc_assert (nargs <= ARRAY_SIZE (args));
23971 if (klass == store)
23973 arg = CALL_EXPR_ARG (exp, 0);
23974 op = expand_normal (arg);
23975 gcc_assert (target == 0);
23976 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
23984 || GET_MODE (target) != tmode
23985 || ! (*insn_p->operand[0].predicate) (target, tmode))
23986 target = gen_reg_rtx (tmode);
23989 for (i = 0; i < nargs; i++)
23991 enum machine_mode mode = insn_p->operand[i + 1].mode;
23994 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
23995 op = expand_normal (arg);
23996 match = (*insn_p->operand[i + 1].predicate) (op, mode);
23998 if (last_arg_constant && (i + 1) == nargs)
24002 if (icode == CODE_FOR_lwp_lwpvalsi3
24003 || icode == CODE_FOR_lwp_lwpinssi3
24004 || icode == CODE_FOR_lwp_lwpvaldi3
24005 || icode == CODE_FOR_lwp_lwpinsdi3)
24006 error ("the last argument must be a 32-bit immediate");
24008 error ("the last argument must be an 8-bit immediate");
24016 /* This must be the memory operand. */
24017 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
24018 gcc_assert (GET_MODE (op) == mode
24019 || GET_MODE (op) == VOIDmode);
24023 /* This must be a register.  */
24024 if (VECTOR_MODE_P (mode))
24025 op = safe_vector_operand (op, mode);
24027 gcc_assert (GET_MODE (op) == mode
24028 || GET_MODE (op) == VOIDmode);
24029 op = copy_to_mode_reg (mode, op);
24034 args[i].mode = mode;
24040 pat = GEN_FCN (icode) (target);
24043 pat = GEN_FCN (icode) (target, args[0].op);
24046 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
24049 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
24052 gcc_unreachable ();
24058 return klass == store ? 0 : target;
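/* Illustrative user-level sketch (compiled separately): "special" builtins
   are the loads and stores handled above, where one operand is kept as a
   MEM rather than forced into a register, e.g. the unaligned V2DF
   load/store pair.  */
#include <emmintrin.h>

void
double_in_place (double *p)	/* P may be unaligned.  */
{
  __m128d v = _mm_loadu_pd (p);		 /* load class, V2DF_FTYPE_PCDOUBLE */
  _mm_storeu_pd (p, _mm_add_pd (v, v));	 /* store class, VOID_FTYPE_PDOUBLE_V2DF */
}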
24061 /* Return the integer constant in ARG. Constrain it to be in the range
24062 of the subparts of VEC_TYPE; issue an error if not. */
24065 get_element_number (tree vec_type, tree arg)
24067 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
24069 if (!host_integerp (arg, 1)
24070 || (elt = tree_low_cst (arg, 1), elt > max))
24072 error ("selector must be an integer constant in the range 0..%wi", max);
24079 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24080 ix86_expand_vector_init. We DO have language-level syntax for this, in
24081 the form of (type){ init-list }. Except that since we can't place emms
24082 instructions from inside the compiler, we can't allow the use of MMX
24083 registers unless the user explicitly asks for it. So we do *not* define
24084 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
24085 we have builtins invoked by mmintrin.h that give us license to emit
24086 these sorts of instructions. */
24089 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
24091 enum machine_mode tmode = TYPE_MODE (type);
24092 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
24093 int i, n_elt = GET_MODE_NUNITS (tmode);
24094 rtvec v = rtvec_alloc (n_elt);
24096 gcc_assert (VECTOR_MODE_P (tmode));
24097 gcc_assert (call_expr_nargs (exp) == n_elt);
24099 for (i = 0; i < n_elt; ++i)
24101 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
24102 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
24105 if (!target || !register_operand (target, tmode))
24106 target = gen_reg_rtx (tmode);
24108 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
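/* Illustrative user-level sketch (compiled separately): mmintrin.h builds
   MMX vectors through a vec_init builtin rather than the (type){ ... }
   syntax, for the emms reason given above.  */
#include <mmintrin.h>

__m64
pack_two_ints (int hi, int lo)
{
  /* Expands via __builtin_ia32_vec_init_v2si; the caller must still
     issue _mm_empty () before touching the x87 FPU.  */
  return _mm_set_pi32 (hi, lo);
}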
24112 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24113 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
24114 had a language-level syntax for referencing vector elements. */
24117 ix86_expand_vec_ext_builtin (tree exp, rtx target)
24119 enum machine_mode tmode, mode0;
24124 arg0 = CALL_EXPR_ARG (exp, 0);
24125 arg1 = CALL_EXPR_ARG (exp, 1);
24127 op0 = expand_normal (arg0);
24128 elt = get_element_number (TREE_TYPE (arg0), arg1);
24130 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24131 mode0 = TYPE_MODE (TREE_TYPE (arg0));
24132 gcc_assert (VECTOR_MODE_P (mode0));
24134 op0 = force_reg (mode0, op0);
24136 if (optimize || !target || !register_operand (target, tmode))
24137 target = gen_reg_rtx (tmode);
24139 ix86_expand_vector_extract (true, target, op0, elt);
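/* Illustrative user-level sketch (compiled separately): extraction with a
   constant selector, range-checked by get_element_number above.  */
#include <emmintrin.h>

int
fourth_halfword (__m128i v)
{
  return _mm_extract_epi16 (v, 3);	/* via the vec_ext builtin for V8HI */
}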
24144 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24145 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
24146 a language-level syntax for referencing vector elements. */
24149 ix86_expand_vec_set_builtin (tree exp)
24151 enum machine_mode tmode, mode1;
24152 tree arg0, arg1, arg2;
24154 rtx op0, op1, target;
24156 arg0 = CALL_EXPR_ARG (exp, 0);
24157 arg1 = CALL_EXPR_ARG (exp, 1);
24158 arg2 = CALL_EXPR_ARG (exp, 2);
24160 tmode = TYPE_MODE (TREE_TYPE (arg0));
24161 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24162 gcc_assert (VECTOR_MODE_P (tmode));
24164 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24165 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24166 elt = get_element_number (TREE_TYPE (arg0), arg2);
24168 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24169 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24171 op0 = force_reg (tmode, op0);
24172 op1 = force_reg (mode1, op1);
24174 /* OP0 is the source of these builtin functions and shouldn't be
24175 modified. Create a copy, use it and return it as target. */
24176 target = gen_reg_rtx (tmode);
24177 emit_move_insn (target, op0);
24178 ix86_expand_vector_set (true, target, op1, elt);
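/* Illustrative user-level sketch (compiled separately): insertion with a
   constant selector; note the copy made above, so the source vector itself
   is never modified.  */
#include <emmintrin.h>

__m128i
replace_fourth_halfword (__m128i v, int x)
{
  return _mm_insert_epi16 (v, x, 3);	/* via the vec_set builtin for V8HI */
}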
24183 /* Expand an expression EXP that calls a built-in function,
24184 with result going to TARGET if that's convenient
24185 (and in mode MODE if that's convenient).
24186 SUBTARGET may be used as the target for computing one of EXP's operands.
24187 IGNORE is nonzero if the value is to be ignored. */
24190 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24191 enum machine_mode mode ATTRIBUTE_UNUSED,
24192 int ignore ATTRIBUTE_UNUSED)
24194 const struct builtin_description *d;
24196 enum insn_code icode;
24197 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24198 tree arg0, arg1, arg2;
24199 rtx op0, op1, op2, pat;
24200 enum machine_mode mode0, mode1, mode2;
24201 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
24203 /* Determine whether the builtin function is available under the current ISA.
24204 Originally the builtin was not created if it wasn't applicable to the
24205 current ISA based on the command line switches. With function specific
24206 options, we need to check in the context of the function making the call
24207 whether it is supported. */
24208 if (ix86_builtins_isa[fcode].isa
24209 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
24211 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24212 NULL, NULL, false);
24215 error ("%qE needs unknown isa option", fndecl);
24218 gcc_assert (opts != NULL);
24219 error ("%qE needs isa option %s", fndecl, opts);
24227 case IX86_BUILTIN_MASKMOVQ:
24228 case IX86_BUILTIN_MASKMOVDQU:
24229 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24230 ? CODE_FOR_mmx_maskmovq
24231 : CODE_FOR_sse2_maskmovdqu);
24232 /* Note the arg order is different from the operand order. */
24233 arg1 = CALL_EXPR_ARG (exp, 0);
24234 arg2 = CALL_EXPR_ARG (exp, 1);
24235 arg0 = CALL_EXPR_ARG (exp, 2);
24236 op0 = expand_normal (arg0);
24237 op1 = expand_normal (arg1);
24238 op2 = expand_normal (arg2);
24239 mode0 = insn_data[icode].operand[0].mode;
24240 mode1 = insn_data[icode].operand[1].mode;
24241 mode2 = insn_data[icode].operand[2].mode;
24243 op0 = force_reg (Pmode, op0);
24244 op0 = gen_rtx_MEM (mode1, op0);
24246 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24247 op0 = copy_to_mode_reg (mode0, op0);
24248 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24249 op1 = copy_to_mode_reg (mode1, op1);
24250 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24251 op2 = copy_to_mode_reg (mode2, op2);
24252 pat = GEN_FCN (icode) (op0, op1, op2);
24258 case IX86_BUILTIN_LDMXCSR:
24259 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24260 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24261 emit_move_insn (target, op0);
24262 emit_insn (gen_sse_ldmxcsr (target));
24265 case IX86_BUILTIN_STMXCSR:
24266 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24267 emit_insn (gen_sse_stmxcsr (target));
24268 return copy_to_mode_reg (SImode, target);
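      /* User-level view (illustrative): these two cases implement
	 _mm_setcsr and _mm_getcsr from xmmintrin.h; the stack slot is
	 needed because ldmxcsr/stmxcsr only accept a memory operand.
	 Typical use:

	     unsigned int saved = _mm_getcsr ();
	     _mm_setcsr (saved | 0x8040);    <- set the FTZ and DAZ bits
	     ...
	     _mm_setcsr (saved);  */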
24270 case IX86_BUILTIN_CLFLUSH:
24271 arg0 = CALL_EXPR_ARG (exp, 0);
24272 op0 = expand_normal (arg0);
24273 icode = CODE_FOR_sse2_clflush;
24274 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24275 op0 = copy_to_mode_reg (Pmode, op0);
24277 emit_insn (gen_sse2_clflush (op0));
24280 case IX86_BUILTIN_MONITOR:
24281 arg0 = CALL_EXPR_ARG (exp, 0);
24282 arg1 = CALL_EXPR_ARG (exp, 1);
24283 arg2 = CALL_EXPR_ARG (exp, 2);
24284 op0 = expand_normal (arg0);
24285 op1 = expand_normal (arg1);
24286 op2 = expand_normal (arg2);
24288 op0 = copy_to_mode_reg (Pmode, op0);
24290 op1 = copy_to_mode_reg (SImode, op1);
24292 op2 = copy_to_mode_reg (SImode, op2);
24293 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
24296 case IX86_BUILTIN_MWAIT:
24297 arg0 = CALL_EXPR_ARG (exp, 0);
24298 arg1 = CALL_EXPR_ARG (exp, 1);
24299 op0 = expand_normal (arg0);
24300 op1 = expand_normal (arg1);
24302 op0 = copy_to_mode_reg (SImode, op0);
24304 op1 = copy_to_mode_reg (SImode, op1);
24305 emit_insn (gen_sse3_mwait (op0, op1));
24308 case IX86_BUILTIN_VEC_INIT_V2SI:
24309 case IX86_BUILTIN_VEC_INIT_V4HI:
24310 case IX86_BUILTIN_VEC_INIT_V8QI:
24311 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24313 case IX86_BUILTIN_VEC_EXT_V2DF:
24314 case IX86_BUILTIN_VEC_EXT_V2DI:
24315 case IX86_BUILTIN_VEC_EXT_V4SF:
24316 case IX86_BUILTIN_VEC_EXT_V4SI:
24317 case IX86_BUILTIN_VEC_EXT_V8HI:
24318 case IX86_BUILTIN_VEC_EXT_V2SI:
24319 case IX86_BUILTIN_VEC_EXT_V4HI:
24320 case IX86_BUILTIN_VEC_EXT_V16QI:
24321 return ix86_expand_vec_ext_builtin (exp, target);
24323 case IX86_BUILTIN_VEC_SET_V2DI:
24324 case IX86_BUILTIN_VEC_SET_V4SF:
24325 case IX86_BUILTIN_VEC_SET_V4SI:
24326 case IX86_BUILTIN_VEC_SET_V8HI:
24327 case IX86_BUILTIN_VEC_SET_V4HI:
24328 case IX86_BUILTIN_VEC_SET_V16QI:
24329 return ix86_expand_vec_set_builtin (exp);
24331 case IX86_BUILTIN_VEC_PERM_V2DF:
24332 case IX86_BUILTIN_VEC_PERM_V4SF:
24333 case IX86_BUILTIN_VEC_PERM_V2DI:
24334 case IX86_BUILTIN_VEC_PERM_V4SI:
24335 case IX86_BUILTIN_VEC_PERM_V8HI:
24336 case IX86_BUILTIN_VEC_PERM_V16QI:
24337 case IX86_BUILTIN_VEC_PERM_V2DI_U:
24338 case IX86_BUILTIN_VEC_PERM_V4SI_U:
24339 case IX86_BUILTIN_VEC_PERM_V8HI_U:
24340 case IX86_BUILTIN_VEC_PERM_V16QI_U:
24341 case IX86_BUILTIN_VEC_PERM_V4DF:
24342 case IX86_BUILTIN_VEC_PERM_V8SF:
24343 return ix86_expand_vec_perm_builtin (exp);
24345 case IX86_BUILTIN_INFQ:
24346 case IX86_BUILTIN_HUGE_VALQ:
24348 REAL_VALUE_TYPE inf;
24352 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24354 tmp = validize_mem (force_const_mem (mode, tmp));
24357 target = gen_reg_rtx (mode);
24359 emit_move_insn (target, tmp);
24363 case IX86_BUILTIN_LLWPCB:
24364 arg0 = CALL_EXPR_ARG (exp, 0);
24365 op0 = expand_normal (arg0);
24366 icode = CODE_FOR_lwp_llwpcb;
24367 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24368 op0 = copy_to_mode_reg (Pmode, op0);
24369 emit_insn (gen_lwp_llwpcb (op0));
24372 case IX86_BUILTIN_SLWPCB:
24373 icode = CODE_FOR_lwp_slwpcb;
24375 || ! (*insn_data[icode].operand[0].predicate) (target, Pmode))
24376 target = gen_reg_rtx (Pmode);
24377 emit_insn (gen_lwp_slwpcb (target));
24384 for (i = 0, d = bdesc_special_args;
24385 i < ARRAY_SIZE (bdesc_special_args);
24387 if (d->code == fcode)
24388 return ix86_expand_special_args_builtin (d, exp, target);
24390 for (i = 0, d = bdesc_args;
24391 i < ARRAY_SIZE (bdesc_args);
24393 if (d->code == fcode)
24396 case IX86_BUILTIN_FABSQ:
24397 case IX86_BUILTIN_COPYSIGNQ:
24399 /* Emit a normal call if SSE2 isn't available. */
24400 return expand_call (exp, target, ignore);
24402 return ix86_expand_args_builtin (d, exp, target);
24405 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24406 if (d->code == fcode)
24407 return ix86_expand_sse_comi (d, exp, target);
24409 for (i = 0, d = bdesc_pcmpestr;
24410 i < ARRAY_SIZE (bdesc_pcmpestr);
24412 if (d->code == fcode)
24413 return ix86_expand_sse_pcmpestr (d, exp, target);
24415 for (i = 0, d = bdesc_pcmpistr;
24416 i < ARRAY_SIZE (bdesc_pcmpistr);
24418 if (d->code == fcode)
24419 return ix86_expand_sse_pcmpistr (d, exp, target);
24421 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
24422 if (d->code == fcode)
24423 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
24424 (enum ix86_builtin_func_type)
24425 d->flag, d->comparison);
24427 gcc_unreachable ();
24430 /* Returns a function decl for a vectorized version of the builtin function
24431 with builtin function code FN and the result vector type TYPE, or NULL_TREE
24432 if it is not available. */
24435 ix86_builtin_vectorized_function (tree fndecl, tree type_out,
24438 enum machine_mode in_mode, out_mode;
24440 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
24442 if (TREE_CODE (type_out) != VECTOR_TYPE
24443 || TREE_CODE (type_in) != VECTOR_TYPE
24444 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
24447 out_mode = TYPE_MODE (TREE_TYPE (type_out));
24448 out_n = TYPE_VECTOR_SUBPARTS (type_out);
24449 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24450 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24454 case BUILT_IN_SQRT:
24455 if (out_mode == DFmode && out_n == 2
24456 && in_mode == DFmode && in_n == 2)
24457 return ix86_builtins[IX86_BUILTIN_SQRTPD];
24460 case BUILT_IN_SQRTF:
24461 if (out_mode == SFmode && out_n == 4
24462 && in_mode == SFmode && in_n == 4)
24463 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
24466 case BUILT_IN_LRINT:
24467 if (out_mode == SImode && out_n == 4
24468 && in_mode == DFmode && in_n == 2)
24469 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
24472 case BUILT_IN_LRINTF:
24473 if (out_mode == SImode && out_n == 4
24474 && in_mode == SFmode && in_n == 4)
24475 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
24478 case BUILT_IN_COPYSIGN:
24479 if (out_mode == DFmode && out_n == 2
24480 && in_mode == DFmode && in_n == 2)
24481 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
24484 case BUILT_IN_COPYSIGNF:
24485 if (out_mode == SFmode && out_n == 4
24486 && in_mode == SFmode && in_n == 4)
24487 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
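      /* Illustrative effect of this hook: given

	     for (i = 0; i < n; i++)
	       a[i] = sqrt (b[i]);

	 the vectorizer queries it with fn = BUILT_IN_SQRT and V2DF
	 vector types, and on SSE2 the scalar call is replaced by
	 IX86_BUILTIN_SQRTPD, i.e. one sqrtpd per two doubles.  */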
24494 /* Dispatch to a handler for a vectorization library. */
24495 if (ix86_veclib_handler)
24496 return (*ix86_veclib_handler) ((enum built_in_function) fn, type_out,
24502 /* Handler for an SVML-style interface to
24503 a library with vectorized intrinsics. */
24506 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
24509 tree fntype, new_fndecl, args;
24512 enum machine_mode el_mode, in_mode;
24515 /* The SVML library is suitable for unsafe math only.  */
24516 if (!flag_unsafe_math_optimizations)
24519 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24520 n = TYPE_VECTOR_SUBPARTS (type_out);
24521 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24522 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24523 if (el_mode != in_mode
24531 case BUILT_IN_LOG10:
24533 case BUILT_IN_TANH:
24535 case BUILT_IN_ATAN:
24536 case BUILT_IN_ATAN2:
24537 case BUILT_IN_ATANH:
24538 case BUILT_IN_CBRT:
24539 case BUILT_IN_SINH:
24541 case BUILT_IN_ASINH:
24542 case BUILT_IN_ASIN:
24543 case BUILT_IN_COSH:
24545 case BUILT_IN_ACOSH:
24546 case BUILT_IN_ACOS:
24547 if (el_mode != DFmode || n != 2)
24551 case BUILT_IN_EXPF:
24552 case BUILT_IN_LOGF:
24553 case BUILT_IN_LOG10F:
24554 case BUILT_IN_POWF:
24555 case BUILT_IN_TANHF:
24556 case BUILT_IN_TANF:
24557 case BUILT_IN_ATANF:
24558 case BUILT_IN_ATAN2F:
24559 case BUILT_IN_ATANHF:
24560 case BUILT_IN_CBRTF:
24561 case BUILT_IN_SINHF:
24562 case BUILT_IN_SINF:
24563 case BUILT_IN_ASINHF:
24564 case BUILT_IN_ASINF:
24565 case BUILT_IN_COSHF:
24566 case BUILT_IN_COSF:
24567 case BUILT_IN_ACOSHF:
24568 case BUILT_IN_ACOSF:
24569 if (el_mode != SFmode || n != 4)
24577 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24579 if (fn == BUILT_IN_LOGF)
24580 strcpy (name, "vmlsLn4");
24581 else if (fn == BUILT_IN_LOG)
24582 strcpy (name, "vmldLn2");
24585 sprintf (name, "vmls%s", bname+10);
24586 name[strlen (name)-1] = '4';
24589 sprintf (name, "vmld%s2", bname+10);
24591 /* Convert to uppercase. */
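  /* Worked example of the mangling (following the scheme above):
     BUILT_IN_SINF has the decl name "__builtin_sinf", so bname+10 is
     "sinf"; "vmls%s" gives "vmlssinf", the trailing letter is overwritten
     with the lane count to give "vmlssin4", and uppercasing the first stem
     letter yields the SVML entry point "vmlsSin4".  BUILT_IN_SIN similarly
     becomes "vmldSin2".  */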
24595 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24596 args = TREE_CHAIN (args))
24600 fntype = build_function_type_list (type_out, type_in, NULL);
24602 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24604 /* Build a function declaration for the vectorized function. */
24605 new_fndecl = build_decl (BUILTINS_LOCATION,
24606 FUNCTION_DECL, get_identifier (name), fntype);
24607 TREE_PUBLIC (new_fndecl) = 1;
24608 DECL_EXTERNAL (new_fndecl) = 1;
24609 DECL_IS_NOVOPS (new_fndecl) = 1;
24610 TREE_READONLY (new_fndecl) = 1;
24615 /* Handler for an ACML-style interface to
24616 a library with vectorized intrinsics. */
24619 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
24621 char name[20] = "__vr.._";
24622 tree fntype, new_fndecl, args;
24625 enum machine_mode el_mode, in_mode;
24628 /* The ACML library is 64-bit only and suitable for unsafe math only,
24629 as it does not correctly support parts of IEEE semantics with the
24630 required precision, such as denormals.  */
24632 || !flag_unsafe_math_optimizations)
24635 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24636 n = TYPE_VECTOR_SUBPARTS (type_out);
24637 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24638 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24639 if (el_mode != in_mode
24649 case BUILT_IN_LOG2:
24650 case BUILT_IN_LOG10:
24653 if (el_mode != DFmode
24658 case BUILT_IN_SINF:
24659 case BUILT_IN_COSF:
24660 case BUILT_IN_EXPF:
24661 case BUILT_IN_POWF:
24662 case BUILT_IN_LOGF:
24663 case BUILT_IN_LOG2F:
24664 case BUILT_IN_LOG10F:
24667 if (el_mode != SFmode
24676 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24677 sprintf (name + 7, "%s", bname+10);
24680 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24681 args = TREE_CHAIN (args))
24685 fntype = build_function_type_list (type_out, type_in, NULL);
24687 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24689 /* Build a function declaration for the vectorized function. */
24690 new_fndecl = build_decl (BUILTINS_LOCATION,
24691 FUNCTION_DECL, get_identifier (name), fntype);
24692 TREE_PUBLIC (new_fndecl) = 1;
24693 DECL_EXTERNAL (new_fndecl) = 1;
24694 DECL_IS_NOVOPS (new_fndecl) = 1;
24695 TREE_READONLY (new_fndecl) = 1;
24701 /* Returns a decl of a function that implements conversion of an integer vector
24702 into a floating-point vector, or vice-versa. DEST_TYPE and SRC_TYPE
24703 are the types involved when converting according to CODE.
24704 Return NULL_TREE if it is not available. */
24707 ix86_vectorize_builtin_conversion (unsigned int code,
24708 tree dest_type, tree src_type)
24716 switch (TYPE_MODE (src_type))
24719 switch (TYPE_MODE (dest_type))
24722 return (TYPE_UNSIGNED (src_type)
24723 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
24724 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
24726 return (TYPE_UNSIGNED (src_type)
24728 : ix86_builtins[IX86_BUILTIN_CVTDQ2PD256]);
24734 switch (TYPE_MODE (dest_type))
24737 return (TYPE_UNSIGNED (src_type)
24739 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
24748 case FIX_TRUNC_EXPR:
24749 switch (TYPE_MODE (dest_type))
24752 switch (TYPE_MODE (src_type))
24755 return (TYPE_UNSIGNED (dest_type)
24757 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ]);
24759 return (TYPE_UNSIGNED (dest_type)
24761 : ix86_builtins[IX86_BUILTIN_CVTTPD2DQ256]);
24768 switch (TYPE_MODE (src_type))
24771 return (TYPE_UNSIGNED (dest_type)
24773 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ256]);
24790 /* Returns a code for a target-specific builtin that implements
24791 the reciprocal of the function, or NULL_TREE if not available.  */
24794 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
24795 bool sqrt ATTRIBUTE_UNUSED)
24797 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
24798 && flag_finite_math_only && !flag_trapping_math
24799 && flag_unsafe_math_optimizations))
24803 /* Machine dependent builtins. */
24806 /* Vectorized version of sqrt to rsqrt conversion. */
24807 case IX86_BUILTIN_SQRTPS_NR:
24808 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
24814 /* Normal builtins. */
24817 /* Sqrt to rsqrt conversion. */
24818 case BUILT_IN_SQRTF:
24819 return ix86_builtins[IX86_BUILTIN_RSQRTF];
24826 /* Helper for avx_vpermilps256_operand et al. This is also used by
24827 the expansion functions to turn the parallel back into a mask.
24828 The return value is 0 for no match and the imm8+1 for a match. */
24831 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
24833 unsigned i, nelt = GET_MODE_NUNITS (mode);
24835 unsigned char ipar[8];
24837 if (XVECLEN (par, 0) != (int) nelt)
24840 /* Validate that all of the elements are constants, and not totally
24841 out of range. Copy the data into an integral array to make the
24842 subsequent checks easier. */
24843 for (i = 0; i < nelt; ++i)
24845 rtx er = XVECEXP (par, 0, i);
24846 unsigned HOST_WIDE_INT ei;
24848 if (!CONST_INT_P (er))
24859 /* In the 256-bit DFmode case, we can only move elements within a 128-bit lane.  */
24861 for (i = 0; i < 2; ++i)
24865 mask |= ipar[i] << i;
24867 for (i = 2; i < 4; ++i)
24871 mask |= (ipar[i] - 2) << i;
24876 /* In the 256-bit SFmode case, we have full freedom of movement
24877 within the low 128-bit lane, but the high 128-bit lane must
24878 mirror the exact same pattern. */
24879 for (i = 0; i < 4; ++i)
24880 if (ipar[i] + 4 != ipar[i + 4])
24887 /* In the 128-bit case, we've full freedom in the placement of
24888 the elements from the source operand. */
24889 for (i = 0; i < nelt; ++i)
24890 mask |= ipar[i] << (i * (nelt / 2));
24894 gcc_unreachable ();
24897 /* Make sure success has a non-zero value by adding one. */
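/* Worked example: for V4SF, the PARALLEL (1 0 3 2) -- swap the two
   elements of each pair -- packs as 1<<0 | 0<<2 | 3<<4 | 2<<6 = 0xb1,
   so the function returns 0xb1 + 1 and zero still means "no match".  */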
24901 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
24902 the expansion functions to turn the parallel back into a mask.
24903 The return value is 0 for no match and the imm8+1 for a match. */
24906 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
24908 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
24910 unsigned char ipar[8];
24912 if (XVECLEN (par, 0) != (int) nelt)
24915 /* Validate that all of the elements are constants, and not totally
24916 out of range. Copy the data into an integral array to make the
24917 subsequent checks easier. */
24918 for (i = 0; i < nelt; ++i)
24920 rtx er = XVECEXP (par, 0, i);
24921 unsigned HOST_WIDE_INT ei;
24923 if (!CONST_INT_P (er))
24926 if (ei >= 2 * nelt)
24931 /* Validate that each half of the permute selects a contiguous run of elements.  */
24932 for (i = 0; i < nelt2 - 1; ++i)
24933 if (ipar[i] + 1 != ipar[i + 1])
24935 for (i = nelt2; i < nelt - 1; ++i)
24936 if (ipar[i] + 1 != ipar[i + 1])
24939 /* Reconstruct the mask. */
24940 for (i = 0; i < 2; ++i)
24942 unsigned e = ipar[i * nelt2];
24946 mask |= e << (i * 4);
24949 /* Make sure success has a non-zero value by adding one. */
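/* Worked example: for V8SF, the PARALLEL (0 1 2 3 12 13 14 15) takes the
   low lane of the first operand and the high lane of the second; the two
   halves reduce to lane numbers 0 and 3, packing as 0 | 3<<4 = 0x30,
   returned as 0x30 + 1.  */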
24954 /* Store OPERAND to memory after reload is completed.  This means
24955 that we can't easily use assign_stack_local. */
24957 ix86_force_to_memory (enum machine_mode mode, rtx operand)
24961 gcc_assert (reload_completed);
24962 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
24964 result = gen_rtx_MEM (mode,
24965 gen_rtx_PLUS (Pmode,
24967 GEN_INT (-RED_ZONE_SIZE)));
24968 emit_move_insn (result, operand);
24970 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
24976 operand = gen_lowpart (DImode, operand);
24980 gen_rtx_SET (VOIDmode,
24981 gen_rtx_MEM (DImode,
24982 gen_rtx_PRE_DEC (DImode,
24983 stack_pointer_rtx)),
24987 gcc_unreachable ();
24989 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24998 split_di (&operand, 1, operands, operands + 1);
25000 gen_rtx_SET (VOIDmode,
25001 gen_rtx_MEM (SImode,
25002 gen_rtx_PRE_DEC (Pmode,
25003 stack_pointer_rtx)),
25006 gen_rtx_SET (VOIDmode,
25007 gen_rtx_MEM (SImode,
25008 gen_rtx_PRE_DEC (Pmode,
25009 stack_pointer_rtx)),
25014 /* Store HImodes as SImodes. */
25015 operand = gen_lowpart (SImode, operand);
25019 gen_rtx_SET (VOIDmode,
25020 gen_rtx_MEM (GET_MODE (operand),
25021 gen_rtx_PRE_DEC (SImode,
25022 stack_pointer_rtx)),
25026 gcc_unreachable ();
25028 result = gen_rtx_MEM (mode, stack_pointer_rtx);
25033 /* Free operand from the memory. */
25035 ix86_free_from_memory (enum machine_mode mode)
25037 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
25041 if (mode == DImode || TARGET_64BIT)
25045 /* Use LEA to deallocate stack space. In peephole2 it will be converted
25046 to a pop or add instruction if registers are available.  */
25047 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
25048 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25053 /* Implement TARGET_IRA_COVER_CLASSES. If -mfpmath=sse, we prefer
25054 SSE_REGS to FLOAT_REGS if their costs for a pseudo are the
25056 static const enum reg_class *
25057 i386_ira_cover_classes (void)
25059 static const enum reg_class sse_fpmath_classes[] = {
25060 GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
25062 static const enum reg_class no_sse_fpmath_classes[] = {
25063 GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
25066 return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
25069 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
25070 QImode must go into class Q_REGS.
25071 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
25072 movdf to do mem-to-mem moves through integer regs. */
25074 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
25076 enum machine_mode mode = GET_MODE (x);
25078 /* We're only allowed to return a subclass of CLASS. Many of the
25079 following checks fail for NO_REGS, so eliminate that early. */
25080 if (regclass == NO_REGS)
25083 /* All classes can load zeros. */
25084 if (x == CONST0_RTX (mode))
25087 /* Force constants into memory if we are loading a (nonzero) constant into
25088 an MMX or SSE register. This is because there are no MMX/SSE instructions
25089 to load from a constant. */
25091 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
25094 /* Prefer SSE regs only, if we can use them for math. */
25095 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
25096 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
25098 /* Floating-point constants need more complex checks. */
25099 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
25101 /* General regs can load everything. */
25102 if (reg_class_subset_p (regclass, GENERAL_REGS))
25105 /* Floats can load 0 and 1 plus some others. Note that we eliminated
25106 zero above. We only want to wind up preferring 80387 registers if
25107 we plan on doing computation with them. */
25109 && standard_80387_constant_p (x))
25111 /* Limit class to non-sse. */
25112 if (regclass == FLOAT_SSE_REGS)
25114 if (regclass == FP_TOP_SSE_REGS)
25116 if (regclass == FP_SECOND_SSE_REGS)
25117 return FP_SECOND_REG;
25118 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
25125 /* Generally when we see PLUS here, it's the function invariant
25126 (plus soft-fp const_int), which can only be computed into general
25128 regs.  */
25129 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
25131 /* QImode constants are easy to load, but non-constant QImode data
25132 must go into Q_REGS. */
25133 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
25135 if (reg_class_subset_p (regclass, Q_REGS))
25137 if (reg_class_subset_p (Q_REGS, regclass))
25145 /* Discourage putting floating-point values in SSE registers unless
25146 SSE math is being used, and likewise for the 387 registers. */
25148 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
25150 enum machine_mode mode = GET_MODE (x);
25152 /* Restrict the output reload class to the register bank that we are doing
25153 math on. If we would like not to return a subset of CLASS, reject this
25154 alternative: if reload cannot do this, it will still use its choice. */
25155 mode = GET_MODE (x);
25156 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
25157 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
25159 if (X87_FLOAT_MODE_P (mode))
25161 if (regclass == FP_TOP_SSE_REGS)
25163 else if (regclass == FP_SECOND_SSE_REGS)
25164 return FP_SECOND_REG;
25166 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
25172 static enum reg_class
25173 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
25174 enum machine_mode mode,
25175 secondary_reload_info *sri ATTRIBUTE_UNUSED)
25177 /* QImode spills from non-QI registers require
25178 an intermediate register on 32-bit targets.  */
25179 if (!in_p && mode == QImode && !TARGET_64BIT
25180 && (rclass == GENERAL_REGS
25181 || rclass == LEGACY_REGS
25182 || rclass == INDEX_REGS))
25191 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
25192 regno = true_regnum (x);
25194 /* Return Q_REGS if the operand is in memory. */
25202 /* If we are copying between general and FP registers, we need a memory
25203 location. The same is true for SSE and MMX registers.
25205 To optimize register_move_cost performance, allow inline variant.
25207 The macro can't work reliably when one of the CLASSES is a class containing
25208 registers from multiple units (SSE, MMX, integer). We avoid this by never
25209 combining those units in single alternative in the machine description.
25210 Ensure that this constraint holds to avoid unexpected surprises.
25212 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
25213 enforce these sanity checks. */
25216 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25217 enum machine_mode mode, int strict)
25219 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
25220 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
25221 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
25222 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
25223 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
25224 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
25226 gcc_assert (!strict);
25230 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
25233 /* ??? This is a lie. We do have moves between mmx/general, and for
25234 mmx/sse2. But by saying we need secondary memory we discourage the
25235 register allocator from using the mmx registers unless needed. */
25236 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
25239 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25241 /* SSE1 doesn't have any direct moves from other classes. */
25245 /* If the target says that inter-unit moves are more expensive
25246 than moving through memory, then don't generate them. */
25247 if (!TARGET_INTER_UNIT_MOVES)
25250 /* Between SSE and general, we have moves no larger than word size. */
25251 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
25259 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25260 enum machine_mode mode, int strict)
25262 return inline_secondary_memory_needed (class1, class2, mode, strict);
25265 /* Return true if the registers in CLASS cannot represent the change from
25266 modes FROM to TO. */
25269 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
25270 enum reg_class regclass)
25275 /* x87 registers can't do subreg at all, as all values are reformatted
25276 to extended precision. */
25277 if (MAYBE_FLOAT_CLASS_P (regclass))
25280 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25282 /* Vector registers do not support QI or HImode loads. If we don't
25283 disallow a change to these modes, reload will assume it's ok to
25284 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25285 the vec_dupv4hi pattern. */
25286 if (GET_MODE_SIZE (from) < 4)
25289 /* Vector registers do not support subreg with nonzero offsets, which
25290 are otherwise valid for integer registers. Since we can't see
25291 whether we have a nonzero offset from here, prohibit all
25292 nonparadoxical subregs changing size. */
25293 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
25300 /* Return the cost of moving data of mode M between a
25301 register and memory. A value of 2 is the default; this cost is
25302 relative to those in `REGISTER_MOVE_COST'.
25304 This function is used extensively by register_move_cost, which is used
25305 to build tables at startup.  Make it inline in this case.
25306 When IN is 2, return maximum of in and out move cost.
25308 If moving between registers and memory is more expensive than
25309 between two registers, you should define this macro to express the relative cost.
25312 Also model the increased moving costs of QImode registers in non Q_REGS classes.  */
25316 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25320 if (FLOAT_CLASS_P (regclass))
25338 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25339 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25341 if (SSE_CLASS_P (regclass))
25344 switch (GET_MODE_SIZE (mode))
25359 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25360 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25362 if (MMX_CLASS_P (regclass))
25365 switch (GET_MODE_SIZE (mode))
25377 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25378 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25380 switch (GET_MODE_SIZE (mode))
25383 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25386 return ix86_cost->int_store[0];
25387 if (TARGET_PARTIAL_REG_DEPENDENCY
25388 && optimize_function_for_speed_p (cfun))
25389 cost = ix86_cost->movzbl_load;
25391 cost = ix86_cost->int_load[0];
25393 return MAX (cost, ix86_cost->int_store[0]);
25399 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25401 return ix86_cost->movzbl_load;
25403 return ix86_cost->int_store[0] + 4;
25408 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25409 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25411 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
25412 if (mode == TFmode)
25415 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25417 cost = ix86_cost->int_load[2];
25419 cost = ix86_cost->int_store[2];
25420 return (cost * (((int) GET_MODE_SIZE (mode)
25421 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
25426 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
25428 return inline_memory_move_cost (mode, regclass, in);
25432 /* Return the cost of moving data from a register in class CLASS1 to
25433 one in class CLASS2.
25435 It is not required that the cost always equal 2 when FROM is the same as TO;
25436 on some machines it is expensive to move between registers if they are not
25437 general registers. */
25440 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25441 enum reg_class class2)
25443 /* If we require secondary memory, compute the cost of the store followed
25444 by a load. To avoid bad register allocation choices, this needs to be
25445 *at least* as high as the symmetric MEMORY_MOVE_COST. */
25447 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25451 cost += inline_memory_move_cost (mode, class1, 2);
25452 cost += inline_memory_move_cost (mode, class2, 2);
25454 /* When copying from a general purpose register we may emit multiple
25455 stores followed by a single load, causing a memory-size-mismatch stall.
25456 Count this as an arbitrarily high cost of 20. */
25457 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25460 /* In the case of FP/MMX moves, the registers actually overlap, and we
25461 have to switch modes in order to treat them differently. */
25462 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25463 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25469 /* Moves between SSE/MMX and integer unit are expensive. */
25470 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25471 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25473 /* ??? By keeping the returned value relatively high, we limit the number
25474 of moves between integer and MMX/SSE registers for all targets.
25475 Additionally, the high value prevents a problem with x86_modes_tieable_p(),
25476 where integer modes in MMX/SSE registers are not tieable
25477 because of missing QImode and HImode moves to, from or between
25478 MMX/SSE registers. */
25479 return MAX (8, ix86_cost->mmxsse_to_integer);
25481 if (MAYBE_FLOAT_CLASS_P (class1))
25482 return ix86_cost->fp_move;
25483 if (MAYBE_SSE_CLASS_P (class1))
25484 return ix86_cost->sse_move;
25485 if (MAYBE_MMX_CLASS_P (class1))
25486 return ix86_cost->mmx_move;
25490 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25493 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25495 /* Only the flags register can hold CCmode values, and it can hold nothing else. */
25496 if (CC_REGNO_P (regno))
25497 return GET_MODE_CLASS (mode) == MODE_CC;
25498 if (GET_MODE_CLASS (mode) == MODE_CC
25499 || GET_MODE_CLASS (mode) == MODE_RANDOM
25500 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25502 if (FP_REGNO_P (regno))
25503 return VALID_FP_MODE_P (mode);
25504 if (SSE_REGNO_P (regno))
25506 /* We implement the move patterns for all vector modes into and
25507 out of SSE registers, even when no operation instructions
25508 are available. OImode moves are available only when AVX is enabled. */
25510 return ((TARGET_AVX && mode == OImode)
25511 || VALID_AVX256_REG_MODE (mode)
25512 || VALID_SSE_REG_MODE (mode)
25513 || VALID_SSE2_REG_MODE (mode)
25514 || VALID_MMX_REG_MODE (mode)
25515 || VALID_MMX_REG_MODE_3DNOW (mode));
25517 if (MMX_REGNO_P (regno))
25519 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25520 so if the register is available at all, then we can move data of
25521 the given mode into or out of it. */
25522 return (VALID_MMX_REG_MODE (mode)
25523 || VALID_MMX_REG_MODE_3DNOW (mode));
25526 if (mode == QImode)
25528 /* Take care with QImode values: they can be held in non-QI regs,
25529 but then they can cause partial register stalls. */
25530 if (regno <= BX_REG || TARGET_64BIT)
25532 if (!TARGET_PARTIAL_REG_STALL)
25534 return reload_in_progress || reload_completed;
25536 /* We handle both integers and floats in the general purpose registers. */
25537 else if (VALID_INT_MODE_P (mode))
25539 else if (VALID_FP_MODE_P (mode))
25541 else if (VALID_DFP_MODE_P (mode))
25543 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25544 on to use that value in smaller contexts, this can easily force a
25545 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25546 supporting DImode, allow it. */
25547 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25553 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25554 tieable integer mode. */
25557 ix86_tieable_integer_mode_p (enum machine_mode mode)
25566 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25569 return TARGET_64BIT;
25576 /* Return true if MODE1 is accessible in a register that can hold MODE2
25577 without copying. That is, all register classes that can hold MODE2
25578 can also hold MODE1. */
25581 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25583 if (mode1 == mode2)
25586 if (ix86_tieable_integer_mode_p (mode1)
25587 && ix86_tieable_integer_mode_p (mode2))
25590 /* MODE2 being XFmode implies fp stack or general regs, which means we
25591 can tie any smaller floating point modes to it. Note that we do not
25592 tie this with TFmode. */
25593 if (mode2 == XFmode)
25594 return mode1 == SFmode || mode1 == DFmode;
25596 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
25597 that we can tie it with SFmode. */
25598 if (mode2 == DFmode)
25599 return mode1 == SFmode;
25601 /* If MODE2 is only appropriate for an SSE register, then tie with
25602 any other mode acceptable to SSE registers. */
25603 if (GET_MODE_SIZE (mode2) == 16
25604 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
25605 return (GET_MODE_SIZE (mode1) == 16
25606 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
25608 /* If MODE2 is appropriate for an MMX register, then tie
25609 with any other mode acceptable to MMX registers. */
25610 if (GET_MODE_SIZE (mode2) == 8
25611 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
25612 return (GET_MODE_SIZE (mode1) == 8
25613 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
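/* For illustration (editorial note): under these rules SFmode and DFmode
   tie with XFmode on the x87 stack, SFmode ties with DFmode, any two
   16-byte modes that are valid in SSE registers (say V4SFmode and
   V4SImode) tie with each other, and an 8-byte MMX-valid mode such as
   V2SImode ties only with other 8-byte MMX-valid modes.  */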
25618 /* Compute a (partial) cost for rtx X. Return true if the complete
25619 cost has been computed, and false if subexpressions should be
25620 scanned. In either case, *TOTAL contains the cost result. */
25623 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
25625 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
25626 enum machine_mode mode = GET_MODE (x);
25627 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
25635 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
25637 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
25639 else if (flag_pic && SYMBOLIC_CONST (x)
25641 || (GET_CODE (x) != LABEL_REF
25642 && (GET_CODE (x) != SYMBOL_REF
25643 || !SYMBOL_REF_LOCAL_P (x)))))
25650 if (mode == VOIDmode)
25653 switch (standard_80387_constant_p (x))
25658 default: /* Other constants */
25663 /* Start with (MEM (SYMBOL_REF)), since that's where
25664 it'll probably end up. Add a penalty for size. */
25665 *total = (COSTS_N_INSNS (1)
25666 + (flag_pic != 0 && !TARGET_64BIT)
25667 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
25673 /* Zero extension is often completely free on x86_64, so make
25674 it as cheap as possible. */
25675 if (TARGET_64BIT && mode == DImode
25676 && GET_MODE (XEXP (x, 0)) == SImode)
25678 else if (TARGET_ZERO_EXTEND_WITH_AND)
25679 *total = cost->add;
25681 *total = cost->movzx;
25685 *total = cost->movsx;
25689 if (CONST_INT_P (XEXP (x, 1))
25690 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
25692 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25695 *total = cost->add;
25698 if ((value == 2 || value == 3)
25699 && cost->lea <= cost->shift_const)
25701 *total = cost->lea;
25711 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
25713 if (CONST_INT_P (XEXP (x, 1)))
25715 if (INTVAL (XEXP (x, 1)) > 32)
25716 *total = cost->shift_const + COSTS_N_INSNS (2);
25718 *total = cost->shift_const * 2;
25722 if (GET_CODE (XEXP (x, 1)) == AND)
25723 *total = cost->shift_var * 2;
25725 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
25730 if (CONST_INT_P (XEXP (x, 1)))
25731 *total = cost->shift_const;
25733 *total = cost->shift_var;
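/* Worked example (editorial): on a 32-bit target, a DImode shift by the
   constant 40 (> 32) decomposes into a word move plus a narrower shift,
   so the branch above charges shift_const + COSTS_N_INSNS (2); a DImode
   shift by a variable amount is charged shift_var * 6 + COSTS_N_INSNS (2),
   unless the count arrives masked by an AND, where the cheaper
   shift_var * 2 estimate applies.  */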
25738 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25740 /* ??? SSE scalar cost should be used here. */
25741 *total = cost->fmul;
25744 else if (X87_FLOAT_MODE_P (mode))
25746 *total = cost->fmul;
25749 else if (FLOAT_MODE_P (mode))
25751 /* ??? SSE vector cost should be used here. */
25752 *total = cost->fmul;
25757 rtx op0 = XEXP (x, 0);
25758 rtx op1 = XEXP (x, 1);
25760 if (CONST_INT_P (XEXP (x, 1)))
25762 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25763 for (nbits = 0; value != 0; value &= value - 1)
25767 /* This is arbitrary. */
25770 /* Compute costs correctly for widening multiplication. */
25771 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
25772 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
25773 == GET_MODE_SIZE (mode))
25775 int is_mulwiden = 0;
25776 enum machine_mode inner_mode = GET_MODE (op0);
25778 if (GET_CODE (op0) == GET_CODE (op1))
25779 is_mulwiden = 1, op1 = XEXP (op1, 0);
25780 else if (CONST_INT_P (op1))
25782 if (GET_CODE (op0) == SIGN_EXTEND)
25783 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
25786 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
25790 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
25793 *total = (cost->mult_init[MODE_INDEX (mode)]
25794 + nbits * cost->mult_bit
25795 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
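/* Worked example (editorial): multiplying an SImode value by the constant
   10 (binary 1010, two set bits) makes the loop above count nbits == 2,
   so the total is mult_init[MODE_INDEX (SImode)] + 2 * mult_bit plus the
   costs of the two operands; a multiply by a non-constant falls back to
   an arbitrary fixed nbits.  */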
25804 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25805 /* ??? SSE cost should be used here. */
25806 *total = cost->fdiv;
25807 else if (X87_FLOAT_MODE_P (mode))
25808 *total = cost->fdiv;
25809 else if (FLOAT_MODE_P (mode))
25810 /* ??? SSE vector cost should be used here. */
25811 *total = cost->fdiv;
25813 *total = cost->divide[MODE_INDEX (mode)];
25817 if (GET_MODE_CLASS (mode) == MODE_INT
25818 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
25820 if (GET_CODE (XEXP (x, 0)) == PLUS
25821 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
25822 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
25823 && CONSTANT_P (XEXP (x, 1)))
25825 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
25826 if (val == 2 || val == 4 || val == 8)
25828 *total = cost->lea;
25829 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25830 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
25831 outer_code, speed);
25832 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25836 else if (GET_CODE (XEXP (x, 0)) == MULT
25837 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
25839 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
25840 if (val == 2 || val == 4 || val == 8)
25842 *total = cost->lea;
25843 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25844 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25848 else if (GET_CODE (XEXP (x, 0)) == PLUS)
25850 *total = cost->lea;
25851 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25852 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25853 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
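/* For illustration (editorial): the (plus (plus (mult X 4) Y) C) shape
   costed above corresponds to a single address-generation instruction,
   e.g.

     leal C(%ebx,%ecx,4), %eax

   which is why only cost->lea plus the operand costs is charged here,
   rather than separate shift and add costs.  */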
25860 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25862 /* ??? SSE cost should be used here. */
25863 *total = cost->fadd;
25866 else if (X87_FLOAT_MODE_P (mode))
25868 *total = cost->fadd;
25871 else if (FLOAT_MODE_P (mode))
25873 /* ??? SSE vector cost should be used here. */
25874 *total = cost->fadd;
25882 if (!TARGET_64BIT && mode == DImode)
25884 *total = (cost->add * 2
25885 + (rtx_cost (XEXP (x, 0), outer_code, speed)
25886 << (GET_MODE (XEXP (x, 0)) != DImode))
25887 + (rtx_cost (XEXP (x, 1), outer_code, speed)
25888 << (GET_MODE (XEXP (x, 1)) != DImode)));
25894 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25896 /* ??? SSE cost should be used here. */
25897 *total = cost->fchs;
25900 else if (X87_FLOAT_MODE_P (mode))
25902 *total = cost->fchs;
25905 else if (FLOAT_MODE_P (mode))
25907 /* ??? SSE vector cost should be used here. */
25908 *total = cost->fchs;
25914 if (!TARGET_64BIT && mode == DImode)
25915 *total = cost->add * 2;
25917 *total = cost->add;
25921 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
25922 && XEXP (XEXP (x, 0), 1) == const1_rtx
25923 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
25924 && XEXP (x, 1) == const0_rtx)
25926 /* This kind of construct is implemented using test[bwl].
25927 Treat it as if we had an AND. */
25928 *total = (cost->add
25929 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
25930 + rtx_cost (const1_rtx, outer_code, speed));
25936 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
25941 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25942 /* ??? SSE cost should be used here. */
25943 *total = cost->fabs;
25944 else if (X87_FLOAT_MODE_P (mode))
25945 *total = cost->fabs;
25946 else if (FLOAT_MODE_P (mode))
25947 /* ??? SSE vector cost should be used here. */
25948 *total = cost->fabs;
25952 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25953 /* ??? SSE cost should be used here. */
25954 *total = cost->fsqrt;
25955 else if (X87_FLOAT_MODE_P (mode))
25956 *total = cost->fsqrt;
25957 else if (FLOAT_MODE_P (mode))
25958 /* ??? SSE vector cost should be used here. */
25959 *total = cost->fsqrt;
25963 if (XINT (x, 1) == UNSPEC_TP)
25970 case VEC_DUPLICATE:
25971 /* ??? Assume all of these vector manipulation patterns are
25972 recognizable, in which case they all pretty much have the same cost. */
25974 *total = COSTS_N_INSNS (1);
25984 static int current_machopic_label_num;
25986 /* Given a symbol name and its associated stub, write out the
25987 definition of the stub. */
25990 machopic_output_stub (FILE *file, const char *symb, const char *stub)
25992 unsigned int length;
25993 char *binder_name, *symbol_name, lazy_ptr_name[32];
25994 int label = ++current_machopic_label_num;
25996 /* For 64-bit we shouldn't get here. */
25997 gcc_assert (!TARGET_64BIT);
25999 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
26000 symb = (*targetm.strip_name_encoding) (symb);
26002 length = strlen (stub);
26003 binder_name = XALLOCAVEC (char, length + 32);
26004 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
26006 length = strlen (symb);
26007 symbol_name = XALLOCAVEC (char, length + 32);
26008 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
26010 sprintf (lazy_ptr_name, "L%d$lz", label);
26013 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
26015 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
26017 fprintf (file, "%s:\n", stub);
26018 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26022 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
26023 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
26024 fprintf (file, "\tjmp\t*%%edx\n");
26027 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
26029 fprintf (file, "%s:\n", binder_name);
26033 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
26034 fputs ("\tpushl\t%eax\n", file);
26037 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
26039 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
26041 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
26042 fprintf (file, "%s:\n", lazy_ptr_name);
26043 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26044 fprintf (file, ASM_LONG "%s\n", binder_name);
26046 #endif /* TARGET_MACHO */
26048 /* Order the registers for the register allocator. */
26051 x86_order_regs_for_local_alloc (void)
26056 /* First allocate the local general purpose registers. */
26057 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26058 if (GENERAL_REGNO_P (i) && call_used_regs[i])
26059 reg_alloc_order [pos++] = i;
26061 /* Global general purpose registers. */
26062 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26063 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
26064 reg_alloc_order [pos++] = i;
26066 /* x87 registers come first in case we are doing FP math using them. */
26068 if (!TARGET_SSE_MATH)
26069 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26070 reg_alloc_order [pos++] = i;
26072 /* SSE registers. */
26073 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
26074 reg_alloc_order [pos++] = i;
26075 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
26076 reg_alloc_order [pos++] = i;
26078 /* x87 registers. */
26079 if (TARGET_SSE_MATH)
26080 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26081 reg_alloc_order [pos++] = i;
26083 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
26084 reg_alloc_order [pos++] = i;
26086 /* Initialize the rest of the array, as we do not allocate some registers at all. */
26088 while (pos < FIRST_PSEUDO_REGISTER)
26089 reg_alloc_order [pos++] = 0;
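/* Editorial summary of the resulting priorities: call-clobbered general
   registers come first, then call-saved general registers, then whichever
   FP unit is doing the math (x87 before SSE when !TARGET_SSE_MATH, SSE
   before x87 otherwise), then MMX registers; slots for registers we never
   allocate are cleared to 0.  */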
26092 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
26093 struct attribute_spec.handler. */
26095 ix86_handle_abi_attribute (tree *node, tree name,
26096 tree args ATTRIBUTE_UNUSED,
26097 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26099 if (TREE_CODE (*node) != FUNCTION_TYPE
26100 && TREE_CODE (*node) != METHOD_TYPE
26101 && TREE_CODE (*node) != FIELD_DECL
26102 && TREE_CODE (*node) != TYPE_DECL)
26104 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26106 *no_add_attrs = true;
26111 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
26113 *no_add_attrs = true;
26117 /* The ms_abi and sysv_abi attributes are mutually exclusive. */
26118 if (is_attribute_p ("ms_abi", name))
26120 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
26122 error ("ms_abi and sysv_abi attributes are not compatible");
26127 else if (is_attribute_p ("sysv_abi", name))
26129 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
26131 error ("ms_abi and sysv_abi attributes are not compatible");
26140 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
26141 struct attribute_spec.handler. */
26143 ix86_handle_struct_attribute (tree *node, tree name,
26144 tree args ATTRIBUTE_UNUSED,
26145 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26148 if (DECL_P (*node))
26150 if (TREE_CODE (*node) == TYPE_DECL)
26151 type = &TREE_TYPE (*node);
26156 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
26157 || TREE_CODE (*type) == UNION_TYPE)))
26159 warning (OPT_Wattributes, "%qE attribute ignored",
26161 *no_add_attrs = true;
26164 else if ((is_attribute_p ("ms_struct", name)
26165 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
26166 || ((is_attribute_p ("gcc_struct", name)
26167 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
26169 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
26171 *no_add_attrs = true;
26178 ix86_handle_fndecl_attribute (tree *node, tree name,
26179 tree args ATTRIBUTE_UNUSED,
26180 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26182 if (TREE_CODE (*node) != FUNCTION_DECL)
26184 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26186 *no_add_attrs = true;
26192 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
26197 #ifndef HAVE_AS_IX86_SWAP
26198 sorry ("ms_hook_prologue attribute needs assembler swap suffix support");
26205 ix86_ms_bitfield_layout_p (const_tree record_type)
26207 return ((TARGET_MS_BITFIELD_LAYOUT
26208 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
26209 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
26212 /* Returns an expression indicating where the this parameter is
26213 located on entry to the FUNCTION. */
26216 x86_this_parameter (tree function)
26218 tree type = TREE_TYPE (function);
26219 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
26224 const int *parm_regs;
26226 if (ix86_function_type_abi (type) == MS_ABI)
26227 parm_regs = x86_64_ms_abi_int_parameter_registers;
26229 parm_regs = x86_64_int_parameter_registers;
26230 return gen_rtx_REG (DImode, parm_regs[aggr]);
26233 nregs = ix86_function_regparm (type, function);
26235 if (nregs > 0 && !stdarg_p (type))
26239 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
26240 regno = aggr ? DX_REG : CX_REG;
26241 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
26245 return gen_rtx_MEM (SImode,
26246 plus_constant (stack_pointer_rtx, 4));
26255 return gen_rtx_MEM (SImode,
26256 plus_constant (stack_pointer_rtx, 4));
26259 return gen_rtx_REG (SImode, regno);
26262 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
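/* Illustrative results (editorial): on 64-bit SysV, `this' arrives in
   %rdi, or %rsi when a hidden aggregate-return pointer occupies %rdi;
   under MS_ABI in %rcx or %rdx; with ia32 fastcall in %ecx or %edx; and
   in the plain stack-args case at 4(%esp), or 8(%esp) when the
   aggregate-return pointer is pushed first.  */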
26265 /* Determine whether x86_output_mi_thunk can succeed. */
26268 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
26269 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
26270 HOST_WIDE_INT vcall_offset, const_tree function)
26272 /* 64-bit can handle anything. */
26276 /* For 32-bit, everything's fine if we have one free register. */
26277 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
26280 /* Need a free register for vcall_offset. */
26284 /* Need a free register for GOT references. */
26285 if (flag_pic && !(*targetm.binds_local_p) (function))
26288 /* Otherwise ok. */
26292 /* Output the assembler code for a thunk function. THUNK_DECL is the
26293 declaration for the thunk function itself, FUNCTION is the decl for
26294 the target function. DELTA is an immediate constant offset to be
26295 added to THIS. If VCALL_OFFSET is nonzero, the word at
26296 *(*this + vcall_offset) should be added to THIS. */
26299 x86_output_mi_thunk (FILE *file,
26300 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
26301 HOST_WIDE_INT vcall_offset, tree function)
26304 rtx this_param = x86_this_parameter (function);
26307 /* Make sure unwind info is emitted for the thunk if needed. */
26308 final_start_function (emit_barrier (), file, 1);
26310 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
26311 pull it in now and let DELTA benefit. */
26312 if (REG_P (this_param))
26313 this_reg = this_param;
26314 else if (vcall_offset)
26316 /* Put the this parameter into %eax. */
26317 xops[0] = this_param;
26318 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
26319 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26322 this_reg = NULL_RTX;
26324 /* Adjust the this parameter by a fixed constant. */
26327 xops[0] = GEN_INT (delta);
26328 xops[1] = this_reg ? this_reg : this_param;
26331 if (!x86_64_general_operand (xops[0], DImode))
26333 tmp = gen_rtx_REG (DImode, R10_REG);
26335 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
26337 xops[1] = this_param;
26339 if (x86_maybe_negate_const_int (&xops[0], DImode))
26340 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
26342 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
26344 else if (x86_maybe_negate_const_int (&xops[0], SImode))
26345 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
26347 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
26350 /* Adjust the this parameter by a value stored in the vtable. */
26354 tmp = gen_rtx_REG (DImode, R10_REG);
26357 int tmp_regno = CX_REG;
26358 if (lookup_attribute ("fastcall",
26359 TYPE_ATTRIBUTES (TREE_TYPE (function)))
26360 || lookup_attribute ("thiscall",
26361 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26362 tmp_regno = AX_REG;
26363 tmp = gen_rtx_REG (SImode, tmp_regno);
26366 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26368 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26370 /* Adjust the this parameter. */
26371 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26372 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26374 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26375 xops[0] = GEN_INT (vcall_offset);
26377 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26378 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26380 xops[1] = this_reg;
26381 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26384 /* If necessary, drop THIS back to its stack slot. */
26385 if (this_reg && this_reg != this_param)
26387 xops[0] = this_reg;
26388 xops[1] = this_param;
26389 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26392 xops[0] = XEXP (DECL_RTL (function), 0);
26395 if (!flag_pic || (*targetm.binds_local_p) (function))
26396 output_asm_insn ("jmp\t%P0", xops);
26397 /* All thunks should be in the same object as their target,
26398 and thus binds_local_p should be true. */
26399 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26400 gcc_unreachable ();
26403 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26404 tmp = gen_rtx_CONST (Pmode, tmp);
26405 tmp = gen_rtx_MEM (QImode, tmp);
26407 output_asm_insn ("jmp\t%A0", xops);
26412 if (!flag_pic || (*targetm.binds_local_p) (function))
26413 output_asm_insn ("jmp\t%P0", xops);
26418 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26419 tmp = (gen_rtx_SYMBOL_REF
26421 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26422 tmp = gen_rtx_MEM (QImode, tmp);
26424 output_asm_insn ("jmp\t%0", xops);
26427 #endif /* TARGET_MACHO */
26429 tmp = gen_rtx_REG (SImode, CX_REG);
26430 output_set_got (tmp, NULL_RTX);
26433 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26434 output_asm_insn ("jmp\t{*}%1", xops);
26437 final_end_function ();
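/* Editorial example of the output (assumptions: ia32, non-PIC,
   DELTA == 8, no VCALL_OFFSET; the callee name shown is hypothetical):

     addl $8, 4(%esp)
     jmp  _ZN1B1fEv

   i.e. the `this' pointer is adjusted in place on the stack and control
   then transfers directly to the target function.  */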
26441 x86_file_start (void)
26443 default_file_start ();
26445 darwin_file_start ();
26447 if (X86_FILE_START_VERSION_DIRECTIVE)
26448 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26449 if (X86_FILE_START_FLTUSED)
26450 fputs ("\t.global\t__fltused\n", asm_out_file);
26451 if (ix86_asm_dialect == ASM_INTEL)
26452 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26456 x86_field_alignment (tree field, int computed)
26458 enum machine_mode mode;
26459 tree type = TREE_TYPE (field);
26461 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26463 mode = TYPE_MODE (strip_array_types (type));
26464 if (mode == DFmode || mode == DCmode
26465 || GET_MODE_CLASS (mode) == MODE_INT
26466 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26467 return MIN (32, computed);
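/* Editorial example: on ia32 without -malign-double, a structure such as

     struct S { char c; double d; };

   places D at offset 4, because the DFmode field's alignment is capped at
   MIN (32, 64) bits by the code above; with TARGET_ALIGN_DOUBLE or on
   64-bit targets the natural 64-bit alignment is kept.  */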
26471 /* Output assembler code to FILE to increment profiler label # LABELNO
26472 for profiling a function entry. */
26474 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26478 #ifndef NO_PROFILE_COUNTERS
26479 fprintf (file, "\tleaq\t" LPREFIX "P%d(%%rip),%%r11\n", labelno);
26482 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26483 fputs ("\tcall\t*" MCOUNT_NAME "@GOTPCREL(%rip)\n", file);
26485 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26489 #ifndef NO_PROFILE_COUNTERS
26490 fprintf (file, "\tleal\t" LPREFIX "P%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
26493 fputs ("\tcall\t*" MCOUNT_NAME "@GOT(%ebx)\n", file);
26497 #ifndef NO_PROFILE_COUNTERS
26498 fprintf (file, "\tmovl\t$" LPREFIX "P%d,%%" PROFILE_COUNT_REGISTER "\n",
26501 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26505 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26506 /* We don't have exact information about the insn sizes, but we may assume
26507 quite safely that we are informed about all 1 byte insns and memory
26508 address sizes. This is enough to eliminate unnecessary padding in the vast majority of cases. */
26512 min_insn_size (rtx insn)
26516 if (!INSN_P (insn) || !active_insn_p (insn))
26519 /* Discard alignments we have emitted, as well as jump table data. */
26520 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26521 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26523 if (JUMP_TABLE_DATA_P (insn))
26526 /* Important case: calls are always 5 bytes.
26527 It is common to have many calls in a row. */
26529 && symbolic_reference_mentioned_p (PATTERN (insn))
26530 && !SIBLING_CALL_P (insn))
26532 len = get_attr_length (insn);
26536 /* For normal instructions we rely on get_attr_length being exact,
26537 with a few exceptions. */
26538 if (!JUMP_P (insn))
26540 enum attr_type type = get_attr_type (insn);
26545 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
26546 || asm_noperands (PATTERN (insn)) >= 0)
26553 /* Otherwise trust get_attr_length. */
26557 l = get_attr_length_address (insn);
26558 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
26567 /* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16-byte window. */
26571 ix86_avoid_jump_mispredicts (void)
26573 rtx insn, start = get_insns ();
26574 int nbytes = 0, njumps = 0;
26577 /* Look for all minimal intervals of instructions containing 4 jumps.
26578 The intervals are bounded by START and INSN. NBYTES is the total
26579 size of the instructions in the interval, including INSN and not
26580 including START. When NBYTES is smaller than 16 bytes, it is possible
26581 that the ends of START and INSN land in the same 16-byte window.
26583 The smallest offset at which INSN can start is the case where START
26584 ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
26585 We add a p2align to the 16-byte window with maxskip 15 - NBYTES + sizeof (INSN). */
26587 for (insn = start; insn; insn = NEXT_INSN (insn))
26591 if (LABEL_P (insn))
26593 int align = label_to_alignment (insn);
26594 int max_skip = label_to_max_skip (insn);
26598 /* If align > 3, only up to 16 - max_skip - 1 bytes can
26599 already be in the current 16-byte window, because otherwise
26600 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
26601 bytes to reach a 16-byte boundary. */
26603 || (align <= 3 && max_skip != (1 << align) - 1))
26606 fprintf (dump_file, "Label %i with max_skip %i\n",
26607 INSN_UID (insn), max_skip);
26610 while (nbytes + max_skip >= 16)
26612 start = NEXT_INSN (start);
26613 if ((JUMP_P (start)
26614 && GET_CODE (PATTERN (start)) != ADDR_VEC
26615 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26617 njumps--, isjump = 1;
26620 nbytes -= min_insn_size (start);
26626 min_size = min_insn_size (insn);
26627 nbytes += min_size;
26629 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
26630 INSN_UID (insn), min_size);
26632 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26633 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26641 start = NEXT_INSN (start);
26642 if ((JUMP_P (start)
26643 && GET_CODE (PATTERN (start)) != ADDR_VEC
26644 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26646 njumps--, isjump = 1;
26649 nbytes -= min_insn_size (start);
26651 gcc_assert (njumps >= 0);
26653 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26654 INSN_UID (start), INSN_UID (insn), nbytes);
26656 if (njumps == 3 && isjump && nbytes < 16)
26658 int padsize = 15 - nbytes + min_insn_size (insn);
26661 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26662 INSN_UID (insn), padsize);
26663 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
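/* Worked example (editorial): if the interval already holds three jumps,
   INSN is itself a 2-byte jump, and nbytes == 12 (INSN included), then
   padsize is 15 - 12 + 2 == 5, so a 5-byte pad is emitted before INSN to
   push the fourth jump out of the 16-byte window.  */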
26669 /* The AMD Athlon works faster
26670 when RET is not the destination of a conditional jump and is not directly
26671 preceded by another jump instruction. We avoid the penalty by inserting a
26672 NOP just before such RET instructions. */
26674 ix86_pad_returns (void)
26679 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26681 basic_block bb = e->src;
26682 rtx ret = BB_END (bb);
26684 bool replace = false;
26686 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26687 || optimize_bb_for_size_p (bb))
26689 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26690 if (active_insn_p (prev) || LABEL_P (prev))
26692 if (prev && LABEL_P (prev))
26697 FOR_EACH_EDGE (e, ei, bb->preds)
26698 if (EDGE_FREQUENCY (e) && e->src->index >= 0
26699 && !(e->flags & EDGE_FALLTHRU))
26704 prev = prev_active_insn (ret);
26706 && ((JUMP_P (prev) && any_condjump_p (prev))
26709 /* Empty functions suffer a branch mispredict even when the jump destination
26710 is not visible to us. */
26711 if (!prev && !optimize_function_for_size_p (cfun))
26716 emit_jump_insn_before (gen_return_internal_long (), ret);
26722 /* Implement machine specific optimizations. We implement padding of returns
26723 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
26727 if (optimize && optimize_function_for_speed_p (cfun))
26729 if (TARGET_PAD_RETURNS)
26730 ix86_pad_returns ();
26731 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26732 if (TARGET_FOUR_JUMP_LIMIT)
26733 ix86_avoid_jump_mispredicts ();
26738 /* Return nonzero when a QImode register that must be represented via a REX prefix is used. */
26741 x86_extended_QIreg_mentioned_p (rtx insn)
26744 extract_insn_cached (insn);
26745 for (i = 0; i < recog_data.n_operands; i++)
26746 if (REG_P (recog_data.operand[i])
26747 && REGNO (recog_data.operand[i]) > BX_REG)
26752 /* Return nonzero when P points to a register encoded via a REX prefix.
26753 Called via for_each_rtx. */
26755 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
26757 unsigned int regno;
26760 regno = REGNO (*p);
26761 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
26764 /* Return true when INSN mentions register that must be encoded using REX
26767 x86_extended_reg_mentioned_p (rtx insn)
26769 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
26770 extended_reg_mentioned_1, NULL);
26773 /* If profitable, negate (without causing overflow) the integer constant
26774 of mode MODE at location LOC. Return true if we negated it. */
26776 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
26780 if (!CONST_INT_P (*loc))
26786 /* DImode x86_64 constants must fit in 32 bits. */
26787 gcc_assert (x86_64_immediate_operand (*loc, mode));
26798 gcc_unreachable ();
26801 /* Avoid overflows. */
26802 if (mode_signbit_p (mode, *loc))
26805 val = INTVAL (*loc);
26807 /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
26808 Exceptions: -128 encodes smaller than 128, so swap sign and op. */
26809 if ((val < 0 && val != -128)
26812 *loc = GEN_INT (-val);
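/* Editorial examples: with *LOC == -4 the constant becomes 4 and the
   caller flips ADD to SUB, so `addl $-4, %eax' is emitted as
   `subl $4, %eax'; -128 is left alone because it fits in a sign-extended
   8-bit immediate while +128 would not.  */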
26819 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
26820 optabs would emit if we didn't have TFmode patterns. */
26823 x86_emit_floatuns (rtx operands[2])
26825 rtx neglab, donelab, i0, i1, f0, in, out;
26826 enum machine_mode mode, inmode;
26828 inmode = GET_MODE (operands[1]);
26829 gcc_assert (inmode == SImode || inmode == DImode);
26832 in = force_reg (inmode, operands[1]);
26833 mode = GET_MODE (out);
26834 neglab = gen_label_rtx ();
26835 donelab = gen_label_rtx ();
26836 f0 = gen_reg_rtx (mode);
26838 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
26840 expand_float (out, in, 0);
26842 emit_jump_insn (gen_jump (donelab));
26845 emit_label (neglab);
26847 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
26849 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
26851 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
26853 expand_float (f0, i0, 0);
26855 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
26857 emit_label (donelab);
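/* Editorial sketch (not compiler code) of the expansion above, written as
   C for an unsigned input X, and assuming the default round-to-nearest FP
   mode, where folding the low bit in as a sticky bit before converting
   keeps the final doubling from introducing a second rounding error:

     if ((long) x >= 0)
       result = (double) (long) x;        ... fits the signed range
     else
       {
         long half = (x >> 1) | (x & 1);  ... halve, keep sticky bit
         result = (double) half;
         result += result;                ... scale back up
       }
*/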
26860 /* AVX does not support 32-byte integer vector operations,
26861 thus the longest vector we are faced with is V16QImode. */
26862 #define MAX_VECT_LEN 16
26864 struct expand_vec_perm_d
26866 rtx target, op0, op1;
26867 unsigned char perm[MAX_VECT_LEN];
26868 enum machine_mode vmode;
26869 unsigned char nelt;
26873 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
26874 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
26876 /* Get a vector mode of the same size as the original but with elements
26877 twice as wide. This is only guaranteed to apply to integral vectors. */
26879 static inline enum machine_mode
26880 get_mode_wider_vector (enum machine_mode o)
26882 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
26883 enum machine_mode n = GET_MODE_WIDER_MODE (o);
26884 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
26885 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
26889 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26890 with all elements equal to VAR. Return true if successful. */
26893 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
26894 rtx target, rtx val)
26917 /* First attempt to recognize VAL as-is. */
26918 dup = gen_rtx_VEC_DUPLICATE (mode, val);
26919 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
26920 if (recog_memoized (insn) < 0)
26923 /* If that fails, force VAL into a register. */
26926 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
26927 seq = get_insns ();
26930 emit_insn_before (seq, insn);
26932 ok = recog_memoized (insn) >= 0;
26941 if (TARGET_SSE || TARGET_3DNOW_A)
26945 val = gen_lowpart (SImode, val);
26946 x = gen_rtx_TRUNCATE (HImode, val);
26947 x = gen_rtx_VEC_DUPLICATE (mode, x);
26948 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26961 struct expand_vec_perm_d dperm;
26965 memset (&dperm, 0, sizeof (dperm));
26966 dperm.target = target;
26967 dperm.vmode = mode;
26968 dperm.nelt = GET_MODE_NUNITS (mode);
26969 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
26971 /* Extend to SImode using a paradoxical SUBREG. */
26972 tmp1 = gen_reg_rtx (SImode);
26973 emit_move_insn (tmp1, gen_lowpart (SImode, val));
26975 /* Insert the SImode value as low element of a V4SImode vector. */
26976 tmp2 = gen_lowpart (V4SImode, dperm.op0);
26977 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
26979 ok = (expand_vec_perm_1 (&dperm)
26980 || expand_vec_perm_broadcast_1 (&dperm));
26992 /* Replicate the value once into the next wider mode and recurse. */
26994 enum machine_mode smode, wsmode, wvmode;
26997 smode = GET_MODE_INNER (mode);
26998 wvmode = get_mode_wider_vector (mode);
26999 wsmode = GET_MODE_INNER (wvmode);
27001 val = convert_modes (wsmode, smode, val, true);
27002 x = expand_simple_binop (wsmode, ASHIFT, val,
27003 GEN_INT (GET_MODE_BITSIZE (smode)),
27004 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27005 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
27007 x = gen_lowpart (wvmode, target);
27008 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
27016 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
27017 rtx x = gen_reg_rtx (hvmode);
27019 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
27022 x = gen_rtx_VEC_CONCAT (mode, x, x);
27023 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27032 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27033 whose ONE_VAR element is VAR, and the other elements are zero. Return true if successful. */
27037 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
27038 rtx target, rtx var, int one_var)
27040 enum machine_mode vsimode;
27043 bool use_vector_set = false;
27048 /* For SSE4.1, we normally use vector set. But if the second
27049 element is zero and inter-unit moves are OK, we use movq instead. */
27051 use_vector_set = (TARGET_64BIT
27053 && !(TARGET_INTER_UNIT_MOVES
27059 use_vector_set = TARGET_SSE4_1;
27062 use_vector_set = TARGET_SSE2;
27065 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
27072 use_vector_set = TARGET_AVX;
27075 /* Use ix86_expand_vector_set in 64-bit mode only. */
27076 use_vector_set = TARGET_AVX && TARGET_64BIT;
27082 if (use_vector_set)
27084 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
27085 var = force_reg (GET_MODE_INNER (mode), var);
27086 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27102 var = force_reg (GET_MODE_INNER (mode), var);
27103 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
27104 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27109 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
27110 new_target = gen_reg_rtx (mode);
27112 new_target = target;
27113 var = force_reg (GET_MODE_INNER (mode), var);
27114 x = gen_rtx_VEC_DUPLICATE (mode, var);
27115 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
27116 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
27119 /* We need to shuffle the value to the correct position, so
27120 create a new pseudo to store the intermediate result. */
27122 /* With SSE2, we can use the integer shuffle insns. */
27123 if (mode != V4SFmode && TARGET_SSE2)
27125 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
27127 GEN_INT (one_var == 1 ? 0 : 1),
27128 GEN_INT (one_var == 2 ? 0 : 1),
27129 GEN_INT (one_var == 3 ? 0 : 1)));
27130 if (target != new_target)
27131 emit_move_insn (target, new_target);
27135 /* Otherwise convert the intermediate result to V4SFmode and
27136 use the SSE1 shuffle instructions. */
27137 if (mode != V4SFmode)
27139 tmp = gen_reg_rtx (V4SFmode);
27140 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
27145 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
27147 GEN_INT (one_var == 1 ? 0 : 1),
27148 GEN_INT (one_var == 2 ? 0+4 : 1+4),
27149 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
27151 if (mode != V4SFmode)
27152 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
27153 else if (tmp != target)
27154 emit_move_insn (target, tmp);
27156 else if (target != new_target)
27157 emit_move_insn (target, new_target);
27162 vsimode = V4SImode;
27168 vsimode = V2SImode;
27174 /* Zero extend the variable element to SImode and recurse. */
27175 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
27177 x = gen_reg_rtx (vsimode);
27178 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
27180 gcc_unreachable ();
27182 emit_move_insn (target, gen_lowpart (mode, x));
27190 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27191 consisting of the values in VALS. It is known that all elements
27192 except ONE_VAR are constants. Return true if successful. */
27195 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
27196 rtx target, rtx vals, int one_var)
27198 rtx var = XVECEXP (vals, 0, one_var);
27199 enum machine_mode wmode;
27202 const_vec = copy_rtx (vals);
27203 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
27204 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
27212 /* For the two element vectors, it's just as easy to use
27213 the general case. */
27217 /* Use ix86_expand_vector_set in 64-bit mode only. */
27240 /* There's no way to set one QImode entry easily. Combine
27241 the variable value with its adjacent constant value, and
27242 promote to an HImode set. */
27243 x = XVECEXP (vals, 0, one_var ^ 1);
27246 var = convert_modes (HImode, QImode, var, true);
27247 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
27248 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27249 x = GEN_INT (INTVAL (x) & 0xff);
27253 var = convert_modes (HImode, QImode, var, true);
27254 x = gen_int_mode (INTVAL (x) << 8, HImode);
27256 if (x != const0_rtx)
27257 var = expand_simple_binop (HImode, IOR, var, x, var,
27258 1, OPTAB_LIB_WIDEN);
27260 x = gen_reg_rtx (wmode);
27261 emit_move_insn (x, gen_lowpart (wmode, const_vec));
27262 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
27264 emit_move_insn (target, gen_lowpart (mode, x));
27271 emit_move_insn (target, const_vec);
27272 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27276 /* A subroutine of ix86_expand_vector_init_general. Use vector
27277 concatenate to handle the most general case: all values variable,
27278 and none identical. */
27281 ix86_expand_vector_init_concat (enum machine_mode mode,
27282 rtx target, rtx *ops, int n)
27284 enum machine_mode cmode, hmode = VOIDmode;
27285 rtx first[8], second[4];
27325 gcc_unreachable ();
27328 if (!register_operand (ops[1], cmode))
27329 ops[1] = force_reg (cmode, ops[1]);
27330 if (!register_operand (ops[0], cmode))
27331 ops[0] = force_reg (cmode, ops[0]);
27332 emit_insn (gen_rtx_SET (VOIDmode, target,
27333 gen_rtx_VEC_CONCAT (mode, ops[0],
27353 gcc_unreachable ();
27369 gcc_unreachable ();
27374 /* FIXME: We process inputs backward to help RA. PR 36222. */
27377 for (; i > 0; i -= 2, j--)
27379 first[j] = gen_reg_rtx (cmode);
27380 v = gen_rtvec (2, ops[i - 1], ops[i]);
27381 ix86_expand_vector_init (false, first[j],
27382 gen_rtx_PARALLEL (cmode, v));
27388 gcc_assert (hmode != VOIDmode);
27389 for (i = j = 0; i < n; i += 2, j++)
27391 second[j] = gen_reg_rtx (hmode);
27392 ix86_expand_vector_init_concat (hmode, second [j],
27396 ix86_expand_vector_init_concat (mode, target, second, n);
27399 ix86_expand_vector_init_concat (mode, target, first, n);
27403 gcc_unreachable ();
27407 /* A subroutine of ix86_expand_vector_init_general. Use vector
27408 interleave to handle the most general case: all values variable,
27409 and none identical. */
27412 ix86_expand_vector_init_interleave (enum machine_mode mode,
27413 rtx target, rtx *ops, int n)
27415 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
27418 rtx (*gen_load_even) (rtx, rtx, rtx);
27419 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
27420 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
27425 gen_load_even = gen_vec_setv8hi;
27426 gen_interleave_first_low = gen_vec_interleave_lowv4si;
27427 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27428 inner_mode = HImode;
27429 first_imode = V4SImode;
27430 second_imode = V2DImode;
27431 third_imode = VOIDmode;
27434 gen_load_even = gen_vec_setv16qi;
27435 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
27436 gen_interleave_second_low = gen_vec_interleave_lowv4si;
27437 inner_mode = QImode;
27438 first_imode = V8HImode;
27439 second_imode = V4SImode;
27440 third_imode = V2DImode;
27443 gcc_unreachable ();
27446 for (i = 0; i < n; i++)
27448 /* Extend the odd element to SImode using a paradoxical SUBREG. */
27449 op0 = gen_reg_rtx (SImode);
27450 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
27452 /* Insert the SImode value as low element of V4SImode vector. */
27453 op1 = gen_reg_rtx (V4SImode);
27454 op0 = gen_rtx_VEC_MERGE (V4SImode,
27455 gen_rtx_VEC_DUPLICATE (V4SImode,
27457 CONST0_RTX (V4SImode),
27459 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
27461 /* Cast the V4SImode vector back to a vector in the original mode. */
27462 op0 = gen_reg_rtx (mode);
27463 emit_move_insn (op0, gen_lowpart (mode, op1));
27465 /* Load the even elements into the second position. */
27466 emit_insn ((*gen_load_even) (op0,
27467 force_reg (inner_mode,
27471 /* Cast vector to FIRST_IMODE vector. */
27472 ops[i] = gen_reg_rtx (first_imode);
27473 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27476 /* Interleave low FIRST_IMODE vectors. */
27477 for (i = j = 0; i < n; i += 2, j++)
27479 op0 = gen_reg_rtx (first_imode);
27480 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27482 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27483 ops[j] = gen_reg_rtx (second_imode);
27484 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27487 /* Interleave low SECOND_IMODE vectors. */
27488 switch (second_imode)
27491 for (i = j = 0; i < n / 2; i += 2, j++)
27493 op0 = gen_reg_rtx (second_imode);
27494 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
27497 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
27499 ops[j] = gen_reg_rtx (third_imode);
27500 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27502 second_imode = V2DImode;
27503 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27507 op0 = gen_reg_rtx (second_imode);
27508 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
27511 /* Cast the SECOND_IMODE vector back to a vector in the original mode. */
27513 emit_insn (gen_rtx_SET (VOIDmode, target,
27514 gen_lowpart (mode, op0)));
27518 gcc_unreachable ();
27522 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27523 all values variable, and none identical. */
27526 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27527 rtx target, rtx vals)
27529 rtx ops[32], op0, op1;
27530 enum machine_mode half_mode = VOIDmode;
27537 if (!mmx_ok && !TARGET_SSE)
27549 n = GET_MODE_NUNITS (mode);
27550 for (i = 0; i < n; i++)
27551 ops[i] = XVECEXP (vals, 0, i);
27552 ix86_expand_vector_init_concat (mode, target, ops, n);
27556 half_mode = V16QImode;
27560 half_mode = V8HImode;
27564 n = GET_MODE_NUNITS (mode);
27565 for (i = 0; i < n; i++)
27566 ops[i] = XVECEXP (vals, 0, i);
27567 op0 = gen_reg_rtx (half_mode);
27568 op1 = gen_reg_rtx (half_mode);
27569 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27571 ix86_expand_vector_init_interleave (half_mode, op1,
27572 &ops [n >> 1], n >> 2);
27573 emit_insn (gen_rtx_SET (VOIDmode, target,
27574 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27578 if (!TARGET_SSE4_1)
27586 /* Don't use ix86_expand_vector_init_interleave if we can't
27587 move from GPR to SSE register directly. */
27588 if (!TARGET_INTER_UNIT_MOVES)
27591 n = GET_MODE_NUNITS (mode);
27592 for (i = 0; i < n; i++)
27593 ops[i] = XVECEXP (vals, 0, i);
27594 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27602 gcc_unreachable ();
27606 int i, j, n_elts, n_words, n_elt_per_word;
27607 enum machine_mode inner_mode;
27608 rtx words[4], shift;
27610 inner_mode = GET_MODE_INNER (mode);
27611 n_elts = GET_MODE_NUNITS (mode);
27612 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27613 n_elt_per_word = n_elts / n_words;
27614 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
27616 for (i = 0; i < n_words; ++i)
27618 rtx word = NULL_RTX;
27620 for (j = 0; j < n_elt_per_word; ++j)
27622 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27623 elt = convert_modes (word_mode, inner_mode, elt, true);
27629 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27630 word, 1, OPTAB_LIB_WIDEN);
27631 word = expand_simple_binop (word_mode, IOR, word, elt,
27632 word, 1, OPTAB_LIB_WIDEN);
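/* Worked example (editorial): initializing a V4HImode vector {a,b,c,d}
   on a 32-bit target gives n_words == 2 and n_elt_per_word == 2; the
   loop above builds word 0 as (b << 16) | a and word 1 as (d << 16) | c,
   walking each word's elements from the highest index down so the
   lowest-indexed element lands in the low bits.  */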
27640 emit_move_insn (target, gen_lowpart (mode, words[0]));
27641 else if (n_words == 2)
27643 rtx tmp = gen_reg_rtx (mode);
27644 emit_clobber (tmp);
27645 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27646 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27647 emit_move_insn (target, tmp);
27649 else if (n_words == 4)
27651 rtx tmp = gen_reg_rtx (V4SImode);
27652 gcc_assert (word_mode == SImode);
27653 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27654 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27655 emit_move_insn (target, gen_lowpart (mode, tmp));
27658 gcc_unreachable ();
27662 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27663 instructions unless MMX_OK is true. */
27666 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27668 enum machine_mode mode = GET_MODE (target);
27669 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27670 int n_elts = GET_MODE_NUNITS (mode);
27671 int n_var = 0, one_var = -1;
27672 bool all_same = true, all_const_zero = true;
27676 for (i = 0; i < n_elts; ++i)
27678 x = XVECEXP (vals, 0, i);
27679 if (!(CONST_INT_P (x)
27680 || GET_CODE (x) == CONST_DOUBLE
27681 || GET_CODE (x) == CONST_FIXED))
27682 n_var++, one_var = i;
27683 else if (x != CONST0_RTX (inner_mode))
27684 all_const_zero = false;
27685 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27689 /* Constants are best loaded from the constant pool. */
27692 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27696 /* If all values are identical, broadcast the value. */
27698 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27699 XVECEXP (vals, 0, 0)))
27702 /* Values where only one field is non-constant are best loaded from
27703 the pool and overwritten via move later. */
27707 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27708 XVECEXP (vals, 0, one_var),
27712 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27716 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
27720 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27722 enum machine_mode mode = GET_MODE (target);
27723 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27724 enum machine_mode half_mode;
27725 bool use_vec_merge = false;
27727 static rtx (*gen_extract[6][2]) (rtx, rtx)
27729 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27730 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27731 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27732 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27733 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27734 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27736 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27738 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27739 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27740 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27741 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27742 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27743 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27753 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
27754 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
27756 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
27758 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
27759 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27765 use_vec_merge = TARGET_SSE4_1;
27773 /* For the two element vectors, we implement a VEC_CONCAT with
27774 the extraction of the other element. */
27776 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
27777 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
27780 op0 = val, op1 = tmp;
27782 op0 = tmp, op1 = val;
27784 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
27785 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27790 use_vec_merge = TARGET_SSE4_1;
27797 use_vec_merge = true;
27801 /* tmp = target = A B C D */
27802 tmp = copy_to_reg (target);
27803 /* target = A A B B */
27804 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
27805 /* target = X A B B */
27806 ix86_expand_vector_set (false, target, val, 0);
27807 /* target = A X C D */
27808 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27809 const1_rtx, const0_rtx,
27810 GEN_INT (2+4), GEN_INT (3+4)));
27814 /* tmp = target = A B C D */
27815 tmp = copy_to_reg (target);
27816 /* tmp = X B C D */
27817 ix86_expand_vector_set (false, tmp, val, 0);
27818 /* target = A B X D */
27819 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27820 const0_rtx, const1_rtx,
27821 GEN_INT (0+4), GEN_INT (3+4)));
27825 /* tmp = target = A B C D */
27826 tmp = copy_to_reg (target);
27827 /* tmp = X B C D */
27828 ix86_expand_vector_set (false, tmp, val, 0);
27829 /* target = A B C X */
27830 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27831 const0_rtx, const1_rtx,
27832 GEN_INT (2+4), GEN_INT (0+4)));
27836 gcc_unreachable ();
27841 use_vec_merge = TARGET_SSE4_1;
27845 /* Element 0 handled by vec_merge below. */
27848 use_vec_merge = true;
27854 /* With SSE2, use integer shuffles to swap element 0 and ELT,
27855 store into element 0, then shuffle them back. */
27859 order[0] = GEN_INT (elt);
27860 order[1] = const1_rtx;
27861 order[2] = const2_rtx;
27862 order[3] = GEN_INT (3);
27863 order[elt] = const0_rtx;
27865 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27866 order[1], order[2], order[3]));
27868 ix86_expand_vector_set (false, target, val, 0);
27870 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27871 order[1], order[2], order[3]));
27875 /* For SSE1, we have to reuse the V4SF code. */
27876 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
27877 gen_lowpart (SFmode, val), elt);
27882 use_vec_merge = TARGET_SSE2;
27885 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27889 use_vec_merge = TARGET_SSE4_1;
27896 half_mode = V16QImode;
27902 half_mode = V8HImode;
27908 half_mode = V4SImode;
27914 half_mode = V2DImode;
27920 half_mode = V4SFmode;
27926 half_mode = V2DFmode;
27932 /* Compute offset. */
27936 gcc_assert (i <= 1);
27938 /* Extract the half. */
27939 tmp = gen_reg_rtx (half_mode);
27940 emit_insn ((*gen_extract[j][i]) (tmp, target));
27942 /* Put val in tmp at elt. */
27943 ix86_expand_vector_set (false, tmp, val, elt);
27946 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
27955 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
27956 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
27957 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27961 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27963 emit_move_insn (mem, target);
27965 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27966 emit_move_insn (tmp, val);
27968 emit_move_insn (target, mem);
27973 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
27975 enum machine_mode mode = GET_MODE (vec);
27976 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27977 bool use_vec_extr = false;
27990 use_vec_extr = true;
27994 use_vec_extr = TARGET_SSE4_1;
28006 tmp = gen_reg_rtx (mode);
28007 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
28008 GEN_INT (elt), GEN_INT (elt),
28009 GEN_INT (elt+4), GEN_INT (elt+4)));
28013 tmp = gen_reg_rtx (mode);
28014 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
28018 gcc_unreachable ();
28021 use_vec_extr = true;
28026 use_vec_extr = TARGET_SSE4_1;
28040 tmp = gen_reg_rtx (mode);
28041 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
28042 GEN_INT (elt), GEN_INT (elt),
28043 GEN_INT (elt), GEN_INT (elt)));
28047 tmp = gen_reg_rtx (mode);
28048 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
28052 gcc_unreachable ();
28055 use_vec_extr = true;
28060 /* For SSE1, we have to reuse the V4SF code. */
28061 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
28062 gen_lowpart (V4SFmode, vec), elt);
28068 use_vec_extr = TARGET_SSE2;
28071 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28075 use_vec_extr = TARGET_SSE4_1;
28079 /* ??? Could extract the appropriate HImode element and shift. */
28086 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
28087 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
28089 /* Let the rtl optimizers know about the zero extension performed. */
28090 if (inner_mode == QImode || inner_mode == HImode)
28092 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
28093 target = gen_lowpart (SImode, target);
28096 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28100 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28102 emit_move_insn (mem, vec);
28104 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
28105 emit_move_insn (target, tmp);
28109 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
28110 pattern to reduce; DEST is the destination; IN is the input vector. */
28113 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
28115 rtx tmp1, tmp2, tmp3;
28117 tmp1 = gen_reg_rtx (V4SFmode);
28118 tmp2 = gen_reg_rtx (V4SFmode);
28119 tmp3 = gen_reg_rtx (V4SFmode);
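/* The three insns below reduce all four elements into element 0:
   movhlps copies IN's high pair into TMP1's low pair, the first FN
   combines the pairs elementwise, the shufps replicates element 1 of
   that partial result, and the final FN leaves the combination of all
   four input elements in element 0 of DEST.  */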
28121 emit_insn (gen_sse_movhlps (tmp1, in, in));
28122 emit_insn (fn (tmp2, tmp1, in));
28124 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
28125 const1_rtx, const1_rtx,
28126 GEN_INT (1+4), GEN_INT (1+4)));
28127 emit_insn (fn (dest, tmp2, tmp3));
28130 /* Target hook for scalar_mode_supported_p. */
28132 ix86_scalar_mode_supported_p (enum machine_mode mode)
28134 if (DECIMAL_FLOAT_MODE_P (mode))
28135 return default_decimal_float_supported_p ();
28136 else if (mode == TFmode)
28139 return default_scalar_mode_supported_p (mode);
28142 /* Implements target hook vector_mode_supported_p. */
28144 ix86_vector_mode_supported_p (enum machine_mode mode)
28146 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
28148 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
28150 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
28152 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
28154 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
28159 /* Target hook for c_mode_for_suffix. */
28160 static enum machine_mode
28161 ix86_c_mode_for_suffix (char suffix)
28171 /* Worker function for TARGET_MD_ASM_CLOBBERS.
28173 We do this in the new i386 backend to maintain source compatibility
28174 with the old cc0-based compiler. */
28177 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
28178 tree inputs ATTRIBUTE_UNUSED,
28181 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
28183 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
28188 /* Implements the target hook targetm.asm.encode_section_info.  This
28189 is not used by NetWare. */
28191 static void ATTRIBUTE_UNUSED
28192 ix86_encode_section_info (tree decl, rtx rtl, int first)
28194 default_encode_section_info (decl, rtl, first);
28196 if (TREE_CODE (decl) == VAR_DECL
28197 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
28198 && ix86_in_large_data_p (decl))
28199 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
28202 /* Worker function for REVERSE_CONDITION. */
28205 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
28207 return (mode != CCFPmode && mode != CCFPUmode
28208 ? reverse_condition (code)
28209 : reverse_condition_maybe_unordered (code));
28212 /* Output code to perform an x87 FP register move, from OPERANDS[1]
to OPERANDS[0]. */
28216 output_387_reg_move (rtx insn, rtx *operands)
28218 if (REG_P (operands[0]))
28220 if (REG_P (operands[1])
28221 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28223 if (REGNO (operands[0]) == FIRST_STACK_REG)
28224 return output_387_ffreep (operands, 0);
28225 return "fstp\t%y0";
28227 if (STACK_TOP_P (operands[0]))
28228 return "fld%Z1\t%y1";
28231 else if (MEM_P (operands[0]))
28233 gcc_assert (REG_P (operands[1]));
28234 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28235 return "fstp%Z0\t%y0";
28238 /* There is no non-popping store to memory for XFmode.
28239 So if we need one, follow the store with a load. */
28240 if (GET_MODE (operands[0]) == XFmode)
28241 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
28243 return "fst%Z0\t%y0";
28250 /* Output code to perform a conditional jump to LABEL, if C2 flag in
28251 FP status register is set. */
28254 ix86_emit_fp_unordered_jump (rtx label)
28256 rtx reg = gen_reg_rtx (HImode);
28259 emit_insn (gen_x86_fnstsw_1 (reg));
28261 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
28263 emit_insn (gen_x86_sahf_1 (reg));
28265 temp = gen_rtx_REG (CCmode, FLAGS_REG);
28266 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
28270 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
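/* The 0x04 tested above is bit 2 of the status word's high byte,
   i.e. the C2 flag (FPSW bit 10).  */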
28272 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
28273 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
28276 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
28277 gen_rtx_LABEL_REF (VOIDmode, label),
28279 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
28281 emit_jump_insn (temp);
28282 predict_jump (REG_BR_PROB_BASE * 10 / 100);
28285 /* Output code to perform a log1p XFmode calculation. */
28287 void ix86_emit_i387_log1p (rtx op0, rtx op1)
28289 rtx label1 = gen_label_rtx ();
28290 rtx label2 = gen_label_rtx ();
28292 rtx tmp = gen_reg_rtx (XFmode);
28293 rtx tmp2 = gen_reg_rtx (XFmode);
28296 emit_insn (gen_absxf2 (tmp, op1));
28297 test = gen_rtx_GE (VOIDmode, tmp,
28298 CONST_DOUBLE_FROM_REAL_VALUE (
28299 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
28301 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
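/* Here |op1| < 1 - sqrt(2)/2 (the constant tested above), the range
   in which fyl2xp1 is specified to be accurate; log1p is computed
   directly as ln(2) * log2 (1 + op1).  */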
28303 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28304 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
28305 emit_jump (label2);
28307 emit_label (label1);
28308 emit_move_insn (tmp, CONST1_RTX (XFmode));
28309 emit_insn (gen_addxf3 (tmp, op1, tmp));
28310 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28311 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
28313 emit_label (label2);
28316 /* Output code to perform a Newton-Raphson approximation of a single precision
28317 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
28319 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
28321 rtx x0, x1, e0, e1, two;
28323 x0 = gen_reg_rtx (mode);
28324 e0 = gen_reg_rtx (mode);
28325 e1 = gen_reg_rtx (mode);
28326 x1 = gen_reg_rtx (mode);
28328 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
28330 if (VECTOR_MODE_P (mode))
28331 two = ix86_build_const_vector (SFmode, true, two);
28333 two = force_reg (mode, two);
28335 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
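/* This is one Newton-Raphson step for the reciprocal: given the
   roughly 12-bit accurate rcp estimate x0, x0 * (2.0 - b * x0)
   approximately doubles the number of correct bits.  */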
28337 /* x0 = rcp(b) estimate */
28338 emit_insn (gen_rtx_SET (VOIDmode, x0,
28339 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
28342 emit_insn (gen_rtx_SET (VOIDmode, e0,
28343 gen_rtx_MULT (mode, x0, a)));
28345 emit_insn (gen_rtx_SET (VOIDmode, e1,
28346 gen_rtx_MULT (mode, x0, b)));
28348 emit_insn (gen_rtx_SET (VOIDmode, x1,
28349 gen_rtx_MINUS (mode, two, e1)));
28350 /* res = e0 * x1 */
28351 emit_insn (gen_rtx_SET (VOIDmode, res,
28352 gen_rtx_MULT (mode, e0, x1)));
28355 /* Output code to perform a Newton-Raphson approximation of a
28356 single precision floating point [reciprocal] square root. */
28358 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
28361 rtx x0, e0, e1, e2, e3, mthree, mhalf;
28364 x0 = gen_reg_rtx (mode);
28365 e0 = gen_reg_rtx (mode);
28366 e1 = gen_reg_rtx (mode);
28367 e2 = gen_reg_rtx (mode);
28368 e3 = gen_reg_rtx (mode);
28370 real_from_integer (&r, VOIDmode, -3, -1, 0);
28371 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28373 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
28374 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28376 if (VECTOR_MODE_P (mode))
28378 mthree = ix86_build_const_vector (SFmode, true, mthree);
28379 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
28382 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
28383 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
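/* Both forms are the Newton-Raphson step
   x1 = x0 * (3.0 - a * x0 * x0) / 2.0 for 1/sqrt(a), rewritten with
   the negated constants -3.0 and -0.5; multiplying the refined
   reciprocal square root by a yields sqrt(a) itself.  */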
28385 /* x0 = rsqrt(a) estimate */
28386 emit_insn (gen_rtx_SET (VOIDmode, x0,
28387 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
28390 /* If a == 0.0, filter out the infinite rsqrt estimate to prevent NaN for sqrt(0.0). */
28395 zero = gen_reg_rtx (mode);
28396 mask = gen_reg_rtx (mode);
28398 zero = force_reg (mode, CONST0_RTX(mode));
28399 emit_insn (gen_rtx_SET (VOIDmode, mask,
28400 gen_rtx_NE (mode, zero, a)));
28402 emit_insn (gen_rtx_SET (VOIDmode, x0,
28403 gen_rtx_AND (mode, x0, mask)));
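/* MASK is all-ones exactly where a != 0.0, so the AND forces the
   estimate to zero for a == 0.0; otherwise rsqrt(0.0) = inf would
   turn the x0 * a multiply below into inf * 0 = NaN.  */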
28407 emit_insn (gen_rtx_SET (VOIDmode, e0,
28408 gen_rtx_MULT (mode, x0, a)));
28410 emit_insn (gen_rtx_SET (VOIDmode, e1,
28411 gen_rtx_MULT (mode, e0, x0)));
28414 mthree = force_reg (mode, mthree);
28415 emit_insn (gen_rtx_SET (VOIDmode, e2,
28416 gen_rtx_PLUS (mode, e1, mthree)));
28418 mhalf = force_reg (mode, mhalf);
28420 /* e3 = -.5 * x0 */
28421 emit_insn (gen_rtx_SET (VOIDmode, e3,
28422 gen_rtx_MULT (mode, x0, mhalf)));
28424 /* e3 = -.5 * e0 */
28425 emit_insn (gen_rtx_SET (VOIDmode, e3,
28426 gen_rtx_MULT (mode, e0, mhalf)));
28427 /* ret = e2 * e3 */
28428 emit_insn (gen_rtx_SET (VOIDmode, res,
28429 gen_rtx_MULT (mode, e2, e3)));
28432 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
28434 static void ATTRIBUTE_UNUSED
28435 i386_solaris_elf_named_section (const char *name, unsigned int flags,
28438 /* With Binutils 2.15, the "@unwind" marker must be specified on
28439 every occurrence of the ".eh_frame" section, not just the first one. */
28442 && strcmp (name, ".eh_frame") == 0)
28444 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28445 flags & SECTION_WRITE ? "aw" : "a");
28448 default_elf_asm_named_section (name, flags, decl);
28451 /* Return the mangling of TYPE if it is an extended fundamental type. */
28453 static const char *
28454 ix86_mangle_type (const_tree type)
28456 type = TYPE_MAIN_VARIANT (type);
28458 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28459 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28462 switch (TYPE_MODE (type))
28465 /* __float128 is "g". */
28468 /* "long double" or __float80 is "e". */
28475 /* For 32-bit code we can save PIC register setup by using
28476 __stack_chk_fail_local hidden function instead of calling
28477 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
28478 register, so it is better to call __stack_chk_fail directly. */
28481 ix86_stack_protect_fail (void)
28483 return TARGET_64BIT
28484 ? default_external_stack_protect_fail ()
28485 : default_hidden_stack_protect_fail ();
28488 /* Select a format to encode pointers in exception handling data. CODE
28489 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28490 true if the symbol may be affected by dynamic relocations.
28492 ??? All x86 object file formats are capable of representing this.
28493 After all, the relocation needed is the same as for the call insn.
28494 Whether or not a particular assembler allows us to enter such, I
28495 guess we'll have to see. */
28497 asm_preferred_eh_data_format (int code, int global)
28501 int type = DW_EH_PE_sdata8;
28503 || ix86_cmodel == CM_SMALL_PIC
28504 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28505 type = DW_EH_PE_sdata4;
28506 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28508 if (ix86_cmodel == CM_SMALL
28509 || (ix86_cmodel == CM_MEDIUM && code))
28510 return DW_EH_PE_udata4;
28511 return DW_EH_PE_absptr;
28514 /* Expand copysign from SIGN to the positive value ABS_VALUE
28515 storing in RESULT. If MASK is non-null, it shall be a mask to mask out the sign bit. */
28518 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28520 enum machine_mode mode = GET_MODE (sign);
28521 rtx sgn = gen_reg_rtx (mode);
28522 if (mask == NULL_RTX)
28524 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28525 if (!VECTOR_MODE_P (mode))
28527 /* We need to generate a scalar mode mask in this case. */
28528 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28529 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28530 mask = gen_reg_rtx (mode);
28531 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28535 mask = gen_rtx_NOT (mode, mask);
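/* At this point MASK has only the sign bit set, so below
   sgn = SIGN & MASK isolates the sign bit of SIGN and the IOR
   transplants it onto the known-positive ABS_VALUE.  */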
28536 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28537 gen_rtx_AND (mode, mask, sign)));
28538 emit_insn (gen_rtx_SET (VOIDmode, result,
28539 gen_rtx_IOR (mode, abs_value, sgn)));
28542 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28543 mask for masking out the sign-bit is stored in *SMASK, if that is non-null. */
28546 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28548 enum machine_mode mode = GET_MODE (op0);
28551 xa = gen_reg_rtx (mode);
28552 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28553 if (!VECTOR_MODE_P (mode))
28555 /* We need to generate a scalar mode mask in this case. */
28556 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28557 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28558 mask = gen_reg_rtx (mode);
28559 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28561 emit_insn (gen_rtx_SET (VOIDmode, xa,
28562 gen_rtx_AND (mode, op0, mask)));
28570 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28571 swapping the operands if SWAP_OPERANDS is true. The expanded
28572 code is a forward jump to a newly created label in case the
28573 comparison is true. The generated label rtx is returned. */
28575 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28576 bool swap_operands)
28587 label = gen_label_rtx ();
28588 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28589 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28590 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28591 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28592 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28593 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28594 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28595 JUMP_LABEL (tmp) = label;
28600 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28601 using comparison code CODE. Operands are swapped for the comparison if
28602 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
28604 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28605 bool swap_operands)
28607 enum machine_mode mode = GET_MODE (op0);
28608 rtx mask = gen_reg_rtx (mode);
28617 if (mode == DFmode)
28618 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28619 gen_rtx_fmt_ee (code, mode, op0, op1)));
28621 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28622 gen_rtx_fmt_ee (code, mode, op0, op1)));
28627 /* Generate and return a rtx of mode MODE for 2**n where n is the number
28628 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
28630 ix86_gen_TWO52 (enum machine_mode mode)
28632 REAL_VALUE_TYPE TWO52r;
28635 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28636 TWO52 = const_double_from_real_value (TWO52r, mode);
28637 TWO52 = force_reg (mode, TWO52);
28642 /* Expand SSE sequence for computing lround from OP1 storing into OP0. */
28645 ix86_expand_lround (rtx op0, rtx op1)
28647 /* C code for the stuff we're doing below:
28648 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28651 enum machine_mode mode = GET_MODE (op1);
28652 const struct real_format *fmt;
28653 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28656 /* load nextafter (0.5, 0.0) */
28657 fmt = REAL_MODE_FORMAT (mode);
28658 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28659 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
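/* pred_half = nextafter (0.5, 0.0) = 0.5 - 2**(-p-1), the largest
   representable value below one half; using it instead of 0.5 keeps
   inputs just under a half-integer from being rounded up.  */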
28661 /* adj = copysign (0.5, op1) */
28662 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28663 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28665 /* adj = op1 + adj */
28666 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28668 /* op0 = (imode)adj */
28669 expand_fix (op0, adj, 0);
28672 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing into OPERAND0. */
28675 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28677 /* C code for the stuff we're doing below (for do_floor):
28679 xi -= (double)xi > op1 ? 1 : 0;
28682 enum machine_mode fmode = GET_MODE (op1);
28683 enum machine_mode imode = GET_MODE (op0);
28684 rtx ireg, freg, label, tmp;
28686 /* reg = (long)op1 */
28687 ireg = gen_reg_rtx (imode);
28688 expand_fix (ireg, op1, 0);
28690 /* freg = (double)reg */
28691 freg = gen_reg_rtx (fmode);
28692 expand_float (freg, ireg, 0);
28694 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28695 label = ix86_expand_sse_compare_and_jump (UNLE,
28696 freg, op1, !do_floor);
28697 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28698 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28699 emit_move_insn (ireg, tmp);
28701 emit_label (label);
28702 LABEL_NUSES (label) = 1;
28704 emit_move_insn (op0, ireg);
28707 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28708 result in OPERAND0. */
28710 ix86_expand_rint (rtx operand0, rtx operand1)
28712 /* C code for the stuff we're doing below:
28713 xa = fabs (operand1);
28714 if (!isless (xa, 2**52))
28716 xa = xa + 2**52 - 2**52;
28717 return copysign (xa, operand1);
28719 enum machine_mode mode = GET_MODE (operand0);
28720 rtx res, xa, label, TWO52, mask;
28722 res = gen_reg_rtx (mode);
28723 emit_move_insn (res, operand1);
28725 /* xa = abs (operand1) */
28726 xa = ix86_expand_sse_fabs (res, &mask);
28728 /* if (!isless (xa, TWO52)) goto label; */
28729 TWO52 = ix86_gen_TWO52 (mode);
28730 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28732 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28733 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
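/* At magnitude TWO52 (2**52 for DFmode, 2**23 for SFmode) the ulp is
   1.0, so the PLUS discards the fraction of xa in the current
   rounding mode and the MINUS recovers the rounded value exactly.  */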
28735 ix86_sse_copysign_to_positive (res, xa, res, mask);
28737 emit_label (label);
28738 LABEL_NUSES (label) = 1;
28740 emit_move_insn (operand0, res);
28743 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing into OPERAND0. */
28746 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28748 /* C code for the stuff we expand below.
28749 double xa = fabs (x), x2;
28750 if (!isless (xa, TWO52))
28752 xa = xa + TWO52 - TWO52;
28753 x2 = copysign (xa, x);
28762 enum machine_mode mode = GET_MODE (operand0);
28763 rtx xa, TWO52, tmp, label, one, res, mask;
28765 TWO52 = ix86_gen_TWO52 (mode);
28767 /* Temporary for holding the result, initialized to the input
28768 operand to ease control flow. */
28769 res = gen_reg_rtx (mode);
28770 emit_move_insn (res, operand1);
28772 /* xa = abs (operand1) */
28773 xa = ix86_expand_sse_fabs (res, &mask);
28775 /* if (!isless (xa, TWO52)) goto label; */
28776 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28778 /* xa = xa + TWO52 - TWO52; */
28779 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28780 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28782 /* xa = copysign (xa, operand1) */
28783 ix86_sse_copysign_to_positive (xa, xa, res, mask);
28785 /* generate 1.0 or -1.0 */
28786 one = force_reg (mode,
28787 const_double_from_real_value (do_floor
28788 ? dconst1 : dconstm1, mode));
28790 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28791 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28792 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28793 gen_rtx_AND (mode, one, tmp)));
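/* The compare mask is all-ones in each element where the adjustment
   is needed, so the AND with 1.0 (-1.0 for ceil) yields exactly the
   value the subtraction below applies.  */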
28794 /* We always need to subtract here to preserve signed zero. */
28795 tmp = expand_simple_binop (mode, MINUS,
28796 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28797 emit_move_insn (res, tmp);
28799 emit_label (label);
28800 LABEL_NUSES (label) = 1;
28802 emit_move_insn (operand0, res);
28805 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing into OPERAND0. */
28808 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
28810 /* C code for the stuff we expand below.
28811 double xa = fabs (x), x2;
28812 if (!isless (xa, TWO52))
28814 x2 = (double)(long)x;
28821 if (HONOR_SIGNED_ZEROS (mode))
28822 return copysign (x2, x);
28825 enum machine_mode mode = GET_MODE (operand0);
28826 rtx xa, xi, TWO52, tmp, label, one, res, mask;
28828 TWO52 = ix86_gen_TWO52 (mode);
28830 /* Temporary for holding the result, initialized to the input
28831 operand to ease control flow. */
28832 res = gen_reg_rtx (mode);
28833 emit_move_insn (res, operand1);
28835 /* xa = abs (operand1) */
28836 xa = ix86_expand_sse_fabs (res, &mask);
28838 /* if (!isless (xa, TWO52)) goto label; */
28839 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28841 /* xa = (double)(long)x */
28842 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28843 expand_fix (xi, res, 0);
28844 expand_float (xa, xi, 0);
28847 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28849 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28850 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28851 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28852 gen_rtx_AND (mode, one, tmp)));
28853 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
28854 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28855 emit_move_insn (res, tmp);
28857 if (HONOR_SIGNED_ZEROS (mode))
28858 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28860 emit_label (label);
28861 LABEL_NUSES (label) = 1;
28863 emit_move_insn (operand0, res);
28866 /* Expand SSE sequence for computing round from OPERAND1 storing
28867 into OPERAND0. Sequence that works without relying on DImode truncation
28868 via cvttsd2siq, which is only available on 64-bit targets. */
28870 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
28872 /* C code for the stuff we expand below.
28873 double xa = fabs (x), xa2, x2;
28874 if (!isless (xa, TWO52))
28876 Using the absolute value and copying back sign makes
28877 -0.0 -> -0.0 correct.
28878 xa2 = xa + TWO52 - TWO52;
28883 else if (dxa > 0.5)
28885 x2 = copysign (xa2, x);
28888 enum machine_mode mode = GET_MODE (operand0);
28889 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
28891 TWO52 = ix86_gen_TWO52 (mode);
28893 /* Temporary for holding the result, initialized to the input
28894 operand to ease control flow. */
28895 res = gen_reg_rtx (mode);
28896 emit_move_insn (res, operand1);
28898 /* xa = abs (operand1) */
28899 xa = ix86_expand_sse_fabs (res, &mask);
28901 /* if (!isless (xa, TWO52)) goto label; */
28902 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28904 /* xa2 = xa + TWO52 - TWO52; */
28905 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28906 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
28908 /* dxa = xa2 - xa; */
28909 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
28911 /* generate 0.5, 1.0 and -0.5 */
28912 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
28913 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
28914 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
28918 tmp = gen_reg_rtx (mode);
28919 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
28920 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
28921 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28922 gen_rtx_AND (mode, one, tmp)));
28923 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28924 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
28925 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
28926 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28927 gen_rtx_AND (mode, one, tmp)));
28928 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
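/* The two adjustments above make halfway cases round away from zero:
   dxa records the error of the implicit round-to-nearest-even, and a
   tie that was rounded toward even instead of away from zero is
   nudged back by one.  */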
28930 /* res = copysign (xa2, operand1) */
28931 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
28933 emit_label (label);
28934 LABEL_NUSES (label) = 1;
28936 emit_move_insn (operand0, res);
28939 /* Expand SSE sequence for computing trunc from OPERAND1 storing into OPERAND0. */
28942 ix86_expand_trunc (rtx operand0, rtx operand1)
28944 /* C code for SSE variant we expand below.
28945 double xa = fabs (x), x2;
28946 if (!isless (xa, TWO52))
28948 x2 = (double)(long)x;
28949 if (HONOR_SIGNED_ZEROS (mode))
28950 return copysign (x2, x);
28953 enum machine_mode mode = GET_MODE (operand0);
28954 rtx xa, xi, TWO52, label, res, mask;
28956 TWO52 = ix86_gen_TWO52 (mode);
28958 /* Temporary for holding the result, initialized to the input
28959 operand to ease control flow. */
28960 res = gen_reg_rtx (mode);
28961 emit_move_insn (res, operand1);
28963 /* xa = abs (operand1) */
28964 xa = ix86_expand_sse_fabs (res, &mask);
28966 /* if (!isless (xa, TWO52)) goto label; */
28967 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28969 /* x = (double)(long)x */
28970 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28971 expand_fix (xi, res, 0);
28972 expand_float (res, xi, 0);
28974 if (HONOR_SIGNED_ZEROS (mode))
28975 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28977 emit_label (label);
28978 LABEL_NUSES (label) = 1;
28980 emit_move_insn (operand0, res);
28983 /* Expand SSE sequence for computing trunc from OPERAND1 storing into OPERAND0. */
28986 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
28988 enum machine_mode mode = GET_MODE (operand0);
28989 rtx xa, mask, TWO52, label, one, res, smask, tmp;
28991 /* C code for SSE variant we expand below.
28992 double xa = fabs (x), x2;
28993 if (!isless (xa, TWO52))
28995 xa2 = xa + TWO52 - TWO52;
28999 x2 = copysign (xa2, x);
29003 TWO52 = ix86_gen_TWO52 (mode);
29005 /* Temporary for holding the result, initialized to the input
29006 operand to ease control flow. */
29007 res = gen_reg_rtx (mode);
29008 emit_move_insn (res, operand1);
29010 /* xa = abs (operand1) */
29011 xa = ix86_expand_sse_fabs (res, &smask);
29013 /* if (!isless (xa, TWO52)) goto label; */
29014 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29016 /* res = xa + TWO52 - TWO52; */
29017 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29018 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
29019 emit_move_insn (res, tmp);
29022 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
29024 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
29025 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
29026 emit_insn (gen_rtx_SET (VOIDmode, mask,
29027 gen_rtx_AND (mode, mask, one)));
29028 tmp = expand_simple_binop (mode, MINUS,
29029 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
29030 emit_move_insn (res, tmp);
29032 /* res = copysign (res, operand1) */
29033 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
29035 emit_label (label);
29036 LABEL_NUSES (label) = 1;
29038 emit_move_insn (operand0, res);
29041 /* Expand SSE sequence for computing round from OPERAND1 storing into OPERAND0. */
29044 ix86_expand_round (rtx operand0, rtx operand1)
29046 /* C code for the stuff we're doing below:
29047 double xa = fabs (x);
29048 if (!isless (xa, TWO52))
29050 xa = (double)(long)(xa + nextafter (0.5, 0.0));
29051 return copysign (xa, x);
29053 enum machine_mode mode = GET_MODE (operand0);
29054 rtx res, TWO52, xa, label, xi, half, mask;
29055 const struct real_format *fmt;
29056 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
29058 /* Temporary for holding the result, initialized to the input
29059 operand to ease control flow. */
29060 res = gen_reg_rtx (mode);
29061 emit_move_insn (res, operand1);
29063 TWO52 = ix86_gen_TWO52 (mode);
29064 xa = ix86_expand_sse_fabs (res, &mask);
29065 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29067 /* load nextafter (0.5, 0.0) */
29068 fmt = REAL_MODE_FORMAT (mode);
29069 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
29070 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
29072 /* xa = xa + 0.5 */
29073 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
29074 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
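/* Truncating xa + nextafter (0.5, 0.0) implements rounding of the
   absolute value with halfway cases going away from zero; the
   copysign below restores the original sign.  */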
29076 /* xa = (double)(int64_t)xa */
29077 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29078 expand_fix (xi, xa, 0);
29079 expand_float (xa, xi, 0);
29081 /* res = copysign (xa, operand1) */
29082 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
29084 emit_label (label);
29085 LABEL_NUSES (label) = 1;
29087 emit_move_insn (operand0, res);
29091 /* Table of valid machine attributes. */
29092 static const struct attribute_spec ix86_attribute_table[] =
29094 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
29095 /* Stdcall attribute says callee is responsible for popping arguments
29096 if they are not variable. */
29097 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29098 /* Fastcall attribute says callee is responsible for popping arguments
29099 if they are not variable. */
29100 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29101 /* Thiscall attribute says callee is responsible for popping arguments
29102 if they are not variable. */
29103 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29104 /* Cdecl attribute says the callee is a normal C declaration */
29105 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29106 /* Regparm attribute specifies how many integer arguments are to be
29107 passed in registers. */
29108 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
29109 /* Sseregparm attribute says we are using x86_64 calling conventions
29110 for FP arguments. */
29111 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29112 /* force_align_arg_pointer says this function realigns the stack at entry. */
29113 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
29114 false, true, true, ix86_handle_cconv_attribute },
29115 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29116 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
29117 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
29118 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
29120 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29121 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29122 #ifdef SUBTARGET_ATTRIBUTE_TABLE
29123 SUBTARGET_ATTRIBUTE_TABLE,
29125 /* ms_abi and sysv_abi calling convention function attributes. */
29126 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29127 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29128 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute },
29130 { NULL, 0, 0, false, false, false, NULL }
29133 /* Implement targetm.vectorize.builtin_vectorization_cost. */
29135 ix86_builtin_vectorization_cost (bool runtime_test)
29137 /* If the branch of the runtime test is taken, i.e. the vectorized
29138 version is skipped, this incurs a misprediction cost (because the
29139 vectorized version is expected to be the fall-through). So we subtract
29140 the latency of a mispredicted branch from the costs that are incurred
29141 when the vectorized version is executed.
29143 TODO: The values in individual target tables have to be tuned or new
29144 fields may be needed. E.g., on K8, the default branch path is the
29145 not-taken path. If the taken path is predicted correctly, the minimum
29146 penalty of going down the taken-path is 1 cycle. If the taken-path is
29147 not predicted correctly, then the minimum penalty is 10 cycles. */
29151 return (-(ix86_cost->cond_taken_branch_cost));
29157 /* Implement targetm.vectorize.builtin_vec_perm. */
29160 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
29162 tree itype = TREE_TYPE (vec_type);
29163 bool u = TYPE_UNSIGNED (itype);
29164 enum machine_mode vmode = TYPE_MODE (vec_type);
29165 enum ix86_builtins fcode = fcode; /* Silence bogus warning. */
29166 bool ok = TARGET_SSE2;
29172 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
29175 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
29177 itype = ix86_get_builtin_type (IX86_BT_DI);
29182 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
29186 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
29188 itype = ix86_get_builtin_type (IX86_BT_SI);
29192 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
29195 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
29198 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
29201 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
29211 *mask_type = itype;
29212 return ix86_builtins[(int) fcode];
29215 /* Return a vector mode with twice as many elements as VMODE. */
29216 /* ??? Consider moving this to a table generated by genmodes.c. */
29218 static enum machine_mode
29219 doublesize_vector_mode (enum machine_mode vmode)
29223 case V2SFmode: return V4SFmode;
29224 case V1DImode: return V2DImode;
29225 case V2SImode: return V4SImode;
29226 case V4HImode: return V8HImode;
29227 case V8QImode: return V16QImode;
29229 case V2DFmode: return V4DFmode;
29230 case V4SFmode: return V8SFmode;
29231 case V2DImode: return V4DImode;
29232 case V4SImode: return V8SImode;
29233 case V8HImode: return V16HImode;
29234 case V16QImode: return V32QImode;
29236 case V4DFmode: return V8DFmode;
29237 case V8SFmode: return V16SFmode;
29238 case V4DImode: return V8DImode;
29239 case V8SImode: return V16SImode;
29240 case V16HImode: return V32HImode;
29241 case V32QImode: return V64QImode;
29244 gcc_unreachable ();
29248 /* Construct (set target (vec_select op0 (parallel perm))) and
29249 return true if that's a valid instruction in the active ISA. */
29252 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
29254 rtx rperm[MAX_VECT_LEN], x;
29257 for (i = 0; i < nelt; ++i)
29258 rperm[i] = GEN_INT (perm[i]);
29260 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
29261 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
29262 x = gen_rtx_SET (VOIDmode, target, x);
29265 if (recog_memoized (x) < 0)
29273 /* Similar, but generate a vec_concat from op0 and op1 as well. */
29276 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
29277 const unsigned char *perm, unsigned nelt)
29279 enum machine_mode v2mode;
29282 v2mode = doublesize_vector_mode (GET_MODE (op0));
29283 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
29284 return expand_vselect (target, x, perm, nelt);
29287 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29288 in terms of blendp[sd] / pblendw / pblendvb. */
29291 expand_vec_perm_blend (struct expand_vec_perm_d *d)
29293 enum machine_mode vmode = d->vmode;
29294 unsigned i, mask, nelt = d->nelt;
29295 rtx target, op0, op1, x;
29297 if (!TARGET_SSE4_1 || d->op0 == d->op1)
29299 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
29302 /* This is a blend, not a permute. Elements must stay in their
29303 respective lanes. */
29304 for (i = 0; i < nelt; ++i)
29306 unsigned e = d->perm[i];
29307 if (!(e == i || e == i + nelt))
29314 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
29315 decision should be extracted elsewhere, so that we only try that
29316 sequence once all budget==3 options have been tried. */
29318 /* For bytes, see if bytes move in pairs so we can use pblendw with
29319 an immediate argument, rather than pblendvb with a vector argument. */
29320 if (vmode == V16QImode)
29322 bool pblendw_ok = true;
29323 for (i = 0; i < 16 && pblendw_ok; i += 2)
29324 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
29328 rtx rperm[16], vperm;
29330 for (i = 0; i < nelt; ++i)
29331 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
29333 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29334 vperm = force_reg (V16QImode, vperm);
29336 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
29341 target = d->target;
29353 for (i = 0; i < nelt; ++i)
29354 mask |= (d->perm[i] >= nelt) << i;
29358 for (i = 0; i < 2; ++i)
29359 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
29363 for (i = 0; i < 4; ++i)
29364 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
29368 for (i = 0; i < 8; ++i)
29369 mask |= (d->perm[i * 2] >= 16) << i;
29373 target = gen_lowpart (vmode, target);
29374 op0 = gen_lowpart (vmode, op0);
29375 op1 = gen_lowpart (vmode, op1);
29379 gcc_unreachable ();
29382 /* This matches five different patterns with the different modes. */
29383 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
29384 x = gen_rtx_SET (VOIDmode, target, x);
29390 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29391 in terms of the variable form of vpermilps.
29393 Note that we will have already failed the immediate input vpermilps,
29394 which requires that the high and low part shuffle be identical; the
29395 variable form doesn't require that. */
29398 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
29400 rtx rperm[8], vperm;
29403 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
29406 /* We can only permute within each 128-bit lane. */
29407 for (i = 0; i < 8; ++i)
29409 unsigned e = d->perm[i];
29410 if (i < 4 ? e >= 4 : e < 4)
29417 for (i = 0; i < 8; ++i)
29419 unsigned e = d->perm[i];
29421 /* Within each 128-bit lane, the elements of op0 are numbered
29422 from 0 and the elements of op1 are numbered from 4. */
29428 rperm[i] = GEN_INT (e);
29431 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
29432 vperm = force_reg (V8SImode, vperm);
29433 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
29438 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29439 in terms of pshufb or vpperm. */
29442 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
29444 unsigned i, nelt, eltsz;
29445 rtx rperm[16], vperm, target, op0, op1;
29447 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
29449 if (GET_MODE_SIZE (d->vmode) != 16)
29456 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29458 for (i = 0; i < nelt; ++i)
29460 unsigned j, e = d->perm[i];
29461 for (j = 0; j < eltsz; ++j)
29462 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
29465 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29466 vperm = force_reg (V16QImode, vperm);
29468 target = gen_lowpart (V16QImode, d->target);
29469 op0 = gen_lowpart (V16QImode, d->op0);
29470 if (d->op0 == d->op1)
29471 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
29474 op1 = gen_lowpart (V16QImode, d->op1);
29475 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
29481 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
29482 in a single instruction. */
29485 expand_vec_perm_1 (struct expand_vec_perm_d *d)
29487 unsigned i, nelt = d->nelt;
29488 unsigned char perm2[MAX_VECT_LEN];
29490 /* Check plain VEC_SELECT first, because AVX has instructions that could
29491 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
29492 input where SEL+CONCAT may not. */
29493 if (d->op0 == d->op1)
29495 int mask = nelt - 1;
29497 for (i = 0; i < nelt; i++)
29498 perm2[i] = d->perm[i] & mask;
29500 if (expand_vselect (d->target, d->op0, perm2, nelt))
29503 /* There are plenty of patterns in sse.md that are written for
29504 SEL+CONCAT and are not replicated for a single op. Perhaps
29505 that should be changed, to avoid the nastiness here. */
29507 /* Recognize interleave style patterns, which means incrementing
29508 every other permutation operand. */
29509 for (i = 0; i < nelt; i += 2)
29511 perm2[i] = d->perm[i] & mask;
29512 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
29514 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29517 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
29520 for (i = 0; i < nelt; i += 4)
29522 perm2[i + 0] = d->perm[i + 0] & mask;
29523 perm2[i + 1] = d->perm[i + 1] & mask;
29524 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
29525 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
29528 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29533 /* Finally, try the fully general two operand permute. */
29534 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
29537 /* Recognize interleave style patterns with reversed operands. */
29538 if (d->op0 != d->op1)
29540 for (i = 0; i < nelt; ++i)
29542 unsigned e = d->perm[i];
29550 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
29554 /* Try the SSE4.1 blend variable merge instructions. */
29555 if (expand_vec_perm_blend (d))
29558 /* Try one of the AVX vpermil variable permutations. */
29559 if (expand_vec_perm_vpermil (d))
29562 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
29563 if (expand_vec_perm_pshufb (d))
29569 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29570 in terms of a pair of pshuflw + pshufhw instructions. */
29573 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
29575 unsigned char perm2[MAX_VECT_LEN];
29579 if (d->vmode != V8HImode || d->op0 != d->op1)
29582 /* The two permutations only operate in 64-bit lanes. */
29583 for (i = 0; i < 4; ++i)
29584 if (d->perm[i] >= 4)
29586 for (i = 4; i < 8; ++i)
29587 if (d->perm[i] < 4)
29593 /* Emit the pshuflw. */
29594 memcpy (perm2, d->perm, 4);
29595 for (i = 4; i < 8; ++i)
29597 ok = expand_vselect (d->target, d->op0, perm2, 8);
29600 /* Emit the pshufhw. */
29601 memcpy (perm2 + 4, d->perm + 4, 4);
29602 for (i = 0; i < 4; ++i)
29604 ok = expand_vselect (d->target, d->target, perm2, 8);
29610 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29611 the permutation using the SSSE3 palignr instruction. This succeeds
29612 when all of the elements in PERM fit within one vector and we merely
29613 need to shift them down so that a single vector permutation has a
29614 chance to succeed. */
29617 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
29619 unsigned i, nelt = d->nelt;
29624 /* Even with AVX, palignr only operates on 128-bit vectors. */
29625 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29628 min = nelt, max = 0;
29629 for (i = 0; i < nelt; ++i)
29631 unsigned e = d->perm[i];
29637 if (min == 0 || max - min >= nelt)
29640 /* Given that we have SSSE3, we know we'll be able to implement the
29641 single operand permutation after the palignr with pshufb. */
29645 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
29646 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
29647 gen_lowpart (TImode, d->op1),
29648 gen_lowpart (TImode, d->op0), shift));
29650 d->op0 = d->op1 = d->target;
29653 for (i = 0; i < nelt; ++i)
29655 unsigned e = d->perm[i] - min;
29661 /* Test for the degenerate case where the alignment by itself
29662 produces the desired permutation. */
29666 ok = expand_vec_perm_1 (d);
29672 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29673 a two vector permutation into a single vector permutation by using
29674 an interleave operation to merge the vectors. */
29677 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
29679 struct expand_vec_perm_d dremap, dfinal;
29680 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
29681 unsigned contents, h1, h2, h3, h4;
29682 unsigned char remap[2 * MAX_VECT_LEN];
29686 if (d->op0 == d->op1)
29689 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
29690 lanes. We can use similar techniques with the vperm2f128 instruction,
29691 but it requires slightly different logic. */
29692 if (GET_MODE_SIZE (d->vmode) != 16)
29695 /* Examine from whence the elements come. */
29697 for (i = 0; i < nelt; ++i)
29698 contents |= 1u << d->perm[i];
29700 /* Split the two input vectors into 4 halves. */
29701 h1 = (1u << nelt2) - 1;
29706 memset (remap, 0xff, sizeof (remap));
29709 /* If the elements are all from the low halves, use interleave low;
29710 similarly for interleave high. If the elements are from mis-matched
29711 halves, we can use shufps for V4SF/V4SI or do a DImode shuffle. */
29712 if ((contents & (h1 | h3)) == contents)
29714 for (i = 0; i < nelt2; ++i)
29717 remap[i + nelt] = i * 2 + 1;
29718 dremap.perm[i * 2] = i;
29719 dremap.perm[i * 2 + 1] = i + nelt;
29722 else if ((contents & (h2 | h4)) == contents)
29724 for (i = 0; i < nelt2; ++i)
29726 remap[i + nelt2] = i * 2;
29727 remap[i + nelt + nelt2] = i * 2 + 1;
29728 dremap.perm[i * 2] = i + nelt2;
29729 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
29732 else if ((contents & (h1 | h4)) == contents)
29734 for (i = 0; i < nelt2; ++i)
29737 remap[i + nelt + nelt2] = i + nelt2;
29738 dremap.perm[i] = i;
29739 dremap.perm[i + nelt2] = i + nelt + nelt2;
29743 dremap.vmode = V2DImode;
29745 dremap.perm[0] = 0;
29746 dremap.perm[1] = 3;
29749 else if ((contents & (h2 | h3)) == contents)
29751 for (i = 0; i < nelt2; ++i)
29753 remap[i + nelt2] = i;
29754 remap[i + nelt] = i + nelt2;
29755 dremap.perm[i] = i + nelt2;
29756 dremap.perm[i + nelt2] = i + nelt;
29760 dremap.vmode = V2DImode;
29762 dremap.perm[0] = 1;
29763 dremap.perm[1] = 2;
29769 /* Use the remapping array set up above to move the elements from their
29770 swizzled locations into their final destinations. */
29772 for (i = 0; i < nelt; ++i)
29774 unsigned e = remap[d->perm[i]];
29775 gcc_assert (e < nelt);
29776 dfinal.perm[i] = e;
29778 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
29779 dfinal.op1 = dfinal.op0;
29780 dremap.target = dfinal.op0;
29782 /* Test if the final remap can be done with a single insn. For V4SFmode or
29783 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
29785 ok = expand_vec_perm_1 (&dfinal);
29786 seq = get_insns ();
29792 if (dremap.vmode != dfinal.vmode)
29794 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
29795 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
29796 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
29799 ok = expand_vec_perm_1 (&dremap);
29806 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
29807 permutation with two pshufb insns and an ior. We should have already
29808 failed all two instruction sequences. */
29811 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
29813 rtx rperm[2][16], vperm, l, h, op, m128;
29814 unsigned int i, nelt, eltsz;
29816 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29818 gcc_assert (d->op0 != d->op1);
29821 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29823 /* Generate two permutation masks. If the required element is within
29824 the given vector it is shuffled into the proper lane. If the required
29825 element is in the other vector, force a zero into the lane by setting
29826 bit 7 in the permutation mask. */
29827 m128 = GEN_INT (-128);
29828 for (i = 0; i < nelt; ++i)
29830 unsigned j, e = d->perm[i];
29831 unsigned which = (e >= nelt);
29835 for (j = 0; j < eltsz; ++j)
29837 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
29838 rperm[1-which][i*eltsz + j] = m128;
29842 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
29843 vperm = force_reg (V16QImode, vperm);
29845 l = gen_reg_rtx (V16QImode);
29846 op = gen_lowpart (V16QImode, d->op0);
29847 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
29849 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
29850 vperm = force_reg (V16QImode, vperm);
29852 h = gen_reg_rtx (V16QImode);
29853 op = gen_lowpart (V16QImode, d->op1);
29854 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
29856 op = gen_lowpart (V16QImode, d->target);
29857 emit_insn (gen_iorv16qi3 (op, l, h));
29862 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
29863 and extract-odd permutations. */
29866 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
29868 rtx t1, t2, t3, t4;
29873 t1 = gen_reg_rtx (V4DFmode);
29874 t2 = gen_reg_rtx (V4DFmode);
29876 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
29877 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
29878 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
29880 /* Now an unpck[lh]pd will produce the result required. */
29882 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
29884 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
29890 static const unsigned char perm1[8] = { 0, 2, 1, 3, 4, 6, 5, 7 };
29891 static const unsigned char perme[8] = { 0, 1, 8, 9, 4, 5, 12, 13 };
29892 static const unsigned char permo[8] = { 2, 3, 10, 11, 6, 7, 14, 15 };
29894 t1 = gen_reg_rtx (V8SFmode);
29895 t2 = gen_reg_rtx (V8SFmode);
29896 t3 = gen_reg_rtx (V8SFmode);
29897 t4 = gen_reg_rtx (V8SFmode);
29899 /* Shuffle within the 128-bit lanes to produce:
29900 { 0 2 1 3 4 6 5 7 } and { 8 a 9 b c e d f }. */
29901 expand_vselect (t1, d->op0, perm1, 8);
29902 expand_vselect (t2, d->op1, perm1, 8);
29904 /* Shuffle the lanes around to produce:
29905 { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }. */
29906 emit_insn (gen_avx_vperm2f128v8sf3 (t3, t1, t2, GEN_INT (0x20)));
29907 emit_insn (gen_avx_vperm2f128v8sf3 (t4, t1, t2, GEN_INT (0x31)));
29909 /* Now a vpermil2p will produce the result required. */
29910 /* ??? The vpermil2p requires a vector constant. Another option
29911 is an unpck[lh]ps to merge the two vectors to produce
29912 { 0 4 2 6 8 c a e } or { 1 5 3 7 9 d b f }. Then use another
29913 vpermilps to get the elements into the final order. */
29916 memcpy (d->perm, odd ? permo: perme, 8);
29917 expand_vec_perm_vpermil (d);
29925 /* These are always directly implementable by expand_vec_perm_1. */
29926 gcc_unreachable ();
29930 return expand_vec_perm_pshufb2 (d);
29933 /* We need 2*log2(N)-1 operations to achieve odd/even
29934 with interleave. */
29935 t1 = gen_reg_rtx (V8HImode);
29936 t2 = gen_reg_rtx (V8HImode);
29937 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
29938 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
29939 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
29940 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
29942 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
29944 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
29951 return expand_vec_perm_pshufb2 (d);
29954 t1 = gen_reg_rtx (V16QImode);
29955 t2 = gen_reg_rtx (V16QImode);
29956 t3 = gen_reg_rtx (V16QImode);
29957 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
29958 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
29959 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
29960 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
29961 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
29962 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
29964 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
29966 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
29972 gcc_unreachable ();
29978 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
29979 extract-even and extract-odd permutations. */
29982 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
29984 unsigned i, odd, nelt = d->nelt;
29987 if (odd != 0 && odd != 1)
29990 for (i = 1; i < nelt; ++i)
29991 if (d->perm[i] != 2 * i + odd)
29994 return expand_vec_perm_even_odd_1 (d, odd);
29997 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
29998 permutations. We assume that expand_vec_perm_1 has already failed. */
30001 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
30003 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
30004 enum machine_mode vmode = d->vmode;
30005 unsigned char perm2[4];
30013 /* These are special-cased in sse.md so that we can optionally
30014 use the vbroadcast instruction. They expand to two insns
30015 if the input happens to be in a register. */
30016 gcc_unreachable ();
30022 /* These are always implementable using standard shuffle patterns. */
30023 gcc_unreachable ();
30027 /* These can be implemented via interleave. We save one insn by
30028 stopping once we have promoted to V4SImode and then using pshufd. */
30031 optab otab = vec_interleave_low_optab;
30035 otab = vec_interleave_high_optab;
30040 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
30041 vmode = get_mode_wider_vector (vmode);
30042 op0 = gen_lowpart (vmode, op0);
30044 while (vmode != V4SImode);
30046 memset (perm2, elt, 4);
30047 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
30052 gcc_unreachable ();
30056 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
30057 broadcast permutations. */
30060 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
30062 unsigned i, elt, nelt = d->nelt;
30064 if (d->op0 != d->op1)
30068 for (i = 1; i < nelt; ++i)
30069 if (d->perm[i] != elt)
30072 return expand_vec_perm_broadcast_1 (d);
30075 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
30076 With all of the interface bits taken care of, perform the expansion
30077 in D and return true on success. */
30080 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
30082 /* Try a single instruction expansion. */
30083 if (expand_vec_perm_1 (d))
30086 /* Try sequences of two instructions. */
30088 if (expand_vec_perm_pshuflw_pshufhw (d))
30091 if (expand_vec_perm_palignr (d))
30094 if (expand_vec_perm_interleave2 (d))
30097 if (expand_vec_perm_broadcast (d))
30100 /* Try sequences of three instructions. */
30102 if (expand_vec_perm_pshufb2 (d))
30105 /* ??? Look for narrow permutations whose element orderings would
30106 allow the promotion to a wider mode. */
30108 /* ??? Look for sequences of interleave or a wider permute that place
30109 the data into the correct lanes for a half-vector shuffle like
30110 pshuf[lh]w or vpermilps. */
30112 /* ??? Look for sequences of interleave that produce the desired results.
30113 The combinatorics of punpck[lh] get pretty ugly... */
30115 if (expand_vec_perm_even_odd (d))
30121 /* Extract the values from the vector CST into the permutation array in D.
30122 Return 0 on error, 1 if all values from the permutation come from the
30123 first vector, 2 if all values from the second vector, and 3 otherwise. */
30126 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
30128 tree list = TREE_VECTOR_CST_ELTS (cst);
30129 unsigned i, nelt = d->nelt;
30132 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
30134 unsigned HOST_WIDE_INT e;
30136 if (!host_integerp (TREE_VALUE (list), 1))
30138 e = tree_low_cst (TREE_VALUE (list), 1);
30142 ret |= (e < nelt ? 1 : 2);
30145 gcc_assert (list == NULL);
30147 /* For all elements from the second vector, fold them to the first. */
30149 for (i = 0; i < nelt; ++i)
30150 d->perm[i] -= nelt;
30156 ix86_expand_vec_perm_builtin (tree exp)
30158 struct expand_vec_perm_d d;
30159 tree arg0, arg1, arg2;
30161 arg0 = CALL_EXPR_ARG (exp, 0);
30162 arg1 = CALL_EXPR_ARG (exp, 1);
30163 arg2 = CALL_EXPR_ARG (exp, 2);
30165 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
30166 d.nelt = GET_MODE_NUNITS (d.vmode);
30167 d.testing_p = false;
30168 gcc_assert (VECTOR_MODE_P (d.vmode));
30170 if (TREE_CODE (arg2) != VECTOR_CST)
30172 error_at (EXPR_LOCATION (exp),
30173 "vector permutation requires vector constant");
30177 switch (extract_vec_perm_cst (&d, arg2))
30183 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
30187 if (!operand_equal_p (arg0, arg1, 0))
30189 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30190 d.op0 = force_reg (d.vmode, d.op0);
30191 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30192 d.op1 = force_reg (d.vmode, d.op1);
30196 /* The elements of PERM do not suggest that only the first operand
30197 is used, but both operands are identical. Allow easier matching
30198 of the permutation by folding the permutation into the single
30201 unsigned i, nelt = d.nelt;
30202 for (i = 0; i < nelt; ++i)
30203 if (d.perm[i] >= nelt)
30209 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30210 d.op0 = force_reg (d.vmode, d.op0);
30215 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30216 d.op0 = force_reg (d.vmode, d.op0);
30221 d.target = gen_reg_rtx (d.vmode);
30222 if (ix86_expand_vec_perm_builtin_1 (&d))
30225 /* For compiler-generated permutations we should never get here, because
30226 the compiler should also be checking the ok hook. But since this is a
30227 builtin the user has access to, don't abort. */
30231 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
30234 sorry ("vector permutation (%d %d %d %d)",
30235 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
30238 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
30239 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30240 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
30243 sorry ("vector permutation "
30244 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
30245 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30246 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
30247 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
30248 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
30251 gcc_unreachable ();
30254 return CONST0_RTX (d.vmode);
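
/* Editor's usage sketch (not part of the original source, not compiled):
   how the expander above is reached from user code, assuming the
   __builtin_ia32_vec_perm_v4si builtin that this file registers for the
   vectorizer.  The selector must fold to a vector constant, or the
   error_at path above is taken and CONST0_RTX is returned.  */
#if 0
typedef int v4si __attribute__ ((vector_size (16)));

v4si
example_reverse (v4si x)
{
  const v4si sel = { 3, 2, 1, 0 };  /* all indices < nelt: case 1 above */
  return __builtin_ia32_vec_perm_v4si (x, x, sel);
}
#endif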
/* Implement targetm.vectorize.builtin_vec_perm_ok.  */

static bool
ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
{
  struct expand_vec_perm_d d;
  int vec_mask;
  bool ret, one_vec;

  d.vmode = TYPE_MODE (vec_type);
  d.nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = true;

  /* Given sufficient ISA support we can just return true here
     for selected vector modes.  */
  if (GET_MODE_SIZE (d.vmode) == 16)
    {
      /* All implementable with a single vpperm insn.  */
      if (TARGET_XOP)
	return true;
      /* All implementable with 2 pshufb + 1 ior.  */
      if (TARGET_SSSE3)
	return true;
      /* All implementable with shufpd or unpck[lh]pd.  */
      if (d.nelt == 2)
	return true;
    }

  vec_mask = extract_vec_perm_cst (&d, mask);

  /* This hook cannot be called in response to something that the
     user does (unlike the builtin expander) so we shouldn't ever see
     an error generated from the extract.  */
  gcc_assert (vec_mask > 0 && vec_mask <= 3);
  one_vec = (vec_mask != 3);

  /* Implementable with shufps or pshufd.  */
  if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
    return true;

  /* Otherwise we have to go through the motions and see if we can
     figure out how to generate the requested permutation.  */
  d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
  d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
  if (!one_vec)
    d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);

  start_sequence ();
  ret = ix86_expand_vec_perm_builtin_1 (&d);
  end_sequence ();

  return ret;
}
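
/* Editor's note (illustrative, not compiled): the dry-run idiom used
   above in miniature.  Diverting emission into a temporary sequence and
   then discarding it lets the hook reuse the real expander purely as a
   predicate; d->testing_p additionally stops most code generation early.  */
#if 0
static bool
example_can_expand_p (struct expand_vec_perm_d *d)
{
  bool ok;

  start_sequence ();		/* divert insn emission */
  ok = ix86_expand_vec_perm_builtin_1 (d);
  end_sequence ();		/* discard anything that was emitted */

  return ok;
}
#endif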
void
ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
{
  struct expand_vec_perm_d d;
  unsigned i, nelt;

  d.target = targ;
  d.op0 = op0;
  d.op1 = op1;
  d.vmode = GET_MODE (targ);
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = false;

  for (i = 0; i < nelt; ++i)
    d.perm[i] = i * 2 + odd;

  /* We'll either be able to implement the permutation directly...  */
  if (expand_vec_perm_1 (&d))
    return;

  /* ... or we use the special-case patterns.  */
  expand_vec_perm_even_odd_1 (&d, odd);
}
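
/* Editor's illustration (not part of the original source, not compiled):
   the perm[] built above treats op0:op1 as one double-width vector.  For
   a V4SI target, odd == 0 selects elements {0, 2, 4, 6} and odd == 1
   selects {1, 3, 5, 7}.  A scalar model with hypothetical names:  */
#if 0
static void
example_extract_even_odd (const int *op0, const int *op1,
                          int *dst, unsigned odd)
{
  unsigned i;
  for (i = 0; i < 4; ++i)
    {
      unsigned e = i * 2 + odd;
      dst[i] = e < 4 ? op0[e] : op1[e - 4];
    }
}
#endif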
/* This function returns the calling-ABI-specific va_list type node:
   the va_list type appropriate for FNDECL.  */

static tree
ix86_fn_abi_va_list (tree fndecl)
{
  if (!TARGET_64BIT)
    return va_list_type_node;
  gcc_assert (fndecl != NULL_TREE);

  if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
    return ms_va_list_type_node;
  else
    return sysv_va_list_type_node;
}
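
/* Editor's illustration (not part of the original source, not compiled):
   why the hook is keyed on FNDECL.  On x86_64 a function can select its
   calling convention per declaration, and the two conventions use
   different va_list representations (a plain pointer for MS_ABI, an
   array of records for the SysV ABI), so the right type depends on the
   declaration at hand:  */
#if 0
void f_ms (int n, ...) __attribute__ ((ms_abi));     /* ms_va_list */
void f_sysv (int n, ...) __attribute__ ((sysv_abi)); /* sysv_va_list */
#endif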
/* Returns the canonical va_list type specified by TYPE.  If there
   is no valid TYPE provided, it returns NULL_TREE.  */

static tree
ix86_canonical_va_list_type (tree type)
{
  tree wtype, htype;

  /* Resolve references and pointers to va_list type.  */
  if (INDIRECT_REF_P (type))
    type = TREE_TYPE (type);
  else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE (type)))
    type = TREE_TYPE (type);

  if (TARGET_64BIT)
    {
      wtype = va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
	  /* If va_list is an array type, the argument may have decayed
	     to a pointer type, e.g. by being passed to another function.
	     In that case, unwrap both types so that we can compare the
	     underlying records.  */
	  if (TREE_CODE (htype) == ARRAY_TYPE
	      || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
	return va_list_type_node;
      wtype = sysv_va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
	  /* If va_list is an array type, the argument may have decayed
	     to a pointer type, e.g. by being passed to another function.
	     In that case, unwrap both types so that we can compare the
	     underlying records.  */
	  if (TREE_CODE (htype) == ARRAY_TYPE
	      || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
	return sysv_va_list_type_node;
      wtype = ms_va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
	  /* If va_list is an array type, the argument may have decayed
	     to a pointer type, e.g. by being passed to another function.
	     In that case, unwrap both types so that we can compare the
	     underlying records.  */
	  if (TREE_CODE (htype) == ARRAY_TYPE
	      || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
	return ms_va_list_type_node;
    }
  return std_canonical_va_list_type (type);
}
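
/* Editor's illustration (not part of the original source, not compiled)
   of the array-decay case unwrapped above: the SysV va_list is an array
   type, so when it is passed on to another function it arrives as a
   pointer, and both sides must be unwrapped before the underlying record
   types can be compared.  */
#if 0
#include <stdarg.h>

static int
example_sum (va_list ap, int n)  /* the array type decays to a pointer */
{
  int s = 0;
  while (n-- > 0)
    s += va_arg (ap, int);
  return s;
}
#endif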
/* Iterate through the target-specific builtin types for va_list.
   IDX denotes the iterator, *PTREE is set to the result type of
   the va_list builtin, and *PNAME to its internal name.
   Returns zero if there is no element for this index, otherwise
   IDX should be increased upon the next call.
   Note, do not iterate a base builtin's name like __builtin_va_list.
   Used from c_common_nodes_and_builtins.  */

static int
ix86_enum_va_list (int idx, const char **pname, tree *ptree)
{
  if (!TARGET_64BIT)
    return 0;

  switch (idx)
    {
    default:
      break;

    case 0:
      *ptree = ms_va_list_type_node;
      *pname = "__builtin_ms_va_list";
      return 1;

    case 1:
      *ptree = sysv_va_list_type_node;
      *pname = "__builtin_sysv_va_list";
      return 1;
    }

  return 0;
}
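
/* Editor's sketch (not part of the original source, not compiled) of how
   a front end consumes this iterator, modeled on the loop in
   c_common_nodes_and_builtins; record_builtin_va_type is a hypothetical
   helper standing in for the front end's registration routine.  */
#if 0
static void
example_register_target_va_lists (void)
{
  const char *name;
  tree type;
  int idx;

  for (idx = 0; ix86_enum_va_list (idx, &name, &type); ++idx)
    record_builtin_va_type (type, name);
}
#endif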
/* Initialize the GCC target structure.  */
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ix86_return_in_memory

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#  undef TARGET_MERGE_DECL_ATTRIBUTES
#  define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ix86_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL ix86_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ix86_expand_builtin

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  ix86_builtin_vectorized_function

#undef TARGET_VECTORIZE_BUILTIN_CONVERSION
#define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue

#undef TARGET_ENCODE_SECTION_INFO
#ifndef SUBTARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
#endif

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""
#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP ASM_BYTE

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
#ifdef ASM_QUAD
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  ia32_multipass_dfa_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
#endif

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START x86_file_start

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS	\
  (TARGET_DEFAULT			\
   | TARGET_SUBTARGET_DEFAULT		\
   | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT	\
   | MASK_FUSED_MADD)

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION ix86_handle_option

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ix86_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST ix86_address_cost

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list

#undef TARGET_FN_ABI_VA_LIST
#define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list

#undef TARGET_CANONICAL_VA_LIST_TYPE
#define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
#undef TARGET_UPDATE_STACK_BOUNDARY
#define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
#undef TARGET_GET_DRAP_RTX
#define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_STATIC_CHAIN
#define TARGET_STATIC_CHAIN ix86_static_chain
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT ix86_trampoline_init

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
#endif

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE ix86_mangle_type

#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE ix86_function_value

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD ix86_secondary_reload

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  ix86_builtin_vectorization_cost
#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
  ix86_vectorize_builtin_vec_perm
#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
  ix86_vectorize_builtin_vec_perm_ok

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE ix86_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE ix86_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT ix86_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P ix86_can_inline_p

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p

#undef TARGET_IRA_COVER_CLASSES
#define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE ix86_can_eliminate

#undef TARGET_ASM_CODE_END
#define TARGET_ASM_CODE_END ix86_code_end

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-i386.h"