/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "basic-block.h"
#include "target-def.h"
#include "langhooks.h"
#include "tm-constrs.h"
#include "dwarf2out.h"
static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif

/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode)				\
  ((mode) == QImode ? 0					\
   : (mode) == HImode ? 1				\
   : (mode) == SImode ? 2				\
   : (mode) == DImode ? 3				\
   : 4)
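/* Illustrative sketch (not part of the original file): how MODE_INDEX is
   typically combined with the per-mode cost tables defined below when
   pricing an operation.  The field name "mult_init" is an assumption about
   the struct processor_costs layout in i386.h, matching the "cost of
   starting multiply" tables in the initializers that follow.  */
static inline int
example_mult_start_cost (const struct processor_costs *cost,
			 enum machine_mode mode)
{
  /* Indexes 0..3 select QI/HI/SI/DImode; 4 is the "other" bucket.  */
  return cost->mult_init[MODE_INDEX (mode)];
}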
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)

#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
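/* Sketch (an assumption about struct stringop_algs from i386.h: an
   algorithm for unknown sizes plus a table of {max, alg} pairs terminated
   by a {-1, alg} entry, as the initializers below suggest): choose the
   string-operation algorithm for a block of known SIZE.  */
static enum stringop_alg
example_choose_alg (const struct stringop_algs *algs, HOST_WIDE_INT size)
{
  int i = 0;
  /* Scan forward until SIZE fits under an entry's max.  */
  while (algs->size[i].max != -1 && size > algs->size[i].max)
    i++;
  return algs->size[i].alg;	/* the {-1, alg} entry catches the rest */
}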
struct processor_costs ix86_size_cost = {/* costs for tuning for size */
  COSTS_N_BYTES (2),		/* cost of an add instruction */
  COSTS_N_BYTES (3),		/* cost of a lea instruction */
  COSTS_N_BYTES (2),		/* variable shift costs */
  COSTS_N_BYTES (3),		/* constant shift costs */
  {COSTS_N_BYTES (3),		/* cost of starting multiply for QI */
   COSTS_N_BYTES (3),		/*                               HI */
   COSTS_N_BYTES (3),		/*                               SI */
   COSTS_N_BYTES (3),		/*                               DI */
   COSTS_N_BYTES (5)},		/*                            other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),		/* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),		/*                          HI */
   COSTS_N_BYTES (3),		/*                          SI */
   COSTS_N_BYTES (3),		/*                          DI */
   COSTS_N_BYTES (5)},		/*                          other */
  COSTS_N_BYTES (3),		/* cost of movsx */
  COSTS_N_BYTES (3),		/* cost of movzx */
  2,				/* cost for loading QImode using movzbl */
  {2, 2, 2},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 2, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {2, 2, 2},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {2, 2, 2},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  3,				/* cost of moving MMX register */
  {3, 3},			/* cost of loading MMX registers
				   in SImode and DImode */
  {3, 3},			/* cost of storing MMX registers
				   in SImode and DImode */
  3,				/* cost of moving SSE register */
  {3, 3, 3},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {3, 3, 3},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  3,				/* MMX or SSE register to integer */
  0,				/* size of l1 cache  */
  0,				/* size of l2 cache  */
  0,				/* size of prefetch block */
  0,				/* number of parallel prefetches */
  COSTS_N_BYTES (2),		/* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),		/* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),		/* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),		/* cost of FABS instruction.  */
  COSTS_N_BYTES (2),		/* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),		/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar_load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  1,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  1,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};
/* Processor costs (relative to an add) */
struct processor_costs i386_cost = {	/* 386 specific costs */
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1),		/* cost of a lea instruction */
  COSTS_N_INSNS (3),		/* variable shift costs */
  COSTS_N_INSNS (2),		/* constant shift costs */
  {COSTS_N_INSNS (6),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (6),		/*                               HI */
   COSTS_N_INSNS (6),		/*                               SI */
   COSTS_N_INSNS (6),		/*                               DI */
   COSTS_N_INSNS (6)},		/*                            other */
  COSTS_N_INSNS (1),		/* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),		/*                          HI */
   COSTS_N_INSNS (23),		/*                          SI */
   COSTS_N_INSNS (23),		/*                          DI */
   COSTS_N_INSNS (23)},		/*                          other */
  COSTS_N_INSNS (3),		/* cost of movsx */
  COSTS_N_INSNS (2),		/* cost of movzx */
  15,				/* "large" insn */
  4,				/* cost for loading QImode using movzbl */
  {2, 4, 2},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 4, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {8, 8, 8},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {8, 8, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {4, 8},			/* cost of loading MMX registers
				   in SImode and DImode */
  {4, 8},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {4, 8, 16},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 8, 16},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  3,				/* MMX or SSE register to integer */
  0,				/* size of l1 cache  */
  0,				/* size of l2 cache  */
  0,				/* size of prefetch block */
  0,				/* number of parallel prefetches */
  COSTS_N_INSNS (23),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (24),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),		/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar_load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};
struct processor_costs i486_cost = {	/* 486 specific costs */
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1),		/* cost of a lea instruction */
  COSTS_N_INSNS (3),		/* variable shift costs */
  COSTS_N_INSNS (2),		/* constant shift costs */
  {COSTS_N_INSNS (12),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (12),		/*                               HI */
   COSTS_N_INSNS (12),		/*                               SI */
   COSTS_N_INSNS (12),		/*                               DI */
   COSTS_N_INSNS (12)},		/*                            other */
  1,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),		/*                          HI */
   COSTS_N_INSNS (40),		/*                          SI */
   COSTS_N_INSNS (40),		/*                          DI */
   COSTS_N_INSNS (40)},		/*                          other */
  COSTS_N_INSNS (3),		/* cost of movsx */
  COSTS_N_INSNS (2),		/* cost of movzx */
  15,				/* "large" insn */
  4,				/* cost for loading QImode using movzbl */
  {2, 4, 2},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 4, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {8, 8, 8},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {8, 8, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {4, 8},			/* cost of loading MMX registers
				   in SImode and DImode */
  {4, 8},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {4, 8, 16},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 8, 16},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  3,				/* MMX or SSE register to integer */
  4,				/* size of l1 cache.  486 has 8kB cache
				   shared for code and data, so 4kB is
				   not really precise.  */
  4,				/* size of l2 cache  */
  0,				/* size of prefetch block */
  0,				/* number of parallel prefetches */
  COSTS_N_INSNS (8),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),		/* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar_load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1),		/* cost of a lea instruction */
  COSTS_N_INSNS (4),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (11),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (11),		/*                               HI */
   COSTS_N_INSNS (11),		/*                               SI */
   COSTS_N_INSNS (11),		/*                               DI */
   COSTS_N_INSNS (11)},		/*                            other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),		/*                          HI */
   COSTS_N_INSNS (25),		/*                          SI */
   COSTS_N_INSNS (25),		/*                          DI */
   COSTS_N_INSNS (25)},		/*                          other */
  COSTS_N_INSNS (3),		/* cost of movsx */
  COSTS_N_INSNS (2),		/* cost of movzx */
  8,				/* "large" insn */
  6,				/* cost for loading QImode using movzbl */
  {2, 4, 2},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 4, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {2, 2, 6},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 4, 6},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  8,				/* cost of moving MMX register */
  {8, 8},			/* cost of loading MMX registers
				   in SImode and DImode */
  {8, 8},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {4, 8, 16},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 8, 16},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  3,				/* MMX or SSE register to integer */
  8,				/* size of l1 cache.  */
  8,				/* size of l2 cache  */
  0,				/* size of prefetch block */
  0,				/* number of parallel prefetches */
  COSTS_N_INSNS (3),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),		/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar_load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1),		/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (4),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),		/*                               HI */
   COSTS_N_INSNS (4),		/*                               SI */
   COSTS_N_INSNS (4),		/*                               DI */
   COSTS_N_INSNS (4)},		/*                            other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),		/*                          HI */
   COSTS_N_INSNS (17),		/*                          SI */
   COSTS_N_INSNS (17),		/*                          DI */
   COSTS_N_INSNS (17)},		/*                          other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  2,				/* cost for loading QImode using movzbl */
  {4, 4, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 2, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {2, 2, 6},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 4, 6},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {2, 2},			/* cost of loading MMX registers
				   in SImode and DImode */
  {2, 2},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {2, 2, 8},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {2, 2, 8},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  3,				/* MMX or SSE register to integer */
  8,				/* size of l1 cache.  */
  256,				/* size of l2 cache  */
  32,				/* size of prefetch block */
  6,				/* number of parallel prefetches */
  COSTS_N_INSNS (3),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),		/* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks the inline loop is still a
     noticeable win; for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb apparently has a more expensive startup time on
     the CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar_load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1),		/* cost of a lea instruction */
  COSTS_N_INSNS (2),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (3),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),		/*                               HI */
   COSTS_N_INSNS (7),		/*                               SI */
   COSTS_N_INSNS (7),		/*                               DI */
   COSTS_N_INSNS (7)},		/*                            other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),		/*                          HI */
   COSTS_N_INSNS (39),		/*                          SI */
   COSTS_N_INSNS (39),		/*                          DI */
   COSTS_N_INSNS (39)},		/*                          other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  1,				/* cost for loading QImode using movzbl */
  {1, 1, 1},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {1, 1, 1},			/* cost of storing integer registers */
  1,				/* cost of reg,reg fld/fst */
  {1, 1, 1},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 6, 6},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */

  1,				/* cost of moving MMX register */
  {1, 1},			/* cost of loading MMX registers
				   in SImode and DImode */
  {1, 1},			/* cost of storing MMX registers
				   in SImode and DImode */
  1,				/* cost of moving SSE register */
  {1, 1, 1},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {1, 1, 1},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  1,				/* MMX or SSE register to integer */
  64,				/* size of l1 cache.  */
  128,				/* size of l2 cache.  */
  32,				/* size of prefetch block */
  1,				/* number of parallel prefetches */
  COSTS_N_INSNS (6),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),		/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar_load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (2),		/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (3),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),		/*                               HI */
   COSTS_N_INSNS (3),		/*                               SI */
   COSTS_N_INSNS (3),		/*                               DI */
   COSTS_N_INSNS (3)},		/*                            other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),		/*                          HI */
   COSTS_N_INSNS (18),		/*                          SI */
   COSTS_N_INSNS (18),		/*                          DI */
   COSTS_N_INSNS (18)},		/*                          other */
  COSTS_N_INSNS (2),		/* cost of movsx */
  COSTS_N_INSNS (2),		/* cost of movzx */
  8,				/* "large" insn */
  3,				/* cost for loading QImode using movzbl */
  {4, 5, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 3, 2},			/* cost of storing integer registers */
  4,				/* cost of reg,reg fld/fst */
  {6, 6, 6},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 4, 4},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {2, 2},			/* cost of loading MMX registers
				   in SImode and DImode */
  {2, 2},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {2, 2, 8},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {2, 2, 8},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  6,				/* MMX or SSE register to integer */
  32,				/* size of l1 cache.  */
  32,				/* size of l2 cache.  Some models
				   have integrated l2 cache, but
				   optimizing for k6 is not important
				   enough to worry about that.  */
  32,				/* size of prefetch block */
  1,				/* number of parallel prefetches */
  COSTS_N_INSNS (2),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),		/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar_load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (2),		/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (5),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (5),		/*                               HI */
   COSTS_N_INSNS (5),		/*                               SI */
   COSTS_N_INSNS (5),		/*                               DI */
   COSTS_N_INSNS (5)},		/*                            other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),		/*                          HI */
   COSTS_N_INSNS (42),		/*                          SI */
   COSTS_N_INSNS (74),		/*                          DI */
   COSTS_N_INSNS (74)},		/*                          other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  4,				/* cost for loading QImode using movzbl */
  {3, 4, 3},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {3, 4, 3},			/* cost of storing integer registers */
  4,				/* cost of reg,reg fld/fst */
  {4, 4, 12},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {6, 6, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {4, 4},			/* cost of loading MMX registers
				   in SImode and DImode */
  {4, 4},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {4, 4, 6},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 4, 5},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  5,				/* MMX or SSE register to integer */
  64,				/* size of l1 cache.  */
  256,				/* size of l2 cache.  */
  64,				/* size of prefetch block */
  6,				/* number of parallel prefetches */
  COSTS_N_INSNS (4),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),		/* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with the REP prefix (relative to
     loops) than K8.  Alignment becomes important after 8 bytes for memcpy
     and 128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar_load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (2),		/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (3),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),		/*                               HI */
   COSTS_N_INSNS (3),		/*                               SI */
   COSTS_N_INSNS (4),		/*                               DI */
   COSTS_N_INSNS (5)},		/*                            other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),		/*                          HI */
   COSTS_N_INSNS (42),		/*                          SI */
   COSTS_N_INSNS (74),		/*                          DI */
   COSTS_N_INSNS (74)},		/*                          other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  4,				/* cost for loading QImode using movzbl */
  {3, 4, 3},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {3, 4, 3},			/* cost of storing integer registers */
  4,				/* cost of reg,reg fld/fst */
  {4, 4, 12},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {6, 6, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {3, 3},			/* cost of loading MMX registers
				   in SImode and DImode */
  {4, 4},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {4, 3, 6},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 4, 5},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  5,				/* MMX or SSE register to integer */
  64,				/* size of l1 cache.  */
  512,				/* size of l2 cache.  */
  64,				/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it probably is not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,				/* number of parallel prefetches */
  COSTS_N_INSNS (4),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),		/* cost of FSQRT instruction.  */
  /* K8 has an optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use a loop.  For large blocks, libcall
     can do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,				/* scalar_stmt_cost.  */
  2,				/* scalar_load_cost.  */
  2,				/* scalar_store_cost.  */
  5,				/* vec_stmt_cost.  */
  0,				/* vec_to_scalar_cost.  */
  2,				/* scalar_to_vec_cost.  */
  2,				/* vec_align_load_cost.  */
  3,				/* vec_unalign_load_cost.  */
  3,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  2,				/* cond_not_taken_branch_cost.  */
};
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (2),		/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (3),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),		/*                               HI */
   COSTS_N_INSNS (3),		/*                               SI */
   COSTS_N_INSNS (4),		/*                               DI */
   COSTS_N_INSNS (5)},		/*                            other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),		/*                          HI */
   COSTS_N_INSNS (51),		/*                          SI */
   COSTS_N_INSNS (83),		/*                          DI */
   COSTS_N_INSNS (83)},		/*                          other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  4,				/* cost for loading QImode using movzbl */
  {3, 4, 3},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {3, 4, 3},			/* cost of storing integer registers */
  4,				/* cost of reg,reg fld/fst */
  {4, 4, 12},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {6, 6, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {3, 3},			/* cost of loading MMX registers
				   in SImode and DImode */
  {4, 4},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {4, 4, 3},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 4, 5},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  3,				/* MMX or SSE register to integer */
				/* On K8:
				    MOVD reg64, xmmreg  Double  FSTORE 4
				    MOVD reg32, xmmreg  Double  FSTORE 4
				   On AMDFAM10:
				    MOVD reg64, xmmreg  Double  FADD 3
				    MOVD reg32, xmmreg  Double  FADD 3  */
  64,				/* size of l1 cache.  */
  512,				/* size of l2 cache.  */
  64,				/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it probably is not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,				/* number of parallel prefetches */
  COSTS_N_INSNS (4),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),		/* cost of FSQRT instruction.  */

  /* AMDFAM10 has an optimized REP instruction for medium sized blocks, but
     for very small blocks it is better to use a loop.  For large blocks,
     libcall can do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,				/* scalar_stmt_cost.  */
  2,				/* scalar_load_cost.  */
  2,				/* scalar_store_cost.  */
  6,				/* vec_stmt_cost.  */
  0,				/* vec_to_scalar_cost.  */
  2,				/* scalar_to_vec_cost.  */
  2,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  2,				/* vec_store_cost.  */
  2,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (3),		/* cost of a lea instruction */
  COSTS_N_INSNS (4),		/* variable shift costs */
  COSTS_N_INSNS (4),		/* constant shift costs */
  {COSTS_N_INSNS (15),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (15),		/*                               HI */
   COSTS_N_INSNS (15),		/*                               SI */
   COSTS_N_INSNS (15),		/*                               DI */
   COSTS_N_INSNS (15)},		/*                            other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),		/*                          HI */
   COSTS_N_INSNS (56),		/*                          SI */
   COSTS_N_INSNS (56),		/*                          DI */
   COSTS_N_INSNS (56)},		/*                          other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  16,				/* "large" insn */
  2,				/* cost for loading QImode using movzbl */
  {4, 5, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 3, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {2, 2, 6},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 4, 6},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {2, 2},			/* cost of loading MMX registers
				   in SImode and DImode */
  {2, 2},			/* cost of storing MMX registers
				   in SImode and DImode */
  12,				/* cost of moving SSE register */
  {12, 12, 12},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {2, 2, 8},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  10,				/* MMX or SSE register to integer */
  8,				/* size of l1 cache.  */
  256,				/* size of l2 cache.  */
  64,				/* size of prefetch block */
  6,				/* number of parallel prefetches */
  COSTS_N_INSNS (5),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),		/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar_load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1),		/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (10),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (10),		/*                               HI */
   COSTS_N_INSNS (10),		/*                               SI */
   COSTS_N_INSNS (10),		/*                               DI */
   COSTS_N_INSNS (10)},		/*                            other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),		/*                          HI */
   COSTS_N_INSNS (66),		/*                          SI */
   COSTS_N_INSNS (66),		/*                          DI */
   COSTS_N_INSNS (66)},		/*                          other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  16,				/* "large" insn */
  4,				/* cost for loading QImode using movzbl */
  {4, 4, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {4, 4, 4},			/* cost of storing integer registers */
  3,				/* cost of reg,reg fld/fst */
  {12, 12, 12},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 4, 4},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  6,				/* cost of moving MMX register */
  {12, 12},			/* cost of loading MMX registers
				   in SImode and DImode */
  {12, 12},			/* cost of storing MMX registers
				   in SImode and DImode */
  6,				/* cost of moving SSE register */
  {12, 12, 12},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {12, 12, 12},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  8,				/* MMX or SSE register to integer */
  8,				/* size of l1 cache.  */
  1024,				/* size of l2 cache.  */
  128,				/* size of prefetch block */
  8,				/* number of parallel prefetches */
  COSTS_N_INSNS (6),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),		/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
	      {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar_load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};
struct processor_costs core2_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,	/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (3),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),		/*                               HI */
   COSTS_N_INSNS (3),		/*                               SI */
   COSTS_N_INSNS (3),		/*                               DI */
   COSTS_N_INSNS (3)},		/*                            other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (22),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (22),		/*                          HI */
   COSTS_N_INSNS (22),		/*                          SI */
   COSTS_N_INSNS (22),		/*                          DI */
   COSTS_N_INSNS (22)},		/*                          other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  2,				/* cost for loading QImode using movzbl */
  {6, 6, 6},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {4, 4, 4},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {6, 6, 6},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 4, 4},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {6, 6},			/* cost of loading MMX registers
				   in SImode and DImode */
  {4, 4},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {6, 6, 6},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 4, 4},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  2,				/* MMX or SSE register to integer */
  32,				/* size of l1 cache.  */
  2048,				/* size of l2 cache.  */
  128,				/* size of prefetch block */
  8,				/* number of parallel prefetches */
  3,				/* Branch cost */
  COSTS_N_INSNS (3),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (32),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (58),		/* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar_load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};
struct processor_costs atom_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,	/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (3),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),		/*                               HI */
   COSTS_N_INSNS (3),		/*                               SI */
   COSTS_N_INSNS (4),		/*                               DI */
   COSTS_N_INSNS (2)},		/*                            other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),		/*                          HI */
   COSTS_N_INSNS (42),		/*                          SI */
   COSTS_N_INSNS (74),		/*                          DI */
   COSTS_N_INSNS (74)},		/*                          other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  17,				/* MOVE_RATIO */
  2,				/* cost for loading QImode using movzbl */
  {4, 4, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {4, 4, 4},			/* cost of storing integer registers */
  4,				/* cost of reg,reg fld/fst */
  {12, 12, 12},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {6, 6, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {8, 8},			/* cost of loading MMX registers
				   in SImode and DImode */
  {8, 8},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {8, 8, 8},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {8, 8, 8},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  5,				/* MMX or SSE register to integer */
  32,				/* size of l1 cache.  */
  256,				/* size of l2 cache.  */
  64,				/* size of prefetch block */
  6,				/* number of parallel prefetches */
  3,				/* Branch cost */
  COSTS_N_INSNS (8),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),		/* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar_load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};
/* Generic64 should produce code tuned for Nocona and K8.  */

struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  /* On all chips taken into consideration, lea is 2 cycles or more.  With
     this cost, however, our current implementation of synth_mult results in
     the use of unnecessary temporary registers, causing a regression on
     several SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1,	/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (3),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),		/*                               HI */
   COSTS_N_INSNS (3),		/*                               SI */
   COSTS_N_INSNS (4),		/*                               DI */
   COSTS_N_INSNS (2)},		/*                            other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),		/*                          HI */
   COSTS_N_INSNS (42),		/*                          SI */
   COSTS_N_INSNS (74),		/*                          DI */
   COSTS_N_INSNS (74)},		/*                          other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  17,				/* MOVE_RATIO */
  4,				/* cost for loading QImode using movzbl */
  {4, 4, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {4, 4, 4},			/* cost of storing integer registers */
  4,				/* cost of reg,reg fld/fst */
  {12, 12, 12},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {6, 6, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {8, 8},			/* cost of loading MMX registers
				   in SImode and DImode */
  {8, 8},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {8, 8, 8},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {8, 8, 8},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  5,				/* MMX or SSE register to integer */
  32,				/* size of l1 cache.  */
  512,				/* size of l2 cache.  */
  64,				/* size of prefetch block */
  6,				/* number of parallel prefetches */
  /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
     value is increased to the perhaps more appropriate value of 5.  */
  3,				/* Branch cost */
  COSTS_N_INSNS (8),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),		/* cost of FSQRT instruction.  */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar_load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};
/* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona
   and K8.  */

struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1),		/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,	/* cost of a lea instruction */
  COSTS_N_INSNS (1),		/* variable shift costs */
  COSTS_N_INSNS (1),		/* constant shift costs */
  {COSTS_N_INSNS (3),		/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),		/*                               HI */
   COSTS_N_INSNS (3),		/*                               SI */
   COSTS_N_INSNS (4),		/*                               DI */
   COSTS_N_INSNS (2)},		/*                            other */
  0,				/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),		/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),		/*                          HI */
   COSTS_N_INSNS (42),		/*                          SI */
   COSTS_N_INSNS (74),		/*                          DI */
   COSTS_N_INSNS (74)},		/*                          other */
  COSTS_N_INSNS (1),		/* cost of movsx */
  COSTS_N_INSNS (1),		/* cost of movzx */
  8,				/* "large" insn */
  17,				/* MOVE_RATIO */
  4,				/* cost for loading QImode using movzbl */
  {4, 4, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {4, 4, 4},			/* cost of storing integer registers */
  4,				/* cost of reg,reg fld/fst */
  {12, 12, 12},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {6, 6, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {8, 8},			/* cost of loading MMX registers
				   in SImode and DImode */
  {8, 8},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {8, 8, 8},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {8, 8, 8},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  5,				/* MMX or SSE register to integer */
  32,				/* size of l1 cache.  */
  256,				/* size of l2 cache.  */
  64,				/* size of prefetch block */
  6,				/* number of parallel prefetches */
  3,				/* Branch cost */
  COSTS_N_INSNS (8),		/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),		/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),		/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),		/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),		/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),		/* cost of FSQRT instruction.  */
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,				/* scalar_stmt_cost.  */
  1,				/* scalar_load_cost.  */
  1,				/* scalar_store_cost.  */
  1,				/* vec_stmt_cost.  */
  1,				/* vec_to_scalar_cost.  */
  1,				/* scalar_to_vec_cost.  */
  1,				/* vec_align_load_cost.  */
  2,				/* vec_unalign_load_cost.  */
  1,				/* vec_store_cost.  */
  3,				/* cond_taken_branch_cost.  */
  1,				/* cond_not_taken_branch_cost.  */
};
const struct processor_costs *ix86_cost = &pentium_cost;
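/* Illustrative sketch (hypothetical helper; the real selection in
   override_options indexes a processor_target_table by the -mtune
   setting, which lies outside this excerpt): when optimizing for size,
   the byte-based table is used; otherwise a per-CPU table is chosen.
   generic32_cost stands in here for whichever per-CPU table applies.  */
static void
example_select_cost_table (int optimize_size_p)
{
  ix86_cost = optimize_size_p ? &ix86_size_cost : &generic32_cost;
}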
/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
#define m_CORE2 (1<<PROCESSOR_CORE2)
#define m_ATOM (1<<PROCESSOR_ATOM)

#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
#define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10)

#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)

/* Generic instruction choice should be a common subset of supported CPUs
   (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
/* Feature tests against the various tunings.  */
unsigned char ix86_tune_features[X86_TUNE_LAST];
1291 /* Feature tests against the various tunings used to create ix86_tune_features
1292 based on the processor mask. */
1293 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1294 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1295 negatively, so enabling for Generic64 seems like good code size
1296 tradeoff. We can't enable it for 32bit generic because it does not
1297 work well with PPro base chips. */
1298 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,
1300 /* X86_TUNE_PUSH_MEMORY */
1301 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1302 | m_NOCONA | m_CORE2 | m_GENERIC,
1304 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1307 /* X86_TUNE_UNROLL_STRLEN */
1308 m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
1309 | m_CORE2 | m_GENERIC,
1311 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1312 m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
1314 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1315 on simulation result. But after P4 was made, no performance benefit
1316 was observed with branch hints. It also increases the code size.
1317 As a result, icc never generates branch hints. */
1320 /* X86_TUNE_DOUBLE_WITH_ADD */
1323 /* X86_TUNE_USE_SAHF */
1324 m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
1325 | m_NOCONA | m_CORE2 | m_GENERIC,
1327 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1328 partial dependencies. */
1329 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
1330 | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1332 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1333 register stalls on Generic32 compilation setting as well. However
1334 in current implementation the partial register stalls are not eliminated
1335 very well - they can be introduced via subregs synthesized by combine
1336 and can happen in caller/callee saving sequences. Because this option
1337 pays back little on PPro based chips and is in conflict with partial reg
1338 dependencies used by Athlon/P4 based chips, it is better to leave it off
1339 for generic32 for now. */
1342 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1343 m_CORE2 | m_GENERIC,
1345 /* X86_TUNE_USE_HIMODE_FIOP */
1346 m_386 | m_486 | m_K6_GEODE,
1348 /* X86_TUNE_USE_SIMODE_FIOP */
1349 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),
1351 /* X86_TUNE_USE_MOV0 */
1354 /* X86_TUNE_USE_CLTD */
1355 ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),
1357 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1360 /* X86_TUNE_SPLIT_LONG_MOVES */
1363 /* X86_TUNE_READ_MODIFY_WRITE */
1366 /* X86_TUNE_READ_MODIFY */
1369 /* X86_TUNE_PROMOTE_QIMODE */
1370 m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
1371 | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,
1373 /* X86_TUNE_FAST_PREFIX */
1374 ~(m_PENT | m_486 | m_386),
1376 /* X86_TUNE_SINGLE_STRINGOP */
1377 m_386 | m_PENT4 | m_NOCONA,
1379 /* X86_TUNE_QIMODE_MATH */
1382 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1383 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL this option
1384 might be considered for Generic32 if our scheme for avoiding partial
1385 stalls was more effective. */
1388 /* X86_TUNE_PROMOTE_QI_REGS */
1391 /* X86_TUNE_PROMOTE_HI_REGS */
1394 /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop. */
1395 m_ATOM | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA
1396 | m_CORE2 | m_GENERIC,
1398 /* X86_TUNE_ADD_ESP_8 */
1399 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_K6_GEODE | m_386
1400 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1402 /* X86_TUNE_SUB_ESP_4 */
1403 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2
1406 /* X86_TUNE_SUB_ESP_8 */
1407 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_386 | m_486
1408 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1410 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1411 for DFmode copies */
1412 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1413 | m_GENERIC | m_GEODE),
1415 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1416 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1418 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1419 conflict here in between PPro/Pentium4 based chips that thread 128bit
1420 SSE registers as single units versus K8 based chips that divide SSE
1421 registers to two 64bit halves. This knob promotes all store destinations
1422 to be 128bit to allow register renaming on 128bit SSE units, but usually
1423 results in one extra microop on 64bit SSE units. Experimental results
1424 shows that disabling this option on P4 brings over 20% SPECfp regression,
1425 while enabling it on K8 brings roughly 2.4% regression that can be partly
1426 masked by careful scheduling of moves. */
1427 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
1430 /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
1433 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1434 are resolved on SSE register parts instead of whole registers, so we may
1435 maintain just lower part of scalar values in proper format leaving the
1436 upper part undefined. */
1439 /* X86_TUNE_SSE_TYPELESS_STORES */
1442 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1443 m_PPRO | m_PENT4 | m_NOCONA,
1445 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1446 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1448 /* X86_TUNE_PROLOGUE_USING_MOVE */
1449 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1451 /* X86_TUNE_EPILOGUE_USING_MOVE */
1452 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1454 /* X86_TUNE_SHIFT1 */
1457 /* X86_TUNE_USE_FFREEP */
1460 /* X86_TUNE_INTER_UNIT_MOVES */
1461 ~(m_AMD_MULTIPLE | m_GENERIC),
1463 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
1466 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
1467 than 4 branch instructions in the 16 byte window. */
1468 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
1471 /* X86_TUNE_SCHEDULE */
1472 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
1475 /* X86_TUNE_USE_BT */
1476 m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,
1478 /* X86_TUNE_USE_INCDEC */
1479 ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),
1481 /* X86_TUNE_PAD_RETURNS */
1482 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
1484 /* X86_TUNE_EXT_80387_CONSTANTS */
1485 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
1486 | m_CORE2 | m_GENERIC,
1488 /* X86_TUNE_SHORTEN_X87_SSE */
1491 /* X86_TUNE_AVOID_VECTOR_DECODE */
1494 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have same latency for HImode
1495 and SImode multiply, but 386 and 486 do HImode multiply faster. */
1498 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
1499 vector path on AMD machines. */
1500 m_K8 | m_GENERIC64 | m_AMDFAM10,
1502 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
1504 m_K8 | m_GENERIC64 | m_AMDFAM10,
1506 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR than a MOV. */
1510 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1511 but one byte longer. */
1514 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with a memory
1515 operand that cannot be represented using a modRM byte. The XOR
1516 replacement is long decoded, so this split helps here as well. */
1519 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion from FP to FP. */
1521 m_AMDFAM10 | m_GENERIC,
1523 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1524 from integer to FP. */
1527 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1528 with a subsequent conditional jump instruction into a single
1529 compare-and-branch uop. */
1532 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
1533 will impact LEA instruction selection. */
1537 /* Feature tests against the various architecture variations. */
1538 unsigned char ix86_arch_features[X86_ARCH_LAST];
1540 /* Feature tests against the various architecture variations, used to create
1541 ix86_arch_features based on the processor mask. */
1542 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1543 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1544 ~(m_386 | m_486 | m_PENT | m_K6),
1546 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1549 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1552 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1555 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1559 static const unsigned int x86_accumulate_outgoing_args
1560 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1563 static const unsigned int x86_arch_always_fancy_math_387
1564 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1565 | m_NOCONA | m_CORE2 | m_GENERIC;
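/* Illustrative sketch (not in the original source): each m_* mask carries
   one bit per processor, so testing whether the selected CPU has a given
   feature is a single AND against (1u << ix86_arch), e.g.:

     unsigned int arch_mask = 1u << ix86_arch;
     int have_cmove = (initial_ix86_arch_features[X86_ARCH_CMOVE]
                       & arch_mask) != 0;

   override_options below fills ix86_arch_features[] in exactly this way.  */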
1567 static enum stringop_alg stringop_alg = no_stringop;
1569 /* In case the average insn count for a single function invocation is
1570 lower than this constant, emit a fast (but longer) prologue and epilogue. */
1572 #define FAST_PROLOGUE_INSN_COUNT 20
1574 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
1575 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1576 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1577 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1579 /* Array of the smallest class containing reg number REGNO, indexed by
1580 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1582 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1584 /* ax, dx, cx, bx */
1585 AREG, DREG, CREG, BREG,
1586 /* si, di, bp, sp */
1587 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1589 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1590 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1593 /* flags, fpsr, fpcr, frame */
1594 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1596 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1599 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1602 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1603 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1604 /* SSE REX registers */
1605 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1609 /* The "default" register map used in 32-bit mode. */
1611 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1613 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1614 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1615 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1616 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1617 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1618 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1619 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1622 /* The "default" register map used in 64-bit mode. */
1624 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1626 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1627 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1628 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1629 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1630 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1631 8,9,10,11,12,13,14,15, /* extended integer registers */
1632 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1635 /* Define the register numbers to be used in Dwarf debugging information.
1636 The SVR4 reference port C compiler uses the following register numbers
1637 in its Dwarf output code:
1638 0 for %eax (gcc regno = 0)
1639 1 for %ecx (gcc regno = 2)
1640 2 for %edx (gcc regno = 1)
1641 3 for %ebx (gcc regno = 3)
1642 4 for %esp (gcc regno = 7)
1643 5 for %ebp (gcc regno = 6)
1644 6 for %esi (gcc regno = 4)
1645 7 for %edi (gcc regno = 5)
1646 The following three DWARF register numbers are never generated by
1647 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1648 believes these numbers have these meanings.
1649 8 for %eip (no gcc equivalent)
1650 9 for %eflags (gcc regno = 17)
1651 10 for %trapno (no gcc equivalent)
1652 It is not at all clear how we should number the FP stack registers
1653 for the x86 architecture. If the version of SDB on x86/svr4 were
1654 a bit less brain dead with respect to floating-point then we would
1655 have a precedent to follow with respect to DWARF register numbers
1656 for x86 FP registers, but the SDB on x86/svr4 is so completely
1657 broken with respect to FP registers that it is hardly worth thinking
1658 of it as something to strive for compatibility with.
1659 The version of x86/svr4 SDB I have at the moment does (partially)
1660 seem to believe that DWARF register number 11 is associated with
1661 the x86 register %st(0), but that's about all. Higher DWARF
1662 register numbers don't seem to be associated with anything in
1663 particular, and even for DWARF regno 11, SDB only seems to under-
1664 stand that it should say that a variable lives in %st(0) (when
1665 asked via an `=' command) if we said it was in DWARF regno 11,
1666 but SDB still prints garbage when asked for the value of the
1667 variable in question (via a `/' command).
1668 (Also note that the labels SDB prints for various FP stack regs
1669 when doing an `x' command are all wrong.)
1670 Note that these problems generally don't affect the native SVR4
1671 C compiler because it doesn't allow the use of -O with -g and
1672 because when it is *not* optimizing, it allocates a memory
1673 location for each floating-point variable, and the memory
1674 location is what gets described in the DWARF AT_location
1675 attribute for the variable in question.
1676 Regardless of the severe mental illness of the x86/svr4 SDB, we
1677 do something sensible here and we use the following DWARF
1678 register numbers. Note that these are all stack-top-relative numbers:
1680 11 for %st(0) (gcc regno = 8)
1681 12 for %st(1) (gcc regno = 9)
1682 13 for %st(2) (gcc regno = 10)
1683 14 for %st(3) (gcc regno = 11)
1684 15 for %st(4) (gcc regno = 12)
1685 16 for %st(5) (gcc regno = 13)
1686 17 for %st(6) (gcc regno = 14)
1687 18 for %st(7) (gcc regno = 15)
1689 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1691 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1692 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1693 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1694 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1695 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1696 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1697 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1700 /* Test and compare insns in i386.md store the information needed to
1701 generate branch and scc insns here. */
1703 rtx ix86_compare_op0 = NULL_RTX;
1704 rtx ix86_compare_op1 = NULL_RTX;
1706 /* Define parameter passing and return registers. */
1708 static int const x86_64_int_parameter_registers[6] =
1710 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
1713 static int const x86_64_ms_abi_int_parameter_registers[4] =
1715 CX_REG, DX_REG, R8_REG, R9_REG
1718 static int const x86_64_int_return_registers[4] =
1720 AX_REG, DX_REG, DI_REG, SI_REG
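/* Worked example (illustrative, not in the original source): for the SysV
   prototype "long f (long a, long b)", A arrives in %rdi and B in %rsi --
   the first two entries of x86_64_int_parameter_registers -- and the
   result is returned in %rax, with %rdx holding the second half of a
   128-bit return value.  */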
1723 /* Define the structure for the machine field in struct function. */
1725 struct GTY(()) stack_local_entry {
1726 unsigned short mode;
1729 struct stack_local_entry *next;
1732 /* Structure describing stack frame layout.
1733 Stack grows downward:
1739 saved frame pointer if frame_pointer_needed
1740 <- HARD_FRAME_POINTER
1749 [va_arg registers] \
1750 [frame]             > to_allocate <- FRAME_POINTER */
1762 HOST_WIDE_INT frame;
1764 int outgoing_arguments_size;
1767 HOST_WIDE_INT to_allocate;
1768 /* The offsets relative to ARG_POINTER. */
1769 HOST_WIDE_INT frame_pointer_offset;
1770 HOST_WIDE_INT hard_frame_pointer_offset;
1771 HOST_WIDE_INT stack_pointer_offset;
1773 /* When save_regs_using_mov is set, emit prologue using
1774 move instead of push instructions. */
1775 bool save_regs_using_mov;
1778 /* Code model option. */
1779 enum cmodel ix86_cmodel;
1781 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1783 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1785 /* Which unit we are generating floating point math for. */
1786 enum fpmath_unit ix86_fpmath;
1788 /* Which cpu we are scheduling for. */
1789 enum attr_cpu ix86_schedule;
1791 /* Which cpu we are optimizing for. */
1792 enum processor_type ix86_tune;
1794 /* Which instruction set architecture to use. */
1795 enum processor_type ix86_arch;
1797 /* True if the SSE prefetch instruction is not a NOP. */
1798 int x86_prefetch_sse;
1800 /* ix86_regparm_string as a number */
1801 static int ix86_regparm;
1803 /* -mstackrealign option */
1804 extern int ix86_force_align_arg_pointer;
1805 static const char ix86_force_align_arg_pointer_string[]
1806 = "force_align_arg_pointer";
1808 static rtx (*ix86_gen_leave) (void);
1809 static rtx (*ix86_gen_pop1) (rtx);
1810 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1811 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1812 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
1813 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1814 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1815 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1817 /* Preferred alignment for stack boundary in bits. */
1818 unsigned int ix86_preferred_stack_boundary;
1820 /* Alignment for incoming stack boundary in bits specified at the command line. */
1822 static unsigned int ix86_user_incoming_stack_boundary;
1824 /* Default alignment for incoming stack boundary in bits. */
1825 static unsigned int ix86_default_incoming_stack_boundary;
1827 /* Alignment for incoming stack boundary in bits. */
1828 unsigned int ix86_incoming_stack_boundary;
1830 /* The ABI used by the target. */
1831 enum calling_abi ix86_abi;
1833 /* Values 1-5: see jump.c */
1834 int ix86_branch_cost;
1836 /* Calling-ABI-specific va_list type nodes. */
1837 static GTY(()) tree sysv_va_list_type_node;
1838 static GTY(()) tree ms_va_list_type_node;
1840 /* Variables which are this size or smaller are put in the data/bss
1841 or ldata/lbss sections. */
1843 int ix86_section_threshold = 65536;
1845 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1846 char internal_label_prefix[16];
1847 int internal_label_prefix_len;
1849 /* Fence to use after a loop using movnt. */
1852 /* Register class used for passing a given 64-bit part of the argument.
1853 These represent classes as documented by the PS ABI, with the exception
1854 of the SSESF and SSEDF classes, which are basically the SSE class except
1855 that gcc will use SF or DFmode moves instead of DImode to avoid
1857 reformatting penalties. Similarly we play games with INTEGERSI_CLASS to
1858 use cheaper SImode moves whenever possible (the upper half does contain padding). */
1859 enum x86_64_reg_class
1862 X86_64_INTEGER_CLASS,
1863 X86_64_INTEGERSI_CLASS,
1870 X86_64_COMPLEX_X87_CLASS,
1874 #define MAX_CLASSES 4
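/* Classification example (illustrative, not in the original source): a
   16-byte "struct { double d; long l; }" passed by value is split into two
   eightbytes; D classifies as X86_64_SSEDF_CLASS and travels in an XMM
   register, while L classifies as X86_64_INTEGER_CLASS and goes in a
   general-purpose register.  */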
1876 /* Table of constants used by fldpi, fldln2, etc. */
1877 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1878 static bool ext_80387_constants_init = 0;
1881 static struct machine_function * ix86_init_machine_status (void);
1882 static rtx ix86_function_value (const_tree, const_tree, bool);
1883 static rtx ix86_static_chain (const_tree, bool);
1884 static int ix86_function_regparm (const_tree, const_tree);
1885 static void ix86_compute_frame_layout (struct ix86_frame *);
1886 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1888 static void ix86_add_new_builtins (int);
1889 static rtx ix86_expand_vec_perm_builtin (tree);
1891 enum ix86_function_specific_strings
1893 IX86_FUNCTION_SPECIFIC_ARCH,
1894 IX86_FUNCTION_SPECIFIC_TUNE,
1895 IX86_FUNCTION_SPECIFIC_FPMATH,
1896 IX86_FUNCTION_SPECIFIC_MAX
1899 static char *ix86_target_string (int, int, const char *, const char *,
1900 const char *, bool);
1901 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1902 static void ix86_function_specific_save (struct cl_target_option *);
1903 static void ix86_function_specific_restore (struct cl_target_option *);
1904 static void ix86_function_specific_print (FILE *, int,
1905 struct cl_target_option *);
1906 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
1907 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
1908 static bool ix86_can_inline_p (tree, tree);
1909 static void ix86_set_current_function (tree);
1910 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
1912 static enum calling_abi ix86_function_abi (const_tree);
1915 #ifndef SUBTARGET32_DEFAULT_CPU
1916 #define SUBTARGET32_DEFAULT_CPU "i386"
1919 /* The svr4 ABI for the i386 says that records and unions are returned in memory. */
1921 #ifndef DEFAULT_PCC_STRUCT_RETURN
1922 #define DEFAULT_PCC_STRUCT_RETURN 1
1925 /* Whether -mtune= or -march= were specified */
1926 static int ix86_tune_defaulted;
1927 static int ix86_arch_specified;
1929 /* Bit flags that specify the ISA we are compiling for. */
1930 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
1932 /* A mask of ix86_isa_flags that includes bit X if X
1933 was set or cleared on the command line. */
1934 static int ix86_isa_flags_explicit;
1936 /* Define a set of ISAs which are available when a given ISA is
1937 enabled. MMX and SSE ISAs are handled separately. */
1939 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
1940 #define OPTION_MASK_ISA_3DNOW_SET \
1941 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
1943 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
1944 #define OPTION_MASK_ISA_SSE2_SET \
1945 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
1946 #define OPTION_MASK_ISA_SSE3_SET \
1947 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
1948 #define OPTION_MASK_ISA_SSSE3_SET \
1949 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
1950 #define OPTION_MASK_ISA_SSE4_1_SET \
1951 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
1952 #define OPTION_MASK_ISA_SSE4_2_SET \
1953 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
1954 #define OPTION_MASK_ISA_AVX_SET \
1955 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
1956 #define OPTION_MASK_ISA_FMA_SET \
1957 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
1959 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same as -msse4.2. */
1961 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
1963 #define OPTION_MASK_ISA_SSE4A_SET \
1964 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
1965 #define OPTION_MASK_ISA_FMA4_SET \
1966 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
1967 | OPTION_MASK_ISA_AVX_SET)
1968 #define OPTION_MASK_ISA_XOP_SET \
1969 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
1970 #define OPTION_MASK_ISA_LWP_SET \
1973 /* AES and PCLMUL need SSE2 because they use xmm registers */
1974 #define OPTION_MASK_ISA_AES_SET \
1975 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
1976 #define OPTION_MASK_ISA_PCLMUL_SET \
1977 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
1979 #define OPTION_MASK_ISA_ABM_SET \
1980 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
1982 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
1983 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
1984 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
1985 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
1986 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
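/* For instance (illustrative note), OPTION_MASK_ISA_AVX_SET expands
   transitively to AVX | SSE4.2 | SSE4.1 | SSSE3 | SSE3 | SSE2 | SSE, so a
   bare -mavx is enough to enable the whole SSE family.  */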
1988 /* Define a set of ISAs which aren't available when a given ISA is
1989 disabled. MMX and SSE ISAs are handled separately. */
1991 #define OPTION_MASK_ISA_MMX_UNSET \
1992 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
1993 #define OPTION_MASK_ISA_3DNOW_UNSET \
1994 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
1995 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
1997 #define OPTION_MASK_ISA_SSE_UNSET \
1998 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
1999 #define OPTION_MASK_ISA_SSE2_UNSET \
2000 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
2001 #define OPTION_MASK_ISA_SSE3_UNSET \
2002 (OPTION_MASK_ISA_SSE3 \
2003 | OPTION_MASK_ISA_SSSE3_UNSET \
2004 | OPTION_MASK_ISA_SSE4A_UNSET )
2005 #define OPTION_MASK_ISA_SSSE3_UNSET \
2006 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2007 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2008 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2009 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2010 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2011 #define OPTION_MASK_ISA_AVX_UNSET \
2012 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2013 | OPTION_MASK_ISA_FMA4_UNSET)
2014 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
2016 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same as -mno-sse4.1. */
2018 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2020 #define OPTION_MASK_ISA_SSE4A_UNSET \
2021 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2023 #define OPTION_MASK_ISA_FMA4_UNSET \
2024 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2025 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2026 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2028 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2029 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2030 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2031 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2032 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2033 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2034 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2035 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
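/* Illustrative sketch (the real handling is in ix86_handle_option below):
   the SET/UNSET pairs compose as

     ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;      -- -msse3
     ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;   -- -mno-sse3

   so enabling an ISA pulls in everything it implies, while disabling it
   also drops everything that implies it (SSSE3, SSE4.x, SSE4A, ...).  */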
2037 /* Vectorization library interface and handlers. */
2038 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
2039 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2040 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2042 /* Processor target table, indexed by processor number */
2045 const struct processor_costs *cost; /* Processor costs */
2046 const int align_loop; /* Default alignments. */
2047 const int align_loop_max_skip;
2048 const int align_jump;
2049 const int align_jump_max_skip;
2050 const int align_func;
2053 static const struct ptt processor_target_table[PROCESSOR_max] =
2055 {&i386_cost, 4, 3, 4, 3, 4},
2056 {&i486_cost, 16, 15, 16, 15, 16},
2057 {&pentium_cost, 16, 7, 16, 7, 16},
2058 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2059 {&geode_cost, 0, 0, 0, 0, 0},
2060 {&k6_cost, 32, 7, 32, 7, 32},
2061 {&athlon_cost, 16, 7, 16, 7, 16},
2062 {&pentium4_cost, 0, 0, 0, 0, 0},
2063 {&k8_cost, 16, 7, 16, 7, 16},
2064 {&nocona_cost, 0, 0, 0, 0, 0},
2065 {&core2_cost, 16, 10, 16, 10, 16},
2066 {&generic32_cost, 16, 7, 16, 7, 16},
2067 {&generic64_cost, 16, 10, 16, 10, 16},
2068 {&amdfam10_cost, 32, 24, 32, 7, 32},
2069 {&atom_cost, 16, 7, 16, 7, 16}
2072 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2098 /* Implement TARGET_HANDLE_OPTION. */
2101 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
2108 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2109 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2113 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2114 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2121 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2122 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2126 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2127 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2137 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2138 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2142 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2143 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2150 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2151 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2155 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2156 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2163 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2164 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2168 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2169 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2176 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2177 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2181 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2182 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2189 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2190 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2194 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2195 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2202 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2203 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2207 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2208 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2215 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2216 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2220 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2221 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2228 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2229 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2233 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2234 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2239 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2240 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2244 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2245 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2251 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2252 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2256 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2257 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2264 ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2265 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2269 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2270 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2277 ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2278 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2282 ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2283 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2290 ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2291 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2295 ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2296 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2303 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2304 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2308 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2309 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2316 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2317 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2321 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2322 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2329 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2330 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2334 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2335 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2342 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2343 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2347 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2348 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2355 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2356 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2360 ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2361 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2368 ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2369 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2373 ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2374 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2381 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2382 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2386 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2387 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2394 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2395 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2399 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2400 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
2409 /* Return a string that documents the current -m options. The caller is
2410 responsible for freeing the string. */
2413 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2414 const char *fpmath, bool add_nl_p)
2416 struct ix86_target_opts
2418 const char *option; /* option string */
2419 int mask; /* isa mask options */
2422 /* This table is ordered so that options like -msse4.2 that imply
2423 preceding options are matched first. */
2424 static struct ix86_target_opts isa_opts[] =
2426 { "-m64", OPTION_MASK_ISA_64BIT },
2427 { "-mfma4", OPTION_MASK_ISA_FMA4 },
2428 { "-mfma", OPTION_MASK_ISA_FMA },
2429 { "-mxop", OPTION_MASK_ISA_XOP },
2430 { "-mlwp", OPTION_MASK_ISA_LWP },
2431 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2432 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2433 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2434 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2435 { "-msse3", OPTION_MASK_ISA_SSE3 },
2436 { "-msse2", OPTION_MASK_ISA_SSE2 },
2437 { "-msse", OPTION_MASK_ISA_SSE },
2438 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2439 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2440 { "-mmmx", OPTION_MASK_ISA_MMX },
2441 { "-mabm", OPTION_MASK_ISA_ABM },
2442 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2443 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2444 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2445 { "-maes", OPTION_MASK_ISA_AES },
2446 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2450 static struct ix86_target_opts flag_opts[] =
2452 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2453 { "-m80387", MASK_80387 },
2454 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2455 { "-malign-double", MASK_ALIGN_DOUBLE },
2456 { "-mcld", MASK_CLD },
2457 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2458 { "-mieee-fp", MASK_IEEE_FP },
2459 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2460 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2461 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2462 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2463 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2464 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2465 { "-mno-red-zone", MASK_NO_RED_ZONE },
2466 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2467 { "-mrecip", MASK_RECIP },
2468 { "-mrtd", MASK_RTD },
2469 { "-msseregparm", MASK_SSEREGPARM },
2470 { "-mstack-arg-probe", MASK_STACK_PROBE },
2471 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2474 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2477 char target_other[40];
2486 memset (opts, '\0', sizeof (opts));
2488 /* Add -march= option. */
2491 opts[num][0] = "-march=";
2492 opts[num++][1] = arch;
2495 /* Add -mtune= option. */
2498 opts[num][0] = "-mtune=";
2499 opts[num++][1] = tune;
2502 /* Pick out the options in isa options. */
2503 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2505 if ((isa & isa_opts[i].mask) != 0)
2507 opts[num++][0] = isa_opts[i].option;
2508 isa &= ~ isa_opts[i].mask;
2512 if (isa && add_nl_p)
2514 opts[num++][0] = isa_other;
2515 sprintf (isa_other, "(other isa: 0x%x)", isa);
2518 /* Add flag options. */
2519 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2521 if ((flags & flag_opts[i].mask) != 0)
2523 opts[num++][0] = flag_opts[i].option;
2524 flags &= ~ flag_opts[i].mask;
2528 if (flags && add_nl_p)
2530 opts[num++][0] = target_other;
2531 sprintf (target_other, "(other flags: 0x%x)", flags);
2534 /* Add -fpmath= option. */
2537 opts[num][0] = "-mfpmath=";
2538 opts[num++][1] = fpmath;
2545 gcc_assert (num < ARRAY_SIZE (opts));
2547 /* Size the string. */
2549 sep_len = (add_nl_p) ? 3 : 1;
2550 for (i = 0; i < num; i++)
2553 for (j = 0; j < 2; j++)
2555 len += strlen (opts[i][j]);
2558 /* Build the string. */
2559 ret = ptr = (char *) xmalloc (len);
2562 for (i = 0; i < num; i++)
2566 for (j = 0; j < 2; j++)
2567 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2574 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2582 for (j = 0; j < 2; j++)
2585 memcpy (ptr, opts[i][j], len2[j]);
2587 line_len += len2[j];
2592 gcc_assert (ret + len >= ptr);
2597 /* Function that is callable from the debugger to print the current options. */
2600 ix86_debug_options (void)
2602 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2603 ix86_arch_string, ix86_tune_string,
2604 ix86_fpmath_string, true);
2608 fprintf (stderr, "%s\n\n", opts);
2612 fputs ("<no options>\n\n", stderr);
2617 /* Sometimes certain combinations of command options do not make
2618 sense on a particular target machine. You can define a macro
2619 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2620 defined, is executed once just after all the command options have been parsed.
2623 Don't use this macro to turn on various extra optimizations for
2624 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2627 override_options (bool main_args_p)
2630 unsigned int ix86_arch_mask, ix86_tune_mask;
2631 const bool ix86_tune_specified = (ix86_tune_string != NULL);
2636 /* Comes from final.c -- no real reason to change it. */
2637 #define MAX_CODE_ALIGN 16
2645 PTA_PREFETCH_SSE = 1 << 4,
2647 PTA_3DNOW_A = 1 << 6,
2651 PTA_POPCNT = 1 << 10,
2653 PTA_SSE4A = 1 << 12,
2654 PTA_NO_SAHF = 1 << 13,
2655 PTA_SSE4_1 = 1 << 14,
2656 PTA_SSE4_2 = 1 << 15,
2658 PTA_PCLMUL = 1 << 17,
2661 PTA_MOVBE = 1 << 20,
2669 const char *const name; /* processor name or nickname. */
2670 const enum processor_type processor;
2671 const enum attr_cpu schedule;
2672 const unsigned /*enum pta_flags*/ flags;
2674 const processor_alias_table[] =
2676 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2677 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2678 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2679 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2680 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2681 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2682 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2683 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2684 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2685 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2686 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2687 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2688 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2690 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2692 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2693 PTA_MMX | PTA_SSE | PTA_SSE2},
2694 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2695 PTA_MMX | PTA_SSE | PTA_SSE2},
2696 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2697 PTA_MMX | PTA_SSE | PTA_SSE2},
2698 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2699 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2700 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2701 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2702 | PTA_CX16 | PTA_NO_SAHF},
2703 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2704 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2705 | PTA_SSSE3 | PTA_CX16},
2706 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2707 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2708 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2709 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2710 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2711 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2712 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2713 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2714 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2715 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2716 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2717 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2718 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2719 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2720 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2721 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2722 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2723 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2724 {"x86-64", PROCESSOR_K8, CPU_K8,
2725 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2726 {"k8", PROCESSOR_K8, CPU_K8,
2727 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2728 | PTA_SSE2 | PTA_NO_SAHF},
2729 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2730 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2731 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2732 {"opteron", PROCESSOR_K8, CPU_K8,
2733 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2734 | PTA_SSE2 | PTA_NO_SAHF},
2735 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2736 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2737 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2738 {"athlon64", PROCESSOR_K8, CPU_K8,
2739 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2740 | PTA_SSE2 | PTA_NO_SAHF},
2741 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2742 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2743 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2744 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2745 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2746 | PTA_SSE2 | PTA_NO_SAHF},
2747 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2748 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2749 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2750 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2751 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2752 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2753 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2754 0 /* flags are only used for -march switch. */ },
2755 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2756 PTA_64BIT /* flags are only used for -march switch. */ },
2759 int const pta_size = ARRAY_SIZE (processor_alias_table);
2761 /* Set up prefix/suffix so the error messages refer to either the command
2762 line argument or the attribute(target). */
2771 prefix = "option(\"";
2776 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2777 SUBTARGET_OVERRIDE_OPTIONS;
2780 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2781 SUBSUBTARGET_OVERRIDE_OPTIONS;
2784 /* -fPIC is the default for x86_64. */
2785 if (TARGET_MACHO && TARGET_64BIT)
2788 /* Set the default values for switches whose default depends on TARGET_64BIT
2789 in case they weren't overwritten by command line options. */
2792 /* Mach-O doesn't support omitting the frame pointer for now. */
2793 if (flag_omit_frame_pointer == 2)
2794 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2795 if (flag_asynchronous_unwind_tables == 2)
2796 flag_asynchronous_unwind_tables = 1;
2797 if (flag_pcc_struct_return == 2)
2798 flag_pcc_struct_return = 0;
2802 if (flag_omit_frame_pointer == 2)
2803 flag_omit_frame_pointer = 0;
2804 if (flag_asynchronous_unwind_tables == 2)
2805 flag_asynchronous_unwind_tables = 0;
2806 if (flag_pcc_struct_return == 2)
2807 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2810 /* Need to check -mtune=generic first. */
2811 if (ix86_tune_string)
2813 if (!strcmp (ix86_tune_string, "generic")
2814 || !strcmp (ix86_tune_string, "i686")
2815 /* As special support for cross compilers we read -mtune=native
2816 as -mtune=generic. With native compilers we won't see
2817 -mtune=native, as it is rewritten by the driver. */
2818 || !strcmp (ix86_tune_string, "native"))
2821 ix86_tune_string = "generic64";
2823 ix86_tune_string = "generic32";
2825 /* If this call is for setting the option attribute, allow the
2826 generic32/generic64 that was previously set. */
2827 else if (!main_args_p
2828 && (!strcmp (ix86_tune_string, "generic32")
2829 || !strcmp (ix86_tune_string, "generic64")))
2831 else if (!strncmp (ix86_tune_string, "generic", 7))
2832 error ("bad value (%s) for %stune=%s %s",
2833 ix86_tune_string, prefix, suffix, sw);
2834 else if (!strcmp (ix86_tune_string, "x86-64"))
2835 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2836 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2837 prefix, suffix, prefix, suffix, prefix, suffix);
2841 if (ix86_arch_string)
2842 ix86_tune_string = ix86_arch_string;
2843 if (!ix86_tune_string)
2845 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2846 ix86_tune_defaulted = 1;
2849 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2850 need to use a sensible tune option. */
2851 if (!strcmp (ix86_tune_string, "generic")
2852 || !strcmp (ix86_tune_string, "x86-64")
2853 || !strcmp (ix86_tune_string, "i686"))
2856 ix86_tune_string = "generic64";
2858 ix86_tune_string = "generic32";
2862 if (ix86_stringop_string)
2864 if (!strcmp (ix86_stringop_string, "rep_byte"))
2865 stringop_alg = rep_prefix_1_byte;
2866 else if (!strcmp (ix86_stringop_string, "libcall"))
2867 stringop_alg = libcall;
2868 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2869 stringop_alg = rep_prefix_4_byte;
2870 else if (!strcmp (ix86_stringop_string, "rep_8byte")
2872 /* rep; movq isn't available in 32-bit code. */
2873 stringop_alg = rep_prefix_8_byte;
2874 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2875 stringop_alg = loop_1_byte;
2876 else if (!strcmp (ix86_stringop_string, "loop"))
2877 stringop_alg = loop;
2878 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2879 stringop_alg = unrolled_loop;
2881 error ("bad value (%s) for %sstringop-strategy=%s %s",
2882 ix86_stringop_string, prefix, suffix, sw);
2885 if (!ix86_arch_string)
2886 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
2888 ix86_arch_specified = 1;
2890 /* Validate -mabi= value. */
2891 if (ix86_abi_string)
2893 if (strcmp (ix86_abi_string, "sysv") == 0)
2894 ix86_abi = SYSV_ABI;
2895 else if (strcmp (ix86_abi_string, "ms") == 0)
2898 error ("unknown ABI (%s) for %sabi=%s %s",
2899 ix86_abi_string, prefix, suffix, sw);
2902 ix86_abi = DEFAULT_ABI;
2904 if (ix86_cmodel_string != 0)
2906 if (!strcmp (ix86_cmodel_string, "small"))
2907 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2908 else if (!strcmp (ix86_cmodel_string, "medium"))
2909 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
2910 else if (!strcmp (ix86_cmodel_string, "large"))
2911 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
2913 error ("code model %s does not support PIC mode", ix86_cmodel_string);
2914 else if (!strcmp (ix86_cmodel_string, "32"))
2915 ix86_cmodel = CM_32;
2916 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
2917 ix86_cmodel = CM_KERNEL;
2919 error ("bad value (%s) for %scmodel=%s %s",
2920 ix86_cmodel_string, prefix, suffix, sw);
2924 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
2925 use of rip-relative addressing. This eliminates fixups that
2926 would otherwise be needed if this object is to be placed in a
2927 DLL, and is essentially just as efficient as direct addressing. */
2928 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
2929 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
2930 else if (TARGET_64BIT)
2931 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2933 ix86_cmodel = CM_32;
2935 if (ix86_asm_string != 0)
2938 && !strcmp (ix86_asm_string, "intel"))
2939 ix86_asm_dialect = ASM_INTEL;
2940 else if (!strcmp (ix86_asm_string, "att"))
2941 ix86_asm_dialect = ASM_ATT;
2943 error ("bad value (%s) for %sasm=%s %s",
2944 ix86_asm_string, prefix, suffix, sw);
2946 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
2947 error ("code model %qs not supported in the %s bit mode",
2948 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
2949 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
2950 sorry ("%i-bit mode not compiled in",
2951 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
2953 for (i = 0; i < pta_size; i++)
2954 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
2956 ix86_schedule = processor_alias_table[i].schedule;
2957 ix86_arch = processor_alias_table[i].processor;
2958 /* Default cpu tuning to the architecture. */
2959 ix86_tune = ix86_arch;
2961 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2962 error ("CPU you selected does not support x86-64 "
2965 if (processor_alias_table[i].flags & PTA_MMX
2966 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
2967 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
2968 if (processor_alias_table[i].flags & PTA_3DNOW
2969 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
2970 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
2971 if (processor_alias_table[i].flags & PTA_3DNOW_A
2972 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
2973 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
2974 if (processor_alias_table[i].flags & PTA_SSE
2975 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
2976 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
2977 if (processor_alias_table[i].flags & PTA_SSE2
2978 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2979 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
2980 if (processor_alias_table[i].flags & PTA_SSE3
2981 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
2982 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
2983 if (processor_alias_table[i].flags & PTA_SSSE3
2984 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
2985 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
2986 if (processor_alias_table[i].flags & PTA_SSE4_1
2987 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
2988 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
2989 if (processor_alias_table[i].flags & PTA_SSE4_2
2990 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
2991 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
2992 if (processor_alias_table[i].flags & PTA_AVX
2993 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
2994 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
2995 if (processor_alias_table[i].flags & PTA_FMA
2996 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
2997 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
2998 if (processor_alias_table[i].flags & PTA_SSE4A
2999 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3000 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3001 if (processor_alias_table[i].flags & PTA_FMA4
3002 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3003 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3004 if (processor_alias_table[i].flags & PTA_XOP
3005 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3006 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3007 if (processor_alias_table[i].flags & PTA_LWP
3008 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3009 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3010 if (processor_alias_table[i].flags & PTA_ABM
3011 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3012 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3013 if (processor_alias_table[i].flags & PTA_CX16
3014 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3015 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3016 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3017 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3018 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3019 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3020 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3021 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3022 if (processor_alias_table[i].flags & PTA_MOVBE
3023 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3024 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3025 if (processor_alias_table[i].flags & PTA_AES
3026 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3027 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3028 if (processor_alias_table[i].flags & PTA_PCLMUL
3029 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3030 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3031 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3032 x86_prefetch_sse = true;
3037 if (!strcmp (ix86_arch_string, "generic"))
3038 error ("generic CPU can be used only for %stune=%s %s",
3039 prefix, suffix, sw);
3040 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3041 error ("bad value (%s) for %sarch=%s %s",
3042 ix86_arch_string, prefix, suffix, sw);
3044 ix86_arch_mask = 1u << ix86_arch;
3045 for (i = 0; i < X86_ARCH_LAST; ++i)
3046 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3048 for (i = 0; i < pta_size; i++)
3049 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3051 ix86_schedule = processor_alias_table[i].schedule;
3052 ix86_tune = processor_alias_table[i].processor;
3053 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3055 if (ix86_tune_defaulted)
3057 ix86_tune_string = "x86-64";
3058 for (i = 0; i < pta_size; i++)
3059 if (! strcmp (ix86_tune_string,
3060 processor_alias_table[i].name))
3062 ix86_schedule = processor_alias_table[i].schedule;
3063 ix86_tune = processor_alias_table[i].processor;
3066 error ("CPU you selected does not support x86-64 "
3069 /* Intel CPUs have always interpreted SSE prefetch instructions as
3070 NOPs; so, we can enable SSE prefetch instructions even when
3071 -mtune (rather than -march) points us to a processor that has them.
3072 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3073 higher processors. */
3075 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3076 x86_prefetch_sse = true;
3080 if (ix86_tune_specified && i == pta_size)
3081 error ("bad value (%s) for %stune=%s %s",
3082 ix86_tune_string, prefix, suffix, sw);
3084 ix86_tune_mask = 1u << ix86_tune;
3085 for (i = 0; i < X86_TUNE_LAST; ++i)
3086 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
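/* Sketch (illustrative): from here on a tuning knob is consulted simply as
   ix86_tune_features[X86_TUNE_USE_INCDEC] != 0, typically wrapped in a
   TARGET_* convenience macro defined in i386.h.  */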
3089 ix86_cost = &ix86_size_cost;
3091 ix86_cost = processor_target_table[ix86_tune].cost;
3093 /* Arrange to set up i386_stack_locals for all functions. */
3094 init_machine_status = ix86_init_machine_status;
3096 /* Validate -mregparm= value. */
3097 if (ix86_regparm_string)
3100 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3101 i = atoi (ix86_regparm_string);
3102 if (i < 0 || i > REGPARM_MAX)
3103 error ("%sregparm=%d%s is not between 0 and %d",
3104 prefix, i, suffix, REGPARM_MAX);
3109 ix86_regparm = REGPARM_MAX;
3111 /* If the user has provided any of the -malign-* options,
3112 warn and use that value only if -falign-* is not set.
3113 Remove this code in GCC 3.2 or later. */
3114 if (ix86_align_loops_string)
3116 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3117 prefix, suffix, suffix);
3118 if (align_loops == 0)
3120 i = atoi (ix86_align_loops_string);
3121 if (i < 0 || i > MAX_CODE_ALIGN)
3122 error ("%salign-loops=%d%s is not between 0 and %d",
3123 prefix, i, suffix, MAX_CODE_ALIGN);
3125 align_loops = 1 << i;
3129 if (ix86_align_jumps_string)
3131 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3132 prefix, suffix, suffix);
3133 if (align_jumps == 0)
3135 i = atoi (ix86_align_jumps_string);
3136 if (i < 0 || i > MAX_CODE_ALIGN)
3137 error ("%salign-loops=%d%s is not between 0 and %d",
3138 prefix, i, suffix, MAX_CODE_ALIGN);
3140 align_jumps = 1 << i;
3144 if (ix86_align_funcs_string)
3146 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3147 prefix, suffix, suffix);
3148 if (align_functions == 0)
3150 i = atoi (ix86_align_funcs_string);
3151 if (i < 0 || i > MAX_CODE_ALIGN)
3152 error ("%salign-loops=%d%s is not between 0 and %d",
3153 prefix, i, suffix, MAX_CODE_ALIGN);
3155 align_functions = 1 << i;
3159 /* Default align_* from the processor table. */
3160 if (align_loops == 0)
3162 align_loops = processor_target_table[ix86_tune].align_loop;
3163 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3165 if (align_jumps == 0)
3167 align_jumps = processor_target_table[ix86_tune].align_jump;
3168 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3170 if (align_functions == 0)
3172 align_functions = processor_target_table[ix86_tune].align_func;
3175 /* Validate -mbranch-cost= value, or provide default. */
3176 ix86_branch_cost = ix86_cost->branch_cost;
3177 if (ix86_branch_cost_string)
3179 i = atoi (ix86_branch_cost_string);
3181 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3183 ix86_branch_cost = i;
3185 if (ix86_section_threshold_string)
3187 i = atoi (ix86_section_threshold_string);
3189 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3191 ix86_section_threshold = i;
3194 if (ix86_tls_dialect_string)
3196 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3197 ix86_tls_dialect = TLS_DIALECT_GNU;
3198 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3199 ix86_tls_dialect = TLS_DIALECT_GNU2;
3201 error ("bad value (%s) for %stls-dialect=%s %s",
3202 ix86_tls_dialect_string, prefix, suffix, sw);
3205 if (ix87_precision_string)
3207 i = atoi (ix87_precision_string);
3208 if (i != 32 && i != 64 && i != 80)
3209 error ("pc%d is not valid precision setting (32, 64 or 80)", i);
3214 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3216 /* Enable by default the SSE and MMX builtins. Do allow the user to
3217 explicitly disable any of these. In particular, disabling SSE and
3218 MMX for kernel code is extremely useful. */
3219 if (!ix86_arch_specified)
3221 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3222 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3225 warning (0, "%srtd%s is ignored in 64-bit mode", prefix, suffix);
3229 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3231 if (!ix86_arch_specified)
3233 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3235 /* The i386 ABI does not specify a red zone. It still makes sense to use
3236 one when the programmer takes care to keep the stack from being destroyed. */
3237 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3238 target_flags |= MASK_NO_RED_ZONE;
3241 /* Keep nonleaf frame pointers. */
3242 if (flag_omit_frame_pointer)
3243 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3244 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3245 flag_omit_frame_pointer = 1;
3247 /* If we're doing fast math, we don't care about comparison order
3248 wrt NaNs. This lets us use a shorter comparison sequence. */
3249 if (flag_finite_math_only)
3250 target_flags &= ~MASK_IEEE_FP;
3252 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3253 since the insns won't need emulation. */
3254 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3255 target_flags &= ~MASK_NO_FANCY_MATH_387;
3257 /* Likewise, if the target doesn't have a 387, or we've specified
3258 software floating point, don't use 387 inline intrinsics. */
3260 target_flags |= MASK_NO_FANCY_MATH_387;
3262 /* Turn on MMX builtins for -msse. */
3265 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3266 x86_prefetch_sse = true;
3269 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3270 if (TARGET_SSE4_2 || TARGET_ABM)
3271 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3273 /* Validate -mpreferred-stack-boundary= value or default it to
3274 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3275 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3276 if (ix86_preferred_stack_boundary_string)
3278 i = atoi (ix86_preferred_stack_boundary_string);
3279 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3280 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3281 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3283 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
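/* E.g. (illustrative note) -mpreferred-stack-boundary=4 yields
   (1 << 4) * 8 = 128 bits, i.e. the 16-byte alignment the SSE ABI
   expects.  */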
3286 /* Set the default value for -mstackrealign. */
3287 if (ix86_force_align_arg_pointer == -1)
3288 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3290 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3292 /* Validate -mincoming-stack-boundary= value or default it to
3293 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3294 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3295 if (ix86_incoming_stack_boundary_string)
3297 i = atoi (ix86_incoming_stack_boundary_string);
3298 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3299 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3300 i, TARGET_64BIT ? 4 : 2);
3303 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3304 ix86_incoming_stack_boundary
3305 = ix86_user_incoming_stack_boundary;
3309 /* Accept -msseregparm only if at least SSE support is enabled. */
3310 if (TARGET_SSEREGPARM
3312 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3314 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3315 if (ix86_fpmath_string != 0)
3317 if (! strcmp (ix86_fpmath_string, "387"))
3318 ix86_fpmath = FPMATH_387;
3319 else if (! strcmp (ix86_fpmath_string, "sse"))
3323 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3324 ix86_fpmath = FPMATH_387;
3327 ix86_fpmath = FPMATH_SSE;
3329 else if (! strcmp (ix86_fpmath_string, "387,sse")
3330 || ! strcmp (ix86_fpmath_string, "387+sse")
3331 || ! strcmp (ix86_fpmath_string, "sse,387")
3332 || ! strcmp (ix86_fpmath_string, "sse+387")
3333 || ! strcmp (ix86_fpmath_string, "both"))
3337 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3338 ix86_fpmath = FPMATH_387;
3340 else if (!TARGET_80387)
3342 warning (0, "387 instruction set disabled, using SSE arithmetics");
3343 ix86_fpmath = FPMATH_SSE;
3346 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3349 error ("bad value (%s) for %sfpmath=%s %s",
3350 ix86_fpmath_string, prefix, suffix, sw);
3353 /* If the i387 is disabled, then do not return values in it. */
3355 target_flags &= ~MASK_FLOAT_RETURNS;
3357 /* Use an external vectorized library when vectorizing intrinsics. */
3358 if (ix86_veclibabi_string)
3360 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3361 ix86_veclib_handler = ix86_veclibabi_svml;
3362 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3363 ix86_veclib_handler = ix86_veclibabi_acml;
3365 error ("unknown vectorization library ABI type (%s) for "
3366 "%sveclibabi=%s %s", ix86_veclibabi_string,
3367 prefix, suffix, sw);
3370 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3371 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3373 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3375 /* ??? Unwind info is not correct around the CFG unless either a frame
3376 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3377 unwind info generation to be aware of the CFG and propagating states around edges. */
3379 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3380 || flag_exceptions || flag_non_call_exceptions)
3381 && flag_omit_frame_pointer
3382 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3384 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3385 warning (0, "unwind tables currently require either a frame pointer "
3386 "or %saccumulate-outgoing-args%s for correctness",
3388 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3391 /* If stack probes are required, the space used for large function
3392 arguments on the stack must also be probed; enable
3393 -maccumulate-outgoing-args so this happens in the prologue. */
3394 if (TARGET_STACK_PROBE
3395 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3397 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3398 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3399 "for correctness", prefix, suffix);
3400 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3403 /* For sane SSE instruction set generation we need the fcomi instruction.
3404 It is safe to enable all CMOVE instructions. */
3408 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3411 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3412 p = strchr (internal_label_prefix, 'X');
3413 internal_label_prefix_len = p - internal_label_prefix;
3417 /* When the scheduling description is not available, disable the scheduler
3418 pass so it won't slow down compilation or make x87 code slower. */
3419 if (!TARGET_SCHEDULE)
3420 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3422 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3423 set_param_value ("simultaneous-prefetches",
3424 ix86_cost->simultaneous_prefetches);
3425 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3426 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3427 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3428 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3429 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3430 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
3432 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3433 can be optimized to ap = __builtin_next_arg (0). */
3435 targetm.expand_builtin_va_start = NULL;
3439 ix86_gen_leave = gen_leave_rex64;
3440 ix86_gen_pop1 = gen_popdi1;
3441 ix86_gen_add3 = gen_adddi3;
3442 ix86_gen_sub3 = gen_subdi3;
3443 ix86_gen_sub3_carry = gen_subdi3_carry;
3444 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3445 ix86_gen_monitor = gen_sse3_monitor64;
3446 ix86_gen_andsp = gen_anddi3;
3450 ix86_gen_leave = gen_leave;
3451 ix86_gen_pop1 = gen_popsi1;
3452 ix86_gen_add3 = gen_addsi3;
3453 ix86_gen_sub3 = gen_subsi3;
3454 ix86_gen_sub3_carry = gen_subsi3_carry;
3455 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3456 ix86_gen_monitor = gen_sse3_monitor;
3457 ix86_gen_andsp = gen_andsi3;
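/* Illustrative sketch, not part of GCC proper: these ix86_gen_* hooks let
   later code emit the right SImode or DImode pattern without re-testing
   TARGET_64BIT at every use site.  The helper below is hypothetical.  */
#if 0
static void
example_adjust_stack_pointer (void)
{
  emit_insn (ix86_gen_add3 (stack_pointer_rtx, stack_pointer_rtx,
			    GEN_INT (-UNITS_PER_WORD)));
}
#endif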
3461 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3463 target_flags |= MASK_CLD & ~target_flags_explicit;
3466 /* Save the initial options in case the user uses function specific options.  */
3468 target_option_default_node = target_option_current_node
3469 = build_target_option_node ();
3472 /* Update register usage after having seen the compiler flags. */
3475 ix86_conditional_register_usage (void)
3480 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3482 if (fixed_regs[i] > 1)
3483 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3484 if (call_used_regs[i] > 1)
3485 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3488 /* The PIC register, if it exists, is fixed. */
3489 j = PIC_OFFSET_TABLE_REGNUM;
3490 if (j != INVALID_REGNUM)
3491 fixed_regs[j] = call_used_regs[j] = 1;
3493 /* The MS_ABI changes the set of call-used registers. */
3494 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
3496 call_used_regs[SI_REG] = 0;
3497 call_used_regs[DI_REG] = 0;
3498 call_used_regs[XMM6_REG] = 0;
3499 call_used_regs[XMM7_REG] = 0;
3500 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3501 call_used_regs[i] = 0;
3504 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3505 other call-clobbered regs for 64-bit. */
3508 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3510 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3511 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3512 && call_used_regs[i])
3513 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3516 /* If MMX is disabled, squash the registers. */
3518 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3519 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3520 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3522 /* If SSE is disabled, squash the registers. */
3524 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3525 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3526 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3528 /* If the FPU is disabled, squash the registers. */
3529 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3530 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3531 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3532 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3534 /* If 32-bit, squash the 64-bit registers. */
3537 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3539 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3545 /* Save the current options */
3548 ix86_function_specific_save (struct cl_target_option *ptr)
3550 ptr->arch = ix86_arch;
3551 ptr->schedule = ix86_schedule;
3552 ptr->tune = ix86_tune;
3553 ptr->fpmath = ix86_fpmath;
3554 ptr->branch_cost = ix86_branch_cost;
3555 ptr->tune_defaulted = ix86_tune_defaulted;
3556 ptr->arch_specified = ix86_arch_specified;
3557 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3558 ptr->target_flags_explicit = target_flags_explicit;
3560 /* The fields are char but the variables are not; make sure the
3561 values fit in the fields. */
3562 gcc_assert (ptr->arch == ix86_arch);
3563 gcc_assert (ptr->schedule == ix86_schedule);
3564 gcc_assert (ptr->tune == ix86_tune);
3565 gcc_assert (ptr->fpmath == ix86_fpmath);
3566 gcc_assert (ptr->branch_cost == ix86_branch_cost);
3569 /* Restore the current options */
3572 ix86_function_specific_restore (struct cl_target_option *ptr)
3574 enum processor_type old_tune = ix86_tune;
3575 enum processor_type old_arch = ix86_arch;
3576 unsigned int ix86_arch_mask, ix86_tune_mask;
3579 ix86_arch = (enum processor_type) ptr->arch;
3580 ix86_schedule = (enum attr_cpu) ptr->schedule;
3581 ix86_tune = (enum processor_type) ptr->tune;
3582 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
3583 ix86_branch_cost = ptr->branch_cost;
3584 ix86_tune_defaulted = ptr->tune_defaulted;
3585 ix86_arch_specified = ptr->arch_specified;
3586 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3587 target_flags_explicit = ptr->target_flags_explicit;
3589 /* Recreate the arch feature tests if the arch changed */
3590 if (old_arch != ix86_arch)
3592 ix86_arch_mask = 1u << ix86_arch;
3593 for (i = 0; i < X86_ARCH_LAST; ++i)
3594 ix86_arch_features[i]
3595 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3598 /* Recreate the tune optimization tests */
3599 if (old_tune != ix86_tune)
3601 ix86_tune_mask = 1u << ix86_tune;
3602 for (i = 0; i < X86_TUNE_LAST; ++i)
3603 ix86_tune_features[i]
3604 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3608 /* Print the current options */
3611 ix86_function_specific_print (FILE *file, int indent,
3612 struct cl_target_option *ptr)
3615 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3616 NULL, NULL, NULL, false);
3618 fprintf (file, "%*sarch = %d (%s)\n",
3621 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3622 ? cpu_names[ptr->arch]
3625 fprintf (file, "%*stune = %d (%s)\n",
3628 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3629 ? cpu_names[ptr->tune]
3632 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3633 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3634 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3635 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3639 fprintf (file, "%*s%s\n", indent, "", target_string);
3640 free (target_string);
3645 /* Inner function to process the attribute((target(...))), take an argument and
3646 set the current options from the argument.  If we have a list, recursively go over the list.  */
3650 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3655 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3656 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3657 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3658 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3673 enum ix86_opt_type type;
3678 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3679 IX86_ATTR_ISA ("abm", OPT_mabm),
3680 IX86_ATTR_ISA ("aes", OPT_maes),
3681 IX86_ATTR_ISA ("avx", OPT_mavx),
3682 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3683 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3684 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3685 IX86_ATTR_ISA ("sse", OPT_msse),
3686 IX86_ATTR_ISA ("sse2", OPT_msse2),
3687 IX86_ATTR_ISA ("sse3", OPT_msse3),
3688 IX86_ATTR_ISA ("sse4", OPT_msse4),
3689 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3690 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3691 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3692 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3693 IX86_ATTR_ISA ("fma4", OPT_mfma4),
3694 IX86_ATTR_ISA ("xop", OPT_mxop),
3695 IX86_ATTR_ISA ("lwp", OPT_mlwp),
3697 /* string options */
3698 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3699 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3700 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3703 IX86_ATTR_YES ("cld",
3707 IX86_ATTR_NO ("fancy-math-387",
3708 OPT_mfancy_math_387,
3709 MASK_NO_FANCY_MATH_387),
3711 IX86_ATTR_YES ("ieee-fp",
3715 IX86_ATTR_YES ("inline-all-stringops",
3716 OPT_minline_all_stringops,
3717 MASK_INLINE_ALL_STRINGOPS),
3719 IX86_ATTR_YES ("inline-stringops-dynamically",
3720 OPT_minline_stringops_dynamically,
3721 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3723 IX86_ATTR_NO ("align-stringops",
3724 OPT_mno_align_stringops,
3725 MASK_NO_ALIGN_STRINGOPS),
3727 IX86_ATTR_YES ("recip",
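/* Illustrative sketch, not part of GCC proper: the kinds of strings the
   table above recognizes when attached to a function.  A "no-" prefix
   inverts an option; "arch=", "tune=" and "fpmath=" take string values.  */
#if 0
__attribute__((target ("sse4.2,no-3dnow,arch=core2")))
static int
example_target_attr (int x)
{
  return x * 2;
}
#endif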
3733 /* If this is a list, recurse to get the options. */
3734 if (TREE_CODE (args) == TREE_LIST)
3738 for (; args; args = TREE_CHAIN (args))
3739 if (TREE_VALUE (args)
3740 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3746 else if (TREE_CODE (args) != STRING_CST)
3749 /* Handle multiple arguments separated by commas. */
3750 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3752 while (next_optstr && *next_optstr != '\0')
3754 char *p = next_optstr;
3756 char *comma = strchr (next_optstr, ',');
3757 const char *opt_string;
3758 size_t len, opt_len;
3763 enum ix86_opt_type type = ix86_opt_unknown;
3769 len = comma - next_optstr;
3770 next_optstr = comma + 1;
3778 /* Recognize no-xxx. */
3779 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3788 /* Find the option. */
3791 for (i = 0; i < ARRAY_SIZE (attrs); i++)
3793 type = attrs[i].type;
3794 opt_len = attrs[i].len;
3795 if (ch == attrs[i].string[0]
3796 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3797 && memcmp (p, attrs[i].string, opt_len) == 0)
3800 mask = attrs[i].mask;
3801 opt_string = attrs[i].string;
3806 /* Process the option. */
3809 error ("attribute(target(\"%s\")) is unknown", orig_p);
3813 else if (type == ix86_opt_isa)
3814 ix86_handle_option (opt, p, opt_set_p);
3816 else if (type == ix86_opt_yes || type == ix86_opt_no)
3818 if (type == ix86_opt_no)
3819 opt_set_p = !opt_set_p;
3822 target_flags |= mask;
3824 target_flags &= ~mask;
3827 else if (type == ix86_opt_str)
3831 error ("option(\"%s\") was already specified", opt_string);
3835 p_strings[opt] = xstrdup (p + opt_len);
3845 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3848 ix86_valid_target_attribute_tree (tree args)
3850 const char *orig_arch_string = ix86_arch_string;
3851 const char *orig_tune_string = ix86_tune_string;
3852 const char *orig_fpmath_string = ix86_fpmath_string;
3853 int orig_tune_defaulted = ix86_tune_defaulted;
3854 int orig_arch_specified = ix86_arch_specified;
3855 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3858 struct cl_target_option *def
3859 = TREE_TARGET_OPTION (target_option_default_node);
3861 /* Process each of the options on the chain. */
3862 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3865 /* If the changed options are different from the default, rerun override_options,
3866 and then save the options away.  The string options are attribute options,
3867 and will be undone when we copy the save structure.  */
3868 if (ix86_isa_flags != def->ix86_isa_flags
3869 || target_flags != def->target_flags
3870 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3871 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3872 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3874 /* If we are using the default tune= or arch=, undo the string assigned,
3875 and use the default. */
3876 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3877 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3878 else if (!orig_arch_specified)
3879 ix86_arch_string = NULL;
3881 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3882 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3883 else if (orig_tune_defaulted)
3884 ix86_tune_string = NULL;
3886 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
3887 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3888 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3889 else if (!TARGET_64BIT && TARGET_SSE)
3890 ix86_fpmath_string = "sse,387";
3892 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3893 override_options (false);
3895 /* Add any builtin functions with the new isa if any. */
3896 ix86_add_new_builtins (ix86_isa_flags);
3898 /* Save the current options unless we are validating options for #pragma.  */
3900 t = build_target_option_node ();
3902 ix86_arch_string = orig_arch_string;
3903 ix86_tune_string = orig_tune_string;
3904 ix86_fpmath_string = orig_fpmath_string;
3906 /* Free up memory allocated to hold the strings */
3907 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
3908 if (option_strings[i])
3909 free (option_strings[i]);
3915 /* Hook to validate attribute((target("string"))). */
3918 ix86_valid_target_attribute_p (tree fndecl,
3919 tree ARG_UNUSED (name),
3921 int ARG_UNUSED (flags))
3923 struct cl_target_option cur_target;
3925 tree old_optimize = build_optimization_node ();
3926 tree new_target, new_optimize;
3927 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
3929 /* If the function changed the optimization levels as well as setting target
3930 options, start with the optimizations specified. */
3931 if (func_optimize && func_optimize != old_optimize)
3932 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
3934 /* The target attributes may also change some optimization flags, so update
3935 the optimization options if necessary. */
3936 cl_target_option_save (&cur_target);
3937 new_target = ix86_valid_target_attribute_tree (args);
3938 new_optimize = build_optimization_node ();
3945 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
3947 if (old_optimize != new_optimize)
3948 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
3951 cl_target_option_restore (&cur_target);
3953 if (old_optimize != new_optimize)
3954 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
3960 /* Hook to determine if one function can safely inline another. */
3963 ix86_can_inline_p (tree caller, tree callee)
3966 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
3967 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
3969 /* If callee has no option attributes, then it is ok to inline. */
3973 /* If caller has no option attributes, but callee does, then it is not ok to
3975 else if (!caller_tree)
3980 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
3981 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
3983 /* Callee's isa options should be a subset of the caller's, i.e. an SSE4 function
3984 can inline an SSE2 function but an SSE2 function can't inline an SSE4
3986 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
3987 != callee_opts->ix86_isa_flags)
3990 /* See if we have the same non-isa options. */
3991 else if (caller_opts->target_flags != callee_opts->target_flags)
3994 /* See if arch, tune, etc. are the same. */
3995 else if (caller_opts->arch != callee_opts->arch)
3998 else if (caller_opts->tune != callee_opts->tune)
4001 else if (caller_opts->fpmath != callee_opts->fpmath)
4004 else if (caller_opts->branch_cost != callee_opts->branch_cost)
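/* Illustrative sketch, not part of GCC proper: the subset rule above in
   action.  The SSE4.2 caller may inline the SSE2 callee, but not the
   other way around.  Function names are hypothetical.  */
#if 0
__attribute__((target ("sse2")))
static int example_callee (int x) { return x + 1; }

__attribute__((target ("sse4.2")))
static int example_caller (int x) { return example_callee (x); }
#endif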
4015 /* Remember the last target of ix86_set_current_function. */
4016 static GTY(()) tree ix86_previous_fndecl;
4018 /* Establish appropriate back-end context for processing the function
4019 FNDECL. The argument might be NULL to indicate processing at top
4020 level, outside of any function scope. */
4022 ix86_set_current_function (tree fndecl)
4024 /* Only change the context if the function changes. This hook is called
4025 several times in the course of compiling a function, and we don't want to
4026 slow things down too much or call target_reinit when it isn't safe. */
4027 if (fndecl && fndecl != ix86_previous_fndecl)
4029 tree old_tree = (ix86_previous_fndecl
4030 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4033 tree new_tree = (fndecl
4034 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4037 ix86_previous_fndecl = fndecl;
4038 if (old_tree == new_tree)
4043 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
4049 struct cl_target_option *def
4050 = TREE_TARGET_OPTION (target_option_current_node);
4052 cl_target_option_restore (def);
4059 /* Return true if this goes in large data/bss. */
4062 ix86_in_large_data_p (tree exp)
4064 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4067 /* Functions are never large data. */
4068 if (TREE_CODE (exp) == FUNCTION_DECL)
4071 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4073 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4074 if (strcmp (section, ".ldata") == 0
4075 || strcmp (section, ".lbss") == 0)
4081 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4083 /* If this is an incomplete type with size 0, then we can't put it
4084 in data because it might be too big when completed. */
4085 if (!size || size > ix86_section_threshold)
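/* Illustrative sketch, not part of GCC proper: with -mcmodel=medium and
   the default -mlarge-data-threshold, an object like this exceeds the
   threshold and is placed in the large data section (.ldata).  */
#if 0
static char example_large_buffer[1 << 20];
#endif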
4092 /* Switch to the appropriate section for output of DECL.
4093 DECL is either a `VAR_DECL' node or a constant of some sort.
4094 RELOC indicates whether forming the initial value of DECL requires
4095 link-time relocations. */
4097 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4101 x86_64_elf_select_section (tree decl, int reloc,
4102 unsigned HOST_WIDE_INT align)
4104 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4105 && ix86_in_large_data_p (decl))
4107 const char *sname = NULL;
4108 unsigned int flags = SECTION_WRITE;
4109 switch (categorize_decl_for_section (decl, reloc))
4114 case SECCAT_DATA_REL:
4115 sname = ".ldata.rel";
4117 case SECCAT_DATA_REL_LOCAL:
4118 sname = ".ldata.rel.local";
4120 case SECCAT_DATA_REL_RO:
4121 sname = ".ldata.rel.ro";
4123 case SECCAT_DATA_REL_RO_LOCAL:
4124 sname = ".ldata.rel.ro.local";
4128 flags |= SECTION_BSS;
4131 case SECCAT_RODATA_MERGE_STR:
4132 case SECCAT_RODATA_MERGE_STR_INIT:
4133 case SECCAT_RODATA_MERGE_CONST:
4137 case SECCAT_SRODATA:
4144 /* We don't split these for medium model. Place them into
4145 default sections and hope for the best.  */
4147 case SECCAT_EMUTLS_VAR:
4148 case SECCAT_EMUTLS_TMPL:
4153 /* We might get called with string constants, but get_named_section
4154 doesn't like them as they are not DECLs. Also, we need to set
4155 flags in that case. */
4157 return get_section (sname, flags, NULL);
4158 return get_named_section (decl, sname, reloc);
4161 return default_elf_select_section (decl, reloc, align);
4164 /* Build up a unique section name, expressed as a
4165 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4166 RELOC indicates whether the initial value of EXP requires
4167 link-time relocations. */
4169 static void ATTRIBUTE_UNUSED
4170 x86_64_elf_unique_section (tree decl, int reloc)
4172 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4173 && ix86_in_large_data_p (decl))
4175 const char *prefix = NULL;
4176 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4177 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4179 switch (categorize_decl_for_section (decl, reloc))
4182 case SECCAT_DATA_REL:
4183 case SECCAT_DATA_REL_LOCAL:
4184 case SECCAT_DATA_REL_RO:
4185 case SECCAT_DATA_REL_RO_LOCAL:
4186 prefix = one_only ? ".ld" : ".ldata";
4189 prefix = one_only ? ".lb" : ".lbss";
4192 case SECCAT_RODATA_MERGE_STR:
4193 case SECCAT_RODATA_MERGE_STR_INIT:
4194 case SECCAT_RODATA_MERGE_CONST:
4195 prefix = one_only ? ".lr" : ".lrodata";
4197 case SECCAT_SRODATA:
4204 /* We don't split these for medium model. Place them into
4205 default sections and hope for the best.  */
4207 case SECCAT_EMUTLS_VAR:
4208 prefix = targetm.emutls.var_section;
4210 case SECCAT_EMUTLS_TMPL:
4211 prefix = targetm.emutls.tmpl_section;
4216 const char *name, *linkonce;
4219 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4220 name = targetm.strip_name_encoding (name);
4222 /* If we're using one_only, then there needs to be a .gnu.linkonce
4223 prefix to the section name. */
4224 linkonce = one_only ? ".gnu.linkonce" : "";
4226 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4228 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4232 default_unique_section (decl, reloc);
4235 #ifdef COMMON_ASM_OP
4236 /* This says how to output assembler code to declare an
4237 uninitialized external linkage data object.
4239 For medium model x86-64 we need to use the .largecomm opcode for
large objects.  */
4242 x86_elf_aligned_common (FILE *file,
4243 const char *name, unsigned HOST_WIDE_INT size,
4246 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4247 && size > (unsigned int)ix86_section_threshold)
4248 fputs (".largecomm\t", file);
4250 fputs (COMMON_ASM_OP, file);
4251 assemble_name (file, name);
4252 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4253 size, align / BITS_PER_UNIT);
4257 /* Utility function for targets to use in implementing
4258 ASM_OUTPUT_ALIGNED_BSS. */
4261 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4262 const char *name, unsigned HOST_WIDE_INT size,
4265 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4266 && size > (unsigned int)ix86_section_threshold)
4267 switch_to_section (get_named_section (decl, ".lbss", 0));
4269 switch_to_section (bss_section);
4270 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4271 #ifdef ASM_DECLARE_OBJECT_NAME
4272 last_assemble_variable_decl = decl;
4273 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4275 /* The standard thing is just to output a label for the object.  */
4276 ASM_OUTPUT_LABEL (file, name);
4277 #endif /* ASM_DECLARE_OBJECT_NAME */
4278 ASM_OUTPUT_SKIP (file, size ? size : 1);
4282 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4284 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4285 make the problem of too few registers even worse.  */
4286 #ifdef INSN_SCHEDULING
4288 flag_schedule_insns = 0;
4292 /* The Darwin libraries never set errno, so we might as well
4293 avoid calling them when that's the only reason we would. */
4294 flag_errno_math = 0;
4296 /* The default values of these switches depend on TARGET_64BIT,
4297 which is not known at this moment.  Mark these values with 2 and
4298 let the user override them.  In case there is no command line option
4299 specifying them, we will set the defaults in override_options. */
4301 flag_omit_frame_pointer = 2;
4302 flag_pcc_struct_return = 2;
4303 flag_asynchronous_unwind_tables = 2;
4304 flag_vect_cost_model = 1;
4305 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4306 SUBTARGET_OPTIMIZATION_OPTIONS;
4310 /* Decide whether we can make a sibling call to a function. DECL is the
4311 declaration of the function being targeted by the call and EXP is the
4312 CALL_EXPR representing the call. */
4315 ix86_function_ok_for_sibcall (tree decl, tree exp)
4317 tree type, decl_or_type;
4320 /* If we are generating position-independent code, we cannot sibcall
4321 optimize any indirect call, or a direct call to a global function,
4322 as the PLT requires %ebx be live. */
4323 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4326 /* If we need to align the outgoing stack, then sibcalling would
4327 unalign the stack, which may break the called function. */
4328 if (ix86_minimum_incoming_stack_boundary (true)
4329 < PREFERRED_STACK_BOUNDARY)
4334 decl_or_type = decl;
4335 type = TREE_TYPE (decl);
4339 /* We're looking at the CALL_EXPR, we need the type of the function. */
4340 type = CALL_EXPR_FN (exp); /* pointer expression */
4341 type = TREE_TYPE (type); /* pointer type */
4342 type = TREE_TYPE (type); /* function type */
4343 decl_or_type = type;
4346 /* Check that the return value locations are the same.  For example,
4347 if we are returning floats on the 80387 register stack, we cannot
4348 make a sibcall from a function that doesn't return a float to a
4349 function that does or, conversely, from a function that does return
4350 a float to a function that doesn't; the necessary stack adjustment
4351 would not be executed. This is also the place we notice
4352 differences in the return value ABI. Note that it is ok for one
4353 of the functions to have void return type as long as the return
4354 value of the other is passed in a register. */
4355 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4356 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4358 if (STACK_REG_P (a) || STACK_REG_P (b))
4360 if (!rtx_equal_p (a, b))
4363 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4365 else if (!rtx_equal_p (a, b))
4370 /* The SYSV ABI has more call-clobbered registers;
4371 disallow sibcalls from MS to SYSV. */
4372 if (cfun->machine->call_abi == MS_ABI
4373 && ix86_function_type_abi (type) == SYSV_ABI)
4378 /* If this call is indirect, we'll need to be able to use a
4379 call-clobbered register for the address of the target function.
4380 Make sure that all such registers are not used for passing
4381 parameters. Note that DLLIMPORT functions are indirect. */
4383 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4385 if (ix86_function_regparm (type, NULL) >= 3)
4387 /* ??? Need to count the actual number of registers to be used,
4388 not the possible number of registers. Fix later. */
4394 /* Otherwise okay. That also includes certain types of indirect calls. */
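/* Illustrative sketch, not part of GCC proper: an indirect tail call.
   Under -m32 -mregparm=3 the regparm check above rejects sibcall
   optimization here, since no call-clobbered register is left to hold
   the function pointer FP.  */
#if 0
static int
example_indirect_tail (int (*fp) (int, int, int), int a, int b, int c)
{
  return fp (a, b, c);
}
#endif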
4398 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
4399 and "sseregparm" calling convention attributes;
4400 arguments as in struct attribute_spec.handler. */
4403 ix86_handle_cconv_attribute (tree *node, tree name,
4405 int flags ATTRIBUTE_UNUSED,
4408 if (TREE_CODE (*node) != FUNCTION_TYPE
4409 && TREE_CODE (*node) != METHOD_TYPE
4410 && TREE_CODE (*node) != FIELD_DECL
4411 && TREE_CODE (*node) != TYPE_DECL)
4413 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4415 *no_add_attrs = true;
4419 /* Can combine regparm with all attributes but fastcall and thiscall.  */
4420 if (is_attribute_p ("regparm", name))
4424 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4426 error ("fastcall and regparm attributes are not compatible");
4429 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4431 error ("regparm and thiscall attributes are not compatible");
4434 cst = TREE_VALUE (args);
4435 if (TREE_CODE (cst) != INTEGER_CST)
4437 warning (OPT_Wattributes,
4438 "%qE attribute requires an integer constant argument",
4440 *no_add_attrs = true;
4442 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4444 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4446 *no_add_attrs = true;
4454 /* Do not warn when emulating the MS ABI. */
4455 if ((TREE_CODE (*node) != FUNCTION_TYPE
4456 && TREE_CODE (*node) != METHOD_TYPE)
4457 || ix86_function_type_abi (*node) != MS_ABI)
4458 warning (OPT_Wattributes, "%qE attribute ignored",
4460 *no_add_attrs = true;
4464 /* Can combine fastcall with sseregparm.  */
4465 if (is_attribute_p ("fastcall", name))
4467 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4469 error ("fastcall and cdecl attributes are not compatible");
4471 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4473 error ("fastcall and stdcall attributes are not compatible");
4475 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4477 error ("fastcall and regparm attributes are not compatible");
4479 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4481 error ("fastcall and thiscall attributes are not compatible");
4485 /* Can combine stdcall with regparm and sseregparm.  */
4487 else if (is_attribute_p ("stdcall", name))
4489 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4491 error ("stdcall and cdecl attributes are not compatible");
4493 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4495 error ("stdcall and fastcall attributes are not compatible");
4497 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4499 error ("stdcall and thiscall attributes are not compatible");
4503 /* Can combine cdecl with regparm and sseregparm. */
4504 else if (is_attribute_p ("cdecl", name))
4506 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4508 error ("stdcall and cdecl attributes are not compatible");
4510 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4512 error ("fastcall and cdecl attributes are not compatible");
4514 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4516 error ("cdecl and thiscall attributes are not compatible");
4519 else if (is_attribute_p ("thiscall", name))
4521 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
4522 warning (OPT_Wattributes, "%qE attribute is used for non-class method",
4524 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4526 error ("stdcall and thiscall attributes are not compatible");
4528 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4530 error ("fastcall and thiscall attributes are not compatible");
4532 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4534 error ("cdecl and thiscall attributes are not compatible");
4538 /* Can combine sseregparm with all attributes. */
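/* Illustrative sketch, not part of GCC proper: attribute combinations
   this handler accepts and rejects.  Declarations are hypothetical.  */
#if 0
__attribute__((stdcall, regparm (2))) void example_ok (int, int);
/* Rejected with an error by the checks above:
   __attribute__((fastcall, regparm (2))) void example_bad (int);  */
#endif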
4543 /* Return 0 if the attributes for two types are incompatible, 1 if they
4544 are compatible, and 2 if they are nearly compatible (which causes a
4545 warning to be generated). */
4548 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4550 /* Check for mismatch of non-default calling convention. */
4551 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4553 if (TREE_CODE (type1) != FUNCTION_TYPE
4554 && TREE_CODE (type1) != METHOD_TYPE)
4557 /* Check for mismatched fastcall/regparm types. */
4558 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4559 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4560 || (ix86_function_regparm (type1, NULL)
4561 != ix86_function_regparm (type2, NULL)))
4564 /* Check for mismatched sseregparm types. */
4565 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4566 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4569 /* Check for mismatched thiscall types. */
4570 if (!lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type1))
4571 != !lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type2)))
4574 /* Check for mismatched return types (cdecl vs stdcall). */
4575 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4576 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4582 /* Return the regparm value for a function with the indicated TYPE and DECL.
4583 DECL may be NULL when calling a function indirectly
4584 or considering a libcall. */
4587 ix86_function_regparm (const_tree type, const_tree decl)
4593 return (ix86_function_type_abi (type) == SYSV_ABI
4594 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4596 regparm = ix86_regparm;
4597 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4600 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4604 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4607 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
4610 /* Use register calling convention for local functions when possible. */
4612 && TREE_CODE (decl) == FUNCTION_DECL
4616 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4617 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
4620 int local_regparm, globals = 0, regno;
4622 /* Make sure no regparm register is taken by a
4623 fixed register variable. */
4624 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4625 if (fixed_regs[local_regparm])
4628 /* We don't want to use regparm(3) for nested functions as
4629 these use a static chain pointer in the third argument. */
4630 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
4633 /* Each fixed register usage increases register pressure,
4634 so fewer registers should be used for argument passing.
4635 This functionality can be overridden by an explicit
4637 for (regno = 0; regno <= DI_REG; regno++)
4638 if (fixed_regs[regno])
4642 = globals < local_regparm ? local_regparm - globals : 0;
4644 if (local_regparm > regparm)
4645 regparm = local_regparm;
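/* Illustrative sketch, not part of GCC proper: at -O2 on ia32, a
   file-local function like this one may have its arguments promoted
   into registers by the logic above, as if declared regparm.  */
#if 0
static int
example_local_sum (int a, int b, int c)
{
  return a + b + c;
}
#endif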
4652 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4653 DFmode (2) arguments in SSE registers for a function with the
4654 indicated TYPE and DECL.  DECL may be NULL when calling a function
4655 indirectly or considering a libcall.  Otherwise return 0.  */
4658 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4660 gcc_assert (!TARGET_64BIT);
4662 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4663 by the sseregparm attribute. */
4664 if (TARGET_SSEREGPARM
4665 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4672 error ("calling %qD with attribute sseregparm without "
4673 "SSE/SSE2 enabled", decl);
4675 error ("calling %qT with attribute sseregparm without "
4676 "SSE/SSE2 enabled", type);
4684 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4685 (and DFmode for SSE2) arguments in SSE registers. */
4686 if (decl && TARGET_SSE_MATH && optimize && !profile_flag)
4688 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4689 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4691 return TARGET_SSE2 ? 2 : 1;
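/* Illustrative sketch, not part of GCC proper: explicitly requesting
   SFmode/DFmode arguments in SSE registers on ia32.  Requires SSE
   (and SSE2 for double) to be enabled, as checked above.  */
#if 0
__attribute__((sseregparm, regparm (3)))
double example_sse_mul (double x, double y);
#endif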
4697 /* Return true if EAX is live at the start of the function. Used by
4698 ix86_expand_prologue to determine if we need special help before
4699 calling allocate_stack_worker. */
4702 ix86_eax_live_at_start_p (void)
4704 /* Cheat. Don't bother working forward from ix86_function_regparm
4705 to the function type to whether an actual argument is located in
4706 eax. Instead just look at cfg info, which is still close enough
4707 to correct at this point. This gives false positives for broken
4708 functions that might use uninitialized data that happens to be
4709 allocated in eax, but who cares? */
4710 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4713 /* Value is the number of bytes of arguments automatically
4714 popped when returning from a subroutine call.
4715 FUNDECL is the declaration node of the function (as a tree),
4716 FUNTYPE is the data type of the function (as a tree),
4717 or for a library call it is an identifier node for the subroutine name.
4718 SIZE is the number of bytes of arguments passed on the stack.
4720 On the 80386, the RTD insn may be used to pop them if the number
4721 of args is fixed, but if the number is variable then the caller
4722 must pop them all. RTD can't be used for library calls now
4723 because the library is compiled with the Unix compiler.
4724 Use of RTD is a selectable option, since it is incompatible with
4725 standard Unix calling sequences. If the option is not selected,
4726 the caller must always pop the args.
4728 The attribute stdcall is equivalent to RTD on a per module basis. */
4731 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4735 /* None of the 64-bit ABIs pop arguments. */
4739 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4741 /* Cdecl functions override -mrtd, and never pop the stack. */
4742 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4744 /* Stdcall and fastcall functions will pop the stack if not variable args.  */
4746 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4747 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype))
4748 || lookup_attribute ("thiscall", TYPE_ATTRIBUTES (funtype)))
4751 if (rtd && ! stdarg_p (funtype))
4755 /* Lose any fake structure return argument if it is passed on the stack. */
4756 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4757 && !KEEP_AGGREGATE_RETURN_POINTER)
4759 int nregs = ix86_function_regparm (funtype, fundecl);
4761 return GET_MODE_SIZE (Pmode);
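/* Illustrative sketch, not part of GCC proper: a stdcall function pops
   its own stack arguments, so for the declaration below the callee
   returns with "ret 8" on ia32 (two 4-byte arguments).  */
#if 0
__attribute__((stdcall)) int example_sum2 (int a, int b);
#endif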
4767 /* Argument support functions. */
4769 /* Return true when a register may be used to pass function parameters.  */
4771 ix86_function_arg_regno_p (int regno)
4774 const int *parm_regs;
4779 return (regno < REGPARM_MAX
4780 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4782 return (regno < REGPARM_MAX
4783 || (TARGET_MMX && MMX_REGNO_P (regno)
4784 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4785 || (TARGET_SSE && SSE_REGNO_P (regno)
4786 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4791 if (SSE_REGNO_P (regno) && TARGET_SSE)
4796 if (TARGET_SSE && SSE_REGNO_P (regno)
4797 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4801 /* TODO: The function should depend on the current function ABI but
4802 builtins.c would need updating then.  Therefore we use the default ABI.  */
4805 /* RAX is used as hidden argument to va_arg functions. */
4806 if (ix86_abi == SYSV_ABI && regno == AX_REG)
4809 if (ix86_abi == MS_ABI)
4810 parm_regs = x86_64_ms_abi_int_parameter_registers;
4812 parm_regs = x86_64_int_parameter_registers;
4813 for (i = 0; i < (ix86_abi == MS_ABI
4814 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
4815 if (regno == parm_regs[i])
4820 /* Return true if we do not know how to pass TYPE solely in registers.  */
4823 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4825 if (must_pass_in_stack_var_size_or_pad (mode, type))
4828 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4829 The layout_type routine is crafty and tries to trick us into passing
4830 currently unsupported vector types on the stack by using TImode. */
4831 return (!TARGET_64BIT && mode == TImode
4832 && type && TREE_CODE (type) != VECTOR_TYPE);
4835 /* Return the size, in bytes, of the area reserved for arguments passed
4836 in registers for the function represented by FNDECL, depending on the ABI used.  */
4839 ix86_reg_parm_stack_space (const_tree fndecl)
4841 enum calling_abi call_abi = SYSV_ABI;
4842 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
4843 call_abi = ix86_function_abi (fndecl);
4845 call_abi = ix86_function_type_abi (fndecl);
4846 if (call_abi == MS_ABI)
4851 /* Returns value SYSV_ABI, MS_ABI dependent on fntype, specifying the call ABI used.  */
4854 ix86_function_type_abi (const_tree fntype)
4856 if (TARGET_64BIT && fntype != NULL)
4858 enum calling_abi abi = ix86_abi;
4859 if (abi == SYSV_ABI)
4861 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
4864 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
4872 ix86_function_ms_hook_prologue (const_tree fntype)
4876 if (lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fntype)))
4878 if (decl_function_context (fntype) != NULL_TREE)
4880 error_at (DECL_SOURCE_LOCATION (fntype),
4881 "ms_hook_prologue is not compatible with nested function");
4890 static enum calling_abi
4891 ix86_function_abi (const_tree fndecl)
4895 return ix86_function_type_abi (TREE_TYPE (fndecl));
4898 /* Returns value SYSV_ABI, MS_ABI dependent on cfun, specifying the call ABI used.  */
4901 ix86_cfun_abi (void)
4903 if (! cfun || ! TARGET_64BIT)
4905 return cfun->machine->call_abi;
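/* Illustrative sketch, not part of GCC proper: per-function ABI
   selection on x86-64 via attributes, which the hooks above honor.  */
#if 0
__attribute__((ms_abi))   void example_ms (int, int, int, int);
__attribute__((sysv_abi)) void example_sysv (int, int, int, int);
#endif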
4909 extern void init_regs (void);
4911 /* Implementation of the call ABI switching target hook.  The call register
4912 sets specific to FNDECL are set up here.  See also CONDITIONAL_REGISTER_USAGE
4913 for more details.  */
4915 ix86_call_abi_override (const_tree fndecl)
4917 if (fndecl == NULL_TREE)
4918 cfun->machine->call_abi = ix86_abi;
4920 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
4923 /* MS and SYSV ABI have different sets of call-used registers.  Avoid expensive
4924 re-initialization of init_regs each time we switch function context, since
4925 this is needed only during RTL expansion.  */
4927 ix86_maybe_switch_abi (void)
4930 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
4934 /* Initialize a variable CUM of type CUMULATIVE_ARGS
4935 for a call to a function whose data type is FNTYPE.
4936 For a library call, FNTYPE is 0. */
4939 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
4940 tree fntype, /* tree ptr for function decl */
4941 rtx libname, /* SYMBOL_REF of library name or 0 */
4944 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
4945 memset (cum, 0, sizeof (*cum));
4948 cum->call_abi = ix86_function_abi (fndecl);
4950 cum->call_abi = ix86_function_type_abi (fntype);
4951 /* Set up the number of registers to use for passing arguments. */
4953 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
4954 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
4955 "or subtarget optimization implying it");
4956 cum->nregs = ix86_regparm;
4959 if (cum->call_abi != ix86_abi)
4960 cum->nregs = (ix86_abi != SYSV_ABI
4961 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4965 cum->sse_nregs = SSE_REGPARM_MAX;
4968 if (cum->call_abi != ix86_abi)
4969 cum->sse_nregs = (ix86_abi != SYSV_ABI
4970 ? X86_64_SSE_REGPARM_MAX
4971 : X86_64_MS_SSE_REGPARM_MAX);
4975 cum->mmx_nregs = MMX_REGPARM_MAX;
4976 cum->warn_avx = true;
4977 cum->warn_sse = true;
4978 cum->warn_mmx = true;
4980 /* Because the type might mismatch between caller and callee, we need to
4981 use the actual type of the function for local calls.
4982 FIXME: cgraph_analyze can be told to actually record if function uses
4983 va_start so for local functions maybe_vaarg can be made aggressive
4985 FIXME: once the type system is fixed, we won't need this code anymore.  */
4987 fntype = TREE_TYPE (fndecl);
4988 cum->maybe_vaarg = (fntype
4989 ? (!prototype_p (fntype) || stdarg_p (fntype))
4994 /* If there are variable arguments, then we won't pass anything
4995 in registers in 32-bit mode. */
4996 if (stdarg_p (fntype))
5007 /* Use ecx and edx registers if function has fastcall attribute,
5008 else look for regparm information. */
5011 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
5014 cum->fastcall = 1; /* Same first register as in fastcall. */
5016 else if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
5022 cum->nregs = ix86_function_regparm (fntype, fndecl);
5025 /* Set up the number of SSE registers used for passing SFmode
5026 and DFmode arguments. Warn for mismatching ABI. */
5027 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
5031 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
5032 But in the case of vector types, it is some vector mode.
5034 When we have only some of our vector isa extensions enabled, then there
5035 are some modes for which vector_mode_supported_p is false. For these
5036 modes, the generic vector support in gcc will choose some non-vector mode
5037 in order to implement the type. By computing the natural mode, we'll
5038 select the proper ABI location for the operand and not depend on whatever
5039 the middle-end decides to do with these vector types.
5041 The middle-end can't deal with vector types > 16 bytes.  In this
5042 case, we return the original mode and warn about the ABI change if CUM isn't
5045 static enum machine_mode
5046 type_natural_mode (const_tree type, CUMULATIVE_ARGS *cum)
5048 enum machine_mode mode = TYPE_MODE (type);
5050 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
5052 HOST_WIDE_INT size = int_size_in_bytes (type);
5053 if ((size == 8 || size == 16 || size == 32)
5054 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5055 && TYPE_VECTOR_SUBPARTS (type) > 1)
5057 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5059 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5060 mode = MIN_MODE_VECTOR_FLOAT;
5062 mode = MIN_MODE_VECTOR_INT;
5064 /* Get the mode which has this inner mode and number of units. */
5065 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5066 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5067 && GET_MODE_INNER (mode) == innermode)
5069 if (size == 32 && !TARGET_AVX)
5071 static bool warnedavx;
5078 warning (0, "AVX vector argument without AVX "
5079 "enabled changes the ABI");
5081 return TYPE_MODE (type);
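/* Illustrative sketch, not part of GCC proper: a 32-byte vector type.
   Without -mavx, type_natural_mode falls back to the original mode for
   it and emits the ABI-change warning above.  */
#if 0
typedef float example_v8sf __attribute__((vector_size (32)));
#endif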
5094 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5095 this may not agree with the mode that the type system has chosen for the
5096 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5097 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5100 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5105 if (orig_mode != BLKmode)
5106 tmp = gen_rtx_REG (orig_mode, regno);
5109 tmp = gen_rtx_REG (mode, regno);
5110 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5111 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
5117 /* x86-64 register passing implementation.  See x86-64 ABI for details.  The goal
5118 of this code is to classify each eightbyte of an incoming argument by the
5119 register class and assign registers accordingly.  */
5121 /* Return the union class of CLASS1 and CLASS2.
5122 See the x86-64 PS ABI for details. */
5124 static enum x86_64_reg_class
5125 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5127 /* Rule #1: If both classes are equal, this is the resulting class. */
5128 if (class1 == class2)
5131 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
5133 if (class1 == X86_64_NO_CLASS)
5135 if (class2 == X86_64_NO_CLASS)
5138 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5139 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5140 return X86_64_MEMORY_CLASS;
5142 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5143 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5144 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5145 return X86_64_INTEGERSI_CLASS;
5146 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5147 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5148 return X86_64_INTEGER_CLASS;
5150 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
5152 if (class1 == X86_64_X87_CLASS
5153 || class1 == X86_64_X87UP_CLASS
5154 || class1 == X86_64_COMPLEX_X87_CLASS
5155 || class2 == X86_64_X87_CLASS
5156 || class2 == X86_64_X87UP_CLASS
5157 || class2 == X86_64_COMPLEX_X87_CLASS)
5158 return X86_64_MEMORY_CLASS;
5160 /* Rule #6: Otherwise class SSE is used. */
5161 return X86_64_SSE_CLASS;
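/* Illustrative worked example, not part of GCC proper: for
   struct { int i; float f; } both 4-byte fields land in one eightbyte;
   merging the integer class with the SSE class yields INTEGER by
   rule #4, so the whole struct is passed in a general-purpose register
   (%rdi when it is the first argument).  */
#if 0
struct example_mixed { int i; float f; };
#endif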
5164 /* Classify the argument of type TYPE and mode MODE.
5165 CLASSES will be filled by the register class used to pass each word
5166 of the operand. The number of words is returned. In case the parameter
5167 should be passed in memory, 0 is returned. As a special case for zero
5168 sized containers, classes[0] will be NO_CLASS and 1 is returned.
5170 BIT_OFFSET is used internally for handling records and specifies the
5171 offset in bits modulo 256 to avoid overflow cases.
5173 See the x86-64 PS ABI for details.
5177 classify_argument (enum machine_mode mode, const_tree type,
5178 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5180 HOST_WIDE_INT bytes =
5181 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5182 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5184 /* Variable sized entities are always passed/returned in memory. */
5188 if (mode != VOIDmode
5189 && targetm.calls.must_pass_in_stack (mode, type))
5192 if (type && AGGREGATE_TYPE_P (type))
5196 enum x86_64_reg_class subclasses[MAX_CLASSES];
5198 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5202 for (i = 0; i < words; i++)
5203 classes[i] = X86_64_NO_CLASS;
5205 /* Zero-sized arrays or structures are NO_CLASS.  We return 0 to
5206 signal the memory class, so handle it as a special case.  */
5209 classes[0] = X86_64_NO_CLASS;
5213 /* Classify each field of record and merge classes. */
5214 switch (TREE_CODE (type))
5217 /* And now merge the fields of the structure.  */
5218 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5220 if (TREE_CODE (field) == FIELD_DECL)
5224 if (TREE_TYPE (field) == error_mark_node)
5227 /* Bitfields are always classified as integer. Handle them
5228 early, since later code would consider them to be
5229 misaligned integers. */
5230 if (DECL_BIT_FIELD (field))
5232 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5233 i < ((int_bit_position (field) + (bit_offset % 64))
5234 + tree_low_cst (DECL_SIZE (field), 0)
5237 merge_classes (X86_64_INTEGER_CLASS,
5244 type = TREE_TYPE (field);
5246 /* Flexible array member is ignored. */
5247 if (TYPE_MODE (type) == BLKmode
5248 && TREE_CODE (type) == ARRAY_TYPE
5249 && TYPE_SIZE (type) == NULL_TREE
5250 && TYPE_DOMAIN (type) != NULL_TREE
5251 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5256 if (!warned && warn_psabi)
5259 inform (input_location,
5260 "The ABI of passing struct with"
5261 " a flexible array member has"
5262 " changed in GCC 4.4");
5266 num = classify_argument (TYPE_MODE (type), type,
5268 (int_bit_position (field)
5269 + bit_offset) % 256);
5272 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5273 for (i = 0; i < num && (i + pos) < words; i++)
5275 merge_classes (subclasses[i], classes[i + pos]);
5282 /* Arrays are handled as small records. */
5285 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5286 TREE_TYPE (type), subclasses, bit_offset);
5290 /* The partial classes are now full classes. */
5291 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5292 subclasses[0] = X86_64_SSE_CLASS;
5293 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5294 && !((bit_offset % 64) == 0 && bytes == 4))
5295 subclasses[0] = X86_64_INTEGER_CLASS;
5297 for (i = 0; i < words; i++)
5298 classes[i] = subclasses[i % num];
5303 case QUAL_UNION_TYPE:
5304 /* Unions are similar to RECORD_TYPE but offset is always 0.
5306 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5308 if (TREE_CODE (field) == FIELD_DECL)
5312 if (TREE_TYPE (field) == error_mark_node)
5315 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5316 TREE_TYPE (field), subclasses,
5320 for (i = 0; i < num; i++)
5321 classes[i] = merge_classes (subclasses[i], classes[i]);
5332 /* When size > 16 bytes, if the first one isn't
5333 X86_64_SSE_CLASS or any other ones aren't
5334 X86_64_SSEUP_CLASS, everything should be passed in
5336 if (classes[0] != X86_64_SSE_CLASS)
5339 for (i = 1; i < words; i++)
5340 if (classes[i] != X86_64_SSEUP_CLASS)
5344 /* Final merger cleanup. */
5345 for (i = 0; i < words; i++)
5347 /* If one class is MEMORY, everything should be passed in
5349 if (classes[i] == X86_64_MEMORY_CLASS)
5352 /* The X86_64_SSEUP_CLASS should be always preceded by
5353 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5354 if (classes[i] == X86_64_SSEUP_CLASS
5355 && classes[i - 1] != X86_64_SSE_CLASS
5356 && classes[i - 1] != X86_64_SSEUP_CLASS)
5358 /* The first one should never be X86_64_SSEUP_CLASS. */
5359 gcc_assert (i != 0);
5360 classes[i] = X86_64_SSE_CLASS;
5363 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5364 everything should be passed in memory. */
5365 if (classes[i] == X86_64_X87UP_CLASS
5366 && (classes[i - 1] != X86_64_X87_CLASS))
5370 /* The first one should never be X86_64_X87UP_CLASS. */
5371 gcc_assert (i != 0);
5372 if (!warned && warn_psabi)
5375 inform (input_location,
5376 "The ABI of passing union with long double"
5377 " has changed in GCC 4.4");
5385 /* Compute alignment needed.  We align all types to natural boundaries with
5386 the exception of XFmode, which is aligned to 64 bits.  */
5387 if (mode != VOIDmode && mode != BLKmode)
5389 int mode_alignment = GET_MODE_BITSIZE (mode);
5392 mode_alignment = 128;
5393 else if (mode == XCmode)
5394 mode_alignment = 256;
5395 if (COMPLEX_MODE_P (mode))
5396 mode_alignment /= 2;
5397 /* Misaligned fields are always returned in memory. */
5398 if (bit_offset % mode_alignment)
5402 /* For V1xx modes, just use the base mode.  */
5403 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
5404 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5405 mode = GET_MODE_INNER (mode);
5407 /* Classification of atomic types. */
5412 classes[0] = X86_64_SSE_CLASS;
5415 classes[0] = X86_64_SSE_CLASS;
5416 classes[1] = X86_64_SSEUP_CLASS;
5426 int size = (bit_offset % 64)+ (int) GET_MODE_BITSIZE (mode);
5430 classes[0] = X86_64_INTEGERSI_CLASS;
5433 else if (size <= 64)
5435 classes[0] = X86_64_INTEGER_CLASS;
5438 else if (size <= 64+32)
5440 classes[0] = X86_64_INTEGER_CLASS;
5441 classes[1] = X86_64_INTEGERSI_CLASS;
5444 else if (size <= 64+64)
5446 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5454 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5458 /* OImode shouldn't be used directly. */
5463 if (!(bit_offset % 64))
5464 classes[0] = X86_64_SSESF_CLASS;
5466 classes[0] = X86_64_SSE_CLASS;
5469 classes[0] = X86_64_SSEDF_CLASS;
5472 classes[0] = X86_64_X87_CLASS;
5473 classes[1] = X86_64_X87UP_CLASS;
5476 classes[0] = X86_64_SSE_CLASS;
5477 classes[1] = X86_64_SSEUP_CLASS;
5480 classes[0] = X86_64_SSE_CLASS;
5481 if (!(bit_offset % 64))
5487 if (!warned && warn_psabi)
5490 inform (input_location,
5491 "The ABI of passing structure with complex float"
5492 " member has changed in GCC 4.4");
5494 classes[1] = X86_64_SSESF_CLASS;
5498 classes[0] = X86_64_SSEDF_CLASS;
5499 classes[1] = X86_64_SSEDF_CLASS;
5502 classes[0] = X86_64_COMPLEX_X87_CLASS;
5505 /* These modes are larger than 16 bytes.  */
5513 classes[0] = X86_64_SSE_CLASS;
5514 classes[1] = X86_64_SSEUP_CLASS;
5515 classes[2] = X86_64_SSEUP_CLASS;
5516 classes[3] = X86_64_SSEUP_CLASS;
5524 classes[0] = X86_64_SSE_CLASS;
5525 classes[1] = X86_64_SSEUP_CLASS;
5533 classes[0] = X86_64_SSE_CLASS;
5539 gcc_assert (VECTOR_MODE_P (mode));
5544 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5546 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5547 classes[0] = X86_64_INTEGERSI_CLASS;
5549 classes[0] = X86_64_INTEGER_CLASS;
5550 classes[1] = X86_64_INTEGER_CLASS;
5551 return 1 + (bytes > 8);
5555 /* Examine the argument and return the number of registers required in each
5556 class.  Return 0 iff the parameter should be passed in memory.  */
5558 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5559 int *int_nregs, int *sse_nregs)
5561 enum x86_64_reg_class regclass[MAX_CLASSES];
5562 int n = classify_argument (mode, type, regclass, 0);
5568 for (n--; n >= 0; n--)
5569 switch (regclass[n])
5571 case X86_64_INTEGER_CLASS:
5572 case X86_64_INTEGERSI_CLASS:
5575 case X86_64_SSE_CLASS:
5576 case X86_64_SSESF_CLASS:
5577 case X86_64_SSEDF_CLASS:
5580 case X86_64_NO_CLASS:
5581 case X86_64_SSEUP_CLASS:
5583 case X86_64_X87_CLASS:
5584 case X86_64_X87UP_CLASS:
5588 case X86_64_COMPLEX_X87_CLASS:
5589 return in_return ? 2 : 0;
5590 case X86_64_MEMORY_CLASS:
5596 /* Construct container for the argument used by GCC interface. See
5597 FUNCTION_ARG for the detailed description. */
5600 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5601 const_tree type, int in_return, int nintregs, int nsseregs,
5602 const int *intreg, int sse_regno)
5604 /* The following variables hold the static issued_error state. */
5605 static bool issued_sse_arg_error;
5606 static bool issued_sse_ret_error;
5607 static bool issued_x87_ret_error;
5609 enum machine_mode tmpmode;
5611 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5612 enum x86_64_reg_class regclass[MAX_CLASSES];
5616 int needed_sseregs, needed_intregs;
5617 rtx exp[MAX_CLASSES];
5620 n = classify_argument (mode, type, regclass, 0);
5623 if (!examine_argument (mode, type, in_return, &needed_intregs,
5626 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5629 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5630 some less clueful developer tries to use floating-point anyway. */
5631 if (needed_sseregs && !TARGET_SSE)
5635 if (!issued_sse_ret_error)
5637 error ("SSE register return with SSE disabled");
5638 issued_sse_ret_error = true;
5641 else if (!issued_sse_arg_error)
5643 error ("SSE register argument with SSE disabled");
5644 issued_sse_arg_error = true;
5649 /* Likewise, error if the ABI requires us to return values in the
5650 x87 registers and the user specified -mno-80387. */
5651 if (!TARGET_80387 && in_return)
5652 for (i = 0; i < n; i++)
5653 if (regclass[i] == X86_64_X87_CLASS
5654 || regclass[i] == X86_64_X87UP_CLASS
5655 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5657 if (!issued_x87_ret_error)
5659 error ("x87 register return with x87 disabled");
5660 issued_x87_ret_error = true;
5665 /* First construct simple cases.  Avoid SCmode, since we want to use a
5666 single register to pass this type.  */
5667 if (n == 1 && mode != SCmode)
5668 switch (regclass[0])
5670 case X86_64_INTEGER_CLASS:
5671 case X86_64_INTEGERSI_CLASS:
5672 return gen_rtx_REG (mode, intreg[0]);
5673 case X86_64_SSE_CLASS:
5674 case X86_64_SSESF_CLASS:
5675 case X86_64_SSEDF_CLASS:
5676 if (mode != BLKmode)
5677 return gen_reg_or_parallel (mode, orig_mode,
5678 SSE_REGNO (sse_regno));
5680 case X86_64_X87_CLASS:
5681 case X86_64_COMPLEX_X87_CLASS:
5682 return gen_rtx_REG (mode, FIRST_STACK_REG);
5683 case X86_64_NO_CLASS:
5684 /* Zero sized array, struct or class. */
5689 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5690 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5691 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5693 && regclass[0] == X86_64_SSE_CLASS
5694 && regclass[1] == X86_64_SSEUP_CLASS
5695 && regclass[2] == X86_64_SSEUP_CLASS
5696 && regclass[3] == X86_64_SSEUP_CLASS
5698 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5701 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5702 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5703 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5704 && regclass[1] == X86_64_INTEGER_CLASS
5705 && (mode == CDImode || mode == TImode || mode == TFmode)
5706 && intreg[0] + 1 == intreg[1])
5707 return gen_rtx_REG (mode, intreg[0]);
5709 /* Otherwise figure out the entries of the PARALLEL. */
5710 for (i = 0; i < n; i++)
5714 switch (regclass[i])
5716 case X86_64_NO_CLASS:
5718 case X86_64_INTEGER_CLASS:
5719 case X86_64_INTEGERSI_CLASS:
5720 /* Merge TImodes on aligned occasions here too. */
5721 if (i * 8 + 8 > bytes)
5722 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5723 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5727 /* We've requested 24 bytes for which we don't have a mode. Use DImode. */
5728 if (tmpmode == BLKmode)
5730 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5731 gen_rtx_REG (tmpmode, *intreg),
5735 case X86_64_SSESF_CLASS:
5736 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5737 gen_rtx_REG (SFmode,
5738 SSE_REGNO (sse_regno)),
5742 case X86_64_SSEDF_CLASS:
5743 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5744 gen_rtx_REG (DFmode,
5745 SSE_REGNO (sse_regno)),
5749 case X86_64_SSE_CLASS:
5757 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
5767 && regclass[1] == X86_64_SSEUP_CLASS
5768 && regclass[2] == X86_64_SSEUP_CLASS
5769 && regclass[3] == X86_64_SSEUP_CLASS);
5776 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5777 gen_rtx_REG (tmpmode,
5778 SSE_REGNO (sse_regno)),
5787 /* Empty aligned struct, union or class. */
5791 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5792 for (i = 0; i < nexps; i++)
5793 XVECEXP (ret, 0, i) = exp [i];
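/* Continuing the sketch above (the hypothetical struct with a long and a
   double, default register assignment): the PARALLEL built here would
   look roughly like

     (parallel:BLK [(expr_list (reg:DI di) (const_int 0))
                    (expr_list (reg:DF xmm0) (const_int 8))])

   i.e. the first eightbyte travels in a general register and the second
   in an SSE register, at byte offsets 0 and 8 within the argument.  */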
5797 /* Update the data in CUM to advance over an argument of mode MODE
5798 and data type TYPE. (TYPE is null for libcalls where that information
5799 may not be available.) */
5802 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5803 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5819 cum->words += words;
5820 cum->nregs -= words;
5821 cum->regno += words;
5823 if (cum->nregs <= 0)
5831 /* OImode shouldn't be used directly. */
5835 if (cum->float_in_sse < 2)
5838 if (cum->float_in_sse < 1)
5855 if (!type || !AGGREGATE_TYPE_P (type))
5857 cum->sse_words += words;
5858 cum->sse_nregs -= 1;
5859 cum->sse_regno += 1;
5860 if (cum->sse_nregs <= 0)
5874 if (!type || !AGGREGATE_TYPE_P (type))
5876 cum->mmx_words += words;
5877 cum->mmx_nregs -= 1;
5878 cum->mmx_regno += 1;
5879 if (cum->mmx_nregs <= 0)
5890 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5891 tree type, HOST_WIDE_INT words, int named)
5893 int int_nregs, sse_nregs;
5895 /* Unnamed 256-bit vector mode parameters are passed on the stack. */
5896 if (!named && VALID_AVX256_REG_MODE (mode))
5899 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
5900 cum->words += words;
5901 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
5903 cum->nregs -= int_nregs;
5904 cum->sse_nregs -= sse_nregs;
5905 cum->regno += int_nregs;
5906 cum->sse_regno += sse_nregs;
5909 cum->words += words;
5913 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
5914 HOST_WIDE_INT words)
5916 /* Otherwise, this should be passed indirect. */
5917 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
5919 cum->words += words;
5928 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5929 tree type, int named)
5931 HOST_WIDE_INT bytes, words;
5933 if (mode == BLKmode)
5934 bytes = int_size_in_bytes (type);
5936 bytes = GET_MODE_SIZE (mode);
5937 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5940 mode = type_natural_mode (type, NULL);
5942 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
5943 function_arg_advance_ms_64 (cum, bytes, words);
5944 else if (TARGET_64BIT)
5945 function_arg_advance_64 (cum, mode, type, words, named);
5947 function_arg_advance_32 (cum, mode, type, bytes, words);
5950 /* Define where to put the arguments to a function.
5951 Value is zero to push the argument on the stack,
5952 or a hard register in which to store the argument.
5954 MODE is the argument's machine mode.
5955 TYPE is the data type of the argument (as a tree).
5956 This is null for libcalls where that information may
5958 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5959 the preceding args and about the function being called.
5960 NAMED is nonzero if this argument is a named parameter
5961 (otherwise it is an extra parameter matching an ellipsis). */
5964 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5965 enum machine_mode orig_mode, tree type,
5966 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5968 static bool warnedsse, warnedmmx;
5970 /* Avoid the AL settings for the Unix64 ABI. */
5971 if (mode == VOIDmode)
5987 if (words <= cum->nregs)
5989 int regno = cum->regno;
5991 /* Fastcall allocates the first two DWORD (SImode) or
5992 smaller arguments to ECX and EDX if it isn't an
5998 || (type && AGGREGATE_TYPE_P (type)))
6001 /* ECX, not EAX, is the first allocated register. */
6002 if (regno == AX_REG)
6005 return gen_rtx_REG (mode, regno);
6010 if (cum->float_in_sse < 2)
6013 if (cum->float_in_sse < 1)
6017 /* In 32-bit mode, we pass TImode in xmm registers. */
6024 if (!type || !AGGREGATE_TYPE_P (type))
6026 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
6029 warning (0, "SSE vector argument without SSE enabled "
6033 return gen_reg_or_parallel (mode, orig_mode,
6034 cum->sse_regno + FIRST_SSE_REG);
6039 /* OImode shouldn't be used directly. */
6048 if (!type || !AGGREGATE_TYPE_P (type))
6051 return gen_reg_or_parallel (mode, orig_mode,
6052 cum->sse_regno + FIRST_SSE_REG);
6062 if (!type || !AGGREGATE_TYPE_P (type))
6064 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6067 warning (0, "MMX vector argument without MMX enabled "
6071 return gen_reg_or_parallel (mode, orig_mode,
6072 cum->mmx_regno + FIRST_MMX_REG);
6081 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6082 enum machine_mode orig_mode, tree type, int named)
6084 /* Handle a hidden AL argument containing the number of registers
6085 for varargs x86-64 functions. */
6086 if (mode == VOIDmode)
6087 return GEN_INT (cum->maybe_vaarg
6088 ? (cum->sse_nregs < 0
6089 ? (cum->call_abi == ix86_abi
6091 : (ix86_abi != SYSV_ABI
6092 ? X86_64_SSE_REGPARM_MAX
6093 : X86_64_MS_SSE_REGPARM_MAX))
6108 /* Unnamed 256-bit vector mode parameters are passed on the stack. */
6114 return construct_container (mode, orig_mode, type, 0, cum->nregs,
6116 &x86_64_int_parameter_registers [cum->regno],
6121 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6122 enum machine_mode orig_mode, int named,
6123 HOST_WIDE_INT bytes)
6127 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
6128 We use the value -2 to specify that the current function call is MSABI. */
6129 if (mode == VOIDmode)
6130 return GEN_INT (-2);
6132 /* If we've run out of registers, it goes on the stack. */
6133 if (cum->nregs == 0)
6136 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6138 /* Only floating point modes are passed in anything but integer regs. */
6139 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6142 regno = cum->regno + FIRST_SSE_REG;
6147 /* Unnamed floating parameters are passed in both the
6148 SSE and integer registers. */
6149 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6150 t2 = gen_rtx_REG (mode, regno);
6151 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6152 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6153 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
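/* For example (assuming the Microsoft x64 convention): in a varargs
   call such as printf ("%f", d), the double D occupies the second
   argument slot and is therefore passed in both XMM1 and RDX, so the
   callee may fetch it from either register file.  */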
6156 /* Handle aggregate types passed in registers. */
6157 if (orig_mode == BLKmode)
6159 if (bytes > 0 && bytes <= 8)
6160 mode = (bytes > 4 ? DImode : SImode);
6161 if (mode == BLKmode)
6165 return gen_reg_or_parallel (mode, orig_mode, regno);
6169 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
6170 tree type, int named)
6172 enum machine_mode mode = omode;
6173 HOST_WIDE_INT bytes, words;
6175 if (mode == BLKmode)
6176 bytes = int_size_in_bytes (type);
6178 bytes = GET_MODE_SIZE (mode);
6179 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6181 /* To simplify the code below, represent vector types with a vector mode
6182 even if MMX/SSE are not active. */
6183 if (type && TREE_CODE (type) == VECTOR_TYPE)
6184 mode = type_natural_mode (type, cum);
6186 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6187 return function_arg_ms_64 (cum, mode, omode, named, bytes);
6188 else if (TARGET_64BIT)
6189 return function_arg_64 (cum, mode, omode, type, named);
6191 return function_arg_32 (cum, mode, omode, type, bytes, words);
6194 /* A C expression that indicates when an argument must be passed by
6195 reference. If nonzero for an argument, a copy of that argument is
6196 made in memory and a pointer to the argument is passed instead of
6197 the argument itself. The pointer is passed in whatever way is
6198 appropriate for passing a pointer to that type. */
6201 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6202 enum machine_mode mode ATTRIBUTE_UNUSED,
6203 const_tree type, bool named ATTRIBUTE_UNUSED)
6205 /* See Windows x64 Software Convention. */
6206 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6208 int msize = (int) GET_MODE_SIZE (mode);
6211 /* Arrays are passed by reference. */
6212 if (TREE_CODE (type) == ARRAY_TYPE)
6215 if (AGGREGATE_TYPE_P (type))
6217 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6218 are passed by reference. */
6219 msize = int_size_in_bytes (type);
6223 /* __m128 is passed by reference. */
6225 case 1: case 2: case 4: case 8:
6231 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
6237 /* Return true when TYPE should be 128-bit aligned for 32-bit argument passing
6240 contains_aligned_value_p (tree type)
6242 enum machine_mode mode = TYPE_MODE (type);
6243 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6247 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6249 if (TYPE_ALIGN (type) < 128)
6252 if (AGGREGATE_TYPE_P (type))
6254 /* Walk the aggregates recursively. */
6255 switch (TREE_CODE (type))
6259 case QUAL_UNION_TYPE:
6263 /* Walk all the structure fields. */
6264 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6266 if (TREE_CODE (field) == FIELD_DECL
6267 && contains_aligned_value_p (TREE_TYPE (field)))
6274 /* Just for use if some languages pass arrays by value. */
6275 if (contains_aligned_value_p (TREE_TYPE (type)))
6286 /* Gives the alignment boundary, in bits, of an argument with the
6287 specified mode and type. */
6290 ix86_function_arg_boundary (enum machine_mode mode, tree type)
6295 /* Since the canonical type is used for the call, convert TYPE to its
6296 canonical type if needed. */
6297 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
6298 type = TYPE_CANONICAL (type);
6299 align = TYPE_ALIGN (type);
6302 align = GET_MODE_ALIGNMENT (mode);
6303 if (align < PARM_BOUNDARY)
6304 align = PARM_BOUNDARY;
6305 /* In 32-bit mode, only _Decimal128 and __float128 are aligned to their
6306 natural boundaries. */
6307 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
6309 /* The i386 ABI defines all arguments to be 4-byte aligned. We have to
6310 make an exception for SSE modes since these require 128-bit
6313 The handling here differs from field_alignment. ICC aligns MMX
6314 arguments to 4-byte boundaries, while structure fields are aligned
6315 to 8-byte boundaries. */
6318 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6319 align = PARM_BOUNDARY;
6323 if (!contains_aligned_value_p (type))
6324 align = PARM_BOUNDARY;
6327 if (align > BIGGEST_ALIGNMENT)
6328 align = BIGGEST_ALIGNMENT;
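/* For example (under the rules above, in 32-bit mode): a plain int
   argument gets the 4-byte PARM_BOUNDARY, while an __m128 argument,
   being an SSE mode, keeps its 128-bit alignment.  */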
6332 /* Return true if N is a possible register number of function value. */
6335 ix86_function_value_regno_p (int regno)
6342 case FIRST_FLOAT_REG:
6343 /* TODO: The function should depend on the current function's ABI, but
6344 builtins.c would need updating then. Therefore we use the
6346 if (TARGET_64BIT && ix86_abi == MS_ABI)
6348 return TARGET_FLOAT_RETURNS_IN_80387;
6354 if (TARGET_MACHO || TARGET_64BIT)
6362 /* Define how to find the value returned by a function.
6363 VALTYPE is the data type of the value (as a tree).
6364 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6365 otherwise, FUNC is 0. */
6368 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6369 const_tree fntype, const_tree fn)
6373 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
6374 we normally prevent this case when mmx is not available. However
6375 some ABIs may require the result to be returned like DImode. */
6376 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6377 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
6379 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
6380 we prevent this case when sse is not available. However some ABIs
6381 may require the result to be returned like integer TImode. */
6382 else if (mode == TImode
6383 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6384 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6386 /* 32-byte vector modes in %ymm0. */
6387 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6388 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6390 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6391 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6392 regno = FIRST_FLOAT_REG;
6394 /* Most things go in %eax. */
6397 /* Override FP return register with %xmm0 for local functions when
6398 SSE math is enabled or for functions with sseregparm attribute. */
6399 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6401 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6402 if ((sse_level >= 1 && mode == SFmode)
6403 || (sse_level == 2 && mode == DFmode))
6404 regno = FIRST_SSE_REG;
6407 /* OImode shouldn't be used directly. */
6408 gcc_assert (mode != OImode);
6410 return gen_rtx_REG (orig_mode, regno);
6414 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6419 /* Handle libcalls, which don't provide a type node. */
6420 if (valtype == NULL)
6432 return gen_rtx_REG (mode, FIRST_SSE_REG);
6435 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6439 return gen_rtx_REG (mode, AX_REG);
6443 ret = construct_container (mode, orig_mode, valtype, 1,
6444 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6445 x86_64_int_return_registers, 0);
6447 /* For zero-sized structures, construct_container returns NULL, but we
6448 need to keep the rest of the compiler happy by returning a meaningful value. */
6450 ret = gen_rtx_REG (orig_mode, AX_REG);
6456 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6458 unsigned int regno = AX_REG;
6462 switch (GET_MODE_SIZE (mode))
6465 if((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6466 && !COMPLEX_MODE_P (mode))
6467 regno = FIRST_SSE_REG;
6471 if (mode == SFmode || mode == DFmode)
6472 regno = FIRST_SSE_REG;
6478 return gen_rtx_REG (orig_mode, regno);
6482 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6483 enum machine_mode orig_mode, enum machine_mode mode)
6485 const_tree fn, fntype;
6488 if (fntype_or_decl && DECL_P (fntype_or_decl))
6489 fn = fntype_or_decl;
6490 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6492 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6493 return function_value_ms_64 (orig_mode, mode);
6494 else if (TARGET_64BIT)
6495 return function_value_64 (orig_mode, mode, valtype);
6497 return function_value_32 (orig_mode, mode, fntype, fn);
6501 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6502 bool outgoing ATTRIBUTE_UNUSED)
6504 enum machine_mode mode, orig_mode;
6506 orig_mode = TYPE_MODE (valtype);
6507 mode = type_natural_mode (valtype, NULL);
6508 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6512 ix86_libcall_value (enum machine_mode mode)
6514 return ix86_function_value_1 (NULL, NULL, mode, mode);
6517 /* Return true iff type is returned in memory. */
6519 static int ATTRIBUTE_UNUSED
6520 return_in_memory_32 (const_tree type, enum machine_mode mode)
6524 if (mode == BLKmode)
6527 size = int_size_in_bytes (type);
6529 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6532 if (VECTOR_MODE_P (mode) || mode == TImode)
6534 /* User-created vectors small enough to fit in EAX. */
6538 /* MMX/3dNow values are returned in MM0,
6539 except when it doesn't exist. */
6541 return (TARGET_MMX ? 0 : 1);
6543 /* SSE values are returned in XMM0, except when it doesn't exist. */
6545 return (TARGET_SSE ? 0 : 1);
6547 /* AVX values are returned in YMM0, except when it doesn't exist. */
6549 return TARGET_AVX ? 0 : 1;
6558 /* OImode shouldn't be used directly. */
6559 gcc_assert (mode != OImode);
6564 static int ATTRIBUTE_UNUSED
6565 return_in_memory_64 (const_tree type, enum machine_mode mode)
6567 int needed_intregs, needed_sseregs;
6568 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6571 static int ATTRIBUTE_UNUSED
6572 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6574 HOST_WIDE_INT size = int_size_in_bytes (type);
6576 /* __m128 is returned in xmm0. */
6577 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6578 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6581 /* Otherwise, the size must be exactly 1, 2, 4 or 8. */
6582 return (size != 1 && size != 2 && size != 4 && size != 8);
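/* Illustrative consequences of the rules above (a sketch, not an
   exhaustive list): a struct of size 8 is returned in RAX, __m128 is
   returned in XMM0, while a struct of size 3 or 12 is returned in
   memory through a hidden pointer.  */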
6586 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6588 #ifdef SUBTARGET_RETURN_IN_MEMORY
6589 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6591 const enum machine_mode mode = type_natural_mode (type, NULL);
6595 if (ix86_function_type_abi (fntype) == MS_ABI)
6596 return return_in_memory_ms_64 (type, mode);
6598 return return_in_memory_64 (type, mode);
6601 return return_in_memory_32 (type, mode);
6605 /* Return false iff TYPE is returned in memory. This version is used
6606 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6607 but differs notably in that when MMX is available, 8-byte vectors
6608 are returned in memory, rather than in MMX registers. */
6611 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6614 enum machine_mode mode = type_natural_mode (type, NULL);
6617 return return_in_memory_64 (type, mode);
6619 if (mode == BLKmode)
6622 size = int_size_in_bytes (type);
6624 if (VECTOR_MODE_P (mode))
6626 /* Return in memory only if MMX registers *are* available. This
6627 seems backwards, but it is consistent with the existing
6634 else if (mode == TImode)
6636 else if (mode == XFmode)
6642 /* When returning SSE vector types, we have a choice of either
6643 (1) being abi incompatible with a -march switch, or
6644 (2) generating an error.
6645 Given no good solution, I think the safest thing is one warning.
6646 The user won't be able to use -Werror, but....
6648 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6649 called in response to actually generating a caller or callee that
6650 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6651 via aggregate_value_p for general type probing from tree-ssa. */
6654 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6656 static bool warnedsse, warnedmmx;
6658 if (!TARGET_64BIT && type)
6660 /* Look at the return type of the function, not the function type. */
6661 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6663 if (!TARGET_SSE && !warnedsse)
6666 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6669 warning (0, "SSE vector return without SSE enabled "
6674 if (!TARGET_MMX && !warnedmmx)
6676 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6679 warning (0, "MMX vector return without MMX enabled "
6689 /* Create the va_list data type. */
6691 /* Returns the calling convention specific va_list data type.
6692 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6695 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6697 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6699 /* For i386 we use a plain pointer to the argument area. */
6700 if (!TARGET_64BIT || abi == MS_ABI)
6701 return build_pointer_type (char_type_node);
6703 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6704 type_decl = build_decl (BUILTINS_LOCATION,
6705 TYPE_DECL, get_identifier ("__va_list_tag"), record);
6707 f_gpr = build_decl (BUILTINS_LOCATION,
6708 FIELD_DECL, get_identifier ("gp_offset"),
6709 unsigned_type_node);
6710 f_fpr = build_decl (BUILTINS_LOCATION,
6711 FIELD_DECL, get_identifier ("fp_offset"),
6712 unsigned_type_node);
6713 f_ovf = build_decl (BUILTINS_LOCATION,
6714 FIELD_DECL, get_identifier ("overflow_arg_area"),
6716 f_sav = build_decl (BUILTINS_LOCATION,
6717 FIELD_DECL, get_identifier ("reg_save_area"),
6720 va_list_gpr_counter_field = f_gpr;
6721 va_list_fpr_counter_field = f_fpr;
6723 DECL_FIELD_CONTEXT (f_gpr) = record;
6724 DECL_FIELD_CONTEXT (f_fpr) = record;
6725 DECL_FIELD_CONTEXT (f_ovf) = record;
6726 DECL_FIELD_CONTEXT (f_sav) = record;
6728 TREE_CHAIN (record) = type_decl;
6729 TYPE_NAME (record) = type_decl;
6730 TYPE_FIELDS (record) = f_gpr;
6731 TREE_CHAIN (f_gpr) = f_fpr;
6732 TREE_CHAIN (f_fpr) = f_ovf;
6733 TREE_CHAIN (f_ovf) = f_sav;
6735 layout_type (record);
6737 /* The correct type is an array type of one element. */
6738 return build_array_type (record, build_index_type (size_zero_node));
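/* The record built above corresponds to the familiar SysV AMD64
   va_list layout, sketched in C for reference:

     typedef struct __va_list_tag {
       unsigned int gp_offset;   // byte offset into reg_save_area for GPRs
       unsigned int fp_offset;   // byte offset into reg_save_area for SSE regs
       void *overflow_arg_area;  // stack-passed arguments
       void *reg_save_area;      // register save area
     } va_list[1];
*/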
6741 /* Set up the builtin va_list data type and, for 64-bit, the additional
6742 calling convention specific va_list data types. */
6745 ix86_build_builtin_va_list (void)
6747 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
6749 /* Initialize ABI-specific va_list builtin types. */
6753 if (ix86_abi == MS_ABI)
6755 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6756 if (TREE_CODE (t) != RECORD_TYPE)
6757 t = build_variant_type_copy (t);
6758 sysv_va_list_type_node = t;
6763 if (TREE_CODE (t) != RECORD_TYPE)
6764 t = build_variant_type_copy (t);
6765 sysv_va_list_type_node = t;
6767 if (ix86_abi != MS_ABI)
6769 t = ix86_build_builtin_va_list_abi (MS_ABI);
6770 if (TREE_CODE (t) != RECORD_TYPE)
6771 t = build_variant_type_copy (t);
6772 ms_va_list_type_node = t;
6777 if (TREE_CODE (t) != RECORD_TYPE)
6778 t = build_variant_type_copy (t);
6779 ms_va_list_type_node = t;
6786 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6789 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6798 int regparm = ix86_regparm;
6800 if (cum->call_abi != ix86_abi)
6801 regparm = (ix86_abi != SYSV_ABI
6802 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
6804 /* GPR size of varargs save area. */
6805 if (cfun->va_list_gpr_size)
6806 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6808 ix86_varargs_gpr_size = 0;
6810 /* FPR size of varargs save area. We don't need it if we don't pass
6811 anything in SSE registers. */
6812 if (cum->sse_nregs && cfun->va_list_fpr_size)
6813 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6815 ix86_varargs_fpr_size = 0;
6817 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6820 save_area = frame_pointer_rtx;
6821 set = get_varargs_alias_set ();
6823 for (i = cum->regno;
6825 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6828 mem = gen_rtx_MEM (Pmode,
6829 plus_constant (save_area, i * UNITS_PER_WORD));
6830 MEM_NOTRAP_P (mem) = 1;
6831 set_mem_alias_set (mem, set);
6832 emit_move_insn (mem, gen_rtx_REG (Pmode,
6833 x86_64_int_parameter_registers[i]));
6836 if (ix86_varargs_fpr_size)
6838 /* Now emit code to save SSE registers. The AX parameter contains the
6839 number of SSE parameter registers used to call this function. We use
6840 the sse_prologue_save insn template, which produces a computed jump
6841 across the SSE saves. We need some preparation work to get this working. */
6843 label = gen_label_rtx ();
6844 label_ref = gen_rtx_LABEL_REF (Pmode, label);
6846 /* Compute the address to jump to:
6847 label - eax*4 + nnamed_sse_arguments*4, or
6848 label - eax*5 + nnamed_sse_arguments*5 for AVX. */
6849 tmp_reg = gen_reg_rtx (Pmode);
6850 nsse_reg = gen_reg_rtx (Pmode);
6851 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6852 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6853 gen_rtx_MULT (Pmode, nsse_reg,
6856 /* vmovaps is one byte longer than movaps. */
6858 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6859 gen_rtx_PLUS (Pmode, tmp_reg,
6865 gen_rtx_CONST (DImode,
6866 gen_rtx_PLUS (DImode,
6868 GEN_INT (cum->sse_regno
6869 * (TARGET_AVX ? 5 : 4)))));
6871 emit_move_insn (nsse_reg, label_ref);
6872 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
6874 /* Compute the address of the memory block we save into. We always use a
6875 pointer pointing 127 bytes after the first byte to store - this is
6876 needed to keep the instruction size limited to 4 bytes (5 bytes for
6877 AVX) with a one-byte displacement. */
6878 tmp_reg = gen_reg_rtx (Pmode);
6879 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6880 plus_constant (save_area,
6881 ix86_varargs_gpr_size + 127)));
6882 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6883 MEM_NOTRAP_P (mem) = 1;
6884 set_mem_alias_set (mem, set);
6885 set_mem_align (mem, BITS_PER_WORD);
6887 /* And finally do the dirty job! */
6888 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6889 GEN_INT (cum->sse_regno), label));
6894 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6896 alias_set_type set = get_varargs_alias_set ();
6899 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
6903 mem = gen_rtx_MEM (Pmode,
6904 plus_constant (virtual_incoming_args_rtx,
6905 i * UNITS_PER_WORD));
6906 MEM_NOTRAP_P (mem) = 1;
6907 set_mem_alias_set (mem, set);
6909 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6910 emit_move_insn (mem, reg);
6915 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6916 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6919 CUMULATIVE_ARGS next_cum;
6922 /* This argument doesn't appear to be used anymore. Which is good,
6923 because the old code here didn't suppress rtl generation. */
6924 gcc_assert (!no_rtl);
6929 fntype = TREE_TYPE (current_function_decl);
6931 /* For varargs, we do not want to skip the dummy va_dcl argument.
6932 For stdargs, we do want to skip the last named argument. */
6934 if (stdarg_p (fntype))
6935 function_arg_advance (&next_cum, mode, type, 1);
6937 if (cum->call_abi == MS_ABI)
6938 setup_incoming_varargs_ms_64 (&next_cum);
6940 setup_incoming_varargs_64 (&next_cum);
6943 /* Check whether TYPE is a va_list of the plain char * kind. */
6946 is_va_list_char_pointer (tree type)
6950 /* For 32-bit it is always true. */
6953 canonic = ix86_canonical_va_list_type (type);
6954 return (canonic == ms_va_list_type_node
6955 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
6958 /* Implement va_start. */
6961 ix86_va_start (tree valist, rtx nextarg)
6963 HOST_WIDE_INT words, n_gpr, n_fpr;
6964 tree f_gpr, f_fpr, f_ovf, f_sav;
6965 tree gpr, fpr, ovf, sav, t;
6968 /* Only the 64-bit target needs something special. */
6969 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6971 std_expand_builtin_va_start (valist, nextarg);
6975 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6976 f_fpr = TREE_CHAIN (f_gpr);
6977 f_ovf = TREE_CHAIN (f_fpr);
6978 f_sav = TREE_CHAIN (f_ovf);
6980 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
6981 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6982 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6983 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6984 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6986 /* Count number of gp and fp argument registers used. */
6987 words = crtl->args.info.words;
6988 n_gpr = crtl->args.info.regno;
6989 n_fpr = crtl->args.info.sse_regno;
6991 if (cfun->va_list_gpr_size)
6993 type = TREE_TYPE (gpr);
6994 t = build2 (MODIFY_EXPR, type,
6995 gpr, build_int_cst (type, n_gpr * 8));
6996 TREE_SIDE_EFFECTS (t) = 1;
6997 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7000 if (TARGET_SSE && cfun->va_list_fpr_size)
7002 type = TREE_TYPE (fpr);
7003 t = build2 (MODIFY_EXPR, type, fpr,
7004 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
7005 TREE_SIDE_EFFECTS (t) = 1;
7006 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7009 /* Find the overflow area. */
7010 type = TREE_TYPE (ovf);
7011 t = make_tree (type, crtl->args.internal_arg_pointer);
7013 t = build2 (POINTER_PLUS_EXPR, type, t,
7014 size_int (words * UNITS_PER_WORD));
7015 t = build2 (MODIFY_EXPR, type, ovf, t);
7016 TREE_SIDE_EFFECTS (t) = 1;
7017 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7019 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
7021 /* Find the register save area.
7022 The function prologue saves it right above the stack frame. */
7023 type = TREE_TYPE (sav);
7024 t = make_tree (type, frame_pointer_rtx);
7025 if (!ix86_varargs_gpr_size)
7026 t = build2 (POINTER_PLUS_EXPR, type, t,
7027 size_int (-8 * X86_64_REGPARM_MAX));
7028 t = build2 (MODIFY_EXPR, type, sav, t);
7029 TREE_SIDE_EFFECTS (t) = 1;
7030 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
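/* In effect (a sketch using the va_list field names sketched earlier,
   with n_gpr/n_fpr the numbers of register slots consumed by the named
   arguments), the expansion performed above is:

     ap->gp_offset = n_gpr * 8;
     ap->fp_offset = n_fpr * 16 + X86_64_REGPARM_MAX * 8;
     ap->overflow_arg_area = arg_pointer + words * UNITS_PER_WORD;
     ap->reg_save_area = save_area;
*/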
7034 /* Implement va_arg. */
7037 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7040 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
7041 tree f_gpr, f_fpr, f_ovf, f_sav;
7042 tree gpr, fpr, ovf, sav, t;
7044 tree lab_false, lab_over = NULL_TREE;
7049 enum machine_mode nat_mode;
7052 /* Only the 64-bit target needs something special. */
7053 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7054 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7056 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7057 f_fpr = TREE_CHAIN (f_gpr);
7058 f_ovf = TREE_CHAIN (f_fpr);
7059 f_sav = TREE_CHAIN (f_ovf);
7061 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7062 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7063 valist = build_va_arg_indirect_ref (valist);
7064 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7065 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7066 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7068 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7070 type = build_pointer_type (type);
7071 size = int_size_in_bytes (type);
7072 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7074 nat_mode = type_natural_mode (type, NULL);
7083 /* Unnamed 256-bit vector mode parameters are passed on the stack. */
7084 if (ix86_cfun_abi () == SYSV_ABI)
7091 container = construct_container (nat_mode, TYPE_MODE (type),
7092 type, 0, X86_64_REGPARM_MAX,
7093 X86_64_SSE_REGPARM_MAX, intreg,
7098 /* Pull the value out of the saved registers. */
7100 addr = create_tmp_var (ptr_type_node, "addr");
7104 int needed_intregs, needed_sseregs;
7106 tree int_addr, sse_addr;
7108 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7109 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7111 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7113 need_temp = (!REG_P (container)
7114 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7115 || TYPE_ALIGN (type) > 128));
7117 /* When passing a structure, verify that it forms a consecutive block
7118 in the register save area. If not, we need to do moves. */
7119 if (!need_temp && !REG_P (container))
7121 /* Verify that all registers are strictly consecutive. */
7122 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7126 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7128 rtx slot = XVECEXP (container, 0, i);
7129 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7130 || INTVAL (XEXP (slot, 1)) != i * 16)
7138 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7140 rtx slot = XVECEXP (container, 0, i);
7141 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7142 || INTVAL (XEXP (slot, 1)) != i * 8)
7154 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7155 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7158 /* First ensure that we fit completely in registers. */
7161 t = build_int_cst (TREE_TYPE (gpr),
7162 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7163 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7164 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7165 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7166 gimplify_and_add (t, pre_p);
7170 t = build_int_cst (TREE_TYPE (fpr),
7171 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7172 + X86_64_REGPARM_MAX * 8);
7173 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7174 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7175 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7176 gimplify_and_add (t, pre_p);
7179 /* Compute index to start of area used for integer regs. */
7182 /* int_addr = gpr + sav; */
7183 t = fold_convert (sizetype, gpr);
7184 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7185 gimplify_assign (int_addr, t, pre_p);
7189 /* sse_addr = fpr + sav; */
7190 t = fold_convert (sizetype, fpr);
7191 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7192 gimplify_assign (sse_addr, t, pre_p);
7197 tree temp = create_tmp_var (type, "va_arg_tmp");
7200 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
7201 gimplify_assign (addr, t, pre_p);
7203 for (i = 0; i < XVECLEN (container, 0); i++)
7205 rtx slot = XVECEXP (container, 0, i);
7206 rtx reg = XEXP (slot, 0);
7207 enum machine_mode mode = GET_MODE (reg);
7208 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
7209 tree addr_type = build_pointer_type (piece_type);
7210 tree daddr_type = build_pointer_type_for_mode (piece_type,
7214 tree dest_addr, dest;
7216 if (SSE_REGNO_P (REGNO (reg)))
7218 src_addr = sse_addr;
7219 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
7223 src_addr = int_addr;
7224 src_offset = REGNO (reg) * 8;
7226 src_addr = fold_convert (addr_type, src_addr);
7227 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
7228 size_int (src_offset));
7229 src = build_va_arg_indirect_ref (src_addr);
7231 dest_addr = fold_convert (daddr_type, addr);
7232 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
7233 size_int (INTVAL (XEXP (slot, 1))));
7234 dest = build_va_arg_indirect_ref (dest_addr);
7236 gimplify_assign (dest, src, pre_p);
7242 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
7243 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7244 gimplify_assign (gpr, t, pre_p);
7249 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7250 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7251 gimplify_assign (fpr, t, pre_p);
7254 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7256 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7259 /* ... otherwise out of the overflow area. */
7261 /* When we align a parameter on the stack for the caller, if its
7262 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
7263 aligned at MAX_SUPPORTED_STACK_ALIGNMENT. We match the callee
7264 here with the caller. */
7265 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
7266 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7267 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7269 /* Care for on-stack alignment if needed. */
7270 if (arg_boundary <= 64
7271 || integer_zerop (TYPE_SIZE (type)))
7275 HOST_WIDE_INT align = arg_boundary / 8;
7276 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
7277 size_int (align - 1));
7278 t = fold_convert (sizetype, t);
7279 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7281 t = fold_convert (TREE_TYPE (ovf), t);
7283 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7284 gimplify_assign (addr, t, pre_p);
7286 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
7287 size_int (rsize * UNITS_PER_WORD));
7288 gimplify_assign (unshare_expr (ovf), t, pre_p);
7291 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7293 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
7294 addr = fold_convert (ptrtype, addr);
7297 addr = build_va_arg_indirect_ref (addr);
7298 return build_va_arg_indirect_ref (addr);
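/* The GIMPLE built above follows the classic SysV va_arg shape; as a
   sketch in C for a type needing a single GPR (field names as in the
   va_list layout sketched earlier, align() hypothetical):

     if (ap->gp_offset >= X86_64_REGPARM_MAX * 8)
       goto overflow;
     addr = ap->reg_save_area + ap->gp_offset;
     ap->gp_offset += 8;
     goto done;
   overflow:
     addr = align (ap->overflow_arg_area, TYPE_ALIGN (type) / 8);
     ap->overflow_arg_area = addr + rsize * UNITS_PER_WORD;
   done:
     result = *(type *) addr;
*/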
7301 /* Return nonzero if OPNUM's MEM should be matched
7302 in movabs* patterns. */
7305 ix86_check_movabs (rtx insn, int opnum)
7309 set = PATTERN (insn);
7310 if (GET_CODE (set) == PARALLEL)
7311 set = XVECEXP (set, 0, 0);
7312 gcc_assert (GET_CODE (set) == SET);
7313 mem = XEXP (set, opnum);
7314 while (GET_CODE (mem) == SUBREG)
7315 mem = SUBREG_REG (mem);
7316 gcc_assert (MEM_P (mem));
7317 return (volatile_ok || !MEM_VOLATILE_P (mem));
7320 /* Initialize the table of extra 80387 mathematical constants. */
7323 init_ext_80387_constants (void)
7325 static const char * cst[5] =
7327 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7328 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7329 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7330 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7331 "3.1415926535897932385128089594061862044", /* 4: fldpi */
7335 for (i = 0; i < 5; i++)
7337 real_from_string (&ext_80387_constants_table[i], cst[i]);
7338 /* Ensure each constant is rounded to XFmode precision. */
7339 real_convert (&ext_80387_constants_table[i],
7340 XFmode, &ext_80387_constants_table[i]);
7343 ext_80387_constants_init = 1;
7346 /* Return true if the constant is something that can be loaded with
7347 a special instruction. */
7350 standard_80387_constant_p (rtx x)
7352 enum machine_mode mode = GET_MODE (x);
7356 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7359 if (x == CONST0_RTX (mode))
7361 if (x == CONST1_RTX (mode))
7364 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7366 /* For XFmode constants, try to find a special 80387 instruction when
7367 optimizing for size or on those CPUs that benefit from them. */
7369 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7373 if (! ext_80387_constants_init)
7374 init_ext_80387_constants ();
7376 for (i = 0; i < 5; i++)
7377 if (real_identical (&r, &ext_80387_constants_table[i]))
7381 /* A load of the constant -0.0 or -1.0 will be split into an
7382 fldz;fchs or fld1;fchs sequence. */
7383 if (real_isnegzero (&r))
7385 if (real_identical (&r, &dconstm1))
7391 /* Return the opcode of the special instruction to be used to load
7395 standard_80387_constant_opcode (rtx x)
7397 switch (standard_80387_constant_p (x))
7421 /* Return the CONST_DOUBLE representing the 80387 constant that is
7422 loaded by the specified special instruction. The argument IDX
7423 matches the return value from standard_80387_constant_p. */
7426 standard_80387_constant_rtx (int idx)
7430 if (! ext_80387_constants_init)
7431 init_ext_80387_constants ();
7447 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
7451 /* Return 1 if X is all 0s and 2 if X is all 1s
7452 in a supported SSE vector mode. */
7455 standard_sse_constant_p (rtx x)
7457 enum machine_mode mode = GET_MODE (x);
7459 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7461 if (vector_all_ones_operand (x, mode))
7477 /* Return the opcode of the special instruction to be used to load
7481 standard_sse_constant_opcode (rtx insn, rtx x)
7483 switch (standard_sse_constant_p (x))
7486 switch (get_attr_mode (insn))
7489 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7491 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7493 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7495 return "vxorps\t%x0, %x0, %x0";
7497 return "vxorpd\t%x0, %x0, %x0";
7499 return "vpxor\t%x0, %x0, %x0";
7504 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
7511 /* Returns 1 if OP contains a symbol reference. */
7514 symbolic_reference_mentioned_p (rtx op)
7519 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7522 fmt = GET_RTX_FORMAT (GET_CODE (op));
7523 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7529 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7530 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7534 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7541 /* Return 1 if it is appropriate to emit `ret' instructions in the
7542 body of a function. Do this only if the epilogue is simple, needing a
7543 couple of insns. Prior to reloading, we can't tell how many registers
7544 must be saved, so return 0 then. Return 0 if there is no frame
7545 marker to de-allocate. */
7548 ix86_can_use_return_insn_p (void)
7550 struct ix86_frame frame;
7552 if (! reload_completed || frame_pointer_needed)
7555 /* Don't allow more than a 32k pop, since that's all we can do
7556 with one instruction. */
7557 if (crtl->args.pops_args
7558 && crtl->args.size >= 32768)
7561 ix86_compute_frame_layout (&frame);
7562 return frame.to_allocate == 0 && frame.padding0 == 0
7563 && (frame.nregs + frame.nsseregs) == 0;
7566 /* Value should be nonzero if functions must have frame pointers.
7567 Zero means the frame pointer need not be set up (and parms may
7568 be accessed via the stack pointer) in functions that seem suitable. */
7571 ix86_frame_pointer_required (void)
7573 /* If we accessed previous frames, then the generated code expects
7574 to be able to access the saved ebp value in our frame. */
7575 if (cfun->machine->accesses_prev_frame)
7578 /* Several x86 OSes need a frame pointer for other reasons,
7579 usually pertaining to setjmp. */
7580 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7583 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7584 the frame pointer by default. Turn it back on now if we've not
7585 got a leaf function. */
7586 if (TARGET_OMIT_LEAF_FRAME_POINTER
7587 && (!current_function_is_leaf
7588 || ix86_current_function_calls_tls_descriptor))
7597 /* Record that the current function accesses previous call frames. */
7600 ix86_setup_frame_addresses (void)
7602 cfun->machine->accesses_prev_frame = 1;
7605 #ifndef USE_HIDDEN_LINKONCE
7606 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7607 # define USE_HIDDEN_LINKONCE 1
7609 # define USE_HIDDEN_LINKONCE 0
7613 static int pic_labels_used;
7615 /* Fills in the label name that should be used for a pc thunk for
7616 the given register. */
7619 get_pc_thunk_name (char name[32], unsigned int regno)
7621 gcc_assert (!TARGET_64BIT);
7623 if (USE_HIDDEN_LINKONCE)
7624 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7626 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
7630 /* This function generates code for -fpic that loads %ebx with
7631 the return address of the caller and then returns. */
7634 ix86_code_end (void)
7639 for (regno = 0; regno < 8; ++regno)
7644 if (! ((pic_labels_used >> regno) & 1))
7647 get_pc_thunk_name (name, regno);
7649 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
7650 get_identifier (name),
7651 build_function_type (void_type_node, void_list_node));
7652 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
7653 NULL_TREE, void_type_node);
7654 TREE_PUBLIC (decl) = 1;
7655 TREE_STATIC (decl) = 1;
7660 switch_to_section (darwin_sections[text_coal_section]);
7661 fputs ("\t.weak_definition\t", asm_out_file);
7662 assemble_name (asm_out_file, name);
7663 fputs ("\n\t.private_extern\t", asm_out_file);
7664 assemble_name (asm_out_file, name);
7665 fputs ("\n", asm_out_file);
7666 ASM_OUTPUT_LABEL (asm_out_file, name);
7667 DECL_WEAK (decl) = 1;
7671 if (USE_HIDDEN_LINKONCE)
7673 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
7675 (*targetm.asm_out.unique_section) (decl, 0);
7676 switch_to_section (get_named_section (decl, NULL, 0));
7678 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7679 fputs ("\t.hidden\t", asm_out_file);
7680 assemble_name (asm_out_file, name);
7681 putc ('\n', asm_out_file);
7682 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7686 switch_to_section (text_section);
7687 ASM_OUTPUT_LABEL (asm_out_file, name);
7690 DECL_INITIAL (decl) = make_node (BLOCK);
7691 current_function_decl = decl;
7692 init_function_start (decl);
7693 first_function_block_is_cold = false;
7694 /* Make sure unwind info is emitted for the thunk if needed. */
7695 final_start_function (emit_barrier (), asm_out_file, 1);
7697 xops[0] = gen_rtx_REG (Pmode, regno);
7698 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7699 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7700 output_asm_insn ("ret", xops);
7701 final_end_function ();
7702 init_insn_lengths ();
7703 free_after_compilation (cfun);
7705 current_function_decl = NULL;
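/* The thunk emitted above for, say, %ebx amounts to (a sketch of the
   resulting assembly):

   __i686.get_pc_thunk.bx:
           movl    (%esp), %ebx
           ret

   i.e. it copies the caller's return address - the address of the insn
   following the call - into the PIC register.  */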
7709 /* Emit code for the SET_GOT patterns. */
7712 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7718 if (TARGET_VXWORKS_RTP && flag_pic)
7720 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7721 xops[2] = gen_rtx_MEM (Pmode,
7722 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7723 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7725 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7726 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7727 an unadorned address. */
7728 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7729 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7730 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7734 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7736 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7738 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7741 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7744 output_asm_insn ("call\t%a2", xops);
7745 #ifdef DWARF2_UNWIND_INFO
7746 /* The call to the next label acts as a push. */
7747 if (dwarf2out_do_frame ())
7751 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7752 gen_rtx_PLUS (Pmode,
7755 RTX_FRAME_RELATED_P (insn) = 1;
7756 dwarf2out_frame_debug (insn, true);
7763 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7764 is what will be referenced by the Mach-O PIC subsystem. */
7766 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7769 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7770 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7774 output_asm_insn ("pop%z0\t%0", xops);
7775 #ifdef DWARF2_UNWIND_INFO
7776 /* The pop adjusts the stack and clobbers dest, but doesn't restore
7777 it for unwind info purposes. */
7778 if (dwarf2out_do_frame ())
7782 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
7783 dwarf2out_frame_debug (insn, true);
7784 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7785 gen_rtx_PLUS (Pmode,
7788 RTX_FRAME_RELATED_P (insn) = 1;
7789 dwarf2out_frame_debug (insn, true);
7798 get_pc_thunk_name (name, REGNO (dest));
7799 pic_labels_used |= 1 << REGNO (dest);
7801 #ifdef DWARF2_UNWIND_INFO
7802 /* Ensure all queued register saves are flushed before the
7804 if (dwarf2out_do_frame ())
7808 insn = emit_barrier ();
7810 dwarf2out_frame_debug (insn, false);
7813 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7814 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7815 output_asm_insn ("call\t%X2", xops);
7816 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7817 is what will be referenced by the Mach-O PIC subsystem. */
7820 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7822 targetm.asm_out.internal_label (asm_out_file, "L",
7823 CODE_LABEL_NUMBER (label));
7830 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7831 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7833 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
7838 /* Generate a "push" pattern for input ARG. */
7843 if (ix86_cfa_state->reg == stack_pointer_rtx)
7844 ix86_cfa_state->offset += UNITS_PER_WORD;
7846 return gen_rtx_SET (VOIDmode,
7848 gen_rtx_PRE_DEC (Pmode,
7849 stack_pointer_rtx)),
7853 /* Return >= 0 if there is an unused call-clobbered register available
7854 for the entire function. */
7857 ix86_select_alt_pic_regnum (void)
7859 if (current_function_is_leaf && !crtl->profile
7860 && !ix86_current_function_calls_tls_descriptor)
7863 /* Can't use the same register for both PIC and DRAP. */
7865 drap = REGNO (crtl->drap_reg);
7868 for (i = 2; i >= 0; --i)
7869 if (i != drap && !df_regs_ever_live_p (i))
7873 return INVALID_REGNUM;
7876 /* Return 1 if we need to save REGNO. */
7878 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7880 if (pic_offset_table_rtx
7881 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7882 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7884 || crtl->calls_eh_return
7885 || crtl->uses_const_pool))
7887 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7892 if (crtl->calls_eh_return && maybe_eh_return)
7897 unsigned test = EH_RETURN_DATA_REGNO (i);
7898 if (test == INVALID_REGNUM)
7905 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
7908 return (df_regs_ever_live_p (regno)
7909 && !call_used_regs[regno]
7910 && !fixed_regs[regno]
7911 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
7914 /* Return the number of saved general purpose registers. */
7917 ix86_nsaved_regs (void)
7922 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7923 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7928 /* Return the number of saved SSE registers. */
7931 ix86_nsaved_sseregs (void)
7936 if (ix86_cfun_abi () != MS_ABI)
7938 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7939 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7944 /* Given FROM and TO register numbers, say whether this elimination is
7945 allowed. If stack alignment is needed, we can only replace argument
7946 pointer with hard frame pointer, or replace frame pointer with stack
7947 pointer. Otherwise, frame pointer elimination is automatically
7948 handled and all other eliminations are valid. */
7951 ix86_can_eliminate (const int from, const int to)
7953 if (stack_realign_fp)
7954 return ((from == ARG_POINTER_REGNUM
7955 && to == HARD_FRAME_POINTER_REGNUM)
7956 || (from == FRAME_POINTER_REGNUM
7957 && to == STACK_POINTER_REGNUM));
7959 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
7962 /* Return the offset between two registers, one to be eliminated, and the other
7963 its replacement, at the start of a routine. */
7966 ix86_initial_elimination_offset (int from, int to)
7968 struct ix86_frame frame;
7969 ix86_compute_frame_layout (&frame);
7971 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7972 return frame.hard_frame_pointer_offset;
7973 else if (from == FRAME_POINTER_REGNUM
7974 && to == HARD_FRAME_POINTER_REGNUM)
7975 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
7978 gcc_assert (to == STACK_POINTER_REGNUM);
7980 if (from == ARG_POINTER_REGNUM)
7981 return frame.stack_pointer_offset;
7983 gcc_assert (from == FRAME_POINTER_REGNUM);
7984 return frame.stack_pointer_offset - frame.frame_pointer_offset;
7988 /* In a dynamically-aligned function, we can't know the offset from
7989 stack pointer to frame pointer, so we must ensure that setjmp
7990 eliminates fp against the hard fp (%ebp) rather than trying to
7991 index from %esp up to the top of the frame across a gap that is
7992 of unknown (at compile-time) size. */
7994 ix86_builtin_setjmp_frame_value (void)
7996 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
7999 /* Fill the structure ix86_frame describing the frame of the current function. */
8002 ix86_compute_frame_layout (struct ix86_frame *frame)
8004 unsigned int stack_alignment_needed;
8005 HOST_WIDE_INT offset;
8006 unsigned int preferred_alignment;
8007 HOST_WIDE_INT size = get_frame_size ();
8009 frame->nregs = ix86_nsaved_regs ();
8010 frame->nsseregs = ix86_nsaved_sseregs ();
8012 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
8013 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
8015 /* The MS ABI seems to require stack alignment to be always 16 except for function
8017 if (ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
8019 preferred_alignment = 16;
8020 stack_alignment_needed = 16;
8021 crtl->preferred_stack_boundary = 128;
8022 crtl->stack_alignment_needed = 128;
8025 gcc_assert (!size || stack_alignment_needed);
8026 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
8027 gcc_assert (preferred_alignment <= stack_alignment_needed);
8029 /* During reload iterations the number of registers saved can change.
8030 Recompute the value as needed. Do not recompute when the number of
8031 registers didn't change, as reload makes multiple calls to the function
8032 and does not expect the decision to change within a single iteration. */
8033 if (!optimize_function_for_size_p (cfun)
8034 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
8036 int count = frame->nregs;
8038 cfun->machine->use_fast_prologue_epilogue_nregs = count;
8039 /* The fast prologue uses moves instead of pushes to save registers. This
8040 is significantly longer, but also executes faster, as modern hardware
8041 can execute the moves in parallel but can't do that for push/pop.
8043 Be careful about choosing which prologue to emit: when the function
8044 takes many instructions to execute, we may use the slow version, as well
8045 as when the function is known to be outside a hot spot (this is known
8046 with feedback only). Weight the size of the function by the number of
8047 registers to save, as it is cheap to use one or two push instructions
8048 but very slow to use many of them. */
8050 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
8051 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
8052 || (flag_branch_probabilities
8053 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
8054 cfun->machine->use_fast_prologue_epilogue = false;
8056 cfun->machine->use_fast_prologue_epilogue
8057 = !expensive_function_p (count);
8059 if (TARGET_PROLOGUE_USING_MOVE
8060 && cfun->machine->use_fast_prologue_epilogue)
8061 frame->save_regs_using_mov = true;
8063 frame->save_regs_using_mov = false;
8065 /* Skip return address. */
8066 offset = UNITS_PER_WORD;
8068 /* Skip pushed static chain. */
8069 if (ix86_static_chain_on_stack)
8070 offset += UNITS_PER_WORD;
8072 /* Skip saved base pointer. */
8073 if (frame_pointer_needed)
8074 offset += UNITS_PER_WORD;
8076 frame->hard_frame_pointer_offset = offset;
8078 /* Set offset to aligned because the realigned frame starts from
8080 if (stack_realign_fp)
8081 offset = (offset + stack_alignment_needed -1) & -stack_alignment_needed;
8083 /* Register save area */
8084 offset += frame->nregs * UNITS_PER_WORD;
8086 /* Align SSE reg save area. */
8087 if (frame->nsseregs)
8088 frame->padding0 = ((offset + 16 - 1) & -16) - offset;
8090 frame->padding0 = 0;
8092 /* SSE register save area. */
8093 offset += frame->padding0 + frame->nsseregs * 16;
8096 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
8097 offset += frame->va_arg_size;
8099 /* Align start of frame for local function. */
8100 frame->padding1 = ((offset + stack_alignment_needed - 1)
8101 & -stack_alignment_needed) - offset;
8103 offset += frame->padding1;
8105 /* Frame pointer points here. */
8106 frame->frame_pointer_offset = offset;
8110 /* Add the outgoing arguments area. It can be skipped if we eliminated
8111 all the function calls as dead code.
8112 Skipping is, however, impossible when the function calls alloca: the
8113 alloca expander assumes that the last crtl->outgoing_args_size bytes
8114 of the stack frame are unused. */
8115 if (ACCUMULATE_OUTGOING_ARGS
8116 && (!current_function_is_leaf || cfun->calls_alloca
8117 || ix86_current_function_calls_tls_descriptor))
8119 offset += crtl->outgoing_args_size;
8120 frame->outgoing_arguments_size = crtl->outgoing_args_size;
8123 frame->outgoing_arguments_size = 0;
8125 /* Align the stack boundary. Only needed if we're calling another function
8127 if (!current_function_is_leaf || cfun->calls_alloca
8128 || ix86_current_function_calls_tls_descriptor)
8129 frame->padding2 = ((offset + preferred_alignment - 1)
8130 & -preferred_alignment) - offset;
8132 frame->padding2 = 0;
8134 offset += frame->padding2;
8136 /* We've reached the end of the stack frame. */
8137 frame->stack_pointer_offset = offset;
8139 /* Size the prologue needs to allocate. */
8140 frame->to_allocate =
8141 (size + frame->padding1 + frame->padding2
8142 + frame->outgoing_arguments_size + frame->va_arg_size);
8144 if ((!frame->to_allocate && frame->nregs <= 1)
8145 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
8146 frame->save_regs_using_mov = false;
8148 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8149 && current_function_sp_is_unchanging
8150 && current_function_is_leaf
8151 && !ix86_current_function_calls_tls_descriptor)
    {
      frame->red_zone_size = frame->to_allocate;
      if (frame->save_regs_using_mov)
	frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
      if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
	frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
    }
  else
    frame->red_zone_size = 0;
8161 frame->to_allocate -= frame->red_zone_size;
  frame->stack_pointer_offset -= frame->red_zone_size;
}
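/* Illustrative sketch (added for exposition, not from the original
   sources) of the frame layout the code above computes, growing away
   from the incoming stack pointer:

     return address                        <- entry %esp / %rsp
     [pushed static chain]
     [saved frame pointer]                 <- hard_frame_pointer_offset
     integer register save area            (nregs * UNITS_PER_WORD)
     padding0 + SSE register save area     (nsseregs * 16, 16-byte aligned)
     va-arg register save area
     padding1 (frame alignment)
     local variables                       <- frame_pointer_offset
     outgoing arguments
     padding2 (call alignment)             <- stack_pointer_offset

   For leaf functions the final to_allocate bytes may instead live in
   the red zone below the stack pointer, as reflected by the
   red_zone_size adjustments above.  */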
8165 /* Emit code to save registers in the prologue. */
static void
ix86_emit_save_regs (void)
{
  unsigned int regno;
  rtx insn;

8173 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
8174 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
      {
	insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
	RTX_FRAME_RELATED_P (insn) = 1;
      }
}
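/* Illustrative note (added for exposition, not from the original
   sources): for a function that must preserve %ebx and %esi, the loop
   above emits the equivalent of

     push %esi
     push %ebx

   with each push marked RTX_FRAME_RELATED_P so the unwind (CFI)
   information records where the registers were saved.  */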
/* Emit code to save registers using MOV insns.  First register
   is stored at POINTER + OFFSET.  */

static void
ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
{
  unsigned int regno;
  rtx insn;

8189 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8190 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
      {
	insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
					       Pmode, offset),
			       gen_rtx_REG (Pmode, regno));
	RTX_FRAME_RELATED_P (insn) = 1;
	offset += UNITS_PER_WORD;
      }
}
/* Emit code to save SSE registers using MOV insns.  First register
   is stored at POINTER + OFFSET.  */

static void
ix86_emit_save_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
{
  unsigned int regno;
  rtx insn;
  rtx mem;

8209 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8210 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
      {
	mem = adjust_address (gen_rtx_MEM (TImode, pointer), TImode, offset);
	set_mem_align (mem, 128);
	insn = emit_move_insn (mem, gen_rtx_REG (TImode, regno));
	RTX_FRAME_RELATED_P (insn) = 1;
	offset += 16;
      }
}
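/* Illustrative note (added for exposition, not from the original
   sources): set_mem_align (mem, 128) records that each save slot is
   16-byte aligned, so the TImode store above can be emitted as an
   aligned instruction such as

     movaps %xmm6, 32(%rsp)

   instead of requiring the unaligned movups form.  */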
8220 static GTY(()) rtx queued_cfa_restores;
/* Add a REG_CFA_RESTORE REG note to INSN, or queue it until the next
   stack manipulation insn.  Don't add the note if the previously saved
   value will be left untouched within the stack red zone till return,
   as unwinders can find the same value in the register and on the
   stack after it.  */

static void
ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT red_offset)
{
  if (TARGET_RED_ZONE
8232 && !TARGET_64BIT_MS_ABI
8233 && red_offset + RED_ZONE_SIZE >= 0
      && crtl->args.pops_args < 65536)
    return;

  if (insn)
    {
8239 add_reg_note (insn, REG_CFA_RESTORE, reg);
8240 RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    queued_cfa_restores
      = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
}
/* Add queued REG_CFA_RESTORE notes to INSN, if any.  */

static void
ix86_add_queued_cfa_restore_notes (rtx insn)
{
  rtx last;

  if (!queued_cfa_restores)
    return;
8255 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
8257 XEXP (last, 1) = REG_NOTES (insn);
8258 REG_NOTES (insn) = queued_cfa_restores;
8259 queued_cfa_restores = NULL_RTX;
8260 RTX_FRAME_RELATED_P (insn) = 1;
/* Expand prologue or epilogue stack adjustment.
   The pattern exists to put a dependency on all ebp-based memory accesses.
   STYLE should be negative if instructions should be marked as frame
   related, zero if the %r11 register is live and cannot be freely used,
   and positive otherwise.  */

static void
pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
			   int style, bool set_cfa)
{
  rtx insn;

  if (! TARGET_64BIT)
8276 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
8277 else if (x86_64_immediate_operand (offset, DImode))
8278 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
8282 /* r11 is used by indirect sibcall return as well, set before the
8283 epilogue and used after the epilogue. ATM indirect sibcall
8284 shouldn't be used together with huge frame sizes in one
8285 function because of the frame_size check in sibcall.c. */
8287 r11 = gen_rtx_REG (DImode, R11_REG);
8288 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
8290 RTX_FRAME_RELATED_P (insn) = 1;
8291 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
8296 ix86_add_queued_cfa_restore_notes (insn);
8302 gcc_assert (ix86_cfa_state->reg == src);
8303 ix86_cfa_state->offset += INTVAL (offset);
8304 ix86_cfa_state->reg = dest;
8306 r = gen_rtx_PLUS (Pmode, src, offset);
8307 r = gen_rtx_SET (VOIDmode, dest, r);
8308 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
8309 RTX_FRAME_RELATED_P (insn) = 1;
8312 RTX_FRAME_RELATED_P (insn) = 1;
/* Find an available register to be used as a dynamic realign argument
   pointer register.  Such a register will be written in the prologue
   and used at the beginning of the body, so it must not be
	1. a parameter passing register.
	2. the GOT pointer.
   We reuse the static-chain register if it is available.  Otherwise,
   we use DI for i386 and R13 for x86-64.  We chose R13 since it has
   longer instruction size.

   Return: the regno of the chosen register.  */

static unsigned int
find_drap_reg (void)
{
  tree decl = cfun->decl;

  if (TARGET_64BIT)
    {
      /* Use R13 for a nested function or a function that needs a static
	 chain.  Since a function with a tail call may use any caller-saved
	 registers in the epilogue, DRAP must not use a caller-saved
	 register in such a case.  */
      if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
	return R13_REG;

      return R10_REG;
    }
  else
    {
      /* Use DI for a nested function or a function that needs a static
	 chain.  Since a function with a tail call may use any caller-saved
	 registers in the epilogue, DRAP must not use a caller-saved
	 register in such a case.  */
      if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
	return DI_REG;
      /* Reuse the static chain register if it isn't used for parameter
	 passing.  */
      if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
	  && !lookup_attribute ("fastcall",
				TYPE_ATTRIBUTES (TREE_TYPE (decl)))
	  && !lookup_attribute ("thiscall",
				TYPE_ATTRIBUTES (TREE_TYPE (decl))))
	return CX_REG;

      return DI_REG;
    }
}
8364 /* Return minimum incoming stack alignment. */
static unsigned int
ix86_minimum_incoming_stack_boundary (bool sibcall)
{
8369 unsigned int incoming_stack_boundary;
8371 /* Prefer the one specified at command line. */
8372 if (ix86_user_incoming_stack_boundary)
8373 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
  /* In 32bit, use MIN_STACK_BOUNDARY for the incoming stack boundary
     if -mstackrealign is used: it isn't used for the sibcall check,
     and the estimated stack alignment is 128 bits.  */
  else if (!sibcall
	   && !TARGET_64BIT
	   && ix86_force_align_arg_pointer
	   && crtl->stack_alignment_estimated == 128)
    incoming_stack_boundary = MIN_STACK_BOUNDARY;
  else
    incoming_stack_boundary = ix86_default_incoming_stack_boundary;
8385 /* Incoming stack alignment can be changed on individual functions
8386 via force_align_arg_pointer attribute. We use the smallest
8387 incoming stack boundary. */
8388 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
8389 && lookup_attribute (ix86_force_align_arg_pointer_string,
8390 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
8391 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8393 /* The incoming stack frame has to be aligned at least at
8394 parm_stack_boundary. */
8395 if (incoming_stack_boundary < crtl->parm_stack_boundary)
8396 incoming_stack_boundary = crtl->parm_stack_boundary;
8398 /* Stack at entrance of main is aligned by runtime. We use the
8399 smallest incoming stack boundary. */
8400 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
8401 && DECL_NAME (current_function_decl)
8402 && MAIN_NAME_P (DECL_NAME (current_function_decl))
8403 && DECL_FILE_SCOPE_P (current_function_decl))
8404 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
  return incoming_stack_boundary;
}
8409 /* Update incoming stack boundary and estimated stack alignment. */
static void
ix86_update_stack_boundary (void)
{
8414 ix86_incoming_stack_boundary
8415 = ix86_minimum_incoming_stack_boundary (false);
  /* x86_64 varargs need 16-byte stack alignment for the register save
     area.  */
  if (TARGET_64BIT
      && cfun->stdarg
      && crtl->stack_alignment_estimated < 128)
    crtl->stack_alignment_estimated = 128;
}
8425 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
8426 needed or an rtx for DRAP otherwise. */
static rtx
ix86_get_drap_rtx (void)
{
8431 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
8432 crtl->need_drap = true;
8434 if (stack_realign_drap)
    {
      /* Assign DRAP to vDRAP and return vDRAP.  */
8437 unsigned int regno = find_drap_reg ();
8442 arg_ptr = gen_rtx_REG (Pmode, regno);
8443 crtl->drap_reg = arg_ptr;
8446 drap_vreg = copy_to_reg (arg_ptr);
8450 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
8453 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
8454 RTX_FRAME_RELATED_P (insn) = 1;
8462 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
static rtx
ix86_internal_arg_pointer (void)
{
  return virtual_incoming_args_rtx;
}
8470 /* Finalize stack_realign_needed flag, which will guide prologue/epilogue
8471 to be generated in correct form. */
static void
ix86_finalize_stack_realign_flags (void)
{
  /* Check if stack realign is really needed after reload, and
     store the result in cfun.  */
8477 unsigned int incoming_stack_boundary
8478 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
8479 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
8480 unsigned int stack_realign = (incoming_stack_boundary
8481 < (current_function_is_leaf
8482 ? crtl->max_used_stack_slot_alignment
8483 : crtl->stack_alignment_needed));
8485 if (crtl->stack_realign_finalized)
      /* After stack_realign_needed is finalized, we can no longer
	 update it.  */
8489 gcc_assert (crtl->stack_realign_needed == stack_realign);
8493 crtl->stack_realign_needed = stack_realign;
8494 crtl->stack_realign_finalized = true;
8498 /* Expand the prologue into a bunch of separate insns. */
void
ix86_expand_prologue (void)
{
  rtx insn;
  bool pic_reg_used;
8505 struct ix86_frame frame;
8506 HOST_WIDE_INT allocate;
8507 int gen_frame_pointer = frame_pointer_needed;
8509 ix86_finalize_stack_realign_flags ();
8511 /* DRAP should not coexist with stack_realign_fp */
8512 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
8514 /* Initialize CFA state for before the prologue. */
8515 ix86_cfa_state->reg = stack_pointer_rtx;
8516 ix86_cfa_state->offset = INCOMING_FRAME_SP_OFFSET;
8518 ix86_compute_frame_layout (&frame);
8520 if (ix86_function_ms_hook_prologue (current_function_decl))
    {
      rtx push, mov;

      /* Make sure the function starts with
	 8b ff	movl.s %edi,%edi
	 55	push   %ebp
	 8b ec	movl.s %esp,%ebp

8529 This matches the hookable function prologue in Win32 API
8530 functions in Microsoft Windows XP Service Pack 2 and newer.
8531 Wine uses this to enable Windows apps to hook the Win32 API
8532 functions provided by Wine. */
8533 insn = emit_insn (gen_vswapmov (gen_rtx_REG (SImode, DI_REG),
8534 gen_rtx_REG (SImode, DI_REG)));
8535 push = emit_insn (gen_push (hard_frame_pointer_rtx));
8536 mov = emit_insn (gen_vswapmov (hard_frame_pointer_rtx,
8537 stack_pointer_rtx));
8539 if (frame_pointer_needed && !(crtl->drap_reg
8540 && crtl->stack_realign_needed))
8542 /* The push %ebp and movl.s %esp, %ebp already set up
8543 the frame pointer. No need to do this again. */
8544 gen_frame_pointer = 0;
8545 RTX_FRAME_RELATED_P (push) = 1;
8546 RTX_FRAME_RELATED_P (mov) = 1;
8547 if (ix86_cfa_state->reg == stack_pointer_rtx)
8548 ix86_cfa_state->reg = hard_frame_pointer_rtx;
	}
      else
	{
	  /* If the frame pointer is not needed, pop %ebp again.  This
8552 could be optimized for cases where ebp needs to be backed up
8553 for some other reason. If stack realignment is needed, pop
8554 the base pointer again, align the stack, and later regenerate
8555 the frame pointer setup. The frame pointer generated by the
8556 hook prologue is not aligned, so it can't be used. */
	  insn = emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
	}
    }
8560 /* The first insn of a function that accepts its static chain on the
8561 stack is to push the register that would be filled in by a direct
8562 call. This insn will be skipped by the trampoline. */
8563 if (ix86_static_chain_on_stack)
    {
      rtx t;

      insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
8568 emit_insn (gen_blockage ());
8570 /* We don't want to interpret this push insn as a register save,
8571 only as a stack adjustment. The real copy of the register as
8572 a save will be done later, if needed. */
8573 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
8574 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8575 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8576 RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Emit prologue code to adjust stack alignment and set up DRAP, in case
     DRAP is needed and stack realignment is really needed after reload.  */
8581 if (crtl->drap_reg && crtl->stack_realign_needed)
    {
      rtx x, y;
      int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8585 int param_ptr_offset = UNITS_PER_WORD;
8587 if (ix86_static_chain_on_stack)
8588 param_ptr_offset += UNITS_PER_WORD;
8589 if (!call_used_regs[REGNO (crtl->drap_reg)])
8590 param_ptr_offset += UNITS_PER_WORD;
8592 gcc_assert (stack_realign_drap);
8594 /* Grab the argument pointer. */
      x = plus_constant (stack_pointer_rtx, param_ptr_offset);
      y = crtl->drap_reg;
      /* Only need to push the parameter pointer reg if it is a
	 caller-saved reg.  */
8600 if (!call_used_regs[REGNO (crtl->drap_reg)])
	{
	  /* Push the arg pointer reg.  */
	  insn = emit_insn (gen_push (y));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
8607 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
8608 RTX_FRAME_RELATED_P (insn) = 1;
8609 ix86_cfa_state->reg = crtl->drap_reg;
8611 /* Align the stack. */
8612 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8614 GEN_INT (-align_bytes)));
8615 RTX_FRAME_RELATED_P (insn) = 1;
      /* Replicate the return address on the stack so that the return
	 address can be reached via the (argp - 1) slot.  This is needed
	 to implement the macro RETURN_ADDR_RTX and the intrinsic function
	 expand_builtin_return_addr, etc.  */
      x = crtl->drap_reg;
      x = gen_frame_mem (Pmode,
8623 plus_constant (x, -UNITS_PER_WORD));
8624 insn = emit_insn (gen_push (x));
      RTX_FRAME_RELATED_P (insn) = 1;
    }
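  /* Illustrative sketch (added for exposition, not from the original
     sources) of the 32-bit DRAP sequence built above for a 16-byte
     alignment; the register choice is just an example:

	lea 4(%esp), %ecx	# grab the argument pointer
	and $-16, %esp		# align the stack
	push -4(%ecx)		# replicate the return address

     so the return address stays reachable at (argp - 1).  */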
8628 /* Note: AT&T enter does NOT have reversed args. Enter is probably
8629 slower on all targets. Also sdb doesn't like it. */
8631 if (gen_frame_pointer)
8633 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
8634 RTX_FRAME_RELATED_P (insn) = 1;
8636 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8637 RTX_FRAME_RELATED_P (insn) = 1;
8639 if (ix86_cfa_state->reg == stack_pointer_rtx)
8640 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8643 if (stack_realign_fp)
8645 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8646 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
8648 /* Align the stack. */
8649 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8651 GEN_INT (-align_bytes)));
8652 RTX_FRAME_RELATED_P (insn) = 1;
8655 allocate = frame.to_allocate + frame.nsseregs * 16 + frame.padding0;
8657 if (!frame.save_regs_using_mov)
    ix86_emit_save_regs ();
  else
    allocate += frame.nregs * UNITS_PER_WORD;
  /* When using the red zone we may start register saving before allocating
     the stack frame, saving one cycle of the prologue.  However, avoid
     doing this if we have to probe the stack, since at least on x86_64
     the stack probe can turn into a call that clobbers a red zone
     location.  */
8667 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
8668 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
8669 ix86_emit_save_regs_using_mov ((frame_pointer_needed
8670 && !crtl->stack_realign_needed)
8671 ? hard_frame_pointer_rtx
8672 : stack_pointer_rtx,
8673 -frame.nregs * UNITS_PER_WORD);
  if (allocate == 0)
    ;
  else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
8678 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8679 GEN_INT (-allocate), -1,
8680 ix86_cfa_state->reg == stack_pointer_rtx);
8683 rtx eax = gen_rtx_REG (Pmode, AX_REG);
8687 if (cfun->machine->call_abi == MS_ABI)
8690 eax_live = ix86_eax_live_at_start_p ();
8694 emit_insn (gen_push (eax));
8695 allocate -= UNITS_PER_WORD;
8698 emit_move_insn (eax, GEN_INT (allocate));
      if (TARGET_64BIT)
	insn = gen_allocate_stack_worker_64 (eax, eax);
      else
	insn = gen_allocate_stack_worker_32 (eax, eax);
8704 insn = emit_insn (insn);
8706 if (ix86_cfa_state->reg == stack_pointer_rtx)
8708 ix86_cfa_state->offset += allocate;
8709 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
8710 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8711 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8712 RTX_FRAME_RELATED_P (insn) = 1;
8717 if (frame_pointer_needed)
	    t = plus_constant (hard_frame_pointer_rtx,
			       allocate
			       - frame.to_allocate
			       - frame.nregs * UNITS_PER_WORD);
	  else
	    t = plus_constant (stack_pointer_rtx, allocate);
8724 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8728 if (frame.save_regs_using_mov
8729 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8730 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8732 if (!frame_pointer_needed
8733 || !(frame.to_allocate + frame.padding0)
8734 || crtl->stack_realign_needed)
8735 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8737 + frame.nsseregs * 16 + frame.padding0);
      else
	ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8740 -frame.nregs * UNITS_PER_WORD);
8742 if (!frame_pointer_needed
8743 || !(frame.to_allocate + frame.padding0)
8744 || crtl->stack_realign_needed)
8745 ix86_emit_save_sse_regs_using_mov (stack_pointer_rtx,
					   frame.to_allocate);
  else
    ix86_emit_save_sse_regs_using_mov (hard_frame_pointer_rtx,
8749 - frame.nregs * UNITS_PER_WORD
8750 - frame.nsseregs * 16
8753 pic_reg_used = false;
8754 if (pic_offset_table_rtx
8755 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8758 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8760 if (alt_pic_reg_used != INVALID_REGNUM)
8761 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8763 pic_reg_used = true;
8770 if (ix86_cmodel == CM_LARGE_PIC)
8772 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8773 rtx label = gen_label_rtx ();
8775 LABEL_PRESERVE_P (label) = 1;
8776 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8777 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8778 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8779 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8780 pic_offset_table_rtx, tmp_reg));
8783 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8786 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
8789 /* In the pic_reg_used case, make sure that the got load isn't deleted
8790 when mcount needs it. Blockage to avoid call movement across mcount
     call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
     note.  */
8793 if (crtl->profile && pic_reg_used)
8794 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8796 if (crtl->drap_reg && !crtl->stack_realign_needed)
      /* vDRAP is set up, but after reload it turns out that stack realign
	 isn't necessary; here we emit prologue code to set up DRAP
	 without the stack realign adjustment.  */
8802 int drap_bp_offset = UNITS_PER_WORD * 2;
8804 if (ix86_static_chain_on_stack)
8805 drap_bp_offset += UNITS_PER_WORD;
8806 x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8807 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8810 /* Prevent instructions from being scheduled into register save push
8811 sequence when access to the redzone area is done through frame pointer.
8812 The offset between the frame pointer and the stack pointer is calculated
8813 relative to the value of the stack pointer at the end of the function
8814 prologue, and moving instructions that access redzone area via frame
8815 pointer inside push sequence violates this assumption. */
8816 if (frame_pointer_needed && frame.red_zone_size)
8817 emit_insn (gen_memory_blockage ());
8819 /* Emit cld instruction if stringops are used in the function. */
8820 if (TARGET_CLD && ix86_current_function_needs_cld)
    emit_insn (gen_cld ());
}
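/* Illustrative note (added for exposition, not from the original
   sources): for a simple 32-bit function with a frame pointer and 24
   bytes of locals, the expansion above produces the classic prologue

     push %ebp
     mov  %esp, %ebp
     sub  $24, %esp  */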
8824 /* Emit code to restore REG using a POP insn. */
static void
ix86_emit_restore_reg_using_pop (rtx reg, HOST_WIDE_INT red_offset)
{
8829 rtx insn = emit_insn (ix86_gen_pop1 (reg));
8831 if (ix86_cfa_state->reg == crtl->drap_reg
8832 && REGNO (reg) == REGNO (crtl->drap_reg))
8834 /* Previously we'd represented the CFA as an expression
8835 like *(%ebp - 8). We've just popped that value from
8836 the stack, which means we need to reset the CFA to
8837 the drap register. This will remain until we restore
8838 the stack pointer. */
8839 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8840 RTX_FRAME_RELATED_P (insn) = 1;
8844 if (ix86_cfa_state->reg == stack_pointer_rtx)
8846 ix86_cfa_state->offset -= UNITS_PER_WORD;
8847 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8848 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
8849 RTX_FRAME_RELATED_P (insn) = 1;
8852 /* When the frame pointer is the CFA, and we pop it, we are
8853 swapping back to the stack pointer as the CFA. This happens
8854 for stack frames that don't allocate other data, so we assume
8855 the stack pointer is now pointing at the return address, i.e.
8856 the function entry state, which makes the offset be 1 word. */
8857 else if (ix86_cfa_state->reg == hard_frame_pointer_rtx
8858 && reg == hard_frame_pointer_rtx)
8860 ix86_cfa_state->reg = stack_pointer_rtx;
8861 ix86_cfa_state->offset -= UNITS_PER_WORD;
8863 add_reg_note (insn, REG_CFA_DEF_CFA,
8864 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8865 GEN_INT (ix86_cfa_state->offset)));
8866 RTX_FRAME_RELATED_P (insn) = 1;
8869 ix86_add_cfa_restore_note (insn, reg, red_offset);
8872 /* Emit code to restore saved registers using POP insns. */
static void
ix86_emit_restore_regs_using_pop (HOST_WIDE_INT red_offset)
{
  unsigned int regno;

8879 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8880 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
      {
	ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno),
					 red_offset);
	red_offset += UNITS_PER_WORD;
      }
}
8888 /* Emit code and notes for the LEAVE instruction. */
static void
ix86_emit_leave (HOST_WIDE_INT red_offset)
{
8893 rtx insn = emit_insn (ix86_gen_leave ());
8895 ix86_add_queued_cfa_restore_notes (insn);
8897 if (ix86_cfa_state->reg == hard_frame_pointer_rtx)
8899 ix86_cfa_state->reg = stack_pointer_rtx;
8900 ix86_cfa_state->offset -= UNITS_PER_WORD;
8902 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8903 copy_rtx (XVECEXP (PATTERN (insn), 0, 0)));
8904 RTX_FRAME_RELATED_P (insn) = 1;
      ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx, red_offset);
    }
}
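/* Illustrative note (added for exposition, not from the original
   sources): "leave" behaves like

     mov %ebp, %esp
     pop %ebp

   which is why the code above switches the CFA back to the stack
   pointer with an offset of one word.  */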
/* Emit code to restore saved registers using MOV insns.  First register
   is restored from POINTER + OFFSET.  */

static void
ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8913 HOST_WIDE_INT red_offset,
8914 int maybe_eh_return)
8917 rtx base_address = gen_rtx_MEM (Pmode, pointer);
8920 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8921 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8923 rtx reg = gen_rtx_REG (Pmode, regno);
8925 /* Ensure that adjust_address won't be forced to produce pointer
8926 out of range allowed by x86-64 instruction set. */
8927 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8931 r11 = gen_rtx_REG (DImode, R11_REG);
8932 emit_move_insn (r11, GEN_INT (offset));
8933 emit_insn (gen_adddi3 (r11, r11, pointer));
8934 base_address = gen_rtx_MEM (Pmode, r11);
8937 insn = emit_move_insn (reg,
8938 adjust_address (base_address, Pmode, offset));
8939 offset += UNITS_PER_WORD;
8941 if (ix86_cfa_state->reg == crtl->drap_reg
8942 && regno == REGNO (crtl->drap_reg))
8944 /* Previously we'd represented the CFA as an expression
8945 like *(%ebp - 8). We've just popped that value from
8946 the stack, which means we need to reset the CFA to
8947 the drap register. This will remain until we restore
8948 the stack pointer. */
8949 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8950 RTX_FRAME_RELATED_P (insn) = 1;
8953 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8955 red_offset += UNITS_PER_WORD;
/* Emit code to restore saved SSE registers using MOV insns.  First
   register is restored from POINTER + OFFSET.  */

static void
ix86_emit_restore_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8963 HOST_WIDE_INT red_offset,
8964 int maybe_eh_return)
8967 rtx base_address = gen_rtx_MEM (TImode, pointer);
8970 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8971 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8973 rtx reg = gen_rtx_REG (TImode, regno);
8975 /* Ensure that adjust_address won't be forced to produce pointer
8976 out of range allowed by x86-64 instruction set. */
8977 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8981 r11 = gen_rtx_REG (DImode, R11_REG);
8982 emit_move_insn (r11, GEN_INT (offset));
8983 emit_insn (gen_adddi3 (r11, r11, pointer));
8984 base_address = gen_rtx_MEM (TImode, r11);
8987 mem = adjust_address (base_address, TImode, offset);
8988 set_mem_align (mem, 128);
8989 emit_move_insn (reg, mem);
8992 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8998 /* Restore function stack, frame, and registers. */
void
ix86_expand_epilogue (int style)
{
  int sp_valid;
9004 struct ix86_frame frame;
9005 HOST_WIDE_INT offset, red_offset;
9006 struct machine_cfa_state cfa_state_save = *ix86_cfa_state;
9009 ix86_finalize_stack_realign_flags ();
9011 /* When stack is realigned, SP must be valid. */
9012 sp_valid = (!frame_pointer_needed
9013 || current_function_sp_is_unchanging
9014 || stack_realign_fp);
9016 ix86_compute_frame_layout (&frame);
9018 /* See the comment about red zone and frame
9019 pointer usage in ix86_expand_prologue. */
9020 if (frame_pointer_needed && frame.red_zone_size)
9021 emit_insn (gen_memory_blockage ());
9023 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
9024 gcc_assert (!using_drap || ix86_cfa_state->reg == crtl->drap_reg);
9026 /* Calculate start of saved registers relative to ebp. Special care
9027 must be taken for the normal return case of a function using
9028 eh_return: the eax and edx registers are marked as saved, but not
9029 restored along this path. */
9030 offset = frame.nregs;
  if (crtl->calls_eh_return && style != 2)
    offset -= 2;
9033 offset *= -UNITS_PER_WORD;
9034 offset -= frame.nsseregs * 16 + frame.padding0;
9036 /* Calculate start of saved registers relative to esp on entry of the
9037 function. When realigning stack, this needs to be the most negative
9038 value possible at runtime. */
  red_offset = offset;
  if (using_drap)
    red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
		  + UNITS_PER_WORD;
  else if (stack_realign_fp)
    red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
		  - UNITS_PER_WORD;
9046 if (ix86_static_chain_on_stack)
9047 red_offset -= UNITS_PER_WORD;
9048 if (frame_pointer_needed)
9049 red_offset -= UNITS_PER_WORD;
  /* If we're only restoring one register and sp is not valid then
     use a move instruction to restore the register, since it's
     less work than reloading sp and popping the register.

     The default code results in a stack adjustment using add/lea
     instructions, while this code results in a LEAVE instruction (or
     discrete equivalent), so it is profitable in some other cases as
     well.  Especially when there are no registers to restore.  We also
     use this code when TARGET_USE_LEAVE and there is exactly one
     register to pop.  This heuristic may need some tuning in future.  */
9061 if ((!sp_valid && (frame.nregs + frame.nsseregs) <= 1)
9062 || (TARGET_EPILOGUE_USING_MOVE
9063 && cfun->machine->use_fast_prologue_epilogue
9064 && ((frame.nregs + frame.nsseregs) > 1
9065 || (frame.to_allocate + frame.padding0) != 0))
9066 || (frame_pointer_needed && !(frame.nregs + frame.nsseregs)
9067 && (frame.to_allocate + frame.padding0) != 0)
9068 || (frame_pointer_needed && TARGET_USE_LEAVE
9069 && cfun->machine->use_fast_prologue_epilogue
9070 && (frame.nregs + frame.nsseregs) == 1)
9071 || crtl->calls_eh_return)
      /* Restore registers.  We can use ebp or esp to address the memory
	 locations.  If both are available, default to ebp, since offsets
	 are known to be small.  The only exception is esp pointing directly
	 to the end of the block of saved registers, where we may simplify
	 the addressing mode.

	 If we are realigning the stack with bp and sp, the register
	 restores can't be addressed by bp; sp must be used instead.  */
9082 if (!frame_pointer_needed
9083 || (sp_valid && !(frame.to_allocate + frame.padding0))
9084 || stack_realign_fp)
9086 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9087 frame.to_allocate, red_offset,
9089 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
9091 + frame.nsseregs * 16
9094 + frame.nsseregs * 16
9095 + frame.padding0, style == 2);
9099 ix86_emit_restore_sse_regs_using_mov (hard_frame_pointer_rtx,
9102 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
9104 + frame.nsseregs * 16
9107 + frame.nsseregs * 16
9108 + frame.padding0, style == 2);
9111 red_offset -= offset;
9113 /* eh_return epilogues need %ecx added to the stack pointer. */
      if (style == 2)
	{
	  rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
9118 /* Stack align doesn't work with eh_return. */
9119 gcc_assert (!crtl->stack_realign_needed);
	  /* Neither do regparm nested functions.  */
9121 gcc_assert (!ix86_static_chain_on_stack);
9123 if (frame_pointer_needed)
	    {
	      tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
9126 tmp = plus_constant (tmp, UNITS_PER_WORD);
9127 tmp = emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
9129 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
9130 tmp = emit_move_insn (hard_frame_pointer_rtx, tmp);
9132 /* Note that we use SA as a temporary CFA, as the return
9133 address is at the proper place relative to it. We
9134 pretend this happens at the FP restore insn because
9135 prior to this insn the FP would be stored at the wrong
9136 offset relative to SA, and after this insn we have no
9137 other reasonable register to use for the CFA. We don't
		 bother resetting the CFA to the SP for the duration of
		 the return insn.  */
9140 add_reg_note (tmp, REG_CFA_DEF_CFA,
9141 plus_constant (sa, UNITS_PER_WORD));
9142 ix86_add_queued_cfa_restore_notes (tmp);
9143 add_reg_note (tmp, REG_CFA_RESTORE, hard_frame_pointer_rtx);
9144 RTX_FRAME_RELATED_P (tmp) = 1;
9145 ix86_cfa_state->reg = sa;
9146 ix86_cfa_state->offset = UNITS_PER_WORD;
9148 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
9149 const0_rtx, style, false);
	    }
	  else
	    {
	      tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
9154 tmp = plus_constant (tmp, (frame.to_allocate
9155 + frame.nregs * UNITS_PER_WORD
9156 + frame.nsseregs * 16
9158 tmp = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
9159 ix86_add_queued_cfa_restore_notes (tmp);
9161 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9162 if (ix86_cfa_state->offset != UNITS_PER_WORD)
9164 ix86_cfa_state->offset = UNITS_PER_WORD;
9165 add_reg_note (tmp, REG_CFA_DEF_CFA,
9166 plus_constant (stack_pointer_rtx,
		  RTX_FRAME_RELATED_P (tmp) = 1;
		}
	    }
	}
9172 else if (!frame_pointer_needed)
9173 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9174 GEN_INT (frame.to_allocate
9175 + frame.nregs * UNITS_PER_WORD
9176 + frame.nsseregs * 16
9178 style, !using_drap);
9179 /* If not an i386, mov & pop is faster than "leave". */
9180 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
9181 || !cfun->machine->use_fast_prologue_epilogue)
9182 ix86_emit_leave (red_offset);
9185 pro_epilogue_adjust_stack (stack_pointer_rtx,
9186 hard_frame_pointer_rtx,
9187 const0_rtx, style, !using_drap);
9189 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx, red_offset);
      /* First step is to deallocate the stack frame so that we can
	 restore registers with post increment in epilogue.

	 If we realign the stack with the frame pointer, then the stack
	 pointer can't be recovered via lea $offset(%bp), %sp, because
	 there is a padding area between bp and sp for the realignment.
	 "add $to_allocate, %sp" must be used instead.  */
      if (!sp_valid)
	{
	  gcc_assert (frame_pointer_needed);
9204 gcc_assert (!stack_realign_fp);
9205 pro_epilogue_adjust_stack (stack_pointer_rtx,
9206 hard_frame_pointer_rtx,
9207 GEN_INT (offset), style, false);
9208 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9211 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9212 GEN_INT (frame.nsseregs * 16
9216 else if (frame.to_allocate || frame.padding0 || frame.nsseregs)
	{
	  ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9219 frame.to_allocate, red_offset,
9221 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9222 GEN_INT (frame.to_allocate
9223 + frame.nsseregs * 16
9224 + frame.padding0), style,
9225 !using_drap && !frame_pointer_needed);
	}

      ix86_emit_restore_regs_using_pop (red_offset + frame.nsseregs * 16
					+ frame.padding0);
9230 red_offset -= offset;
9232 if (frame_pointer_needed)
9234 /* Leave results in shorter dependency chains on CPUs that are
9235 able to grok it fast. */
9236 if (TARGET_USE_LEAVE)
9237 ix86_emit_leave (red_offset);
	      /* When stack realignment really happens, recovering the
		 stack pointer to the hard frame pointer is a must, if
		 not using leave.  */
9243 if (stack_realign_fp)
9244 pro_epilogue_adjust_stack (stack_pointer_rtx,
9245 hard_frame_pointer_rtx,
9246 const0_rtx, style, !using_drap);
9247 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx,
  if (using_drap)
    {
      int param_ptr_offset = UNITS_PER_WORD;
      rtx insn;
9258 gcc_assert (stack_realign_drap);
9260 if (ix86_static_chain_on_stack)
9261 param_ptr_offset += UNITS_PER_WORD;
9262 if (!call_used_regs[REGNO (crtl->drap_reg)])
9263 param_ptr_offset += UNITS_PER_WORD;
9265 insn = emit_insn ((*ix86_gen_add3) (stack_pointer_rtx,
9267 GEN_INT (-param_ptr_offset)));
9269 ix86_cfa_state->reg = stack_pointer_rtx;
9270 ix86_cfa_state->offset = param_ptr_offset;
9272 add_reg_note (insn, REG_CFA_DEF_CFA,
9273 gen_rtx_PLUS (Pmode, ix86_cfa_state->reg,
9274 GEN_INT (ix86_cfa_state->offset)));
9275 RTX_FRAME_RELATED_P (insn) = 1;
9277 if (!call_used_regs[REGNO (crtl->drap_reg)])
9278 ix86_emit_restore_reg_using_pop (crtl->drap_reg, -UNITS_PER_WORD);
    }

  /* Remove the saved static chain from the stack.  The use of ECX is
     merely as a scratch register, not as the actual static chain.  */
9283 if (ix86_static_chain_on_stack)
    {
      rtx r, insn;

      gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9288 ix86_cfa_state->offset += UNITS_PER_WORD;
9290 r = gen_rtx_REG (Pmode, CX_REG);
9291 insn = emit_insn (ix86_gen_pop1 (r));
9293 r = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
9294 r = gen_rtx_SET (VOIDmode, stack_pointer_rtx, r);
9295 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9296 RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Sibcall epilogues don't want a return instruction.  */
  if (style == 0)
    {
      *ix86_cfa_state = cfa_state_save;
      return;
    }
9306 if (crtl->args.pops_args && crtl->args.size)
9308 rtx popc = GEN_INT (crtl->args.pops_args);
9310 /* i386 can only pop 64K bytes. If asked to pop more, pop return
9311 address, do explicit add, and jump indirectly to the caller. */
9313 if (crtl->args.pops_args >= 65536)
	{
	  rtx ecx = gen_rtx_REG (SImode, CX_REG);
	  rtx insn;
9318 /* There is no "pascal" calling convention in any 64bit ABI. */
9319 gcc_assert (!TARGET_64BIT);
9321 insn = emit_insn (gen_popsi1 (ecx));
9322 ix86_cfa_state->offset -= UNITS_PER_WORD;
9324 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9325 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
9326 add_reg_note (insn, REG_CFA_REGISTER,
9327 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
9328 RTX_FRAME_RELATED_P (insn) = 1;
	  pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				     popc, -1, ix86_cfa_state->reg
				     == stack_pointer_rtx);
	  emit_jump_insn (gen_return_indirect_internal (ecx));
	}
      else
	emit_jump_insn (gen_return_pop_internal (popc));
    }
  else
9338 emit_jump_insn (gen_return_internal ());
9340 /* Restore the state back to the state from the prologue,
9341 so that it's correct for the next epilogue. */
  *ix86_cfa_state = cfa_state_save;
}
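/* Illustrative note (added for exposition, not from the original
   sources): a callee-pop function popping 8 argument bytes returns
   with "ret $8"; when more than 64K must be popped the expansion
   above uses

     pop %ecx		# return address into a scratch reg
     add $N, %esp	# pop the arguments
     jmp *%ecx		# return to the caller  */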
9345 /* Reset from the function's potential modifications. */
static void
ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9349 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9351 if (pic_offset_table_rtx)
9352 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
9354 /* Mach-O doesn't support labels at the end of objects, so if
9355 it looks like we might want one, insert a NOP. */
9357 rtx insn = get_last_insn ();
9360 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
9361 insn = PREV_INSN (insn);
9365 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
9366 fputs ("\tnop\n", file);
9372 /* Extract the parts of an RTL expression that is a valid memory address
9373 for an instruction. Return 0 if the structure of the address is
9374 grossly off. Return -1 if the address contains ASHIFT, so it is not
   strictly valid, but still used for computing the length of the lea
   instruction.  */
static int
ix86_decompose_address (rtx addr, struct ix86_address *out)
{
9380 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
9381 rtx base_reg, index_reg;
9382 HOST_WIDE_INT scale = 1;
9383 rtx scale_rtx = NULL_RTX;
  int retval = 1;
  enum ix86_address_seg seg = SEG_DEFAULT;
  if (REG_P (addr) || GET_CODE (addr) == SUBREG)
    base = addr;
9389 else if (GET_CODE (addr) == PLUS)
9399 addends[n++] = XEXP (op, 1);
9402 while (GET_CODE (op) == PLUS);
9407 for (i = n; i >= 0; --i)
9410 switch (GET_CODE (op))
9415 index = XEXP (op, 0);
9416 scale_rtx = XEXP (op, 1);
9420 if (XINT (op, 1) == UNSPEC_TP
9421 && TARGET_TLS_DIRECT_SEG_REFS
9422 && seg == SEG_DEFAULT)
9423 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
9452 else if (GET_CODE (addr) == MULT)
9454 index = XEXP (addr, 0); /* index*scale */
9455 scale_rtx = XEXP (addr, 1);
9457 else if (GET_CODE (addr) == ASHIFT)
9461 /* We're called for lea too, which implements ashift on occasion. */
9462 index = XEXP (addr, 0);
9463 tmp = XEXP (addr, 1);
9464 if (!CONST_INT_P (tmp))
9466 scale = INTVAL (tmp);
9467 if ((unsigned HOST_WIDE_INT) scale > 3)
9473 disp = addr; /* displacement */
9475 /* Extract the integral value of scale. */
9478 if (!CONST_INT_P (scale_rtx))
9480 scale = INTVAL (scale_rtx);
9483 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
9484 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
9486 /* Avoid useless 0 displacement. */
  if (disp == const0_rtx && (base || index))
    disp = NULL_RTX;
  /* Allow arg pointer and stack pointer as index if there is no scaling.  */
9491 if (base_reg && index_reg && scale == 1
9492 && (index_reg == arg_pointer_rtx
9493 || index_reg == frame_pointer_rtx
9494 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
    {
      rtx tmp;
      tmp = base, base = index, index = tmp;
      tmp = base_reg, base_reg = index_reg, index_reg = tmp;
    }
  /* Special case: %ebp cannot be encoded as a base without a displacement.
     Similarly %r13.  */
  if (!disp && base_reg
9505 && (base_reg == hard_frame_pointer_rtx
9506 || base_reg == frame_pointer_rtx
9507 || base_reg == arg_pointer_rtx
9508 || (REG_P (base_reg)
9509 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
	      || REGNO (base_reg) == R13_REG))))
    disp = const0_rtx;
9513 /* Special case: on K6, [%esi] makes the instruction vector decoded.
9514 Avoid this by transforming to [%esi+0].
9515 Reload calls address legitimization without cfun defined, so we need
9516 to test cfun for being non-NULL. */
9517 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
9518 && base_reg && !index_reg && !disp
      && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
    disp = const0_rtx;
9523 /* Special case: encode reg+reg instead of reg*2. */
9524 if (!base && index && scale == 2)
9525 base = index, base_reg = index_reg, scale = 1;
9527 /* Special case: scaling cannot be encoded without base or displacement. */
  if (!base && !disp && index && scale != 1)
    disp = const0_rtx;

  out->base = base;
  out->index = index;
  out->disp = disp;
  out->scale = scale;
  out->seg = seg;

  return retval;
}
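/* Illustrative note (added for exposition, not from the original
   sources): the address 16(%esi,%edi,4), which reaches RTL as

     (plus (plus (mult (reg %edi) (const_int 4))
		 (reg %esi))
	   (const_int 16))

   decomposes into base = %esi, index = %edi, scale = 4, disp = 16.  */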
/* Return the cost of the memory address X.
   For i386, it is better to use a complex address than let gcc copy
   the address into a reg and make a new pseudo.  But not if the address
   requires two regs - that would mean more pseudos with longer
   lifetimes.  */

static int
ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
{
9548 struct ix86_address parts;
9550 int ok = ix86_decompose_address (x, &parts);
9554 if (parts.base && GET_CODE (parts.base) == SUBREG)
9555 parts.base = SUBREG_REG (parts.base);
9556 if (parts.index && GET_CODE (parts.index) == SUBREG)
9557 parts.index = SUBREG_REG (parts.index);
9559 /* Attempt to minimize number of registers in the address. */
9561 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
9563 && (!REG_P (parts.index)
9564 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
9568 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
9570 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
9571 && parts.base != parts.index)
  /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
     since its predecode logic can't detect the length of instructions
     and it degenerates to vector decoding.  Increase the cost of such
     addresses here.  The penalty is minimally 2 cycles.  It may be
     worthwhile to split such addresses or even refuse them at all.

     The following addressing modes are affected:
      [base+scale*index]
      [scale*index+disp]
      [base+index]

     The first and last case may be avoidable by explicitly coding the
     zero into the memory address, but I don't have an AMD-K6 machine
     handy to check this theory.  */

  if (TARGET_K6
      && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
9591 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
9592 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
/* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
   this is used to form addresses to local data when -fPIC is in
   effect.  */
static int
darwin_local_data_pic (rtx disp)
{
9605 return (GET_CODE (disp) == UNSPEC
9606 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
9609 /* Determine if a given RTX is a valid constant. We already know this
9610 satisfies CONSTANT_P. */
bool
legitimate_constant_p (rtx x)
{
9615 switch (GET_CODE (x))
9620 if (GET_CODE (x) == PLUS)
9622 if (!CONST_INT_P (XEXP (x, 1)))
9627 if (TARGET_MACHO && darwin_local_data_pic (x))
9630 /* Only some unspecs are valid as "constants". */
9631 if (GET_CODE (x) == UNSPEC)
9632 switch (XINT (x, 1))
9637 return TARGET_64BIT;
9640 x = XVECEXP (x, 0, 0);
9641 return (GET_CODE (x) == SYMBOL_REF
9642 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9644 x = XVECEXP (x, 0, 0);
9645 return (GET_CODE (x) == SYMBOL_REF
9646 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
9651 /* We must have drilled down to a symbol. */
9652 if (GET_CODE (x) == LABEL_REF)
9654 if (GET_CODE (x) != SYMBOL_REF)
9659 /* TLS symbols are never valid. */
9660 if (SYMBOL_REF_TLS_MODEL (x))
9663 /* DLLIMPORT symbols are never valid. */
9664 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
9665 && SYMBOL_REF_DLLIMPORT_P (x))
9670 if (GET_MODE (x) == TImode
9671 && x != CONST0_RTX (TImode)
9677 if (!standard_sse_constant_p (x))
9684 /* Otherwise we handle everything else in the move patterns. */
9688 /* Determine if it's legal to put X into the constant pool. This
9689 is not possible for the address of thread-local symbols, which
9690 is checked above. */
static bool
ix86_cannot_force_const_mem (rtx x)
{
9695 /* We can always put integral constants and vectors in memory. */
9696 switch (GET_CODE (x))
9706 return !legitimate_constant_p (x);
9710 /* Nonzero if the constant value X is a legitimate general operand
9711 when generating PIC code. It is given that flag_pic is on and
9712 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
bool
legitimate_pic_operand_p (rtx x)
{
  rtx inner;

9719 switch (GET_CODE (x))
9722 inner = XEXP (x, 0);
9723 if (GET_CODE (inner) == PLUS
9724 && CONST_INT_P (XEXP (inner, 1)))
9725 inner = XEXP (inner, 0);
9727 /* Only some unspecs are valid as "constants". */
9728 if (GET_CODE (inner) == UNSPEC)
9729 switch (XINT (inner, 1))
9734 return TARGET_64BIT;
9736 x = XVECEXP (inner, 0, 0);
9737 return (GET_CODE (x) == SYMBOL_REF
9738 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9739 case UNSPEC_MACHOPIC_OFFSET:
9740 return legitimate_pic_address_disp_p (x);
9748 return legitimate_pic_address_disp_p (x);
/* Determine if a given CONST RTX is a valid memory displacement
   in PIC mode.  */

bool
legitimate_pic_address_disp_p (rtx disp)
{
9763 /* In 64bit mode we can allow direct addresses of symbols and labels
9764 when they are not dynamic symbols. */
  if (TARGET_64BIT)
    {
      rtx op0 = disp, op1;
9769 switch (GET_CODE (disp))
9775 if (GET_CODE (XEXP (disp, 0)) != PLUS)
9777 op0 = XEXP (XEXP (disp, 0), 0);
9778 op1 = XEXP (XEXP (disp, 0), 1);
9779 if (!CONST_INT_P (op1)
9780 || INTVAL (op1) >= 16*1024*1024
9781 || INTVAL (op1) < -16*1024*1024)
9783 if (GET_CODE (op0) == LABEL_REF)
9785 if (GET_CODE (op0) != SYMBOL_REF)
9790 /* TLS references should always be enclosed in UNSPEC. */
9791 if (SYMBOL_REF_TLS_MODEL (op0))
9793 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
9794 && ix86_cmodel != CM_LARGE_PIC)
9802 if (GET_CODE (disp) != CONST)
9804 disp = XEXP (disp, 0);
      /* It is unsafe to allow PLUS expressions.  This limits the allowed
	 distance of GOT table references.  We should not need these
	 anyway.  */
9810 if (GET_CODE (disp) != UNSPEC
9811 || (XINT (disp, 1) != UNSPEC_GOTPCREL
9812 && XINT (disp, 1) != UNSPEC_GOTOFF
9813 && XINT (disp, 1) != UNSPEC_PLTOFF))
9816 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
9817 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
9823 if (GET_CODE (disp) == PLUS)
9825 if (!CONST_INT_P (XEXP (disp, 1)))
9827 disp = XEXP (disp, 0);
9831 if (TARGET_MACHO && darwin_local_data_pic (disp))
9834 if (GET_CODE (disp) != UNSPEC)
9837 switch (XINT (disp, 1))
9842 /* We need to check for both symbols and labels because VxWorks loads
	 text labels with @GOT rather than @GOTOFF.  See gotoff_operand
	 for details.  */
9845 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9846 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
    case UNSPEC_GOTOFF:
      /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
	 While the ABI also specifies 32bit relocations, we don't produce
	 them in the small PIC model at all.  */
9851 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9852 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
9854 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
9856 case UNSPEC_GOTTPOFF:
9857 case UNSPEC_GOTNTPOFF:
9858 case UNSPEC_INDNTPOFF:
9861 disp = XVECEXP (disp, 0, 0);
9862 return (GET_CODE (disp) == SYMBOL_REF
9863 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
9865 disp = XVECEXP (disp, 0, 0);
9866 return (GET_CODE (disp) == SYMBOL_REF
9867 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
9869 disp = XVECEXP (disp, 0, 0);
9870 return (GET_CODE (disp) == SYMBOL_REF
9871 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
9877 /* Recognizes RTL expressions that are valid memory addresses for an
9878 instruction. The MODE argument is the machine mode for the MEM
9879 expression that wants to use this address.
   It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS
   should convert common non-canonical forms to canonical form so that
   they will be recognized.  */

static bool
9886 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
9887 rtx addr, bool strict)
{
  struct ix86_address parts;
9890 rtx base, index, disp;
9891 HOST_WIDE_INT scale;
9893 if (ix86_decompose_address (addr, &parts) <= 0)
9894 /* Decomposition failed. */
9898 index = parts.index;
9900 scale = parts.scale;
9902 /* Validate base register.
9904 Don't allow SUBREG's that span more than a word here. It can lead to spill
9905 failures when the base is one word out of a two word structure, which is
9906 represented internally as a DImode int. */
9914 else if (GET_CODE (base) == SUBREG
9915 && REG_P (SUBREG_REG (base))
9916 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
9918 reg = SUBREG_REG (base);
9920 /* Base is not a register. */
9923 if (GET_MODE (base) != Pmode)
9924 /* Base is not in Pmode. */
9927 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
9928 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
9929 /* Base is not valid. */
9933 /* Validate index register.
9935 Don't allow SUBREG's that span more than a word here -- same as above. */
9943 else if (GET_CODE (index) == SUBREG
9944 && REG_P (SUBREG_REG (index))
9945 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
9947 reg = SUBREG_REG (index);
9949 /* Index is not a register. */
9952 if (GET_MODE (index) != Pmode)
9953 /* Index is not in Pmode. */
9956 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
9957 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
9958 /* Index is not valid. */
9962 /* Validate scale factor. */
9966 /* Scale without index. */
9969 if (scale != 2 && scale != 4 && scale != 8)
9970 /* Scale is not a valid multiplier. */
9974 /* Validate displacement. */
9977 if (GET_CODE (disp) == CONST
9978 && GET_CODE (XEXP (disp, 0)) == UNSPEC
9979 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
9980 switch (XINT (XEXP (disp, 0), 1))
	{
	  /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit
	     when used.  While the ABI also specifies 32bit relocations,
	     we don't produce them at all and use IP-relative addressing
	     instead.  */
	case UNSPEC_GOTOFF:
	case UNSPEC_GOT:
9987 gcc_assert (flag_pic);
	  if (!TARGET_64BIT)
	    goto is_legitimate_pic;
	  /* 64bit address unspec.  */
	  return false;

9994 case UNSPEC_GOTPCREL:
9995 gcc_assert (flag_pic);
9996 goto is_legitimate_pic;
9998 case UNSPEC_GOTTPOFF:
9999 case UNSPEC_GOTNTPOFF:
10000 case UNSPEC_INDNTPOFF:
10001 case UNSPEC_NTPOFF:
10002 case UNSPEC_DTPOFF:
10006 /* Invalid address unspec. */
10010 else if (SYMBOLIC_CONST (disp)
10014 && MACHOPIC_INDIRECT
10015 && !machopic_operand_p (disp)
10021 if (TARGET_64BIT && (index || base))
10023 /* foo@dtpoff(%rX) is ok. */
10024 if (GET_CODE (disp) != CONST
10025 || GET_CODE (XEXP (disp, 0)) != PLUS
10026 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
10027 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
10028 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
10029 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
10030 /* Non-constant pic memory reference. */
10033 else if (! legitimate_pic_address_disp_p (disp))
10034 /* Displacement is an invalid pic construct. */
	  /* This code used to verify that a symbolic pic displacement
	     includes the pic_offset_table_rtx register.

	     While this is a good idea, unfortunately these constructs may
	     be created by the "adds using lea" optimization for incorrect
	     code like:

	     int a;
	     int foo(int i)
	       {
	         return *(&a+i);
	       }

	     This code is nonsensical, but results in addressing the
	     GOT table with a pic_offset_table_rtx base.  We can't
	     just refuse it easily, since it gets matched by the
	     "addsi3" pattern, which later gets split to lea in the
	     case the output register differs from the input.  While
	     this could be handled by a separate addsi pattern for this
	     case that never results in lea, this seems to be the easier
	     and correct fix for the crash, so we disable this test.  */
10059 else if (GET_CODE (disp) != LABEL_REF
10060 && !CONST_INT_P (disp)
10061 && (GET_CODE (disp) != CONST
10062 || !legitimate_constant_p (disp))
10063 && (GET_CODE (disp) != SYMBOL_REF
10064 || !legitimate_constant_p (disp)))
10065 /* Displacement is not constant. */
10067 else if (TARGET_64BIT
10068 && !x86_64_immediate_operand (disp, VOIDmode))
10069 /* Displacement is out of range. */
10073 /* Everything looks valid. */
/* Determine if a given RTX is a valid constant address.  */

bool
constant_address_p (rtx x)
{
  return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
}
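/* Illustrative note (added for exposition, not from the original
   sources): the checks above accept any base + index*scale + disp
   combination with scale in {1, 2, 4, 8}, e.g.

     movl 4(%eax,%ebx,2), %ecx	# base + index*2 + disp: accepted

   while a scale of 3, or a non-constant displacement, is rejected.  */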
10085 /* Return a unique alias set for the GOT. */
10087 static alias_set_type
10088 ix86_GOT_alias_set (void)
10090 static alias_set_type set = -1;
10092 set = new_alias_set ();
10096 /* Return a legitimate reference for ORIG (an address) using the
10097 register REG. If REG is 0, a new pseudo is generated.
10099 There are two types of references that must be handled:
10101 1. Global data references must load the address from the GOT, via
10102 the PIC reg. An insn is emitted to do this load, and the reg is
10105 2. Static data references, constant pool addresses, and code labels
10106 compute the address as an offset from the GOT, whose base is in
10107 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
10108 differentiate them from global data objects. The returned
10109 address is the PIC reg + an unspec constant.
10111 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
10112 reg also appears in the address. */
static rtx
legitimize_pic_address (rtx orig, rtx reg)
{
  rtx addr = orig;
10118 rtx new_rtx = orig;
10122 if (TARGET_MACHO && !TARGET_64BIT)
10125 reg = gen_reg_rtx (Pmode);
10126 /* Use the generic Mach-O PIC machinery. */
10127 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
10131 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
10133 else if (TARGET_64BIT
10134 && ix86_cmodel != CM_SMALL_PIC
10135 && gotoff_operand (addr, Pmode))
10138 /* This symbol may be referenced via a displacement from the PIC
10139 base address (@GOTOFF). */
10141 if (reload_in_progress)
10142 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10143 if (GET_CODE (addr) == CONST)
10144 addr = XEXP (addr, 0);
10145 if (GET_CODE (addr) == PLUS)
10147 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10149 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10152 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10153 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10155 tmpreg = gen_reg_rtx (Pmode);
10158 emit_move_insn (tmpreg, new_rtx);
10162 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
10163 tmpreg, 1, OPTAB_DIRECT);
10166 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
10168 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
10170 /* This symbol may be referenced via a displacement from the PIC
10171 base address (@GOTOFF). */
10173 if (reload_in_progress)
10174 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10175 if (GET_CODE (addr) == CONST)
10176 addr = XEXP (addr, 0);
10177 if (GET_CODE (addr) == PLUS)
10179 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10181 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10184 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10185 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10186 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10190 emit_move_insn (reg, new_rtx);
10194 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
10195 /* We can't use @GOTOFF for text labels on VxWorks;
10196 see gotoff_operand. */
10197 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
10199 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10201 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
10202 return legitimize_dllimport_symbol (addr, true);
10203 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
10204 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
10205 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
10207 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
10208 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
10212 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
10214 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
10215 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10216 new_rtx = gen_const_mem (Pmode, new_rtx);
10217 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10220 reg = gen_reg_rtx (Pmode);
	  /* Use gen_movsi directly; otherwise the address is loaded
	     into a register for CSE.  We don't want to CSE these
	     addresses; instead we CSE addresses from the GOT table,
	     so skip this.  */
10224 emit_insn (gen_movsi (reg, new_rtx));
10229 /* This symbol must be referenced via a load from the
10230 Global Offset Table (@GOT). */
10232 if (reload_in_progress)
10233 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10234 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
10235 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10237 new_rtx = force_reg (Pmode, new_rtx);
10238 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10239 new_rtx = gen_const_mem (Pmode, new_rtx);
10240 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10243 reg = gen_reg_rtx (Pmode);
10244 emit_move_insn (reg, new_rtx);
10250 if (CONST_INT_P (addr)
10251 && !x86_64_immediate_operand (addr, VOIDmode))
10255 emit_move_insn (reg, addr);
10259 new_rtx = force_reg (Pmode, addr);
10261 else if (GET_CODE (addr) == CONST)
10263 addr = XEXP (addr, 0);
10265 /* We must match stuff we generate before. Assume the only
10266 unspecs that can get here are ours. Not that we could do
10267 anything with them anyway.... */
10268 if (GET_CODE (addr) == UNSPEC
10269 || (GET_CODE (addr) == PLUS
10270 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
10272 gcc_assert (GET_CODE (addr) == PLUS);
10274 if (GET_CODE (addr) == PLUS)
10276 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
10278 /* Check first to see if this is a constant offset from a @GOTOFF
10279 symbol reference. */
10280 if (gotoff_operand (op0, Pmode)
10281 && CONST_INT_P (op1))
10285 if (reload_in_progress)
10286 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10287 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
10289 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
10290 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10291 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10295 emit_move_insn (reg, new_rtx);
10301 if (INTVAL (op1) < -16*1024*1024
10302 || INTVAL (op1) >= 16*1024*1024)
10304 if (!x86_64_immediate_operand (op1, Pmode))
10305 op1 = force_reg (Pmode, op1);
10306 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
10312 base = legitimize_pic_address (XEXP (addr, 0), reg);
10313 new_rtx = legitimize_pic_address (XEXP (addr, 1),
10314 base == reg ? NULL_RTX : reg);
10316 if (CONST_INT_P (new_rtx))
10317 new_rtx = plus_constant (base, INTVAL (new_rtx));
10320 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
10322 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
10323 new_rtx = XEXP (new_rtx, 1);
10325 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
10333 /* Load the thread pointer. If TO_REG is true, force it into a register. */
10336 get_thread_pointer (int to_reg)
10340 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
10344 reg = gen_reg_rtx (Pmode);
10345 insn = gen_rtx_SET (VOIDmode, reg, tp);
10346 insn = emit_insn (insn);
10351 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
10352 false if we expect this to be used for a memory address and true if
10353 we expect to load the address into a register. */
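/* For reference, the classical 32-bit sequences produced for the four
   TLS models look roughly like the following (illustrative only; the
   exact code depends on TARGET_64BIT, TARGET_GNU2_TLS and friends):

     global dynamic:  leal x@tlsgd(,%ebx,1), %eax
                      call ___tls_get_addr
     local dynamic:   leal x@tlsldm(%ebx), %eax
                      call ___tls_get_addr
                      leal x@dtpoff(%eax), %edx
     initial exec:    movl %gs:0, %eax
                      addl x@gotntpoff(%ebx), %eax
     local exec:      movl %gs:0, %eax
                      leal x@ntpoff(%eax), %eax  */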
10356 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
10358 rtx dest, base, off, pic, tp;
10363 case TLS_MODEL_GLOBAL_DYNAMIC:
10364 dest = gen_reg_rtx (Pmode);
10365 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10367 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10369 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
10372 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
10373 insns = get_insns ();
10376 RTL_CONST_CALL_P (insns) = 1;
10377 emit_libcall_block (insns, dest, rax, x);
10379 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10380 emit_insn (gen_tls_global_dynamic_64 (dest, x));
10382 emit_insn (gen_tls_global_dynamic_32 (dest, x));
10384 if (TARGET_GNU2_TLS)
10386 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
10388 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10392 case TLS_MODEL_LOCAL_DYNAMIC:
10393 base = gen_reg_rtx (Pmode);
10394 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10396 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10398 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
10401 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
10402 insns = get_insns ();
10405 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
10406 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
10407 RTL_CONST_CALL_P (insns) = 1;
10408 emit_libcall_block (insns, base, rax, note);
10410 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10411 emit_insn (gen_tls_local_dynamic_base_64 (base));
10413 emit_insn (gen_tls_local_dynamic_base_32 (base));
10415 if (TARGET_GNU2_TLS)
10417 rtx x = ix86_tls_module_base ();
10419 set_unique_reg_note (get_last_insn (), REG_EQUIV,
10420 gen_rtx_MINUS (Pmode, x, tp));
10423 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
10424 off = gen_rtx_CONST (Pmode, off);
10426 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
10428 if (TARGET_GNU2_TLS)
10430 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
10432 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10437 case TLS_MODEL_INITIAL_EXEC:
10441 type = UNSPEC_GOTNTPOFF;
10445 if (reload_in_progress)
10446 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10447 pic = pic_offset_table_rtx;
10448 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
10450 else if (!TARGET_ANY_GNU_TLS)
10452 pic = gen_reg_rtx (Pmode);
10453 emit_insn (gen_set_got (pic));
10454 type = UNSPEC_GOTTPOFF;
10459 type = UNSPEC_INDNTPOFF;
10462 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
10463 off = gen_rtx_CONST (Pmode, off);
10465 off = gen_rtx_PLUS (Pmode, pic, off);
10466 off = gen_const_mem (Pmode, off);
10467 set_mem_alias_set (off, ix86_GOT_alias_set ());
10469 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10471 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10472 off = force_reg (Pmode, off);
10473 return gen_rtx_PLUS (Pmode, base, off);
10477 base = get_thread_pointer (true);
10478 dest = gen_reg_rtx (Pmode);
10479 emit_insn (gen_subsi3 (dest, base, off));
10483 case TLS_MODEL_LOCAL_EXEC:
10484 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
10485 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10486 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
10487 off = gen_rtx_CONST (Pmode, off);
10489 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10491 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10492 return gen_rtx_PLUS (Pmode, base, off);
10496 base = get_thread_pointer (true);
10497 dest = gen_reg_rtx (Pmode);
10498 emit_insn (gen_subsi3 (dest, base, off));
10503 gcc_unreachable ();
10509 /* Create or return the unique __imp_DECL dllimport symbol corresponding
10512 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
10513 htab_t dllimport_map;
10516 get_dllimport_decl (tree decl)
10518 struct tree_map *h, in;
10521 const char *prefix;
10522 size_t namelen, prefixlen;
10527 if (!dllimport_map)
10528 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
10530 in.hash = htab_hash_pointer (decl);
10531 in.base.from = decl;
10532 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
10533 h = (struct tree_map *) *loc;
10537 *loc = h = GGC_NEW (struct tree_map);
10539 h->base.from = decl;
10540 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
10541 VAR_DECL, NULL, ptr_type_node);
10542 DECL_ARTIFICIAL (to) = 1;
10543 DECL_IGNORED_P (to) = 1;
10544 DECL_EXTERNAL (to) = 1;
10545 TREE_READONLY (to) = 1;
10547 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10548 name = targetm.strip_name_encoding (name);
10549 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
10550 ? "*__imp_" : "*__imp__";
10551 namelen = strlen (name);
10552 prefixlen = strlen (prefix);
10553 imp_name = (char *) alloca (namelen + prefixlen + 1);
10554 memcpy (imp_name, prefix, prefixlen);
10555 memcpy (imp_name + prefixlen, name, namelen + 1);
10557 name = ggc_alloc_string (imp_name, namelen + prefixlen);
10558 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
10559 SET_SYMBOL_REF_DECL (rtl, to);
10560 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
10562 rtl = gen_const_mem (Pmode, rtl);
10563 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
10565 SET_DECL_RTL (to, rtl);
10566 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
10571 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
10572 true if we require the result be a register. */
10575 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
10580 gcc_assert (SYMBOL_REF_DECL (symbol));
10581 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
10583 x = DECL_RTL (imp_decl);
10585 x = force_reg (Pmode, x);
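/* E.g. a reference to a dllimport'ed `foo' is replaced by a load from
   the import-table cell `__imp__foo' built above, roughly
   (illustrative):  movl __imp__foo, %eax  */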
10589 /* Try machine-dependent ways of modifying an illegitimate address
10590 to be legitimate. If we find one, return the new, valid address.
10591 This macro is used in only one place: `memory_address' in explow.c.
10593 OLDX is the address as it was before break_out_memory_refs was called.
10594 In some cases it is useful to look at this to decide what needs to be done.
10596 It is always safe for this macro to do nothing. It exists to recognize
10597 opportunities to optimize the output.
10599 For the 80386, we handle X+REG by loading X into a register R and
10600 using R+REG. R will go in a general reg and indexing will be used.
10601 However, if REG is a broken-out memory address or multiplication,
10602 nothing needs to be done because REG can certainly go in a general reg.
10604 When -fpic is used, special handling is needed for symbolic references.
10605 See comments by legitimize_pic_address in i386.c for details. */
10608 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
10609 enum machine_mode mode)
10614 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
10616 return legitimize_tls_address (x, (enum tls_model) log, false);
10617 if (GET_CODE (x) == CONST
10618 && GET_CODE (XEXP (x, 0)) == PLUS
10619 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10620 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
10622 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
10623 (enum tls_model) log, false);
10624 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10627 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10629 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
10630 return legitimize_dllimport_symbol (x, true);
10631 if (GET_CODE (x) == CONST
10632 && GET_CODE (XEXP (x, 0)) == PLUS
10633 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10634 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
10636 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
10637 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10641 if (flag_pic && SYMBOLIC_CONST (x))
10642 return legitimize_pic_address (x, 0);
10644 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
10645 if (GET_CODE (x) == ASHIFT
10646 && CONST_INT_P (XEXP (x, 1))
10647 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
10650 log = INTVAL (XEXP (x, 1));
10651 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
10652 GEN_INT (1 << log));
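/* E.g. (ashift (reg) (const_int 3)) becomes (mult (reg) (const_int 8)),
   which matches the scaled-index part of an x86 address, (reg,8).  */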
10655 if (GET_CODE (x) == PLUS)
10657 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
10659 if (GET_CODE (XEXP (x, 0)) == ASHIFT
10660 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10661 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
10664 log = INTVAL (XEXP (XEXP (x, 0), 1));
10665 XEXP (x, 0) = gen_rtx_MULT (Pmode,
10666 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
10667 GEN_INT (1 << log));
10670 if (GET_CODE (XEXP (x, 1)) == ASHIFT
10671 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10672 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
10675 log = INTVAL (XEXP (XEXP (x, 1), 1));
10676 XEXP (x, 1) = gen_rtx_MULT (Pmode,
10677 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
10678 GEN_INT (1 << log));
10681 /* Put multiply first if it isn't already. */
10682 if (GET_CODE (XEXP (x, 1)) == MULT)
10684 rtx tmp = XEXP (x, 0);
10685 XEXP (x, 0) = XEXP (x, 1);
10690 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
10691 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
10692 created by virtual register instantiation, register elimination, and
10693 similar optimizations. */
10694 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
10697 x = gen_rtx_PLUS (Pmode,
10698 gen_rtx_PLUS (Pmode, XEXP (x, 0),
10699 XEXP (XEXP (x, 1), 0)),
10700 XEXP (XEXP (x, 1), 1));
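/* E.g. (plus (mult (reg) (const_int 4)) (plus (reg) (const_int 20)))
   becomes (plus (plus (mult (reg) (const_int 4)) (reg)) (const_int 20)),
   the canonical base + index*scale + disp shape.  */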
/* Canonicalize
   (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
   into (plus (plus (mult (reg) (const)) (reg)) (const)).  */
10706 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
10707 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10708 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
10709 && CONSTANT_P (XEXP (x, 1)))
10712 rtx other = NULL_RTX;
10714 if (CONST_INT_P (XEXP (x, 1)))
10716 constant = XEXP (x, 1);
10717 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
10719 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
10721 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
10722 other = XEXP (x, 1);
10730 x = gen_rtx_PLUS (Pmode,
10731 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
10732 XEXP (XEXP (XEXP (x, 0), 1), 0)),
10733 plus_constant (other, INTVAL (constant)));
10737 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10740 if (GET_CODE (XEXP (x, 0)) == MULT)
10743 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
10746 if (GET_CODE (XEXP (x, 1)) == MULT)
10749 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
10753 && REG_P (XEXP (x, 1))
10754 && REG_P (XEXP (x, 0)))
10757 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
10760 x = legitimize_pic_address (x, 0);
10763 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10766 if (REG_P (XEXP (x, 0)))
10768 rtx temp = gen_reg_rtx (Pmode);
10769 rtx val = force_operand (XEXP (x, 1), temp);
10771 emit_move_insn (temp, val);
10773 XEXP (x, 1) = temp;
10777 else if (REG_P (XEXP (x, 1)))
10779 rtx temp = gen_reg_rtx (Pmode);
10780 rtx val = force_operand (XEXP (x, 0), temp);
10782 emit_move_insn (temp, val);
10784 XEXP (x, 0) = temp;
10792 /* Print an integer constant expression in assembler syntax. Addition
10793 and subtraction are the only arithmetic that may appear in these
10794 expressions. FILE is the stdio stream to write to, X is the rtx, and
10795 CODE is the operand print code from the output string. */
10798 output_pic_addr_const (FILE *file, rtx x, int code)
10802 switch (GET_CODE (x))
10805 gcc_assert (flag_pic);
10810 if (! TARGET_MACHO || TARGET_64BIT)
10811 output_addr_const (file, x);
10814 const char *name = XSTR (x, 0);
10816 /* Mark the decl as referenced so that cgraph will
10817 output the function. */
10818 if (SYMBOL_REF_DECL (x))
10819 mark_decl_referenced (SYMBOL_REF_DECL (x));
10822 if (MACHOPIC_INDIRECT
10823 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
10824 name = machopic_indirection_name (x, /*stub_p=*/true);
10826 assemble_name (file, name);
10828 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
10829 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
10830 fputs ("@PLT", file);
10837 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
10838 assemble_name (asm_out_file, buf);
10842 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
10846 /* This used to output parentheses around the expression,
10847 but that does not work on the 386 (either ATT or BSD assembler). */
10848 output_pic_addr_const (file, XEXP (x, 0), code);
10852 if (GET_MODE (x) == VOIDmode)
10854 /* We can use %d if the number is <32 bits and positive. */
10855 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
10856 fprintf (file, "0x%lx%08lx",
10857 (unsigned long) CONST_DOUBLE_HIGH (x),
10858 (unsigned long) CONST_DOUBLE_LOW (x));
10860 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
10863 /* We can't handle floating point constants;
10864 PRINT_OPERAND must handle them. */
10865 output_operand_lossage ("floating constant misused");
10869 /* Some assemblers need integer constants to appear first. */
10870 if (CONST_INT_P (XEXP (x, 0)))
10872 output_pic_addr_const (file, XEXP (x, 0), code);
10874 output_pic_addr_const (file, XEXP (x, 1), code);
10878 gcc_assert (CONST_INT_P (XEXP (x, 1)));
10879 output_pic_addr_const (file, XEXP (x, 1), code);
10881 output_pic_addr_const (file, XEXP (x, 0), code);
10887 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
10888 output_pic_addr_const (file, XEXP (x, 0), code);
10890 output_pic_addr_const (file, XEXP (x, 1), code);
10892 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
10896 gcc_assert (XVECLEN (x, 0) == 1);
10897 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
10898 switch (XINT (x, 1))
10901 fputs ("@GOT", file);
10903 case UNSPEC_GOTOFF:
10904 fputs ("@GOTOFF", file);
10906 case UNSPEC_PLTOFF:
10907 fputs ("@PLTOFF", file);
10909 case UNSPEC_GOTPCREL:
10910 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10911 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
10913 case UNSPEC_GOTTPOFF:
10914 /* FIXME: This might be @TPOFF in Sun ld too. */
10915 fputs ("@gottpoff", file);
10918 fputs ("@tpoff", file);
10920 case UNSPEC_NTPOFF:
10922 fputs ("@tpoff", file);
10924 fputs ("@ntpoff", file);
10926 case UNSPEC_DTPOFF:
10927 fputs ("@dtpoff", file);
10929 case UNSPEC_GOTNTPOFF:
10931 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10932 "@gottpoff(%rip)": "@gottpoff[rip]", file);
10934 fputs ("@gotntpoff", file);
10936 case UNSPEC_INDNTPOFF:
10937 fputs ("@indntpoff", file);
10940 case UNSPEC_MACHOPIC_OFFSET:
10942 machopic_output_function_base_name (file);
10946 output_operand_lossage ("invalid UNSPEC as operand");
10952 output_operand_lossage ("invalid expression as operand");
10956 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10957 We need to emit DTP-relative relocations. */
10959 static void ATTRIBUTE_UNUSED
10960 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
10962 fputs (ASM_LONG, file);
10963 output_addr_const (file, x);
10964 fputs ("@dtpoff", file);
10970 fputs (", 0", file);
10973 gcc_unreachable ();
10977 /* Return true if X is a representation of the PIC register. This copes
10978 with calls from ix86_find_base_term, where the register might have
10979 been replaced by a cselib value. */
10982 ix86_pic_register_p (rtx x)
10984 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
10985 return (pic_offset_table_rtx
10986 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
10988 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
10991 /* In the name of slightly smaller debug output, and to cater to
10992 general assembler lossage, recognize PIC+GOTOFF and turn it back
10993 into a direct symbol reference.
10995 On Darwin, this is necessary to avoid a crash, because Darwin
10996 has a different PIC label for each routine but the DWARF debugging
10997 information is not associated with any particular routine, so it's
10998 necessary to remove references to the PIC label from RTL stored by
10999 the DWARF output code. */
11002 ix86_delegitimize_address (rtx x)
11004 rtx orig_x = delegitimize_mem_from_attrs (x);
11005 /* addend is NULL or some rtx if x is something+GOTOFF where
11006 something doesn't include the PIC register. */
11007 rtx addend = NULL_RTX;
11008 /* reg_addend is NULL or a multiple of some register. */
11009 rtx reg_addend = NULL_RTX;
11010 /* const_addend is NULL or a const_int. */
11011 rtx const_addend = NULL_RTX;
11012 /* This is the result, or NULL. */
11013 rtx result = NULL_RTX;
11022 if (GET_CODE (x) != CONST
11023 || GET_CODE (XEXP (x, 0)) != UNSPEC
11024 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
11025 || !MEM_P (orig_x))
11027 return XVECEXP (XEXP (x, 0), 0, 0);
11030 if (GET_CODE (x) != PLUS
11031 || GET_CODE (XEXP (x, 1)) != CONST)
11034 if (ix86_pic_register_p (XEXP (x, 0)))
11035 /* %ebx + GOT/GOTOFF */
11037 else if (GET_CODE (XEXP (x, 0)) == PLUS)
11039 /* %ebx + %reg * scale + GOT/GOTOFF */
11040 reg_addend = XEXP (x, 0);
11041 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
11042 reg_addend = XEXP (reg_addend, 1);
11043 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
11044 reg_addend = XEXP (reg_addend, 0);
11047 reg_addend = NULL_RTX;
11048 addend = XEXP (x, 0);
11052 addend = XEXP (x, 0);
11054 x = XEXP (XEXP (x, 1), 0);
11055 if (GET_CODE (x) == PLUS
11056 && CONST_INT_P (XEXP (x, 1)))
11058 const_addend = XEXP (x, 1);
11062 if (GET_CODE (x) == UNSPEC
11063 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
11064 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
11065 result = XVECEXP (x, 0, 0);
11067 if (TARGET_MACHO && darwin_local_data_pic (x)
11068 && !MEM_P (orig_x))
11069 result = XVECEXP (x, 0, 0);
11075 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
11077 result = gen_rtx_PLUS (Pmode, reg_addend, result);
11080 /* If the rest of original X doesn't involve the PIC register, add
11081 addend and subtract pic_offset_table_rtx. This can happen e.g.
11083 leal (%ebx, %ecx, 4), %ecx
11085 movl foo@GOTOFF(%ecx), %edx
11086 in which case we return (%ecx - %ebx) + foo. */
11087 if (pic_offset_table_rtx)
11088 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
11089 pic_offset_table_rtx),
11097 /* If X is a machine specific address (i.e. a symbol or label being
11098 referenced as a displacement from the GOT implemented using an
11099 UNSPEC), then return the base term. Otherwise return X. */
11102 ix86_find_base_term (rtx x)
11108 if (GET_CODE (x) != CONST)
11110 term = XEXP (x, 0);
11111 if (GET_CODE (term) == PLUS
11112 && (CONST_INT_P (XEXP (term, 1))
11113 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
11114 term = XEXP (term, 0);
11115 if (GET_CODE (term) != UNSPEC
11116 || XINT (term, 1) != UNSPEC_GOTPCREL)
11119 return XVECEXP (term, 0, 0);
11122 return ix86_delegitimize_address (x);
11126 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
11127 int fp, FILE *file)
11129 const char *suffix;
11131 if (mode == CCFPmode || mode == CCFPUmode)
11133 code = ix86_fp_compare_code_to_integer (code);
11137 code = reverse_condition (code);
11188 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
11192 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
11193 Those same assemblers have the same but opposite lossage on cmov. */
11194 if (mode == CCmode)
11195 suffix = fp ? "nbe" : "a";
11196 else if (mode == CCCmode)
11199 gcc_unreachable ();
11215 gcc_unreachable ();
11219 gcc_assert (mode == CCmode || mode == CCCmode);
11236 gcc_unreachable ();
11240 /* ??? As above. */
11241 gcc_assert (mode == CCmode || mode == CCCmode);
11242 suffix = fp ? "nb" : "ae";
11245 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
11249 /* ??? As above. */
11250 if (mode == CCmode)
11252 else if (mode == CCCmode)
11253 suffix = fp ? "nb" : "ae";
11255 gcc_unreachable ();
11258 suffix = fp ? "u" : "p";
11261 suffix = fp ? "nu" : "np";
11264 gcc_unreachable ();
11266 fputs (suffix, file);
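/* So, e.g., an EQ comparison prints "e" and a GT comparison in CCGCmode
   prints "g"; a template like "j%C0" then assembles to "je" or "jg"
   (illustrative).  */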
11269 /* Print the name of register X to FILE based on its machine mode and number.
11270 If CODE is 'w', pretend the mode is HImode.
11271 If CODE is 'b', pretend the mode is QImode.
11272 If CODE is 'k', pretend the mode is SImode.
11273 If CODE is 'q', pretend the mode is DImode.
11274 If CODE is 'x', pretend the mode is V4SFmode.
11275 If CODE is 't', pretend the mode is V8SFmode.
11276 If CODE is 'h', pretend the reg is the 'high' byte register.
If CODE is 'y', print "st(0)" instead of "st", if the reg is a stack op.
If CODE is 'd', duplicate the operand for an AVX instruction.
11282 print_reg (rtx x, int code, FILE *file)
11285 bool duplicated = code == 'd' && TARGET_AVX;
11287 gcc_assert (x == pc_rtx
11288 || (REGNO (x) != ARG_POINTER_REGNUM
11289 && REGNO (x) != FRAME_POINTER_REGNUM
11290 && REGNO (x) != FLAGS_REG
11291 && REGNO (x) != FPSR_REG
11292 && REGNO (x) != FPCR_REG));
11294 if (ASSEMBLER_DIALECT == ASM_ATT)
11299 gcc_assert (TARGET_64BIT);
11300 fputs ("rip", file);
11304 if (code == 'w' || MMX_REG_P (x))
11306 else if (code == 'b')
11308 else if (code == 'k')
11310 else if (code == 'q')
11312 else if (code == 'y')
11314 else if (code == 'h')
11316 else if (code == 'x')
11318 else if (code == 't')
11321 code = GET_MODE_SIZE (GET_MODE (x));
/* Irritatingly, AMD extended registers use a different naming
   convention from the normal registers.  */
11325 if (REX_INT_REG_P (x))
11327 gcc_assert (TARGET_64BIT);
11331 error ("extended registers have no high halves");
11334 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
11337 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
11340 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
11343 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
11346 error ("unsupported operand size for extended register");
11356 if (STACK_TOP_P (x))
11365 if (! ANY_FP_REG_P (x))
11366 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
11371 reg = hi_reg_name[REGNO (x)];
11374 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
11376 reg = qi_reg_name[REGNO (x)];
11379 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
11381 reg = qi_high_reg_name[REGNO (x)];
11386 gcc_assert (!duplicated);
11388 fputs (hi_reg_name[REGNO (x)] + 1, file);
11393 gcc_unreachable ();
11399 if (ASSEMBLER_DIALECT == ASM_ATT)
11400 fprintf (file, ", %%%s", reg);
11402 fprintf (file, ", %s", reg);
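/* Examples (illustrative): register AX printed with code 'k' gives
   "%eax" in AT&T syntax and "eax" in Intel syntax; extended register
   r8 printed with code 'b' gives "r8b", per the AMD naming above.  */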
11406 /* Locate some local-dynamic symbol still in use by this function
so that we can print its name in some tls_local_dynamic_base pattern.
11411 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11415 if (GET_CODE (x) == SYMBOL_REF
11416 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11418 cfun->machine->some_ld_name = XSTR (x, 0);
11425 static const char *
11426 get_some_local_dynamic_name (void)
11430 if (cfun->machine->some_ld_name)
11431 return cfun->machine->some_ld_name;
11433 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11435 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
11436 return cfun->machine->some_ld_name;
11441 /* Meaning of CODE:
11442 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
11443 C -- print opcode suffix for set/cmov insn.
11444 c -- like C, but print reversed condition
11445 F,f -- likewise, but for floating-point.
11446 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
11448 R -- print the prefix for register names.
11449 z -- print the opcode suffix for the size of the current operand.
11450 Z -- likewise, with special suffixes for x87 instructions.
11451 * -- print a star (in certain assembler syntax)
11452 A -- print an absolute memory reference.
11453 w -- print the operand as if it's a "word" (HImode) even if it isn't.
s -- print a shift double count, followed by the assembler's argument
11456 b -- print the QImode name of the register for the indicated operand.
11457 %b0 would print %al if operands[0] is reg 0.
11458 w -- likewise, print the HImode name of the register.
11459 k -- likewise, print the SImode name of the register.
11460 q -- likewise, print the DImode name of the register.
11461 x -- likewise, print the V4SFmode name of the register.
11462 t -- likewise, print the V8SFmode name of the register.
11463 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
11464 y -- print "st(0)" instead of "st" as a register.
11465 d -- print duplicated register operand for AVX instruction.
11466 D -- print condition for SSE cmp instruction.
11467 P -- if PIC, print an @PLT suffix.
11468 X -- don't print any sort of PIC '@' suffix for a symbol.
11469 & -- print some in-use local-dynamic symbol name.
11470 H -- print a memory address offset by 8; used for sse high-parts
11471 Y -- print condition for XOP pcom* instruction.
11472 + -- print a branch hint as 'cs' or 'ds' prefix
11473 ; -- print a semicolon (after prefixes due to bug in older gas).
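   Examples (illustrative): with operands[0] in register AX, %k0 prints
   %eax and %q0 prints %rax (AT&T syntax); a template such as "add%z0"
   gets the b/w/l/q suffix matching the operand's size.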
11477 print_operand (FILE *file, rtx x, int code)
11484 if (ASSEMBLER_DIALECT == ASM_ATT)
11490 const char *name = get_some_local_dynamic_name ();
11492 output_operand_lossage ("'%%&' used without any "
11493 "local dynamic TLS references");
11495 assemble_name (file, name);
11500 switch (ASSEMBLER_DIALECT)
11507 /* Intel syntax. For absolute addresses, registers should not
11508 be surrounded by braces. */
11512 PRINT_OPERAND (file, x, 0);
11519 gcc_unreachable ();
11522 PRINT_OPERAND (file, x, 0);
11527 if (ASSEMBLER_DIALECT == ASM_ATT)
11532 if (ASSEMBLER_DIALECT == ASM_ATT)
11537 if (ASSEMBLER_DIALECT == ASM_ATT)
11542 if (ASSEMBLER_DIALECT == ASM_ATT)
11547 if (ASSEMBLER_DIALECT == ASM_ATT)
11552 if (ASSEMBLER_DIALECT == ASM_ATT)
11557 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11559 /* Opcodes don't get size suffixes if using Intel opcodes. */
11560 if (ASSEMBLER_DIALECT == ASM_INTEL)
11563 switch (GET_MODE_SIZE (GET_MODE (x)))
11582 output_operand_lossage
11583 ("invalid operand size for operand code '%c'", code);
11588 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11590 (0, "non-integer operand used with operand code '%c'", code);
11594 /* 387 opcodes don't get size suffixes if using Intel opcodes. */
11595 if (ASSEMBLER_DIALECT == ASM_INTEL)
11598 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11600 switch (GET_MODE_SIZE (GET_MODE (x)))
11603 #ifdef HAVE_AS_IX86_FILDS
11613 #ifdef HAVE_AS_IX86_FILDQ
11616 fputs ("ll", file);
11624 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11626 /* 387 opcodes don't get size suffixes
11627 if the operands are registers. */
11628 if (STACK_REG_P (x))
11631 switch (GET_MODE_SIZE (GET_MODE (x)))
11652 output_operand_lossage
11653 ("invalid operand type used with operand code '%c'", code);
11657 output_operand_lossage
11658 ("invalid operand size for operand code '%c'", code);
11675 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
11677 PRINT_OPERAND (file, x, 0);
11678 fputs (", ", file);
/* Little bit of braindamage here.  The SSE compare instructions
   use completely different names for the comparisons than the
   fp conditional moves do.  */
11688 switch (GET_CODE (x))
11691 fputs ("eq", file);
11694 fputs ("eq_us", file);
11697 fputs ("lt", file);
11700 fputs ("nge", file);
11703 fputs ("le", file);
11706 fputs ("ngt", file);
11709 fputs ("unord", file);
11712 fputs ("neq", file);
11715 fputs ("neq_oq", file);
11718 fputs ("ge", file);
11721 fputs ("nlt", file);
11724 fputs ("gt", file);
11727 fputs ("nle", file);
11730 fputs ("ord", file);
11733 output_operand_lossage ("operand is not a condition code, "
11734 "invalid operand code 'D'");
11740 switch (GET_CODE (x))
11744 fputs ("eq", file);
11748 fputs ("lt", file);
11752 fputs ("le", file);
11755 fputs ("unord", file);
11759 fputs ("neq", file);
11763 fputs ("nlt", file);
11767 fputs ("nle", file);
11770 fputs ("ord", file);
11773 output_operand_lossage ("operand is not a condition code, "
11774 "invalid operand code 'D'");
11780 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11781 if (ASSEMBLER_DIALECT == ASM_ATT)
11783 switch (GET_MODE (x))
11785 case HImode: putc ('w', file); break;
11787 case SFmode: putc ('l', file); break;
11789 case DFmode: putc ('q', file); break;
11790 default: gcc_unreachable ();
11797 if (!COMPARISON_P (x))
11799 output_operand_lossage ("operand is neither a constant nor a "
11800 "condition code, invalid operand code "
11804 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
11807 if (!COMPARISON_P (x))
11809 output_operand_lossage ("operand is neither a constant nor a "
11810 "condition code, invalid operand code "
11814 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11815 if (ASSEMBLER_DIALECT == ASM_ATT)
11818 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
11821 /* Like above, but reverse condition */
11823 /* Check to see if argument to %c is really a constant
11824 and not a condition code which needs to be reversed. */
11825 if (!COMPARISON_P (x))
11827 output_operand_lossage ("operand is neither a constant nor a "
11828 "condition code, invalid operand "
11832 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
11835 if (!COMPARISON_P (x))
11837 output_operand_lossage ("operand is neither a constant nor a "
11838 "condition code, invalid operand "
11842 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11843 if (ASSEMBLER_DIALECT == ASM_ATT)
11846 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
11850 /* It doesn't actually matter what mode we use here, as we're
11851 only going to use this for printing. */
11852 x = adjust_address_nv (x, DImode, 8);
11860 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
11863 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
11866 int pred_val = INTVAL (XEXP (x, 0));
11868 if (pred_val < REG_BR_PROB_BASE * 45 / 100
11869 || pred_val > REG_BR_PROB_BASE * 55 / 100)
11871 int taken = pred_val > REG_BR_PROB_BASE / 2;
11872 int cputaken = final_forward_branch_p (current_output_insn) == 0;
/* Emit hints only where the default branch prediction
   heuristics would fail.  */
11876 if (taken != cputaken)
11878 /* We use 3e (DS) prefix for taken branches and
11879 2e (CS) prefix for not taken branches. */
11881 fputs ("ds ; ", file);
11883 fputs ("cs ; ", file);
11891 switch (GET_CODE (x))
11894 fputs ("neq", file);
11897 fputs ("eq", file);
11901 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
11905 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
11909 fputs ("le", file);
11913 fputs ("lt", file);
11916 fputs ("unord", file);
11919 fputs ("ord", file);
11922 fputs ("ueq", file);
11925 fputs ("nlt", file);
11928 fputs ("nle", file);
11931 fputs ("ule", file);
11934 fputs ("ult", file);
11937 fputs ("une", file);
11940 output_operand_lossage ("operand is not a condition code, "
11941 "invalid operand code 'Y'");
11948 fputs (" ; ", file);
11955 output_operand_lossage ("invalid operand code '%c'", code);
11960 print_reg (x, code, file);
11962 else if (MEM_P (x))
11964 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
11965 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
11966 && GET_MODE (x) != BLKmode)
11969 switch (GET_MODE_SIZE (GET_MODE (x)))
11971 case 1: size = "BYTE"; break;
11972 case 2: size = "WORD"; break;
11973 case 4: size = "DWORD"; break;
11974 case 8: size = "QWORD"; break;
11975 case 12: size = "TBYTE"; break;
11977 if (GET_MODE (x) == XFmode)
11982 case 32: size = "YMMWORD"; break;
11984 gcc_unreachable ();
11987 /* Check for explicit size override (codes 'b', 'w' and 'k') */
11990 else if (code == 'w')
11992 else if (code == 'k')
11995 fputs (size, file);
11996 fputs (" PTR ", file);
12000 /* Avoid (%rip) for call operands. */
12001 if (CONSTANT_ADDRESS_P (x) && code == 'P'
12002 && !CONST_INT_P (x))
12003 output_addr_const (file, x);
12004 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
12005 output_operand_lossage ("invalid constraints for operand");
12007 output_address (x);
12010 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
12015 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
12016 REAL_VALUE_TO_TARGET_SINGLE (r, l);
12018 if (ASSEMBLER_DIALECT == ASM_ATT)
12020 fprintf (file, "0x%08lx", (long unsigned int) l);
12023 /* These float cases don't actually occur as immediate operands. */
12024 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
12028 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12029 fputs (dstr, file);
12032 else if (GET_CODE (x) == CONST_DOUBLE
12033 && GET_MODE (x) == XFmode)
12037 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12038 fputs (dstr, file);
12043 /* We have patterns that allow zero sets of memory, for instance.
12044 In 64-bit mode, we should probably support all 8-byte vectors,
12045 since we can in fact encode that into an immediate. */
12046 if (GET_CODE (x) == CONST_VECTOR)
12048 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
12054 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
12056 if (ASSEMBLER_DIALECT == ASM_ATT)
12059 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
12060 || GET_CODE (x) == LABEL_REF)
12062 if (ASSEMBLER_DIALECT == ASM_ATT)
12065 fputs ("OFFSET FLAT:", file);
12068 if (CONST_INT_P (x))
12069 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
12071 output_pic_addr_const (file, x, code);
12073 output_addr_const (file, x);
12077 /* Print a memory operand whose address is ADDR. */
12080 print_operand_address (FILE *file, rtx addr)
12082 struct ix86_address parts;
12083 rtx base, index, disp;
12085 int ok = ix86_decompose_address (addr, &parts);
12090 index = parts.index;
12092 scale = parts.scale;
12100 if (ASSEMBLER_DIALECT == ASM_ATT)
12102 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
12105 gcc_unreachable ();
/* Use the one-byte-shorter RIP-relative addressing in 64-bit mode.  */
12109 if (TARGET_64BIT && !base && !index)
12113 if (GET_CODE (disp) == CONST
12114 && GET_CODE (XEXP (disp, 0)) == PLUS
12115 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12116 symbol = XEXP (XEXP (disp, 0), 0);
12118 if (GET_CODE (symbol) == LABEL_REF
12119 || (GET_CODE (symbol) == SYMBOL_REF
12120 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
12123 if (!base && !index)
/* A displacement-only address requires special attention.  */
12127 if (CONST_INT_P (disp))
12129 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
12130 fputs ("ds:", file);
12131 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
12134 output_pic_addr_const (file, disp, 0);
12136 output_addr_const (file, disp);
12140 if (ASSEMBLER_DIALECT == ASM_ATT)
12145 output_pic_addr_const (file, disp, 0);
12146 else if (GET_CODE (disp) == LABEL_REF)
12147 output_asm_label (disp);
12149 output_addr_const (file, disp);
12154 print_reg (base, 0, file);
12158 print_reg (index, 0, file);
12160 fprintf (file, ",%d", scale);
12166 rtx offset = NULL_RTX;
12170 /* Pull out the offset of a symbol; print any symbol itself. */
12171 if (GET_CODE (disp) == CONST
12172 && GET_CODE (XEXP (disp, 0)) == PLUS
12173 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12175 offset = XEXP (XEXP (disp, 0), 1);
12176 disp = gen_rtx_CONST (VOIDmode,
12177 XEXP (XEXP (disp, 0), 0));
12181 output_pic_addr_const (file, disp, 0);
12182 else if (GET_CODE (disp) == LABEL_REF)
12183 output_asm_label (disp);
12184 else if (CONST_INT_P (disp))
12187 output_addr_const (file, disp);
12193 print_reg (base, 0, file);
12196 if (INTVAL (offset) >= 0)
12198 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12202 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12209 print_reg (index, 0, file);
12211 fprintf (file, "*%d", scale);
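/* E.g. base %ebx, index %ecx, scale 4 and displacement foo print as
   "foo(%ebx,%ecx,4)" in AT&T syntax and as "[ebx+ecx*4+foo]" in Intel
   syntax (illustrative).  */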
12219 output_addr_const_extra (FILE *file, rtx x)
12223 if (GET_CODE (x) != UNSPEC)
12226 op = XVECEXP (x, 0, 0);
12227 switch (XINT (x, 1))
12229 case UNSPEC_GOTTPOFF:
12230 output_addr_const (file, op);
12231 /* FIXME: This might be @TPOFF in Sun ld. */
12232 fputs ("@gottpoff", file);
12235 output_addr_const (file, op);
12236 fputs ("@tpoff", file);
12238 case UNSPEC_NTPOFF:
12239 output_addr_const (file, op);
12241 fputs ("@tpoff", file);
12243 fputs ("@ntpoff", file);
12245 case UNSPEC_DTPOFF:
12246 output_addr_const (file, op);
12247 fputs ("@dtpoff", file);
12249 case UNSPEC_GOTNTPOFF:
12250 output_addr_const (file, op);
12252 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12253 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
12255 fputs ("@gotntpoff", file);
12257 case UNSPEC_INDNTPOFF:
12258 output_addr_const (file, op);
12259 fputs ("@indntpoff", file);
12262 case UNSPEC_MACHOPIC_OFFSET:
12263 output_addr_const (file, op);
12265 machopic_output_function_base_name (file);
12276 /* Split one or more DImode RTL references into pairs of SImode
12277 references. The RTL can be REG, offsettable MEM, integer constant, or
12278 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
12279 split and "num" is its length. lo_half and hi_half are output arrays
12280 that parallel "operands". */
12283 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12287 rtx op = operands[num];
/* simplify_subreg refuses to split volatile memory addresses,
   but we still have to handle them.  */
12293 lo_half[num] = adjust_address (op, SImode, 0);
12294 hi_half[num] = adjust_address (op, SImode, 4);
12298 lo_half[num] = simplify_gen_subreg (SImode, op,
12299 GET_MODE (op) == VOIDmode
12300 ? DImode : GET_MODE (op), 0);
12301 hi_half[num] = simplify_gen_subreg (SImode, op,
12302 GET_MODE (op) == VOIDmode
12303 ? DImode : GET_MODE (op), 4);
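/* E.g. splitting the DImode constant 0x100000002 yields lo_half
   (const_int 2) and hi_half (const_int 1) on a little-endian target.  */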
12307 /* Split one or more TImode RTL references into pairs of DImode
12308 references. The RTL can be REG, offsettable MEM, integer constant, or
CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
12310 split and "num" is its length. lo_half and hi_half are output arrays
12311 that parallel "operands". */
12314 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12318 rtx op = operands[num];
/* simplify_subreg refuses to split volatile memory addresses, but we
   still have to handle them.  */
12324 lo_half[num] = adjust_address (op, DImode, 0);
12325 hi_half[num] = adjust_address (op, DImode, 8);
12329 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
12330 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
12335 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
12336 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
12337 is the expression of the binary operation. The output may either be
12338 emitted here, or returned to the caller, like all output_* functions.
12340 There is no guarantee that the operands are the same mode, as they
12341 might be within FLOAT or FLOAT_EXTEND expressions. */
12343 #ifndef SYSV386_COMPAT
12344 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
12345 wants to fix the assemblers because that causes incompatibility
12346 with gcc. No-one wants to fix gcc because that causes
12347 incompatibility with assemblers... You can use the option of
12348 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
12349 #define SYSV386_COMPAT 1
12353 output_387_binary_op (rtx insn, rtx *operands)
12355 static char buf[40];
12358 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
12360 #ifdef ENABLE_CHECKING
/* Even if we do not want to check the inputs, this documents the input
   constraints, which helps in understanding the following code.  */
12363 if (STACK_REG_P (operands[0])
12364 && ((REG_P (operands[1])
12365 && REGNO (operands[0]) == REGNO (operands[1])
12366 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
12367 || (REG_P (operands[2])
12368 && REGNO (operands[0]) == REGNO (operands[2])
12369 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
12370 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
12373 gcc_assert (is_sse);
12376 switch (GET_CODE (operands[3]))
12379 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12380 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12388 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12389 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12397 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12398 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12406 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12407 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12415 gcc_unreachable ();
12422 strcpy (buf, ssep);
12423 if (GET_MODE (operands[0]) == SFmode)
12424 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
12426 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
12430 strcpy (buf, ssep + 1);
12431 if (GET_MODE (operands[0]) == SFmode)
12432 strcat (buf, "ss\t{%2, %0|%0, %2}");
12434 strcat (buf, "sd\t{%2, %0|%0, %2}");
12440 switch (GET_CODE (operands[3]))
12444 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
12446 rtx temp = operands[2];
12447 operands[2] = operands[1];
12448 operands[1] = temp;
/* We know operands[0] == operands[1].  */
12453 if (MEM_P (operands[2]))
12459 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12461 if (STACK_TOP_P (operands[0]))
12462 /* How is it that we are storing to a dead operand[2]?
12463 Well, presumably operands[1] is dead too. We can't
12464 store the result to st(0) as st(0) gets popped on this
12465 instruction. Instead store to operands[2] (which I
12466 think has to be st(1)). st(1) will be popped later.
12467 gcc <= 2.8.1 didn't have this check and generated
12468 assembly code that the Unixware assembler rejected. */
12469 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12471 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12475 if (STACK_TOP_P (operands[0]))
12476 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12478 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12483 if (MEM_P (operands[1]))
12489 if (MEM_P (operands[2]))
12495 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12498 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
12499 derived assemblers, confusingly reverse the direction of
12500 the operation for fsub{r} and fdiv{r} when the
12501 destination register is not st(0). The Intel assembler
12502 doesn't have this brain damage. Read !SYSV386_COMPAT to
12503 figure out what the hardware really does. */
12504 if (STACK_TOP_P (operands[0]))
12505 p = "{p\t%0, %2|rp\t%2, %0}";
12507 p = "{rp\t%2, %0|p\t%0, %2}";
12509 if (STACK_TOP_P (operands[0]))
12510 /* As above for fmul/fadd, we can't store to st(0). */
12511 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12513 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12518 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
12521 if (STACK_TOP_P (operands[0]))
12522 p = "{rp\t%0, %1|p\t%1, %0}";
12524 p = "{p\t%1, %0|rp\t%0, %1}";
12526 if (STACK_TOP_P (operands[0]))
12527 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
12529 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
12534 if (STACK_TOP_P (operands[0]))
12536 if (STACK_TOP_P (operands[1]))
12537 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12539 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
12542 else if (STACK_TOP_P (operands[1]))
12545 p = "{\t%1, %0|r\t%0, %1}";
12547 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
12553 p = "{r\t%2, %0|\t%0, %2}";
12555 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12561 gcc_unreachable ();
/* Return the mode needed for ENTITY in the optimize_mode_switching pass.  */
12571 ix86_mode_needed (int entity, rtx insn)
12573 enum attr_i387_cw mode;
/* The mode UNINITIALIZED is used to store the control word after a
   function call or an ASM pattern.  The mode ANY specifies that the
   function has no requirements on the control word and makes no
   changes in the bits we are interested in.  */
12581 || (NONJUMP_INSN_P (insn)
12582 && (asm_noperands (PATTERN (insn)) >= 0
12583 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
12584 return I387_CW_UNINITIALIZED;
12586 if (recog_memoized (insn) < 0)
12587 return I387_CW_ANY;
12589 mode = get_attr_i387_cw (insn);
12594 if (mode == I387_CW_TRUNC)
12599 if (mode == I387_CW_FLOOR)
12604 if (mode == I387_CW_CEIL)
12609 if (mode == I387_CW_MASK_PM)
12614 gcc_unreachable ();
12617 return I387_CW_ANY;
/* Output code to initialize the control word copies used by the
   trunc?f?i and rounding patterns.  CURRENT_MODE is set to the current
   control word, while NEW_MODE is set to the new control word.  */
12625 emit_i387_cw_initialization (int mode)
12627 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
12630 enum ix86_stack_slot slot;
12632 rtx reg = gen_reg_rtx (HImode);
12634 emit_insn (gen_x86_fnstcw_1 (stored_mode));
12635 emit_move_insn (reg, copy_rtx (stored_mode));
12637 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
12638 || optimize_function_for_size_p (cfun))
12642 case I387_CW_TRUNC:
12643 /* round toward zero (truncate) */
12644 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
12645 slot = SLOT_CW_TRUNC;
12648 case I387_CW_FLOOR:
12649 /* round down toward -oo */
12650 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12651 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
12652 slot = SLOT_CW_FLOOR;
12656 /* round up toward +oo */
12657 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12658 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
12659 slot = SLOT_CW_CEIL;
12662 case I387_CW_MASK_PM:
12663 /* mask precision exception for nearbyint() */
12664 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12665 slot = SLOT_CW_MASK_PM;
12669 gcc_unreachable ();
12676 case I387_CW_TRUNC:
12677 /* round toward zero (truncate) */
12678 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
12679 slot = SLOT_CW_TRUNC;
12682 case I387_CW_FLOOR:
12683 /* round down toward -oo */
12684 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
12685 slot = SLOT_CW_FLOOR;
12689 /* round up toward +oo */
12690 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
12691 slot = SLOT_CW_CEIL;
12694 case I387_CW_MASK_PM:
12695 /* mask precision exception for nearbyint() */
12696 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12697 slot = SLOT_CW_MASK_PM;
12701 gcc_unreachable ();
12705 gcc_assert (slot < MAX_386_STACK_LOCALS);
12707 new_mode = assign_386_stack_local (HImode, slot);
12708 emit_move_insn (new_mode, reg);
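/* The rounding-control field occupies bits 10-11 of the x87 control
   word: 00 = to nearest, 01 = down, 10 = up, 11 = truncate.  Hence the
   0x0c00 masks and ors above; e.g. oring in 0x0c00 selects truncation
   for the trunc?f?i patterns.  */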
12711 /* Output code for INSN to convert a float to a signed int. OPERANDS
12712 are the insn operands. The output may be [HSD]Imode and the input
12713 operand may be [SDX]Fmode. */
12716 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
12718 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12719 int dimode_p = GET_MODE (operands[0]) == DImode;
12720 int round_mode = get_attr_i387_cw (insn);
12722 /* Jump through a hoop or two for DImode, since the hardware has no
12723 non-popping instruction. We used to do this a different way, but
12724 that was somewhat fragile and broke with post-reload splitters. */
12725 if ((dimode_p || fisttp) && !stack_top_dies)
12726 output_asm_insn ("fld\t%y1", operands);
12728 gcc_assert (STACK_TOP_P (operands[1]));
12729 gcc_assert (MEM_P (operands[0]));
12730 gcc_assert (GET_MODE (operands[1]) != TFmode);
12733 output_asm_insn ("fisttp%Z0\t%0", operands);
12736 if (round_mode != I387_CW_ANY)
12737 output_asm_insn ("fldcw\t%3", operands);
12738 if (stack_top_dies || dimode_p)
12739 output_asm_insn ("fistp%Z0\t%0", operands);
12741 output_asm_insn ("fist%Z0\t%0", operands);
12742 if (round_mode != I387_CW_ANY)
12743 output_asm_insn ("fldcw\t%2", operands);
12749 /* Output code for x87 ffreep insn. The OPNO argument, which may only
12750 have the values zero or one, indicates the ffreep insn's operand
12751 from the OPERANDS array. */
12753 static const char *
12754 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
12756 if (TARGET_USE_FFREEP)
12757 #ifdef HAVE_AS_IX86_FFREEP
12758 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
12761 static char retval[32];
12762 int regno = REGNO (operands[opno]);
12764 gcc_assert (FP_REGNO_P (regno));
12766 regno -= FIRST_STACK_REG;
12768 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
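/* ffreep st(i) encodes as the two bytes 0xdf 0xc0+i; emitting them as
   the little-endian short 0xc?df above yields that byte sequence even
   when the assembler does not know the ffreep mnemonic.  */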
12773 return opno ? "fstp\t%y1" : "fstp\t%y0";
12777 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
12778 should be used. UNORDERED_P is true when fucom should be used. */
12781 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
12783 int stack_top_dies;
12784 rtx cmp_op0, cmp_op1;
12785 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
12789 cmp_op0 = operands[0];
12790 cmp_op1 = operands[1];
12794 cmp_op0 = operands[1];
12795 cmp_op1 = operands[2];
12800 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
12801 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
12802 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
12803 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
12805 if (GET_MODE (operands[0]) == SFmode)
12807 return &ucomiss[TARGET_AVX ? 0 : 1];
12809 return &comiss[TARGET_AVX ? 0 : 1];
12812 return &ucomisd[TARGET_AVX ? 0 : 1];
12814 return &comisd[TARGET_AVX ? 0 : 1];
12817 gcc_assert (STACK_TOP_P (cmp_op0));
12819 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12821 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
12823 if (stack_top_dies)
12825 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
12826 return output_387_ffreep (operands, 1);
12829 return "ftst\n\tfnstsw\t%0";
12832 if (STACK_REG_P (cmp_op1)
12834 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
12835 && REGNO (cmp_op1) != FIRST_STACK_REG)
/* If the top of the 387 stack dies, and the other operand is also a
   stack register that dies, then this must be a `fcompp' float
   compare.  */
12843 /* There is no double popping fcomi variant. Fortunately,
12844 eflags is immune from the fstp's cc clobbering. */
12846 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
12848 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
12849 return output_387_ffreep (operands, 0);
12854 return "fucompp\n\tfnstsw\t%0";
12856 return "fcompp\n\tfnstsw\t%0";
12861 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
12863 static const char * const alt[16] =
12865 "fcom%Z2\t%y2\n\tfnstsw\t%0",
12866 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
12867 "fucom%Z2\t%y2\n\tfnstsw\t%0",
12868 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
12870 "ficom%Z2\t%y2\n\tfnstsw\t%0",
12871 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
12875 "fcomi\t{%y1, %0|%0, %y1}",
12876 "fcomip\t{%y1, %0|%0, %y1}",
12877 "fucomi\t{%y1, %0|%0, %y1}",
12878 "fucomip\t{%y1, %0|%0, %y1}",
12889 mask = eflags_p << 3;
12890 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
12891 mask |= unordered_p << 1;
12892 mask |= stack_top_dies;
12894 gcc_assert (mask < 16);
12903 ix86_output_addr_vec_elt (FILE *file, int value)
12905 const char *directive = ASM_LONG;
12909 directive = ASM_QUAD;
12911 gcc_assert (!TARGET_64BIT);
12914 fprintf (file, "%s" LPREFIX "%d\n", directive, value);
12918 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
12920 const char *directive = ASM_LONG;
12923 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
12924 directive = ASM_QUAD;
12926 gcc_assert (!TARGET_64BIT);
12928 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
12929 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
12930 fprintf (file, "%s" LPREFIX "%d-" LPREFIX "%d\n",
12931 directive, value, rel);
12932 else if (HAVE_AS_GOTOFF_IN_DATA)
12933 fprintf (file, ASM_LONG LPREFIX "%d@GOTOFF\n", value);
12935 else if (TARGET_MACHO)
12937 fprintf (file, ASM_LONG LPREFIX "%d-", value);
12938 machopic_output_function_base_name (file);
12943 asm_fprintf (file, ASM_LONG "%U%s+[.-" LPREFIX "%d]\n",
12944 GOT_SYMBOL_NAME, value);
12947 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
12951 ix86_expand_clear (rtx dest)
12955 /* We play register width games, which are only valid after reload. */
12956 gcc_assert (reload_completed);
12958 /* Avoid HImode and its attendant prefix byte. */
12959 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
12960 dest = gen_rtx_REG (SImode, REGNO (dest));
12961 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
12963 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
12964 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
12966 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12967 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
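/* The xor form ("xorl %eax, %eax") is shorter than "movl $0, %eax" but
   clobbers the flags, hence the CLOBBER attached above; the plain mov
   form is used only where TARGET_USE_MOV0 asks for it.  */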
12973 /* X is an unchanging MEM. If it is a constant pool reference, return
12974 the constant pool rtx, else NULL. */
12977 maybe_get_pool_constant (rtx x)
12979 x = ix86_delegitimize_address (XEXP (x, 0));
12981 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
12982 return get_pool_constant (x);
12988 ix86_expand_move (enum machine_mode mode, rtx operands[])
12991 enum tls_model model;
12996 if (GET_CODE (op1) == SYMBOL_REF)
12998 model = SYMBOL_REF_TLS_MODEL (op1);
13001 op1 = legitimize_tls_address (op1, model, true);
13002 op1 = force_operand (op1, op0);
13006 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13007 && SYMBOL_REF_DLLIMPORT_P (op1))
13008 op1 = legitimize_dllimport_symbol (op1, false);
13010 else if (GET_CODE (op1) == CONST
13011 && GET_CODE (XEXP (op1, 0)) == PLUS
13012 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
13014 rtx addend = XEXP (XEXP (op1, 0), 1);
13015 rtx symbol = XEXP (XEXP (op1, 0), 0);
13018 model = SYMBOL_REF_TLS_MODEL (symbol);
13020 tmp = legitimize_tls_address (symbol, model, true);
13021 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13022 && SYMBOL_REF_DLLIMPORT_P (symbol))
13023 tmp = legitimize_dllimport_symbol (symbol, true);
13027 tmp = force_operand (tmp, NULL);
13028 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
13029 op0, 1, OPTAB_DIRECT);
13035 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
13037 if (TARGET_MACHO && !TARGET_64BIT)
13042 rtx temp = ((reload_in_progress
13043 || ((op0 && REG_P (op0))
13045 ? op0 : gen_reg_rtx (Pmode));
13046 op1 = machopic_indirect_data_reference (op1, temp);
13047 op1 = machopic_legitimize_pic_address (op1, mode,
13048 temp == op1 ? 0 : temp);
13050 else if (MACHOPIC_INDIRECT)
13051 op1 = machopic_indirect_data_reference (op1, 0);
13059 op1 = force_reg (Pmode, op1);
13060 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
13062 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
13063 op1 = legitimize_pic_address (op1, reg);
13072 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
13073 || !push_operand (op0, mode))
13075 op1 = force_reg (mode, op1);
13077 if (push_operand (op0, mode)
13078 && ! general_no_elim_operand (op1, mode))
13079 op1 = copy_to_mode_reg (mode, op1);
/* Force large constants in 64-bit compilation into a register
   to get them CSEed.  */
13083 if (can_create_pseudo_p ()
13084 && (mode == DImode) && TARGET_64BIT
13085 && immediate_operand (op1, mode)
13086 && !x86_64_zext_immediate_operand (op1, VOIDmode)
13087 && !register_operand (op0, mode)
13089 op1 = copy_to_mode_reg (mode, op1);
13091 if (can_create_pseudo_p ()
13092 && FLOAT_MODE_P (mode)
13093 && GET_CODE (op1) == CONST_DOUBLE)
/* If we are loading a floating point constant to a register,
   force the value to memory now, since we'll get better code
   out of the back end.  */
13099 op1 = validize_mem (force_const_mem (mode, op1));
13100 if (!register_operand (op0, mode))
13102 rtx temp = gen_reg_rtx (mode);
13103 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
13104 emit_move_insn (op0, temp);
13110 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13114 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
13116 rtx op0 = operands[0], op1 = operands[1];
13117 unsigned int align = GET_MODE_ALIGNMENT (mode);
13119 /* Force constants other than zero into memory. We do not know how
13120 the instructions used to build constants modify the upper 64 bits
13121 of the register; once we have that information we may be able
13122 to handle some of them more efficiently. */
13123 if (can_create_pseudo_p ()
13124 && register_operand (op0, mode)
13125 && (CONSTANT_P (op1)
13126 || (GET_CODE (op1) == SUBREG
13127 && CONSTANT_P (SUBREG_REG (op1))))
13128 && !standard_sse_constant_p (op1))
13129 op1 = validize_mem (force_const_mem (mode, op1));
13131 /* We need to check memory alignment for SSE mode since attributes
13132 can make operands unaligned. */
13133 if (can_create_pseudo_p ()
13134 && SSE_REG_MODE_P (mode)
13135 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
13136 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
13140 /* ix86_expand_vector_move_misalign() does not like constants ... */
13141 if (CONSTANT_P (op1)
13142 || (GET_CODE (op1) == SUBREG
13143 && CONSTANT_P (SUBREG_REG (op1))))
13144 op1 = validize_mem (force_const_mem (mode, op1));
13146 /* ... nor both arguments in memory. */
13147 if (!register_operand (op0, mode)
13148 && !register_operand (op1, mode))
13149 op1 = force_reg (mode, op1);
13151 tmp[0] = op0; tmp[1] = op1;
13152 ix86_expand_vector_move_misalign (mode, tmp);
13156 /* Make operand1 a register if it isn't already. */
13157 if (can_create_pseudo_p ()
13158 && !register_operand (op0, mode)
13159 && !register_operand (op1, mode))
13161 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
13165 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13168 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
13169 straight to ix86_expand_vector_move. */
13170 /* Code generation for scalar reg-reg moves of single and double precision data:
13171 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
13175 if (x86_sse_partial_reg_dependency == true)
13180 Code generation for scalar loads of double precision data:
13181 if (x86_sse_split_regs == true)
13182 movlpd mem, reg (gas syntax)
13186 Code generation for unaligned packed loads of single precision data
13187 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
13188 if (x86_sse_unaligned_move_optimal)
13191 if (x86_sse_partial_reg_dependency == true)
13203 Code generation for unaligned packed loads of double precision data
13204 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
13205 if (x86_sse_unaligned_move_optimal)
13208 if (x86_sse_split_regs == true)
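   As a concrete illustration (a sketch; the actual choice depends on the
   tuning flags above), an unaligned packed double load may come out as
     movupd mem, reg                  (x86_sse_unaligned_move_optimal)
   or as the split pair
     movlpd mem, reg
     movhpd mem+8, reg                (x86_sse_split_regs)
   trading one unaligned access against two accesses of aligned halves.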
13221 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
13230 switch (GET_MODE_CLASS (mode))
13232 case MODE_VECTOR_INT:
13234 switch (GET_MODE_SIZE (mode))
13237 op0 = gen_lowpart (V16QImode, op0);
13238 op1 = gen_lowpart (V16QImode, op1);
13239 emit_insn (gen_avx_movdqu (op0, op1));
13242 op0 = gen_lowpart (V32QImode, op0);
13243 op1 = gen_lowpart (V32QImode, op1);
13244 emit_insn (gen_avx_movdqu256 (op0, op1));
13247 gcc_unreachable ();
13250 case MODE_VECTOR_FLOAT:
13251 op0 = gen_lowpart (mode, op0);
13252 op1 = gen_lowpart (mode, op1);
13257 emit_insn (gen_avx_movups (op0, op1));
13260 emit_insn (gen_avx_movups256 (op0, op1));
13263 emit_insn (gen_avx_movupd (op0, op1));
13266 emit_insn (gen_avx_movupd256 (op0, op1));
13269 gcc_unreachable ();
13274 gcc_unreachable ();
13282 /* If we're optimizing for size, movups is the smallest. */
13283 if (optimize_insn_for_size_p ())
13285 op0 = gen_lowpart (V4SFmode, op0);
13286 op1 = gen_lowpart (V4SFmode, op1);
13287 emit_insn (gen_sse_movups (op0, op1));
13291 /* ??? If we have typed data, then it would appear that using
13292 movdqu is the only way to get unaligned data loaded with
13293 integer type. */
13294 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13296 op0 = gen_lowpart (V16QImode, op0);
13297 op1 = gen_lowpart (V16QImode, op1);
13298 emit_insn (gen_sse2_movdqu (op0, op1));
13302 if (TARGET_SSE2 && mode == V2DFmode)
13306 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13308 op0 = gen_lowpart (V2DFmode, op0);
13309 op1 = gen_lowpart (V2DFmode, op1);
13310 emit_insn (gen_sse2_movupd (op0, op1));
13314 /* When SSE registers are split into halves, we can avoid
13315 writing to the top half twice. */
13316 if (TARGET_SSE_SPLIT_REGS)
13318 emit_clobber (op0);
13323 /* ??? Not sure about the best option for the Intel chips.
13324 The following would seem to satisfy; the register is
13325 entirely cleared, breaking the dependency chain. We
13326 then store to the upper half, with a dependency depth
13327 of one. A rumor has it that Intel recommends two movsd
13328 followed by an unpacklpd, but this is unconfirmed. And
13329 given that the dependency depth of the unpacklpd would
13330 still be one, I'm not sure why this would be better. */
13331 zero = CONST0_RTX (V2DFmode);
13334 m = adjust_address (op1, DFmode, 0);
13335 emit_insn (gen_sse2_loadlpd (op0, zero, m));
13336 m = adjust_address (op1, DFmode, 8);
13337 emit_insn (gen_sse2_loadhpd (op0, op0, m));
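	  /* Since a loadlpd with a zero source can be emitted as a movsd
	     load, which clears the whole register, the pair above comes
	     out roughly as (an illustrative sketch)

		 movsd   mem, %xmm0        (low half, upper half zeroed)
		 movhpd  mem+8, %xmm0      (high half)

	     with a dependency depth of one, as described above.  */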
13341 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13343 op0 = gen_lowpart (V4SFmode, op0);
13344 op1 = gen_lowpart (V4SFmode, op1);
13345 emit_insn (gen_sse_movups (op0, op1));
13349 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
13350 emit_move_insn (op0, CONST0_RTX (mode));
13352 emit_clobber (op0);
13354 if (mode != V4SFmode)
13355 op0 = gen_lowpart (V4SFmode, op0);
13356 m = adjust_address (op1, V2SFmode, 0);
13357 emit_insn (gen_sse_loadlps (op0, op0, m));
13358 m = adjust_address (op1, V2SFmode, 8);
13359 emit_insn (gen_sse_loadhps (op0, op0, m));
13362 else if (MEM_P (op0))
13364 /* If we're optimizing for size, movups is the smallest. */
13365 if (optimize_insn_for_size_p ())
13367 op0 = gen_lowpart (V4SFmode, op0);
13368 op1 = gen_lowpart (V4SFmode, op1);
13369 emit_insn (gen_sse_movups (op0, op1));
13373 /* ??? Similar to above, only less clear because of quote
13374 typeless stores unquote. */
13375 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
13376 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13378 op0 = gen_lowpart (V16QImode, op0);
13379 op1 = gen_lowpart (V16QImode, op1);
13380 emit_insn (gen_sse2_movdqu (op0, op1));
13384 if (TARGET_SSE2 && mode == V2DFmode)
13386 m = adjust_address (op0, DFmode, 0);
13387 emit_insn (gen_sse2_storelpd (m, op1));
13388 m = adjust_address (op0, DFmode, 8);
13389 emit_insn (gen_sse2_storehpd (m, op1));
13393 if (mode != V4SFmode)
13394 op1 = gen_lowpart (V4SFmode, op1);
13395 m = adjust_address (op0, V2SFmode, 0);
13396 emit_insn (gen_sse_storelps (m, op1));
13397 m = adjust_address (op0, V2SFmode, 8);
13398 emit_insn (gen_sse_storehps (m, op1));
13402 gcc_unreachable ();
13405 /* Expand a push in MODE. This is some mode for which we do not support
13406 proper push instructions, at least from the registers that we expect
13407 the value to live in. */
13410 ix86_expand_push (enum machine_mode mode, rtx x)
13414 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
13415 GEN_INT (-GET_MODE_SIZE (mode)),
13416 stack_pointer_rtx, 1, OPTAB_DIRECT);
13417 if (tmp != stack_pointer_rtx)
13418 emit_move_insn (stack_pointer_rtx, tmp);
13420 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
13422 /* When we push an operand onto stack, it has to be aligned at least
13423 at the function argument boundary. However since we don't have
13424 the argument type, we can't determine the actual argument
13425 boundary. */
13426 emit_move_insn (tmp, x);
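/* E.g. pushing a 16-byte SSE value this way comes out roughly as

       sub    $16, %esp
       movups %xmm0, (%esp)

   instead of a real push insn (an illustrative sketch only).  */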
13429 /* Helper function of ix86_fixup_binary_operands to canonicalize
13430 operand order. Returns true if the operands should be swapped. */
13433 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
13436 rtx dst = operands[0];
13437 rtx src1 = operands[1];
13438 rtx src2 = operands[2];
13440 /* If the operation is not commutative, we can't do anything. */
13441 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
13444 /* Highest priority is that src1 should match dst. */
13445 if (rtx_equal_p (dst, src1))
13447 if (rtx_equal_p (dst, src2))
13450 /* Next highest priority is that immediate constants come second. */
13451 if (immediate_operand (src2, mode))
13453 if (immediate_operand (src1, mode))
13456 /* Lowest priority is that memory references should come second. */
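/* For example (a sketch): given the commutative addition

       (set (reg:SI 60) (plus:SI (const_int 8) (reg:SI 60)))

   the rules above request a swap, so that the destination matches the
   first source and the immediate constant comes second.  */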
13466 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
13467 destination to use for the operation. If different from the true
13468 destination in operands[0], a copy operation will be required. */
13471 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
13474 rtx dst = operands[0];
13475 rtx src1 = operands[1];
13476 rtx src2 = operands[2];
13478 /* Canonicalize operand order. */
13479 if (ix86_swap_binary_operands_p (code, mode, operands))
13483 /* It is invalid to swap operands of different modes. */
13484 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
13491 /* Both source operands cannot be in memory. */
13492 if (MEM_P (src1) && MEM_P (src2))
13494 /* Optimization: Only read from memory once. */
13495 if (rtx_equal_p (src1, src2))
13497 src2 = force_reg (mode, src2);
13501 src2 = force_reg (mode, src2);
13504 /* If the destination is memory, and we do not have matching source
13505 operands, do things in registers. */
13506 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13507 dst = gen_reg_rtx (mode);
13509 /* Source 1 cannot be a constant. */
13510 if (CONSTANT_P (src1))
13511 src1 = force_reg (mode, src1);
13513 /* Source 1 cannot be a non-matching memory. */
13514 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13515 src1 = force_reg (mode, src1);
13517 operands[1] = src1;
13518 operands[2] = src2;
13522 /* Similarly, but assume that the destination has already been
13523 set up properly. */
13526 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
13527 enum machine_mode mode, rtx operands[])
13529 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
13530 gcc_assert (dst == operands[0]);
13533 /* Attempt to expand a binary operator. Make the expansion closer to the
13534 actual machine, than just general_operand, which will allow 3 separate
13535 memory references (one output, two input) in a single insn. */
13538 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
13541 rtx src1, src2, dst, op, clob;
13543 dst = ix86_fixup_binary_operands (code, mode, operands);
13544 src1 = operands[1];
13545 src2 = operands[2];
13547 /* Emit the instruction. */
13549 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
13550 if (reload_in_progress)
13552 /* Reload doesn't know about the flags register, and doesn't know that
13553 it doesn't want to clobber it. We can only do this with PLUS. */
13554 gcc_assert (code == PLUS);
13559 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13560 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13563 /* Fix up the destination if needed. */
13564 if (dst != operands[0])
13565 emit_move_insn (operands[0], dst);
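/* The insn emitted above has the shape (an illustrative sketch)

       (parallel [(set (reg:SI dst)
                       (plus:SI (reg:SI src1) (reg:SI src2)))
                  (clobber (reg:CC FLAGS_REG))])

   matching the arithmetic define_insn patterns, which clobber the
   flags register.  */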
13568 /* Return TRUE or FALSE depending on whether the binary operator meets the
13569 appropriate constraints. */
13572 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
13575 rtx dst = operands[0];
13576 rtx src1 = operands[1];
13577 rtx src2 = operands[2];
13579 /* Both source operands cannot be in memory. */
13580 if (MEM_P (src1) && MEM_P (src2))
13583 /* Canonicalize operand order for commutative operators. */
13584 if (ix86_swap_binary_operands_p (code, mode, operands))
13591 /* If the destination is memory, we must have a matching source operand. */
13592 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13595 /* Source 1 cannot be a constant. */
13596 if (CONSTANT_P (src1))
13599 /* Source 1 cannot be a non-matching memory. */
13600 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13606 /* Attempt to expand a unary operator. Make the expansion closer to the
13607 actual machine, than just general_operand, which will allow 2 separate
13608 memory references (one output, one input) in a single insn. */
13611 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
13614 int matching_memory;
13615 rtx src, dst, op, clob;
13620 /* If the destination is memory, and we do not have matching source
13621 operands, do things in registers. */
13622 matching_memory = 0;
13625 if (rtx_equal_p (dst, src))
13626 matching_memory = 1;
13628 dst = gen_reg_rtx (mode);
13631 /* When source operand is memory, destination must match. */
13632 if (MEM_P (src) && !matching_memory)
13633 src = force_reg (mode, src);
13635 /* Emit the instruction. */
13637 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
13638 if (reload_in_progress || code == NOT)
13640 /* Reload doesn't know about the flags register, and doesn't know that
13641 it doesn't want to clobber it. */
13642 gcc_assert (code == NOT);
13647 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13648 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13651 /* Fix up the destination if needed. */
13652 if (dst != operands[0])
13653 emit_move_insn (operands[0], dst);
13656 #define LEA_SEARCH_THRESHOLD 12
13658 /* Search backward for non-agu definition of register number REGNO1
13659 or register number REGNO2 in INSN's basic block until
13660 1. Pass LEA_SEARCH_THRESHOLD instructions, or
13661 2. Reach BB boundary, or
13662 3. Reach agu definition.
13663 Returns the distance between the non-agu definition point and INSN.
13664 If no definition point, returns -1. */
13667 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
13670 basic_block bb = BLOCK_FOR_INSN (insn);
13673 enum attr_type insn_type;
13675 if (insn != BB_HEAD (bb))
13677 rtx prev = PREV_INSN (insn);
13678 while (prev && distance < LEA_SEARCH_THRESHOLD)
13683 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13684 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13685 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13686 && (regno1 == DF_REF_REGNO (*def_rec)
13687 || regno2 == DF_REF_REGNO (*def_rec)))
13689 insn_type = get_attr_type (prev);
13690 if (insn_type != TYPE_LEA)
13694 if (prev == BB_HEAD (bb))
13696 prev = PREV_INSN (prev);
13700 if (distance < LEA_SEARCH_THRESHOLD)
13704 bool simple_loop = false;
13706 FOR_EACH_EDGE (e, ei, bb->preds)
13709 simple_loop = true;
13715 rtx prev = BB_END (bb);
13718 && distance < LEA_SEARCH_THRESHOLD)
13723 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13724 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13725 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13726 && (regno1 == DF_REF_REGNO (*def_rec)
13727 || regno2 == DF_REF_REGNO (*def_rec)))
13729 insn_type = get_attr_type (prev);
13730 if (insn_type != TYPE_LEA)
13734 prev = PREV_INSN (prev);
13742 /* get_attr_type may modify recog data. We want to make sure
13743 that recog data is valid for instruction INSN, on which
13744 distance_non_agu_define is called. INSN is unchanged here. */
13745 extract_insn_cached (insn);
13749 /* Return the distance between INSN and the next insn that uses
13750 register number REGNO0 in a memory address. Return -1 if no such
13751 use is found within LEA_SEARCH_THRESHOLD or REGNO0 is set. */
13754 distance_agu_use (unsigned int regno0, rtx insn)
13756 basic_block bb = BLOCK_FOR_INSN (insn);
13761 if (insn != BB_END (bb))
13763 rtx next = NEXT_INSN (insn);
13764 while (next && distance < LEA_SEARCH_THRESHOLD)
13770 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13771 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13772 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13773 && regno0 == DF_REF_REGNO (*use_rec))
13775 /* Return DISTANCE if OP0 is used in memory
13776 address in NEXT. */
13780 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13781 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13782 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13783 && regno0 == DF_REF_REGNO (*def_rec))
13785 /* Return -1 if OP0 is set in NEXT. */
13789 if (next == BB_END (bb))
13791 next = NEXT_INSN (next);
13795 if (distance < LEA_SEARCH_THRESHOLD)
13799 bool simple_loop = false;
13801 FOR_EACH_EDGE (e, ei, bb->succs)
13804 simple_loop = true;
13810 rtx next = BB_HEAD (bb);
13813 && distance < LEA_SEARCH_THRESHOLD)
13819 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13820 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13821 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13822 && regno0 == DF_REF_REGNO (*use_rec))
13824 /* Return DISTANCE if OP0 is used in memory
13825 address in NEXT. */
13829 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13830 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13831 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13832 && regno0 == DF_REF_REGNO (*def_rec))
13834 /* Return -1 if OP0 is set in NEXT. */
13839 next = NEXT_INSN (next);
13847 /* Define this macro to tune LEA priority vs ADD; it takes effect when
13848 there is a dilemma of choosing LEA or ADD.
13849 Negative value: ADD is preferred over LEA
13850 Zero: neutral
13851 Positive value: LEA is preferred over ADD */
13852 #define IX86_LEA_PRIORITY 2
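/* E.g. (a sketch of the trade-off applied below): with a non-AGU
   definition of an input 3 insns back, an AGU use of the result up to
   3 + IX86_LEA_PRIORITY = 5 insns ahead still favors LEA; a use farther
   away than that favors ADD.  */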
13854 /* Return true if it is ok to optimize an ADD operation to LEA
13855 operation to avoid flag register consumption. For processors
13856 like ATOM, if the destination register of LEA holds an actual
13857 address which will be used soon, LEA is better and otherwise ADD
13858 is better. */
13861 ix86_lea_for_add_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13862 rtx insn, rtx operands[])
13864 unsigned int regno0 = true_regnum (operands[0]);
13865 unsigned int regno1 = true_regnum (operands[1]);
13866 unsigned int regno2;
13868 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
13869 return regno0 != regno1;
13871 regno2 = true_regnum (operands[2]);
13873 /* If a = b + c, (a!=b && a!=c), must use lea form. */
13874 if (regno0 != regno1 && regno0 != regno2)
13878 int dist_define, dist_use;
13879 dist_define = distance_non_agu_define (regno1, regno2, insn);
13880 if (dist_define <= 0)
13883 /* If this insn has both backward non-agu dependence and forward
13884 agu dependence, the one with the shorter distance takes effect. */
13885 dist_use = distance_agu_use (regno0, insn);
13887 || (dist_define + IX86_LEA_PRIORITY) < dist_use)
13894 /* Return true if destination reg of SET_BODY is shift count of
13895 USE_BODY. */
13898 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
13904 /* Retrieve destination of SET_BODY. */
13905 switch (GET_CODE (set_body))
13908 set_dest = SET_DEST (set_body);
13909 if (!set_dest || !REG_P (set_dest))
13913 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
13914 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
13922 /* Retrieve shift count of USE_BODY. */
13923 switch (GET_CODE (use_body))
13926 shift_rtx = XEXP (use_body, 1);
13929 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
13930 if (ix86_dep_by_shift_count_body (set_body,
13931 XVECEXP (use_body, 0, i)))
13939 && (GET_CODE (shift_rtx) == ASHIFT
13940 || GET_CODE (shift_rtx) == LSHIFTRT
13941 || GET_CODE (shift_rtx) == ASHIFTRT
13942 || GET_CODE (shift_rtx) == ROTATE
13943 || GET_CODE (shift_rtx) == ROTATERT))
13945 rtx shift_count = XEXP (shift_rtx, 1);
13947 /* Return true if shift count is dest of SET_BODY. */
13948 if (REG_P (shift_count)
13949 && true_regnum (set_dest) == true_regnum (shift_count))
13956 /* Return true if destination reg of SET_INSN is shift count of
13957 USE_INSN. */
13960 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
13962 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
13963 PATTERN (use_insn));
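/* For example (a sketch), in the pair

       movl  %esi, %ecx          <- SET_INSN defines %ecx
       sall  %cl, %eax           <- USE_INSN shifts by %cl

   the destination of the first insn is the shift count of the second,
   so the function above returns true.  */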
13966 /* Return TRUE or FALSE depending on whether the unary operator meets the
13967 appropriate constraints. */
13970 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13971 enum machine_mode mode ATTRIBUTE_UNUSED,
13972 rtx operands[2] ATTRIBUTE_UNUSED)
13974 /* If one of the operands is memory, source and destination must match. */
13975 if ((MEM_P (operands[0])
13976 || MEM_P (operands[1]))
13977 && ! rtx_equal_p (operands[0], operands[1]))
13982 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
13983 are ok, keeping in mind the possible movddup alternative. */
13986 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
13988 if (MEM_P (operands[0]))
13989 return rtx_equal_p (operands[0], operands[1 + high]);
13990 if (MEM_P (operands[1]) && MEM_P (operands[2]))
13991 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
13995 /* Post-reload splitter for converting an SF or DFmode value in an
13996 SSE register into an unsigned SImode. */
13999 ix86_split_convert_uns_si_sse (rtx operands[])
14001 enum machine_mode vecmode;
14002 rtx value, large, zero_or_two31, input, two31, x;
14004 large = operands[1];
14005 zero_or_two31 = operands[2];
14006 input = operands[3];
14007 two31 = operands[4];
14008 vecmode = GET_MODE (large);
14009 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
14011 /* Load up the value into the low element. We must ensure that the other
14012 elements are valid floats -- zero is the easiest such value. */
14015 if (vecmode == V4SFmode)
14016 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
14018 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
14022 input = gen_rtx_REG (vecmode, REGNO (input));
14023 emit_move_insn (value, CONST0_RTX (vecmode));
14024 if (vecmode == V4SFmode)
14025 emit_insn (gen_sse_movss (value, value, input));
14027 emit_insn (gen_sse2_movsd (value, value, input));
14030 emit_move_insn (large, two31);
14031 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
14033 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
14034 emit_insn (gen_rtx_SET (VOIDmode, large, x));
14036 x = gen_rtx_AND (vecmode, zero_or_two31, large);
14037 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
14039 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
14040 emit_insn (gen_rtx_SET (VOIDmode, value, x));
14042 large = gen_rtx_REG (V4SImode, REGNO (large));
14043 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
14045 x = gen_rtx_REG (V4SImode, REGNO (value));
14046 if (vecmode == V4SFmode)
14047 emit_insn (gen_sse2_cvttps2dq (x, value));
14049 emit_insn (gen_sse2_cvttpd2dq (x, value));
14052 emit_insn (gen_xorv4si3 (value, value, large));
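/* In scalar terms the sequence above computes (a sketch):

       if (x >= 0x1.0p31)
         u = (unsigned) (int) (x - 0x1.0p31) ^ 0x80000000u;
       else
         u = (unsigned) (int) x;

   the compare builds the mask, the AND selects 0 or 2^31 to subtract,
   and the shifted mask xors the sign bit back in after the truncating
   conversion.  */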
14055 /* Convert an unsigned DImode value into a DFmode, using only SSE.
14056 Expects the 64-bit DImode to be supplied in a pair of integral
14057 registers. Requires SSE2; will use SSE3 if available. For x86_32,
14058 -mfpmath=sse, !optimize_size only. */
14061 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
14063 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
14064 rtx int_xmm, fp_xmm;
14065 rtx biases, exponents;
14068 int_xmm = gen_reg_rtx (V4SImode);
14069 if (TARGET_INTER_UNIT_MOVES)
14070 emit_insn (gen_movdi_to_sse (int_xmm, input));
14071 else if (TARGET_SSE_SPLIT_REGS)
14073 emit_clobber (int_xmm);
14074 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
14078 x = gen_reg_rtx (V2DImode);
14079 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
14080 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
14083 x = gen_rtx_CONST_VECTOR (V4SImode,
14084 gen_rtvec (4, GEN_INT (0x43300000UL),
14085 GEN_INT (0x45300000UL),
14086 const0_rtx, const0_rtx));
14087 exponents = validize_mem (force_const_mem (V4SImode, x));
14089 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
14090 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
14092 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
14093 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
14094 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
14095 (0x1.0p84 + double(fp_value_hi_xmm)).
14096 Note these exponents differ by 32. */
14098 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
14100 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
14101 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
14102 real_ldexp (&bias_lo_rvt, &dconst1, 52);
14103 real_ldexp (&bias_hi_rvt, &dconst1, 84);
14104 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
14105 x = const_double_from_real_value (bias_hi_rvt, DFmode);
14106 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
14107 biases = validize_mem (force_const_mem (V2DFmode, biases));
14108 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
14110 /* Add the upper and lower DFmode values together. */
14112 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
14115 x = copy_to_mode_reg (V2DFmode, fp_xmm);
14116 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
14117 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
14120 ix86_expand_vector_extract (false, target, fp_xmm, 0);
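/* In scalar terms (a sketch): after the interleave the two doubles are
   bit-exactly

       d0 = 0x1.0p52 + (double) lo32
       d1 = 0x1.0p84 + (double) hi32 * 0x1.0p32

   because the 32-bit halves land in the mantissas; subtracting the
   {2^52, 2^84} biases and adding the halves then yields (double) input.  */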
14123 /* Not used, but eases macroization of patterns. */
14125 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
14126 rtx input ATTRIBUTE_UNUSED)
14128 gcc_unreachable ();
14131 /* Convert an unsigned SImode value into a DFmode. Only currently used
14132 for SSE, but applicable anywhere. */
14135 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
14137 REAL_VALUE_TYPE TWO31r;
14140 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
14141 NULL, 1, OPTAB_DIRECT);
14143 fp = gen_reg_rtx (DFmode);
14144 emit_insn (gen_floatsidf2 (fp, x));
14146 real_ldexp (&TWO31r, &dconst1, 31);
14147 x = const_double_from_real_value (TWO31r, DFmode);
14149 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
14151 emit_move_insn (target, x);
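/* Scalar equivalent of the above (a sketch):

       double d = (double) (int) (u - 0x80000000u) + 0x1.0p31;

   the bias makes the value fit the signed floatsidf2 conversion, and
   adding 2^31 back is exact in DFmode.  */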
14154 /* Convert a signed DImode value into a DFmode. Only used for SSE in
14155 32-bit mode; otherwise we have a direct convert instruction. */
14158 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
14160 REAL_VALUE_TYPE TWO32r;
14161 rtx fp_lo, fp_hi, x;
14163 fp_lo = gen_reg_rtx (DFmode);
14164 fp_hi = gen_reg_rtx (DFmode);
14166 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
14168 real_ldexp (&TWO32r, &dconst1, 32);
14169 x = const_double_from_real_value (TWO32r, DFmode);
14170 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
14172 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
14174 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
14177 emit_move_insn (target, x);
14180 /* Convert an unsigned SImode value into a SFmode, using only SSE.
14181 For x86_32, -mfpmath=sse, !optimize_size only. */
14183 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
14185 REAL_VALUE_TYPE ONE16r;
14186 rtx fp_hi, fp_lo, int_hi, int_lo, x;
14188 real_ldexp (&ONE16r, &dconst1, 16);
14189 x = const_double_from_real_value (ONE16r, SFmode);
14190 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
14191 NULL, 0, OPTAB_DIRECT);
14192 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
14193 NULL, 0, OPTAB_DIRECT);
14194 fp_hi = gen_reg_rtx (SFmode);
14195 fp_lo = gen_reg_rtx (SFmode);
14196 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
14197 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
14198 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
14200 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
14202 if (!rtx_equal_p (target, fp_hi))
14203 emit_move_insn (target, fp_hi);
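/* Scalar equivalent of the above (a sketch):

       float f = (float) (int) (u >> 16) * 0x1.0p16f
               + (float) (int) (u & 0xffff);

   each 16-bit half converts exactly; only the final addition can round.  */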
14206 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
14207 then replicate the value for all elements of the vector
14208 register. */
14211 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
14218 v = gen_rtvec (4, value, value, value, value);
14219 return gen_rtx_CONST_VECTOR (V4SImode, v);
14223 v = gen_rtvec (2, value, value);
14224 return gen_rtx_CONST_VECTOR (V2DImode, v);
14228 v = gen_rtvec (4, value, value, value, value);
14230 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
14231 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
14232 return gen_rtx_CONST_VECTOR (V4SFmode, v);
14236 v = gen_rtvec (2, value, value);
14238 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
14239 return gen_rtx_CONST_VECTOR (V2DFmode, v);
14242 gcc_unreachable ();
14246 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
14247 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
14248 for an SSE register. If VECT is true, then replicate the mask for
14249 all elements of the vector register. If INVERT is true, then create
14250 a mask excluding the sign bit. */
14253 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
14255 enum machine_mode vec_mode, imode;
14256 HOST_WIDE_INT hi, lo;
14261 /* Find the sign bit, sign extended to 2*HWI. */
14267 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
14268 lo = 0x80000000, hi = lo < 0;
14274 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
14275 if (HOST_BITS_PER_WIDE_INT >= 64)
14276 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
14278 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14283 vec_mode = VOIDmode;
14284 if (HOST_BITS_PER_WIDE_INT >= 64)
14287 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
14294 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14298 lo = ~lo, hi = ~hi;
14304 mask = immed_double_const (lo, hi, imode);
14306 vec = gen_rtvec (2, v, mask);
14307 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
14308 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
14315 gcc_unreachable ();
14319 lo = ~lo, hi = ~hi;
14321 /* Force this value into the low part of a fp vector constant. */
14322 mask = immed_double_const (lo, hi, imode);
14323 mask = gen_lowpart (mode, mask);
14325 if (vec_mode == VOIDmode)
14326 return force_reg (mode, mask);
14328 v = ix86_build_const_vector (mode, vect, mask);
14329 return force_reg (vec_mode, v);
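/* For DFmode with VECT set, the constant built above is, schematically,

       { 0x8000000000000000, 0x8000000000000000 }

   i.e. just the sign bits; with INVERT it is the complement,
   { 0x7fffffffffffffff, ... }, the mask used to implement fabs by a
   bitwise AND (a sketch of the bit patterns involved).  */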
14332 /* Generate code for floating point ABS or NEG. */
14335 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
14338 rtx mask, set, use, clob, dst, src;
14339 bool use_sse = false;
14340 bool vector_mode = VECTOR_MODE_P (mode);
14341 enum machine_mode elt_mode = mode;
14345 elt_mode = GET_MODE_INNER (mode);
14348 else if (mode == TFmode)
14350 else if (TARGET_SSE_MATH)
14351 use_sse = SSE_FLOAT_MODE_P (mode);
14353 /* NEG and ABS performed with SSE use bitwise mask operations.
14354 Create the appropriate mask now. */
14356 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
14365 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
14366 set = gen_rtx_SET (VOIDmode, dst, set);
14371 set = gen_rtx_fmt_e (code, mode, src);
14372 set = gen_rtx_SET (VOIDmode, dst, set);
14375 use = gen_rtx_USE (VOIDmode, mask);
14376 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
14377 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14378 gen_rtvec (3, set, use, clob)));
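  /* In the use_sse case the SET built above is emitted alone and becomes
     a single mask insn, e.g. (an illustrative sketch)

	 xorps  signmask, %xmm0        <- NEG
	 andps  absmask,  %xmm0        <- ABS

     using the constants from ix86_build_signbit_mask.  */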
14385 /* Expand a copysign operation. Special case operand 0 being a constant. */
14388 ix86_expand_copysign (rtx operands[])
14390 enum machine_mode mode;
14391 rtx dest, op0, op1, mask, nmask;
14393 dest = operands[0];
14397 mode = GET_MODE (dest);
14399 if (GET_CODE (op0) == CONST_DOUBLE)
14401 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
14403 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
14404 op0 = simplify_unary_operation (ABS, mode, op0, mode);
14406 if (mode == SFmode || mode == DFmode)
14408 enum machine_mode vmode;
14410 vmode = mode == SFmode ? V4SFmode : V2DFmode;
14412 if (op0 == CONST0_RTX (mode))
14413 op0 = CONST0_RTX (vmode);
14416 rtx v = ix86_build_const_vector (mode, false, op0);
14418 op0 = force_reg (vmode, v);
14421 else if (op0 != CONST0_RTX (mode))
14422 op0 = force_reg (mode, op0);
14424 mask = ix86_build_signbit_mask (mode, 0, 0);
14426 if (mode == SFmode)
14427 copysign_insn = gen_copysignsf3_const;
14428 else if (mode == DFmode)
14429 copysign_insn = gen_copysigndf3_const;
14431 copysign_insn = gen_copysigntf3_const;
14433 emit_insn (copysign_insn (dest, op0, op1, mask));
14437 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
14439 nmask = ix86_build_signbit_mask (mode, 0, 1);
14440 mask = ix86_build_signbit_mask (mode, 0, 0);
14442 if (mode == SFmode)
14443 copysign_insn = gen_copysignsf3_var;
14444 else if (mode == DFmode)
14445 copysign_insn = gen_copysigndf3_var;
14447 copysign_insn = gen_copysigntf3_var;
14449 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
14453 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
14454 be a constant, and so has already been expanded into a vector constant. */
14457 ix86_split_copysign_const (rtx operands[])
14459 enum machine_mode mode, vmode;
14460 rtx dest, op0, mask, x;
14462 dest = operands[0];
14464 mask = operands[3];
14466 mode = GET_MODE (dest);
14467 vmode = GET_MODE (mask);
14469 dest = simplify_gen_subreg (vmode, dest, mode, 0);
14470 x = gen_rtx_AND (vmode, dest, mask);
14471 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14473 if (op0 != CONST0_RTX (vmode))
14475 x = gen_rtx_IOR (vmode, dest, op0);
14476 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14480 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
14481 so we have to do two masks. */
14484 ix86_split_copysign_var (rtx operands[])
14486 enum machine_mode mode, vmode;
14487 rtx dest, scratch, op0, op1, mask, nmask, x;
14489 dest = operands[0];
14490 scratch = operands[1];
14493 nmask = operands[4];
14494 mask = operands[5];
14496 mode = GET_MODE (dest);
14497 vmode = GET_MODE (mask);
14499 if (rtx_equal_p (op0, op1))
14501 /* Shouldn't happen often (it's useless, obviously), but when it does
14502 we'd generate incorrect code if we continue below. */
14503 emit_move_insn (dest, op0);
14507 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
14509 gcc_assert (REGNO (op1) == REGNO (scratch));
14511 x = gen_rtx_AND (vmode, scratch, mask);
14512 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14515 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14516 x = gen_rtx_NOT (vmode, dest);
14517 x = gen_rtx_AND (vmode, x, op0);
14518 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14522 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
14524 x = gen_rtx_AND (vmode, scratch, mask);
14526 else /* alternative 2,4 */
14528 gcc_assert (REGNO (mask) == REGNO (scratch));
14529 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
14530 x = gen_rtx_AND (vmode, scratch, op1);
14532 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14534 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
14536 dest = simplify_gen_subreg (vmode, op0, mode, 0);
14537 x = gen_rtx_AND (vmode, dest, nmask);
14539 else /* alternative 3,4 */
14541 gcc_assert (REGNO (nmask) == REGNO (dest));
14543 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14544 x = gen_rtx_AND (vmode, dest, op0);
14546 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14549 x = gen_rtx_IOR (vmode, dest, scratch);
14550 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
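/* Both copysign splitters implement the classic mask identity (a sketch):

       copysign (x, y) = (x & ~signmask) | (y & signmask)

   with MASK = signmask and NMASK = ~signmask prebuilt by
   ix86_build_signbit_mask.  */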
14553 /* Return TRUE or FALSE depending on whether the first SET in INSN
14554 has source and destination with matching CC modes, and that the
14555 CC mode is at least as constrained as REQ_MODE. */
14558 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
14561 enum machine_mode set_mode;
14563 set = PATTERN (insn);
14564 if (GET_CODE (set) == PARALLEL)
14565 set = XVECEXP (set, 0, 0);
14566 gcc_assert (GET_CODE (set) == SET);
14567 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
14569 set_mode = GET_MODE (SET_DEST (set));
14573 if (req_mode != CCNOmode
14574 && (req_mode != CCmode
14575 || XEXP (SET_SRC (set), 1) != const0_rtx))
14579 if (req_mode == CCGCmode)
14583 if (req_mode == CCGOCmode || req_mode == CCNOmode)
14587 if (req_mode == CCZmode)
14598 gcc_unreachable ();
14601 return (GET_MODE (SET_SRC (set)) == set_mode);
14604 /* Generate insn patterns to do an integer compare of OPERANDS. */
14607 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
14609 enum machine_mode cmpmode;
14612 cmpmode = SELECT_CC_MODE (code, op0, op1);
14613 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
14615 /* This is very simple, but making the interface the same as in the
14616 FP case makes the rest of the code easier. */
14617 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
14618 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
14620 /* Return the test that should be put into the flags user, i.e.
14621 the bcc, scc, or cmov instruction. */
14622 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
14625 /* Figure out whether to use ordered or unordered fp comparisons.
14626 Return the appropriate mode to use. */
14629 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
14631 /* ??? In order to make all comparisons reversible, we do all comparisons
14632 non-trapping when compiling for IEEE. Once gcc is able to distinguish
14633 trapping and nontrapping forms of all comparisons, we can make inequality
14634 comparisons trapping again, since it results in better code when using
14635 FCOM based compares. */
14636 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
14640 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
14642 enum machine_mode mode = GET_MODE (op0);
14644 if (SCALAR_FLOAT_MODE_P (mode))
14646 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14647 return ix86_fp_compare_mode (code);
14652 /* Only zero flag is needed. */
14653 case EQ: /* ZF=0 */
14654 case NE: /* ZF!=0 */
14656 /* Codes needing carry flag. */
14657 case GEU: /* CF=0 */
14658 case LTU: /* CF=1 */
14659 /* Detect overflow checks. They need just the carry flag. */
14660 if (GET_CODE (op0) == PLUS
14661 && rtx_equal_p (op1, XEXP (op0, 0)))
14665 case GTU: /* CF=0 & ZF=0 */
14666 case LEU: /* CF=1 | ZF=1 */
14667 /* Detect overflow checks. They need just the carry flag. */
14668 if (GET_CODE (op0) == MINUS
14669 && rtx_equal_p (op1, XEXP (op0, 0)))
14673 /* Codes possibly doable only with sign flag when
14674 comparing against zero. */
14675 case GE: /* SF=OF or SF=0 */
14676 case LT: /* SF<>OF or SF=1 */
14677 if (op1 == const0_rtx)
14680 /* For other cases the carry flag is not required. */
14682 /* Codes doable only with sign flag when comparing
14683 against zero, but we lack a jump instruction for it,
14684 so we need to use relational tests against the overflow
14685 flag, which thus needs to be zero. */
14686 case GT: /* ZF=0 & SF=OF */
14687 case LE: /* ZF=1 | SF<>OF */
14688 if (op1 == const0_rtx)
14692 /* The strcmp pattern does (use flags), and combine may ask us for a proper
14693 mode. */
14697 gcc_unreachable ();
14701 /* Return the fixed registers used for condition codes. */
14704 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
14711 /* If two condition code modes are compatible, return a condition code
14712 mode which is compatible with both. Otherwise, return
14715 static enum machine_mode
14716 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
14721 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
14724 if ((m1 == CCGCmode && m2 == CCGOCmode)
14725 || (m1 == CCGOCmode && m2 == CCGCmode))
14731 gcc_unreachable ();
14761 /* These are only compatible with themselves, which we already
14762 checked above. */
14768 /* Return a comparison we can do that is equivalent to
14769 swap_condition (code), apart possibly from orderedness.
14770 But, never change orderedness if TARGET_IEEE_FP, returning
14771 UNKNOWN in that case if necessary. */
14773 static enum rtx_code
14774 ix86_fp_swap_condition (enum rtx_code code)
14778 case GT: /* GTU - CF=0 & ZF=0 */
14779 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
14780 case GE: /* GEU - CF=0 */
14781 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
14782 case UNLT: /* LTU - CF=1 */
14783 return TARGET_IEEE_FP ? UNKNOWN : GT;
14784 case UNLE: /* LEU - CF=1 | ZF=1 */
14785 return TARGET_IEEE_FP ? UNKNOWN : GE;
14787 return swap_condition (code);
14791 /* Return the cost of comparison CODE using the best strategy for performance.
14792 All following functions use the number of instructions as a cost metric.
14793 In the future this should be tweaked to compute bytes for optimize_size and
14794 take into account the performance of various instructions on various CPUs. */
14797 ix86_fp_comparison_cost (enum rtx_code code)
14801 /* The cost of code using bit-twiddling on %ah. */
14818 arith_cost = TARGET_IEEE_FP ? 5 : 4;
14822 arith_cost = TARGET_IEEE_FP ? 6 : 4;
14825 gcc_unreachable ();
14828 switch (ix86_fp_comparison_strategy (code))
14830 case IX86_FPCMP_COMI:
14831 return arith_cost > 4 ? 3 : 2;
14832 case IX86_FPCMP_SAHF:
14833 return arith_cost > 4 ? 4 : 3;
14839 /* Return strategy to use for floating-point. We assume that fcomi is always
14840 preferable where available, since that is also true when looking at size
14841 (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test). */
14843 enum ix86_fpcmp_strategy
14844 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
14846 /* Do fcomi/sahf based test when profitable. */
14849 return IX86_FPCMP_COMI;
14851 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
14852 return IX86_FPCMP_SAHF;
14854 return IX86_FPCMP_ARITH;
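/* The three strategies correspond roughly to (illustrative sketches):

       IX86_FPCMP_COMI:   fcomi  %st(1), %st      <- sets ZF/PF/CF directly
       IX86_FPCMP_SAHF:   fucom  %st(1)
                          fnstsw %ax
                          sahf                    <- copy C3/C2/C0 to flags
       IX86_FPCMP_ARITH:  fnstsw %ax
                          test/cmp on %ah         <- bit-twiddle the status
                                                     word by hand  */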
14857 /* Swap, force into registers, or otherwise massage the two operands
14858 to a fp comparison. The operands are updated in place; the new
14859 comparison code is returned. */
14861 static enum rtx_code
14862 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
14864 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
14865 rtx op0 = *pop0, op1 = *pop1;
14866 enum machine_mode op_mode = GET_MODE (op0);
14867 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
14869 /* All of the unordered compare instructions only work on registers.
14870 The same is true of the fcomi compare instructions. The XFmode
14871 compare instructions require registers except when comparing
14872 against zero or when converting operand 1 from fixed point to
14873 floating point. */
14876 && (fpcmp_mode == CCFPUmode
14877 || (op_mode == XFmode
14878 && ! (standard_80387_constant_p (op0) == 1
14879 || standard_80387_constant_p (op1) == 1)
14880 && GET_CODE (op1) != FLOAT)
14881 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
14883 op0 = force_reg (op_mode, op0);
14884 op1 = force_reg (op_mode, op1);
14888 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
14889 things around if they appear profitable, otherwise force op0
14890 into a register. */
14892 if (standard_80387_constant_p (op0) == 0
14894 && ! (standard_80387_constant_p (op1) == 0
14897 enum rtx_code new_code = ix86_fp_swap_condition (code);
14898 if (new_code != UNKNOWN)
14901 tmp = op0, op0 = op1, op1 = tmp;
14907 op0 = force_reg (op_mode, op0);
14909 if (CONSTANT_P (op1))
14911 int tmp = standard_80387_constant_p (op1);
14913 op1 = validize_mem (force_const_mem (op_mode, op1));
14917 op1 = force_reg (op_mode, op1);
14920 op1 = force_reg (op_mode, op1);
14924 /* Try to rearrange the comparison to make it cheaper. */
14925 if (ix86_fp_comparison_cost (code)
14926 > ix86_fp_comparison_cost (swap_condition (code))
14927 && (REG_P (op1) || can_create_pseudo_p ()))
14930 tmp = op0, op0 = op1, op1 = tmp;
14931 code = swap_condition (code);
14933 op0 = force_reg (op_mode, op0);
14941 /* Convert comparison codes we use to represent FP comparison to integer
14942 code that will result in a proper branch. Return UNKNOWN if no such code
14943 is available. */
14946 ix86_fp_compare_code_to_integer (enum rtx_code code)
14975 /* Generate insn patterns to do a floating point compare of OPERANDS. */
14978 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
14980 enum machine_mode fpcmp_mode, intcmp_mode;
14983 fpcmp_mode = ix86_fp_compare_mode (code);
14984 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
14986 /* Do fcomi/sahf based test when profitable. */
14987 switch (ix86_fp_comparison_strategy (code))
14989 case IX86_FPCMP_COMI:
14990 intcmp_mode = fpcmp_mode;
14991 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14992 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14997 case IX86_FPCMP_SAHF:
14998 intcmp_mode = fpcmp_mode;
14999 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15000 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
15004 scratch = gen_reg_rtx (HImode);
15005 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
15006 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
15009 case IX86_FPCMP_ARITH:
15010 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
15011 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15012 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
15014 scratch = gen_reg_rtx (HImode);
15015 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
15017 /* In the unordered case, we have to check C2 for NaN's, which
15018 doesn't happen to work out to anything nice combination-wise.
15019 So do some bit twiddling on the value we've got in AH to come
15020 up with an appropriate set of condition codes. */
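      /* Key to the masks used below (a sketch): after fnstsw, %ah holds
	 C0 = 0x01 (carry), C2 = 0x04 (parity/NaN) and C3 = 0x40 (zero);
	 an unordered result sets all three, hence the recurring 0x45.  */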
15022 intcmp_mode = CCNOmode;
15027 if (code == GT || !TARGET_IEEE_FP)
15029 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15034 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15035 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15036 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
15037 intcmp_mode = CCmode;
15043 if (code == LT && TARGET_IEEE_FP)
15045 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15046 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
15047 intcmp_mode = CCmode;
15052 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
15058 if (code == GE || !TARGET_IEEE_FP)
15060 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
15065 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15066 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
15072 if (code == LE && TARGET_IEEE_FP)
15074 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15075 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15076 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15077 intcmp_mode = CCmode;
15082 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15088 if (code == EQ && TARGET_IEEE_FP)
15090 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15091 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15092 intcmp_mode = CCmode;
15097 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15103 if (code == NE && TARGET_IEEE_FP)
15105 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15106 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
15112 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15118 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15122 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15127 gcc_unreachable ();
15135 /* Return the test that should be put into the flags user, i.e.
15136 the bcc, scc, or cmov instruction. */
15137 return gen_rtx_fmt_ee (code, VOIDmode,
15138 gen_rtx_REG (intcmp_mode, FLAGS_REG),
15143 ix86_expand_compare (enum rtx_code code)
15146 op0 = ix86_compare_op0;
15147 op1 = ix86_compare_op1;
15149 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC)
15150 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_op0, ix86_compare_op1);
15152 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
15154 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
15155 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15158 ret = ix86_expand_int_compare (code, op0, op1);
15164 ix86_expand_branch (enum rtx_code code, rtx label)
15168 switch (GET_MODE (ix86_compare_op0))
15177 tmp = ix86_expand_compare (code);
15178 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
15179 gen_rtx_LABEL_REF (VOIDmode, label),
15181 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
15188 /* Expand DImode branch into multiple compare+branch. */
15190 rtx lo[2], hi[2], label2;
15191 enum rtx_code code1, code2, code3;
15192 enum machine_mode submode;
15194 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
15196 tmp = ix86_compare_op0;
15197 ix86_compare_op0 = ix86_compare_op1;
15198 ix86_compare_op1 = tmp;
15199 code = swap_condition (code);
15201 if (GET_MODE (ix86_compare_op0) == DImode)
15203 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
15204 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
15209 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
15210 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
15214 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
15215 avoid two branches. This costs one extra insn, so disable when
15216 optimizing for size. */
15218 if ((code == EQ || code == NE)
15219 && (!optimize_insn_for_size_p ()
15220 || hi[1] == const0_rtx || lo[1] == const0_rtx))
15225 if (hi[1] != const0_rtx)
15226 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
15227 NULL_RTX, 0, OPTAB_WIDEN);
15230 if (lo[1] != const0_rtx)
15231 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
15232 NULL_RTX, 0, OPTAB_WIDEN);
15234 tmp = expand_binop (submode, ior_optab, xor1, xor0,
15235 NULL_RTX, 0, OPTAB_WIDEN);
15237 ix86_compare_op0 = tmp;
15238 ix86_compare_op1 = const0_rtx;
15239 ix86_expand_branch (code, label);
15243 /* Otherwise, if we are doing less-than or greater-or-equal-than,
15244 op1 is a constant and the low word is zero, then we can just
15245 examine the high word. Similarly for low word -1 and
15246 less-or-equal-than or greater-than. */
15248 if (CONST_INT_P (hi[1]))
15251 case LT: case LTU: case GE: case GEU:
15252 if (lo[1] == const0_rtx)
15254 ix86_compare_op0 = hi[0];
15255 ix86_compare_op1 = hi[1];
15256 ix86_expand_branch (code, label);
15260 case LE: case LEU: case GT: case GTU:
15261 if (lo[1] == constm1_rtx)
15263 ix86_compare_op0 = hi[0];
15264 ix86_compare_op1 = hi[1];
15265 ix86_expand_branch (code, label);
15273 /* Otherwise, we need two or three jumps. */
15275 label2 = gen_label_rtx ();
15278 code2 = swap_condition (code);
15279 code3 = unsigned_condition (code);
15283 case LT: case GT: case LTU: case GTU:
15286 case LE: code1 = LT; code2 = GT; break;
15287 case GE: code1 = GT; code2 = LT; break;
15288 case LEU: code1 = LTU; code2 = GTU; break;
15289 case GEU: code1 = GTU; code2 = LTU; break;
15291 case EQ: code1 = UNKNOWN; code2 = NE; break;
15292 case NE: code2 = UNKNOWN; break;
15295 gcc_unreachable ();
15300 * if (hi(a) < hi(b)) goto true;
15301 * if (hi(a) > hi(b)) goto false;
15302 * if (lo(a) < lo(b)) goto true;
15306 ix86_compare_op0 = hi[0];
15307 ix86_compare_op1 = hi[1];
15309 if (code1 != UNKNOWN)
15310 ix86_expand_branch (code1, label);
15311 if (code2 != UNKNOWN)
15312 ix86_expand_branch (code2, label2);
15314 ix86_compare_op0 = lo[0];
15315 ix86_compare_op1 = lo[1];
15316 ix86_expand_branch (code3, label);
15318 if (code2 != UNKNOWN)
15319 emit_label (label2);
15324 /* If we have already emitted a compare insn, go straight to simple.
15325 ix86_expand_compare won't emit anything if ix86_compare_emitted
15326 is non-NULL. */
15327 gcc_assert (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC);
15332 /* Split branch based on floating point condition. */
15334 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
15335 rtx target1, rtx target2, rtx tmp, rtx pushed)
15340 if (target2 != pc_rtx)
15343 code = reverse_condition_maybe_unordered (code);
15348 condition = ix86_expand_fp_compare (code, op1, op2,
15351 /* Remove pushed operand from stack. */
15353 ix86_free_from_memory (GET_MODE (pushed));
15355 i = emit_jump_insn (gen_rtx_SET
15357 gen_rtx_IF_THEN_ELSE (VOIDmode,
15358 condition, target1, target2)));
15359 if (split_branch_probability >= 0)
15360 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
15364 ix86_expand_setcc (enum rtx_code code, rtx dest)
15368 gcc_assert (GET_MODE (dest) == QImode);
15370 ret = ix86_expand_compare (code);
15371 PUT_MODE (ret, QImode);
15372 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
15375 /* Expand comparison setting or clearing carry flag. Return true when
15376 successful and set pop for the operation. */
15378 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
15380 enum machine_mode mode =
15381 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
15383 /* Do not handle DImode compares that go through a special path. */
15384 if (mode == (TARGET_64BIT ? TImode : DImode))
15387 if (SCALAR_FLOAT_MODE_P (mode))
15389 rtx compare_op, compare_seq;
15391 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
15393 /* Shortcut: the following common codes never translate
15394 into carry flag compares. */
15395 if (code == EQ || code == NE || code == UNEQ || code == LTGT
15396 || code == ORDERED || code == UNORDERED)
15399 /* These comparisons require the zero flag; swap operands so they won't. */
15400 if ((code == GT || code == UNLE || code == LE || code == UNGT)
15401 && !TARGET_IEEE_FP)
15406 code = swap_condition (code);
15409 /* Try to expand the comparison and verify that we end up with
15410 a carry flag based comparison. This fails only when we decide
15411 to expand the comparison using arithmetic, which is not a
15412 common scenario. */
15414 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15415 compare_seq = get_insns ();
15418 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15419 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15420 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
15422 code = GET_CODE (compare_op);
15424 if (code != LTU && code != GEU)
15427 emit_insn (compare_seq);
15432 if (!INTEGRAL_MODE_P (mode))
15441 /* Convert a==0 into (unsigned)a<1. */
15444 if (op1 != const0_rtx)
15447 code = (code == EQ ? LTU : GEU);
15450 /* Convert a>b into b<a or a>=b-1. */
15453 if (CONST_INT_P (op1))
15455 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
15456 /* Bail out on overflow. We can still swap operands, but that
15457 would force loading the constant into a register. */
15458 if (op1 == const0_rtx
15459 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
15461 code = (code == GTU ? GEU : LTU);
15468 code = (code == GTU ? LTU : GEU);
15472 /* Convert a>=0 into (unsigned)a<0x80000000. */
15475 if (mode == DImode || op1 != const0_rtx)
15477 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15478 code = (code == LT ? GEU : LTU);
15482 if (mode == DImode || op1 != constm1_rtx)
15484 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15485 code = (code == LE ? GEU : LTU);
15491 /* Swapping operands may cause a constant to appear as the first operand. */
15492 if (!nonimmediate_operand (op0, VOIDmode))
15494 if (!can_create_pseudo_p ())
15496 op0 = force_reg (mode, op0);
15498 ix86_compare_op0 = op0;
15499 ix86_compare_op1 = op1;
15500 *pop = ix86_expand_compare (code);
15501 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
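/* A typical use of the above (a sketch): a == 0 becomes (unsigned) a < 1,
   which a caller can consume through the carry flag as

       cmpl  $1, %eax
       sbbl  %edx, %edx          <- %edx = (a == 0) ? -1 : 0

   without any conditional branch.  */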
15506 ix86_expand_int_movcc (rtx operands[])
15508 enum rtx_code code = GET_CODE (operands[1]), compare_code;
15509 rtx compare_seq, compare_op;
15510 enum machine_mode mode = GET_MODE (operands[0]);
15511 bool sign_bit_compare_p = false;
15514 ix86_compare_op0 = XEXP (operands[1], 0);
15515 ix86_compare_op1 = XEXP (operands[1], 1);
15516 compare_op = ix86_expand_compare (code);
15517 compare_seq = get_insns ();
15520 compare_code = GET_CODE (compare_op);
15522 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
15523 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
15524 sign_bit_compare_p = true;
15526 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
15527 HImode insns, we'd be swallowed in word prefix ops. */
15529 if ((mode != HImode || TARGET_FAST_PREFIX)
15530 && (mode != (TARGET_64BIT ? TImode : DImode))
15531 && CONST_INT_P (operands[2])
15532 && CONST_INT_P (operands[3]))
15534 rtx out = operands[0];
15535 HOST_WIDE_INT ct = INTVAL (operands[2]);
15536 HOST_WIDE_INT cf = INTVAL (operands[3]);
15537 HOST_WIDE_INT diff;
15540 /* Sign bit compares are better done using shifts than we do by using
15541 the sbb insn. */
15542 if (sign_bit_compare_p
15543 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15544 ix86_compare_op1, &compare_op))
15546 /* Detect overlap between destination and compare sources. */
15549 if (!sign_bit_compare_p)
15552 bool fpcmp = false;
15554 compare_code = GET_CODE (compare_op);
15556 flags = XEXP (compare_op, 0);
15558 if (GET_MODE (flags) == CCFPmode
15559 || GET_MODE (flags) == CCFPUmode)
15563 = ix86_fp_compare_code_to_integer (compare_code);
15566 /* To simplify the rest of the code, restrict to the GEU case. */
15567 if (compare_code == LTU)
15569 HOST_WIDE_INT tmp = ct;
15572 compare_code = reverse_condition (compare_code);
15573 code = reverse_condition (code);
15578 PUT_CODE (compare_op,
15579 reverse_condition_maybe_unordered
15580 (GET_CODE (compare_op)));
15582 PUT_CODE (compare_op,
15583 reverse_condition (GET_CODE (compare_op)));
15587 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
15588 || reg_overlap_mentioned_p (out, ix86_compare_op1))
15589 tmp = gen_reg_rtx (mode);
15591 if (mode == DImode)
15592 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
15594 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
15595 flags, compare_op));
15599 if (code == GT || code == GE)
15600 code = reverse_condition (code);
15603 HOST_WIDE_INT tmp = ct;
15608 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
15609 ix86_compare_op1, VOIDmode, 0, -1);
15622 tmp = expand_simple_binop (mode, PLUS,
15624 copy_rtx (tmp), 1, OPTAB_DIRECT);
15635 tmp = expand_simple_binop (mode, IOR,
15637 copy_rtx (tmp), 1, OPTAB_DIRECT);
15639 else if (diff == -1 && ct)
15649 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15651 tmp = expand_simple_binop (mode, PLUS,
15652 copy_rtx (tmp), GEN_INT (cf),
15653 copy_rtx (tmp), 1, OPTAB_DIRECT);
15661 * andl cf - ct, dest
15671 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15674 tmp = expand_simple_binop (mode, AND,
15676 gen_int_mode (cf - ct, mode),
15677 copy_rtx (tmp), 1, OPTAB_DIRECT);
15679 tmp = expand_simple_binop (mode, PLUS,
15680 copy_rtx (tmp), GEN_INT (ct),
15681 copy_rtx (tmp), 1, OPTAB_DIRECT);
15684 if (!rtx_equal_p (tmp, out))
15685 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
15687 return 1; /* DONE */
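/* For illustration (an assumed register assignment, not from the
   original comments): once the flags are set, selecting between two
   constants CT and CF with the carry trick looks roughly like

	cmpl	%ebx, %eax
	sbbl	%edx, %edx		; %edx = CF ? -1 : 0
	andl	$(CF_VAL - CT_VAL), %edx
	addl	$CT_VAL, %edx

   The PLUS/IOR/NOT special cases above are just cheaper variants of
   this when one constant is 0 or -1, or the difference is +/-1.  */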
15692 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15695 tmp = ct, ct = cf, cf = tmp;
15698 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15700 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15702 /* We may be reversing unordered compare to normal compare, that
15703 is not valid in general (we may convert non-trapping condition
15704 to trapping one), however on i386 we currently emit all
15705 comparisons unordered. */
15706 compare_code = reverse_condition_maybe_unordered (compare_code);
15707 code = reverse_condition_maybe_unordered (code);
15711 compare_code = reverse_condition (compare_code);
15712 code = reverse_condition (code);
15716 compare_code = UNKNOWN;
15717 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
15718 && CONST_INT_P (ix86_compare_op1))
15720 if (ix86_compare_op1 == const0_rtx
15721 && (code == LT || code == GE))
15722 compare_code = code;
15723 else if (ix86_compare_op1 == constm1_rtx)
15727 else if (code == GT)
15732 /* Optimize dest = (op0 < 0) ? -1 : cf. */
15733 if (compare_code != UNKNOWN
15734 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
15735 && (cf == -1 || ct == -1))
15737 /* If lea code below could be used, only optimize
15738 if it results in a 2 insn sequence. */
15740 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
15741 || diff == 3 || diff == 5 || diff == 9)
15742 || (compare_code == LT && ct == -1)
15743 || (compare_code == GE && cf == -1))
15746 * notl op1 (if necessary)
15754 code = reverse_condition (code);
15757 out = emit_store_flag (out, code, ix86_compare_op0,
15758 ix86_compare_op1, VOIDmode, 0, -1);
15760 out = expand_simple_binop (mode, IOR,
15762 out, 1, OPTAB_DIRECT);
15763 if (out != operands[0])
15764 emit_move_insn (operands[0], out);
15766 return 1; /* DONE */
15771 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
15772 || diff == 3 || diff == 5 || diff == 9)
15773 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
15775 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
15781 * lea cf(dest*(ct-cf)),dest
15785 * This also catches the degenerate setcc-only case.
15791 out = emit_store_flag (out, code, ix86_compare_op0,
15792 ix86_compare_op1, VOIDmode, 0, 1);
15795 /* On x86_64 the lea instruction operates on Pmode, so we need
15796 to do the arithmetic in the proper mode to match. */
15798 tmp = copy_rtx (out);
15802 out1 = copy_rtx (out);
15803 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
15807 tmp = gen_rtx_PLUS (mode, tmp, out1);
15813 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
15816 if (!rtx_equal_p (tmp, out))
15819 out = force_operand (tmp, copy_rtx (out));
15821 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
15823 if (!rtx_equal_p (out, operands[0]))
15824 emit_move_insn (operands[0], copy_rtx (out));
15826 return 1; /* DONE */
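/* Worked example of the lea path (illustrative only): for
   dest = (a < b) ? 5 : 1 we have diff = ct - cf = 4, so the emitted
   sequence is approximately

	xorl	%eax, %eax
	cmpl	%ebx, %ecx
	setl	%al			; %eax = 0 or 1
	leal	1(,%eax,4), %eax	; %eax = cf + %eax * diff

   i.e. one setcc plus one lea and no conditional jump, which is why
   diff is restricted to the lea-encodable scales 1,2,3,4,5,8,9.  */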
15830 * General case:                  Jumpful:
15831 *   xorl dest,dest               cmpl op1, op2
15832 *   cmpl op1, op2                movl ct, dest
15833 *   setcc dest                   jcc 1f
15834 *   decl dest                    movl cf, dest
15835 *   andl (cf-ct),dest            1:
15836 *   addl ct,dest
15837 *
15838 * Size 20.                       Size 14.
15839 *
15840 * This is reasonably steep, but branch mispredict costs are
15841 * high on modern cpus, so consider failing only if optimizing
15842 * for space.
15843 */
15845 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15846 && BRANCH_COST (optimize_insn_for_speed_p (),
15851 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15856 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15858 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15860 /* We may be reversing unordered compare to normal compare,
15861 that is not valid in general (we may convert non-trapping
15862 condition to trapping one), however on i386 we currently
15863 emit all comparisons unordered. */
15864 code = reverse_condition_maybe_unordered (code);
15868 code = reverse_condition (code);
15869 if (compare_code != UNKNOWN)
15870 compare_code = reverse_condition (compare_code);
15874 if (compare_code != UNKNOWN)
15876 /* notl op1 (if needed)
15877 sarl $31, op1
15878 andl (cf-ct), op1
15879 addl ct, op1
15880
15881 For x < 0 (resp. x <= -1) there will be no notl,
15882 so if possible swap the constants to get rid of the
15883 complement.
15884 True/false will be -1/0 while code below (store flag
15885 followed by decrement) is 0/-1, so the constants need
15886 to be exchanged once more. */
15888 if (compare_code == GE || !cf)
15890 code = reverse_condition (code);
15895 HOST_WIDE_INT tmp = cf;
15900 out = emit_store_flag (out, code, ix86_compare_op0,
15901 ix86_compare_op1, VOIDmode, 0, -1);
15905 out = emit_store_flag (out, code, ix86_compare_op0,
15906 ix86_compare_op1, VOIDmode, 0, 1);
15908 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
15909 copy_rtx (out), 1, OPTAB_DIRECT);
15912 out = expand_simple_binop (mode, AND, copy_rtx (out),
15913 gen_int_mode (cf - ct, mode),
15914 copy_rtx (out), 1, OPTAB_DIRECT);
15916 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
15917 copy_rtx (out), 1, OPTAB_DIRECT);
15918 if (!rtx_equal_p (out, operands[0]))
15919 emit_move_insn (operands[0], copy_rtx (out));
15921 return 1; /* DONE */
15925 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15927 /* Try a few things more with specific constants and a variable. */
15930 rtx var, orig_out, out, tmp;
15932 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
15933 return 0; /* FAIL */
15935 /* If one of the two operands is an interesting constant, load a
15936 constant with the above and mask it in with a logical operation. */
15938 if (CONST_INT_P (operands[2]))
15941 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
15942 operands[3] = constm1_rtx, op = and_optab;
15943 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
15944 operands[3] = const0_rtx, op = ior_optab;
15946 return 0; /* FAIL */
15948 else if (CONST_INT_P (operands[3]))
15951 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
15952 operands[2] = constm1_rtx, op = and_optab;
15953 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
15954 operands[2] = const0_rtx, op = ior_optab;
15956 return 0; /* FAIL */
15959 return 0; /* FAIL */
15961 orig_out = operands[0];
15962 tmp = gen_reg_rtx (mode);
15965 /* Recurse to get the constant loaded. */
15966 if (ix86_expand_int_movcc (operands) == 0)
15967 return 0; /* FAIL */
15969 /* Mask in the interesting variable. */
15970 out = expand_binop (mode, op, var, tmp, orig_out, 0,
15972 if (!rtx_equal_p (out, orig_out))
15973 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
15975 return 1; /* DONE */
15979 * For comparison with above,
15980 *
15981 *   movl cf,dest
15982 *   movl ct,tmp
15983 *   cmpl op1,op2
15984 *   cmovcc tmp,dest
15985 *
15986 * Size 15.
15987 */
15989 if (! nonimmediate_operand (operands[2], mode))
15990 operands[2] = force_reg (mode, operands[2]);
15991 if (! nonimmediate_operand (operands[3], mode))
15992 operands[3] = force_reg (mode, operands[3]);
15994 if (! register_operand (operands[2], VOIDmode)
15996 || ! register_operand (operands[3], VOIDmode)))
15997 operands[2] = force_reg (mode, operands[2]);
16000 && ! register_operand (operands[3], VOIDmode))
16001 operands[3] = force_reg (mode, operands[3]);
16003 emit_insn (compare_seq);
16004 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16005 gen_rtx_IF_THEN_ELSE (mode,
16006 compare_op, operands[2],
16009 return 1; /* DONE */
16012 /* Swap, force into registers, or otherwise massage the two operands
16013 to an sse comparison with a mask result. Thus we differ a bit from
16014 ix86_prepare_fp_compare_args which expects to produce a flags result.
16016 The DEST operand exists to help determine whether to commute commutative
16017 operators. The POP0/POP1 operands are updated in place. The new
16018 comparison code is returned, or UNKNOWN if not implementable. */
16020 static enum rtx_code
16021 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
16022 rtx *pop0, rtx *pop1)
16030 /* We have no LTGT as an operator. We could implement it with
16031 NE & ORDERED, but this requires an extra temporary. It's
16032 not clear that it's worth it. */
16039 /* These are supported directly. */
16046 /* For commutative operators, try to canonicalize the destination
16047 operand to be first in the comparison - this helps reload to
16048 avoid extra moves. */
16049 if (!dest || !rtx_equal_p (dest, *pop1))
16057 /* These are not supported directly. Swap the comparison operands
16058 to transform into something that is supported. */
16062 code = swap_condition (code);
16066 gcc_unreachable ();
16072 /* Detect conditional moves that exactly match min/max operational
16073 semantics. Note that this is IEEE safe, as long as we don't
16074 interchange the operands.
16076 Returns FALSE if this conditional move doesn't match a MIN/MAX,
16077 and TRUE if the operation is successful and instructions are emitted. */
16080 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
16081 rtx cmp_op1, rtx if_true, rtx if_false)
16083 enum machine_mode mode;
16089 else if (code == UNGE)
16092 if_true = if_false;
16098 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
16100 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
16105 mode = GET_MODE (dest);
16107 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
16108 but MODE may be a vector mode and thus not appropriate. */
16109 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
16111 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
16114 if_true = force_reg (mode, if_true);
16115 v = gen_rtvec (2, if_true, if_false);
16116 tmp = gen_rtx_UNSPEC (mode, v, u);
16120 code = is_min ? SMIN : SMAX;
16121 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
16124 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
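/* Reader's note (added; not in the original sources): SSE min/max
   instructions are not commutative -- minss returns the second source
   operand when the operands are unordered (a NaN is present) or
   compare equal (e.g. -0.0 vs. +0.0) -- which is why the matching
   above insists that IF_TRUE/IF_FALSE line up with CMP_OP0/CMP_OP1,
   and why the UNSPEC_IEEE_MIN/UNSPEC_IEEE_MAX path is taken whenever
   NaNs or signed zeros must be honored.  */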
16128 /* Expand an sse vector comparison. Return the register with the result. */
16131 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
16132 rtx op_true, rtx op_false)
16134 enum machine_mode mode = GET_MODE (dest);
16137 cmp_op0 = force_reg (mode, cmp_op0);
16138 if (!nonimmediate_operand (cmp_op1, mode))
16139 cmp_op1 = force_reg (mode, cmp_op1);
16142 || reg_overlap_mentioned_p (dest, op_true)
16143 || reg_overlap_mentioned_p (dest, op_false))
16144 dest = gen_reg_rtx (mode);
16146 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
16147 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16152 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
16153 operations. This is used for both scalar and vector conditional moves. */
16156 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
16158 enum machine_mode mode = GET_MODE (dest);
16161 if (op_false == CONST0_RTX (mode))
16163 op_true = force_reg (mode, op_true);
16164 x = gen_rtx_AND (mode, cmp, op_true);
16165 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16167 else if (op_true == CONST0_RTX (mode))
16169 op_false = force_reg (mode, op_false);
16170 x = gen_rtx_NOT (mode, cmp);
16171 x = gen_rtx_AND (mode, x, op_false);
16172 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16174 else if (TARGET_XOP)
16176 rtx pcmov = gen_rtx_SET (mode, dest,
16177 gen_rtx_IF_THEN_ELSE (mode, cmp,
16184 op_true = force_reg (mode, op_true);
16185 op_false = force_reg (mode, op_false);
16187 t2 = gen_reg_rtx (mode);
16189 t3 = gen_reg_rtx (mode);
16193 x = gen_rtx_AND (mode, op_true, cmp);
16194 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
16196 x = gen_rtx_NOT (mode, cmp);
16197 x = gen_rtx_AND (mode, x, op_false);
16198 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
16200 x = gen_rtx_IOR (mode, t3, t2);
16201 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
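/* The fallback above is the classic branchless blend; in C terms,
   assuming CMP is already an all-ones/all-zeros element mask:

     dest = (cmp & op_true) | (~cmp & op_false);

   On TARGET_XOP a single vpcmov performs the whole select, which is
   why that case is checked before falling back to AND/ANDN/OR.  */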
16205 /* Expand a floating-point conditional move. Return true if successful. */
16208 ix86_expand_fp_movcc (rtx operands[])
16210 enum machine_mode mode = GET_MODE (operands[0]);
16211 enum rtx_code code = GET_CODE (operands[1]);
16212 rtx tmp, compare_op;
16214 ix86_compare_op0 = XEXP (operands[1], 0);
16215 ix86_compare_op1 = XEXP (operands[1], 1);
16216 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
16218 enum machine_mode cmode;
16220 /* Since we have no cmove for SSE registers, don't force bad register
16221 allocation just to gain access to it. Deny movcc when the
16222 comparison mode doesn't match the move mode. */
16223 cmode = GET_MODE (ix86_compare_op0);
16224 if (cmode == VOIDmode)
16225 cmode = GET_MODE (ix86_compare_op1);
16229 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16231 &ix86_compare_op1);
16232 if (code == UNKNOWN)
16235 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
16236 ix86_compare_op1, operands[2],
16240 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
16241 ix86_compare_op1, operands[2], operands[3]);
16242 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
16246 /* The floating point conditional move instructions don't directly
16247 support conditions resulting from a signed integer comparison. */
16249 compare_op = ix86_expand_compare (code);
16250 if (!fcmov_comparison_operator (compare_op, VOIDmode))
16252 tmp = gen_reg_rtx (QImode);
16253 ix86_expand_setcc (code, tmp);
16255 ix86_compare_op0 = tmp;
16256 ix86_compare_op1 = const0_rtx;
16257 compare_op = ix86_expand_compare (code);
16260 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16261 gen_rtx_IF_THEN_ELSE (mode, compare_op,
16262 operands[2], operands[3])));
16267 /* Expand a floating-point vector conditional move; a vcond operation
16268 rather than a movcc operation. */
16271 ix86_expand_fp_vcond (rtx operands[])
16273 enum rtx_code code = GET_CODE (operands[3]);
16276 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16277 &operands[4], &operands[5]);
16278 if (code == UNKNOWN)
16281 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
16282 operands[5], operands[1], operands[2]))
16285 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
16286 operands[1], operands[2]);
16287 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
16291 /* Expand a signed/unsigned integral vector conditional move. */
16294 ix86_expand_int_vcond (rtx operands[])
16296 enum machine_mode mode = GET_MODE (operands[0]);
16297 enum rtx_code code = GET_CODE (operands[3]);
16298 bool negate = false;
16301 cop0 = operands[4];
16302 cop1 = operands[5];
16304 /* XOP supports all of the comparisons on all vector int types. */
16307 /* Canonicalize the comparison to EQ, GT, GTU. */
16318 code = reverse_condition (code);
16324 code = reverse_condition (code);
16330 code = swap_condition (code);
16331 x = cop0, cop0 = cop1, cop1 = x;
16335 gcc_unreachable ();
16338 /* Only SSE4.1/SSE4.2 supports V2DImode. */
16339 if (mode == V2DImode)
16344 /* SSE4.1 supports EQ. */
16345 if (!TARGET_SSE4_1)
16351 /* SSE4.2 supports GT/GTU. */
16352 if (!TARGET_SSE4_2)
16357 gcc_unreachable ();
16361 /* Unsigned parallel compare is not supported by the hardware.
16362 Play some tricks to turn this into a signed comparison
16363 against 0. */
16366 cop0 = force_reg (mode, cop0);
16374 rtx (*gen_sub3) (rtx, rtx, rtx);
16376 /* Subtract (-(INT MAX) - 1) from both operands to make
16377 them signed. */
16378 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
16380 gen_sub3 = (mode == V4SImode
16381 ? gen_subv4si3 : gen_subv2di3);
16382 t1 = gen_reg_rtx (mode);
16383 emit_insn (gen_sub3 (t1, cop0, mask));
16385 t2 = gen_reg_rtx (mode);
16386 emit_insn (gen_sub3 (t2, cop1, mask));
16396 /* Perform a parallel unsigned saturating subtraction. */
16397 x = gen_reg_rtx (mode);
16398 emit_insn (gen_rtx_SET (VOIDmode, x,
16399 gen_rtx_US_MINUS (mode, cop0, cop1)));
16402 cop1 = CONST0_RTX (mode);
16408 gcc_unreachable ();
16413 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
16414 operands[1+negate], operands[2-negate]);
16416 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
16417 operands[2-negate]);
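/* Worked example of the bias trick above (illustrative): in V4SImode,
   x <u y  iff  (x - 0x80000000) <s (y - 0x80000000), since
   subtracting the sign-bit mask flips each element's sign bit.
   E.g. 0xffffffff <u 0x00000001 is false; after the bias it reads
   0x7fffffff <s 0x80000001, which is equally false.  */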
16421 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
16422 true if we should do zero extension, else sign extension. HIGH_P is
16423 true if we want the N/2 high elements, else the low elements. */
16426 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16428 enum machine_mode imode = GET_MODE (operands[1]);
16429 rtx (*unpack)(rtx, rtx, rtx);
16436 unpack = gen_vec_interleave_highv16qi;
16438 unpack = gen_vec_interleave_lowv16qi;
16442 unpack = gen_vec_interleave_highv8hi;
16444 unpack = gen_vec_interleave_lowv8hi;
16448 unpack = gen_vec_interleave_highv4si;
16450 unpack = gen_vec_interleave_lowv4si;
16453 gcc_unreachable ();
16456 dest = gen_lowpart (imode, operands[0]);
16459 se = force_reg (imode, CONST0_RTX (imode));
16461 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
16462 operands[1], pc_rtx, pc_rtx);
16464 emit_insn (unpack (dest, operands[1], se));
16467 /* This function performs the same task as ix86_expand_sse_unpack,
16468 but with SSE4.1 instructions. */
16471 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16473 enum machine_mode imode = GET_MODE (operands[1]);
16474 rtx (*unpack)(rtx, rtx);
16481 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
16483 unpack = gen_sse4_1_extendv8qiv8hi2;
16487 unpack = gen_sse4_1_zero_extendv4hiv4si2;
16489 unpack = gen_sse4_1_extendv4hiv4si2;
16493 unpack = gen_sse4_1_zero_extendv2siv2di2;
16495 unpack = gen_sse4_1_extendv2siv2di2;
16498 gcc_unreachable ();
16501 dest = operands[0];
16504 /* Shift higher 8 bytes to lower 8 bytes. */
16505 src = gen_reg_rtx (imode);
16506 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, src),
16507 gen_lowpart (V1TImode, operands[1]),
16513 emit_insn (unpack (dest, src));
16516 /* Expand conditional increment or decrement using adc/sbb instructions.
16517 The default case using setcc followed by the conditional move can be
16518 done by generic code. */
16520 ix86_expand_int_addcc (rtx operands[])
16522 enum rtx_code code = GET_CODE (operands[1]);
16524 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
16526 rtx val = const0_rtx;
16527 bool fpcmp = false;
16528 enum machine_mode mode;
16530 ix86_compare_op0 = XEXP (operands[1], 0);
16531 ix86_compare_op1 = XEXP (operands[1], 1);
16532 if (operands[3] != const1_rtx
16533 && operands[3] != constm1_rtx)
16535 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
16536 ix86_compare_op1, &compare_op))
16538 code = GET_CODE (compare_op);
16540 flags = XEXP (compare_op, 0);
16542 if (GET_MODE (flags) == CCFPmode
16543 || GET_MODE (flags) == CCFPUmode)
16546 code = ix86_fp_compare_code_to_integer (code);
16553 PUT_CODE (compare_op,
16554 reverse_condition_maybe_unordered
16555 (GET_CODE (compare_op)));
16557 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
16560 mode = GET_MODE (operands[0]);
16562 /* Construct either adc or sbb insn. */
16563 if ((code == LTU) == (operands[3] == constm1_rtx))
16568 insn = gen_subqi3_carry;
16571 insn = gen_subhi3_carry;
16574 insn = gen_subsi3_carry;
16577 insn = gen_subdi3_carry;
16580 gcc_unreachable ();
16588 insn = gen_addqi3_carry;
16591 insn = gen_addhi3_carry;
16594 insn = gen_addsi3_carry;
16597 insn = gen_adddi3_carry;
16600 gcc_unreachable ();
16603 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
16605 return 1; /* DONE */
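/* Illustrative result (an assumed example): for dest = a + (b < c)
   the expander above produces roughly

	cmpl	%ecx, %ebx		; CF = (b <u c)
	adcl	$0, %eax		; %eax += CF

   and the symmetric sbbl $0 form for a conditional decrement: one
   flag-setting compare plus one adc/sbb, no setcc, no branch.  */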
16609 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
16610 works for floating point parameters and non-offsettable memories.
16611 For pushes, it returns just stack offsets; the values will be saved
16612 in the right order. At most four parts are generated. */
16615 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
16620 size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
16622 size = (GET_MODE_SIZE (mode) + 4) / 8;
16624 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
16625 gcc_assert (size >= 2 && size <= 4);
16627 /* Optimize constant pool reference to immediates. This is used by fp
16628 moves, that force all constants to memory to allow combining. */
16629 if (MEM_P (operand) && MEM_READONLY_P (operand))
16631 rtx tmp = maybe_get_pool_constant (operand);
16636 if (MEM_P (operand) && !offsettable_memref_p (operand))
16638 /* The only non-offsettable memories we handle are pushes. */
16639 int ok = push_operand (operand, VOIDmode);
16643 operand = copy_rtx (operand);
16644 PUT_MODE (operand, Pmode);
16645 parts[0] = parts[1] = parts[2] = parts[3] = operand;
16649 if (GET_CODE (operand) == CONST_VECTOR)
16651 enum machine_mode imode = int_mode_for_mode (mode);
16652 /* Caution: if we looked through a constant pool memory above,
16653 the operand may actually have a different mode now. That's
16654 ok, since we want to pun this all the way back to an integer. */
16655 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
16656 gcc_assert (operand != NULL);
16662 if (mode == DImode)
16663 split_di (&operand, 1, &parts[0], &parts[1]);
16668 if (REG_P (operand))
16670 gcc_assert (reload_completed);
16671 for (i = 0; i < size; i++)
16672 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
16674 else if (offsettable_memref_p (operand))
16676 operand = adjust_address (operand, SImode, 0);
16677 parts[0] = operand;
16678 for (i = 1; i < size; i++)
16679 parts[i] = adjust_address (operand, SImode, 4 * i);
16681 else if (GET_CODE (operand) == CONST_DOUBLE)
16686 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16690 real_to_target (l, &r, mode);
16691 parts[3] = gen_int_mode (l[3], SImode);
16692 parts[2] = gen_int_mode (l[2], SImode);
16695 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
16696 parts[2] = gen_int_mode (l[2], SImode);
16699 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
16702 gcc_unreachable ();
16704 parts[1] = gen_int_mode (l[1], SImode);
16705 parts[0] = gen_int_mode (l[0], SImode);
16708 gcc_unreachable ();
16713 if (mode == TImode)
16714 split_ti (&operand, 1, &parts[0], &parts[1]);
16715 if (mode == XFmode || mode == TFmode)
16717 enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
16718 if (REG_P (operand))
16720 gcc_assert (reload_completed);
16721 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
16722 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
16724 else if (offsettable_memref_p (operand))
16726 operand = adjust_address (operand, DImode, 0);
16727 parts[0] = operand;
16728 parts[1] = adjust_address (operand, upper_mode, 8);
16730 else if (GET_CODE (operand) == CONST_DOUBLE)
16735 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16736 real_to_target (l, &r, mode);
16738 /* Do not use shift by 32 to avoid warning on 32bit systems. */
16739 if (HOST_BITS_PER_WIDE_INT >= 64)
16742 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
16743 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
16746 parts[0] = immed_double_const (l[0], l[1], DImode);
16748 if (upper_mode == SImode)
16749 parts[1] = gen_int_mode (l[2], SImode);
16750 else if (HOST_BITS_PER_WIDE_INT >= 64)
16753 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
16754 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
16757 parts[1] = immed_double_const (l[2], l[3], DImode);
16760 gcc_unreachable ();
16767 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
16768 Return false when normal moves are needed; true when all required
16769 insns have been emitted. Operands 2-4 contain the input values
16770 in the correct order; operands 5-7 contain the output values. */
16773 ix86_split_long_move (rtx operands[])
16778 int collisions = 0;
16779 enum machine_mode mode = GET_MODE (operands[0]);
16780 bool collisionparts[4];
16782 /* The DFmode expanders may ask us to move double.
16783 For 64bit targets this is a single move. By hiding the fact
16784 here we simplify the i386.md splitters. */
16785 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
16787 /* Optimize constant pool reference to immediates. This is used by
16788 fp moves, that force all constants to memory to allow combining. */
16790 if (MEM_P (operands[1])
16791 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
16792 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
16793 operands[1] = get_pool_constant (XEXP (operands[1], 0));
16794 if (push_operand (operands[0], VOIDmode))
16796 operands[0] = copy_rtx (operands[0]);
16797 PUT_MODE (operands[0], Pmode);
16800 operands[0] = gen_lowpart (DImode, operands[0]);
16801 operands[1] = gen_lowpart (DImode, operands[1]);
16802 emit_move_insn (operands[0], operands[1]);
16806 /* The only non-offsettable memory we handle is push. */
16807 if (push_operand (operands[0], VOIDmode))
16810 gcc_assert (!MEM_P (operands[0])
16811 || offsettable_memref_p (operands[0]));
16813 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
16814 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
16816 /* When emitting push, take care for source operands on the stack. */
16817 if (push && MEM_P (operands[1])
16818 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
16820 rtx src_base = XEXP (part[1][nparts - 1], 0);
16822 /* Compensate for the stack decrement by 4. */
16823 if (!TARGET_64BIT && nparts == 3
16824 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
16825 src_base = plus_constant (src_base, 4);
16827 /* src_base refers to the stack pointer and is
16828 automatically decreased by emitted push. */
16829 for (i = 0; i < nparts; i++)
16830 part[1][i] = change_address (part[1][i],
16831 GET_MODE (part[1][i]), src_base);
16834 /* We need to do copy in the right order in case an address register
16835 of the source overlaps the destination. */
16836 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
16840 for (i = 0; i < nparts; i++)
16843 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
16844 if (collisionparts[i])
16848 /* Collision in the middle part can be handled by reordering. */
16849 if (collisions == 1 && nparts == 3 && collisionparts [1])
16851 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16852 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16854 else if (collisions == 1
16856 && (collisionparts [1] || collisionparts [2]))
16858 if (collisionparts [1])
16860 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16861 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16865 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
16866 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
16870 /* If there are more collisions, we can't handle it by reordering.
16871 Do an lea to the last part and use only one colliding move. */
16872 else if (collisions > 1)
16878 base = part[0][nparts - 1];
16880 /* Handle the case when the last part isn't valid for lea.
16881 Happens in 64-bit mode storing the 12-byte XFmode. */
16882 if (GET_MODE (base) != Pmode)
16883 base = gen_rtx_REG (Pmode, REGNO (base));
16885 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
16886 part[1][0] = replace_equiv_address (part[1][0], base);
16887 for (i = 1; i < nparts; i++)
16889 tmp = plus_constant (base, UNITS_PER_WORD * i);
16890 part[1][i] = replace_equiv_address (part[1][i], tmp);
16901 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
16902 emit_insn (gen_addsi3 (stack_pointer_rtx,
16903 stack_pointer_rtx, GEN_INT (-4)));
16904 emit_move_insn (part[0][2], part[1][2]);
16906 else if (nparts == 4)
16908 emit_move_insn (part[0][3], part[1][3]);
16909 emit_move_insn (part[0][2], part[1][2]);
16914 /* In 64bit mode we don't have a 32bit push available. In case this is
16915 a register, it is OK - we will just use the larger counterpart. We also
16916 retype memory - these come from an attempt to avoid the REX prefix on
16917 moving the second half of a TFmode value. */
16918 if (GET_MODE (part[1][1]) == SImode)
16920 switch (GET_CODE (part[1][1]))
16923 part[1][1] = adjust_address (part[1][1], DImode, 0);
16927 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
16931 gcc_unreachable ();
16934 if (GET_MODE (part[1][0]) == SImode)
16935 part[1][0] = part[1][1];
16938 emit_move_insn (part[0][1], part[1][1]);
16939 emit_move_insn (part[0][0], part[1][0]);
16943 /* Choose correct order to not overwrite the source before it is copied. */
16944 if ((REG_P (part[0][0])
16945 && REG_P (part[1][1])
16946 && (REGNO (part[0][0]) == REGNO (part[1][1])
16948 && REGNO (part[0][0]) == REGNO (part[1][2]))
16950 && REGNO (part[0][0]) == REGNO (part[1][3]))))
16952 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
16954 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
16956 operands[2 + i] = part[0][j];
16957 operands[6 + i] = part[1][j];
16962 for (i = 0; i < nparts; i++)
16964 operands[2 + i] = part[0][i];
16965 operands[6 + i] = part[1][i];
16969 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
16970 if (optimize_insn_for_size_p ())
16972 for (j = 0; j < nparts - 1; j++)
16973 if (CONST_INT_P (operands[6 + j])
16974 && operands[6 + j] != const0_rtx
16975 && REG_P (operands[2 + j]))
16976 for (i = j; i < nparts - 1; i++)
16977 if (CONST_INT_P (operands[7 + i])
16978 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
16979 operands[7 + i] = operands[2 + j];
16982 for (i = 0; i < nparts; i++)
16983 emit_move_insn (operands[2 + i], operands[6 + i]);
16988 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
16989 left shift by a constant, either using a single shift or
16990 a sequence of add instructions. */
16993 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
16997 emit_insn ((mode == DImode
16999 : gen_adddi3) (operand, operand, operand));
17001 else if (!optimize_insn_for_size_p ()
17002 && count * ix86_cost->add <= ix86_cost->shift_const)
17005 for (i = 0; i < count; i++)
17007 emit_insn ((mode == DImode
17009 : gen_adddi3) (operand, operand, operand));
17013 emit_insn ((mode == DImode
17015 : gen_ashldi3) (operand, operand, GEN_INT (count)));
17019 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
17021 rtx low[2], high[2];
17023 const int single_width = mode == DImode ? 32 : 64;
17025 if (CONST_INT_P (operands[2]))
17027 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17028 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17030 if (count >= single_width)
17032 emit_move_insn (high[0], low[1]);
17033 emit_move_insn (low[0], const0_rtx);
17035 if (count > single_width)
17036 ix86_expand_ashl_const (high[0], count - single_width, mode);
17040 if (!rtx_equal_p (operands[0], operands[1]))
17041 emit_move_insn (operands[0], operands[1]);
17042 emit_insn ((mode == DImode
17044 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
17045 ix86_expand_ashl_const (low[0], count, mode);
17050 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17052 if (operands[1] == const1_rtx)
17054 /* Assuming we've chosen QImode-capable registers, 1 << N
17055 can be done with two 32/64-bit shifts, no branches, no cmoves. */
17056 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
17058 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
17060 ix86_expand_clear (low[0]);
17061 ix86_expand_clear (high[0]);
17062 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
17064 d = gen_lowpart (QImode, low[0]);
17065 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17066 s = gen_rtx_EQ (QImode, flags, const0_rtx);
17067 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17069 d = gen_lowpart (QImode, high[0]);
17070 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17071 s = gen_rtx_NE (QImode, flags, const0_rtx);
17072 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17075 /* Otherwise, we can get the same results by manually performing
17076 a bit extract operation on bit 5/6, and then performing the two
17077 shifts. The two methods of getting 0/1 into low/high are exactly
17078 the same size. Avoiding the shift in the bit extract case helps
17079 pentium4 a bit; no one else seems to care much either way. */
17084 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
17085 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
17087 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
17088 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
17090 emit_insn ((mode == DImode
17092 : gen_lshrdi3) (high[0], high[0],
17093 GEN_INT (mode == DImode ? 5 : 6)));
17094 emit_insn ((mode == DImode
17096 : gen_anddi3) (high[0], high[0], const1_rtx));
17097 emit_move_insn (low[0], high[0]);
17098 emit_insn ((mode == DImode
17100 : gen_xordi3) (low[0], low[0], const1_rtx));
17103 emit_insn ((mode == DImode
17105 : gen_ashldi3) (low[0], low[0], operands[2]));
17106 emit_insn ((mode == DImode
17108 : gen_ashldi3) (high[0], high[0], operands[2]));
17112 if (operands[1] == constm1_rtx)
17114 /* For -1 << N, we can avoid the shld instruction, because we
17115 know that we're shifting 0...31/63 ones into a -1. */
17116 emit_move_insn (low[0], constm1_rtx);
17117 if (optimize_insn_for_size_p ())
17118 emit_move_insn (high[0], low[0]);
17120 emit_move_insn (high[0], constm1_rtx);
17124 if (!rtx_equal_p (operands[0], operands[1]))
17125 emit_move_insn (operands[0], operands[1]);
17127 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17128 emit_insn ((mode == DImode
17130 : gen_x86_64_shld) (high[0], low[0], operands[2]));
17133 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
17135 if (TARGET_CMOVE && scratch)
17137 ix86_expand_clear (scratch);
17138 emit_insn ((mode == DImode
17139 ? gen_x86_shift_adj_1
17140 : gen_x86_64_shift_adj_1) (high[0], low[0], operands[2],
17144 emit_insn ((mode == DImode
17145 ? gen_x86_shift_adj_2
17146 : gen_x86_64_shift_adj_2) (high[0], low[0], operands[2]));
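/* Sketch of the variable-count DImode path on 32-bit targets
   (illustration, not from the original comments):

	shldl	%cl, %eax, %edx	; high = high<<cl | low>>(32-cl)
	shll	%cl, %eax	; low <<= cl

   x86_shift_adj_1 (with cmov and a scratch) or x86_shift_adj_2
   (with a short branch) then fix up counts of 32..63, where the real
   result is low << (cl-32) in the high word and 0 in the low word.  */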
17150 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
17152 rtx low[2], high[2];
17154 const int single_width = mode == DImode ? 32 : 64;
17156 if (CONST_INT_P (operands[2]))
17158 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17159 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17161 if (count == single_width * 2 - 1)
17163 emit_move_insn (high[0], high[1]);
17164 emit_insn ((mode == DImode
17166 : gen_ashrdi3) (high[0], high[0],
17167 GEN_INT (single_width - 1)));
17168 emit_move_insn (low[0], high[0]);
17171 else if (count >= single_width)
17173 emit_move_insn (low[0], high[1]);
17174 emit_move_insn (high[0], low[0]);
17175 emit_insn ((mode == DImode
17177 : gen_ashrdi3) (high[0], high[0],
17178 GEN_INT (single_width - 1)));
17179 if (count > single_width)
17180 emit_insn ((mode == DImode
17182 : gen_ashrdi3) (low[0], low[0],
17183 GEN_INT (count - single_width)));
17187 if (!rtx_equal_p (operands[0], operands[1]))
17188 emit_move_insn (operands[0], operands[1]);
17189 emit_insn ((mode == DImode
17191 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17192 emit_insn ((mode == DImode
17194 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
17199 if (!rtx_equal_p (operands[0], operands[1]))
17200 emit_move_insn (operands[0], operands[1]);
17202 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17204 emit_insn ((mode == DImode
17206 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17207 emit_insn ((mode == DImode
17209 : gen_ashrdi3) (high[0], high[0], operands[2]));
17211 if (TARGET_CMOVE && scratch)
17213 emit_move_insn (scratch, high[0]);
17214 emit_insn ((mode == DImode
17216 : gen_ashrdi3) (scratch, scratch,
17217 GEN_INT (single_width - 1)));
17218 emit_insn ((mode == DImode
17219 ? gen_x86_shift_adj_1
17220 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
17224 emit_insn ((mode == DImode
17225 ? gen_x86_shift_adj_3
17226 : gen_x86_64_shift_adj_3) (low[0], high[0], operands[2]));
17231 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
17233 rtx low[2], high[2];
17235 const int single_width = mode == DImode ? 32 : 64;
17237 if (CONST_INT_P (operands[2]))
17239 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17240 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17242 if (count >= single_width)
17244 emit_move_insn (low[0], high[1]);
17245 ix86_expand_clear (high[0]);
17247 if (count > single_width)
17248 emit_insn ((mode == DImode
17250 : gen_lshrdi3) (low[0], low[0],
17251 GEN_INT (count - single_width)));
17255 if (!rtx_equal_p (operands[0], operands[1]))
17256 emit_move_insn (operands[0], operands[1]);
17257 emit_insn ((mode == DImode
17259 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17260 emit_insn ((mode == DImode
17262 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
17267 if (!rtx_equal_p (operands[0], operands[1]))
17268 emit_move_insn (operands[0], operands[1]);
17270 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17272 emit_insn ((mode == DImode
17274 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17275 emit_insn ((mode == DImode
17277 : gen_lshrdi3) (high[0], high[0], operands[2]));
17279 /* Heh. By reversing the arguments, we can reuse this pattern. */
17280 if (TARGET_CMOVE && scratch)
17282 ix86_expand_clear (scratch);
17283 emit_insn ((mode == DImode
17284 ? gen_x86_shift_adj_1
17285 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
17289 emit_insn ((mode == DImode
17290 ? gen_x86_shift_adj_2
17291 : gen_x86_64_shift_adj_2) (low[0], high[0], operands[2]));
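/* Mirror image of the ashl case above (illustrative):

	shrdl	%cl, %edx, %eax	; low = low>>cl | high<<(32-cl)
	shrl	%cl, %edx	; high >>= cl

   with the same shift_adj fixup for counts of 32..63, where the true
   result is high >> (cl-32) in the low word and 0 (or the sign copy,
   for the arithmetic variant above) in the high word.  */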
17295 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
17297 predict_jump (int prob)
17299 rtx insn = get_last_insn ();
17300 gcc_assert (JUMP_P (insn));
17301 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
17304 /* Helper function for the string operations below. Test VARIABLE whether
17305 it is aligned to VALUE bytes. If so, jump to the label. */
17307 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
17309 rtx label = gen_label_rtx ();
17310 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
17311 if (GET_MODE (variable) == DImode)
17312 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
17314 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
17315 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
17318 predict_jump (REG_BR_PROB_BASE * 50 / 100);
17320 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17324 /* Adjust COUNTREG by VALUE. */
17326 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
17328 if (GET_MODE (countreg) == DImode)
17329 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
17331 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
17334 /* Zero-extend the possibly-SImode EXP to a Pmode register. */
17336 ix86_zero_extend_to_Pmode (rtx exp)
17339 if (GET_MODE (exp) == VOIDmode)
17340 return force_reg (Pmode, exp);
17341 if (GET_MODE (exp) == Pmode)
17342 return copy_to_mode_reg (Pmode, exp);
17343 r = gen_reg_rtx (Pmode);
17344 emit_insn (gen_zero_extendsidi2 (r, exp));
17348 /* Divide COUNTREG by SCALE. */
17350 scale_counter (rtx countreg, int scale)
17356 if (CONST_INT_P (countreg))
17357 return GEN_INT (INTVAL (countreg) / scale);
17358 gcc_assert (REG_P (countreg));
17360 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
17361 GEN_INT (exact_log2 (scale)),
17362 NULL, 1, OPTAB_DIRECT);
17366 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
17367 DImode for constant loop counts. */
17369 static enum machine_mode
17370 counter_mode (rtx count_exp)
17372 if (GET_MODE (count_exp) != VOIDmode)
17373 return GET_MODE (count_exp);
17374 if (!CONST_INT_P (count_exp))
17376 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
17381 /* When SRCPTR is non-NULL, output a simple loop to move memory
17382 pointed to by SRCPTR to DESTPTR via chunks of MODE, unrolled UNROLL times;
17383 the overall size is COUNT, specified in bytes. When SRCPTR is NULL, output
17384 the equivalent loop to set memory to VALUE (supposed to be in MODE).
17386 The size is rounded down to a whole number of chunks moved at once.
17387 SRCMEM and DESTMEM provide MEM rtx to feed proper aliasing info. */
17391 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
17392 rtx destptr, rtx srcptr, rtx value,
17393 rtx count, enum machine_mode mode, int unroll,
17396 rtx out_label, top_label, iter, tmp;
17397 enum machine_mode iter_mode = counter_mode (count);
17398 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
17399 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
17405 top_label = gen_label_rtx ();
17406 out_label = gen_label_rtx ();
17407 iter = gen_reg_rtx (iter_mode);
17409 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
17410 NULL, 1, OPTAB_DIRECT);
17411 /* Those two should combine. */
17412 if (piece_size == const1_rtx)
17414 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
17416 predict_jump (REG_BR_PROB_BASE * 10 / 100);
17418 emit_move_insn (iter, const0_rtx);
17420 emit_label (top_label);
17422 tmp = convert_modes (Pmode, iter_mode, iter, true);
17423 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
17424 destmem = change_address (destmem, mode, x_addr);
17428 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
17429 srcmem = change_address (srcmem, mode, y_addr);
17431 /* When unrolling for chips that reorder memory reads and writes,
17432 we can save registers by using a single temporary.
17433 Using 4 temporaries is also overkill in 32bit mode. */
17434 if (!TARGET_64BIT && 0)
17436 for (i = 0; i < unroll; i++)
17441 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17443 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17445 emit_move_insn (destmem, srcmem);
17451 gcc_assert (unroll <= 4);
17452 for (i = 0; i < unroll; i++)
17454 tmpreg[i] = gen_reg_rtx (mode);
17458 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17460 emit_move_insn (tmpreg[i], srcmem);
17462 for (i = 0; i < unroll; i++)
17467 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17469 emit_move_insn (destmem, tmpreg[i]);
17474 for (i = 0; i < unroll; i++)
17478 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17479 emit_move_insn (destmem, value);
17482 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
17483 true, OPTAB_LIB_WIDEN);
17485 emit_move_insn (iter, tmp);
17487 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
17489 if (expected_size != -1)
17491 expected_size /= GET_MODE_SIZE (mode) * unroll;
17492 if (expected_size == 0)
17494 else if (expected_size > REG_BR_PROB_BASE)
17495 predict_jump (REG_BR_PROB_BASE - 1);
17497 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
17500 predict_jump (REG_BR_PROB_BASE * 80 / 100);
17501 iter = ix86_zero_extend_to_Pmode (iter);
17502 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
17503 true, OPTAB_LIB_WIDEN);
17504 if (tmp != destptr)
17505 emit_move_insn (destptr, tmp);
17508 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
17509 true, OPTAB_LIB_WIDEN);
17511 emit_move_insn (srcptr, tmp);
17513 emit_label (out_label);
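/* Shape of the loop emitted above, as pseudo-code (a sketch only):

     size = count & ~(piece_size - 1);
     if (size == 0) goto out;
     iter = 0;
   top:
     copy or store UNROLL chunks of MODE at dest+iter (and src+iter);
     iter += piece_size;
     if (iter < size) goto top;
     destptr += iter;  srcptr += iter;	(when a source is given)
   out:  */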
17516 /* Output "rep; mov" instruction.
17517 Arguments have the same meaning as for the previous function. */
17519 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
17520 rtx destptr, rtx srcptr,
17522 enum machine_mode mode)
17528 /* If the size is known, it is shorter to use rep movs. */
17529 if (mode == QImode && CONST_INT_P (count)
17530 && !(INTVAL (count) & 3))
17531 mode = SImode;
17533 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17534 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17535 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
17536 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
17537 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17538 if (mode != QImode)
17540 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17541 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17542 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17543 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
17544 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17545 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
17549 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17550 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
17552 if (CONST_INT_P (count))
17554 count = GEN_INT (INTVAL (count)
17555 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17556 destmem = shallow_copy_rtx (destmem);
17557 srcmem = shallow_copy_rtx (srcmem);
17558 set_mem_size (destmem, count);
17559 set_mem_size (srcmem, count);
17563 if (MEM_SIZE (destmem))
17564 set_mem_size (destmem, NULL_RTX);
17565 if (MEM_SIZE (srcmem))
17566 set_mem_size (srcmem, NULL_RTX);
17568 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
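/* Resulting instruction, for illustration: with a known count of 64
   bytes and SImode chunks the expander emits the equivalent of

	movl	$16, %ecx
	rep movsl		; 16 x 4 bytes; %esi/%edi advance

   DESTEXP/SRCEXP describe the final pointer values
   (ptr + (countreg << 2)) so the side effects are visible in rtl.  */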
17572 /* Output "rep; stos" instruction.
17573 Arguments have the same meaning as for the previous function. */
17575 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
17576 rtx count, enum machine_mode mode,
17582 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17583 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17584 value = force_reg (mode, gen_lowpart (mode, value));
17585 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17586 if (mode != QImode)
17588 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17589 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17590 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17593 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17594 if (orig_value == const0_rtx && CONST_INT_P (count))
17596 count = GEN_INT (INTVAL (count)
17597 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17598 destmem = shallow_copy_rtx (destmem);
17599 set_mem_size (destmem, count);
17601 else if (MEM_SIZE (destmem))
17602 set_mem_size (destmem, NULL_RTX);
17603 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
17607 emit_strmov (rtx destmem, rtx srcmem,
17608 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
17610 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
17611 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
17612 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17615 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
17617 expand_movmem_epilogue (rtx destmem, rtx srcmem,
17618 rtx destptr, rtx srcptr, rtx count, int max_size)
17621 if (CONST_INT_P (count))
17623 HOST_WIDE_INT countval = INTVAL (count);
17626 if ((countval & 0x10) && max_size > 16)
17630 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17631 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
17634 gcc_unreachable ();
17637 if ((countval & 0x08) && max_size > 8)
17640 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17643 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17644 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
17648 if ((countval & 0x04) && max_size > 4)
17650 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17653 if ((countval & 0x02) && max_size > 2)
17655 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
17658 if ((countval & 0x01) && max_size > 1)
17660 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
17667 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
17668 count, 1, OPTAB_DIRECT);
17669 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
17670 count, QImode, 1, 4);
17674 /* When there are stringops, we can cheaply increase dest and src pointers.
17675 Otherwise we save code size by maintaining an offset (zero is readily
17676 available from the preceding rep operation) and using x86 addressing modes. */
17678 if (TARGET_SINGLE_STRINGOP)
17682 rtx label = ix86_expand_aligntest (count, 4, true);
17683 src = change_address (srcmem, SImode, srcptr);
17684 dest = change_address (destmem, SImode, destptr);
17685 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17686 emit_label (label);
17687 LABEL_NUSES (label) = 1;
17691 rtx label = ix86_expand_aligntest (count, 2, true);
17692 src = change_address (srcmem, HImode, srcptr);
17693 dest = change_address (destmem, HImode, destptr);
17694 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17695 emit_label (label);
17696 LABEL_NUSES (label) = 1;
17700 rtx label = ix86_expand_aligntest (count, 1, true);
17701 src = change_address (srcmem, QImode, srcptr);
17702 dest = change_address (destmem, QImode, destptr);
17703 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17704 emit_label (label);
17705 LABEL_NUSES (label) = 1;
17710 rtx offset = force_reg (Pmode, const0_rtx);
17715 rtx label = ix86_expand_aligntest (count, 4, true);
17716 src = change_address (srcmem, SImode, srcptr);
17717 dest = change_address (destmem, SImode, destptr);
17718 emit_move_insn (dest, src);
17719 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
17720 true, OPTAB_LIB_WIDEN);
17722 emit_move_insn (offset, tmp);
17723 emit_label (label);
17724 LABEL_NUSES (label) = 1;
17728 rtx label = ix86_expand_aligntest (count, 2, true);
17729 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17730 src = change_address (srcmem, HImode, tmp);
17731 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17732 dest = change_address (destmem, HImode, tmp);
17733 emit_move_insn (dest, src);
17734 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
17735 true, OPTAB_LIB_WIDEN);
17737 emit_move_insn (offset, tmp);
17738 emit_label (label);
17739 LABEL_NUSES (label) = 1;
17743 rtx label = ix86_expand_aligntest (count, 1, true);
17744 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17745 src = change_address (srcmem, QImode, tmp);
17746 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17747 dest = change_address (destmem, QImode, tmp);
17748 emit_move_insn (dest, src);
17749 emit_label (label);
17750 LABEL_NUSES (label) = 1;
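/* How the constant-count branch above plays out (illustration): for
   count = 23 and max_size = 16, the tail is 23 & 15 = 7 bytes, so
   exactly one SImode, one HImode and one QImode move are emitted --
   one per set bit 4/2/1 of the remainder -- with no loop or branch.  */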
17755 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17757 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
17758 rtx count, int max_size)
17761 expand_simple_binop (counter_mode (count), AND, count,
17762 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
17763 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
17764 gen_lowpart (QImode, value), count, QImode,
17768 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17770 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
17774 if (CONST_INT_P (count))
17776 HOST_WIDE_INT countval = INTVAL (count);
17779 if ((countval & 0x10) && max_size > 16)
17783 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17784 emit_insn (gen_strset (destptr, dest, value));
17785 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
17786 emit_insn (gen_strset (destptr, dest, value));
17789 gcc_unreachable ();
17792 if ((countval & 0x08) && max_size > 8)
17796 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17797 emit_insn (gen_strset (destptr, dest, value));
17801 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17802 emit_insn (gen_strset (destptr, dest, value));
17803 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
17804 emit_insn (gen_strset (destptr, dest, value));
17808 if ((countval & 0x04) && max_size > 4)
17810 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17811 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17814 if ((countval & 0x02) && max_size > 2)
17816 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
17817 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17820 if ((countval & 0x01) && max_size > 1)
17822 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
17823 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17830 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
17835 rtx label = ix86_expand_aligntest (count, 16, true);
17838 dest = change_address (destmem, DImode, destptr);
17839 emit_insn (gen_strset (destptr, dest, value));
17840 emit_insn (gen_strset (destptr, dest, value));
17844 dest = change_address (destmem, SImode, destptr);
17845 emit_insn (gen_strset (destptr, dest, value));
17846 emit_insn (gen_strset (destptr, dest, value));
17847 emit_insn (gen_strset (destptr, dest, value));
17848 emit_insn (gen_strset (destptr, dest, value));
17850 emit_label (label);
17851 LABEL_NUSES (label) = 1;
17855 rtx label = ix86_expand_aligntest (count, 8, true);
17858 dest = change_address (destmem, DImode, destptr);
17859 emit_insn (gen_strset (destptr, dest, value));
17863 dest = change_address (destmem, SImode, destptr);
17864 emit_insn (gen_strset (destptr, dest, value));
17865 emit_insn (gen_strset (destptr, dest, value));
17867 emit_label (label);
17868 LABEL_NUSES (label) = 1;
17872 rtx label = ix86_expand_aligntest (count, 4, true);
17873 dest = change_address (destmem, SImode, destptr);
17874 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17875 emit_label (label);
17876 LABEL_NUSES (label) = 1;
17880 rtx label = ix86_expand_aligntest (count, 2, true);
17881 dest = change_address (destmem, HImode, destptr);
17882 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17883 emit_label (label);
17884 LABEL_NUSES (label) = 1;
17888 rtx label = ix86_expand_aligntest (count, 1, true);
17889 dest = change_address (destmem, QImode, destptr);
17890 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17891 emit_label (label);
17892 LABEL_NUSES (label) = 1;
17896 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN,
17897 to DESIRED_ALIGNMENT. */
17899 expand_movmem_prologue (rtx destmem, rtx srcmem,
17900 rtx destptr, rtx srcptr, rtx count,
17901 int align, int desired_alignment)
17903 if (align <= 1 && desired_alignment > 1)
17905 rtx label = ix86_expand_aligntest (destptr, 1, false);
17906 srcmem = change_address (srcmem, QImode, srcptr);
17907 destmem = change_address (destmem, QImode, destptr);
17908 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17909 ix86_adjust_counter (count, 1);
17910 emit_label (label);
17911 LABEL_NUSES (label) = 1;
17913 if (align <= 2 && desired_alignment > 2)
17915 rtx label = ix86_expand_aligntest (destptr, 2, false);
17916 srcmem = change_address (srcmem, HImode, srcptr);
17917 destmem = change_address (destmem, HImode, destptr);
17918 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17919 ix86_adjust_counter (count, 2);
17920 emit_label (label);
17921 LABEL_NUSES (label) = 1;
17923 if (align <= 4 && desired_alignment > 4)
17925 rtx label = ix86_expand_aligntest (destptr, 4, false);
17926 srcmem = change_address (srcmem, SImode, srcptr);
17927 destmem = change_address (destmem, SImode, destptr);
17928 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17929 ix86_adjust_counter (count, 4);
17930 emit_label (label);
17931 LABEL_NUSES (label) = 1;
17933 gcc_assert (desired_alignment <= 8);
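/* For illustration: with ALIGN == 1 and DESIRED_ALIGNMENT == 8 the
   prologue above emits three guarded single moves -- QImode if bit 0
   of DESTPTR is set, HImode if bit 1, SImode if bit 2 -- each one
   adjusting COUNT, so that at the join point DESTPTR is 8-byte
   aligned regardless of its original misalignment.  */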
17936 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
17937 ALIGN_BYTES is how many bytes need to be copied. */
expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
                                 int desired_align, int align_bytes)
  rtx src_size, dst_size;
  int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
  if (src_align_bytes >= 0)
    src_align_bytes = desired_align - src_align_bytes;
  src_size = MEM_SIZE (src);
  dst_size = MEM_SIZE (dst);
  if (align_bytes & 1)
      dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
      src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
      emit_insn (gen_strmov (destreg, dst, srcreg, src));
  if (align_bytes & 2)
      dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
      src = adjust_automodify_address_nv (src, HImode, srcreg, off);
      if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
        set_mem_align (dst, 2 * BITS_PER_UNIT);
      if (src_align_bytes >= 0
          && (src_align_bytes & 1) == (align_bytes & 1)
          && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
        set_mem_align (src, 2 * BITS_PER_UNIT);
      emit_insn (gen_strmov (destreg, dst, srcreg, src));
  if (align_bytes & 4)
      dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
      src = adjust_automodify_address_nv (src, SImode, srcreg, off);
      if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
        set_mem_align (dst, 4 * BITS_PER_UNIT);
      if (src_align_bytes >= 0)
          unsigned int src_align = 0;
          if ((src_align_bytes & 3) == (align_bytes & 3))
          else if ((src_align_bytes & 1) == (align_bytes & 1))
          if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
            set_mem_align (src, src_align * BITS_PER_UNIT);
      emit_insn (gen_strmov (destreg, dst, srcreg, src));
  dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
  src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
  if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
    set_mem_align (dst, desired_align * BITS_PER_UNIT);
  if (src_align_bytes >= 0)
      unsigned int src_align = 0;
      if ((src_align_bytes & 7) == (align_bytes & 7))
      else if ((src_align_bytes & 3) == (align_bytes & 3))
      else if ((src_align_bytes & 1) == (align_bytes & 1))
      if (src_align > (unsigned int) desired_align)
        src_align = desired_align;
      if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
        set_mem_align (src, src_align * BITS_PER_UNIT);
    set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
    set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
/* Store enough into DEST to align DEST, known to be aligned by ALIGN,
   to DESIRED_ALIGNMENT.  */
expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
                        int align, int desired_alignment)
  if (align <= 1 && desired_alignment > 1)
      rtx label = ix86_expand_aligntest (destptr, 1, false);
      destmem = change_address (destmem, QImode, destptr);
      emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
      ix86_adjust_counter (count, 1);
      emit_label (label);
      LABEL_NUSES (label) = 1;
  if (align <= 2 && desired_alignment > 2)
      rtx label = ix86_expand_aligntest (destptr, 2, false);
      destmem = change_address (destmem, HImode, destptr);
      emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
      ix86_adjust_counter (count, 2);
      emit_label (label);
      LABEL_NUSES (label) = 1;
  if (align <= 4 && desired_alignment > 4)
      rtx label = ix86_expand_aligntest (destptr, 4, false);
      destmem = change_address (destmem, SImode, destptr);
      emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
      ix86_adjust_counter (count, 4);
      emit_label (label);
      LABEL_NUSES (label) = 1;
  gcc_assert (desired_alignment <= 8);
/* Store enough into DST to align DST to DESIRED_ALIGN; ALIGN_BYTES is how
   many bytes need to be stored.  */
expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
                                 int desired_align, int align_bytes)
  rtx dst_size = MEM_SIZE (dst);
  if (align_bytes & 1)
      dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
      emit_insn (gen_strset (destreg, dst,
                             gen_lowpart (QImode, value)));
  if (align_bytes & 2)
      dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
      if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
        set_mem_align (dst, 2 * BITS_PER_UNIT);
      emit_insn (gen_strset (destreg, dst,
                             gen_lowpart (HImode, value)));
  if (align_bytes & 4)
      dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
      if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
        set_mem_align (dst, 4 * BITS_PER_UNIT);
      emit_insn (gen_strset (destreg, dst,
                             gen_lowpart (SImode, value)));
  dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
  if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
    set_mem_align (dst, desired_align * BITS_PER_UNIT);
    set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
/* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation.  */
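/* A worked illustration with hypothetical numbers (not taken from any real
   cost table): if the active stringop_algs entry were
     {libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}}
   then a known COUNT of 20 would pick the loop, a COUNT of 1000 would pick
   rep_prefix_4_byte, and larger or unknown sizes would fall back to a
   libcall, all subject to the ALG_USABLE_P register checks below.  */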
static enum stringop_alg
decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
            int *dynamic_check)
  const struct stringop_algs * algs;
  bool optimize_for_speed;
  /* Algorithms using the rep prefix want at least edi and ecx;
     additionally, memset wants eax and memcpy wants esi.  Don't
     consider such algorithms if the user has appropriated those
     registers for their own purposes.  */
  bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
                                 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
#define ALG_USABLE_P(alg) (rep_prefix_usable                 \
                           || (alg != rep_prefix_1_byte      \
                               && alg != rep_prefix_4_byte   \
                               && alg != rep_prefix_8_byte))
  const struct processor_costs *cost;

  /* Even if the string operation call is cold, we still might spend a lot
     of time processing large blocks.  */
  if (optimize_function_for_size_p (cfun)
      || (optimize_insn_for_size_p ()
          && expected_size != -1 && expected_size < 256))
    optimize_for_speed = false;
    optimize_for_speed = true;

  cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;

  *dynamic_check = -1;
    algs = &cost->memset[TARGET_64BIT != 0];
    algs = &cost->memcpy[TARGET_64BIT != 0];
  if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
    return stringop_alg;
  /* rep; movq or rep; movl is the smallest variant.  */
  else if (!optimize_for_speed)
      if (!count || (count & 3))
        return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
        return rep_prefix_usable ? rep_prefix_4_byte : loop;
  /* Very tiny blocks are best handled via the loop; REP is expensive to
     set up.  */
  else if (expected_size != -1 && expected_size < 4)
    return loop_1_byte;
  else if (expected_size != -1)
      enum stringop_alg alg = libcall;
      for (i = 0; i < MAX_STRINGOP_ALGS; i++)
          /* We get here if the algorithms that were not libcall-based
             were rep-prefix based and we are unable to use rep prefixes
             based on global register usage.  Break out of the loop and
             use the heuristic below.  */
          if (algs->size[i].max == 0)
          if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
              enum stringop_alg candidate = algs->size[i].alg;

              if (candidate != libcall && ALG_USABLE_P (candidate))
                  /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
                     last non-libcall inline algorithm.  */
                  if (TARGET_INLINE_ALL_STRINGOPS)
                      /* When the current size is best to be copied by a libcall,
                         but we are still forced to inline, run the heuristic below
                         that will pick code for medium sized blocks.  */
                      if (alg != libcall)
              else if (ALG_USABLE_P (candidate))
      gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
  /* When asked to inline the call anyway, try to pick a meaningful choice.
     We look for the maximal size of block that is faster to copy by hand
     and take blocks of at most that size, guessing that the average size
     will be roughly half of the block.

     If this turns out to be bad, we might simply specify the preferred
     choice in ix86_costs.  */
  if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
      && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
      enum stringop_alg alg;
      bool any_alg_usable_p = true;
      for (i = 0; i < MAX_STRINGOP_ALGS; i++)
          enum stringop_alg candidate = algs->size[i].alg;
          any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);

          if (candidate != libcall && candidate
              && ALG_USABLE_P (candidate))
            max = algs->size[i].max;
      /* If there aren't any usable algorithms, then recursing on
         smaller sizes isn't going to find anything.  Just return the
         simple byte-at-a-time copy loop.  */
      if (!any_alg_usable_p)
          /* Pick something reasonable.  */
          if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
            *dynamic_check = 128;
          return loop_1_byte;
      alg = decide_alg (count, max / 2, memset, dynamic_check);
      gcc_assert (*dynamic_check == -1);
      gcc_assert (alg != libcall);
      if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
        *dynamic_check = max;
  return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
#undef ALG_USABLE_P
/* Decide on alignment.  We know that the operand is already aligned to ALIGN
   (ALIGN can be based on profile feedback and thus it is not 100% guaranteed).  */
decide_alignment (int align,
                  enum stringop_alg alg,
  int desired_align = 0;
      gcc_unreachable ();
    case unrolled_loop:
      desired_align = GET_MODE_SIZE (Pmode);
    case rep_prefix_8_byte:
    case rep_prefix_4_byte:
      /* PentiumPro has special logic triggering for 8-byte aligned blocks,
         copying whole cache lines at once.  */
      if (TARGET_PENTIUMPRO)
    case rep_prefix_1_byte:
      /* PentiumPro has special logic triggering for 8-byte aligned blocks,
         copying whole cache lines at once.  */
      if (TARGET_PENTIUMPRO)
  if (desired_align < align)
    desired_align = align;
  if (expected_size != -1 && expected_size < 4)
    desired_align = align;
  return desired_align;
/* Return the smallest power of 2 greater than VAL.  */
smallest_pow2_greater_than (int val)
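/* The body is elided here; a minimal sketch matching the comment above
   (an assumption about the original, not a verified copy) would be:

     int ret = 1;
     while (ret <= val)
       ret <<= 1;
     return ret;

   so that e.g. VAL == 4 yields 8 and VAL == 0 yields 1.  */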
/* Expand string move (memcpy) operation.  Use i386 string operations when
   profitable.  expand_setmem contains similar code.  The code depends upon
   architecture, block size and alignment, but always has the same
   overall structure:

   1) Prologue guard: Conditional that jumps up to the epilogues for small
      blocks that can be handled by the epilogue alone.  This is faster but
      also needed for correctness, since the prologue assumes the block is
      larger than the desired alignment.

      An optional dynamic check for size and a libcall for large
      blocks is emitted here too, with -minline-stringops-dynamically.

   2) Prologue: copy the first few bytes in order to get the destination
      aligned to DESIRED_ALIGN.  It is emitted only when ALIGN is less than
      DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
      We emit either a jump tree on power-of-two-sized blocks, or a byte loop.

   3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
      with the specified algorithm.

   4) Epilogue: code copying the tail of the block that is too small to be
      handled by the main body (or up to size guarded by the prologue
      guard).  */
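/* A walk-through with illustrative numbers (not tied to any one target):
   for a memcpy with unknown size, ALIGN == 1, DESIRED_ALIGN == 4 and a
   main body moving 16-byte chunks, step 1 jumps straight to the epilogue
   when COUNT < 16, step 2 copies at most 3 bytes to align the destination,
   step 3 moves 16 bytes per iteration, and step 4 handles the final
   COUNT & 15 bytes.  */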
ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
                    rtx expected_align_exp, rtx expected_size_exp)
  rtx jump_around_label = NULL;
  HOST_WIDE_INT align = 1;
  unsigned HOST_WIDE_INT count = 0;
  HOST_WIDE_INT expected_size = -1;
  int size_needed = 0, epilogue_size_needed;
  int desired_align = 0, align_bytes = 0;
  enum stringop_alg alg;
  bool need_zero_guard = false;

  if (CONST_INT_P (align_exp))
    align = INTVAL (align_exp);
  /* i386 can do misaligned access at a reasonably increased cost.  */
  if (CONST_INT_P (expected_align_exp)
      && INTVAL (expected_align_exp) > align)
    align = INTVAL (expected_align_exp);
  /* ALIGN is the minimum of destination and source alignment, but we care here
     just about destination alignment.  */
  else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
    align = MEM_ALIGN (dst) / BITS_PER_UNIT;

  if (CONST_INT_P (count_exp))
    count = expected_size = INTVAL (count_exp);
  if (CONST_INT_P (expected_size_exp) && count == 0)
    expected_size = INTVAL (expected_size_exp);

  /* Make sure we don't need to care about overflow later on.  */
  if (count > ((unsigned HOST_WIDE_INT) 1 << 30))

  /* Step 0: Decide on preferred algorithm, desired alignment and
     size of chunks to be copied by main loop.  */

  alg = decide_alg (count, expected_size, false, &dynamic_check);
  desired_align = decide_alignment (align, alg, expected_size);

  if (!TARGET_ALIGN_STRINGOPS)
    align = desired_align;

  if (alg == libcall)
  gcc_assert (alg != no_stringop);
    count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
  destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
  srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
      gcc_unreachable ();
      need_zero_guard = true;
      size_needed = GET_MODE_SIZE (Pmode);
    case unrolled_loop:
      need_zero_guard = true;
      size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
    case rep_prefix_8_byte:
    case rep_prefix_4_byte:
    case rep_prefix_1_byte:
      need_zero_guard = true;
  epilogue_size_needed = size_needed;

  /* Step 1: Prologue guard.  */

  /* Alignment code needs count to be in register.  */
  if (CONST_INT_P (count_exp) && desired_align > align)
      if (INTVAL (count_exp) > desired_align
          && INTVAL (count_exp) > size_needed)
          = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
        if (align_bytes <= 0)
          align_bytes = desired_align - align_bytes;
      if (align_bytes == 0)
        count_exp = force_reg (counter_mode (count_exp), count_exp);
  gcc_assert (desired_align >= 1 && align >= 1);

  /* Ensure that alignment prologue won't copy past end of block.  */
  if (size_needed > 1 || (desired_align > 1 && desired_align > align))
      epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
      /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
         Make sure it is a power of 2.  */
      epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
  if (count < (unsigned HOST_WIDE_INT) epilogue_size_needed)
      /* If main algorithm works on QImode, no epilogue is needed.
         For small sizes just don't align anything.  */
      if (size_needed == 1)
        desired_align = align;
      label = gen_label_rtx ();
      emit_cmp_and_jump_insns (count_exp,
                               GEN_INT (epilogue_size_needed),
                               LTU, 0, counter_mode (count_exp), 1, label);
      if (expected_size == -1 || expected_size < epilogue_size_needed)
        predict_jump (REG_BR_PROB_BASE * 60 / 100);
        predict_jump (REG_BR_PROB_BASE * 20 / 100);
  /* Emit code to decide at runtime whether a library call or inline code
     should be used.  */
  if (dynamic_check != -1)
      if (CONST_INT_P (count_exp))
          if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT) dynamic_check)
              emit_block_move_via_libcall (dst, src, count_exp, false);
              count_exp = const0_rtx;
          rtx hot_label = gen_label_rtx ();
          jump_around_label = gen_label_rtx ();
          emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
                                   LEU, 0, GET_MODE (count_exp), 1, hot_label);
          predict_jump (REG_BR_PROB_BASE * 90 / 100);
          emit_block_move_via_libcall (dst, src, count_exp, false);
          emit_jump (jump_around_label);
          emit_label (hot_label);

  /* Step 2: Alignment prologue.  */

  if (desired_align > align)
      if (align_bytes == 0)
          /* Except for the first move in epilogue, we no longer know
             constant offset in aliasing info.  It doesn't seem worth
             the pain to maintain it for the first move, so throw away
             the info early.  */
          src = change_address (src, BLKmode, srcreg);
          dst = change_address (dst, BLKmode, destreg);
          expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
          /* If we know how many bytes need to be stored before dst is
             sufficiently aligned, maintain aliasing info accurately.  */
          dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
                                                 desired_align, align_bytes);
          count_exp = plus_constant (count_exp, -align_bytes);
          count -= align_bytes;
      if (need_zero_guard
          && (count < (unsigned HOST_WIDE_INT) size_needed
              || (align_bytes == 0
                  && count < ((unsigned HOST_WIDE_INT) size_needed
                              + desired_align - align))))
          /* It is possible that we copied enough so the main loop will not
             execute.  */
          gcc_assert (size_needed > 1);
          if (label == NULL_RTX)
            label = gen_label_rtx ();
          emit_cmp_and_jump_insns (count_exp,
                                   GEN_INT (size_needed),
                                   LTU, 0, counter_mode (count_exp), 1, label);
          if (expected_size == -1
              || expected_size < (desired_align - align) / 2 + size_needed)
            predict_jump (REG_BR_PROB_BASE * 20 / 100);
            predict_jump (REG_BR_PROB_BASE * 60 / 100);
  if (label && size_needed == 1)
      emit_label (label);
      LABEL_NUSES (label) = 1;
      epilogue_size_needed = 1;
  else if (label == NULL_RTX)
    epilogue_size_needed = size_needed;

  /* Step 3: Main loop.  */

      gcc_unreachable ();
      expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
                                     count_exp, QImode, 1, expected_size);
      expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
                                     count_exp, Pmode, 1, expected_size);
    case unrolled_loop:
      /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
         registers for 4 temporaries anyway.  */
      expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
                                     count_exp, Pmode, TARGET_64BIT ? 4 : 2,
    case rep_prefix_8_byte:
      expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
    case rep_prefix_4_byte:
      expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
    case rep_prefix_1_byte:
      expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
  /* Properly adjust the offsets of the src and dest memory for aliasing.  */
  if (CONST_INT_P (count_exp))
      src = adjust_automodify_address_nv (src, BLKmode, srcreg,
                                          (count / size_needed) * size_needed);
      dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
                                          (count / size_needed) * size_needed);
      src = change_address (src, BLKmode, srcreg);
      dst = change_address (dst, BLKmode, destreg);

  /* Step 4: Epilogue to copy the remaining bytes.  */
      /* When the main loop is done, COUNT_EXP might hold the original count,
         while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
         Epilogue code will actually copy
         COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.  Compensate if
         needed.  */
      if (size_needed < epilogue_size_needed)
          expand_simple_binop (counter_mode (count_exp), AND, count_exp,
                               GEN_INT (size_needed - 1), count_exp, 1,
          if (tmp != count_exp)
            emit_move_insn (count_exp, tmp);
      emit_label (label);
      LABEL_NUSES (label) = 1;
  if (count_exp != const0_rtx && epilogue_size_needed > 1)
    expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
                            epilogue_size_needed);
  if (jump_around_label)
    emit_label (jump_around_label);
/* Helper function for memset.  For a QImode value 0xXY produce
   0xXYXYXYXY of the width specified by MODE.  This is essentially
   a multiplication by 0x01010101, but we can do slightly better than
   synth_mult by unwinding the sequence by hand on CPUs with
   slow multiply.  */
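/* Concretely, for VAL == 0xAB and MODE == SImode the shift/IOR ladder
   emitted below computes (a worked example, not generated text):

     reg = 0x000000AB
     reg |= reg << 8;    now 0x0000ABAB
     reg |= reg << 16;   now 0xABABABAB

   and for DImode one further "reg |= reg << 32" step duplicates the byte
   across all eight positions.  */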
promote_duplicated_reg (enum machine_mode mode, rtx val)
  enum machine_mode valmode = GET_MODE (val);
  int nops = mode == DImode ? 3 : 2;

  gcc_assert (mode == SImode || mode == DImode);
  if (val == const0_rtx)
    return copy_to_mode_reg (mode, const0_rtx);
  if (CONST_INT_P (val))
      HOST_WIDE_INT v = INTVAL (val) & 255;

      /* Spread the low byte into all byte positions.  */
      v |= v << 8;
      v |= v << 16;
      if (mode == DImode)
        v |= (v << 16) << 16;
      return copy_to_mode_reg (mode, gen_int_mode (v, mode));
  if (valmode == VOIDmode)
  if (valmode != QImode)
    val = gen_lowpart (QImode, val);
  if (mode == QImode)
  if (!TARGET_PARTIAL_REG_STALL)
  if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
      + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
      <= (ix86_cost->shift_const + ix86_cost->add) * nops
         + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
      rtx reg = convert_modes (mode, QImode, val, true);
      tmp = promote_duplicated_reg (mode, const1_rtx);
      return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
      rtx reg = convert_modes (mode, QImode, val, true);

      if (!TARGET_PARTIAL_REG_STALL)
        if (mode == SImode)
          emit_insn (gen_movsi_insv_1 (reg, reg));
          emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
          tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
                                     NULL, 1, OPTAB_DIRECT);
            expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
      tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
                                 NULL, 1, OPTAB_DIRECT);
      reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
      if (mode == SImode)
        return reg;
      tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
                                 NULL, 1, OPTAB_DIRECT);
      reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
/* Duplicate value VAL using promote_duplicated_reg into the maximal size
   that will be needed by the main loop storing SIZE_NEEDED chunks and by
   the prologue getting the alignment from ALIGN to DESIRED_ALIGN.  */
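/* For example, following the conditions below: SIZE_NEEDED == 8 on a
   64-bit target promotes VAL to DImode; SIZE_NEEDED == 4, or a prologue
   that must reach DESIRED_ALIGN == 4 from a smaller ALIGN, promotes to
   SImode; a 2-byte case promotes to HImode; otherwise VAL is used as-is.  */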
promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
      && (size_needed > 4 || (desired_align > align && desired_align > 4)))
    promoted_val = promote_duplicated_reg (DImode, val);
  else if (size_needed > 2 || (desired_align > align && desired_align > 2))
    promoted_val = promote_duplicated_reg (SImode, val);
  else if (size_needed > 1 || (desired_align > align && desired_align > 1))
    promoted_val = promote_duplicated_reg (HImode, val);
    promoted_val = val;

  return promoted_val;
/* Expand string set operation (memset).  Use i386 string operations when
   profitable.  See the expand_movmem comment for an explanation of the
   individual steps performed.  */
ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
                    rtx expected_align_exp, rtx expected_size_exp)
  rtx jump_around_label = NULL;
  HOST_WIDE_INT align = 1;
  unsigned HOST_WIDE_INT count = 0;
  HOST_WIDE_INT expected_size = -1;
  int size_needed = 0, epilogue_size_needed;
  int desired_align = 0, align_bytes = 0;
  enum stringop_alg alg;
  rtx promoted_val = NULL;
  bool force_loopy_epilogue = false;
  bool need_zero_guard = false;

  if (CONST_INT_P (align_exp))
    align = INTVAL (align_exp);
  /* i386 can do misaligned access at a reasonably increased cost.  */
  if (CONST_INT_P (expected_align_exp)
      && INTVAL (expected_align_exp) > align)
    align = INTVAL (expected_align_exp);
  if (CONST_INT_P (count_exp))
    count = expected_size = INTVAL (count_exp);
  if (CONST_INT_P (expected_size_exp) && count == 0)
    expected_size = INTVAL (expected_size_exp);

  /* Make sure we don't need to care about overflow later on.  */
  if (count > ((unsigned HOST_WIDE_INT) 1 << 30))

  /* Step 0: Decide on preferred algorithm, desired alignment and
     size of chunks to be copied by main loop.  */

  alg = decide_alg (count, expected_size, true, &dynamic_check);
  desired_align = decide_alignment (align, alg, expected_size);

  if (!TARGET_ALIGN_STRINGOPS)
    align = desired_align;

  if (alg == libcall)
  gcc_assert (alg != no_stringop);
    count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
  destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
      gcc_unreachable ();
      need_zero_guard = true;
      size_needed = GET_MODE_SIZE (Pmode);
    case unrolled_loop:
      need_zero_guard = true;
      size_needed = GET_MODE_SIZE (Pmode) * 4;
    case rep_prefix_8_byte:
    case rep_prefix_4_byte:
    case rep_prefix_1_byte:
      need_zero_guard = true;
  epilogue_size_needed = size_needed;

  /* Step 1: Prologue guard.  */

  /* Alignment code needs count to be in register.  */
  if (CONST_INT_P (count_exp) && desired_align > align)
      if (INTVAL (count_exp) > desired_align
          && INTVAL (count_exp) > size_needed)
          = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
        if (align_bytes <= 0)
          align_bytes = desired_align - align_bytes;
      if (align_bytes == 0)
          enum machine_mode mode = SImode;
          if (TARGET_64BIT && (count & ~0xffffffff))
          count_exp = force_reg (mode, count_exp);
  /* Do the cheap promotion to allow better CSE across the
     main loop and epilogue (i.e. one load of the big constant in
     front of all code).  */
  if (CONST_INT_P (val_exp))
    promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
                                                   desired_align, align);
  /* Ensure that alignment prologue won't copy past end of block.  */
  if (size_needed > 1 || (desired_align > 1 && desired_align > align))
      epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
      /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
         Make sure it is a power of 2.  */
      epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
      /* To improve performance of small blocks, we jump around the VAL
         promoting code.  This means that if the promoted VAL is not
         constant, we might not use it in the epilogue and have to use the
         byte loop variant instead.  */
      if (epilogue_size_needed > 2 && !promoted_val)
        force_loopy_epilogue = true;
  if (count < (unsigned HOST_WIDE_INT) epilogue_size_needed)
      /* If main algorithm works on QImode, no epilogue is needed.
         For small sizes just don't align anything.  */
      if (size_needed == 1)
        desired_align = align;
      label = gen_label_rtx ();
      emit_cmp_and_jump_insns (count_exp,
                               GEN_INT (epilogue_size_needed),
                               LTU, 0, counter_mode (count_exp), 1, label);
      if (expected_size == -1 || expected_size <= epilogue_size_needed)
        predict_jump (REG_BR_PROB_BASE * 60 / 100);
        predict_jump (REG_BR_PROB_BASE * 20 / 100);
  if (dynamic_check != -1)
      rtx hot_label = gen_label_rtx ();
      jump_around_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
                               LEU, 0, counter_mode (count_exp), 1, hot_label);
      predict_jump (REG_BR_PROB_BASE * 90 / 100);
      set_storage_via_libcall (dst, count_exp, val_exp, false);
      emit_jump (jump_around_label);
      emit_label (hot_label);

  /* Step 2: Alignment prologue.  */

  /* Do the expensive promotion once we branched off the small blocks.  */
    promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
                                                   desired_align, align);
  gcc_assert (desired_align >= 1 && align >= 1);

  if (desired_align > align)
      if (align_bytes == 0)
          /* Except for the first move in epilogue, we no longer know
             constant offset in aliasing info.  It doesn't seem worth
             the pain to maintain it for the first move, so throw away
             the info early.  */
          dst = change_address (dst, BLKmode, destreg);
          expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
          /* If we know how many bytes need to be stored before dst is
             sufficiently aligned, maintain aliasing info accurately.  */
          dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
                                                 desired_align, align_bytes);
          count_exp = plus_constant (count_exp, -align_bytes);
          count -= align_bytes;
      if (need_zero_guard
          && (count < (unsigned HOST_WIDE_INT) size_needed
              || (align_bytes == 0
                  && count < ((unsigned HOST_WIDE_INT) size_needed
                              + desired_align - align))))
          /* It is possible that we copied enough so the main loop will not
             execute.  */
          gcc_assert (size_needed > 1);
          if (label == NULL_RTX)
            label = gen_label_rtx ();
          emit_cmp_and_jump_insns (count_exp,
                                   GEN_INT (size_needed),
                                   LTU, 0, counter_mode (count_exp), 1, label);
          if (expected_size == -1
              || expected_size < (desired_align - align) / 2 + size_needed)
            predict_jump (REG_BR_PROB_BASE * 20 / 100);
            predict_jump (REG_BR_PROB_BASE * 60 / 100);
  if (label && size_needed == 1)
      emit_label (label);
      LABEL_NUSES (label) = 1;
      promoted_val = val_exp;
      epilogue_size_needed = 1;
  else if (label == NULL_RTX)
    epilogue_size_needed = size_needed;

  /* Step 3: Main loop.  */

      gcc_unreachable ();
      expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
                                     count_exp, QImode, 1, expected_size);
      expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
                                     count_exp, Pmode, 1, expected_size);
    case unrolled_loop:
      expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
                                     count_exp, Pmode, 4, expected_size);
    case rep_prefix_8_byte:
      expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
    case rep_prefix_4_byte:
      expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
    case rep_prefix_1_byte:
      expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
  /* Properly adjust the offset of the dest memory for aliasing.  */
  if (CONST_INT_P (count_exp))
    dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
                                        (count / size_needed) * size_needed);
    dst = change_address (dst, BLKmode, destreg);

  /* Step 4: Epilogue to copy the remaining bytes.  */
      /* When the main loop is done, COUNT_EXP might hold the original count,
         while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
         Epilogue code will actually copy
         COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.  Compensate if
         needed.  */
      if (size_needed < epilogue_size_needed)
          expand_simple_binop (counter_mode (count_exp), AND, count_exp,
                               GEN_INT (size_needed - 1), count_exp, 1,
          if (tmp != count_exp)
            emit_move_insn (count_exp, tmp);
      emit_label (label);
      LABEL_NUSES (label) = 1;
  if (count_exp != const0_rtx && epilogue_size_needed > 1)
      if (force_loopy_epilogue)
        expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
                                         epilogue_size_needed);
        expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
                                epilogue_size_needed);
  if (jump_around_label)
    emit_label (jump_around_label);
/* Expand the appropriate insns for doing strlen if not just doing
   repnz; scasb

   out = result, initialized with the start address
   align_rtx = alignment of the address.
   scratch = scratch register, initialized with the start address when
             not aligned, otherwise undefined

   This is just the body.  It needs the initializations mentioned above and
   some address computing at the end.  These things are done in i386.md.  */
ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
  rtx align_2_label = NULL_RTX;
  rtx align_3_label = NULL_RTX;
  rtx align_4_label = gen_label_rtx ();
  rtx end_0_label = gen_label_rtx ();
  rtx tmpreg = gen_reg_rtx (SImode);
  rtx scratch = gen_reg_rtx (SImode);

  if (CONST_INT_P (align_rtx))
    align = INTVAL (align_rtx);

  /* Loop to check 1..3 bytes for null to get an aligned pointer.  */

  /* Is there a known alignment and is it less than 4?  */
      rtx scratch1 = gen_reg_rtx (Pmode);
      emit_move_insn (scratch1, out);
      /* Is there a known alignment and is it not 2? */
          align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
          align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */

          /* Leave just the 3 lower bits.  */
          align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
                                    NULL_RTX, 0, OPTAB_WIDEN);

          emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
                                   Pmode, 1, align_4_label);
          emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
                                   Pmode, 1, align_2_label);
          emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
                                   Pmode, 1, align_3_label);
          /* Since the alignment is 2, we have to check 2 or 0 bytes;
             check whether it is aligned to a 4-byte boundary.  */
          align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
                                    NULL_RTX, 0, OPTAB_WIDEN);

          emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
                                   Pmode, 1, align_4_label);
      mem = change_address (src, QImode, out);

      /* Now compare the bytes.  */
      /* Compare the first n unaligned bytes on a byte-by-byte basis.  */
      emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
                               QImode, 1, end_0_label);

      /* Increment the address.  */
      emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));

      /* Not needed with an alignment of 2.  */
          emit_label (align_2_label);
          emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
          emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
          emit_label (align_3_label);
      emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
      emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
  /* Generate the loop to check 4 bytes at a time.  It is not a good idea
     to align this loop: it only makes programs bigger and does not help
     them run faster.  */
  emit_label (align_4_label);

  mem = change_address (src, SImode, out);
  emit_move_insn (scratch, mem);
  emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
  /* This formula yields a nonzero result iff one of the bytes is zero.
     This saves three branches inside the loop and many cycles.  */
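/* A worked example of the test emitted below, which computes
   (v - 0x01010101) & ~v & 0x80808080 (sketched here in plain arithmetic):
   for v == 0x12003456 the subtraction borrows across the zero byte,
   giving 0x10FF3355; ~v == 0xEDFFCBA9, and ANDing in the mask leaves
   0x00800000, which is nonzero, so a zero byte was found.  For
   v == 0x11223344 the same computation yields 0 and the loop goes on.  */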
  emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
  emit_insn (gen_one_cmplsi2 (scratch, scratch));
  emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
  emit_insn (gen_andsi3 (tmpreg, tmpreg,
                         gen_int_mode (0x80808080, SImode)));
  emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
      rtx reg = gen_reg_rtx (SImode);
      rtx reg2 = gen_reg_rtx (Pmode);
      emit_move_insn (reg, tmpreg);
      emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));

      /* If zero is not in the first two bytes, move two bytes forward.  */
      emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
      tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
                              gen_rtx_IF_THEN_ELSE (SImode, tmp,

      /* Emit lea manually to avoid clobbering of flags.  */
      emit_insn (gen_rtx_SET (SImode, reg2,
                              gen_rtx_PLUS (Pmode, out, const2_rtx)));

      tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, out,
                              gen_rtx_IF_THEN_ELSE (Pmode, tmp,
      rtx end_2_label = gen_label_rtx ();
      /* Is zero in the first two bytes? */

      emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
      tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
      tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                                  gen_rtx_LABEL_REF (VOIDmode, end_2_label),
      tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
      JUMP_LABEL (tmp) = end_2_label;

      /* Not in the first two.  Move two bytes forward.  */
      emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
      emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));

      emit_label (end_2_label);
  /* Avoid a branch in fixing the byte.  */
  tmpreg = gen_lowpart (QImode, tmpreg);
  emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
  tmp = gen_rtx_REG (CCmode, FLAGS_REG);
  cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
  emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), tmp, cmp));

  emit_label (end_0_label);

/* Expand strlen.  */

ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
  rtx addr, scratch1, scratch2, scratch3, scratch4;
  /* The generic case of the strlen expander is long.  Avoid expanding it
     unless TARGET_INLINE_ALL_STRINGOPS.  */
  if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
      && !TARGET_INLINE_ALL_STRINGOPS
      && !optimize_insn_for_size_p ()
      && (!CONST_INT_P (align) || INTVAL (align) < 4))
  addr = force_reg (Pmode, XEXP (src, 0));
  scratch1 = gen_reg_rtx (Pmode);

  if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
      && !optimize_insn_for_size_p ())
      /* Well it seems that some optimizer does not combine a call like
         foo (strlen (bar), strlen (bar));
         when the move and the subtraction are done here.  It does calculate
         the length just once when these instructions are done inside of
         output_strlen_unroll ().  But I think since &bar[strlen (bar)] is
         often used and I use one fewer register for the lifetime of
         output_strlen_unroll () this is better.  */
      emit_move_insn (out, addr);

      ix86_expand_strlensi_unroll_1 (out, src, align);

      /* strlensi_unroll_1 returns the address of the zero at the end of
         the string, like memchr (), so compute the length by subtracting
         the start address.  */
      emit_insn ((*ix86_gen_sub3) (out, out, addr));
      /* Can't use this if the user has appropriated eax, ecx, or edi.  */
      if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
      scratch2 = gen_reg_rtx (Pmode);
      scratch3 = gen_reg_rtx (Pmode);
      scratch4 = force_reg (Pmode, constm1_rtx);

      emit_move_insn (scratch3, addr);
      eoschar = force_reg (QImode, eoschar);

      src = replace_equiv_address_nv (src, scratch3);

      /* If .md starts supporting :P, this can be done in .md.  */
      unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
                                                 scratch4), UNSPEC_SCAS);
      emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
      emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
      emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
/* For a given symbol (function), construct code to compute the address of
   its PLT entry in the large x86-64 PIC model.  */
construct_plt_address (rtx symbol)
  rtx tmp = gen_reg_rtx (Pmode);
  rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);

  gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
  gcc_assert (ix86_cmodel == CM_LARGE_PIC);

  emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
  emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));

ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
                  rtx pop, int sibcall)
  rtx use = NULL, call;

  if (pop == const0_rtx)
  gcc_assert (!TARGET_64BIT || !pop);

  if (TARGET_MACHO && !TARGET_64BIT)
      if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
        fnaddr = machopic_indirect_call_target (fnaddr);
      /* Static functions and indirect calls don't need the pic register.  */
      if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
          && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
          && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
        use_reg (&use, pic_offset_table_rtx);
  if (TARGET_64BIT && INTVAL (callarg2) >= 0)
      rtx al = gen_rtx_REG (QImode, AX_REG);
      emit_move_insn (al, callarg2);
      use_reg (&use, al);
  if (ix86_cmodel == CM_LARGE_PIC
      && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
      && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
    fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
           ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
           : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
      fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
      fnaddr = gen_rtx_MEM (QImode, fnaddr);
  call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
    call = gen_rtx_SET (VOIDmode, retval, call);
      pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
      pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
      call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
      && ix86_cfun_abi () == MS_ABI
      && (!callarg2 || INTVAL (callarg2) != -2))
      /* We need to represent that SI and DI registers are clobbered
         by SYSV calls.  */
      static int clobbered_registers[] = {
        XMM6_REG, XMM7_REG, XMM8_REG,
        XMM9_REG, XMM10_REG, XMM11_REG,
        XMM12_REG, XMM13_REG, XMM14_REG,
        XMM15_REG, SI_REG, DI_REG
      rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
      rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
                                   UNSPEC_MS_TO_SYSV_CALL);

      for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
        vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
                                      (SSE_REGNO_P (clobbered_registers[i])
                                       clobbered_registers[i]));
      call = gen_rtx_PARALLEL (VOIDmode,
                               gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
  call = emit_call_insn (call);
    CALL_INSN_FUNCTION_USAGE (call) = use;
/* Clear stack slot assignments remembered from previous functions.
   This is called from INIT_EXPANDERS once before RTL is emitted for each
   function.  */
static struct machine_function *
ix86_init_machine_status (void)
  struct machine_function *f;

  f = GGC_CNEW (struct machine_function);
  f->use_fast_prologue_epilogue_nregs = -1;
  f->tls_descriptor_call_expanded_p = 0;
  f->call_abi = ix86_abi;

/* Return a MEM corresponding to a stack slot with mode MODE.
   Allocate a new slot if necessary.

   The RTL for a function can have several slots available: N is
   which slot to use.  */

assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
  struct stack_local_entry *s;

  gcc_assert (n < MAX_386_STACK_LOCALS);

  /* Virtual slot is valid only before vregs are instantiated.  */
  gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);

  for (s = ix86_stack_locals; s; s = s->next)
    if (s->mode == mode && s->n == n)
      return copy_rtx (s->rtl);

  s = (struct stack_local_entry *)
    ggc_alloc (sizeof (struct stack_local_entry));
  s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);

  s->next = ix86_stack_locals;
  ix86_stack_locals = s;

/* Construct the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx ix86_tls_symbol;

ix86_tls_get_addr (void)
  if (!ix86_tls_symbol)
      ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
                                            (TARGET_ANY_GNU_TLS
                                            ? "___tls_get_addr"
                                            : "__tls_get_addr");
  return ix86_tls_symbol;

/* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol.  */

static GTY(()) rtx ix86_tls_module_base_symbol;

ix86_tls_module_base (void)
  if (!ix86_tls_module_base_symbol)
      ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
                                                        "_TLS_MODULE_BASE_");
      SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
        |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
  return ix86_tls_module_base_symbol;

/* Calculate the length of the memory address in the instruction
   encoding.  Does not include the one-byte modrm, opcode, or prefix.  */
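/* Some representative encodings covered by the cases below (the byte counts
   are the values this function returns, shown as an illustrative summary):

     (%eax)         -> 0  plain register indirect
     4(%ebp)        -> 1  disp8; ebp/r13 as base always need a displacement
     (%esp)         -> 1  esp/r12 as base force a SIB byte
     4(%eax,%ebx)   -> 2  SIB byte plus disp8
     foo            -> 4  disp32 (or RIP-relative disp32 in 64-bit mode)  */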
memory_address_length (rtx addr)
  struct ix86_address parts;
  rtx base, index, disp;

  if (GET_CODE (addr) == PRE_DEC
      || GET_CODE (addr) == POST_INC
      || GET_CODE (addr) == PRE_MODIFY
      || GET_CODE (addr) == POST_MODIFY)
  ok = ix86_decompose_address (addr, &parts);

  if (parts.base && GET_CODE (parts.base) == SUBREG)
    parts.base = SUBREG_REG (parts.base);
  if (parts.index && GET_CODE (parts.index) == SUBREG)
    parts.index = SUBREG_REG (parts.index);
  index = parts.index;
  /* Rules of thumb:
       - esp as the base always wants an index,
       - ebp as the base always wants a displacement,
       - r12 as the base always wants an index,
       - r13 as the base always wants a displacement.  */
  /* Register Indirect.  */
  if (base && !index && !disp)
      /* esp (for its index) and ebp (for its displacement) need
         the two-byte modrm form.  Similarly for r12 and r13 in 64-bit
         mode.  */
          && (addr == arg_pointer_rtx
              || addr == frame_pointer_rtx
              || REGNO (addr) == SP_REG
              || REGNO (addr) == BP_REG
              || REGNO (addr) == R12_REG
              || REGNO (addr) == R13_REG))
  /* Direct Addressing.  In 64-bit mode mod 00 r/m 5
     is not disp32, but disp32(%rip), so for disp32
     SIB byte is needed, unless print_operand_address
     optimizes it into disp32(%rip) or (%rip) is implied
     by UNSPEC.  */
  else if (disp && !base && !index)
          if (GET_CODE (disp) == CONST)
            symbol = XEXP (disp, 0);
          if (GET_CODE (symbol) == PLUS
              && CONST_INT_P (XEXP (symbol, 1)))
            symbol = XEXP (symbol, 0);

          if (GET_CODE (symbol) != LABEL_REF
              && (GET_CODE (symbol) != SYMBOL_REF
                  || SYMBOL_REF_TLS_MODEL (symbol) != 0)
              && (GET_CODE (symbol) != UNSPEC
                  || (XINT (symbol, 1) != UNSPEC_GOTPCREL
                      && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
      /* Find the length of the displacement constant.  */
          if (base && satisfies_constraint_K (disp))
          /* ebp always wants a displacement.  Similarly r13.  */
          else if (base && REG_P (base)
                   && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
      /* An index requires the two-byte modrm form....  */
          /* ...like esp (or r12), which always wants an index.  */
          || base == arg_pointer_rtx
          || base == frame_pointer_rtx
          || (base && REG_P (base)
              && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
/* Compute default value for "length_immediate" attribute.  When SHORTFORM
   is set, expect that the insn has an 8-bit immediate alternative.  */
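/* For instance (an illustrative pairing with the logic below): "add $3,
   %eax", which has a short-form alternative, encodes its immediate in a
   single byte because 3 fits in [-128, 127], while "add $1000, %eax"
   needs the full 4-byte immediate; DImode immediates are likewise capped
   at 4 bytes because they are encoded as 32-bit sign-extended values.  */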
ix86_attr_length_immediate_default (rtx insn, int shortform)
  extract_insn_cached (insn);
  for (i = recog_data.n_operands - 1; i >= 0; --i)
    if (CONSTANT_P (recog_data.operand[i]))
        enum attr_mode mode = get_attr_mode (insn);

        if (shortform && CONST_INT_P (recog_data.operand[i]))
            HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
              ival = trunc_int_for_mode (ival, HImode);
              ival = trunc_int_for_mode (ival, SImode);
            if (IN_RANGE (ival, -128, 127))
          /* Immediates for DImode instructions are encoded as 32-bit
             sign-extended values.  */
        fatal_insn ("unknown insn mode", insn);

/* Compute default value for "length_address" attribute.  */

ix86_attr_length_address_default (rtx insn)
  if (get_attr_type (insn) == TYPE_LEA)
      rtx set = PATTERN (insn), addr;

      if (GET_CODE (set) == PARALLEL)
        set = XVECEXP (set, 0, 0);

      gcc_assert (GET_CODE (set) == SET);

      addr = SET_SRC (set);
      if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
          if (GET_CODE (addr) == ZERO_EXTEND)
            addr = XEXP (addr, 0);
          if (GET_CODE (addr) == SUBREG)
            addr = SUBREG_REG (addr);
      return memory_address_length (addr);
  extract_insn_cached (insn);
  for (i = recog_data.n_operands - 1; i >= 0; --i)
    if (MEM_P (recog_data.operand[i]))
        constrain_operands_cached (reload_completed);
        if (which_alternative != -1)
            const char *constraints = recog_data.constraints[i];
            int alt = which_alternative;

            while (*constraints == '=' || *constraints == '+')
              while (*constraints++ != ',')
            /* Skip ignored operands.  */
            if (*constraints == 'X')
        return memory_address_length (XEXP (recog_data.operand[i], 0));
/* Compute default value for "length_vex" attribute.  It includes
   the 2- or 3-byte VEX prefix and the 1 opcode byte.  */
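/* A quick summary of the cases handled below (illustrative, assuming the
   usual VEX encoding rules): a 0f-opcode insn that uses neither VEX.W nor
   any extended register can take the 2-byte prefix, so this returns
   2 + 1 = 3; a non-0f opcode, VEX.W, or an extended-register operand
   forces the 3-byte prefix, returning 3 + 1 = 4.  */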
ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
  /* Only 0f opcode can use 2 byte VEX prefix and VEX W bit uses 3
     byte VEX prefix.  */
  if (!has_0f_opcode || has_vex_w)
  /* We can always use the 2-byte VEX prefix in 32-bit mode.  */
  extract_insn_cached (insn);

  for (i = recog_data.n_operands - 1; i >= 0; --i)
    if (REG_P (recog_data.operand[i]))
        /* REX.W bit uses 3 byte VEX prefix.  */
        if (GET_MODE (recog_data.operand[i]) == DImode
            && GENERAL_REG_P (recog_data.operand[i]))
        /* REX.X or REX.B bits use 3 byte VEX prefix.  */
        if (MEM_P (recog_data.operand[i])
            && x86_extended_reg_mentioned_p (recog_data.operand[i]))

/* Return the maximum number of instructions a cpu can issue.  */

ix86_issue_rate (void)
    case PROCESSOR_PENTIUM:
    case PROCESSOR_ATOM:
    case PROCESSOR_PENTIUMPRO:
    case PROCESSOR_PENTIUM4:
    case PROCESSOR_ATHLON:
    case PROCESSOR_AMDFAM10:
    case PROCESSOR_NOCONA:
    case PROCESSOR_GENERIC32:
    case PROCESSOR_GENERIC64:
    case PROCESSOR_CORE2:
/* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
   by DEP_INSN and nothing else that DEP_INSN sets.  */
ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
  /* Simplify the test for uninteresting insns.  */
  if (insn_type != TYPE_SETCC
      && insn_type != TYPE_ICMOV
      && insn_type != TYPE_FCMOV
      && insn_type != TYPE_IBR)
  if ((set = single_set (dep_insn)) != 0)
      set = SET_DEST (set);
  else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
           && XVECLEN (PATTERN (dep_insn), 0) == 2
           && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
           && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
      set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
      set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
  if (!REG_P (set) || REGNO (set) != FLAGS_REG)

  /* This test is true if the dependent insn reads the flags but
     not any other potentially set register.  */
  if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
  if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
/* Return true iff USE_INSN has a memory address with operands set by
   SET_INSN.  */
ix86_agi_dependent (rtx set_insn, rtx use_insn)
  extract_insn_cached (use_insn);
  for (i = recog_data.n_operands - 1; i >= 0; --i)
    if (MEM_P (recog_data.operand[i]))
        rtx addr = XEXP (recog_data.operand[i], 0);
        return modified_in_p (addr, set_insn) != 0;

ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
  enum attr_type insn_type, dep_insn_type;
  enum attr_memory memory;
  int dep_insn_code_number;

  /* Anti and output dependencies have zero cost on all CPUs.  */
  if (REG_NOTE_KIND (link) != 0)
  dep_insn_code_number = recog_memoized (dep_insn);

  /* If we can't recognize the insns, we can't really do anything.  */
  if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
  insn_type = get_attr_type (insn);
  dep_insn_type = get_attr_type (dep_insn);

    case PROCESSOR_PENTIUM:
      /* Address Generation Interlock adds a cycle of latency.  */
      if (insn_type == TYPE_LEA)
          rtx addr = PATTERN (insn);

          if (GET_CODE (addr) == PARALLEL)
            addr = XVECEXP (addr, 0, 0);

          gcc_assert (GET_CODE (addr) == SET);

          addr = SET_SRC (addr);
          if (modified_in_p (addr, dep_insn))
      else if (ix86_agi_dependent (dep_insn, insn))
      /* ??? Compares pair with jump/setcc.  */
      if (ix86_flags_dependent (insn, dep_insn, insn_type))
      /* Floating point stores require value to be ready one cycle earlier.  */
      if (insn_type == TYPE_FMOV
          && get_attr_memory (insn) == MEMORY_STORE
          && !ix86_agi_dependent (dep_insn, insn))
    case PROCESSOR_PENTIUMPRO:
      memory = get_attr_memory (insn);

      /* INT->FP conversion is expensive.  */
      if (get_attr_fp_int_src (dep_insn))
      /* There is one cycle extra latency between an FP op and a store.  */
      if (insn_type == TYPE_FMOV
          && (set = single_set (dep_insn)) != NULL_RTX
          && (set2 = single_set (insn)) != NULL_RTX
          && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
          && MEM_P (SET_DEST (set2)))
      /* Show ability of reorder buffer to hide latency of load by executing
         in parallel with previous instruction in case
         previous instruction is not needed to compute the address.  */
      if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
          && !ix86_agi_dependent (dep_insn, insn))
        /* Claim moves to take one cycle, as the core can issue one load
           at a time and the next load can start a cycle later.  */
        if (dep_insn_type == TYPE_IMOV
            || dep_insn_type == TYPE_FMOV)
      memory = get_attr_memory (insn);
      /* The esp dependency is resolved before the instruction is really
         finished.  */
      if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
          && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
      /* INT->FP conversion is expensive.  */
      if (get_attr_fp_int_src (dep_insn))
      /* Show ability of reorder buffer to hide latency of load by executing
         in parallel with previous instruction in case
         previous instruction is not needed to compute the address.  */
      if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
          && !ix86_agi_dependent (dep_insn, insn))
        /* Claim moves to take one cycle, as the core can issue one load
           at a time and the next load can start a cycle later.  */
        if (dep_insn_type == TYPE_IMOV
            || dep_insn_type == TYPE_FMOV)
    case PROCESSOR_ATHLON:
    case PROCESSOR_AMDFAM10:
    case PROCESSOR_ATOM:
    case PROCESSOR_GENERIC32:
    case PROCESSOR_GENERIC64:
      memory = get_attr_memory (insn);

      /* Show ability of reorder buffer to hide latency of load by executing
         in parallel with previous instruction in case
         previous instruction is not needed to compute the address.  */
      if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
          && !ix86_agi_dependent (dep_insn, insn))
          enum attr_unit unit = get_attr_unit (insn);
          /* Because of the difference between the length of integer and
             floating unit pipeline preparation stages, the memory operands
             for floating point are cheaper.

             ??? For Athlon the difference is most probably 2.  */
          if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
            loadcost = TARGET_ATHLON ? 2 : 0;

          if (cost >= loadcost)
/* How many alternative schedules to try.  This should be as wide as the
   scheduling freedom in the DFA, but no wider.  Making this value too
   large results in extra work for the scheduler.  */
ia32_multipass_dfa_lookahead (void)
    case PROCESSOR_PENTIUM:
    case PROCESSOR_PENTIUMPRO:
/* Compute the alignment given to a constant that is being placed in memory.
   EXP is the constant and ALIGN is the alignment that the object would
   ordinarily have.  The value of this function is used instead of that
   alignment to align the object.  */
ix86_constant_alignment (tree exp, int align)
  if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
      || TREE_CODE (exp) == INTEGER_CST)
      if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
      else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
  else if (!optimize_size && TREE_CODE (exp) == STRING_CST
           && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
    return BITS_PER_WORD;

/* Compute the alignment for a static variable.
   TYPE is the data type, and ALIGN is the alignment that
   the object would ordinarily have.  The value of this function is used
   instead of that alignment to align the object.  */

ix86_data_alignment (tree type, int align)
  int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);

  if (AGGREGATE_TYPE_P (type)
      && TYPE_SIZE (type)
      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
      && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
          || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
      && align < max_align)
  /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to a 16-byte boundary.  */
      if (AGGREGATE_TYPE_P (type)
          && TYPE_SIZE (type)
          && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
          && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
              || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
  if (TREE_CODE (type) == ARRAY_TYPE)
      if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
      if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
  else if (TREE_CODE (type) == COMPLEX_TYPE)
      if (TYPE_MODE (type) == DCmode && align < 64)
      if ((TYPE_MODE (type) == XCmode
           || TYPE_MODE (type) == TCmode) && align < 128)
  else if ((TREE_CODE (type) == RECORD_TYPE
            || TREE_CODE (type) == UNION_TYPE
            || TREE_CODE (type) == QUAL_UNION_TYPE)
           && TYPE_FIELDS (type))
      if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
      if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
  else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
           || TREE_CODE (type) == INTEGER_TYPE)
      if (TYPE_MODE (type) == DFmode && align < 64)
      if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20056 /* Compute the alignment for a local variable or a stack slot. EXP is
20057 the data type or decl itself, MODE is the widest mode available and
20058 ALIGN is the alignment that the object would ordinarily have. The
20059 value of this macro is used instead of that alignment to align the
20060 object. */
20063 ix86_local_alignment (tree exp, enum machine_mode mode,
20064 unsigned int align)
20068 if (exp && DECL_P (exp))
20070 type = TREE_TYPE (exp);
20079 /* Don't do dynamic stack realignment for long long objects with
20080 -mpreferred-stack-boundary=2. */
20083 && ix86_preferred_stack_boundary < 64
20084 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
20085 && (!type || !TYPE_USER_ALIGN (type))
20086 && (!decl || !DECL_USER_ALIGN (decl)))
20089 /* If TYPE is NULL, we are allocating a stack slot for a caller-save
20090 register in MODE. We will return the largest alignment of XF
20091 and DF. */
20094 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
20095 align = GET_MODE_ALIGNMENT (DFmode);
20099 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
20100 to a 16-byte boundary. */
20103 if (AGGREGATE_TYPE_P (type)
20104 && TYPE_SIZE (type)
20105 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20106 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
20107 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20110 if (TREE_CODE (type) == ARRAY_TYPE)
20112 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20114 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20117 else if (TREE_CODE (type) == COMPLEX_TYPE)
20119 if (TYPE_MODE (type) == DCmode && align < 64)
20121 if ((TYPE_MODE (type) == XCmode
20122 || TYPE_MODE (type) == TCmode) && align < 128)
20125 else if ((TREE_CODE (type) == RECORD_TYPE
20126 || TREE_CODE (type) == UNION_TYPE
20127 || TREE_CODE (type) == QUAL_UNION_TYPE)
20128 && TYPE_FIELDS (type))
20130 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20132 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20135 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20136 || TREE_CODE (type) == INTEGER_TYPE)
20139 if (TYPE_MODE (type) == DFmode && align < 64)
20141 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20147 /* Compute the minimum required alignment for dynamic stack realignment
20148 purposes for a local variable, parameter or a stack slot. EXP is
20149 the data type or decl itself, MODE is its mode and ALIGN is the
20150 alignment that the object would ordinarily have. */
20153 ix86_minimum_alignment (tree exp, enum machine_mode mode,
20154 unsigned int align)
20158 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
20161 if (exp && DECL_P (exp))
20163 type = TREE_TYPE (exp);
20172 /* Don't do dynamic stack realignment for long long objects with
20173 -mpreferred-stack-boundary=2. */
20174 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
20175 && (!type || !TYPE_USER_ALIGN (type))
20176 && (!decl || !DECL_USER_ALIGN (decl)))
20182 /* Find a location for the static chain incoming to a nested function.
20183 This is a register, unless all free registers are used by arguments. */
20186 ix86_static_chain (const_tree fndecl, bool incoming_p)
20190 if (!DECL_STATIC_CHAIN (fndecl))
20195 /* We always use R10 in 64-bit mode. */
20201 /* By default in 32-bit mode we use ECX to pass the static chain. */
20204 fntype = TREE_TYPE (fndecl);
20205 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
20207 /* Fastcall functions use ecx/edx for arguments, which leaves
20208 us with EAX for the static chain. */
20211 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
20213 /* Thiscall functions use ecx for arguments, which leaves
20214 us with EAX for the static chain. */
20217 else if (ix86_function_regparm (fntype, fndecl) == 3)
20219 /* For regparm 3, we have no free call-clobbered registers in
20220 which to store the static chain. In order to implement this,
20221 we have the trampoline push the static chain to the stack.
20222 However, we can't push a value below the return address when
20223 we call the nested function directly, so we have to use an
20224 alternate entry point. For this we use ESI, and have the
20225 alternate entry point push ESI, so that things appear the
20226 same once we're executing the nested function. */
20229 if (fndecl == current_function_decl)
20230 ix86_static_chain_on_stack = true;
20231 return gen_frame_mem (SImode,
20232 plus_constant (arg_pointer_rtx, -8));
20238 return gen_rtx_REG (Pmode, regno);
20241 /* Emit RTL insns to initialize the variable parts of a trampoline.
20242 FNDECL is the decl of the target address; M_TRAMP is a MEM for
20243 the trampoline, and CHAIN_VALUE is an RTX for the static chain
20244 to be passed to the target function. */
20247 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
20251 fnaddr = XEXP (DECL_RTL (fndecl), 0);
20258 /* Depending on the static chain location, either load a register
20259 with a constant, or push the constant to the stack. All of the
20260 instructions are the same size. */
20261 chain = ix86_static_chain (fndecl, true);
20264 if (REGNO (chain) == CX_REG)
20266 else if (REGNO (chain) == AX_REG)
20269 gcc_unreachable ();
20274 mem = adjust_address (m_tramp, QImode, 0);
20275 emit_move_insn (mem, gen_int_mode (opcode, QImode));
20277 mem = adjust_address (m_tramp, SImode, 1);
20278 emit_move_insn (mem, chain_value);
20280 /* Compute offset from the end of the jmp to the target function.
20281 In the case in which the trampoline stores the static chain on
20282 the stack, we need to skip the first insn which pushes the
20283 (call-saved) register static chain; this push is 1 byte. */
20284 disp = expand_binop (SImode, sub_optab, fnaddr,
20285 plus_constant (XEXP (m_tramp, 0),
20286 MEM_P (chain) ? 9 : 10),
20287 NULL_RTX, 1, OPTAB_DIRECT);
20289 mem = adjust_address (m_tramp, QImode, 5);
20290 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
20292 mem = adjust_address (m_tramp, SImode, 6);
20293 emit_move_insn (mem, disp);
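/* A sketch of the 32-bit trampoline assembled above (offsets in
   bytes; the mov-imm32 0xb8+reg and push-imm32 0x68 encodings are
   the standard ia32 forms assumed by the elided opcode selection):

      0       opcode   movl $chain_value, %ecx/%eax  (or pushl imm32)
      1..4    chain_value
      5       0xe9     jmp rel32
      6..9    disp = fnaddr - (tramp + 10); in the push case it is
              fnaddr + 1 - (tramp + 10), so the jump lands just past
              the 1-byte push at the nested function's entry.  */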
20299 /* Load the function address into r11. Try to load the address
20300 using the shorter movl instead of movabs. We may want to support
20301 movq for kernel mode, but the kernel does not use trampolines at
20302 the moment. */
20303 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
20305 fnaddr = copy_to_mode_reg (DImode, fnaddr);
20307 mem = adjust_address (m_tramp, HImode, offset);
20308 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
20310 mem = adjust_address (m_tramp, SImode, offset + 2);
20311 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
20316 mem = adjust_address (m_tramp, HImode, offset);
20317 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
20319 mem = adjust_address (m_tramp, DImode, offset + 2);
20320 emit_move_insn (mem, fnaddr);
20324 /* Load static chain using movabs to r10. */
20325 mem = adjust_address (m_tramp, HImode, offset);
20326 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
20328 mem = adjust_address (m_tramp, DImode, offset + 2);
20329 emit_move_insn (mem, chain_value);
20332 /* Jump to r11; the last (unused) byte is a nop, only there to
20333 pad the write out to a single 32-bit store. */
20334 mem = adjust_address (m_tramp, SImode, offset);
20335 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
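/* Decoding the constants stored above (the HImode/SImode stores are
   little-endian, so 0xbb41 emits the bytes 41 bb, and so on):

      41 bb imm32    movl   $fnaddr, %r11d   (zero-extends into %r11)
   or 49 bb imm64    movabs $fnaddr, %r11
      49 ba imm64    movabs $chain_value, %r10
      49 ff e3       jmp    *%r11
      90             nop    (pads the final write to 4 bytes)  */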
20338 gcc_assert (offset <= TRAMPOLINE_SIZE);
20341 #ifdef ENABLE_EXECUTE_STACK
20342 #ifdef CHECK_EXECUTE_STACK_ENABLED
20343 if (CHECK_EXECUTE_STACK_ENABLED)
20345 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
20346 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
20350 /* The following file contains several enumerations and data structures
20351 built from the definitions in i386-builtin-types.def. */
20353 #include "i386-builtin-types.inc"
20355 /* Table for the ix86 builtin non-function types. */
20356 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
20358 /* Retrieve an element from the above table, building some of
20359 the types lazily. */
20362 ix86_get_builtin_type (enum ix86_builtin_type tcode)
20364 unsigned int index;
20367 gcc_assert ((unsigned) tcode < ARRAY_SIZE (ix86_builtin_type_tab));
20369 type = ix86_builtin_type_tab[(int) tcode];
20373 gcc_assert (tcode > IX86_BT_LAST_PRIM);
20374 if (tcode <= IX86_BT_LAST_VECT)
20376 enum machine_mode mode;
20378 index = tcode - IX86_BT_LAST_PRIM - 1;
20379 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
20380 mode = ix86_builtin_type_vect_mode[index];
20382 type = build_vector_type_for_mode (itype, mode);
20388 index = tcode - IX86_BT_LAST_VECT - 1;
20389 if (tcode <= IX86_BT_LAST_PTR)
20390 quals = TYPE_UNQUALIFIED;
20392 quals = TYPE_QUAL_CONST;
20394 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
20395 if (quals != TYPE_UNQUALIFIED)
20396 itype = build_qualified_type (itype, quals);
20398 type = build_pointer_type (itype);
20401 ix86_builtin_type_tab[(int) tcode] = type;
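/* For instance, a vector code whose recorded base type is float and
   whose recorded mode is V4SFmode is materialized on first use via
   build_vector_type_for_mode (float_type_node, V4SFmode), while a
   pointer-to-const code such as the PCV4SF used by the AVX broadcast
   builtins below const-qualifies the pointed-to type first and then
   wraps it with build_pointer_type.  */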
20405 /* Table for the ix86 builtin function types. */
20406 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
20408 /* Retrieve an element from the above table, building some of
20409 the types lazily. */
20412 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
20416 gcc_assert ((unsigned) tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
20418 type = ix86_builtin_func_type_tab[(int) tcode];
20422 if (tcode <= IX86_BT_LAST_FUNC)
20424 unsigned start = ix86_builtin_func_start[(int) tcode];
20425 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
20426 tree rtype, atype, args = void_list_node;
20429 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
20430 for (i = after - 1; i > start; --i)
20432 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
20433 args = tree_cons (NULL, atype, args);
20436 type = build_function_type (rtype, args);
20440 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
20441 enum ix86_builtin_func_type icode;
20443 icode = ix86_builtin_func_alias_base[index];
20444 type = ix86_get_builtin_func_type (icode);
20447 ix86_builtin_func_type_tab[(int) tcode] = type;
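/* As an example, a two-argument signature such as
   V2SI_FTYPE_V2SI_V2SI (used by many of the MMX entries below) ends
   up as build_function_type (V2SI, tree_cons (NULL, V2SI,
   tree_cons (NULL, V2SI, void_list_node))); the loop above walks the
   argument slots in reverse precisely because tree_cons prepends.  */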
20452 /* Codes for all the SSE/MMX builtins. */
20455 IX86_BUILTIN_ADDPS,
20456 IX86_BUILTIN_ADDSS,
20457 IX86_BUILTIN_DIVPS,
20458 IX86_BUILTIN_DIVSS,
20459 IX86_BUILTIN_MULPS,
20460 IX86_BUILTIN_MULSS,
20461 IX86_BUILTIN_SUBPS,
20462 IX86_BUILTIN_SUBSS,
20464 IX86_BUILTIN_CMPEQPS,
20465 IX86_BUILTIN_CMPLTPS,
20466 IX86_BUILTIN_CMPLEPS,
20467 IX86_BUILTIN_CMPGTPS,
20468 IX86_BUILTIN_CMPGEPS,
20469 IX86_BUILTIN_CMPNEQPS,
20470 IX86_BUILTIN_CMPNLTPS,
20471 IX86_BUILTIN_CMPNLEPS,
20472 IX86_BUILTIN_CMPNGTPS,
20473 IX86_BUILTIN_CMPNGEPS,
20474 IX86_BUILTIN_CMPORDPS,
20475 IX86_BUILTIN_CMPUNORDPS,
20476 IX86_BUILTIN_CMPEQSS,
20477 IX86_BUILTIN_CMPLTSS,
20478 IX86_BUILTIN_CMPLESS,
20479 IX86_BUILTIN_CMPNEQSS,
20480 IX86_BUILTIN_CMPNLTSS,
20481 IX86_BUILTIN_CMPNLESS,
20482 IX86_BUILTIN_CMPNGTSS,
20483 IX86_BUILTIN_CMPNGESS,
20484 IX86_BUILTIN_CMPORDSS,
20485 IX86_BUILTIN_CMPUNORDSS,
20487 IX86_BUILTIN_COMIEQSS,
20488 IX86_BUILTIN_COMILTSS,
20489 IX86_BUILTIN_COMILESS,
20490 IX86_BUILTIN_COMIGTSS,
20491 IX86_BUILTIN_COMIGESS,
20492 IX86_BUILTIN_COMINEQSS,
20493 IX86_BUILTIN_UCOMIEQSS,
20494 IX86_BUILTIN_UCOMILTSS,
20495 IX86_BUILTIN_UCOMILESS,
20496 IX86_BUILTIN_UCOMIGTSS,
20497 IX86_BUILTIN_UCOMIGESS,
20498 IX86_BUILTIN_UCOMINEQSS,
20500 IX86_BUILTIN_CVTPI2PS,
20501 IX86_BUILTIN_CVTPS2PI,
20502 IX86_BUILTIN_CVTSI2SS,
20503 IX86_BUILTIN_CVTSI642SS,
20504 IX86_BUILTIN_CVTSS2SI,
20505 IX86_BUILTIN_CVTSS2SI64,
20506 IX86_BUILTIN_CVTTPS2PI,
20507 IX86_BUILTIN_CVTTSS2SI,
20508 IX86_BUILTIN_CVTTSS2SI64,
20510 IX86_BUILTIN_MAXPS,
20511 IX86_BUILTIN_MAXSS,
20512 IX86_BUILTIN_MINPS,
20513 IX86_BUILTIN_MINSS,
20515 IX86_BUILTIN_LOADUPS,
20516 IX86_BUILTIN_STOREUPS,
20517 IX86_BUILTIN_MOVSS,
20519 IX86_BUILTIN_MOVHLPS,
20520 IX86_BUILTIN_MOVLHPS,
20521 IX86_BUILTIN_LOADHPS,
20522 IX86_BUILTIN_LOADLPS,
20523 IX86_BUILTIN_STOREHPS,
20524 IX86_BUILTIN_STORELPS,
20526 IX86_BUILTIN_MASKMOVQ,
20527 IX86_BUILTIN_MOVMSKPS,
20528 IX86_BUILTIN_PMOVMSKB,
20530 IX86_BUILTIN_MOVNTPS,
20531 IX86_BUILTIN_MOVNTQ,
20533 IX86_BUILTIN_LOADDQU,
20534 IX86_BUILTIN_STOREDQU,
20536 IX86_BUILTIN_PACKSSWB,
20537 IX86_BUILTIN_PACKSSDW,
20538 IX86_BUILTIN_PACKUSWB,
20540 IX86_BUILTIN_PADDB,
20541 IX86_BUILTIN_PADDW,
20542 IX86_BUILTIN_PADDD,
20543 IX86_BUILTIN_PADDQ,
20544 IX86_BUILTIN_PADDSB,
20545 IX86_BUILTIN_PADDSW,
20546 IX86_BUILTIN_PADDUSB,
20547 IX86_BUILTIN_PADDUSW,
20548 IX86_BUILTIN_PSUBB,
20549 IX86_BUILTIN_PSUBW,
20550 IX86_BUILTIN_PSUBD,
20551 IX86_BUILTIN_PSUBQ,
20552 IX86_BUILTIN_PSUBSB,
20553 IX86_BUILTIN_PSUBSW,
20554 IX86_BUILTIN_PSUBUSB,
20555 IX86_BUILTIN_PSUBUSW,
20558 IX86_BUILTIN_PANDN,
20562 IX86_BUILTIN_PAVGB,
20563 IX86_BUILTIN_PAVGW,
20565 IX86_BUILTIN_PCMPEQB,
20566 IX86_BUILTIN_PCMPEQW,
20567 IX86_BUILTIN_PCMPEQD,
20568 IX86_BUILTIN_PCMPGTB,
20569 IX86_BUILTIN_PCMPGTW,
20570 IX86_BUILTIN_PCMPGTD,
20572 IX86_BUILTIN_PMADDWD,
20574 IX86_BUILTIN_PMAXSW,
20575 IX86_BUILTIN_PMAXUB,
20576 IX86_BUILTIN_PMINSW,
20577 IX86_BUILTIN_PMINUB,
20579 IX86_BUILTIN_PMULHUW,
20580 IX86_BUILTIN_PMULHW,
20581 IX86_BUILTIN_PMULLW,
20583 IX86_BUILTIN_PSADBW,
20584 IX86_BUILTIN_PSHUFW,
20586 IX86_BUILTIN_PSLLW,
20587 IX86_BUILTIN_PSLLD,
20588 IX86_BUILTIN_PSLLQ,
20589 IX86_BUILTIN_PSRAW,
20590 IX86_BUILTIN_PSRAD,
20591 IX86_BUILTIN_PSRLW,
20592 IX86_BUILTIN_PSRLD,
20593 IX86_BUILTIN_PSRLQ,
20594 IX86_BUILTIN_PSLLWI,
20595 IX86_BUILTIN_PSLLDI,
20596 IX86_BUILTIN_PSLLQI,
20597 IX86_BUILTIN_PSRAWI,
20598 IX86_BUILTIN_PSRADI,
20599 IX86_BUILTIN_PSRLWI,
20600 IX86_BUILTIN_PSRLDI,
20601 IX86_BUILTIN_PSRLQI,
20603 IX86_BUILTIN_PUNPCKHBW,
20604 IX86_BUILTIN_PUNPCKHWD,
20605 IX86_BUILTIN_PUNPCKHDQ,
20606 IX86_BUILTIN_PUNPCKLBW,
20607 IX86_BUILTIN_PUNPCKLWD,
20608 IX86_BUILTIN_PUNPCKLDQ,
20610 IX86_BUILTIN_SHUFPS,
20612 IX86_BUILTIN_RCPPS,
20613 IX86_BUILTIN_RCPSS,
20614 IX86_BUILTIN_RSQRTPS,
20615 IX86_BUILTIN_RSQRTPS_NR,
20616 IX86_BUILTIN_RSQRTSS,
20617 IX86_BUILTIN_RSQRTF,
20618 IX86_BUILTIN_SQRTPS,
20619 IX86_BUILTIN_SQRTPS_NR,
20620 IX86_BUILTIN_SQRTSS,
20622 IX86_BUILTIN_UNPCKHPS,
20623 IX86_BUILTIN_UNPCKLPS,
20625 IX86_BUILTIN_ANDPS,
20626 IX86_BUILTIN_ANDNPS,
20628 IX86_BUILTIN_XORPS,
20631 IX86_BUILTIN_LDMXCSR,
20632 IX86_BUILTIN_STMXCSR,
20633 IX86_BUILTIN_SFENCE,
20635 /* 3DNow! Original */
20636 IX86_BUILTIN_FEMMS,
20637 IX86_BUILTIN_PAVGUSB,
20638 IX86_BUILTIN_PF2ID,
20639 IX86_BUILTIN_PFACC,
20640 IX86_BUILTIN_PFADD,
20641 IX86_BUILTIN_PFCMPEQ,
20642 IX86_BUILTIN_PFCMPGE,
20643 IX86_BUILTIN_PFCMPGT,
20644 IX86_BUILTIN_PFMAX,
20645 IX86_BUILTIN_PFMIN,
20646 IX86_BUILTIN_PFMUL,
20647 IX86_BUILTIN_PFRCP,
20648 IX86_BUILTIN_PFRCPIT1,
20649 IX86_BUILTIN_PFRCPIT2,
20650 IX86_BUILTIN_PFRSQIT1,
20651 IX86_BUILTIN_PFRSQRT,
20652 IX86_BUILTIN_PFSUB,
20653 IX86_BUILTIN_PFSUBR,
20654 IX86_BUILTIN_PI2FD,
20655 IX86_BUILTIN_PMULHRW,
20657 /* 3DNow! Athlon Extensions */
20658 IX86_BUILTIN_PF2IW,
20659 IX86_BUILTIN_PFNACC,
20660 IX86_BUILTIN_PFPNACC,
20661 IX86_BUILTIN_PI2FW,
20662 IX86_BUILTIN_PSWAPDSI,
20663 IX86_BUILTIN_PSWAPDSF,
20666 IX86_BUILTIN_ADDPD,
20667 IX86_BUILTIN_ADDSD,
20668 IX86_BUILTIN_DIVPD,
20669 IX86_BUILTIN_DIVSD,
20670 IX86_BUILTIN_MULPD,
20671 IX86_BUILTIN_MULSD,
20672 IX86_BUILTIN_SUBPD,
20673 IX86_BUILTIN_SUBSD,
20675 IX86_BUILTIN_CMPEQPD,
20676 IX86_BUILTIN_CMPLTPD,
20677 IX86_BUILTIN_CMPLEPD,
20678 IX86_BUILTIN_CMPGTPD,
20679 IX86_BUILTIN_CMPGEPD,
20680 IX86_BUILTIN_CMPNEQPD,
20681 IX86_BUILTIN_CMPNLTPD,
20682 IX86_BUILTIN_CMPNLEPD,
20683 IX86_BUILTIN_CMPNGTPD,
20684 IX86_BUILTIN_CMPNGEPD,
20685 IX86_BUILTIN_CMPORDPD,
20686 IX86_BUILTIN_CMPUNORDPD,
20687 IX86_BUILTIN_CMPEQSD,
20688 IX86_BUILTIN_CMPLTSD,
20689 IX86_BUILTIN_CMPLESD,
20690 IX86_BUILTIN_CMPNEQSD,
20691 IX86_BUILTIN_CMPNLTSD,
20692 IX86_BUILTIN_CMPNLESD,
20693 IX86_BUILTIN_CMPORDSD,
20694 IX86_BUILTIN_CMPUNORDSD,
20696 IX86_BUILTIN_COMIEQSD,
20697 IX86_BUILTIN_COMILTSD,
20698 IX86_BUILTIN_COMILESD,
20699 IX86_BUILTIN_COMIGTSD,
20700 IX86_BUILTIN_COMIGESD,
20701 IX86_BUILTIN_COMINEQSD,
20702 IX86_BUILTIN_UCOMIEQSD,
20703 IX86_BUILTIN_UCOMILTSD,
20704 IX86_BUILTIN_UCOMILESD,
20705 IX86_BUILTIN_UCOMIGTSD,
20706 IX86_BUILTIN_UCOMIGESD,
20707 IX86_BUILTIN_UCOMINEQSD,
20709 IX86_BUILTIN_MAXPD,
20710 IX86_BUILTIN_MAXSD,
20711 IX86_BUILTIN_MINPD,
20712 IX86_BUILTIN_MINSD,
20714 IX86_BUILTIN_ANDPD,
20715 IX86_BUILTIN_ANDNPD,
20717 IX86_BUILTIN_XORPD,
20719 IX86_BUILTIN_SQRTPD,
20720 IX86_BUILTIN_SQRTSD,
20722 IX86_BUILTIN_UNPCKHPD,
20723 IX86_BUILTIN_UNPCKLPD,
20725 IX86_BUILTIN_SHUFPD,
20727 IX86_BUILTIN_LOADUPD,
20728 IX86_BUILTIN_STOREUPD,
20729 IX86_BUILTIN_MOVSD,
20731 IX86_BUILTIN_LOADHPD,
20732 IX86_BUILTIN_LOADLPD,
20734 IX86_BUILTIN_CVTDQ2PD,
20735 IX86_BUILTIN_CVTDQ2PS,
20737 IX86_BUILTIN_CVTPD2DQ,
20738 IX86_BUILTIN_CVTPD2PI,
20739 IX86_BUILTIN_CVTPD2PS,
20740 IX86_BUILTIN_CVTTPD2DQ,
20741 IX86_BUILTIN_CVTTPD2PI,
20743 IX86_BUILTIN_CVTPI2PD,
20744 IX86_BUILTIN_CVTSI2SD,
20745 IX86_BUILTIN_CVTSI642SD,
20747 IX86_BUILTIN_CVTSD2SI,
20748 IX86_BUILTIN_CVTSD2SI64,
20749 IX86_BUILTIN_CVTSD2SS,
20750 IX86_BUILTIN_CVTSS2SD,
20751 IX86_BUILTIN_CVTTSD2SI,
20752 IX86_BUILTIN_CVTTSD2SI64,
20754 IX86_BUILTIN_CVTPS2DQ,
20755 IX86_BUILTIN_CVTPS2PD,
20756 IX86_BUILTIN_CVTTPS2DQ,
20758 IX86_BUILTIN_MOVNTI,
20759 IX86_BUILTIN_MOVNTPD,
20760 IX86_BUILTIN_MOVNTDQ,
20762 IX86_BUILTIN_MOVQ128,
20765 IX86_BUILTIN_MASKMOVDQU,
20766 IX86_BUILTIN_MOVMSKPD,
20767 IX86_BUILTIN_PMOVMSKB128,
20769 IX86_BUILTIN_PACKSSWB128,
20770 IX86_BUILTIN_PACKSSDW128,
20771 IX86_BUILTIN_PACKUSWB128,
20773 IX86_BUILTIN_PADDB128,
20774 IX86_BUILTIN_PADDW128,
20775 IX86_BUILTIN_PADDD128,
20776 IX86_BUILTIN_PADDQ128,
20777 IX86_BUILTIN_PADDSB128,
20778 IX86_BUILTIN_PADDSW128,
20779 IX86_BUILTIN_PADDUSB128,
20780 IX86_BUILTIN_PADDUSW128,
20781 IX86_BUILTIN_PSUBB128,
20782 IX86_BUILTIN_PSUBW128,
20783 IX86_BUILTIN_PSUBD128,
20784 IX86_BUILTIN_PSUBQ128,
20785 IX86_BUILTIN_PSUBSB128,
20786 IX86_BUILTIN_PSUBSW128,
20787 IX86_BUILTIN_PSUBUSB128,
20788 IX86_BUILTIN_PSUBUSW128,
20790 IX86_BUILTIN_PAND128,
20791 IX86_BUILTIN_PANDN128,
20792 IX86_BUILTIN_POR128,
20793 IX86_BUILTIN_PXOR128,
20795 IX86_BUILTIN_PAVGB128,
20796 IX86_BUILTIN_PAVGW128,
20798 IX86_BUILTIN_PCMPEQB128,
20799 IX86_BUILTIN_PCMPEQW128,
20800 IX86_BUILTIN_PCMPEQD128,
20801 IX86_BUILTIN_PCMPGTB128,
20802 IX86_BUILTIN_PCMPGTW128,
20803 IX86_BUILTIN_PCMPGTD128,
20805 IX86_BUILTIN_PMADDWD128,
20807 IX86_BUILTIN_PMAXSW128,
20808 IX86_BUILTIN_PMAXUB128,
20809 IX86_BUILTIN_PMINSW128,
20810 IX86_BUILTIN_PMINUB128,
20812 IX86_BUILTIN_PMULUDQ,
20813 IX86_BUILTIN_PMULUDQ128,
20814 IX86_BUILTIN_PMULHUW128,
20815 IX86_BUILTIN_PMULHW128,
20816 IX86_BUILTIN_PMULLW128,
20818 IX86_BUILTIN_PSADBW128,
20819 IX86_BUILTIN_PSHUFHW,
20820 IX86_BUILTIN_PSHUFLW,
20821 IX86_BUILTIN_PSHUFD,
20823 IX86_BUILTIN_PSLLDQI128,
20824 IX86_BUILTIN_PSLLWI128,
20825 IX86_BUILTIN_PSLLDI128,
20826 IX86_BUILTIN_PSLLQI128,
20827 IX86_BUILTIN_PSRAWI128,
20828 IX86_BUILTIN_PSRADI128,
20829 IX86_BUILTIN_PSRLDQI128,
20830 IX86_BUILTIN_PSRLWI128,
20831 IX86_BUILTIN_PSRLDI128,
20832 IX86_BUILTIN_PSRLQI128,
20834 IX86_BUILTIN_PSLLDQ128,
20835 IX86_BUILTIN_PSLLW128,
20836 IX86_BUILTIN_PSLLD128,
20837 IX86_BUILTIN_PSLLQ128,
20838 IX86_BUILTIN_PSRAW128,
20839 IX86_BUILTIN_PSRAD128,
20840 IX86_BUILTIN_PSRLW128,
20841 IX86_BUILTIN_PSRLD128,
20842 IX86_BUILTIN_PSRLQ128,
20844 IX86_BUILTIN_PUNPCKHBW128,
20845 IX86_BUILTIN_PUNPCKHWD128,
20846 IX86_BUILTIN_PUNPCKHDQ128,
20847 IX86_BUILTIN_PUNPCKHQDQ128,
20848 IX86_BUILTIN_PUNPCKLBW128,
20849 IX86_BUILTIN_PUNPCKLWD128,
20850 IX86_BUILTIN_PUNPCKLDQ128,
20851 IX86_BUILTIN_PUNPCKLQDQ128,
20853 IX86_BUILTIN_CLFLUSH,
20854 IX86_BUILTIN_MFENCE,
20855 IX86_BUILTIN_LFENCE,
20857 IX86_BUILTIN_BSRSI,
20858 IX86_BUILTIN_BSRDI,
20859 IX86_BUILTIN_RDPMC,
20860 IX86_BUILTIN_RDTSC,
20861 IX86_BUILTIN_RDTSCP,
20862 IX86_BUILTIN_ROLQI,
20863 IX86_BUILTIN_ROLHI,
20864 IX86_BUILTIN_RORQI,
20865 IX86_BUILTIN_RORHI,
20868 IX86_BUILTIN_ADDSUBPS,
20869 IX86_BUILTIN_HADDPS,
20870 IX86_BUILTIN_HSUBPS,
20871 IX86_BUILTIN_MOVSHDUP,
20872 IX86_BUILTIN_MOVSLDUP,
20873 IX86_BUILTIN_ADDSUBPD,
20874 IX86_BUILTIN_HADDPD,
20875 IX86_BUILTIN_HSUBPD,
20876 IX86_BUILTIN_LDDQU,
20878 IX86_BUILTIN_MONITOR,
20879 IX86_BUILTIN_MWAIT,
20882 IX86_BUILTIN_PHADDW,
20883 IX86_BUILTIN_PHADDD,
20884 IX86_BUILTIN_PHADDSW,
20885 IX86_BUILTIN_PHSUBW,
20886 IX86_BUILTIN_PHSUBD,
20887 IX86_BUILTIN_PHSUBSW,
20888 IX86_BUILTIN_PMADDUBSW,
20889 IX86_BUILTIN_PMULHRSW,
20890 IX86_BUILTIN_PSHUFB,
20891 IX86_BUILTIN_PSIGNB,
20892 IX86_BUILTIN_PSIGNW,
20893 IX86_BUILTIN_PSIGND,
20894 IX86_BUILTIN_PALIGNR,
20895 IX86_BUILTIN_PABSB,
20896 IX86_BUILTIN_PABSW,
20897 IX86_BUILTIN_PABSD,
20899 IX86_BUILTIN_PHADDW128,
20900 IX86_BUILTIN_PHADDD128,
20901 IX86_BUILTIN_PHADDSW128,
20902 IX86_BUILTIN_PHSUBW128,
20903 IX86_BUILTIN_PHSUBD128,
20904 IX86_BUILTIN_PHSUBSW128,
20905 IX86_BUILTIN_PMADDUBSW128,
20906 IX86_BUILTIN_PMULHRSW128,
20907 IX86_BUILTIN_PSHUFB128,
20908 IX86_BUILTIN_PSIGNB128,
20909 IX86_BUILTIN_PSIGNW128,
20910 IX86_BUILTIN_PSIGND128,
20911 IX86_BUILTIN_PALIGNR128,
20912 IX86_BUILTIN_PABSB128,
20913 IX86_BUILTIN_PABSW128,
20914 IX86_BUILTIN_PABSD128,
20916 /* AMDFAM10 - SSE4A New Instructions. */
20917 IX86_BUILTIN_MOVNTSD,
20918 IX86_BUILTIN_MOVNTSS,
20919 IX86_BUILTIN_EXTRQI,
20920 IX86_BUILTIN_EXTRQ,
20921 IX86_BUILTIN_INSERTQI,
20922 IX86_BUILTIN_INSERTQ,
20925 IX86_BUILTIN_BLENDPD,
20926 IX86_BUILTIN_BLENDPS,
20927 IX86_BUILTIN_BLENDVPD,
20928 IX86_BUILTIN_BLENDVPS,
20929 IX86_BUILTIN_PBLENDVB128,
20930 IX86_BUILTIN_PBLENDW128,
20935 IX86_BUILTIN_INSERTPS128,
20937 IX86_BUILTIN_MOVNTDQA,
20938 IX86_BUILTIN_MPSADBW128,
20939 IX86_BUILTIN_PACKUSDW128,
20940 IX86_BUILTIN_PCMPEQQ,
20941 IX86_BUILTIN_PHMINPOSUW128,
20943 IX86_BUILTIN_PMAXSB128,
20944 IX86_BUILTIN_PMAXSD128,
20945 IX86_BUILTIN_PMAXUD128,
20946 IX86_BUILTIN_PMAXUW128,
20948 IX86_BUILTIN_PMINSB128,
20949 IX86_BUILTIN_PMINSD128,
20950 IX86_BUILTIN_PMINUD128,
20951 IX86_BUILTIN_PMINUW128,
20953 IX86_BUILTIN_PMOVSXBW128,
20954 IX86_BUILTIN_PMOVSXBD128,
20955 IX86_BUILTIN_PMOVSXBQ128,
20956 IX86_BUILTIN_PMOVSXWD128,
20957 IX86_BUILTIN_PMOVSXWQ128,
20958 IX86_BUILTIN_PMOVSXDQ128,
20960 IX86_BUILTIN_PMOVZXBW128,
20961 IX86_BUILTIN_PMOVZXBD128,
20962 IX86_BUILTIN_PMOVZXBQ128,
20963 IX86_BUILTIN_PMOVZXWD128,
20964 IX86_BUILTIN_PMOVZXWQ128,
20965 IX86_BUILTIN_PMOVZXDQ128,
20967 IX86_BUILTIN_PMULDQ128,
20968 IX86_BUILTIN_PMULLD128,
20970 IX86_BUILTIN_ROUNDPD,
20971 IX86_BUILTIN_ROUNDPS,
20972 IX86_BUILTIN_ROUNDSD,
20973 IX86_BUILTIN_ROUNDSS,
20975 IX86_BUILTIN_PTESTZ,
20976 IX86_BUILTIN_PTESTC,
20977 IX86_BUILTIN_PTESTNZC,
20979 IX86_BUILTIN_VEC_INIT_V2SI,
20980 IX86_BUILTIN_VEC_INIT_V4HI,
20981 IX86_BUILTIN_VEC_INIT_V8QI,
20982 IX86_BUILTIN_VEC_EXT_V2DF,
20983 IX86_BUILTIN_VEC_EXT_V2DI,
20984 IX86_BUILTIN_VEC_EXT_V4SF,
20985 IX86_BUILTIN_VEC_EXT_V4SI,
20986 IX86_BUILTIN_VEC_EXT_V8HI,
20987 IX86_BUILTIN_VEC_EXT_V2SI,
20988 IX86_BUILTIN_VEC_EXT_V4HI,
20989 IX86_BUILTIN_VEC_EXT_V16QI,
20990 IX86_BUILTIN_VEC_SET_V2DI,
20991 IX86_BUILTIN_VEC_SET_V4SF,
20992 IX86_BUILTIN_VEC_SET_V4SI,
20993 IX86_BUILTIN_VEC_SET_V8HI,
20994 IX86_BUILTIN_VEC_SET_V4HI,
20995 IX86_BUILTIN_VEC_SET_V16QI,
20997 IX86_BUILTIN_VEC_PACK_SFIX,
21000 IX86_BUILTIN_CRC32QI,
21001 IX86_BUILTIN_CRC32HI,
21002 IX86_BUILTIN_CRC32SI,
21003 IX86_BUILTIN_CRC32DI,
21005 IX86_BUILTIN_PCMPESTRI128,
21006 IX86_BUILTIN_PCMPESTRM128,
21007 IX86_BUILTIN_PCMPESTRA128,
21008 IX86_BUILTIN_PCMPESTRC128,
21009 IX86_BUILTIN_PCMPESTRO128,
21010 IX86_BUILTIN_PCMPESTRS128,
21011 IX86_BUILTIN_PCMPESTRZ128,
21012 IX86_BUILTIN_PCMPISTRI128,
21013 IX86_BUILTIN_PCMPISTRM128,
21014 IX86_BUILTIN_PCMPISTRA128,
21015 IX86_BUILTIN_PCMPISTRC128,
21016 IX86_BUILTIN_PCMPISTRO128,
21017 IX86_BUILTIN_PCMPISTRS128,
21018 IX86_BUILTIN_PCMPISTRZ128,
21020 IX86_BUILTIN_PCMPGTQ,
21022 /* AES instructions */
21023 IX86_BUILTIN_AESENC128,
21024 IX86_BUILTIN_AESENCLAST128,
21025 IX86_BUILTIN_AESDEC128,
21026 IX86_BUILTIN_AESDECLAST128,
21027 IX86_BUILTIN_AESIMC128,
21028 IX86_BUILTIN_AESKEYGENASSIST128,
21030 /* PCLMUL instruction */
21031 IX86_BUILTIN_PCLMULQDQ128,
21034 IX86_BUILTIN_ADDPD256,
21035 IX86_BUILTIN_ADDPS256,
21036 IX86_BUILTIN_ADDSUBPD256,
21037 IX86_BUILTIN_ADDSUBPS256,
21038 IX86_BUILTIN_ANDPD256,
21039 IX86_BUILTIN_ANDPS256,
21040 IX86_BUILTIN_ANDNPD256,
21041 IX86_BUILTIN_ANDNPS256,
21042 IX86_BUILTIN_BLENDPD256,
21043 IX86_BUILTIN_BLENDPS256,
21044 IX86_BUILTIN_BLENDVPD256,
21045 IX86_BUILTIN_BLENDVPS256,
21046 IX86_BUILTIN_DIVPD256,
21047 IX86_BUILTIN_DIVPS256,
21048 IX86_BUILTIN_DPPS256,
21049 IX86_BUILTIN_HADDPD256,
21050 IX86_BUILTIN_HADDPS256,
21051 IX86_BUILTIN_HSUBPD256,
21052 IX86_BUILTIN_HSUBPS256,
21053 IX86_BUILTIN_MAXPD256,
21054 IX86_BUILTIN_MAXPS256,
21055 IX86_BUILTIN_MINPD256,
21056 IX86_BUILTIN_MINPS256,
21057 IX86_BUILTIN_MULPD256,
21058 IX86_BUILTIN_MULPS256,
21059 IX86_BUILTIN_ORPD256,
21060 IX86_BUILTIN_ORPS256,
21061 IX86_BUILTIN_SHUFPD256,
21062 IX86_BUILTIN_SHUFPS256,
21063 IX86_BUILTIN_SUBPD256,
21064 IX86_BUILTIN_SUBPS256,
21065 IX86_BUILTIN_XORPD256,
21066 IX86_BUILTIN_XORPS256,
21067 IX86_BUILTIN_CMPSD,
21068 IX86_BUILTIN_CMPSS,
21069 IX86_BUILTIN_CMPPD,
21070 IX86_BUILTIN_CMPPS,
21071 IX86_BUILTIN_CMPPD256,
21072 IX86_BUILTIN_CMPPS256,
21073 IX86_BUILTIN_CVTDQ2PD256,
21074 IX86_BUILTIN_CVTDQ2PS256,
21075 IX86_BUILTIN_CVTPD2PS256,
21076 IX86_BUILTIN_CVTPS2DQ256,
21077 IX86_BUILTIN_CVTPS2PD256,
21078 IX86_BUILTIN_CVTTPD2DQ256,
21079 IX86_BUILTIN_CVTPD2DQ256,
21080 IX86_BUILTIN_CVTTPS2DQ256,
21081 IX86_BUILTIN_EXTRACTF128PD256,
21082 IX86_BUILTIN_EXTRACTF128PS256,
21083 IX86_BUILTIN_EXTRACTF128SI256,
21084 IX86_BUILTIN_VZEROALL,
21085 IX86_BUILTIN_VZEROUPPER,
21086 IX86_BUILTIN_VPERMILVARPD,
21087 IX86_BUILTIN_VPERMILVARPS,
21088 IX86_BUILTIN_VPERMILVARPD256,
21089 IX86_BUILTIN_VPERMILVARPS256,
21090 IX86_BUILTIN_VPERMILPD,
21091 IX86_BUILTIN_VPERMILPS,
21092 IX86_BUILTIN_VPERMILPD256,
21093 IX86_BUILTIN_VPERMILPS256,
21094 IX86_BUILTIN_VPERMIL2PD,
21095 IX86_BUILTIN_VPERMIL2PS,
21096 IX86_BUILTIN_VPERMIL2PD256,
21097 IX86_BUILTIN_VPERMIL2PS256,
21098 IX86_BUILTIN_VPERM2F128PD256,
21099 IX86_BUILTIN_VPERM2F128PS256,
21100 IX86_BUILTIN_VPERM2F128SI256,
21101 IX86_BUILTIN_VBROADCASTSS,
21102 IX86_BUILTIN_VBROADCASTSD256,
21103 IX86_BUILTIN_VBROADCASTSS256,
21104 IX86_BUILTIN_VBROADCASTPD256,
21105 IX86_BUILTIN_VBROADCASTPS256,
21106 IX86_BUILTIN_VINSERTF128PD256,
21107 IX86_BUILTIN_VINSERTF128PS256,
21108 IX86_BUILTIN_VINSERTF128SI256,
21109 IX86_BUILTIN_LOADUPD256,
21110 IX86_BUILTIN_LOADUPS256,
21111 IX86_BUILTIN_STOREUPD256,
21112 IX86_BUILTIN_STOREUPS256,
21113 IX86_BUILTIN_LDDQU256,
21114 IX86_BUILTIN_MOVNTDQ256,
21115 IX86_BUILTIN_MOVNTPD256,
21116 IX86_BUILTIN_MOVNTPS256,
21117 IX86_BUILTIN_LOADDQU256,
21118 IX86_BUILTIN_STOREDQU256,
21119 IX86_BUILTIN_MASKLOADPD,
21120 IX86_BUILTIN_MASKLOADPS,
21121 IX86_BUILTIN_MASKSTOREPD,
21122 IX86_BUILTIN_MASKSTOREPS,
21123 IX86_BUILTIN_MASKLOADPD256,
21124 IX86_BUILTIN_MASKLOADPS256,
21125 IX86_BUILTIN_MASKSTOREPD256,
21126 IX86_BUILTIN_MASKSTOREPS256,
21127 IX86_BUILTIN_MOVSHDUP256,
21128 IX86_BUILTIN_MOVSLDUP256,
21129 IX86_BUILTIN_MOVDDUP256,
21131 IX86_BUILTIN_SQRTPD256,
21132 IX86_BUILTIN_SQRTPS256,
21133 IX86_BUILTIN_SQRTPS_NR256,
21134 IX86_BUILTIN_RSQRTPS256,
21135 IX86_BUILTIN_RSQRTPS_NR256,
21137 IX86_BUILTIN_RCPPS256,
21139 IX86_BUILTIN_ROUNDPD256,
21140 IX86_BUILTIN_ROUNDPS256,
21142 IX86_BUILTIN_UNPCKHPD256,
21143 IX86_BUILTIN_UNPCKLPD256,
21144 IX86_BUILTIN_UNPCKHPS256,
21145 IX86_BUILTIN_UNPCKLPS256,
21147 IX86_BUILTIN_SI256_SI,
21148 IX86_BUILTIN_PS256_PS,
21149 IX86_BUILTIN_PD256_PD,
21150 IX86_BUILTIN_SI_SI256,
21151 IX86_BUILTIN_PS_PS256,
21152 IX86_BUILTIN_PD_PD256,
21154 IX86_BUILTIN_VTESTZPD,
21155 IX86_BUILTIN_VTESTCPD,
21156 IX86_BUILTIN_VTESTNZCPD,
21157 IX86_BUILTIN_VTESTZPS,
21158 IX86_BUILTIN_VTESTCPS,
21159 IX86_BUILTIN_VTESTNZCPS,
21160 IX86_BUILTIN_VTESTZPD256,
21161 IX86_BUILTIN_VTESTCPD256,
21162 IX86_BUILTIN_VTESTNZCPD256,
21163 IX86_BUILTIN_VTESTZPS256,
21164 IX86_BUILTIN_VTESTCPS256,
21165 IX86_BUILTIN_VTESTNZCPS256,
21166 IX86_BUILTIN_PTESTZ256,
21167 IX86_BUILTIN_PTESTC256,
21168 IX86_BUILTIN_PTESTNZC256,
21170 IX86_BUILTIN_MOVMSKPD256,
21171 IX86_BUILTIN_MOVMSKPS256,
21173 /* TFmode support builtins. */
21175 IX86_BUILTIN_HUGE_VALQ,
21176 IX86_BUILTIN_FABSQ,
21177 IX86_BUILTIN_COPYSIGNQ,
21179 /* Vectorizer support builtins. */
21180 IX86_BUILTIN_CPYSGNPS,
21181 IX86_BUILTIN_CPYSGNPD,
21183 IX86_BUILTIN_CVTUDQ2PS,
21185 IX86_BUILTIN_VEC_PERM_V2DF,
21186 IX86_BUILTIN_VEC_PERM_V4SF,
21187 IX86_BUILTIN_VEC_PERM_V2DI,
21188 IX86_BUILTIN_VEC_PERM_V4SI,
21189 IX86_BUILTIN_VEC_PERM_V8HI,
21190 IX86_BUILTIN_VEC_PERM_V16QI,
21191 IX86_BUILTIN_VEC_PERM_V2DI_U,
21192 IX86_BUILTIN_VEC_PERM_V4SI_U,
21193 IX86_BUILTIN_VEC_PERM_V8HI_U,
21194 IX86_BUILTIN_VEC_PERM_V16QI_U,
21195 IX86_BUILTIN_VEC_PERM_V4DF,
21196 IX86_BUILTIN_VEC_PERM_V8SF,
21198 /* FMA4 and XOP instructions. */
21199 IX86_BUILTIN_VFMADDSS,
21200 IX86_BUILTIN_VFMADDSD,
21201 IX86_BUILTIN_VFMADDPS,
21202 IX86_BUILTIN_VFMADDPD,
21203 IX86_BUILTIN_VFMSUBSS,
21204 IX86_BUILTIN_VFMSUBSD,
21205 IX86_BUILTIN_VFMSUBPS,
21206 IX86_BUILTIN_VFMSUBPD,
21207 IX86_BUILTIN_VFMADDSUBPS,
21208 IX86_BUILTIN_VFMADDSUBPD,
21209 IX86_BUILTIN_VFMSUBADDPS,
21210 IX86_BUILTIN_VFMSUBADDPD,
21211 IX86_BUILTIN_VFNMADDSS,
21212 IX86_BUILTIN_VFNMADDSD,
21213 IX86_BUILTIN_VFNMADDPS,
21214 IX86_BUILTIN_VFNMADDPD,
21215 IX86_BUILTIN_VFNMSUBSS,
21216 IX86_BUILTIN_VFNMSUBSD,
21217 IX86_BUILTIN_VFNMSUBPS,
21218 IX86_BUILTIN_VFNMSUBPD,
21219 IX86_BUILTIN_VFMADDPS256,
21220 IX86_BUILTIN_VFMADDPD256,
21221 IX86_BUILTIN_VFMSUBPS256,
21222 IX86_BUILTIN_VFMSUBPD256,
21223 IX86_BUILTIN_VFMADDSUBPS256,
21224 IX86_BUILTIN_VFMADDSUBPD256,
21225 IX86_BUILTIN_VFMSUBADDPS256,
21226 IX86_BUILTIN_VFMSUBADDPD256,
21227 IX86_BUILTIN_VFNMADDPS256,
21228 IX86_BUILTIN_VFNMADDPD256,
21229 IX86_BUILTIN_VFNMSUBPS256,
21230 IX86_BUILTIN_VFNMSUBPD256,
21232 IX86_BUILTIN_VPCMOV,
21233 IX86_BUILTIN_VPCMOV_V2DI,
21234 IX86_BUILTIN_VPCMOV_V4SI,
21235 IX86_BUILTIN_VPCMOV_V8HI,
21236 IX86_BUILTIN_VPCMOV_V16QI,
21237 IX86_BUILTIN_VPCMOV_V4SF,
21238 IX86_BUILTIN_VPCMOV_V2DF,
21239 IX86_BUILTIN_VPCMOV256,
21240 IX86_BUILTIN_VPCMOV_V4DI256,
21241 IX86_BUILTIN_VPCMOV_V8SI256,
21242 IX86_BUILTIN_VPCMOV_V16HI256,
21243 IX86_BUILTIN_VPCMOV_V32QI256,
21244 IX86_BUILTIN_VPCMOV_V8SF256,
21245 IX86_BUILTIN_VPCMOV_V4DF256,
21247 IX86_BUILTIN_VPPERM,
21249 IX86_BUILTIN_VPMACSSWW,
21250 IX86_BUILTIN_VPMACSWW,
21251 IX86_BUILTIN_VPMACSSWD,
21252 IX86_BUILTIN_VPMACSWD,
21253 IX86_BUILTIN_VPMACSSDD,
21254 IX86_BUILTIN_VPMACSDD,
21255 IX86_BUILTIN_VPMACSSDQL,
21256 IX86_BUILTIN_VPMACSSDQH,
21257 IX86_BUILTIN_VPMACSDQL,
21258 IX86_BUILTIN_VPMACSDQH,
21259 IX86_BUILTIN_VPMADCSSWD,
21260 IX86_BUILTIN_VPMADCSWD,
21262 IX86_BUILTIN_VPHADDBW,
21263 IX86_BUILTIN_VPHADDBD,
21264 IX86_BUILTIN_VPHADDBQ,
21265 IX86_BUILTIN_VPHADDWD,
21266 IX86_BUILTIN_VPHADDWQ,
21267 IX86_BUILTIN_VPHADDDQ,
21268 IX86_BUILTIN_VPHADDUBW,
21269 IX86_BUILTIN_VPHADDUBD,
21270 IX86_BUILTIN_VPHADDUBQ,
21271 IX86_BUILTIN_VPHADDUWD,
21272 IX86_BUILTIN_VPHADDUWQ,
21273 IX86_BUILTIN_VPHADDUDQ,
21274 IX86_BUILTIN_VPHSUBBW,
21275 IX86_BUILTIN_VPHSUBWD,
21276 IX86_BUILTIN_VPHSUBDQ,
21278 IX86_BUILTIN_VPROTB,
21279 IX86_BUILTIN_VPROTW,
21280 IX86_BUILTIN_VPROTD,
21281 IX86_BUILTIN_VPROTQ,
21282 IX86_BUILTIN_VPROTB_IMM,
21283 IX86_BUILTIN_VPROTW_IMM,
21284 IX86_BUILTIN_VPROTD_IMM,
21285 IX86_BUILTIN_VPROTQ_IMM,
21287 IX86_BUILTIN_VPSHLB,
21288 IX86_BUILTIN_VPSHLW,
21289 IX86_BUILTIN_VPSHLD,
21290 IX86_BUILTIN_VPSHLQ,
21291 IX86_BUILTIN_VPSHAB,
21292 IX86_BUILTIN_VPSHAW,
21293 IX86_BUILTIN_VPSHAD,
21294 IX86_BUILTIN_VPSHAQ,
21296 IX86_BUILTIN_VFRCZSS,
21297 IX86_BUILTIN_VFRCZSD,
21298 IX86_BUILTIN_VFRCZPS,
21299 IX86_BUILTIN_VFRCZPD,
21300 IX86_BUILTIN_VFRCZPS256,
21301 IX86_BUILTIN_VFRCZPD256,
21303 IX86_BUILTIN_VPCOMEQUB,
21304 IX86_BUILTIN_VPCOMNEUB,
21305 IX86_BUILTIN_VPCOMLTUB,
21306 IX86_BUILTIN_VPCOMLEUB,
21307 IX86_BUILTIN_VPCOMGTUB,
21308 IX86_BUILTIN_VPCOMGEUB,
21309 IX86_BUILTIN_VPCOMFALSEUB,
21310 IX86_BUILTIN_VPCOMTRUEUB,
21312 IX86_BUILTIN_VPCOMEQUW,
21313 IX86_BUILTIN_VPCOMNEUW,
21314 IX86_BUILTIN_VPCOMLTUW,
21315 IX86_BUILTIN_VPCOMLEUW,
21316 IX86_BUILTIN_VPCOMGTUW,
21317 IX86_BUILTIN_VPCOMGEUW,
21318 IX86_BUILTIN_VPCOMFALSEUW,
21319 IX86_BUILTIN_VPCOMTRUEUW,
21321 IX86_BUILTIN_VPCOMEQUD,
21322 IX86_BUILTIN_VPCOMNEUD,
21323 IX86_BUILTIN_VPCOMLTUD,
21324 IX86_BUILTIN_VPCOMLEUD,
21325 IX86_BUILTIN_VPCOMGTUD,
21326 IX86_BUILTIN_VPCOMGEUD,
21327 IX86_BUILTIN_VPCOMFALSEUD,
21328 IX86_BUILTIN_VPCOMTRUEUD,
21330 IX86_BUILTIN_VPCOMEQUQ,
21331 IX86_BUILTIN_VPCOMNEUQ,
21332 IX86_BUILTIN_VPCOMLTUQ,
21333 IX86_BUILTIN_VPCOMLEUQ,
21334 IX86_BUILTIN_VPCOMGTUQ,
21335 IX86_BUILTIN_VPCOMGEUQ,
21336 IX86_BUILTIN_VPCOMFALSEUQ,
21337 IX86_BUILTIN_VPCOMTRUEUQ,
21339 IX86_BUILTIN_VPCOMEQB,
21340 IX86_BUILTIN_VPCOMNEB,
21341 IX86_BUILTIN_VPCOMLTB,
21342 IX86_BUILTIN_VPCOMLEB,
21343 IX86_BUILTIN_VPCOMGTB,
21344 IX86_BUILTIN_VPCOMGEB,
21345 IX86_BUILTIN_VPCOMFALSEB,
21346 IX86_BUILTIN_VPCOMTRUEB,
21348 IX86_BUILTIN_VPCOMEQW,
21349 IX86_BUILTIN_VPCOMNEW,
21350 IX86_BUILTIN_VPCOMLTW,
21351 IX86_BUILTIN_VPCOMLEW,
21352 IX86_BUILTIN_VPCOMGTW,
21353 IX86_BUILTIN_VPCOMGEW,
21354 IX86_BUILTIN_VPCOMFALSEW,
21355 IX86_BUILTIN_VPCOMTRUEW,
21357 IX86_BUILTIN_VPCOMEQD,
21358 IX86_BUILTIN_VPCOMNED,
21359 IX86_BUILTIN_VPCOMLTD,
21360 IX86_BUILTIN_VPCOMLED,
21361 IX86_BUILTIN_VPCOMGTD,
21362 IX86_BUILTIN_VPCOMGED,
21363 IX86_BUILTIN_VPCOMFALSED,
21364 IX86_BUILTIN_VPCOMTRUED,
21366 IX86_BUILTIN_VPCOMEQQ,
21367 IX86_BUILTIN_VPCOMNEQ,
21368 IX86_BUILTIN_VPCOMLTQ,
21369 IX86_BUILTIN_VPCOMLEQ,
21370 IX86_BUILTIN_VPCOMGTQ,
21371 IX86_BUILTIN_VPCOMGEQ,
21372 IX86_BUILTIN_VPCOMFALSEQ,
21373 IX86_BUILTIN_VPCOMTRUEQ,
21375 /* LWP instructions. */
21376 IX86_BUILTIN_LLWPCB,
21377 IX86_BUILTIN_SLWPCB,
21378 IX86_BUILTIN_LWPVAL32,
21379 IX86_BUILTIN_LWPVAL64,
21380 IX86_BUILTIN_LWPINS32,
21381 IX86_BUILTIN_LWPINS64,
21388 /* Table for the ix86 builtin decls. */
21389 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
21391 /* Table of all of the builtin functions that are possible with different ISAs
21392 but are waiting to be built until a function is declared to use that
21393 ISA. */
21394 struct builtin_isa {
21395 const char *name; /* function name */
21396 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
21397 int isa; /* isa_flags this builtin is defined for */
21398 bool const_p; /* true if the declaration is constant */
21399 bool set_and_not_built_p; /* true if the decl has been deferred and not yet built */
21402 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
21405 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Save the MASK
21406 of which isa_flags to use in the ix86_builtins_isa array. Stores the
21407 function decl in the ix86_builtins array. Returns the function decl or
21408 NULL_TREE if the builtin was not added.
21410 If the front end has a special hook for builtin functions, delay adding
21411 builtin functions that aren't in the current ISA until the ISA is changed
21412 with function specific optimization. Doing so can save about 300K for the
21413 default compiler. When the builtin is expanded, check at that time whether
21414 it is valid.
21416 If the front end doesn't have a special hook, record all builtins, even
21417 those that aren't in the current ISA, in case the user uses
21418 function specific options for a different ISA, so that we don't get scope
21419 errors if a builtin is added in the middle of a function scope. */
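/* An illustrative registration (a sketch; the bulk of the real ones
   are driven by the bdesc_* tables below, and the
   V4SF_FTYPE_V4SF_V4SF type code is assumed to come from
   i386-builtin-types.def):

     def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_addps",
			V4SF_FTYPE_V4SF_V4SF, IX86_BUILTIN_ADDPS);

   If SSE is already enabled, or the front end has no special builtin
   hook, the decl is built immediately; otherwise it is parked in
   ix86_builtins_isa[] and built later by ix86_add_new_builtins once
   a matching ISA is turned on.  */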
21422 def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
21423 enum ix86_builtins code)
21425 tree decl = NULL_TREE;
21427 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
21429 ix86_builtins_isa[(int) code].isa = mask;
21432 || (mask & ix86_isa_flags) != 0
21433 || (lang_hooks.builtin_function
21434 == lang_hooks.builtin_function_ext_scope))
21437 tree type = ix86_get_builtin_func_type (tcode);
21438 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
21440 ix86_builtins[(int) code] = decl;
21441 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
21445 ix86_builtins[(int) code] = NULL_TREE;
21446 ix86_builtins_isa[(int) code].tcode = tcode;
21447 ix86_builtins_isa[(int) code].name = name;
21448 ix86_builtins_isa[(int) code].const_p = false;
21449 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
21456 /* Like def_builtin, but also marks the function decl "const". */
21459 def_builtin_const (int mask, const char *name,
21460 enum ix86_builtin_func_type tcode, enum ix86_builtins code)
21462 tree decl = def_builtin (mask, name, tcode, code);
21464 TREE_READONLY (decl) = 1;
21466 ix86_builtins_isa[(int) code].const_p = true;
21471 /* Add any new builtin functions for a given ISA that may not have been
21472 declared. This saves a bit of space compared to adding all of the
21473 declarations to the tree up front, even those that never get used. */
21476 ix86_add_new_builtins (int isa)
21480 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
21482 if ((ix86_builtins_isa[i].isa & isa) != 0
21483 && ix86_builtins_isa[i].set_and_not_built_p)
21487 /* Don't define the builtin again. */
21488 ix86_builtins_isa[i].set_and_not_built_p = false;
21490 type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
21491 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
21492 type, i, BUILT_IN_MD, NULL,
21495 ix86_builtins[i] = decl;
21496 if (ix86_builtins_isa[i].const_p)
21497 TREE_READONLY (decl) = 1;
21502 /* Bits for builtin_description.flag. */
21504 /* Set when we don't support the comparison natively, and should
21505 swap_comparison in order to support it. */
21506 #define BUILTIN_DESC_SWAP_OPERANDS 1
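/* For example, SSE's cmpps predicate field encodes LT and LE (and
   their negations) but not GT or GE, so a greater-than comparison is
   typically expanded by swapping the two operands and emitting the
   less-than form; descriptors that need this carry
   BUILTIN_DESC_SWAP_OPERANDS in their flag field.  */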
21508 struct builtin_description
21510 const unsigned int mask;
21511 const enum insn_code icode;
21512 const char *const name;
21513 const enum ix86_builtins code;
21514 const enum rtx_code comparison;
21518 static const struct builtin_description bdesc_comi[] =
21520 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
21521 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
21522 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
21523 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
21524 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
21525 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
21526 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
21527 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
21528 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
21529 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
21530 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
21531 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
21532 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
21533 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
21534 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
21535 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
21536 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
21537 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
21538 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
21539 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
21540 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
21541 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
21542 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
21543 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
21546 static const struct builtin_description bdesc_pcmpestr[] =
21549 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
21550 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
21551 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
21552 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
21553 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
21554 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
21555 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
21558 static const struct builtin_description bdesc_pcmpistr[] =
21561 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
21562 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
21563 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
21564 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
21565 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
21566 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
21567 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
21570 /* Special builtins with variable number of arguments. */
21571 static const struct builtin_description bdesc_special_args[] =
21573 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
21574 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
21577 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21580 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21583 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21584 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21585 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21587 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21588 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21589 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21590 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21592 /* SSE or 3DNow!A */
21593 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21594 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
21597 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21598 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21599 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21600 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
21601 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21602 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
21603 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
21604 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
21605 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21607 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21608 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21611 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21614 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
21617 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21618 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21621 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
21622 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
21624 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21625 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21626 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21627 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
21628 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
21630 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21631 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21632 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21633 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21634 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21635 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
21636 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21638 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
21639 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21640 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21642 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
21643 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
21644 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
21645 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
21646 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
21647 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
21648 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
21649 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
21651 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
21652 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
21653 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
21654 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
21655 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
21656 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
21660 /* Builtins with variable number of arguments. */
21661 static const struct builtin_description bdesc_args[] =
21663 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
21664 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
21665 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
21666 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21667 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21668 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21669 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21672 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21673 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21674 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21675 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21676 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21677 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21679 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21680 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21681 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21682 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21683 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21684 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21685 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21686 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21688 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21689 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21691 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21692 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21693 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21694 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21696 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21697 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21698 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21699 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21700 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21701 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21703 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21704 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21705 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21706 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21707 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21708 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21710 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21711 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
21712 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21714 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
21716 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21717 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21718 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21719 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21720 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21721 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21723 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21724 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21725 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21726 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21727 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21728 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21730 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21731 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21732 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21733 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
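
/* 3DNow! */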
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },

{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
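
/* 3DNow!A */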
{ OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
{ OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
{ OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
{ OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
{ OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
{ OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
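
/* SSE */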
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },

{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

{ OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },

{ OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

{ OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

{ OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },
{ OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },

{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
{ OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },

/* SSE MMX or 3Dnow!A */
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },

{ OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
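
/* SSE2 */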
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
{ OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
{ OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
{ OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },

{ OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
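
/* SSE2 MMX */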
{ OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
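
/* SSE3 */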
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },

{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
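
/* SSSE3 */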
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },

{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
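
/* SSSE3.  */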
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
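
/* SSE4.1 */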
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },

{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },

{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
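
/* SSE4.1 */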
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
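
/* SSE4.2 */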
{ OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
{ OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
{ OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
{ OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
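
/* SSE4A */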
{ OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
{ OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
{ OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
{ OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
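
/* AES */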
{ OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
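
/* PCLMUL */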
{ OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
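
/* AVX */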
{ OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },

{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
22220 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
22221 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22222 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
22223 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22224 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22225 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22226 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22227 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22228 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
22229 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
22230 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22231 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22232 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22233 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
22234 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
22235 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },
22237 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22238 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22239 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22241 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22242 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22243 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22244 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22245 { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22247 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22249 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22250 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22252 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22253 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22254 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22255 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22257 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
22258 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
22259 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
22260 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si_si256, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
22261 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps_ps256, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
22262 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd_pd256, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
22264 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22265 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22266 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22267 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22268 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22269 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22270 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22271 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22272 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22273 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22274 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22275 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22276 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22277 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22278 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22280 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
22281 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
22283 { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
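
/* Note on the vtest/ptest rows above (informal, added for readability):
   the rtx comparison code in each row selects which condition-code bit
   the builtin reads after the VTESTP[SD]/VPTEST insn: EQ reads ZF (the
   "testz" forms), LTU reads CF (the "testc" forms), and GTU tests that
   neither flag is set (the "testnzc" forms).  A minimal usage sketch,
   assuming the AVX vector types from the headers:

     int both_zero_masked (__v4df a, __v4df b)
     {
       return __builtin_ia32_vtestzpd256 (a, b);
     }
*/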
/* FMA4 and XOP.  */
#define MULTI_ARG_4_DF2_DI_I	V2DF_FTYPE_V2DF_V2DF_V2DI_INT
#define MULTI_ARG_4_DF2_DI_I1	V4DF_FTYPE_V4DF_V4DF_V4DI_INT
#define MULTI_ARG_4_SF2_SI_I	V4SF_FTYPE_V4SF_V4SF_V4SI_INT
#define MULTI_ARG_4_SF2_SI_I1	V8SF_FTYPE_V8SF_V8SF_V8SI_INT
#define MULTI_ARG_3_SF		V4SF_FTYPE_V4SF_V4SF_V4SF
#define MULTI_ARG_3_DF		V2DF_FTYPE_V2DF_V2DF_V2DF
#define MULTI_ARG_3_SF2		V8SF_FTYPE_V8SF_V8SF_V8SF
#define MULTI_ARG_3_DF2		V4DF_FTYPE_V4DF_V4DF_V4DF
#define MULTI_ARG_3_DI		V2DI_FTYPE_V2DI_V2DI_V2DI
#define MULTI_ARG_3_SI		V4SI_FTYPE_V4SI_V4SI_V4SI
#define MULTI_ARG_3_SI_DI	V4SI_FTYPE_V4SI_V4SI_V2DI
#define MULTI_ARG_3_HI		V8HI_FTYPE_V8HI_V8HI_V8HI
#define MULTI_ARG_3_HI_SI	V8HI_FTYPE_V8HI_V8HI_V4SI
#define MULTI_ARG_3_QI		V16QI_FTYPE_V16QI_V16QI_V16QI
#define MULTI_ARG_3_DI2		V4DI_FTYPE_V4DI_V4DI_V4DI
#define MULTI_ARG_3_SI2		V8SI_FTYPE_V8SI_V8SI_V8SI
#define MULTI_ARG_3_HI2		V16HI_FTYPE_V16HI_V16HI_V16HI
#define MULTI_ARG_3_QI2		V32QI_FTYPE_V32QI_V32QI_V32QI
#define MULTI_ARG_2_SF		V4SF_FTYPE_V4SF_V4SF
#define MULTI_ARG_2_DF		V2DF_FTYPE_V2DF_V2DF
#define MULTI_ARG_2_DI		V2DI_FTYPE_V2DI_V2DI
#define MULTI_ARG_2_SI		V4SI_FTYPE_V4SI_V4SI
#define MULTI_ARG_2_HI		V8HI_FTYPE_V8HI_V8HI
#define MULTI_ARG_2_QI		V16QI_FTYPE_V16QI_V16QI
#define MULTI_ARG_2_DI_IMM	V2DI_FTYPE_V2DI_SI
#define MULTI_ARG_2_SI_IMM	V4SI_FTYPE_V4SI_SI
#define MULTI_ARG_2_HI_IMM	V8HI_FTYPE_V8HI_SI
#define MULTI_ARG_2_QI_IMM	V16QI_FTYPE_V16QI_SI
#define MULTI_ARG_2_DI_CMP	V2DI_FTYPE_V2DI_V2DI_CMP
#define MULTI_ARG_2_SI_CMP	V4SI_FTYPE_V4SI_V4SI_CMP
#define MULTI_ARG_2_HI_CMP	V8HI_FTYPE_V8HI_V8HI_CMP
#define MULTI_ARG_2_QI_CMP	V16QI_FTYPE_V16QI_V16QI_CMP
#define MULTI_ARG_2_SF_TF	V4SF_FTYPE_V4SF_V4SF_TF
#define MULTI_ARG_2_DF_TF	V2DF_FTYPE_V2DF_V2DF_TF
#define MULTI_ARG_2_DI_TF	V2DI_FTYPE_V2DI_V2DI_TF
#define MULTI_ARG_2_SI_TF	V4SI_FTYPE_V4SI_V4SI_TF
#define MULTI_ARG_2_HI_TF	V8HI_FTYPE_V8HI_V8HI_TF
#define MULTI_ARG_2_QI_TF	V16QI_FTYPE_V16QI_V16QI_TF
#define MULTI_ARG_1_SF		V4SF_FTYPE_V4SF
#define MULTI_ARG_1_DF		V2DF_FTYPE_V2DF
#define MULTI_ARG_1_SF2		V8SF_FTYPE_V8SF
#define MULTI_ARG_1_DF2		V4DF_FTYPE_V4DF
#define MULTI_ARG_1_DI		V2DI_FTYPE_V2DI
#define MULTI_ARG_1_SI		V4SI_FTYPE_V4SI
#define MULTI_ARG_1_HI		V8HI_FTYPE_V8HI
#define MULTI_ARG_1_QI		V16QI_FTYPE_V16QI
#define MULTI_ARG_1_SI_DI	V2DI_FTYPE_V4SI
#define MULTI_ARG_1_HI_DI	V2DI_FTYPE_V8HI
#define MULTI_ARG_1_HI_SI	V4SI_FTYPE_V8HI
#define MULTI_ARG_1_QI_DI	V2DI_FTYPE_V16QI
#define MULTI_ARG_1_QI_SI	V4SI_FTYPE_V16QI
#define MULTI_ARG_1_QI_HI	V8HI_FTYPE_V16QI
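
/* Illustration (not used by the tables themselves): each MULTI_ARG_*
   macro is merely an alias for one of the ix86_builtin_func_type
   signatures, so a row tagged MULTI_ARG_3_SF declares a builtin of type
   V4SF_FTYPE_V4SF_V4SF_V4SF, i.e. from C:

     __v4sf r = __builtin_ia32_vfmaddps (a, b, c);    (a, b, c: __v4sf)

   The _CMP and _TF variants additionally make the expander pass the
   descriptor's comparison code through as an extra operand.  */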

static const struct builtin_description bdesc_multi_arg[] =
{
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv4sf4, "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv2df4, "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4sf4, "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv2df4, "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv4sf4, "__builtin_ia32_vfmsubss", IX86_BUILTIN_VFMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv2df4, "__builtin_ia32_vfmsubsd", IX86_BUILTIN_VFMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4sf4, "__builtin_ia32_vfmsubps", IX86_BUILTIN_VFMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv2df4, "__builtin_ia32_vfmsubpd", IX86_BUILTIN_VFMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv4sf4, "__builtin_ia32_vfnmaddss", IX86_BUILTIN_VFNMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv2df4, "__builtin_ia32_vfnmaddsd", IX86_BUILTIN_VFNMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4sf4, "__builtin_ia32_vfnmaddps", IX86_BUILTIN_VFNMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv2df4, "__builtin_ia32_vfnmaddpd", IX86_BUILTIN_VFNMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv4sf4, "__builtin_ia32_vfnmsubss", IX86_BUILTIN_VFNMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv2df4, "__builtin_ia32_vfnmsubsd", IX86_BUILTIN_VFNMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4sf4, "__builtin_ia32_vfnmsubps", IX86_BUILTIN_VFNMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv2df4, "__builtin_ia32_vfnmsubpd", IX86_BUILTIN_VFNMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4sf4, "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv2df4, "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4sf4, "__builtin_ia32_vfmsubaddps", IX86_BUILTIN_VFMSUBADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv2df4, "__builtin_ia32_vfmsubaddpd", IX86_BUILTIN_VFMSUBADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv8sf4256, "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4df4256, "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv8sf4256, "__builtin_ia32_vfmsubps256", IX86_BUILTIN_VFMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4df4256, "__builtin_ia32_vfmsubpd256", IX86_BUILTIN_VFMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv8sf4256, "__builtin_ia32_vfnmaddps256", IX86_BUILTIN_VFNMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4df4256, "__builtin_ia32_vfnmaddpd256", IX86_BUILTIN_VFNMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv8sf4256, "__builtin_ia32_vfnmsubps256", IX86_BUILTIN_VFNMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4df4256, "__builtin_ia32_vfnmsubpd256", IX86_BUILTIN_VFNMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv8sf4, "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4df4, "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv8sf4, "__builtin_ia32_vfmsubaddps256", IX86_BUILTIN_VFMSUBADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4df4, "__builtin_ia32_vfmsubaddpd256", IX86_BUILTIN_VFMSUBADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi", IX86_BUILTIN_VPCMOV_V16QI, UNKNOWN, (int)MULTI_ARG_3_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2256, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2256, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub", IX86_BUILTIN_VPCOMFALSEUB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw", IX86_BUILTIN_VPCOMFALSEUW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud", IX86_BUILTIN_VPCOMFALSEUD, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq", IX86_BUILTIN_VPCOMFALSEUQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
};
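
/* How a row is consumed (informal sketch): ix86_init_mmx_sse_builtins
   registers, say,

     { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di,
       "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN,
       (int)MULTI_ARG_3_DI }

   under its ISA mask, and ix86_expand_multi_arg_builtin later expands a
   call to it through insn pattern CODE_FOR_xop_pcmov_v2di.  With -mxop
   in effect a user can write:

     __v2di r = __builtin_ia32_vpcmov (a, b, mask);
*/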
/* Set up all the MMX/SSE builtins, even builtins for instructions that are
   not in the current target ISA, to allow the user to compile particular
   modules with different target specific options that differ from the
   command line options.  */
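
/* Usage sketch of why this matters: with, say, a plain -msse2 command
   line, a single function can still be compiled for AVX and call the
   AVX builtins registered below:

     __attribute__ ((target ("avx")))
     __v4df add256 (__v4df a, __v4df b)
     {
       return __builtin_ia32_addpd256 (a, b);
     }

   so every descriptor table is walked unconditionally here.  */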
static void
ix86_init_mmx_sse_builtins (void)
{
  const struct builtin_description * d;
  enum ix86_builtin_func_type ftype;
  size_t i;

  /* Add all special builtins with variable number of operands.  */
  for (i = 0, d = bdesc_special_args;
       i < ARRAY_SIZE (bdesc_special_args);
       i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin (d->mask, d->name, ftype, d->code);
    }

  /* Add all builtins with variable number of operands.  */
  for (i = 0, d = bdesc_args;
       i < ARRAY_SIZE (bdesc_args);
       i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpestr[im] insns.  */
  for (i = 0, d = bdesc_pcmpestr;
       i < ARRAY_SIZE (bdesc_pcmpestr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPESTRM128)
	ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
      else
	ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpistr[im] insns.  */
  for (i = 0, d = bdesc_pcmpistr;
       i < ARRAY_SIZE (bdesc_pcmpistr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPISTRM128)
	ftype = V16QI_FTYPE_V16QI_V16QI_INT;
      else
	ftype = INT_FTYPE_V16QI_V16QI_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* comi/ucomi insns.  */
  for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
    {
      if (d->mask == OPTION_MASK_ISA_SSE2)
	ftype = INT_FTYPE_V2DF_V2DF;
      else
	ftype = INT_FTYPE_V4SF_V4SF;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }
22610 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
22611 VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
22612 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
22613 UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);

  /* SSE or 3DNow!A */
  def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
	       "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
	       IX86_BUILTIN_MASKMOVQ);

  /* SSE2 */
  def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
	       VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);
22624 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
22625 VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
22626 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
22627 VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);

  /* SSE3 */
  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
	       VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
	       VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);

  /* AES */
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
		     V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
		     V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);

  /* PCLMUL */
  def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
		     V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);

  /* MMX access to the vec_init patterns.  */
  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
		     V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
		     V4HI_FTYPE_HI_HI_HI_HI,
		     IX86_BUILTIN_VEC_INIT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
		     V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
		     IX86_BUILTIN_VEC_INIT_V8QI);
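
  /* Usage sketch: the vec_init builtins assemble an MMX vector from
     scalars, e.g.

       __v2si v = __builtin_ia32_vec_init_v2si (1, 2);

     the _mm_set_* style intrinsics in the headers are written in terms
     of these.  */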

  /* Access to the vec_extract patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
		     DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
		     DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
  def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
		     FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
		     SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
		     HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
		     "__builtin_ia32_vec_ext_v4hi",
		     HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
		     SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
		     QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);
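
  /* Usage sketch: the vec_ext builtins take a vector and a constant
     lane index and return the scalar element, e.g.

       float f0 = __builtin_ia32_vec_ext_v4sf (x, 0);    (x: __v4sf)
  */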

  /* Access to the vec_set patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
		     "__builtin_ia32_vec_set_v2di",
		     V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
		     V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
		     V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
		     V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
		     "__builtin_ia32_vec_set_v4hi",
		     V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
		     V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
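
  /* Usage sketch: vec_set is the inverse of vec_ext; it yields a copy
     of the vector with one lane replaced, e.g.

       __v8hi r = __builtin_ia32_vec_set_v8hi (v, 42, 3);

     which inserts 42 into lane 3 of v.  */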

  /* Add FMA4 multi-arg argument instructions.  */
  for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }
}

/* Internal method for ix86_init_builtins.  */

static void
ix86_init_builtins_va_builtins_abi (void)
{
  tree ms_va_ref, sysv_va_ref;
  tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
  tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
  tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
  tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;

  if (!TARGET_64BIT)
    return;
  fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
  fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
  ms_va_ref = build_reference_type (ms_va_list_type_node);
  sysv_va_ref =
    build_pointer_type (TREE_TYPE (sysv_va_list_type_node));

  fnvoid_va_end_ms =
    build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_start_ms =
    build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_end_sysv =
    build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
  fnvoid_va_start_sysv =
    build_varargs_function_type_list (void_type_node, sysv_va_ref,
				      NULL_TREE);
  fnvoid_va_copy_ms =
    build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
			      NULL_TREE);
  fnvoid_va_copy_sysv =
    build_function_type_list (void_type_node, sysv_va_ref,
			      sysv_va_ref, NULL_TREE);

  add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
			BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
			BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
			BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
			BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
			BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
			BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
}

static void
ix86_init_builtin_types (void)
{
  tree float128_type_node, float80_type_node;

  /* The __float80 type.  */
  float80_type_node = long_double_type_node;
  if (TYPE_MODE (float80_type_node) != XFmode)
    {
      /* long double is not 80-bit extended here; build __float80 as a
	 distinct REAL_TYPE.  */
      float80_type_node = make_node (REAL_TYPE);

      TYPE_PRECISION (float80_type_node) = 80;
      layout_type (float80_type_node);
    }
  (*lang_hooks.types.register_builtin_type) (float80_type_node, "__float80");

  /* The __float128 type.  */
  float128_type_node = make_node (REAL_TYPE);
  TYPE_PRECISION (float128_type_node) = 128;
  layout_type (float128_type_node);
  (*lang_hooks.types.register_builtin_type) (float128_type_node, "__float128");
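
  /* Once registered, the types are directly usable from C, e.g.
     (sketch; the w/q literal suffixes assume this vintage of GCC's
     extended-float support):

       __float80  x = 1.0w;     80-bit x87 extended, XFmode
       __float128 y = 1.0q;     128-bit quad, TFmode  */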

  /* This macro is built by i386-builtin-types.awk.  */
  DEFINE_BUILTIN_PRIMITIVE_TYPES;
}

static void
ix86_init_builtins (void)
{
  tree t;

  ix86_init_builtin_types ();

  /* TFmode support builtins.  */
  def_builtin_const (0, "__builtin_infq",
		     FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
  def_builtin_const (0, "__builtin_huge_valq",
		     FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);

  /* These are expanded to normal calls if SSE2 isn't available, since
     they are used by libgcc.  */
  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
  t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
			    BUILT_IN_MD, "__fabstf2", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;

  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
  t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
			    BUILT_IN_MD, "__copysigntf3", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
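
  /* Usage sketch: because these are registered with library fallback
     names,

       __float128 a = __builtin_fabsq (x);
       __float128 c = __builtin_copysignq (x, y);

     become calls to __fabstf2 / __copysigntf3 in libgcc when they
     cannot be expanded inline.  */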

  ix86_init_mmx_sse_builtins ();

  if (TARGET_64BIT)
    ix86_init_builtins_va_builtins_abi ();
}

/* Return the ix86 builtin for CODE.  */

static tree
ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= IX86_BUILTIN_MAX)
    return error_mark_node;

  return ix86_builtins[code];
}

/* Errors in the source file can cause expand_expr to return const0_rtx
   where we expect a vector.  To avoid crashing, use one of the vector
   clear instructions.  */

static rtx
safe_vector_operand (rtx x, enum machine_mode mode)
{
  if (x == const0_rtx)
    x = CONST0_RTX (mode);
  return x;
}

/* Subroutine of ix86_expand_builtin to take care of binop insns.  */

static rtx
ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (GET_MODE (op1) == SImode && mode1 == TImode)
    {
      /* The pattern wants a TImode operand but the argument was
	 expanded in SImode; widen it through a V4SImode load.  */
      rtx x = gen_reg_rtx (V4SImode);
      emit_insn (gen_sse2_loadd (x, op1));
      op1 = gen_lowpart (TImode, x);
    }

  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
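
/* Expansion sketch (illustrative): for a call such as
   __builtin_ia32_paddw128 (a, b) this is reached with
   icode = CODE_FOR_addv8hi3; a and b are forced into V8HImode operands
   satisfying the pattern's predicates and a single add insn is emitted,
   with the result left in TARGET.  */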

/* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns.  */

static rtx
ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
			       enum ix86_builtin_func_type m_type,
			       enum rtx_code sub_code)
{
  rtx pat;
  int i;
  int nargs;
  bool comparison_p = false;
  bool tf_p = false;
  bool last_arg_constant = false;
  int num_memory = 0;
  struct {
    rtx op;
    enum machine_mode mode;
  } args[4];

  enum machine_mode tmode = insn_data[icode].operand[0].mode;

  switch (m_type)
    {
    case MULTI_ARG_4_DF2_DI_I:
    case MULTI_ARG_4_DF2_DI_I1:
    case MULTI_ARG_4_SF2_SI_I:
    case MULTI_ARG_4_SF2_SI_I1:
      nargs = 4;
      last_arg_constant = true;
      break;

    case MULTI_ARG_3_SF:
    case MULTI_ARG_3_DF:
    case MULTI_ARG_3_SF2:
    case MULTI_ARG_3_DF2:
    case MULTI_ARG_3_DI:
    case MULTI_ARG_3_SI:
    case MULTI_ARG_3_SI_DI:
    case MULTI_ARG_3_HI:
    case MULTI_ARG_3_HI_SI:
    case MULTI_ARG_3_QI:
    case MULTI_ARG_3_DI2:
    case MULTI_ARG_3_SI2:
    case MULTI_ARG_3_HI2:
    case MULTI_ARG_3_QI2:
      nargs = 3;
      break;

    case MULTI_ARG_2_SF:
    case MULTI_ARG_2_DF:
    case MULTI_ARG_2_DI:
    case MULTI_ARG_2_SI:
    case MULTI_ARG_2_HI:
    case MULTI_ARG_2_QI:
      nargs = 2;
      break;

    case MULTI_ARG_2_DI_IMM:
    case MULTI_ARG_2_SI_IMM:
    case MULTI_ARG_2_HI_IMM:
    case MULTI_ARG_2_QI_IMM:
      nargs = 2;
      last_arg_constant = true;
      break;

    case MULTI_ARG_1_SF:
    case MULTI_ARG_1_DF:
    case MULTI_ARG_1_SF2:
    case MULTI_ARG_1_DF2:
    case MULTI_ARG_1_DI:
    case MULTI_ARG_1_SI:
    case MULTI_ARG_1_HI:
    case MULTI_ARG_1_QI:
    case MULTI_ARG_1_SI_DI:
    case MULTI_ARG_1_HI_DI:
    case MULTI_ARG_1_HI_SI:
    case MULTI_ARG_1_QI_DI:
    case MULTI_ARG_1_QI_SI:
    case MULTI_ARG_1_QI_HI:
      nargs = 1;
      break;

    case MULTI_ARG_2_DI_CMP:
    case MULTI_ARG_2_SI_CMP:
    case MULTI_ARG_2_HI_CMP:
    case MULTI_ARG_2_QI_CMP:
      nargs = 2;
      comparison_p = true;
      break;

    case MULTI_ARG_2_SF_TF:
    case MULTI_ARG_2_DF_TF:
    case MULTI_ARG_2_DI_TF:
    case MULTI_ARG_2_SI_TF:
    case MULTI_ARG_2_HI_TF:
    case MULTI_ARG_2_QI_TF:
      nargs = 2;
      tf_p = true;
      break;

    default:
      gcc_unreachable ();
    }

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  gcc_assert (nargs <= 4);

  for (i = 0; i < nargs; i++)
    {
      tree arg = CALL_EXPR_ARG (exp, i);
      rtx op = expand_normal (arg);
      int adjust = (comparison_p) ? 1 : 0;
      enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;

      if (last_arg_constant && i == nargs-1)
	{
	  if (!CONST_INT_P (op))
	    {
	      error ("last argument must be an immediate");
	      return gen_reg_rtx (tmode);
	    }
	}
      else
	{
	  if (VECTOR_MODE_P (mode))
	    op = safe_vector_operand (op, mode);

	  /* If we aren't optimizing, only allow one memory operand to be
	     generated.  */
	  if (memory_operand (op, mode))
	    num_memory++;

	  gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);

	  if (optimize
	      || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
	      || num_memory > 1)
	    op = force_reg (mode, op);
	}

      args[i].op = op;
      args[i].mode = mode;
    }

  switch (nargs)
    {
    case 1:
      pat = GEN_FCN (icode) (target, args[0].op);
      break;

    case 2:
      if (tf_p)
	pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
			       GEN_INT ((int)sub_code));
      else if (! comparison_p)
	pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
      else
	{
	  rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
				       args[0].op,
				       args[1].op);

	  pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
	}
      break;

    case 3:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
      break;

    case 4:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
			     args[2].op, args[3].op);
      break;

    default:
      gcc_unreachable ();
    }

  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
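
/* Flow sketch for one descriptor kind (illustrative): for the
   MULTI_ARG_2_QI_CMP rows comparison_p is set, so the expander builds
   cmp_op = (sub_code:V16QI op0 op1) and hands it to the
   xop_maskcmpv16qi3 pattern; a call like

     __v16qi m = __builtin_ia32_vpcomeqb (a, b);

   thus becomes a single VPCOMB with the comparison taken from the
   descriptor's d->comparison field.  */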

/* Subroutine of ix86_expand_args_builtin to take care of scalar unop
   insns with vec_merge.  */

static rtx
ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
				    rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op1, op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);

  if ((optimize && !register_operand (op0, mode0))
      || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  op1 = op0;
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
    op1 = copy_to_mode_reg (mode0, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}

/* Subroutine of ix86_expand_builtin to take care of comparison insns.  */

static rtx
ix86_expand_sse_compare (const struct builtin_description *d,
			 tree exp, rtx target, bool swap)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2;
  enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
  enum rtx_code comparison = d->comparison;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  /* Swap operands if we have a comparison that isn't available in
     hardware.  */
  if (swap)
    {
      rtx tmp = gen_reg_rtx (mode1);
      emit_move_insn (tmp, op1);
      op1 = op0;
      op0 = tmp;
    }

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if ((optimize && !register_operand (op0, mode0))
      || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
  pat = GEN_FCN (d->icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
23172 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
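/* A comi-style builtin, e.g. __builtin_ia32_comieq (name illustrative),
   compares two scalars and sets EFLAGS.  The code below materializes the
   flag selected by d->comparison into the low byte of a fresh SImode
   pseudo via a strict_low_part setcc and returns that register.  */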
23175 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
23179 tree arg0 = CALL_EXPR_ARG (exp, 0);
23180 tree arg1 = CALL_EXPR_ARG (exp, 1);
23181 rtx op0 = expand_normal (arg0);
23182 rtx op1 = expand_normal (arg1);
23183 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23184 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23185 enum rtx_code comparison = d->comparison;
23187 if (VECTOR_MODE_P (mode0))
23188 op0 = safe_vector_operand (op0, mode0);
23189 if (VECTOR_MODE_P (mode1))
23190 op1 = safe_vector_operand (op1, mode1);
23192 /* Swap operands if we have a comparison that isn't available in
23193 hardware.  */
23194 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23201 target = gen_reg_rtx (SImode);
23202 emit_move_insn (target, const0_rtx);
23203 target = gen_rtx_SUBREG (QImode, target, 0);
23205 if ((optimize && !register_operand (op0, mode0))
23206 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23207 op0 = copy_to_mode_reg (mode0, op0);
23208 if ((optimize && !register_operand (op1, mode1))
23209 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23210 op1 = copy_to_mode_reg (mode1, op1);
23212 pat = GEN_FCN (d->icode) (op0, op1);
23216 emit_insn (gen_rtx_SET (VOIDmode,
23217 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23218 gen_rtx_fmt_ee (comparison, QImode,
23222 return SUBREG_REG (target);
23225 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
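/* ptest sets ZF from the bitwise AND of its operands and CF from the
   AND-NOT; e.g. __builtin_ia32_ptestz128 (name illustrative) should yield
   nonzero here exactly when (op0 & op1) == 0, which the setcc sequence
   below extracts from the flags register.  */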
23228 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
23232 tree arg0 = CALL_EXPR_ARG (exp, 0);
23233 tree arg1 = CALL_EXPR_ARG (exp, 1);
23234 rtx op0 = expand_normal (arg0);
23235 rtx op1 = expand_normal (arg1);
23236 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23237 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23238 enum rtx_code comparison = d->comparison;
23240 if (VECTOR_MODE_P (mode0))
23241 op0 = safe_vector_operand (op0, mode0);
23242 if (VECTOR_MODE_P (mode1))
23243 op1 = safe_vector_operand (op1, mode1);
23245 target = gen_reg_rtx (SImode);
23246 emit_move_insn (target, const0_rtx);
23247 target = gen_rtx_SUBREG (QImode, target, 0);
23249 if ((optimize && !register_operand (op0, mode0))
23250 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23251 op0 = copy_to_mode_reg (mode0, op0);
23252 if ((optimize && !register_operand (op1, mode1))
23253 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23254 op1 = copy_to_mode_reg (mode1, op1);
23256 pat = GEN_FCN (d->icode) (op0, op1);
23260 emit_insn (gen_rtx_SET (VOIDmode,
23261 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23262 gen_rtx_fmt_ee (comparison, QImode,
23266 return SUBREG_REG (target);
23269 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
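/* The five arguments follow the intrinsic form _mm_cmpestri (a, la, b,
   lb, imm): two vectors with explicit lengths plus an 8-bit control
   immediate.  Depending on d->code, the index result (pcmpestri), the
   mask result (pcmpestrm), or one of the resulting flags is returned.  */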
23272 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23273 tree exp, rtx target)
23276 tree arg0 = CALL_EXPR_ARG (exp, 0);
23277 tree arg1 = CALL_EXPR_ARG (exp, 1);
23278 tree arg2 = CALL_EXPR_ARG (exp, 2);
23279 tree arg3 = CALL_EXPR_ARG (exp, 3);
23280 tree arg4 = CALL_EXPR_ARG (exp, 4);
23281 rtx scratch0, scratch1;
23282 rtx op0 = expand_normal (arg0);
23283 rtx op1 = expand_normal (arg1);
23284 rtx op2 = expand_normal (arg2);
23285 rtx op3 = expand_normal (arg3);
23286 rtx op4 = expand_normal (arg4);
23287 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23289 tmode0 = insn_data[d->icode].operand[0].mode;
23290 tmode1 = insn_data[d->icode].operand[1].mode;
23291 modev2 = insn_data[d->icode].operand[2].mode;
23292 modei3 = insn_data[d->icode].operand[3].mode;
23293 modev4 = insn_data[d->icode].operand[4].mode;
23294 modei5 = insn_data[d->icode].operand[5].mode;
23295 modeimm = insn_data[d->icode].operand[6].mode;
23297 if (VECTOR_MODE_P (modev2))
23298 op0 = safe_vector_operand (op0, modev2);
23299 if (VECTOR_MODE_P (modev4))
23300 op2 = safe_vector_operand (op2, modev4);
23302 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23303 op0 = copy_to_mode_reg (modev2, op0);
23304 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
23305 op1 = copy_to_mode_reg (modei3, op1);
23306 if ((optimize && !register_operand (op2, modev4))
23307 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
23308 op2 = copy_to_mode_reg (modev4, op2);
23309 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
23310 op3 = copy_to_mode_reg (modei5, op3);
23312 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
23314 error ("the fifth argument must be an 8-bit immediate");
23318 if (d->code == IX86_BUILTIN_PCMPESTRI128)
23320 if (optimize || !target
23321 || GET_MODE (target) != tmode0
23322 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23323 target = gen_reg_rtx (tmode0);
23325 scratch1 = gen_reg_rtx (tmode1);
23327 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
23329 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
23331 if (optimize || !target
23332 || GET_MODE (target) != tmode1
23333 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23334 target = gen_reg_rtx (tmode1);
23336 scratch0 = gen_reg_rtx (tmode0);
23338 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
23342 gcc_assert (d->flag);
23344 scratch0 = gen_reg_rtx (tmode0);
23345 scratch1 = gen_reg_rtx (tmode1);
23347 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
23357 target = gen_reg_rtx (SImode);
23358 emit_move_insn (target, const0_rtx);
23359 target = gen_rtx_SUBREG (QImode, target, 0);
23362 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23363 gen_rtx_fmt_ee (EQ, QImode,
23364 gen_rtx_REG ((enum machine_mode) d->flag,
23367 return SUBREG_REG (target);
23374 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
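/* Same as above, but for the implicit-length forms, e.g.
   _mm_cmpistri (a, b, imm): two vectors plus an 8-bit control immediate,
   with no length operands.  */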
23377 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
23378 tree exp, rtx target)
23381 tree arg0 = CALL_EXPR_ARG (exp, 0);
23382 tree arg1 = CALL_EXPR_ARG (exp, 1);
23383 tree arg2 = CALL_EXPR_ARG (exp, 2);
23384 rtx scratch0, scratch1;
23385 rtx op0 = expand_normal (arg0);
23386 rtx op1 = expand_normal (arg1);
23387 rtx op2 = expand_normal (arg2);
23388 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
23390 tmode0 = insn_data[d->icode].operand[0].mode;
23391 tmode1 = insn_data[d->icode].operand[1].mode;
23392 modev2 = insn_data[d->icode].operand[2].mode;
23393 modev3 = insn_data[d->icode].operand[3].mode;
23394 modeimm = insn_data[d->icode].operand[4].mode;
23396 if (VECTOR_MODE_P (modev2))
23397 op0 = safe_vector_operand (op0, modev2);
23398 if (VECTOR_MODE_P (modev3))
23399 op1 = safe_vector_operand (op1, modev3);
23401 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23402 op0 = copy_to_mode_reg (modev2, op0);
23403 if ((optimize && !register_operand (op1, modev3))
23404 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
23405 op1 = copy_to_mode_reg (modev3, op1);
23407 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
23409 error ("the third argument must be an 8-bit immediate");
23413 if (d->code == IX86_BUILTIN_PCMPISTRI128)
23415 if (optimize || !target
23416 || GET_MODE (target) != tmode0
23417 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23418 target = gen_reg_rtx (tmode0);
23420 scratch1 = gen_reg_rtx (tmode1);
23422 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
23424 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
23426 if (optimize || !target
23427 || GET_MODE (target) != tmode1
23428 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23429 target = gen_reg_rtx (tmode1);
23431 scratch0 = gen_reg_rtx (tmode0);
23433 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
23437 gcc_assert (d->flag);
23439 scratch0 = gen_reg_rtx (tmode0);
23440 scratch1 = gen_reg_rtx (tmode1);
23442 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
23452 target = gen_reg_rtx (SImode);
23453 emit_move_insn (target, const0_rtx);
23454 target = gen_rtx_SUBREG (QImode, target, 0);
23457 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23458 gen_rtx_fmt_ee (EQ, QImode,
23459 gen_rtx_REG ((enum machine_mode) d->flag,
23462 return SUBREG_REG (target);
23468 /* Subroutine of ix86_expand_builtin to take care of insns with
23469 a variable number of operands.  */
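/* d->flag encodes the builtin's prototype as an ix86_builtin_func_type;
   for instance V4SF_FTYPE_V4SF_V4SF describes a plain two-operand vector
   builtin, the *_COUNT variants flag a trailing shift count, and the
   *_INT variants require trailing immediates (tracked in
   nargs_constant).  */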
23472 ix86_expand_args_builtin (const struct builtin_description *d,
23473 tree exp, rtx target)
23475 rtx pat, real_target;
23476 unsigned int i, nargs;
23477 unsigned int nargs_constant = 0;
23478 int num_memory = 0;
23482 enum machine_mode mode;
23484 bool last_arg_count = false;
23485 enum insn_code icode = d->icode;
23486 const struct insn_data *insn_p = &insn_data[icode];
23487 enum machine_mode tmode = insn_p->operand[0].mode;
23488 enum machine_mode rmode = VOIDmode;
23490 enum rtx_code comparison = d->comparison;
23492 switch ((enum ix86_builtin_func_type) d->flag)
23494 case INT_FTYPE_V8SF_V8SF_PTEST:
23495 case INT_FTYPE_V4DI_V4DI_PTEST:
23496 case INT_FTYPE_V4DF_V4DF_PTEST:
23497 case INT_FTYPE_V4SF_V4SF_PTEST:
23498 case INT_FTYPE_V2DI_V2DI_PTEST:
23499 case INT_FTYPE_V2DF_V2DF_PTEST:
23500 return ix86_expand_sse_ptest (d, exp, target);
23501 case FLOAT128_FTYPE_FLOAT128:
23502 case FLOAT_FTYPE_FLOAT:
23503 case INT_FTYPE_INT:
23504 case UINT64_FTYPE_INT:
23505 case UINT16_FTYPE_UINT16:
23506 case INT64_FTYPE_INT64:
23507 case INT64_FTYPE_V4SF:
23508 case INT64_FTYPE_V2DF:
23509 case INT_FTYPE_V16QI:
23510 case INT_FTYPE_V8QI:
23511 case INT_FTYPE_V8SF:
23512 case INT_FTYPE_V4DF:
23513 case INT_FTYPE_V4SF:
23514 case INT_FTYPE_V2DF:
23515 case V16QI_FTYPE_V16QI:
23516 case V8SI_FTYPE_V8SF:
23517 case V8SI_FTYPE_V4SI:
23518 case V8HI_FTYPE_V8HI:
23519 case V8HI_FTYPE_V16QI:
23520 case V8QI_FTYPE_V8QI:
23521 case V8SF_FTYPE_V8SF:
23522 case V8SF_FTYPE_V8SI:
23523 case V8SF_FTYPE_V4SF:
23524 case V4SI_FTYPE_V4SI:
23525 case V4SI_FTYPE_V16QI:
23526 case V4SI_FTYPE_V4SF:
23527 case V4SI_FTYPE_V8SI:
23528 case V4SI_FTYPE_V8HI:
23529 case V4SI_FTYPE_V4DF:
23530 case V4SI_FTYPE_V2DF:
23531 case V4HI_FTYPE_V4HI:
23532 case V4DF_FTYPE_V4DF:
23533 case V4DF_FTYPE_V4SI:
23534 case V4DF_FTYPE_V4SF:
23535 case V4DF_FTYPE_V2DF:
23536 case V4SF_FTYPE_V4SF:
23537 case V4SF_FTYPE_V4SI:
23538 case V4SF_FTYPE_V8SF:
23539 case V4SF_FTYPE_V4DF:
23540 case V4SF_FTYPE_V2DF:
23541 case V2DI_FTYPE_V2DI:
23542 case V2DI_FTYPE_V16QI:
23543 case V2DI_FTYPE_V8HI:
23544 case V2DI_FTYPE_V4SI:
23545 case V2DF_FTYPE_V2DF:
23546 case V2DF_FTYPE_V4SI:
23547 case V2DF_FTYPE_V4DF:
23548 case V2DF_FTYPE_V4SF:
23549 case V2DF_FTYPE_V2SI:
23550 case V2SI_FTYPE_V2SI:
23551 case V2SI_FTYPE_V4SF:
23552 case V2SI_FTYPE_V2SF:
23553 case V2SI_FTYPE_V2DF:
23554 case V2SF_FTYPE_V2SF:
23555 case V2SF_FTYPE_V2SI:
23558 case V4SF_FTYPE_V4SF_VEC_MERGE:
23559 case V2DF_FTYPE_V2DF_VEC_MERGE:
23560 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
23561 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
23562 case V16QI_FTYPE_V16QI_V16QI:
23563 case V16QI_FTYPE_V8HI_V8HI:
23564 case V8QI_FTYPE_V8QI_V8QI:
23565 case V8QI_FTYPE_V4HI_V4HI:
23566 case V8HI_FTYPE_V8HI_V8HI:
23567 case V8HI_FTYPE_V16QI_V16QI:
23568 case V8HI_FTYPE_V4SI_V4SI:
23569 case V8SF_FTYPE_V8SF_V8SF:
23570 case V8SF_FTYPE_V8SF_V8SI:
23571 case V4SI_FTYPE_V4SI_V4SI:
23572 case V4SI_FTYPE_V8HI_V8HI:
23573 case V4SI_FTYPE_V4SF_V4SF:
23574 case V4SI_FTYPE_V2DF_V2DF:
23575 case V4HI_FTYPE_V4HI_V4HI:
23576 case V4HI_FTYPE_V8QI_V8QI:
23577 case V4HI_FTYPE_V2SI_V2SI:
23578 case V4DF_FTYPE_V4DF_V4DF:
23579 case V4DF_FTYPE_V4DF_V4DI:
23580 case V4SF_FTYPE_V4SF_V4SF:
23581 case V4SF_FTYPE_V4SF_V4SI:
23582 case V4SF_FTYPE_V4SF_V2SI:
23583 case V4SF_FTYPE_V4SF_V2DF:
23584 case V4SF_FTYPE_V4SF_DI:
23585 case V4SF_FTYPE_V4SF_SI:
23586 case V2DI_FTYPE_V2DI_V2DI:
23587 case V2DI_FTYPE_V16QI_V16QI:
23588 case V2DI_FTYPE_V4SI_V4SI:
23589 case V2DI_FTYPE_V2DI_V16QI:
23590 case V2DI_FTYPE_V2DF_V2DF:
23591 case V2SI_FTYPE_V2SI_V2SI:
23592 case V2SI_FTYPE_V4HI_V4HI:
23593 case V2SI_FTYPE_V2SF_V2SF:
23594 case V2DF_FTYPE_V2DF_V2DF:
23595 case V2DF_FTYPE_V2DF_V4SF:
23596 case V2DF_FTYPE_V2DF_V2DI:
23597 case V2DF_FTYPE_V2DF_DI:
23598 case V2DF_FTYPE_V2DF_SI:
23599 case V2SF_FTYPE_V2SF_V2SF:
23600 case V1DI_FTYPE_V1DI_V1DI:
23601 case V1DI_FTYPE_V8QI_V8QI:
23602 case V1DI_FTYPE_V2SI_V2SI:
23603 if (comparison == UNKNOWN)
23604 return ix86_expand_binop_builtin (icode, exp, target);
23607 case V4SF_FTYPE_V4SF_V4SF_SWAP:
23608 case V2DF_FTYPE_V2DF_V2DF_SWAP:
23609 gcc_assert (comparison != UNKNOWN);
23613 case V8HI_FTYPE_V8HI_V8HI_COUNT:
23614 case V8HI_FTYPE_V8HI_SI_COUNT:
23615 case V4SI_FTYPE_V4SI_V4SI_COUNT:
23616 case V4SI_FTYPE_V4SI_SI_COUNT:
23617 case V4HI_FTYPE_V4HI_V4HI_COUNT:
23618 case V4HI_FTYPE_V4HI_SI_COUNT:
23619 case V2DI_FTYPE_V2DI_V2DI_COUNT:
23620 case V2DI_FTYPE_V2DI_SI_COUNT:
23621 case V2SI_FTYPE_V2SI_V2SI_COUNT:
23622 case V2SI_FTYPE_V2SI_SI_COUNT:
23623 case V1DI_FTYPE_V1DI_V1DI_COUNT:
23624 case V1DI_FTYPE_V1DI_SI_COUNT:
23626 last_arg_count = true;
23628 case UINT64_FTYPE_UINT64_UINT64:
23629 case UINT_FTYPE_UINT_UINT:
23630 case UINT_FTYPE_UINT_USHORT:
23631 case UINT_FTYPE_UINT_UCHAR:
23632 case UINT16_FTYPE_UINT16_INT:
23633 case UINT8_FTYPE_UINT8_INT:
23636 case V2DI_FTYPE_V2DI_INT_CONVERT:
23639 nargs_constant = 1;
23641 case V8HI_FTYPE_V8HI_INT:
23642 case V8SF_FTYPE_V8SF_INT:
23643 case V4SI_FTYPE_V4SI_INT:
23644 case V4SI_FTYPE_V8SI_INT:
23645 case V4HI_FTYPE_V4HI_INT:
23646 case V4DF_FTYPE_V4DF_INT:
23647 case V4SF_FTYPE_V4SF_INT:
23648 case V4SF_FTYPE_V8SF_INT:
23649 case V2DI_FTYPE_V2DI_INT:
23650 case V2DF_FTYPE_V2DF_INT:
23651 case V2DF_FTYPE_V4DF_INT:
23653 nargs_constant = 1;
23655 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23656 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23657 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23658 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23659 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23662 case V16QI_FTYPE_V16QI_V16QI_INT:
23663 case V8HI_FTYPE_V8HI_V8HI_INT:
23664 case V8SI_FTYPE_V8SI_V8SI_INT:
23665 case V8SI_FTYPE_V8SI_V4SI_INT:
23666 case V8SF_FTYPE_V8SF_V8SF_INT:
23667 case V8SF_FTYPE_V8SF_V4SF_INT:
23668 case V4SI_FTYPE_V4SI_V4SI_INT:
23669 case V4DF_FTYPE_V4DF_V4DF_INT:
23670 case V4DF_FTYPE_V4DF_V2DF_INT:
23671 case V4SF_FTYPE_V4SF_V4SF_INT:
23672 case V2DI_FTYPE_V2DI_V2DI_INT:
23673 case V2DF_FTYPE_V2DF_V2DF_INT:
23675 nargs_constant = 1;
23677 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
23680 nargs_constant = 1;
23682 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
23685 nargs_constant = 1;
23687 case V2DI_FTYPE_V2DI_UINT_UINT:
23689 nargs_constant = 2;
23691 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
23692 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
23693 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
23694 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
23696 nargs_constant = 1;
23698 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23700 nargs_constant = 2;
23703 gcc_unreachable ();
23706 gcc_assert (nargs <= ARRAY_SIZE (args));
23708 if (comparison != UNKNOWN)
23710 gcc_assert (nargs == 2);
23711 return ix86_expand_sse_compare (d, exp, target, swap);
23714 if (rmode == VOIDmode || rmode == tmode)
23718 || GET_MODE (target) != tmode
23719 || ! (*insn_p->operand[0].predicate) (target, tmode))
23720 target = gen_reg_rtx (tmode);
23721 real_target = target;
23725 target = gen_reg_rtx (rmode);
23726 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
23729 for (i = 0; i < nargs; i++)
23731 tree arg = CALL_EXPR_ARG (exp, i);
23732 rtx op = expand_normal (arg);
23733 enum machine_mode mode = insn_p->operand[i + 1].mode;
23734 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
23736 if (last_arg_count && (i + 1) == nargs)
23738 /* SIMD shift insns take either an 8-bit immediate or a
23739 register as the count.  But the builtin functions take int as
23740 the count.  If the count doesn't match, we put it in a register.  */
23743 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
23744 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
23745 op = copy_to_reg (op);
23748 else if ((nargs - i) <= nargs_constant)
23753 case CODE_FOR_sse4_1_roundpd:
23754 case CODE_FOR_sse4_1_roundps:
23755 case CODE_FOR_sse4_1_roundsd:
23756 case CODE_FOR_sse4_1_roundss:
23757 case CODE_FOR_sse4_1_blendps:
23758 case CODE_FOR_avx_blendpd256:
23759 case CODE_FOR_avx_vpermilv4df:
23760 case CODE_FOR_avx_roundpd256:
23761 case CODE_FOR_avx_roundps256:
23762 error ("the last argument must be a 4-bit immediate");
23765 case CODE_FOR_sse4_1_blendpd:
23766 case CODE_FOR_avx_vpermilv2df:
23767 case CODE_FOR_xop_vpermil2v2df3:
23768 case CODE_FOR_xop_vpermil2v4sf3:
23769 case CODE_FOR_xop_vpermil2v4df3:
23770 case CODE_FOR_xop_vpermil2v8sf3:
23771 error ("the last argument must be a 2-bit immediate");
23774 case CODE_FOR_avx_vextractf128v4df:
23775 case CODE_FOR_avx_vextractf128v8sf:
23776 case CODE_FOR_avx_vextractf128v8si:
23777 case CODE_FOR_avx_vinsertf128v4df:
23778 case CODE_FOR_avx_vinsertf128v8sf:
23779 case CODE_FOR_avx_vinsertf128v8si:
23780 error ("the last argument must be a 1-bit immediate");
23783 case CODE_FOR_avx_cmpsdv2df3:
23784 case CODE_FOR_avx_cmpssv4sf3:
23785 case CODE_FOR_avx_cmppdv2df3:
23786 case CODE_FOR_avx_cmppsv4sf3:
23787 case CODE_FOR_avx_cmppdv4df3:
23788 case CODE_FOR_avx_cmppsv8sf3:
23789 error ("the last argument must be a 5-bit immediate");
23793 switch (nargs_constant)
23796 if ((nargs - i) == nargs_constant)
23798 error ("the next to last argument must be an 8-bit immediate");
23802 error ("the last argument must be an 8-bit immediate");
23805 gcc_unreachable ();
23812 if (VECTOR_MODE_P (mode))
23813 op = safe_vector_operand (op, mode);
23815 /* If we aren't optimizing, only allow one memory operand to
23816 be generated.  */
23817 if (memory_operand (op, mode))
23820 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
23822 if (optimize || !match || num_memory > 1)
23823 op = copy_to_mode_reg (mode, op);
23827 op = copy_to_reg (op);
23828 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
23833 args[i].mode = mode;
23839 pat = GEN_FCN (icode) (real_target, args[0].op);
23842 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
23845 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23849 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23850 args[2].op, args[3].op);
23853 gcc_unreachable ();
23863 /* Subroutine of ix86_expand_builtin to take care of special insns
23864 with a variable number of operands.  */
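/* These builtins are classified as loads or stores; for instance
   V2DF_FTYPE_PCDOUBLE is a load whose pointer argument becomes the source
   MEM, while VOID_FTYPE_PDOUBLE_V2DF is a store whose pointer argument
   becomes the destination MEM and whose call yields no value.  */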
23867 ix86_expand_special_args_builtin (const struct builtin_description *d,
23868 tree exp, rtx target)
23872 unsigned int i, nargs, arg_adjust, memory;
23876 enum machine_mode mode;
23878 enum insn_code icode = d->icode;
23879 bool last_arg_constant = false;
23880 const struct insn_data *insn_p = &insn_data[icode];
23881 enum machine_mode tmode = insn_p->operand[0].mode;
23882 enum { load, store } klass;
23884 switch ((enum ix86_builtin_func_type) d->flag)
23886 case VOID_FTYPE_VOID:
23887 emit_insn (GEN_FCN (icode) (target));
23889 case UINT64_FTYPE_VOID:
23894 case UINT64_FTYPE_PUNSIGNED:
23895 case V2DI_FTYPE_PV2DI:
23896 case V32QI_FTYPE_PCCHAR:
23897 case V16QI_FTYPE_PCCHAR:
23898 case V8SF_FTYPE_PCV4SF:
23899 case V8SF_FTYPE_PCFLOAT:
23900 case V4SF_FTYPE_PCFLOAT:
23901 case V4DF_FTYPE_PCV2DF:
23902 case V4DF_FTYPE_PCDOUBLE:
23903 case V2DF_FTYPE_PCDOUBLE:
23904 case VOID_FTYPE_PVOID:
23909 case VOID_FTYPE_PV2SF_V4SF:
23910 case VOID_FTYPE_PV4DI_V4DI:
23911 case VOID_FTYPE_PV2DI_V2DI:
23912 case VOID_FTYPE_PCHAR_V32QI:
23913 case VOID_FTYPE_PCHAR_V16QI:
23914 case VOID_FTYPE_PFLOAT_V8SF:
23915 case VOID_FTYPE_PFLOAT_V4SF:
23916 case VOID_FTYPE_PDOUBLE_V4DF:
23917 case VOID_FTYPE_PDOUBLE_V2DF:
23918 case VOID_FTYPE_PULONGLONG_ULONGLONG:
23919 case VOID_FTYPE_PINT_INT:
23922 /* Reserve memory operand for target. */
23923 memory = ARRAY_SIZE (args);
23925 case V4SF_FTYPE_V4SF_PCV2SF:
23926 case V2DF_FTYPE_V2DF_PCDOUBLE:
23931 case V8SF_FTYPE_PCV8SF_V8SF:
23932 case V4DF_FTYPE_PCV4DF_V4DF:
23933 case V4SF_FTYPE_PCV4SF_V4SF:
23934 case V2DF_FTYPE_PCV2DF_V2DF:
23939 case VOID_FTYPE_PV8SF_V8SF_V8SF:
23940 case VOID_FTYPE_PV4DF_V4DF_V4DF:
23941 case VOID_FTYPE_PV4SF_V4SF_V4SF:
23942 case VOID_FTYPE_PV2DF_V2DF_V2DF:
23945 /* Reserve memory operand for target. */
23946 memory = ARRAY_SIZE (args);
23948 case VOID_FTYPE_UINT_UINT_UINT:
23949 case VOID_FTYPE_UINT64_UINT_UINT:
23950 case UCHAR_FTYPE_UINT_UINT_UINT:
23951 case UCHAR_FTYPE_UINT64_UINT_UINT:
23954 memory = ARRAY_SIZE (args);
23955 last_arg_constant = true;
23958 gcc_unreachable ();
23961 gcc_assert (nargs <= ARRAY_SIZE (args));
23963 if (klass == store)
23965 arg = CALL_EXPR_ARG (exp, 0);
23966 op = expand_normal (arg);
23967 gcc_assert (target == 0);
23968 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
23976 || GET_MODE (target) != tmode
23977 || ! (*insn_p->operand[0].predicate) (target, tmode))
23978 target = gen_reg_rtx (tmode);
23981 for (i = 0; i < nargs; i++)
23983 enum machine_mode mode = insn_p->operand[i + 1].mode;
23986 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
23987 op = expand_normal (arg);
23988 match = (*insn_p->operand[i + 1].predicate) (op, mode);
23990 if (last_arg_constant && (i + 1) == nargs)
23994 if (icode == CODE_FOR_lwp_lwpvalsi3
23995 || icode == CODE_FOR_lwp_lwpinssi3
23996 || icode == CODE_FOR_lwp_lwpvaldi3
23997 || icode == CODE_FOR_lwp_lwpinsdi3)
23998 error ("the last argument must be a 32-bit immediate");
24000 error ("the last argument must be an 8-bit immediate");
24008 /* This must be the memory operand. */
24009 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
24010 gcc_assert (GET_MODE (op) == mode
24011 || GET_MODE (op) == VOIDmode);
24015 /* This must be a register.  */
24016 if (VECTOR_MODE_P (mode))
24017 op = safe_vector_operand (op, mode);
24019 gcc_assert (GET_MODE (op) == mode
24020 || GET_MODE (op) == VOIDmode);
24021 op = copy_to_mode_reg (mode, op);
24026 args[i].mode = mode;
24032 pat = GEN_FCN (icode) (target);
24035 pat = GEN_FCN (icode) (target, args[0].op);
24038 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
24041 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
24044 gcc_unreachable ();
24050 return klass == store ? 0 : target;
24053 /* Return the integer constant in ARG. Constrain it to be in the range
24054 of the subparts of VEC_TYPE; issue an error if not. */
24057 get_element_number (tree vec_type, tree arg)
24059 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
24061 if (!host_integerp (arg, 1)
24062 || (elt = tree_low_cst (arg, 1), elt > max))
24064 error ("selector must be an integer constant in the range 0..%wi", max);
24071 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24072 ix86_expand_vector_init. We DO have language-level syntax for this, in
24073 the form of (type){ init-list }. Except that since we can't place emms
24074 instructions from inside the compiler, we can't allow the use of MMX
24075 registers unless the user explicitly asks for it. So we do *not* define
24076 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
24077 we have builtins invoked by mmintrin.h that give us license to emit
24078 these sorts of instructions. */
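/* For instance, a call such as __builtin_ia32_vec_init_v4hi (a, b, c, d)
   (as wrapped by mmintrin.h; values illustrative) lands here and is
   expanded through ix86_expand_vector_init as if it had been written
   (__v4hi) { a, b, c, d }.  */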
24081 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
24083 enum machine_mode tmode = TYPE_MODE (type);
24084 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
24085 int i, n_elt = GET_MODE_NUNITS (tmode);
24086 rtvec v = rtvec_alloc (n_elt);
24088 gcc_assert (VECTOR_MODE_P (tmode));
24089 gcc_assert (call_expr_nargs (exp) == n_elt);
24091 for (i = 0; i < n_elt; ++i)
24093 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
24094 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
24097 if (!target || !register_operand (target, tmode))
24098 target = gen_reg_rtx (tmode);
24100 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
24104 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24105 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
24106 had a language-level syntax for referencing vector elements. */
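/* E.g. __builtin_ia32_vec_ext_v4si (v, 2) (illustrative) behaves like
   reading ((__v4si) v)[2]; the selector must be a compile-time constant,
   which get_element_number enforces.  */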
24109 ix86_expand_vec_ext_builtin (tree exp, rtx target)
24111 enum machine_mode tmode, mode0;
24116 arg0 = CALL_EXPR_ARG (exp, 0);
24117 arg1 = CALL_EXPR_ARG (exp, 1);
24119 op0 = expand_normal (arg0);
24120 elt = get_element_number (TREE_TYPE (arg0), arg1);
24122 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24123 mode0 = TYPE_MODE (TREE_TYPE (arg0));
24124 gcc_assert (VECTOR_MODE_P (mode0));
24126 op0 = force_reg (mode0, op0);
24128 if (optimize || !target || !register_operand (target, tmode))
24129 target = gen_reg_rtx (tmode);
24131 ix86_expand_vector_extract (true, target, op0, elt);
24136 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24137 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
24138 a language-level syntax for referencing vector elements. */
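/* E.g. __builtin_ia32_vec_set_v8hi (v, x, 3) (illustrative) returns a
   copy of V with element 3 replaced by X; the input vector itself is
   left unmodified, since a fresh target register is used.  */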
24141 ix86_expand_vec_set_builtin (tree exp)
24143 enum machine_mode tmode, mode1;
24144 tree arg0, arg1, arg2;
24146 rtx op0, op1, target;
24148 arg0 = CALL_EXPR_ARG (exp, 0);
24149 arg1 = CALL_EXPR_ARG (exp, 1);
24150 arg2 = CALL_EXPR_ARG (exp, 2);
24152 tmode = TYPE_MODE (TREE_TYPE (arg0));
24153 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24154 gcc_assert (VECTOR_MODE_P (tmode));
24156 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24157 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24158 elt = get_element_number (TREE_TYPE (arg0), arg2);
24160 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24161 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24163 op0 = force_reg (tmode, op0);
24164 op1 = force_reg (mode1, op1);
24166 /* OP0 is the source of these builtin functions and shouldn't be
24167 modified.  Create a copy, use it, and return it as the target.  */
24168 target = gen_reg_rtx (tmode);
24169 emit_move_insn (target, op0);
24170 ix86_expand_vector_set (true, target, op1, elt);
24175 /* Expand an expression EXP that calls a built-in function,
24176 with result going to TARGET if that's convenient
24177 (and in mode MODE if that's convenient).
24178 SUBTARGET may be used as the target for computing one of EXP's operands.
24179 IGNORE is nonzero if the value is to be ignored. */
24182 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24183 enum machine_mode mode ATTRIBUTE_UNUSED,
24184 int ignore ATTRIBUTE_UNUSED)
24186 const struct builtin_description *d;
24188 enum insn_code icode;
24189 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24190 tree arg0, arg1, arg2;
24191 rtx op0, op1, op2, pat;
24192 enum machine_mode mode0, mode1, mode2;
24193 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
24195 /* Determine whether the builtin function is available under the current ISA.
24196 Originally the builtin was not created if it wasn't applicable to the
24197 current ISA based on the command line switches. With function specific
24198 options, we need to check in the context of the function making the call
24199 whether it is supported. */
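/* For instance (illustrative scenario), calling an SSE4.1 builtin from a
   function compiled for plain -msse2 is diagnosed here with the option
   string that would enable it, rather than at builtin-creation time,
   since the target attribute can change the ISA per function.  */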
24200 if (ix86_builtins_isa[fcode].isa
24201 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
24203 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24204 NULL, NULL, false);
24207 error ("%qE needs unknown isa option", fndecl);
24210 gcc_assert (opts != NULL);
24211 error ("%qE needs isa option %s", fndecl, opts);
24219 case IX86_BUILTIN_MASKMOVQ:
24220 case IX86_BUILTIN_MASKMOVDQU:
24221 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24222 ? CODE_FOR_mmx_maskmovq
24223 : CODE_FOR_sse2_maskmovdqu);
24224 /* Note the arg order is different from the operand order. */
24225 arg1 = CALL_EXPR_ARG (exp, 0);
24226 arg2 = CALL_EXPR_ARG (exp, 1);
24227 arg0 = CALL_EXPR_ARG (exp, 2);
24228 op0 = expand_normal (arg0);
24229 op1 = expand_normal (arg1);
24230 op2 = expand_normal (arg2);
24231 mode0 = insn_data[icode].operand[0].mode;
24232 mode1 = insn_data[icode].operand[1].mode;
24233 mode2 = insn_data[icode].operand[2].mode;
24235 op0 = force_reg (Pmode, op0);
24236 op0 = gen_rtx_MEM (mode1, op0);
24238 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24239 op0 = copy_to_mode_reg (mode0, op0);
24240 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24241 op1 = copy_to_mode_reg (mode1, op1);
24242 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24243 op2 = copy_to_mode_reg (mode2, op2);
24244 pat = GEN_FCN (icode) (op0, op1, op2);
24250 case IX86_BUILTIN_LDMXCSR:
24251 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24252 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24253 emit_move_insn (target, op0);
24254 emit_insn (gen_sse_ldmxcsr (target));
24257 case IX86_BUILTIN_STMXCSR:
24258 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24259 emit_insn (gen_sse_stmxcsr (target));
24260 return copy_to_mode_reg (SImode, target);
24262 case IX86_BUILTIN_CLFLUSH:
24263 arg0 = CALL_EXPR_ARG (exp, 0);
24264 op0 = expand_normal (arg0);
24265 icode = CODE_FOR_sse2_clflush;
24266 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24267 op0 = copy_to_mode_reg (Pmode, op0);
24269 emit_insn (gen_sse2_clflush (op0));
24272 case IX86_BUILTIN_MONITOR:
24273 arg0 = CALL_EXPR_ARG (exp, 0);
24274 arg1 = CALL_EXPR_ARG (exp, 1);
24275 arg2 = CALL_EXPR_ARG (exp, 2);
24276 op0 = expand_normal (arg0);
24277 op1 = expand_normal (arg1);
24278 op2 = expand_normal (arg2);
24280 op0 = copy_to_mode_reg (Pmode, op0);
24282 op1 = copy_to_mode_reg (SImode, op1);
24284 op2 = copy_to_mode_reg (SImode, op2);
24285 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
24288 case IX86_BUILTIN_MWAIT:
24289 arg0 = CALL_EXPR_ARG (exp, 0);
24290 arg1 = CALL_EXPR_ARG (exp, 1);
24291 op0 = expand_normal (arg0);
24292 op1 = expand_normal (arg1);
24294 op0 = copy_to_mode_reg (SImode, op0);
24296 op1 = copy_to_mode_reg (SImode, op1);
24297 emit_insn (gen_sse3_mwait (op0, op1));
24300 case IX86_BUILTIN_VEC_INIT_V2SI:
24301 case IX86_BUILTIN_VEC_INIT_V4HI:
24302 case IX86_BUILTIN_VEC_INIT_V8QI:
24303 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24305 case IX86_BUILTIN_VEC_EXT_V2DF:
24306 case IX86_BUILTIN_VEC_EXT_V2DI:
24307 case IX86_BUILTIN_VEC_EXT_V4SF:
24308 case IX86_BUILTIN_VEC_EXT_V4SI:
24309 case IX86_BUILTIN_VEC_EXT_V8HI:
24310 case IX86_BUILTIN_VEC_EXT_V2SI:
24311 case IX86_BUILTIN_VEC_EXT_V4HI:
24312 case IX86_BUILTIN_VEC_EXT_V16QI:
24313 return ix86_expand_vec_ext_builtin (exp, target);
24315 case IX86_BUILTIN_VEC_SET_V2DI:
24316 case IX86_BUILTIN_VEC_SET_V4SF:
24317 case IX86_BUILTIN_VEC_SET_V4SI:
24318 case IX86_BUILTIN_VEC_SET_V8HI:
24319 case IX86_BUILTIN_VEC_SET_V4HI:
24320 case IX86_BUILTIN_VEC_SET_V16QI:
24321 return ix86_expand_vec_set_builtin (exp);
24323 case IX86_BUILTIN_VEC_PERM_V2DF:
24324 case IX86_BUILTIN_VEC_PERM_V4SF:
24325 case IX86_BUILTIN_VEC_PERM_V2DI:
24326 case IX86_BUILTIN_VEC_PERM_V4SI:
24327 case IX86_BUILTIN_VEC_PERM_V8HI:
24328 case IX86_BUILTIN_VEC_PERM_V16QI:
24329 case IX86_BUILTIN_VEC_PERM_V2DI_U:
24330 case IX86_BUILTIN_VEC_PERM_V4SI_U:
24331 case IX86_BUILTIN_VEC_PERM_V8HI_U:
24332 case IX86_BUILTIN_VEC_PERM_V16QI_U:
24333 case IX86_BUILTIN_VEC_PERM_V4DF:
24334 case IX86_BUILTIN_VEC_PERM_V8SF:
24335 return ix86_expand_vec_perm_builtin (exp);
24337 case IX86_BUILTIN_INFQ:
24338 case IX86_BUILTIN_HUGE_VALQ:
24340 REAL_VALUE_TYPE inf;
24344 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24346 tmp = validize_mem (force_const_mem (mode, tmp));
24349 target = gen_reg_rtx (mode);
24351 emit_move_insn (target, tmp);
24355 case IX86_BUILTIN_LLWPCB:
24356 arg0 = CALL_EXPR_ARG (exp, 0);
24357 op0 = expand_normal (arg0);
24358 icode = CODE_FOR_lwp_llwpcb;
24359 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24360 op0 = copy_to_mode_reg (Pmode, op0);
24361 emit_insn (gen_lwp_llwpcb (op0));
24364 case IX86_BUILTIN_SLWPCB:
24365 icode = CODE_FOR_lwp_slwpcb;
24367 || ! (*insn_data[icode].operand[0].predicate) (target, Pmode))
24368 target = gen_reg_rtx (Pmode);
24369 emit_insn (gen_lwp_slwpcb (target));
24376 for (i = 0, d = bdesc_special_args;
24377 i < ARRAY_SIZE (bdesc_special_args);
24379 if (d->code == fcode)
24380 return ix86_expand_special_args_builtin (d, exp, target);
24382 for (i = 0, d = bdesc_args;
24383 i < ARRAY_SIZE (bdesc_args);
24385 if (d->code == fcode)
24388 case IX86_BUILTIN_FABSQ:
24389 case IX86_BUILTIN_COPYSIGNQ:
24391 /* Emit a normal call if SSE2 isn't available. */
24392 return expand_call (exp, target, ignore);
24394 return ix86_expand_args_builtin (d, exp, target);
24397 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24398 if (d->code == fcode)
24399 return ix86_expand_sse_comi (d, exp, target);
24401 for (i = 0, d = bdesc_pcmpestr;
24402 i < ARRAY_SIZE (bdesc_pcmpestr);
24404 if (d->code == fcode)
24405 return ix86_expand_sse_pcmpestr (d, exp, target);
24407 for (i = 0, d = bdesc_pcmpistr;
24408 i < ARRAY_SIZE (bdesc_pcmpistr);
24410 if (d->code == fcode)
24411 return ix86_expand_sse_pcmpistr (d, exp, target);
24413 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
24414 if (d->code == fcode)
24415 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
24416 (enum ix86_builtin_func_type)
24417 d->flag, d->comparison);
24419 gcc_unreachable ();
24422 /* Returns a function decl for a vectorized version of the builtin function
24423 with builtin function code FN and the result vector type TYPE, or NULL_TREE
24424 if it is not available. */
24427 ix86_builtin_vectorized_function (tree fndecl, tree type_out,
24430 enum machine_mode in_mode, out_mode;
24432 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
24434 if (TREE_CODE (type_out) != VECTOR_TYPE
24435 || TREE_CODE (type_in) != VECTOR_TYPE
24436 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
24439 out_mode = TYPE_MODE (TREE_TYPE (type_out));
24440 out_n = TYPE_VECTOR_SUBPARTS (type_out);
24441 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24442 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24446 case BUILT_IN_SQRT:
24447 if (out_mode == DFmode && out_n == 2
24448 && in_mode == DFmode && in_n == 2)
24449 return ix86_builtins[IX86_BUILTIN_SQRTPD];
24452 case BUILT_IN_SQRTF:
24453 if (out_mode == SFmode && out_n == 4
24454 && in_mode == SFmode && in_n == 4)
24455 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
24458 case BUILT_IN_LRINT:
24459 if (out_mode == SImode && out_n == 4
24460 && in_mode == DFmode && in_n == 2)
24461 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
24464 case BUILT_IN_LRINTF:
24465 if (out_mode == SImode && out_n == 4
24466 && in_mode == SFmode && in_n == 4)
24467 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
24470 case BUILT_IN_COPYSIGN:
24471 if (out_mode == DFmode && out_n == 2
24472 && in_mode == DFmode && in_n == 2)
24473 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
24476 case BUILT_IN_COPYSIGNF:
24477 if (out_mode == SFmode && out_n == 4
24478 && in_mode == SFmode && in_n == 4)
24479 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
24486 /* Dispatch to a handler for a vectorization library. */
24487 if (ix86_veclib_handler)
24488 return (*ix86_veclib_handler) ((enum built_in_function) fn, type_out,
24494 /* Handler for an SVML-style interface to
24495 a library with vectorized intrinsics. */
24498 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
24501 tree fntype, new_fndecl, args;
24504 enum machine_mode el_mode, in_mode;
24507 /* The SVML is suitable for unsafe math only. */
24508 if (!flag_unsafe_math_optimizations)
24511 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24512 n = TYPE_VECTOR_SUBPARTS (type_out);
24513 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24514 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24515 if (el_mode != in_mode
24523 case BUILT_IN_LOG10:
24525 case BUILT_IN_TANH:
24527 case BUILT_IN_ATAN:
24528 case BUILT_IN_ATAN2:
24529 case BUILT_IN_ATANH:
24530 case BUILT_IN_CBRT:
24531 case BUILT_IN_SINH:
24533 case BUILT_IN_ASINH:
24534 case BUILT_IN_ASIN:
24535 case BUILT_IN_COSH:
24537 case BUILT_IN_ACOSH:
24538 case BUILT_IN_ACOS:
24539 if (el_mode != DFmode || n != 2)
24543 case BUILT_IN_EXPF:
24544 case BUILT_IN_LOGF:
24545 case BUILT_IN_LOG10F:
24546 case BUILT_IN_POWF:
24547 case BUILT_IN_TANHF:
24548 case BUILT_IN_TANF:
24549 case BUILT_IN_ATANF:
24550 case BUILT_IN_ATAN2F:
24551 case BUILT_IN_ATANHF:
24552 case BUILT_IN_CBRTF:
24553 case BUILT_IN_SINHF:
24554 case BUILT_IN_SINF:
24555 case BUILT_IN_ASINHF:
24556 case BUILT_IN_ASINF:
24557 case BUILT_IN_COSHF:
24558 case BUILT_IN_COSF:
24559 case BUILT_IN_ACOSHF:
24560 case BUILT_IN_ACOSF:
24561 if (el_mode != SFmode || n != 4)
24569 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24571 if (fn == BUILT_IN_LOGF)
24572 strcpy (name, "vmlsLn4");
24573 else if (fn == BUILT_IN_LOG)
24574 strcpy (name, "vmldLn2");
24577 sprintf (name, "vmls%s", bname + 10);
24578 name[strlen (name) - 1] = '4';
24581 sprintf (name, "vmld%s2", bname + 10);
24583 /* Convert to uppercase. */
24587 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24588 args = TREE_CHAIN (args))
24592 fntype = build_function_type_list (type_out, type_in, NULL);
24594 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24596 /* Build a function declaration for the vectorized function. */
24597 new_fndecl = build_decl (BUILTINS_LOCATION,
24598 FUNCTION_DECL, get_identifier (name), fntype);
24599 TREE_PUBLIC (new_fndecl) = 1;
24600 DECL_EXTERNAL (new_fndecl) = 1;
24601 DECL_IS_NOVOPS (new_fndecl) = 1;
24602 TREE_READONLY (new_fndecl) = 1;
24607 /* Handler for an ACML-style interface to
24608 a library with vectorized intrinsics. */
24611 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
24613 char name[20] = "__vr.._";
24614 tree fntype, new_fndecl, args;
24617 enum machine_mode el_mode, in_mode;
24620 /* The ACML is 64-bit only and suitable for unsafe math only, as
24621 it does not correctly support parts of IEEE with the required
24622 precision, such as denormals.  */
24624 || !flag_unsafe_math_optimizations)
24627 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24628 n = TYPE_VECTOR_SUBPARTS (type_out);
24629 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24630 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24631 if (el_mode != in_mode
24641 case BUILT_IN_LOG2:
24642 case BUILT_IN_LOG10:
24645 if (el_mode != DFmode
24650 case BUILT_IN_SINF:
24651 case BUILT_IN_COSF:
24652 case BUILT_IN_EXPF:
24653 case BUILT_IN_POWF:
24654 case BUILT_IN_LOGF:
24655 case BUILT_IN_LOG2F:
24656 case BUILT_IN_LOG10F:
24659 if (el_mode != SFmode
24668 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24669 sprintf (name + 7, "%s", bname + 10);
24672 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24673 args = TREE_CHAIN (args))
24677 fntype = build_function_type_list (type_out, type_in, NULL);
24679 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24681 /* Build a function declaration for the vectorized function. */
24682 new_fndecl = build_decl (BUILTINS_LOCATION,
24683 FUNCTION_DECL, get_identifier (name), fntype);
24684 TREE_PUBLIC (new_fndecl) = 1;
24685 DECL_EXTERNAL (new_fndecl) = 1;
24686 DECL_IS_NOVOPS (new_fndecl) = 1;
24687 TREE_READONLY (new_fndecl) = 1;
24693 /* Returns a decl of a function that implements conversion of an integer vector
24694 into a floating-point vector, or vice-versa. TYPE is the type of the integer
24695 side of the conversion.
24696 Return NULL_TREE if it is not available. */
24699 ix86_vectorize_builtin_conversion (unsigned int code, tree type)
24701 if (! (TARGET_SSE2 && TREE_CODE (type) == VECTOR_TYPE))
24707 switch (TYPE_MODE (type))
24710 return TYPE_UNSIGNED (type)
24711 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
24712 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS];
24717 case FIX_TRUNC_EXPR:
24718 switch (TYPE_MODE (type))
24721 return TYPE_UNSIGNED (type)
24723 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ];
24733 /* Returns a code for a target-specific builtin that implements
24734 the reciprocal of the function, or NULL_TREE if not available.  */
24737 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
24738 bool sqrt ATTRIBUTE_UNUSED)
24740 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
24741 && flag_finite_math_only && !flag_trapping_math
24742 && flag_unsafe_math_optimizations))
24746 /* Machine dependent builtins. */
24749 /* Vectorized version of sqrt to rsqrt conversion. */
24750 case IX86_BUILTIN_SQRTPS_NR:
24751 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
24757 /* Normal builtins. */
24760 /* Sqrt to rsqrt conversion. */
24761 case BUILT_IN_SQRTF:
24762 return ix86_builtins[IX86_BUILTIN_RSQRTF];
24769 /* Helper for avx_vpermilps256_operand et al. This is also used by
24770 the expansion functions to turn the parallel back into a mask.
24771 The return value is 0 for no match and the imm8+1 for a match. */
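/* A sketch of the encoding for the 128-bit V4SFmode case: the parallel
   [1 0 3 2] contributes two bits per element, so the mask is
   1 + (0 << 2) + (3 << 4) + (2 << 6) = 0xb1, and the function returns
   0xb2 (the imm8 plus one).  */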
24774 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
24776 unsigned i, nelt = GET_MODE_NUNITS (mode);
24778 unsigned char ipar[8];
24780 if (XVECLEN (par, 0) != (int) nelt)
24783 /* Validate that all of the elements are constants, and not totally
24784 out of range. Copy the data into an integral array to make the
24785 subsequent checks easier. */
24786 for (i = 0; i < nelt; ++i)
24788 rtx er = XVECEXP (par, 0, i);
24789 unsigned HOST_WIDE_INT ei;
24791 if (!CONST_INT_P (er))
24802 /* In the 256-bit DFmode case, we can only move elements within
24803 a 128-bit lane.  */
24804 for (i = 0; i < 2; ++i)
24808 mask |= ipar[i] << i;
24810 for (i = 2; i < 4; ++i)
24814 mask |= (ipar[i] - 2) << i;
24819 /* In the 256-bit SFmode case, we have full freedom of movement
24820 within the low 128-bit lane, but the high 128-bit lane must
24821 mirror the exact same pattern. */
24822 for (i = 0; i < 4; ++i)
24823 if (ipar[i] + 4 != ipar[i + 4])
24830 /* In the 128-bit case, we have full freedom in the placement of
24831 the elements from the source operand.  */
24832 for (i = 0; i < nelt; ++i)
24833 mask |= ipar[i] << (i * (nelt / 2));
24837 gcc_unreachable ();
24840 /* Make sure success has a non-zero value by adding one. */
24844 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
24845 the expansion functions to turn the parallel back into a mask.
24846 The return value is 0 for no match and the imm8+1 for a match. */
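/* A sketch for V4DFmode: the parallel [2 3 4 5] selects the high 128-bit
   lane of the first operand and the low lane of the second; each lane
   index is ipar[i * nelt2] divided by nelt2 (non-lane-aligned starts are
   rejected), so the mask becomes 1 | (2 << 4) = 0x21 and the return
   value is 0x22.  */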
24849 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
24851 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
24853 unsigned char ipar[8];
24855 if (XVECLEN (par, 0) != (int) nelt)
24858 /* Validate that all of the elements are constants, and not totally
24859 out of range. Copy the data into an integral array to make the
24860 subsequent checks easier. */
24861 for (i = 0; i < nelt; ++i)
24863 rtx er = XVECEXP (par, 0, i);
24864 unsigned HOST_WIDE_INT ei;
24866 if (!CONST_INT_P (er))
24869 if (ei >= 2 * nelt)
24874 /* Validate that each half of the permute consists of consecutive elements, i.e. selects a whole half.  */
24875 for (i = 0; i < nelt2 - 1; ++i)
24876 if (ipar[i] + 1 != ipar[i + 1])
24878 for (i = nelt2; i < nelt - 1; ++i)
24879 if (ipar[i] + 1 != ipar[i + 1])
24882 /* Reconstruct the mask. */
24883 for (i = 0; i < 2; ++i)
24885 unsigned e = ipar[i * nelt2];
24889 mask |= e << (i * 4);
24892 /* Make sure success has a non-zero value by adding one. */
24897 /* Store OPERAND to memory after reload is completed.  This means
24898 that we can't easily use assign_stack_local.  */
24900 ix86_force_to_memory (enum machine_mode mode, rtx operand)
24904 gcc_assert (reload_completed);
24905 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
24907 result = gen_rtx_MEM (mode,
24908 gen_rtx_PLUS (Pmode,
24910 GEN_INT (-RED_ZONE_SIZE)));
24911 emit_move_insn (result, operand);
24913 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
24919 operand = gen_lowpart (DImode, operand);
24923 gen_rtx_SET (VOIDmode,
24924 gen_rtx_MEM (DImode,
24925 gen_rtx_PRE_DEC (DImode,
24926 stack_pointer_rtx)),
24930 gcc_unreachable ();
24932 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24941 split_di (&operand, 1, operands, operands + 1);
24943 gen_rtx_SET (VOIDmode,
24944 gen_rtx_MEM (SImode,
24945 gen_rtx_PRE_DEC (Pmode,
24946 stack_pointer_rtx)),
24949 gen_rtx_SET (VOIDmode,
24950 gen_rtx_MEM (SImode,
24951 gen_rtx_PRE_DEC (Pmode,
24952 stack_pointer_rtx)),
24957 /* Store HImodes as SImodes. */
24958 operand = gen_lowpart (SImode, operand);
24962 gen_rtx_SET (VOIDmode,
24963 gen_rtx_MEM (GET_MODE (operand),
24964 gen_rtx_PRE_DEC (SImode,
24965 stack_pointer_rtx)),
24969 gcc_unreachable ();
24971 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24976 /* Free the operand from memory.  */
24978 ix86_free_from_memory (enum machine_mode mode)
24980 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
24984 if (mode == DImode || TARGET_64BIT)
24988 /* Use LEA to deallocate stack space.  In peephole2 it will be converted
24989 to a pop or add instruction if registers are available.  */
24990 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
24991 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
24996 /* Implement TARGET_IRA_COVER_CLASSES.  If -mfpmath=sse, we prefer
24997 SSE_REGS to FLOAT_REGS if their costs for a pseudo are the
24998 same.  */
24999 static const enum reg_class *
25000 i386_ira_cover_classes (void)
25002 static const enum reg_class sse_fpmath_classes[] = {
25003 GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
25005 static const enum reg_class no_sse_fpmath_classes[] = {
25006 GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
25009 return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
25012 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
25013 QImode must go into class Q_REGS.
25014 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
25015 movdf to do mem-to-mem moves through integer regs. */
25017 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
25019 enum machine_mode mode = GET_MODE (x);
25021 /* We're only allowed to return a subclass of CLASS. Many of the
25022 following checks fail for NO_REGS, so eliminate that early. */
25023 if (regclass == NO_REGS)
25026 /* All classes can load zeros. */
25027 if (x == CONST0_RTX (mode))
25030 /* Force constants into memory if we are loading a (nonzero) constant into
25031 an MMX or SSE register. This is because there are no MMX/SSE instructions
25032 to load from a constant. */
25034 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
25037 /* Prefer SSE regs only, if we can use them for math. */
25038 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
25039 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
25041 /* Floating-point constants need more complex checks. */
25042 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
25044 /* General regs can load everything. */
25045 if (reg_class_subset_p (regclass, GENERAL_REGS))
25048 /* Floats can load 0 and 1 plus some others. Note that we eliminated
25049 zero above. We only want to wind up preferring 80387 registers if
25050 we plan on doing computation with them. */
25052 && standard_80387_constant_p (x))
25054 /* Limit the class to non-SSE.  */
25055 if (regclass == FLOAT_SSE_REGS)
25057 if (regclass == FP_TOP_SSE_REGS)
25059 if (regclass == FP_SECOND_SSE_REGS)
25060 return FP_SECOND_REG;
25061 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
25068 /* Generally when we see PLUS here, it's the function invariant
25069 (plus soft-fp const_int), which can only be computed into general
25070 regs.  */
25071 if (GET_CODE (x) == PLUS)
25072 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
25074 /* QImode constants are easy to load, but non-constant QImode data
25075 must go into Q_REGS. */
25076 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
25078 if (reg_class_subset_p (regclass, Q_REGS))
25080 if (reg_class_subset_p (Q_REGS, regclass))
25088 /* Discourage putting floating-point values in SSE registers unless
25089 SSE math is being used, and likewise for the 387 registers. */
25091 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
25093 enum machine_mode mode = GET_MODE (x);
25095 /* Restrict the output reload class to the register bank that we are doing
25096 math on. If we would like not to return a subset of CLASS, reject this
25097 alternative: if reload cannot do this, it will still use its choice. */
25098 mode = GET_MODE (x);
25099 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
25100 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
25102 if (X87_FLOAT_MODE_P (mode))
25104 if (regclass == FP_TOP_SSE_REGS)
25106 else if (regclass == FP_SECOND_SSE_REGS)
25107 return FP_SECOND_REG;
25109 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
25115 static enum reg_class
25116 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
25117 enum machine_mode mode,
25118 secondary_reload_info *sri ATTRIBUTE_UNUSED)
25120 /* QImode spills from non-QI registers require an
25121 intermediate register on 32-bit targets.  */
25122 if (!in_p && mode == QImode && !TARGET_64BIT
25123 && (rclass == GENERAL_REGS
25124 || rclass == LEGACY_REGS
25125 || rclass == INDEX_REGS))
25134 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
25135 regno = true_regnum (x);
25137 /* Return Q_REGS if the operand is in memory. */
25145 /* If we are copying between general and FP registers, we need a memory
25146 location. The same is true for SSE and MMX registers.
25148 To optimize register_move_cost performance, an inline variant is provided.
25150 The macro can't work reliably when one of the CLASSES is a class containing
25151 registers from multiple units (SSE, MMX, integer).  We avoid this by never
25152 combining those units in a single alternative in the machine description.
25153 Ensure that this constraint holds to avoid unexpected surprises.
25155 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
25156 enforce these sanity checks. */
25159 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25160 enum machine_mode mode, int strict)
25162 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
25163 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
25164 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
25165 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
25166 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
25167 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
25169 gcc_assert (!strict);
25173 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
25176 /* ??? This is a lie. We do have moves between mmx/general, and for
25177 mmx/sse2. But by saying we need secondary memory we discourage the
25178 register allocator from using the mmx registers unless needed. */
25179 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
25182 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25184 /* SSE1 doesn't have any direct moves from other classes. */
25188 /* If the target says that inter-unit moves are more expensive
25189 than moving through memory, then don't generate them. */
25190 if (!TARGET_INTER_UNIT_MOVES)
25193 /* Between SSE and general, we have moves no larger than word size. */
25194 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
25202 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25203 enum machine_mode mode, int strict)
25205 return inline_secondary_memory_needed (class1, class2, mode, strict);
25208 /* Return true if the registers in CLASS cannot represent the change from
25209 modes FROM to TO. */
25212 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
25213 enum reg_class regclass)
25218 /* x87 registers can't do subreg at all, as all values are reformatted
25219 to extended precision. */
25220 if (MAYBE_FLOAT_CLASS_P (regclass))
25223 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25225 /* Vector registers do not support QI or HImode loads. If we don't
25226 disallow a change to these modes, reload will assume it's ok to
25227 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25228 the vec_dupv4hi pattern. */
25229 if (GET_MODE_SIZE (from) < 4)
25232 /* Vector registers do not support subreg with nonzero offsets, which
25233 are otherwise valid for integer registers. Since we can't see
25234 whether we have a nonzero offset from here, prohibit all
25235 nonparadoxical subregs changing size. */
25236 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
25243 /* Return the cost of moving data of mode M between a
25244 register and memory. A value of 2 is the default; this cost is
25245 relative to those in `REGISTER_MOVE_COST'.
25247 This function is used extensively by register_move_cost that is used to
25248 build tables at startup. Make it inline in this case.
25249 When IN is 2, return the maximum of the in and out move costs.
25251 If moving between registers and memory is more expensive than
25252 between two registers, you should define this macro to express the
25253 relative cost.
25255 Model also increased moving costs of QImode registers in non
25256 Q_REGS classes.  */
25259 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25263 if (FLOAT_CLASS_P (regclass))
25281 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25282 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25284 if (SSE_CLASS_P (regclass))
25287 switch (GET_MODE_SIZE (mode))
25302 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25303 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25305 if (MMX_CLASS_P (regclass))
25308 switch (GET_MODE_SIZE (mode))
25320 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25321 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25323 switch (GET_MODE_SIZE (mode))
25326 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25329 return ix86_cost->int_store[0];
25330 if (TARGET_PARTIAL_REG_DEPENDENCY
25331 && optimize_function_for_speed_p (cfun))
25332 cost = ix86_cost->movzbl_load;
25334 cost = ix86_cost->int_load[0];
25336 return MAX (cost, ix86_cost->int_store[0]);
25342 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25344 return ix86_cost->movzbl_load;
25346 return ix86_cost->int_store[0] + 4;
25351 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25352 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25354 /* Compute the number of 32-bit moves needed.  TFmode is moved as XFmode.  */
25355 if (mode == TFmode)
25358 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25360 cost = ix86_cost->int_load[2];
25362 cost = ix86_cost->int_store[2];
25363 return (cost * (((int) GET_MODE_SIZE (mode)
25364 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
25369 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
25371 return inline_memory_move_cost (mode, regclass, in);
25375 /* Return the cost of moving data from a register in class CLASS1 to
25376 one in class CLASS2.
25378 It is not required that the cost always equal 2 when FROM is the same as TO;
25379 on some machines it is expensive to move between registers if they are not
25380 general registers. */
25383 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25384 enum reg_class class2)
25386 /* In case we require secondary memory, compute cost of the store followed
25387 by load. In order to avoid bad register allocation choices, we need
25388 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
25390 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25394 cost += inline_memory_move_cost (mode, class1, 2);
25395 cost += inline_memory_move_cost (mode, class2, 2);
25397 /* In case of copying from a general purpose register we may emit multiple
25398 stores followed by a single load, causing a memory size mismatch stall.
25399 Count this as an arbitrarily high cost of 20.  */
25400 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25403 /* In the case of FP/MMX moves, the registers actually overlap, and we
25404 have to switch modes in order to treat them differently. */
25405 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25406 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25412 /* Moves between SSE/MMX and integer unit are expensive. */
25413 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25414 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25416 /* ??? By keeping returned value relatively high, we limit the number
25417 of moves between integer and MMX/SSE registers for all targets.
25418 Additionally, high value prevents problem with x86_modes_tieable_p(),
25419 where integer modes in MMX/SSE registers are not tieable
25420 because of missing QImode and HImode moves to, from or between
25421 MMX/SSE registers. */
25422 return MAX (8, ix86_cost->mmxsse_to_integer);
25424 if (MAYBE_FLOAT_CLASS_P (class1))
25425 return ix86_cost->fp_move;
25426 if (MAYBE_SSE_CLASS_P (class1))
25427 return ix86_cost->sse_move;
25428 if (MAYBE_MMX_CLASS_P (class1))
25429 return ix86_cost->mmx_move;
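
/* Editor's note (worked example, not GCC code): for a GENERAL_REGS ->
   SSE_REGS copy of DImode on 32-bit, the secondary-memory path above
   charges 1 + inline_memory_move_cost (DImode, GENERAL_REGS, 2)
   + inline_memory_move_cost (DImode, SSE_REGS, 2), plus 20 more when
   two 32-bit stores would be followed by one 64-bit load (the memory
   size mismatch stall described above).  */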
/* Return 1 if hard register REGNO can hold a value of machine-mode MODE.  */

int
ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
  /* Flags and only flags can only hold CCmode values.  */
  if (CC_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;
  if (GET_MODE_CLASS (mode) == MODE_CC
      || GET_MODE_CLASS (mode) == MODE_RANDOM
      || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
    return 0;
  if (FP_REGNO_P (regno))
    return VALID_FP_MODE_P (mode);
  if (SSE_REGNO_P (regno))
    {
      /* We implement the move patterns for all vector modes into and
         out of SSE registers, even when no operation instructions
         are available.  OImode move is available only when AVX is
         enabled.  */
      return ((TARGET_AVX && mode == OImode)
              || VALID_AVX256_REG_MODE (mode)
              || VALID_SSE_REG_MODE (mode)
              || VALID_SSE2_REG_MODE (mode)
              || VALID_MMX_REG_MODE (mode)
              || VALID_MMX_REG_MODE_3DNOW (mode));
    }
  if (MMX_REGNO_P (regno))
    {
      /* We implement the move patterns for 3DNOW modes even in MMX mode,
         so if the register is available at all, then we can move data of
         the given mode into or out of it.  */
      return (VALID_MMX_REG_MODE (mode)
              || VALID_MMX_REG_MODE_3DNOW (mode));
    }

  if (mode == QImode)
    {
      /* Take care for QImode values - they can be in non-QI regs,
         but then they do cause partial register stalls.  */
      if (regno <= BX_REG || TARGET_64BIT)
        return 1;
      if (!TARGET_PARTIAL_REG_STALL)
        return 1;
      return reload_in_progress || reload_completed;
    }
  /* We handle both integer and floats in the general purpose registers.  */
  else if (VALID_INT_MODE_P (mode))
    return 1;
  else if (VALID_FP_MODE_P (mode))
    return 1;
  else if (VALID_DFP_MODE_P (mode))
    return 1;
  /* Lots of MMX code casts 8 byte vector modes to DImode.  If we then go
     on to use that value in smaller contexts, this can easily force a
     pseudo to be allocated to GENERAL_REGS.  Since this is no worse than
     supporting DImode, allow it.  */
  else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
    return 1;

  return 0;
}
/* A subroutine of ix86_modes_tieable_p.  Return true if MODE is a
   tieable integer mode.  */

static bool
ix86_tieable_integer_mode_p (enum machine_mode mode)
{
  switch (mode)
    {
    case HImode:
    case SImode:
      return true;

    case QImode:
      return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;

    case DImode:
      return TARGET_64BIT;

    default:
      return false;
    }
}
/* Return true if MODE1 is accessible in a register that can hold MODE2
   without copying.  That is, all register classes that can hold MODE2
   can also hold MODE1.  */

bool
ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  if (mode1 == mode2)
    return true;

  if (ix86_tieable_integer_mode_p (mode1)
      && ix86_tieable_integer_mode_p (mode2))
    return true;

  /* MODE2 being XFmode implies fp stack or general regs, which means we
     can tie any smaller floating point modes to it.  Note that we do not
     tie this with TFmode.  */
  if (mode2 == XFmode)
    return mode1 == SFmode || mode1 == DFmode;

  /* MODE2 being DFmode implies fp stack, general or sse regs, which means
     that we can tie it with SFmode.  */
  if (mode2 == DFmode)
    return mode1 == SFmode;

  /* If MODE2 is only appropriate for an SSE register, then tie with
     any other mode acceptable to SSE registers.  */
  if (GET_MODE_SIZE (mode2) == 16
      && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
    return (GET_MODE_SIZE (mode1) == 16
            && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));

  /* If MODE2 is appropriate for an MMX register, then tie
     with any other mode acceptable to MMX registers.  */
  if (GET_MODE_SIZE (mode2) == 8
      && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
    return (GET_MODE_SIZE (mode1) == 8
            && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));

  return false;
}
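
/* Editor's note (worked examples, not GCC code): under the rules above,
   SFmode and DFmode tie to XFmode because everything that can hold
   XFmode (x87 stack, general regs) can hold them too; SFmode ties to
   DFmode; but DFmode does not tie to XFmode, since SSE registers can
   hold DFmode and not XFmode.  */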
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */
static bool
ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
{
  enum rtx_code outer_code = (enum rtx_code) outer_code_i;
  enum machine_mode mode = GET_MODE (x);
  const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;

  switch (code)
    {
    case CONST_INT:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
        *total = 3;
      else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
        *total = 2;
      else if (flag_pic && SYMBOLIC_CONST (x)
               && (!TARGET_64BIT
                   || (GET_CODE (x) != LABEL_REF
                       && (GET_CODE (x) != SYMBOL_REF
                           || !SYMBOL_REF_LOCAL_P (x)))))
        *total = 1;
      else
        *total = 0;
      return true;

    case CONST_DOUBLE:
      if (mode == VOIDmode)
        *total = 0;
      else
        switch (standard_80387_constant_p (x))
          {
          default: /* Other constants */
            *total = 2;
            break;
          case 0:
          case -1:
            /* Start with (MEM (SYMBOL_REF)), since that's where
               it'll probably end up.  Add a penalty for size.  */
            *total = (COSTS_N_INSNS (1)
                      + (flag_pic != 0 && !TARGET_64BIT)
                      + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
            break;
          }
      return true;
    case ZERO_EXTEND:
      /* The zero extension is often completely free on x86_64, so make
         it as cheap as possible.  */
      if (TARGET_64BIT && mode == DImode
          && GET_MODE (XEXP (x, 0)) == SImode)
        *total = 1;
      else if (TARGET_ZERO_EXTEND_WITH_AND)
        *total = cost->add;
      else
        *total = cost->movzx;
      break;

    case SIGN_EXTEND:
      *total = cost->movsx;
      break;

    case ASHIFT:
      if (CONST_INT_P (XEXP (x, 1))
          && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
        {
          HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
          if (value == 1)
            {
              *total = cost->add;
              return false;
            }
          if ((value == 2 || value == 3)
              && cost->lea <= cost->shift_const)
            {
              *total = cost->lea;
              return false;
            }
        }
      /* FALLTHRU */
    case ROTATE:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATERT:
      if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
        {
          if (CONST_INT_P (XEXP (x, 1)))
            {
              if (INTVAL (XEXP (x, 1)) > 32)
                *total = cost->shift_const + COSTS_N_INSNS (2);
              else
                *total = cost->shift_const * 2;
            }
          else
            {
              if (GET_CODE (XEXP (x, 1)) == AND)
                *total = cost->shift_var * 2;
              else
                *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
            }
        }
      else
        {
          if (CONST_INT_P (XEXP (x, 1)))
            *total = cost->shift_const;
          else
            *total = cost->shift_var;
        }
      break;
    case MULT:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
        {
          /* ??? SSE scalar cost should be used here.  */
          *total = cost->fmul;
          return false;
        }
      else if (X87_FLOAT_MODE_P (mode))
        {
          *total = cost->fmul;
          return false;
        }
      else if (FLOAT_MODE_P (mode))
        {
          /* ??? SSE vector cost should be used here.  */
          *total = cost->fmul;
          return false;
        }
      else
        {
          rtx op0 = XEXP (x, 0);
          rtx op1 = XEXP (x, 1);
          int nbits;

          if (CONST_INT_P (XEXP (x, 1)))
            {
              unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
              for (nbits = 0; value != 0; value &= value - 1)
                nbits++;
            }
          else
            /* This is arbitrary.  */
            nbits = 7;

          /* Compute costs correctly for widening multiplication.  */
          if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
              && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
                 == GET_MODE_SIZE (mode))
            {
              int is_mulwiden = 0;
              enum machine_mode inner_mode = GET_MODE (op0);

              if (GET_CODE (op0) == GET_CODE (op1))
                is_mulwiden = 1, op1 = XEXP (op1, 0);
              else if (CONST_INT_P (op1))
                {
                  if (GET_CODE (op0) == SIGN_EXTEND)
                    is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
                                  == INTVAL (op1);
                  else
                    is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
                }

              if (is_mulwiden)
                op0 = XEXP (op0, 0), mode = GET_MODE (op0);
            }

          *total = (cost->mult_init[MODE_INDEX (mode)]
                    + nbits * cost->mult_bit
                    + rtx_cost (op0, outer_code, speed)
                    + rtx_cost (op1, outer_code, speed));

          return true;
        }
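
      /* Editor's note (worked example, not GCC code): the loop above
         counts the multiplier's set bits with Kernighan's trick -- each
         iteration of "value &= value - 1" clears the lowest set bit.
         For value == 0x90 (binary 10010000) it runs twice, so nbits == 2
         and the estimate grows by only 2 * cost->mult_bit.  */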
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
        /* ??? SSE cost should be used here.  */
        *total = cost->fdiv;
      else if (X87_FLOAT_MODE_P (mode))
        *total = cost->fdiv;
      else if (FLOAT_MODE_P (mode))
        /* ??? SSE vector cost should be used here.  */
        *total = cost->fdiv;
      else
        *total = cost->divide[MODE_INDEX (mode)];
      break;
    case PLUS:
      if (GET_MODE_CLASS (mode) == MODE_INT
          && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
        {
          if (GET_CODE (XEXP (x, 0)) == PLUS
              && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
              && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
              && CONSTANT_P (XEXP (x, 1)))
            {
              HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
              if (val == 2 || val == 4 || val == 8)
                {
                  *total = cost->lea;
                  *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
                  *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
                                      outer_code, speed);
                  *total += rtx_cost (XEXP (x, 1), outer_code, speed);
                  return true;
                }
            }
          else if (GET_CODE (XEXP (x, 0)) == MULT
                   && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
            {
              HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
              if (val == 2 || val == 4 || val == 8)
                {
                  *total = cost->lea;
                  *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
                  *total += rtx_cost (XEXP (x, 1), outer_code, speed);
                  return true;
                }
            }
          else if (GET_CODE (XEXP (x, 0)) == PLUS)
            {
              *total = cost->lea;
              *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
              *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
              *total += rtx_cost (XEXP (x, 1), outer_code, speed);
              return true;
            }
        }
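
      /* Editor's note (illustration, not GCC code): the three arms above
         correspond to the lea address forms base + index*scale + disp,
         index*scale + disp, and base + index + disp.  E.g.
         (plus (plus (mult reg 4) reg) const) is costed as a single lea
         plus the costs of its sub-operands, instead of a shift and two
         adds.  */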
      /* FALLTHRU */

    case MINUS:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
        {
          /* ??? SSE cost should be used here.  */
          *total = cost->fadd;
          return false;
        }
      else if (X87_FLOAT_MODE_P (mode))
        {
          *total = cost->fadd;
          return false;
        }
      else if (FLOAT_MODE_P (mode))
        {
          /* ??? SSE vector cost should be used here.  */
          *total = cost->fadd;
          return false;
        }
      /* FALLTHRU */

    case AND:
    case IOR:
    case XOR:
      if (!TARGET_64BIT && mode == DImode)
        {
          *total = (cost->add * 2
                    + (rtx_cost (XEXP (x, 0), outer_code, speed)
                       << (GET_MODE (XEXP (x, 0)) != DImode))
                    + (rtx_cost (XEXP (x, 1), outer_code, speed)
                       << (GET_MODE (XEXP (x, 1)) != DImode)));
          return true;
        }
      /* FALLTHRU */

    case NEG:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
        {
          /* ??? SSE cost should be used here.  */
          *total = cost->fchs;
          return false;
        }
      else if (X87_FLOAT_MODE_P (mode))
        {
          *total = cost->fchs;
          return false;
        }
      else if (FLOAT_MODE_P (mode))
        {
          /* ??? SSE vector cost should be used here.  */
          *total = cost->fchs;
          return false;
        }
      /* FALLTHRU */

    case NOT:
      if (!TARGET_64BIT && mode == DImode)
        *total = cost->add * 2;
      else
        *total = cost->add;
      break;
    case COMPARE:
      if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
          && XEXP (XEXP (x, 0), 1) == const1_rtx
          && CONST_INT_P (XEXP (XEXP (x, 0), 2))
          && XEXP (x, 1) == const0_rtx)
        {
          /* This kind of construct is implemented using test[bwl].
             Treat it as if we had an AND.  */
          *total = (cost->add
                    + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
                    + rtx_cost (const1_rtx, outer_code, speed));
          return true;
        }
      return false;
    case FLOAT_EXTEND:
      if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
        *total = 0;
      return false;

    case ABS:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
        /* ??? SSE cost should be used here.  */
        *total = cost->fabs;
      else if (X87_FLOAT_MODE_P (mode))
        *total = cost->fabs;
      else if (FLOAT_MODE_P (mode))
        /* ??? SSE vector cost should be used here.  */
        *total = cost->fabs;
      break;

    case SQRT:
      if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
        /* ??? SSE cost should be used here.  */
        *total = cost->fsqrt;
      else if (X87_FLOAT_MODE_P (mode))
        *total = cost->fsqrt;
      else if (FLOAT_MODE_P (mode))
        /* ??? SSE vector cost should be used here.  */
        *total = cost->fsqrt;
      break;
    case UNSPEC:
      if (XINT (x, 1) == UNSPEC_TP)
        *total = 0;
      return false;

    case VEC_SELECT:
    case VEC_CONCAT:
    case VEC_MERGE:
    case VEC_DUPLICATE:
      /* ??? Assume all of these vector manipulation patterns are
         recognizable.  In which case they all pretty much have the
         same cost.  */
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      break;
    }

  return false;
}
#if TARGET_MACHO

static int current_machopic_label_num;

/* Given a symbol name and its associated stub, write out the
   definition of the stub.  */

void
machopic_output_stub (FILE *file, const char *symb, const char *stub)
{
  unsigned int length;
  char *binder_name, *symbol_name, lazy_ptr_name[32];
  int label = ++current_machopic_label_num;

  /* For 64-bit we shouldn't get here.  */
  gcc_assert (!TARGET_64BIT);

  /* Lose our funky encoding stuff so it doesn't contaminate the stub.  */
  symb = (*targetm.strip_name_encoding) (symb);

  length = strlen (stub);
  binder_name = XALLOCAVEC (char, length + 32);
  GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);

  length = strlen (symb);
  symbol_name = XALLOCAVEC (char, length + 32);
  GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);

  sprintf (lazy_ptr_name, "L%d$lz", label);

  if (MACHOPIC_PURE)
    switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
  else
    switch_to_section (darwin_sections[machopic_symbol_stub_section]);

  fprintf (file, "%s:\n", stub);
  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

  if (MACHOPIC_PURE)
    {
      fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
      fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
      fprintf (file, "\tjmp\t*%%edx\n");
    }
  else
    fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);

  fprintf (file, "%s:\n", binder_name);

  if (MACHOPIC_PURE)
    {
      fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
      fputs ("\tpushl\t%eax\n", file);
    }
  else
    fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);

  fputs ("\tjmp\tdyld_stub_binding_helper\n", file);

  switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
  fprintf (file, "%s:\n", lazy_ptr_name);
  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
  fprintf (file, ASM_LONG "%s\n", binder_name);
}
#endif /* TARGET_MACHO */
/* Order the registers for register allocator.  */

void
x86_order_regs_for_local_alloc (void)
{
  int pos = 0;
  int i;

  /* First allocate the local general purpose registers.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (GENERAL_REGNO_P (i) && call_used_regs[i])
      reg_alloc_order [pos++] = i;

  /* Global general purpose registers.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (GENERAL_REGNO_P (i) && !call_used_regs[i])
      reg_alloc_order [pos++] = i;

  /* x87 registers come first in case we are doing FP math
     using them.  */
  if (!TARGET_SSE_MATH)
    for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
      reg_alloc_order [pos++] = i;

  /* SSE registers.  */
  for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
    reg_alloc_order [pos++] = i;
  for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
    reg_alloc_order [pos++] = i;

  /* x87 registers.  */
  if (TARGET_SSE_MATH)
    for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
      reg_alloc_order [pos++] = i;

  for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
    reg_alloc_order [pos++] = i;

  /* Initialize the rest of array as we do not allocate some registers
     at all.  */
  while (pos < FIRST_PSEUDO_REGISTER)
    reg_alloc_order [pos++] = 0;
}
/* Handle a "ms_abi" or "sysv_abi" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
ix86_handle_abi_attribute (tree *node, tree name,
                           tree args ATTRIBUTE_UNUSED,
                           int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != METHOD_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
      return NULL_TREE;
    }
  if (!TARGET_64BIT)
    {
      warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
               name);
      *no_add_attrs = true;
      return NULL_TREE;
    }

  /* Can combine regparm with all attributes but fastcall.  */
  if (is_attribute_p ("ms_abi", name))
    {
      if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
        {
          error ("ms_abi and sysv_abi attributes are not compatible");
        }

      return NULL_TREE;
    }
  else if (is_attribute_p ("sysv_abi", name))
    {
      if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
        {
          error ("ms_abi and sysv_abi attributes are not compatible");
        }

      return NULL_TREE;
    }

  return NULL_TREE;
}
/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
ix86_handle_struct_attribute (tree *node, tree name,
                              tree args ATTRIBUTE_UNUSED,
                              int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
        type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && (TREE_CODE (*type) == RECORD_TYPE
                 || TREE_CODE (*type) == UNION_TYPE)))
    {
      warning (OPT_Wattributes, "%qE attribute ignored",
               name);
      *no_add_attrs = true;
    }
  else if ((is_attribute_p ("ms_struct", name)
            && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
           || ((is_attribute_p ("gcc_struct", name)
                && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qE incompatible attribute ignored",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
static tree
ix86_handle_fndecl_attribute (tree *node, tree name,
                              tree args ATTRIBUTE_UNUSED,
                              int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }

  if (TARGET_64BIT)
    {
      warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
               name);
      return NULL_TREE;
    }

#ifndef HAVE_AS_IX86_SWAP
  sorry ("ms_hook_prologue attribute needs assembler swap suffix support");
#endif

  return NULL_TREE;
}
static bool
ix86_ms_bitfield_layout_p (const_tree record_type)
{
  return ((TARGET_MS_BITFIELD_LAYOUT
           && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
          || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
}
/* Returns an expression indicating where the this parameter is
   located on entry to the FUNCTION.  */

static rtx
x86_this_parameter (tree function)
{
  tree type = TREE_TYPE (function);
  bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
  int nregs;

  if (TARGET_64BIT)
    {
      const int *parm_regs;

      if (ix86_function_type_abi (type) == MS_ABI)
        parm_regs = x86_64_ms_abi_int_parameter_registers;
      else
        parm_regs = x86_64_int_parameter_registers;
      return gen_rtx_REG (DImode, parm_regs[aggr]);
    }

  nregs = ix86_function_regparm (type, function);

  if (nregs > 0 && !stdarg_p (type))
    {
      int regno;

      if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
        regno = aggr ? DX_REG : CX_REG;
      /* ???: To be verified.  It is not absolutely clear how aggregates
         have to be treated for thiscall.  We assume that they are
         identical to fastcall.  */
      else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
        regno = aggr ? DX_REG : CX_REG;
      else
        {
          regno = AX_REG;
          if (aggr)
            return gen_rtx_MEM (SImode,
                                plus_constant (stack_pointer_rtx, 4));
        }
      return gen_rtx_REG (SImode, regno);
    }

  return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
}
/* Determine whether x86_output_mi_thunk can succeed.  */

static bool
x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
                         HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
                         HOST_WIDE_INT vcall_offset, const_tree function)
{
  /* 64-bit can handle anything.  */
  if (TARGET_64BIT)
    return true;

  /* For 32-bit, everything's fine if we have one free register.  */
  if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
    return true;

  /* Need a free register for vcall_offset.  */
  if (vcall_offset)
    return false;

  /* Need a free register for GOT references.  */
  if (flag_pic && !(*targetm.binds_local_p) (function))
    return false;

  /* Otherwise ok.  */
  return true;
}
/* Output the assembler code for a thunk function.  THUNK_DECL is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at
   *(*this + vcall_offset) should be added to THIS.  */
static void
x86_output_mi_thunk (FILE *file,
                     tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
                     HOST_WIDE_INT vcall_offset, tree function)
{
  rtx xops[3];
  rtx this_param = x86_this_parameter (function);
  rtx this_reg, tmp;

  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), file, 1);

  /* If VCALL_OFFSET, we'll need THIS in a register.  Might as well
     pull it in now and let DELTA benefit.  */
  if (REG_P (this_param))
    this_reg = this_param;
  else if (vcall_offset)
    {
      /* Put the this parameter into %eax.  */
      xops[0] = this_param;
      xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
      output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
    }
  else
    this_reg = NULL_RTX;

  /* Adjust the this parameter by a fixed constant.  */
  if (delta)
    {
      xops[0] = GEN_INT (delta);
      xops[1] = this_reg ? this_reg : this_param;
      if (TARGET_64BIT)
        {
          if (!x86_64_general_operand (xops[0], DImode))
            {
              tmp = gen_rtx_REG (DImode, R10_REG);
              xops[1] = tmp;
              output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
              xops[0] = tmp;
              xops[1] = this_param;
            }
          if (x86_maybe_negate_const_int (&xops[0], DImode))
            output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
          else
            output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
        }
      else if (x86_maybe_negate_const_int (&xops[0], SImode))
        output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
      else
        output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
    }

  /* Adjust the this parameter by a value stored in the vtable.  */
  if (vcall_offset)
    {
      if (TARGET_64BIT)
        tmp = gen_rtx_REG (DImode, R10_REG);
      else
        {
          int tmp_regno = CX_REG;
          if (lookup_attribute ("fastcall",
                                TYPE_ATTRIBUTES (TREE_TYPE (function)))
              || lookup_attribute ("thiscall",
                                   TYPE_ATTRIBUTES (TREE_TYPE (function))))
            tmp_regno = AX_REG;
          tmp = gen_rtx_REG (SImode, tmp_regno);
        }

      xops[0] = gen_rtx_MEM (Pmode, this_reg);
      xops[1] = tmp;
      output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);

      /* Adjust the this parameter.  */
      xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
      if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
        {
          rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
          xops[0] = GEN_INT (vcall_offset);
          xops[1] = tmp2;
          output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
          xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
        }
      xops[1] = this_reg;
      output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
    }

  /* If necessary, drop THIS back to its stack slot.  */
  if (this_reg && this_reg != this_param)
    {
      xops[0] = this_reg;
      xops[1] = this_param;
      output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
    }

  xops[0] = XEXP (DECL_RTL (function), 0);
  if (TARGET_64BIT)
    {
      if (!flag_pic || (*targetm.binds_local_p) (function))
        output_asm_insn ("jmp\t%P0", xops);
      /* All thunks should be in the same object as their target,
         and thus binds_local_p should be true.  */
      else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
        gcc_unreachable ();
      else
        {
          tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
          tmp = gen_rtx_CONST (Pmode, tmp);
          tmp = gen_rtx_MEM (QImode, tmp);
          xops[0] = tmp;
          output_asm_insn ("jmp\t%A0", xops);
        }
    }
  else
    {
      if (!flag_pic || (*targetm.binds_local_p) (function))
        output_asm_insn ("jmp\t%P0", xops);
      else
#if TARGET_MACHO
        if (TARGET_MACHO)
          {
            rtx sym_ref = XEXP (DECL_RTL (function), 0);
            tmp = (gen_rtx_SYMBOL_REF
                   (Pmode,
                    machopic_indirection_name (sym_ref, /*stub_p=*/true)));
            tmp = gen_rtx_MEM (QImode, tmp);
            xops[0] = tmp;
            output_asm_insn ("jmp\t%0", xops);
          }
        else
#endif /* TARGET_MACHO */
          {
            tmp = gen_rtx_REG (SImode, CX_REG);
            output_set_got (tmp, NULL_RTX);

            xops[1] = tmp;
            output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
            output_asm_insn ("jmp\t{*}%1", xops);
          }
    }
  final_end_function ();
}
static void
x86_file_start (void)
{
  default_file_start ();
#if TARGET_MACHO
  darwin_file_start ();
#endif
  if (X86_FILE_START_VERSION_DIRECTIVE)
    fputs ("\t.version\t\"01.01\"\n", asm_out_file);
  if (X86_FILE_START_FLTUSED)
    fputs ("\t.global\t__fltused\n", asm_out_file);
  if (ix86_asm_dialect == ASM_INTEL)
    fputs ("\t.intel_syntax noprefix\n", asm_out_file);
}
int
x86_field_alignment (tree field, int computed)
{
  enum machine_mode mode;
  tree type = TREE_TYPE (field);

  if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
    return computed;
  mode = TYPE_MODE (strip_array_types (type));
  if (mode == DFmode || mode == DCmode
      || GET_MODE_CLASS (mode) == MODE_INT
      || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
    return MIN (32, computed);
  return computed;
}
/* Output assembler code to FILE to increment profiler label # LABELNO
   for profiling a function entry.  */

void
x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
{
  if (TARGET_64BIT)
    {
#ifndef NO_PROFILE_COUNTERS
      fprintf (file, "\tleaq\t" LPREFIX "P%d(%%rip),%%r11\n", labelno);
#endif

      if (DEFAULT_ABI == SYSV_ABI && flag_pic)
        fputs ("\tcall\t*" MCOUNT_NAME "@GOTPCREL(%rip)\n", file);
      else
        fputs ("\tcall\t" MCOUNT_NAME "\n", file);
    }
  else if (flag_pic)
    {
#ifndef NO_PROFILE_COUNTERS
      fprintf (file, "\tleal\t" LPREFIX "P%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
               labelno);
#endif
      fputs ("\tcall\t*" MCOUNT_NAME "@GOT(%ebx)\n", file);
    }
  else
    {
#ifndef NO_PROFILE_COUNTERS
      fprintf (file, "\tmovl\t$" LPREFIX "P%d,%%" PROFILE_COUNT_REGISTER "\n",
               labelno);
#endif
      fputs ("\tcall\t" MCOUNT_NAME "\n", file);
    }
}
#ifdef ASM_OUTPUT_MAX_SKIP_PAD
/* We don't have exact information about the insn sizes, but we may assume
   quite safely that we are informed about all 1 byte insns and memory
   address sizes.  This is enough to eliminate unnecessary padding in
   99% of cases.  */

static int
min_insn_size (rtx insn)
{
  int l = 0, len;

  if (!INSN_P (insn) || !active_insn_p (insn))
    return 0;

  /* Discard alignments we've emitted and jump instructions.  */
  if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
      && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
    return 0;
  if (JUMP_TABLE_DATA_P (insn))
    return 0;

  /* Important case - calls are always 5 bytes.
     It is common to have many calls in a row.  */
  if (CALL_P (insn)
      && symbolic_reference_mentioned_p (PATTERN (insn))
      && !SIBLING_CALL_P (insn))
    return 5;
  len = get_attr_length (insn);
  if (len <= 1)
    return 1;

  /* For normal instructions we rely on get_attr_length being exact,
     with a few exceptions.  */
  if (!JUMP_P (insn))
    {
      enum attr_type type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_MULTI:
          if (GET_CODE (PATTERN (insn)) == ASM_INPUT
              || asm_noperands (PATTERN (insn)) >= 0)
            return 0;
          break;
        case TYPE_OTHER:
        case TYPE_FCMP:
          break;
        default:
          /* Otherwise trust get_attr_length.  */
          break;
        }

      l = get_attr_length_address (insn);
      if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
        l = 4;
    }
  if (l)
    return 1 + l;
  else
    return 2;
}
/* The AMD K8 core mispredicts jumps when there are more than 3 jumps in
   a 16 byte window.  */

static void
ix86_avoid_jump_mispredicts (void)
{
  rtx insn, start = get_insns ();
  int nbytes = 0, njumps = 0;
  int isjump = 0;

  /* Look for all minimal intervals of instructions containing 4 jumps.
     The intervals are bounded by START and INSN.  NBYTES is the total
     size of instructions in the interval including INSN and not including
     START.  When NBYTES is smaller than 16 bytes, it is possible
     that the end of START and INSN ends up in the same 16 byte page.

     The smallest offset in the page INSN can start is the case where START
     ends on the offset 0.  Offset of INSN is then NBYTES - sizeof (INSN).
     We add p2align to the 16 byte window with maxskip
     15 - NBYTES + sizeof (INSN).  */
  for (insn = start; insn; insn = NEXT_INSN (insn))
    {
      int min_size;

      if (LABEL_P (insn))
        {
          int align = label_to_alignment (insn);
          int max_skip = label_to_max_skip (insn);

          if (max_skip > 15)
            max_skip = 15;
          /* If align > 3, only up to 16 - max_skip - 1 bytes can be
             already in the current 16 byte page, because otherwise
             ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
             bytes to reach 16 byte boundary.  */
          if (align <= 0
              || (align <= 3 && max_skip != (1 << align) - 1))
            max_skip = 0;
          if (dump_file)
            fprintf (dump_file, "Label %i with max_skip %i\n",
                     INSN_UID (insn), max_skip);
          if (max_skip)
            {
              while (nbytes + max_skip >= 16)
                {
                  start = NEXT_INSN (start);
                  if ((JUMP_P (start)
                       && GET_CODE (PATTERN (start)) != ADDR_VEC
                       && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
                      || CALL_P (start))
                    njumps--, isjump = 1;
                  else
                    isjump = 0;
                  nbytes -= min_insn_size (start);
                }
            }
          continue;
        }

      min_size = min_insn_size (insn);
      nbytes += min_size;
      if (dump_file)
        fprintf (dump_file, "Insn %i estimated to %i bytes\n",
                 INSN_UID (insn), min_size);
      if ((JUMP_P (insn)
           && GET_CODE (PATTERN (insn)) != ADDR_VEC
           && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
          || CALL_P (insn))
        njumps++;
      else
        continue;

      while (njumps > 3)
        {
          start = NEXT_INSN (start);
          if ((JUMP_P (start)
               && GET_CODE (PATTERN (start)) != ADDR_VEC
               && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
              || CALL_P (start))
            njumps--, isjump = 1;
          else
            isjump = 0;
          nbytes -= min_insn_size (start);
        }
      gcc_assert (njumps >= 0);
      if (dump_file)
        fprintf (dump_file, "Interval %i to %i has %i bytes\n",
                 INSN_UID (start), INSN_UID (insn), nbytes);

      if (njumps == 3 && isjump && nbytes < 16)
        {
          int padsize = 15 - nbytes + min_insn_size (insn);

          if (dump_file)
            fprintf (dump_file, "Padding insn %i by %i bytes!\n",
                     INSN_UID (insn), padsize);
          emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
        }
    }
}
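
/* Editor's note (worked example, not GCC code): if the minimal interval
   holding four jumps is nbytes == 14 and the closing jump itself is
   2 bytes, then padsize == 15 - 14 + 2 == 3 above, enough to guarantee
   the fourth jump cannot share a 16 byte page with the other three.  */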
/* The AMD Athlon works faster when RET is not the destination of a
   conditional jump or directly preceded by another jump instruction.
   We avoid the penalty by inserting a NOP just before the RET
   instructions in such cases.  */
static void
ix86_pad_returns (void)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
    {
      basic_block bb = e->src;
      rtx ret = BB_END (bb);
      rtx prev;
      bool replace = false;

      if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
          || optimize_bb_for_size_p (bb))
        continue;
      for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
        if (active_insn_p (prev) || LABEL_P (prev))
          break;
      if (prev && LABEL_P (prev))
        {
          edge e;
          edge_iterator ei;

          FOR_EACH_EDGE (e, ei, bb->preds)
            if (EDGE_FREQUENCY (e) && e->src->index >= 0
                && !(e->flags & EDGE_FALLTHRU))
              replace = true;
        }
      if (!replace)
        {
          prev = prev_active_insn (ret);
          if (prev
              && ((JUMP_P (prev) && any_condjump_p (prev))
                  || CALL_P (prev)))
            replace = true;
          /* Empty functions get branch mispredict even when the jump destination
             is not visible to us.  */
          if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
            replace = true;
        }
      if (replace)
        {
          emit_jump_insn_before (gen_return_internal_long (), ret);
          delete_insn (ret);
        }
    }
}
/* Implement machine specific optimizations.  We implement padding of returns
   for K8 CPUs and a pass to avoid 4 jumps in a single 16 byte window.  */
static void
ix86_reorg (void)
{
  if (optimize && optimize_function_for_speed_p (cfun))
    {
      if (TARGET_PAD_RETURNS)
        ix86_pad_returns ();
#ifdef ASM_OUTPUT_MAX_SKIP_PAD
      if (TARGET_FOUR_JUMP_LIMIT)
        ix86_avoid_jump_mispredicts ();
#endif
    }
}
/* Return nonzero when a QImode register that must be represented via a
   REX prefix is used.  */
bool
x86_extended_QIreg_mentioned_p (rtx insn)
{
  int i;
  extract_insn_cached (insn);
  for (i = 0; i < recog_data.n_operands; i++)
    if (REG_P (recog_data.operand[i])
        && REGNO (recog_data.operand[i]) > BX_REG)
       return true;
  return false;
}

/* Return nonzero when P points to a register encoded via a REX prefix.
   Called via for_each_rtx.  */
static int
extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno;
  if (!REG_P (*p))
    return 0;
  regno = REGNO (*p);
  return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
}

/* Return true when INSN mentions a register that must be encoded using a
   REX prefix.  */
bool
x86_extended_reg_mentioned_p (rtx insn)
{
  return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
                       extended_reg_mentioned_1, NULL);
}
/* If profitable, negate (without causing overflow) integer constant
   of mode MODE at location LOC.  Return true in this case.  */
bool
x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
{
  HOST_WIDE_INT val;

  if (!CONST_INT_P (*loc))
    return false;

  switch (mode)
    {
    case DImode:
      /* DImode x86_64 constants must fit in 32 bits.  */
      gcc_assert (x86_64_immediate_operand (*loc, mode));

      mode = SImode;
      break;

    case SImode:
    case HImode:
    case QImode:
      break;

    default:
      gcc_unreachable ();
    }

  /* Avoid overflows.  */
  if (mode_signbit_p (mode, *loc))
    return false;

  val = INTVAL (*loc);

  /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
     Exceptions: -128 encodes smaller than 128, so swap sign and op.  */
  if ((val < 0 && val != -128)
      || val == 128)
    {
      *loc = GEN_INT (-val);
      return true;
    }

  return false;
}
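
/* Editor's note (worked example, not GCC code): with *loc == -4 the
   constant is rewritten to 4 and the caller switches the opcode, so
   "addl $-4, %eax" is emitted as "subl $4, %eax".  The -128 exception
   exists because -128 fits the short imm8 encoding while +128 would
   need a full imm32.  */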
/* Generate an unsigned DImode/SImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.  */

void
x86_emit_floatuns (rtx operands[2])
{
  rtx neglab, donelab, i0, i1, f0, in, out;
  enum machine_mode mode, inmode;

  inmode = GET_MODE (operands[1]);
  gcc_assert (inmode == SImode || inmode == DImode);

  out = operands[0];
  in = force_reg (inmode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);

  expand_float (out, in, 0);

  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
                            1, OPTAB_DIRECT);
  i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
                            1, OPTAB_DIRECT);
  i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);

  expand_float (f0, i0, 0);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
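
/* Editor's illustration, not GCC code: the expansion above is the
   classic unsigned-to-float trick.  A C sketch of the same logic,
   assuming a 64-bit unsigned input and a double result:  */

static double
example_floatuns (unsigned long long u)
{
  if ((long long) u >= 0)
    return (double) (long long) u;	/* Plain signed conversion.  */

  /* Halve the value, keeping the lost low bit sticky in bit 0 so the
     final rounding stays correct; convert; then double the result.  */
  double f = (double) (long long) ((u >> 1) | (u & 1));
  return f + f;
}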
/* AVX does not support 32-byte integer vector operations,
   thus the longest vector we are faced with is V16QImode.  */
#define MAX_VECT_LEN	16

struct expand_vec_perm_d
{
  rtx target, op0, op1;
  unsigned char perm[MAX_VECT_LEN];
  enum machine_mode vmode;
  unsigned char nelt;
  bool testing_p;
};

static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);

/* Get a vector mode of the same size as the original but with elements
   twice as wide.  This is only guaranteed to apply to integral vectors.  */

static inline enum machine_mode
get_mode_wider_vector (enum machine_mode o)
{
  /* ??? Rely on the ordering that genmodes.c gives to vectors.  */
  enum machine_mode n = GET_MODE_WIDER_MODE (o);
  gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
  gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
  return n;
}
/* A subroutine of ix86_expand_vector_init.  Store into TARGET a vector
   with all elements equal to VAL.  Return true if successful.  */

static bool
ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
                                   rtx target, rtx val)
{
  bool ok;

  switch (mode)
    {
    case V2SImode:
    case V2SFmode:
      if (!mmx_ok)
        return false;
      /* FALLTHRU */

    case V4DFmode:
    case V4DImode:
    case V8SFmode:
    case V8SImode:
    case V2DFmode:
    case V2DImode:
    case V4SFmode:
    case V4SImode:
      {
        rtx insn, dup;

        /* First attempt to recognize VAL as-is.  */
        dup = gen_rtx_VEC_DUPLICATE (mode, val);
        insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
        if (recog_memoized (insn) < 0)
          {
            rtx seq;

            /* If that fails, force VAL into a register.  */
            start_sequence ();
            XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
            seq = get_insns ();
            end_sequence ();
            if (seq)
              emit_insn_before (seq, insn);

            ok = recog_memoized (insn) >= 0;
            gcc_assert (ok);
          }
      }
      return true;

    case V4HImode:
      if (!mmx_ok)
        return false;
      if (TARGET_SSE || TARGET_3DNOW_A)
        {
          rtx x;

          val = gen_lowpart (SImode, val);
          x = gen_rtx_TRUNCATE (HImode, val);
          x = gen_rtx_VEC_DUPLICATE (mode, x);
          emit_insn (gen_rtx_SET (VOIDmode, target, x));
          return true;
        }
      goto widen;

    case V8HImode:
      if (TARGET_SSE2)
        {
          struct expand_vec_perm_d dperm;
          rtx tmp1, tmp2;

          memset (&dperm, 0, sizeof (dperm));
          dperm.target = target;
          dperm.vmode = mode;
          dperm.nelt = GET_MODE_NUNITS (mode);
          dperm.op0 = dperm.op1 = gen_reg_rtx (mode);

          /* Extend to SImode using a paradoxical SUBREG.  */
          tmp1 = gen_reg_rtx (SImode);
          emit_move_insn (tmp1, gen_lowpart (SImode, val));

          /* Insert the SImode value as low element of a V4SImode vector.  */
          tmp2 = gen_lowpart (V4SImode, dperm.op0);
          emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));

          ok = (expand_vec_perm_1 (&dperm)
                || expand_vec_perm_broadcast_1 (&dperm));
          gcc_assert (ok);
          return ok;
        }
      goto widen;

    widen:
      /* Replicate the value once into the next wider mode and recurse.  */
      {
        enum machine_mode smode, wsmode, wvmode;
        rtx x;

        smode = GET_MODE_INNER (mode);
        wvmode = get_mode_wider_vector (mode);
        wsmode = GET_MODE_INNER (wvmode);

        val = convert_modes (wsmode, smode, val, true);
        x = expand_simple_binop (wsmode, ASHIFT, val,
                                 GEN_INT (GET_MODE_BITSIZE (smode)),
                                 NULL_RTX, 1, OPTAB_LIB_WIDEN);
        val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);

        x = gen_lowpart (wvmode, target);
        ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
        gcc_assert (ok);
        return ok;
      }

    case V16HImode:
    case V32QImode:
      {
        enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
        rtx x = gen_reg_rtx (hvmode);

        ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
        gcc_assert (ok);

        x = gen_rtx_VEC_CONCAT (mode, x, x);
        emit_insn (gen_rtx_SET (VOIDmode, target, x));
      }
      return true;

    default:
      return false;
    }
}
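
/* Editor's note (worked example, not GCC code): the "widen" path above
   replicates the scalar by pairing it with a shifted copy of itself,
   e.g. a 16-bit value v becomes the 32-bit value (v << 16) | v, so a
   V8HImode broadcast reduces to a V4SImode broadcast of the doubled
   element.  */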
/* A subroutine of ix86_expand_vector_init.  Store into TARGET a vector
   whose ONE_VAR element is VAR, and other elements are zero.  Return true
   if successful.  */

static bool
ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
                                     rtx target, rtx var, int one_var)
{
  enum machine_mode vsimode;
  rtx new_target;
  rtx x, tmp;
  bool use_vector_set = false;

  switch (mode)
    {
    case V2DImode:
      /* For SSE4.1, we normally use vector set.  But if the second
         element is zero and inter-unit moves are OK, we use movq
         instead.  */
      use_vector_set = (TARGET_64BIT
                        && TARGET_SSE4_1
                        && !(TARGET_INTER_UNIT_MOVES
                             && one_var == 0));
      break;
    case V16QImode:
    case V4SImode:
    case V4SFmode:
      use_vector_set = TARGET_SSE4_1;
      break;
    case V8HImode:
      use_vector_set = TARGET_SSE2;
      break;
    case V4HImode:
      use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
      break;
    case V32QImode:
    case V16HImode:
    case V8SImode:
    case V8SFmode:
    case V4DFmode:
      use_vector_set = TARGET_AVX;
      break;
    case V4DImode:
      /* Use ix86_expand_vector_set in 64bit mode only.  */
      use_vector_set = TARGET_AVX && TARGET_64BIT;
      break;
    default:
      break;
    }

  if (use_vector_set)
    {
      emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
      var = force_reg (GET_MODE_INNER (mode), var);
      ix86_expand_vector_set (mmx_ok, target, var, one_var);
      return true;
    }

  switch (mode)
    {
    case V2SFmode:
    case V2SImode:
      if (!mmx_ok)
        return false;
      /* FALLTHRU */

    case V2DFmode:
    case V2DImode:
      if (one_var != 0)
        return false;
      var = force_reg (GET_MODE_INNER (mode), var);
      x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
      emit_insn (gen_rtx_SET (VOIDmode, target, x));
      return true;

    case V4SFmode:
    case V4SImode:
      if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
        new_target = gen_reg_rtx (mode);
      else
        new_target = target;
      var = force_reg (GET_MODE_INNER (mode), var);
      x = gen_rtx_VEC_DUPLICATE (mode, var);
      x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
      if (one_var != 0)
        {
          /* We need to shuffle the value to the correct position, so
             create a new pseudo to store the intermediate result.  */

          /* With SSE2, we can use the integer shuffle insns.  */
          if (mode != V4SFmode && TARGET_SSE2)
            {
              emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
                                            const1_rtx,
                                            GEN_INT (one_var == 1 ? 0 : 1),
                                            GEN_INT (one_var == 2 ? 0 : 1),
                                            GEN_INT (one_var == 3 ? 0 : 1)));
              if (target != new_target)
                emit_move_insn (target, new_target);
              return true;
            }

          /* Otherwise convert the intermediate result to V4SFmode and
             use the SSE1 shuffle instructions.  */
          if (mode != V4SFmode)
            {
              tmp = gen_reg_rtx (V4SFmode);
              emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
            }
          else
            tmp = new_target;

          emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
                                          const1_rtx,
                                          GEN_INT (one_var == 1 ? 0 : 1),
                                          GEN_INT (one_var == 2 ? 0+4 : 1+4),
                                          GEN_INT (one_var == 3 ? 0+4 : 1+4)));

          if (mode != V4SFmode)
            emit_move_insn (target, gen_lowpart (V4SImode, tmp));
          else if (tmp != target)
            emit_move_insn (target, tmp);
        }
      else if (target != new_target)
        emit_move_insn (target, new_target);
      return true;

    case V8HImode:
    case V16QImode:
      vsimode = V4SImode;
      goto widen;
    case V4HImode:
    case V8QImode:
      if (!mmx_ok)
        return false;
      vsimode = V2SImode;
      goto widen;
    widen:
      if (one_var != 0)
        return false;

      /* Zero extend the variable element to SImode and recurse.  */
      var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);

      x = gen_reg_rtx (vsimode);
      if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
                                                var, one_var >> 2))
        gcc_unreachable ();

      emit_move_insn (target, gen_lowpart (mode, x));
      return true;

    default:
      return false;
    }
}
/* A subroutine of ix86_expand_vector_init.  Store into TARGET a vector
   consisting of the values in VALS.  It is known that all elements
   except ONE_VAR are constants.  Return true if successful.  */

static bool
ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
                                 rtx target, rtx vals, int one_var)
{
  rtx var = XVECEXP (vals, 0, one_var);
  enum machine_mode wmode;
  rtx const_vec, x;

  const_vec = copy_rtx (vals);
  XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
  const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));

  switch (mode)
    {
    case V2DFmode:
    case V2DImode:
    case V2SFmode:
    case V2SImode:
      /* For the two element vectors, it's just as easy to use
         the general case.  */
      return false;

    case V4DImode:
      /* Use ix86_expand_vector_set in 64bit mode only.  */
      if (!TARGET_64BIT)
        return false;
      /* FALLTHRU */
    case V4DFmode:
    case V8SFmode:
    case V8SImode:
    case V16HImode:
    case V32QImode:
    case V4SFmode:
    case V4SImode:
    case V8HImode:
    case V4HImode:
      break;

    case V16QImode:
      if (TARGET_SSE4_1)
        break;
      wmode = V8HImode;
      goto widen;
    case V8QImode:
      wmode = V4HImode;
      goto widen;
    widen:
      /* There's no way to set one QImode entry easily.  Combine
         the variable value with its adjacent constant value, and
         promote to an HImode set.  */
      x = XVECEXP (vals, 0, one_var ^ 1);
      if (one_var & 1)
        {
          var = convert_modes (HImode, QImode, var, true);
          var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
                                     NULL_RTX, 1, OPTAB_LIB_WIDEN);
          x = GEN_INT (INTVAL (x) & 0xff);
        }
      else
        {
          var = convert_modes (HImode, QImode, var, true);
          x = gen_int_mode (INTVAL (x) << 8, HImode);
        }
      if (x != const0_rtx)
        var = expand_simple_binop (HImode, IOR, var, x, var,
                                   1, OPTAB_LIB_WIDEN);

      x = gen_reg_rtx (wmode);
      emit_move_insn (x, gen_lowpart (wmode, const_vec));
      ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);

      emit_move_insn (target, gen_lowpart (mode, x));
      return true;

    default:
      return false;
    }

  emit_move_insn (target, const_vec);
  ix86_expand_vector_set (mmx_ok, target, var, one_var);
  return true;
}
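
/* Editor's note (worked example, not GCC code): for a V8QImode vector
   whose only variable byte sits at odd index 5, the code above fetches
   the constant byte at index 4 (5 ^ 1), forms the 16-bit value
   (var << 8) | const, and performs a single HImode vector set at
   element 2 (5 >> 1); even indexes use the mirrored combination.  */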
/* A subroutine of ix86_expand_vector_init_general.  Use vector
   concatenate to handle the most general case: all values variable,
   and none identical.  */

static void
ix86_expand_vector_init_concat (enum machine_mode mode,
                                rtx target, rtx *ops, int n)
{
  enum machine_mode cmode, hmode = VOIDmode;
  rtx first[8], second[4];
  rtvec v;
  int i, j;

  switch (n)
    {
    case 2:
      switch (mode)
        {
        default:
          gcc_unreachable ();
        }

      if (!register_operand (ops[1], cmode))
        ops[1] = force_reg (cmode, ops[1]);
      if (!register_operand (ops[0], cmode))
        ops[0] = force_reg (cmode, ops[0]);
      emit_insn (gen_rtx_SET (VOIDmode, target,
                              gen_rtx_VEC_CONCAT (mode, ops[0],
                                                  ops[1])));
      break;

    case 4:
      switch (mode)
        {
        default:
          gcc_unreachable ();
        }
      goto half;

    case 8:
      switch (mode)
        {
        default:
          gcc_unreachable ();
        }

half:
      /* FIXME: We process inputs backward to help RA.  PR 36222.  */
      i = n - 1;
      j = (n >> 1) - 1;
      for (; i > 0; i -= 2, j--)
        {
          first[j] = gen_reg_rtx (cmode);
          v = gen_rtvec (2, ops[i - 1], ops[i]);
          ix86_expand_vector_init (false, first[j],
                                   gen_rtx_PARALLEL (cmode, v));
        }

      n >>= 1;
      if (n > 2)
        {
          gcc_assert (hmode != VOIDmode);
          for (i = j = 0; i < n; i += 2, j++)
            {
              second[j] = gen_reg_rtx (hmode);
              ix86_expand_vector_init_concat (hmode, second [j],
                                              &first [i], 2);
            }
          n >>= 1;
          ix86_expand_vector_init_concat (mode, target, second, n);
        }
      else
        ix86_expand_vector_init_concat (mode, target, first, n);
      break;

    default:
      gcc_unreachable ();
    }
}
/* A subroutine of ix86_expand_vector_init_general.  Use vector
   interleave to handle the most general case: all values variable,
   and none identical.  */

static void
ix86_expand_vector_init_interleave (enum machine_mode mode,
                                    rtx target, rtx *ops, int n)
{
  enum machine_mode first_imode, second_imode, third_imode, inner_mode;
  int i, j;
  rtx op0, op1;
  rtx (*gen_load_even) (rtx, rtx, rtx);
  rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
  rtx (*gen_interleave_second_low) (rtx, rtx, rtx);

  switch (mode)
    {
    case V8HImode:
      gen_load_even = gen_vec_setv8hi;
      gen_interleave_first_low = gen_vec_interleave_lowv4si;
      gen_interleave_second_low = gen_vec_interleave_lowv2di;
      inner_mode = HImode;
      first_imode = V4SImode;
      second_imode = V2DImode;
      third_imode = VOIDmode;
      break;
    case V16QImode:
      gen_load_even = gen_vec_setv16qi;
      gen_interleave_first_low = gen_vec_interleave_lowv8hi;
      gen_interleave_second_low = gen_vec_interleave_lowv4si;
      inner_mode = QImode;
      first_imode = V8HImode;
      second_imode = V4SImode;
      third_imode = V2DImode;
      break;
    default:
      gcc_unreachable ();
    }

  for (i = 0; i < n; i++)
    {
      /* Extend the odd element to SImode using a paradoxical SUBREG.  */
      op0 = gen_reg_rtx (SImode);
      emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));

      /* Insert the SImode value as low element of V4SImode vector.  */
      op1 = gen_reg_rtx (V4SImode);
      op0 = gen_rtx_VEC_MERGE (V4SImode,
                               gen_rtx_VEC_DUPLICATE (V4SImode,
                                                      op0),
                               CONST0_RTX (V4SImode),
                               const1_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, op1, op0));

      /* Cast the V4SImode vector back to a vector in original mode.  */
      op0 = gen_reg_rtx (mode);
      emit_move_insn (op0, gen_lowpart (mode, op1));

      /* Load even elements into the second position.  */
      emit_insn ((*gen_load_even) (op0,
                                   force_reg (inner_mode,
                                              ops [i + i + 1]),
                                   const1_rtx));

      /* Cast vector to FIRST_IMODE vector.  */
      ops[i] = gen_reg_rtx (first_imode);
      emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
    }

  /* Interleave low FIRST_IMODE vectors.  */
  for (i = j = 0; i < n; i += 2, j++)
    {
      op0 = gen_reg_rtx (first_imode);
      emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));

      /* Cast FIRST_IMODE vector to SECOND_IMODE vector.  */
      ops[j] = gen_reg_rtx (second_imode);
      emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
    }

  /* Interleave low SECOND_IMODE vectors.  */
  switch (second_imode)
    {
    case V4SImode:
      for (i = j = 0; i < n / 2; i += 2, j++)
        {
          op0 = gen_reg_rtx (second_imode);
          emit_insn ((*gen_interleave_second_low) (op0, ops[i],
                                                   ops[i + 1]));

          /* Cast the SECOND_IMODE vector to the THIRD_IMODE
             vector.  */
          ops[j] = gen_reg_rtx (third_imode);
          emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
        }
      second_imode = V2DImode;
      gen_interleave_second_low = gen_vec_interleave_lowv2di;
      /* FALLTHRU */

    case V2DImode:
      op0 = gen_reg_rtx (second_imode);
      emit_insn ((*gen_interleave_second_low) (op0, ops[0],
                                               ops[1]));

      /* Cast the SECOND_IMODE vector back to a vector on original
         mode.  */
      emit_insn (gen_rtx_SET (VOIDmode, target,
                              gen_lowpart (mode, op0)));
      break;

    default:
      gcc_unreachable ();
    }
}
/* A subroutine of ix86_expand_vector_init.  Handle the most general case:
   all values variable, and none identical.  */

static void
ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
                                 rtx target, rtx vals)
{
  rtx ops[32], op0, op1;
  enum machine_mode half_mode = VOIDmode;
  int n, i;

  switch (mode)
    {
    case V2SFmode:
    case V2SImode:
      if (!mmx_ok && !TARGET_SSE)
        break;
      /* FALLTHRU */

    case V8SFmode:
    case V8SImode:
    case V4DFmode:
    case V4DImode:
    case V4SFmode:
    case V4SImode:
    case V2DFmode:
    case V2DImode:
      n = GET_MODE_NUNITS (mode);
      for (i = 0; i < n; i++)
        ops[i] = XVECEXP (vals, 0, i);
      ix86_expand_vector_init_concat (mode, target, ops, n);
      return;

    case V32QImode:
      half_mode = V16QImode;
      goto half;

    case V16HImode:
      half_mode = V8HImode;
      goto half;

half:
      n = GET_MODE_NUNITS (mode);
      for (i = 0; i < n; i++)
        ops[i] = XVECEXP (vals, 0, i);
      op0 = gen_reg_rtx (half_mode);
      op1 = gen_reg_rtx (half_mode);
      ix86_expand_vector_init_interleave (half_mode, op0, ops,
                                          n >> 2);
      ix86_expand_vector_init_interleave (half_mode, op1,
                                          &ops [n >> 1], n >> 2);
      emit_insn (gen_rtx_SET (VOIDmode, target,
                              gen_rtx_VEC_CONCAT (mode, op0, op1)));
      return;

    case V16QImode:
      if (!TARGET_SSE4_1)
        break;
      /* FALLTHRU */

    case V8HImode:
      if (!TARGET_SSE2)
        break;

      /* Don't use ix86_expand_vector_init_interleave if we can't
         move from GPR to SSE register directly.  */
      if (!TARGET_INTER_UNIT_MOVES)
        break;

      n = GET_MODE_NUNITS (mode);
      for (i = 0; i < n; i++)
        ops[i] = XVECEXP (vals, 0, i);
      ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
      return;

    case V4HImode:
    case V8QImode:
      break;

    default:
      gcc_unreachable ();
    }

  {
    int i, j, n_elts, n_words, n_elt_per_word;
    enum machine_mode inner_mode;
    rtx words[4], shift;

    inner_mode = GET_MODE_INNER (mode);
    n_elts = GET_MODE_NUNITS (mode);
    n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
    n_elt_per_word = n_elts / n_words;
    shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));

    for (i = 0; i < n_words; ++i)
      {
        rtx word = NULL_RTX;

        for (j = 0; j < n_elt_per_word; ++j)
          {
            rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
            elt = convert_modes (word_mode, inner_mode, elt, true);

            if (j == 0)
              word = elt;
            else
              {
                word = expand_simple_binop (word_mode, ASHIFT, word, shift,
                                            word, 1, OPTAB_LIB_WIDEN);
                word = expand_simple_binop (word_mode, IOR, word, elt,
                                            word, 1, OPTAB_LIB_WIDEN);
              }
          }

        words[i] = word;
      }

    if (n_words == 1)
      emit_move_insn (target, gen_lowpart (mode, words[0]));
    else if (n_words == 2)
      {
        rtx tmp = gen_reg_rtx (mode);
        emit_clobber (tmp);
        emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
        emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
        emit_move_insn (target, tmp);
      }
    else if (n_words == 4)
      {
        rtx tmp = gen_reg_rtx (V4SImode);
        gcc_assert (word_mode == SImode);
        vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
        ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
        emit_move_insn (target, gen_lowpart (mode, tmp));
      }
    else
      gcc_unreachable ();
  }
}
/* Initialize vector TARGET via VALS.  Suppress the use of MMX
   instructions unless MMX_OK is true.  */

void
ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0, one_var = -1;
  bool all_same = true, all_const_zero = true;
  int i;
  rtx x;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!(CONST_INT_P (x)
            || GET_CODE (x) == CONST_DOUBLE
            || GET_CODE (x) == CONST_FIXED))
        n_var++, one_var = i;
      else if (x != CONST0_RTX (inner_mode))
        all_const_zero = false;
      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
        all_same = false;
    }

  /* Constants are best loaded from the constant pool.  */
  if (n_var == 0)
    {
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  /* If all values are identical, broadcast the value.  */
  if (all_same
      && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
                                            XVECEXP (vals, 0, 0)))
    return;

  /* Values where only one field is non-constant are best loaded from
     the pool and overwritten via move later.  */
  if (n_var == 1)
    {
      if (all_const_zero
          && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
                                                  XVECEXP (vals, 0, one_var),
                                                  one_var))
        return;

      if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
        return;
    }

  ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
}
void
ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  enum machine_mode half_mode;
  bool use_vec_merge = false;
  rtx tmp;
  static rtx (*gen_extract[6][2]) (rtx, rtx)
    = {
        { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
        { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
        { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
        { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
        { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
        { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
      };
  static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
    = {
        { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
        { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
        { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
        { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
        { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
        { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
      };
  int i, j, n;

  switch (mode)
    {
    case V2SFmode:
    case V2SImode:
      if (mmx_ok)
        {
          tmp = gen_reg_rtx (GET_MODE_INNER (mode));
          ix86_expand_vector_extract (true, tmp, target, 1 - elt);
          if (elt == 0)
            tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
          else
            tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
          emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
          return;
        }
      break;

    case V2DImode:
      use_vec_merge = TARGET_SSE4_1;
      if (use_vec_merge)
        break;
      /* FALLTHRU */

    case V2DFmode:
      {
        rtx op0, op1;

        /* For the two element vectors, we implement a VEC_CONCAT with
           the extraction of the other element.  */

        tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
        tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);

        if (elt == 0)
          op0 = val, op1 = tmp;
        else
          op0 = tmp, op1 = val;

        tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
        emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
      }
      return;

    case V4SFmode:
      use_vec_merge = TARGET_SSE4_1;
      if (use_vec_merge)
        break;

      switch (elt)
        {
        case 0:
          use_vec_merge = true;
          break;

        case 1:
          /* tmp = target = A B C D */
          tmp = copy_to_reg (target);
          /* target = A A B B */
          emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
          /* target = X A B B */
          ix86_expand_vector_set (false, target, val, 0);
          /* target = A X C D  */
          emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
                                          const1_rtx, const0_rtx,
                                          GEN_INT (2+4), GEN_INT (3+4)));
          return;

        case 2:
          /* tmp = target = A B C D */
          tmp = copy_to_reg (target);
          /* tmp = X B C D */
          ix86_expand_vector_set (false, tmp, val, 0);
          /* target = A B X D */
          emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
                                          const0_rtx, const1_rtx,
                                          GEN_INT (0+4), GEN_INT (3+4)));
          return;

        case 3:
          /* tmp = target = A B C D */
          tmp = copy_to_reg (target);
          /* tmp = X B C D */
          ix86_expand_vector_set (false, tmp, val, 0);
          /* target = A B C X */
          emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
                                          const0_rtx, const1_rtx,
                                          GEN_INT (2+4), GEN_INT (0+4)));
          return;

        default:
          gcc_unreachable ();
        }
      break;

    case V4SImode:
      use_vec_merge = TARGET_SSE4_1;
      if (use_vec_merge)
        break;

      /* Element 0 handled by vec_merge below.  */
      if (elt == 0)
        {
          use_vec_merge = true;
          break;
        }

      if (TARGET_SSE2)
        {
          /* With SSE2, use integer shuffles to swap element 0 and ELT,
             store into element 0, then shuffle them back.  */
          rtx order[4];

          order[0] = GEN_INT (elt);
          order[1] = const1_rtx;
          order[2] = const2_rtx;
          order[3] = GEN_INT (3);
          order[elt] = const0_rtx;

          emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
                                        order[1], order[2], order[3]));

          ix86_expand_vector_set (false, target, val, 0);

          emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
                                        order[1], order[2], order[3]));
        }
      else
        {
          /* For SSE1, we have to reuse the V4SF code.  */
          ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
                                  gen_lowpart (SFmode, val), elt);
        }
      return;

    case V8HImode:
      use_vec_merge = TARGET_SSE2;
      break;
    case V4HImode:
      use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
      break;

    case V16QImode:
      use_vec_merge = TARGET_SSE4_1;
      break;

    case V8QImode:
      break;

    case V32QImode:
      half_mode = V16QImode;
      j = 0;
      n = 16;
      goto half;

    case V16HImode:
      half_mode = V8HImode;
      j = 1;
      n = 8;
      goto half;

    case V8SImode:
      half_mode = V4SImode;
      j = 2;
      n = 4;
      goto half;

    case V4DImode:
      half_mode = V2DImode;
      j = 3;
      n = 2;
      goto half;

    case V8SFmode:
      half_mode = V4SFmode;
      j = 4;
      n = 4;
      goto half;

    case V4DFmode:
      half_mode = V2DFmode;
      j = 5;
      n = 2;
      goto half;

half:
      /* Compute offset.  */
      i = elt / n;
      elt %= n;

      gcc_assert (i <= 1);

      /* Extract the half.  */
      tmp = gen_reg_rtx (half_mode);
      emit_insn ((*gen_extract[j][i]) (tmp, target));

      /* Put val in tmp at elt.  */
      ix86_expand_vector_set (false, tmp, val, elt);

      /* Put it back.  */
      emit_insn ((*gen_insert[j][i]) (target, target, tmp));
      return;

    default:
      break;
    }

  if (use_vec_merge)
    {
      tmp = gen_rtx_VEC_DUPLICATE (mode, val);
      tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
    }
  else
    {
      rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);

      emit_move_insn (mem, target);

      tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
      emit_move_insn (tmp, val);

      emit_move_insn (target, mem);
    }
}
void
ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
{
  enum machine_mode mode = GET_MODE (vec);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  bool use_vec_extr = false;
  rtx tmp;

  switch (mode)
    {
    case V2SImode:
    case V2SFmode:
      if (!mmx_ok)
        break;
      /* FALLTHRU */

    case V2DFmode:
    case V2DImode:
      use_vec_extr = true;
      break;

    case V4SFmode:
      use_vec_extr = TARGET_SSE4_1;
      if (use_vec_extr)
        break;

      switch (elt)
        {
        case 0:
          tmp = vec;
          break;

        case 1:
        case 3:
          tmp = gen_reg_rtx (mode);
          emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
                                          GEN_INT (elt), GEN_INT (elt),
                                          GEN_INT (elt+4), GEN_INT (elt+4)));
          break;

        case 2:
          tmp = gen_reg_rtx (mode);
          emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
          break;

        default:
          gcc_unreachable ();
        }
      vec = tmp;
      use_vec_extr = true;
      elt = 0;
      break;

    case V4SImode:
      use_vec_extr = TARGET_SSE4_1;
      if (use_vec_extr)
        break;

      if (TARGET_SSE2)
        {
          switch (elt)
            {
            case 0:
              tmp = vec;
              break;

            case 1:
            case 3:
              tmp = gen_reg_rtx (mode);
              emit_insn (gen_sse2_pshufd_1 (tmp, vec,
                                            GEN_INT (elt), GEN_INT (elt),
                                            GEN_INT (elt), GEN_INT (elt)));
              break;

            case 2:
              tmp = gen_reg_rtx (mode);
              emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
              break;

            default:
              gcc_unreachable ();
            }
          vec = tmp;
          use_vec_extr = true;
          elt = 0;
        }
      else
        {
          /* For SSE1, we have to reuse the V4SF code.  */
          ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
                                      gen_lowpart (V4SFmode, vec), elt);
          return;
        }
      break;

    case V8HImode:
      use_vec_extr = TARGET_SSE2;
      break;
    case V4HImode:
      use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
      break;

    case V16QImode:
      use_vec_extr = TARGET_SSE4_1;
      break;

    case V8QImode:
      /* ??? Could extract the appropriate HImode element and shift.  */
    default:
      break;
    }

  if (use_vec_extr)
    {
      tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
      tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);

      /* Let the rtl optimizers know about the zero extension performed.  */
      if (inner_mode == QImode || inner_mode == HImode)
        {
          tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
          target = gen_lowpart (SImode, target);
        }

      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
    }
  else
    {
      rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);

      emit_move_insn (mem, vec);

      tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
      emit_move_insn (target, tmp);
    }
}
28050 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
28051 pattern to reduce; DEST is the destination; IN is the input vector. */
28054 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
28056 rtx tmp1, tmp2, tmp3;
28058 tmp1 = gen_reg_rtx (V4SFmode);
28059 tmp2 = gen_reg_rtx (V4SFmode);
28060 tmp3 = gen_reg_rtx (V4SFmode);
28062 emit_insn (gen_sse_movhlps (tmp1, in, in));
28063 emit_insn (fn (tmp2, tmp1, in));
28065 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
28066 const1_rtx, const1_rtx,
28067 GEN_INT (1+4), GEN_INT (1+4)));
28068 emit_insn (fn (dest, tmp2, tmp3));
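/* For example, with IN = { a, b, c, d } and FN the smin pattern:

     tmp1 = movhlps (in, in)        -> { c, d, c, d }
     tmp2 = smin (tmp1, in)         -> { min(a,c), min(b,d), x, x }
     tmp3 = shufps (tmp2, 1,1,5,5)  -> min(b,d) in every element
     dest = smin (tmp2, tmp3)

   so element 0 of DEST holds the reduction over all four inputs.
   (Illustrative walk-through.) */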
28071 /* Target hook for scalar_mode_supported_p. */
28073 ix86_scalar_mode_supported_p (enum machine_mode mode)
28075 if (DECIMAL_FLOAT_MODE_P (mode))
28076 return default_decimal_float_supported_p ();
28077 else if (mode == TFmode)
28080 return default_scalar_mode_supported_p (mode);
28083 /* Implements target hook vector_mode_supported_p. */
28085 ix86_vector_mode_supported_p (enum machine_mode mode)
28087 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
28089 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
28091 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
28093 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
28095 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
28100 /* Target hook for c_mode_for_suffix. */
28101 static enum machine_mode
28102 ix86_c_mode_for_suffix (char suffix)
28112 /* Worker function for TARGET_MD_ASM_CLOBBERS.
28114 We do this in the new i386 backend to maintain source compatibility
28115 with the old cc0-based compiler. */
28118 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
28119 tree inputs ATTRIBUTE_UNUSED,
28122 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
28124 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
28129 /* Implements the target vector targetm.asm.encode_section_info. This
28130 is not used by NetWare. */
28132 static void ATTRIBUTE_UNUSED
28133 ix86_encode_section_info (tree decl, rtx rtl, int first)
28135 default_encode_section_info (decl, rtl, first);
28137 if (TREE_CODE (decl) == VAR_DECL
28138 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
28139 && ix86_in_large_data_p (decl))
28140 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
28143 /* Worker function for REVERSE_CONDITION. */
28146 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
28148 return (mode != CCFPmode && mode != CCFPUmode
28149 ? reverse_condition (code)
28150 : reverse_condition_maybe_unordered (code));
28153 /* Output code to perform an x87 FP register move, from OPERANDS[1] to OPERANDS[0]. */
28157 output_387_reg_move (rtx insn, rtx *operands)
28159 if (REG_P (operands[0]))
28161 if (REG_P (operands[1])
28162 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28164 if (REGNO (operands[0]) == FIRST_STACK_REG)
28165 return output_387_ffreep (operands, 0);
28166 return "fstp\t%y0";
28168 if (STACK_TOP_P (operands[0]))
28169 return "fld%Z1\t%y1";
28172 else if (MEM_P (operands[0]))
28174 gcc_assert (REG_P (operands[1]));
28175 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28176 return "fstp%Z0\t%y0";
28179 /* There is no non-popping store to memory for XFmode.
28180 So if we need one, follow the store with a load. */
28181 if (GET_MODE (operands[0]) == XFmode)
28182 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
28184 return "fst%Z0\t%y0";
28191 /* Output code to perform a conditional jump to LABEL, if the C2 flag in
28192 the FP status register is set. */
28195 ix86_emit_fp_unordered_jump (rtx label)
28197 rtx reg = gen_reg_rtx (HImode);
28200 emit_insn (gen_x86_fnstsw_1 (reg));
28202 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
28204 emit_insn (gen_x86_sahf_1 (reg));
28206 temp = gen_rtx_REG (CCmode, FLAGS_REG);
28207 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
28211 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
28213 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
28214 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
28217 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
28218 gen_rtx_LABEL_REF (VOIDmode, label),
28220 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
28222 emit_jump_insn (temp);
28223 predict_jump (REG_BR_PROB_BASE * 10 / 100);
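/* Both branches test the same bit: FNSTSW places C2 in bit 2 of the
   status word's high byte (hence the 0x04 test), and SAHF copies that
   bit into PF, which is what the UNORDERED comparison above inspects. */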
28226 /* Output code to perform a log1p XFmode calculation. */
28228 void ix86_emit_i387_log1p (rtx op0, rtx op1)
28230 rtx label1 = gen_label_rtx ();
28231 rtx label2 = gen_label_rtx ();
28233 rtx tmp = gen_reg_rtx (XFmode);
28234 rtx tmp2 = gen_reg_rtx (XFmode);
28237 emit_insn (gen_absxf2 (tmp, op1));
28238 test = gen_rtx_GE (VOIDmode, tmp,
28239 CONST_DOUBLE_FROM_REAL_VALUE (
28240 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
28242 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
28244 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28245 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
28246 emit_jump (label2);
28248 emit_label (label1);
28249 emit_move_insn (tmp, CONST1_RTX (XFmode));
28250 emit_insn (gen_addxf3 (tmp, op1, tmp));
28251 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28252 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
28254 emit_label (label2);
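/* In outline (a scalar sketch; log2p1 is an illustrative name for
   what fyl2xp1 computes, not an actual function):

     if (fabs (op1) < 1 - sqrt (2) / 2)      0.29289321...
       op0 = ln (2) * log2p1 (op1);          fyl2xp1
     else
       op0 = ln (2) * log2 (1.0 + op1);      fyl2x

   fyl2xp1 is only specified for |x| below 1 - sqrt(2)/2, but avoids
   the explicit 1.0 + op1 addition, which would lose the low-order
   bits of a small op1. */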
28257 /* Output code to perform a Newton-Raphson approximation of a single precision
28258 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
28260 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
28262 rtx x0, x1, e0, e1, two;
28264 x0 = gen_reg_rtx (mode);
28265 e0 = gen_reg_rtx (mode);
28266 e1 = gen_reg_rtx (mode);
28267 x1 = gen_reg_rtx (mode);
28269 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
28271 if (VECTOR_MODE_P (mode))
28272 two = ix86_build_const_vector (SFmode, true, two);
28274 two = force_reg (mode, two);
28276 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
28278 /* x0 = rcp(b) estimate */
28279 emit_insn (gen_rtx_SET (VOIDmode, x0,
28280 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
28283 emit_insn (gen_rtx_SET (VOIDmode, e0,
28284 gen_rtx_MULT (mode, x0, a)));
28286 emit_insn (gen_rtx_SET (VOIDmode, e1,
28287 gen_rtx_MULT (mode, x0, b)));
28289 emit_insn (gen_rtx_SET (VOIDmode, x1,
28290 gen_rtx_MINUS (mode, two, e1)));
28291 /* res = e0 * x1 */
28292 emit_insn (gen_rtx_SET (VOIDmode, res,
28293 gen_rtx_MULT (mode, e0, x1)));
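/* As a rough scalar model of the sequence above, where approx_rcp
   stands in for the ~12-bit rcpss/rcpps estimate (an illustrative
   name, not an actual insn):

     float swdiv (float a, float b)
     {
       float x0 = approx_rcp (b);
       float e0 = x0 * a;
       float e1 = x0 * b;
       float x1 = 2.0f - e1;
       return e0 * x1;              result is a * x0 * (2 - b * x0)
     }

   The single Newton-Raphson step roughly doubles the number of
   correct bits in the reciprocal estimate. */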
28296 /* Output code to perform a Newton-Raphson approximation of a
28297 single precision floating point [reciprocal] square root. */
28299 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
28302 rtx x0, e0, e1, e2, e3, mthree, mhalf;
28305 x0 = gen_reg_rtx (mode);
28306 e0 = gen_reg_rtx (mode);
28307 e1 = gen_reg_rtx (mode);
28308 e2 = gen_reg_rtx (mode);
28309 e3 = gen_reg_rtx (mode);
28311 real_from_integer (&r, VOIDmode, -3, -1, 0);
28312 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28314 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
28315 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28317 if (VECTOR_MODE_P (mode))
28319 mthree = ix86_build_const_vector (SFmode, true, mthree);
28320 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
28323 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
28324 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
28326 /* x0 = rsqrt(a) estimate */
28327 emit_insn (gen_rtx_SET (VOIDmode, x0,
28328 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
28331 /* If a == 0.0, filter out the infinite rsqrt estimate to prevent a NaN for sqrt (0.0). */
28336 zero = gen_reg_rtx (mode);
28337 mask = gen_reg_rtx (mode);
28339 zero = force_reg (mode, CONST0_RTX(mode));
28340 emit_insn (gen_rtx_SET (VOIDmode, mask,
28341 gen_rtx_NE (mode, zero, a)));
28343 emit_insn (gen_rtx_SET (VOIDmode, x0,
28344 gen_rtx_AND (mode, x0, mask)));
28348 emit_insn (gen_rtx_SET (VOIDmode, e0,
28349 gen_rtx_MULT (mode, x0, a)));
28351 emit_insn (gen_rtx_SET (VOIDmode, e1,
28352 gen_rtx_MULT (mode, e0, x0)));
28355 mthree = force_reg (mode, mthree);
28356 emit_insn (gen_rtx_SET (VOIDmode, e2,
28357 gen_rtx_PLUS (mode, e1, mthree)));
28359 mhalf = force_reg (mode, mhalf);
28361 /* e3 = -.5 * x0 */
28362 emit_insn (gen_rtx_SET (VOIDmode, e3,
28363 gen_rtx_MULT (mode, x0, mhalf)));
28365 /* e3 = -.5 * e0 */
28366 emit_insn (gen_rtx_SET (VOIDmode, e3,
28367 gen_rtx_MULT (mode, e0, mhalf)));
28368 /* ret = e2 * e3 */
28369 emit_insn (gen_rtx_SET (VOIDmode, res,
28370 gen_rtx_MULT (mode, e2, e3)));
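/* Scalar model of the sequence above, with approx_rsqrt standing in
   for the rsqrtss/rsqrtps estimate (illustrative name):

     x0 = approx_rsqrt (a);
     e0 = a * x0;
     e1 = e0 * x0;            a * x0 * x0
     e2 = e1 - 3.0;           mthree is -3.0, added above
     e3 = -0.5 * x0;          or -0.5 * e0 for the sqrt variant
     res = e2 * e3;

   i.e. one Newton-Raphson step for 1/sqrt(a), multiplied through by A
   in the sqrt case. */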
28373 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
28375 static void ATTRIBUTE_UNUSED
28376 i386_solaris_elf_named_section (const char *name, unsigned int flags,
28379 /* With Binutils 2.15, the "@unwind" marker must be specified on
28380 every occurrence of the ".eh_frame" section, not just the first one. */
28383 && strcmp (name, ".eh_frame") == 0)
28385 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28386 flags & SECTION_WRITE ? "aw" : "a");
28389 default_elf_asm_named_section (name, flags, decl);
28392 /* Return the mangling of TYPE if it is an extended fundamental type. */
28394 static const char *
28395 ix86_mangle_type (const_tree type)
28397 type = TYPE_MAIN_VARIANT (type);
28399 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28400 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28403 switch (TYPE_MODE (type))
28406 /* __float128 is "g". */
28409 /* "long double" or __float80 is "e". */
28416 /* For 32-bit code we can save PIC register setup by using the
28417 __stack_chk_fail_local hidden function instead of calling
28418 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
28419 register, so it is better to call __stack_chk_fail directly. */
28422 ix86_stack_protect_fail (void)
28424 return TARGET_64BIT
28425 ? default_external_stack_protect_fail ()
28426 : default_hidden_stack_protect_fail ();
28429 /* Select a format to encode pointers in exception handling data. CODE
28430 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28431 true if the symbol may be affected by dynamic relocations.
28433 ??? All x86 object file formats are capable of representing this.
28434 After all, the relocation needed is the same as for the call insn.
28435 Whether or not a particular assembler allows us to enter such, I
28436 guess we'll have to see. */
28438 asm_preferred_eh_data_format (int code, int global)
28442 int type = DW_EH_PE_sdata8;
28444 || ix86_cmodel == CM_SMALL_PIC
28445 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28446 type = DW_EH_PE_sdata4;
28447 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28449 if (ix86_cmodel == CM_SMALL
28450 || (ix86_cmodel == CM_MEDIUM && code))
28451 return DW_EH_PE_udata4;
28452 return DW_EH_PE_absptr;
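/* For example, 64-bit small-model PIC code gets DW_EH_PE_pcrel
   | DW_EH_PE_sdata4, with DW_EH_PE_indirect added on top for global
   symbols that may be subject to dynamic relocation. */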
28455 /* Expand copysign from SIGN to the positive value ABS_VALUE
28456 storing in RESULT. If MASK is non-null, it shall be a mask to mask out the sign bit. */
28459 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28461 enum machine_mode mode = GET_MODE (sign);
28462 rtx sgn = gen_reg_rtx (mode);
28463 if (mask == NULL_RTX)
28465 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28466 if (!VECTOR_MODE_P (mode))
28468 /* We need to generate a scalar mode mask in this case. */
28469 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28470 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28471 mask = gen_reg_rtx (mode);
28472 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28476 mask = gen_rtx_NOT (mode, mask);
28477 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28478 gen_rtx_AND (mode, mask, sign)));
28479 emit_insn (gen_rtx_SET (VOIDmode, result,
28480 gen_rtx_IOR (mode, abs_value, sgn)));
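/* Either way MASK ends up selecting only the sign bit, so in scalar
   SFmode terms this computes sgn = sign & 0x80000000 followed by
   result = abs_value | sgn, the classic bit-twiddled copysign. */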
28483 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28484 mask for masking out the sign-bit is stored in *SMASK, if that is non-null. */
28487 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28489 enum machine_mode mode = GET_MODE (op0);
28492 xa = gen_reg_rtx (mode);
28493 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28494 if (!VECTOR_MODE_P (mode))
28496 /* We need to generate a scalar mode mask in this case. */
28497 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28498 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28499 mask = gen_reg_rtx (mode);
28500 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28502 emit_insn (gen_rtx_SET (VOIDmode, xa,
28503 gen_rtx_AND (mode, op0, mask)));
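/* I.e. xa = op0 & 0x7fffffff in scalar SFmode terms; clearing the
   sign bit is fabs. */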
28511 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28512 swapping the operands if SWAP_OPERANDS is true. The expanded
28513 code is a forward jump to a newly created label in case the
28514 comparison is true. The generated label rtx is returned. */
28516 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28517 bool swap_operands)
28528 label = gen_label_rtx ();
28529 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28530 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28531 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28532 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28533 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28534 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28535 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28536 JUMP_LABEL (tmp) = label;
28541 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28542 using comparison code CODE. Operands are swapped for the comparison if
28543 SWAP_OPERANDS is true. Returns an rtx for the generated mask. */
28545 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28546 bool swap_operands)
28548 enum machine_mode mode = GET_MODE (op0);
28549 rtx mask = gen_reg_rtx (mode);
28558 if (mode == DFmode)
28559 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28560 gen_rtx_fmt_ee (code, mode, op0, op1)));
28562 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28563 gen_rtx_fmt_ee (code, mode, op0, op1)));
28568 /* Generate and return an rtx of mode MODE for 2**n where n is the number
28569 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
28571 ix86_gen_TWO52 (enum machine_mode mode)
28573 REAL_VALUE_TYPE TWO52r;
28576 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28577 TWO52 = const_double_from_real_value (TWO52r, mode);
28578 TWO52 = force_reg (mode, TWO52);
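/* TWO52 enables the classic add-then-subtract rounding trick: for
   |x| < 2**52 in DFmode (2**23 in SFmode), (x + TWO52) - TWO52 rounds
   x to an integer in the current rounding mode, because values of
   that magnitude have no fraction bits. E.g. 3.7 + 2**52 is only
   representable as 4.0 + 2**52. The expanders below guard with
   !isless (xa, TWO52) to skip inputs already that large. */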
28583 /* Expand SSE sequence for computing lround from OP1 storing into OP0. */
28586 ix86_expand_lround (rtx op0, rtx op1)
28588 /* C code for the stuff we're doing below:
28589 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28592 enum machine_mode mode = GET_MODE (op1);
28593 const struct real_format *fmt;
28594 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28597 /* load nextafter (0.5, 0.0) */
28598 fmt = REAL_MODE_FORMAT (mode);
28599 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28600 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28602 /* adj = copysign (0.5, op1) */
28603 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28604 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28606 /* adj = op1 + adj */
28607 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28609 /* op0 = (imode)adj */
28610 expand_fix (op0, adj, 0);
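/* Using nextafter (0.5, 0.0) rather than 0.5 matters at the edge: for
   op1 == nextafter (0.5, 0.0), adding exactly 0.5 would round up to
   1.0 under round-to-nearest-even and lround would return 1 instead
   of 0. The predecessor of 0.5 keeps the sum strictly below 1.0. */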
28613 /* Expand SSE2 sequence for computing lfloor or lceil (per DO_FLOOR) from OPERAND1 storing into OPERAND0. */
28616 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28618 /* C code for the stuff we're doing below (for do_floor):
28620 xi -= (double)xi > op1 ? 1 : 0;
28623 enum machine_mode fmode = GET_MODE (op1);
28624 enum machine_mode imode = GET_MODE (op0);
28625 rtx ireg, freg, label, tmp;
28627 /* reg = (long)op1 */
28628 ireg = gen_reg_rtx (imode);
28629 expand_fix (ireg, op1, 0);
28631 /* freg = (double)reg */
28632 freg = gen_reg_rtx (fmode);
28633 expand_float (freg, ireg, 0);
28635 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28636 label = ix86_expand_sse_compare_and_jump (UNLE,
28637 freg, op1, !do_floor);
28638 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28639 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28640 emit_move_insn (ireg, tmp);
28642 emit_label (label);
28643 LABEL_NUSES (label) = 1;
28645 emit_move_insn (op0, ireg);
28648 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28649 result in OPERAND0. */
28651 ix86_expand_rint (rtx operand0, rtx operand1)
28653 /* C code for the stuff we're doing below:
28654 xa = fabs (operand1);
28655 if (!isless (xa, 2**52))
28657 xa = xa + 2**52 - 2**52;
28658 return copysign (xa, operand1);
28660 enum machine_mode mode = GET_MODE (operand0);
28661 rtx res, xa, label, TWO52, mask;
28663 res = gen_reg_rtx (mode);
28664 emit_move_insn (res, operand1);
28666 /* xa = abs (operand1) */
28667 xa = ix86_expand_sse_fabs (res, &mask);
28669 /* if (!isless (xa, TWO52)) goto label; */
28670 TWO52 = ix86_gen_TWO52 (mode);
28671 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28673 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28674 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28676 ix86_sse_copysign_to_positive (res, xa, res, mask);
28678 emit_label (label);
28679 LABEL_NUSES (label) = 1;
28681 emit_move_insn (operand0, res);
28684 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing into OPERAND0. */
28687 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28689 /* C code for the stuff we expand below.
28690 double xa = fabs (x), x2;
28691 if (!isless (xa, TWO52))
28693 xa = xa + TWO52 - TWO52;
28694 x2 = copysign (xa, x);
28703 enum machine_mode mode = GET_MODE (operand0);
28704 rtx xa, TWO52, tmp, label, one, res, mask;
28706 TWO52 = ix86_gen_TWO52 (mode);
28708 /* Temporary for holding the result, initialized to the input
28709 operand to ease control flow. */
28710 res = gen_reg_rtx (mode);
28711 emit_move_insn (res, operand1);
28713 /* xa = abs (operand1) */
28714 xa = ix86_expand_sse_fabs (res, &mask);
28716 /* if (!isless (xa, TWO52)) goto label; */
28717 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28719 /* xa = xa + TWO52 - TWO52; */
28720 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28721 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28723 /* xa = copysign (xa, operand1) */
28724 ix86_sse_copysign_to_positive (xa, xa, res, mask);
28726 /* generate 1.0 or -1.0 */
28727 one = force_reg (mode,
28728 const_double_from_real_value (do_floor
28729 ? dconst1 : dconstm1, mode));
28731 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28732 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28733 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28734 gen_rtx_AND (mode, one, tmp)));
28735 /* We always need to subtract here to preserve signed zero. */
28736 tmp = expand_simple_binop (mode, MINUS,
28737 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28738 emit_move_insn (res, tmp);
28740 emit_label (label);
28741 LABEL_NUSES (label) = 1;
28743 emit_move_insn (operand0, res);
28746 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing into OPERAND0. */
28749 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
28751 /* C code for the stuff we expand below.
28752 double xa = fabs (x), x2;
28753 if (!isless (xa, TWO52))
28755 x2 = (double)(long)x;
28762 if (HONOR_SIGNED_ZEROS (mode))
28763 return copysign (x2, x);
28766 enum machine_mode mode = GET_MODE (operand0);
28767 rtx xa, xi, TWO52, tmp, label, one, res, mask;
28769 TWO52 = ix86_gen_TWO52 (mode);
28771 /* Temporary for holding the result, initialized to the input
28772 operand to ease control flow. */
28773 res = gen_reg_rtx (mode);
28774 emit_move_insn (res, operand1);
28776 /* xa = abs (operand1) */
28777 xa = ix86_expand_sse_fabs (res, &mask);
28779 /* if (!isless (xa, TWO52)) goto label; */
28780 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28782 /* xa = (double)(long)x */
28783 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28784 expand_fix (xi, res, 0);
28785 expand_float (xa, xi, 0);
28788 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28790 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28791 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28792 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28793 gen_rtx_AND (mode, one, tmp)));
28794 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
28795 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28796 emit_move_insn (res, tmp);
28798 if (HONOR_SIGNED_ZEROS (mode))
28799 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28801 emit_label (label);
28802 LABEL_NUSES (label) = 1;
28804 emit_move_insn (operand0, res);
28807 /* Expand SSE sequence for computing round from OPERAND1 storing
28808 into OPERAND0. The sequence avoids relying on DImode truncation
28809 via cvttsd2siq, which is only available on 64-bit targets. */
28811 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
28813 /* C code for the stuff we expand below.
28814 double xa = fabs (x), xa2, x2;
28815 if (!isless (xa, TWO52))
28817 Using the absolute value and copying the sign back makes
28818 -0.0 -> -0.0 correct.
28819 xa2 = xa + TWO52 - TWO52;
28824 else if (dxa > 0.5)
28826 x2 = copysign (xa2, x);
28829 enum machine_mode mode = GET_MODE (operand0);
28830 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
28832 TWO52 = ix86_gen_TWO52 (mode);
28834 /* Temporary for holding the result, initialized to the input
28835 operand to ease control flow. */
28836 res = gen_reg_rtx (mode);
28837 emit_move_insn (res, operand1);
28839 /* xa = abs (operand1) */
28840 xa = ix86_expand_sse_fabs (res, &mask);
28842 /* if (!isless (xa, TWO52)) goto label; */
28843 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28845 /* xa2 = xa + TWO52 - TWO52; */
28846 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28847 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
28849 /* dxa = xa2 - xa; */
28850 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
28852 /* generate 0.5, 1.0 and -0.5 */
28853 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
28854 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
28855 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
28859 tmp = gen_reg_rtx (mode);
28860 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
28861 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
28862 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28863 gen_rtx_AND (mode, one, tmp)));
28864 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28865 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
28866 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
28867 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28868 gen_rtx_AND (mode, one, tmp)));
28869 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28871 /* res = copysign (xa2, operand1) */
28872 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
28874 emit_label (label);
28875 LABEL_NUSES (label) = 1;
28877 emit_move_insn (operand0, res);
28880 /* Expand SSE sequence for computing trunc from OPERAND1 storing into OPERAND0. */
28883 ix86_expand_trunc (rtx operand0, rtx operand1)
28885 /* C code for SSE variant we expand below.
28886 double xa = fabs (x), x2;
28887 if (!isless (xa, TWO52))
28889 x2 = (double)(long)x;
28890 if (HONOR_SIGNED_ZEROS (mode))
28891 return copysign (x2, x);
28894 enum machine_mode mode = GET_MODE (operand0);
28895 rtx xa, xi, TWO52, label, res, mask;
28897 TWO52 = ix86_gen_TWO52 (mode);
28899 /* Temporary for holding the result, initialized to the input
28900 operand to ease control flow. */
28901 res = gen_reg_rtx (mode);
28902 emit_move_insn (res, operand1);
28904 /* xa = abs (operand1) */
28905 xa = ix86_expand_sse_fabs (res, &mask);
28907 /* if (!isless (xa, TWO52)) goto label; */
28908 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28910 /* x = (double)(long)x */
28911 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28912 expand_fix (xi, res, 0);
28913 expand_float (res, xi, 0);
28915 if (HONOR_SIGNED_ZEROS (mode))
28916 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28918 emit_label (label);
28919 LABEL_NUSES (label) = 1;
28921 emit_move_insn (operand0, res);
28924 /* Expand SSE sequence for computing trunc from OPERAND1 storing into OPERAND0, without relying on DImode truncation via cvttsd2siq (64-bit only). */
28927 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
28929 enum machine_mode mode = GET_MODE (operand0);
28930 rtx xa, mask, TWO52, label, one, res, smask, tmp;
28932 /* C code for SSE variant we expand below.
28933 double xa = fabs (x), x2;
28934 if (!isless (xa, TWO52))
28936 xa2 = xa + TWO52 - TWO52;
28940 x2 = copysign (xa2, x);
28944 TWO52 = ix86_gen_TWO52 (mode);
28946 /* Temporary for holding the result, initialized to the input
28947 operand to ease control flow. */
28948 res = gen_reg_rtx (mode);
28949 emit_move_insn (res, operand1);
28951 /* xa = abs (operand1) */
28952 xa = ix86_expand_sse_fabs (res, &smask);
28954 /* if (!isless (xa, TWO52)) goto label; */
28955 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28957 /* res = xa + TWO52 - TWO52; */
28958 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28959 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
28960 emit_move_insn (res, tmp);
28963 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28965 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
28966 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
28967 emit_insn (gen_rtx_SET (VOIDmode, mask,
28968 gen_rtx_AND (mode, mask, one)));
28969 tmp = expand_simple_binop (mode, MINUS,
28970 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
28971 emit_move_insn (res, tmp);
28973 /* res = copysign (res, operand1) */
28974 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
28976 emit_label (label);
28977 LABEL_NUSES (label) = 1;
28979 emit_move_insn (operand0, res);
28982 /* Expand SSE sequence for computing round from OPERAND1 storing into OPERAND0. */
28985 ix86_expand_round (rtx operand0, rtx operand1)
28987 /* C code for the stuff we're doing below:
28988 double xa = fabs (x);
28989 if (!isless (xa, TWO52))
28991 xa = (double)(long)(xa + nextafter (0.5, 0.0));
28992 return copysign (xa, x);
28994 enum machine_mode mode = GET_MODE (operand0);
28995 rtx res, TWO52, xa, label, xi, half, mask;
28996 const struct real_format *fmt;
28997 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28999 /* Temporary for holding the result, initialized to the input
29000 operand to ease control flow. */
29001 res = gen_reg_rtx (mode);
29002 emit_move_insn (res, operand1);
29004 TWO52 = ix86_gen_TWO52 (mode);
29005 xa = ix86_expand_sse_fabs (res, &mask);
29006 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29008 /* load nextafter (0.5, 0.0) */
29009 fmt = REAL_MODE_FORMAT (mode);
29010 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
29011 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
29013 /* xa = xa + 0.5 */
29014 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
29015 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
29017 /* xa = (double)(int64_t)xa */
29018 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29019 expand_fix (xi, xa, 0);
29020 expand_float (xa, xi, 0);
29022 /* res = copysign (xa, operand1) */
29023 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
29025 emit_label (label);
29026 LABEL_NUSES (label) = 1;
29028 emit_move_insn (operand0, res);
29032 /* Table of valid machine attributes. */
29033 static const struct attribute_spec ix86_attribute_table[] =
29035 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
29036 /* Stdcall attribute says callee is responsible for popping arguments
29037 if they are not variable. */
29038 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29039 /* Fastcall attribute says callee is responsible for popping arguments
29040 if they are not variable. */
29041 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29042 /* Thiscall attribute says callee is responsible for popping arguments
29043 if they are not variable. */
29044 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29045 /* Cdecl attribute says the callee is a normal C declaration */
29046 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29047 /* Regparm attribute specifies how many integer arguments are to be
29048 passed in registers. */
29049 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
29050 /* Sseregparm attribute says we are using x86_64 calling conventions
29051 for FP arguments. */
29052 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29053 /* force_align_arg_pointer says this function realigns the stack at entry. */
29054 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
29055 false, true, true, ix86_handle_cconv_attribute },
29056 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29057 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
29058 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
29059 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
29061 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29062 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29063 #ifdef SUBTARGET_ATTRIBUTE_TABLE
29064 SUBTARGET_ATTRIBUTE_TABLE,
29066 /* ms_abi and sysv_abi calling convention function attributes. */
29067 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29068 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29069 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute },
29071 { NULL, 0, 0, false, false, false, NULL }
29074 /* Implement targetm.vectorize.builtin_vectorization_cost. */
29076 ix86_builtin_vectorization_cost (bool runtime_test)
29078 /* If the branch of the runtime test is taken - i.e., the vectorized
29079 version is skipped - this incurs a misprediction cost (because the
29080 vectorized version is expected to be the fall-through). So we subtract
29081 the latency of a mispredicted branch from the costs that are incurred
29082 when the vectorized version is executed.
29084 TODO: The values in individual target tables have to be tuned or new
29085 fields may be needed. E.g. on K8, the default branch path is the
29086 not-taken path. If the taken path is predicted correctly, the minimum
29087 penalty of going down the taken-path is 1 cycle. If the taken-path is
29088 not predicted correctly, then the minimum penalty is 10 cycles. */
29092 return (-(ix86_cost->cond_taken_branch_cost));
29098 /* Implement targetm.vectorize.builtin_vec_perm. */
29101 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
29103 tree itype = TREE_TYPE (vec_type);
29104 bool u = TYPE_UNSIGNED (itype);
29105 enum machine_mode vmode = TYPE_MODE (vec_type);
29106 enum ix86_builtins fcode = fcode; /* Silence bogus warning. */
29107 bool ok = TARGET_SSE2;
29113 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
29116 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
29118 itype = ix86_get_builtin_type (IX86_BT_DI);
29123 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
29127 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
29129 itype = ix86_get_builtin_type (IX86_BT_SI);
29133 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
29136 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
29139 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
29142 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
29152 *mask_type = itype;
29153 return ix86_builtins[(int) fcode];
29156 /* Return a vector mode with twice as many elements as VMODE. */
29157 /* ??? Consider moving this to a table generated by genmodes.c. */
29159 static enum machine_mode
29160 doublesize_vector_mode (enum machine_mode vmode)
29164 case V2SFmode: return V4SFmode;
29165 case V1DImode: return V2DImode;
29166 case V2SImode: return V4SImode;
29167 case V4HImode: return V8HImode;
29168 case V8QImode: return V16QImode;
29170 case V2DFmode: return V4DFmode;
29171 case V4SFmode: return V8SFmode;
29172 case V2DImode: return V4DImode;
29173 case V4SImode: return V8SImode;
29174 case V8HImode: return V16HImode;
29175 case V16QImode: return V32QImode;
29177 case V4DFmode: return V8DFmode;
29178 case V8SFmode: return V16SFmode;
29179 case V4DImode: return V8DImode;
29180 case V8SImode: return V16SImode;
29181 case V16HImode: return V32HImode;
29182 case V32QImode: return V64QImode;
29185 gcc_unreachable ();
29189 /* Construct (set target (vec_select op0 (parallel perm))) and
29190 return true if that's a valid instruction in the active ISA. */
29193 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
29195 rtx rperm[MAX_VECT_LEN], x;
29198 for (i = 0; i < nelt; ++i)
29199 rperm[i] = GEN_INT (perm[i]);
29201 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
29202 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
29203 x = gen_rtx_SET (VOIDmode, target, x);
29206 if (recog_memoized (x) < 0)
29214 /* Similar, but generate a vec_concat from op0 and op1 as well. */
29217 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
29218 const unsigned char *perm, unsigned nelt)
29220 enum machine_mode v2mode;
29223 v2mode = doublesize_vector_mode (GET_MODE (op0));
29224 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
29225 return expand_vselect (target, x, perm, nelt);
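/* For example, expand_vselect_vconcat (target, op0, op1,
   { 0, 4, 1, 5 }, 4) on V4SF operands forms

     (set target (vec_select:V4SF (vec_concat:V8SF op0 op1)
                                  (parallel [0 4 1 5])))

   which recog matches as unpcklps. */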
29228 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29229 in terms of blendp[sd] / pblendw / pblendvb. */
29232 expand_vec_perm_blend (struct expand_vec_perm_d *d)
29234 enum machine_mode vmode = d->vmode;
29235 unsigned i, mask, nelt = d->nelt;
29236 rtx target, op0, op1, x;
29238 if (!TARGET_SSE4_1 || d->op0 == d->op1)
29240 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
29243 /* This is a blend, not a permute. Elements must stay in their
29244 respective lanes. */
29245 for (i = 0; i < nelt; ++i)
29247 unsigned e = d->perm[i];
29248 if (!(e == i || e == i + nelt))
29255 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
29256 decision should be extracted elsewhere, so that we only try that
29257 sequence once all budget==3 options have been tried. */
29259 /* For bytes, see if bytes move in pairs so we can use pblendw with
29260 an immediate argument, rather than pblendvb with a vector argument. */
29261 if (vmode == V16QImode)
29263 bool pblendw_ok = true;
29264 for (i = 0; i < 16 && pblendw_ok; i += 2)
29265 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
29269 rtx rperm[16], vperm;
29271 for (i = 0; i < nelt; ++i)
29272 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
29274 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29275 vperm = force_reg (V16QImode, vperm);
29277 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
29282 target = d->target;
29294 for (i = 0; i < nelt; ++i)
29295 mask |= (d->perm[i] >= nelt) << i;
29299 for (i = 0; i < 2; ++i)
29300 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
29304 for (i = 0; i < 4; ++i)
29305 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
29309 for (i = 0; i < 8; ++i)
29310 mask |= (d->perm[i * 2] >= 16) << i;
29314 target = gen_lowpart (vmode, target);
29315 op0 = gen_lowpart (vmode, op0);
29316 op1 = gen_lowpart (vmode, op1);
29320 gcc_unreachable ();
29323 /* This matches five different patterns with the different modes. */
29324 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
29325 x = gen_rtx_SET (VOIDmode, target, x);
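/* For example, a V4SF blend with perm = { 0, 5, 2, 7 } takes
   elements 1 and 3 from OP1, so the loop above computes mask = 0xa
   and the result is a blendps with immediate 0xa. */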
29331 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29332 in terms of the variable form of vpermilps.
29334 Note that we will have already failed the immediate input vpermilps,
29335 which requires that the high and low part shuffle be identical; the
29336 variable form doesn't require that. */
29339 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
29341 rtx rperm[8], vperm;
29344 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
29347 /* We can only permute within the 128-bit lane. */
29348 for (i = 0; i < 8; ++i)
29350 unsigned e = d->perm[i];
29351 if (i < 4 ? e >= 4 : e < 4)
29358 for (i = 0; i < 8; ++i)
29360 unsigned e = d->perm[i];
29362 /* Within each 128-bit lane, the elements of op0 are numbered
29363 from 0 and the elements of op1 are numbered from 4. */
29369 rperm[i] = GEN_INT (e);
29372 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
29373 vperm = force_reg (V8SImode, vperm);
29374 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
29379 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29380 in terms of pshufb or vpperm. */
29383 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
29385 unsigned i, nelt, eltsz;
29386 rtx rperm[16], vperm, target, op0, op1;
29388 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
29390 if (GET_MODE_SIZE (d->vmode) != 16)
29397 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29399 for (i = 0; i < nelt; ++i)
29401 unsigned j, e = d->perm[i];
29402 for (j = 0; j < eltsz; ++j)
29403 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
29406 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29407 vperm = force_reg (V16QImode, vperm);
29409 target = gen_lowpart (V16QImode, d->target);
29410 op0 = gen_lowpart (V16QImode, d->op0);
29411 if (d->op0 == d->op1)
29412 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
29415 op1 = gen_lowpart (V16QImode, d->op1);
29416 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
29422 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
29423 in a single instruction. */
29426 expand_vec_perm_1 (struct expand_vec_perm_d *d)
29428 unsigned i, nelt = d->nelt;
29429 unsigned char perm2[MAX_VECT_LEN];
29431 /* Check plain VEC_SELECT first, because AVX has instructions that could
29432 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
29433 input where SEL+CONCAT may not. */
29434 if (d->op0 == d->op1)
29436 int mask = nelt - 1;
29438 for (i = 0; i < nelt; i++)
29439 perm2[i] = d->perm[i] & mask;
29441 if (expand_vselect (d->target, d->op0, perm2, nelt))
29444 /* There are plenty of patterns in sse.md that are written for
29445 SEL+CONCAT and are not replicated for a single op. Perhaps
29446 that should be changed, to avoid the nastiness here. */
29448 /* Recognize interleave style patterns, which means incrementing
29449 every other permutation operand. */
29450 for (i = 0; i < nelt; i += 2)
29452 perm2[i] = d->perm[i] & mask;
29453 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
29455 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29458 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
29461 for (i = 0; i < nelt; i += 4)
29463 perm2[i + 0] = d->perm[i + 0] & mask;
29464 perm2[i + 1] = d->perm[i + 1] & mask;
29465 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
29466 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
29469 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29474 /* Finally, try the fully general two operand permute. */
29475 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
29478 /* Recognize interleave style patterns with reversed operands. */
29479 if (d->op0 != d->op1)
29481 for (i = 0; i < nelt; ++i)
29483 unsigned e = d->perm[i];
29491 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
29495 /* Try the SSE4.1 blend variable merge instructions. */
29496 if (expand_vec_perm_blend (d))
29499 /* Try one of the AVX vpermil variable permutations. */
29500 if (expand_vec_perm_vpermil (d))
29503 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
29504 if (expand_vec_perm_pshufb (d))
29510 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29511 in terms of a pair of pshuflw + pshufhw instructions. */
29514 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
29516 unsigned char perm2[MAX_VECT_LEN];
29520 if (d->vmode != V8HImode || d->op0 != d->op1)
29523 /* The two permutations only operate in 64-bit lanes. */
29524 for (i = 0; i < 4; ++i)
29525 if (d->perm[i] >= 4)
29527 for (i = 4; i < 8; ++i)
29528 if (d->perm[i] < 4)
29534 /* Emit the pshuflw. */
29535 memcpy (perm2, d->perm, 4);
29536 for (i = 4; i < 8; ++i)
29538 ok = expand_vselect (d->target, d->op0, perm2, 8);
29541 /* Emit the pshufhw. */
29542 memcpy (perm2 + 4, d->perm + 4, 4);
29543 for (i = 0; i < 4; ++i)
29545 ok = expand_vselect (d->target, d->target, perm2, 8);
29551 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29552 the permutation using the SSSE3 palignr instruction. This succeeds
29553 when all of the elements in PERM fit within one vector and we merely
29554 need to shift them down so that a single vector permutation has a
29555 chance to succeed. */
29558 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
29560 unsigned i, nelt = d->nelt;
29565 /* Even with AVX, palignr only operates on 128-bit vectors. */
29566 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29569 min = nelt, max = 0;
29570 for (i = 0; i < nelt; ++i)
29572 unsigned e = d->perm[i];
29578 if (min == 0 || max - min >= nelt)
29581 /* Given that we have SSSE3, we know we'll be able to implement the
29582 single operand permutation after the palignr with pshufb. */
29586 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
29587 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
29588 gen_lowpart (TImode, d->op1),
29589 gen_lowpart (TImode, d->op0), shift));
29591 d->op0 = d->op1 = d->target;
29594 for (i = 0; i < nelt; ++i)
29596 unsigned e = d->perm[i] - min;
29602 /* Test for the degenerate case where the alignment by itself
29603 produces the desired permutation. */
29607 ok = expand_vec_perm_1 (d);
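/* For example, on V8HI with perm = { 3, 4, 5, 6, 7, 8, 9, 10 } we get
   min == 3, so the palignr shifts the op1:op0 concatenation down by
   three elements; the residual permutation is then the identity and
   the degenerate-case test above succeeds without a further pshufb. */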
29613 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29614 a two vector permutation into a single vector permutation by using
29615 an interleave operation to merge the vectors. */
29618 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
29620 struct expand_vec_perm_d dremap, dfinal;
29621 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
29622 unsigned contents, h1, h2, h3, h4;
29623 unsigned char remap[2 * MAX_VECT_LEN];
29627 if (d->op0 == d->op1)
29630 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
29631 lanes. We can use similar techniques with the vperm2f128 instruction,
29632 but it requires slightly different logic. */
29633 if (GET_MODE_SIZE (d->vmode) != 16)
29636 /* Examine from whence the elements come. */
29638 for (i = 0; i < nelt; ++i)
29639 contents |= 1u << d->perm[i];
29641 /* Split the two input vectors into 4 halves. */
29642 h1 = (1u << nelt2) - 1;
29647 memset (remap, 0xff, sizeof (remap));
29650 /* If all the elements come from the low halves, use interleave low; similarly
29651 for interleave high. If the elements are from mis-matched halves, we
29652 can use shufps for V4SF/V4SI or do a DImode shuffle. */
29653 if ((contents & (h1 | h3)) == contents)
29655 for (i = 0; i < nelt2; ++i)
29658 remap[i + nelt] = i * 2 + 1;
29659 dremap.perm[i * 2] = i;
29660 dremap.perm[i * 2 + 1] = i + nelt;
29663 else if ((contents & (h2 | h4)) == contents)
29665 for (i = 0; i < nelt2; ++i)
29667 remap[i + nelt2] = i * 2;
29668 remap[i + nelt + nelt2] = i * 2 + 1;
29669 dremap.perm[i * 2] = i + nelt2;
29670 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
29673 else if ((contents & (h1 | h4)) == contents)
29675 for (i = 0; i < nelt2; ++i)
29678 remap[i + nelt + nelt2] = i + nelt2;
29679 dremap.perm[i] = i;
29680 dremap.perm[i + nelt2] = i + nelt + nelt2;
29684 dremap.vmode = V2DImode;
29686 dremap.perm[0] = 0;
29687 dremap.perm[1] = 3;
29690 else if ((contents & (h2 | h3)) == contents)
29692 for (i = 0; i < nelt2; ++i)
29694 remap[i + nelt2] = i;
29695 remap[i + nelt] = i + nelt2;
29696 dremap.perm[i] = i + nelt2;
29697 dremap.perm[i + nelt2] = i + nelt;
29701 dremap.vmode = V2DImode;
29703 dremap.perm[0] = 1;
29704 dremap.perm[1] = 2;
29710 /* Use the remapping array set up above to move the elements from their
29711 swizzled locations into their final destinations. */
29713 for (i = 0; i < nelt; ++i)
29715 unsigned e = remap[d->perm[i]];
29716 gcc_assert (e < nelt);
29717 dfinal.perm[i] = e;
29719 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
29720 dfinal.op1 = dfinal.op0;
29721 dremap.target = dfinal.op0;
29723 /* Test if the final remap can be done with a single insn. For V4SFmode or
29724 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
29726 ok = expand_vec_perm_1 (&dfinal);
29727 seq = get_insns ();
29733 if (dremap.vmode != dfinal.vmode)
29735 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
29736 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
29737 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
29740 ok = expand_vec_perm_1 (&dremap);
29747 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
29748 permutation with two pshufb insns and an ior. We should have already
29749 failed all two instruction sequences. */
29752 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
29754 rtx rperm[2][16], vperm, l, h, op, m128;
29755 unsigned int i, nelt, eltsz;
29757 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29759 gcc_assert (d->op0 != d->op1);
29762 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29764 /* Generate two permutation masks. If the required element is within
29765 the given vector it is shuffled into the proper lane. If the required
29766 element is in the other vector, force a zero into the lane by setting
29767 bit 7 in the permutation mask. */
29768 m128 = GEN_INT (-128);
29769 for (i = 0; i < nelt; ++i)
29771 unsigned j, e = d->perm[i];
29772 unsigned which = (e >= nelt);
29776 for (j = 0; j < eltsz; ++j)
29778 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
29779 rperm[1-which][i*eltsz + j] = m128;
29783 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
29784 vperm = force_reg (V16QImode, vperm);
29786 l = gen_reg_rtx (V16QImode);
29787 op = gen_lowpart (V16QImode, d->op0);
29788 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
29790 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
29791 vperm = force_reg (V16QImode, vperm);
29793 h = gen_reg_rtx (V16QImode);
29794 op = gen_lowpart (V16QImode, d->op1);
29795 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
29797 op = gen_lowpart (V16QImode, d->target);
29798 emit_insn (gen_iorv16qi3 (op, l, h));
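/* For example, with perm = { 0, 16, 1, 17, ... } interleaving two
   V16QI vectors: the first mask keeps OP0's bytes and writes 0x80
   (force-to-zero) in the OP1 positions, the second mask does the
   converse for OP1, and the IOR merges the halves. */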
29803 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
29804 and extract-odd permutations. */
29807 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
29809 rtx t1, t2, t3, t4;
29814 t1 = gen_reg_rtx (V4DFmode);
29815 t2 = gen_reg_rtx (V4DFmode);
29817 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
29818 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
29819 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
29821 /* Now an unpck[lh]pd will produce the result required. */
29823 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
29825 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
29831 static const unsigned char perm1[8] = { 0, 2, 1, 3, 4, 6, 5, 7 };
29832 static const unsigned char perme[8] = { 0, 1, 8, 9, 4, 5, 12, 13 };
29833 static const unsigned char permo[8] = { 2, 3, 10, 11, 6, 7, 14, 15 };
29835 t1 = gen_reg_rtx (V8SFmode);
29836 t2 = gen_reg_rtx (V8SFmode);
29837 t3 = gen_reg_rtx (V8SFmode);
29838 t4 = gen_reg_rtx (V8SFmode);
29840 /* Shuffle within the 128-bit lanes to produce:
29841 { 0 2 1 3 4 6 5 7 } and { 8 a 9 b c e d f }. */
29842 expand_vselect (t1, d->op0, perm1, 8);
29843 expand_vselect (t2, d->op1, perm1, 8);
29845 /* Shuffle the lanes around to produce:
29846 { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }. */
29847 emit_insn (gen_avx_vperm2f128v8sf3 (t3, t1, t2, GEN_INT (0x20)));
29848 emit_insn (gen_avx_vperm2f128v8sf3 (t4, t1, t2, GEN_INT (0x31)));
29850 /* Now a vpermil2p will produce the result required. */
29851 /* ??? The vpermil2p requires a vector constant. Another option
29852 is an unpck[lh]ps to merge the two vectors to produce
29853 { 0 4 2 6 8 c a e } or { 1 5 3 7 9 d b f }. Then use another
29854 vpermilps to get the elements into the final order. */
29857 memcpy (d->perm, odd ? permo : perme, 8);
29858 expand_vec_perm_vpermil (d);
29866 /* These are always directly implementable by expand_vec_perm_1. */
29867 gcc_unreachable ();
29871 return expand_vec_perm_pshufb2 (d);
29874 /* We need 2*log2(N)-1 operations to achieve odd/even
29875 with interleave. */
29876 t1 = gen_reg_rtx (V8HImode);
29877 t2 = gen_reg_rtx (V8HImode);
29878 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
29879 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
29880 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
29881 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
29883 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
29885 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
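/* For V8HI this is the promised 2*log2(8)-1 = 5 interleaves: two
   rounds of low/high interleaving, then one final interleave picking
   the even or odd half. */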
29892 return expand_vec_perm_pshufb2 (d);
29895 t1 = gen_reg_rtx (V16QImode);
29896 t2 = gen_reg_rtx (V16QImode);
29897 t3 = gen_reg_rtx (V16QImode);
29898 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
29899 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
29900 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
29901 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
29902 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
29903 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
29905 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
29907 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
29913 gcc_unreachable ();
29919 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
29920 extract-even and extract-odd permutations. */
29923 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
29925 unsigned i, odd, nelt = d->nelt;
29928 if (odd != 0 && odd != 1)
29931 for (i = 1; i < nelt; ++i)
29932 if (d->perm[i] != 2 * i + odd)
29935 return expand_vec_perm_even_odd_1 (d, odd);
29938 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
29939 permutations. We assume that expand_vec_perm_1 has already failed. */
29942 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
29944 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
29945 enum machine_mode vmode = d->vmode;
29946 unsigned char perm2[4];
29954 /* These are special-cased in sse.md so that we can optionally
29955 use the vbroadcast instruction. They expand to two insns
29956 if the input happens to be in a register. */
29957 gcc_unreachable ();
29963 /* These are always implementable using standard shuffle patterns. */
29964 gcc_unreachable ();
29968 /* These can be implemented via interleave. We save one insn by
29969 stopping once we have promoted to V4SImode and then using pshufd. */
29972 optab otab = vec_interleave_low_optab;
29976 otab = vec_interleave_high_optab;
29981 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
29982 vmode = get_mode_wider_vector (vmode);
29983 op0 = gen_lowpart (vmode, op0);
29985 while (vmode != V4SImode);
29987 memset (perm2, elt, 4);
29988 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
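/* For example, broadcasting element 5 of a V16QI vector: the loop
   interleaves low (byte 5 is in the low half), then high (it is in
   the high half of the resulting V8HI), leaving the value in SImode
   element 1, so perm2 = { 1, 1, 1, 1 } and pshufd replicates it.
   (Illustrative walk-through.) */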
29993 gcc_unreachable ();
29997 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
29998 broadcast permutations. */
30001 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
30003 unsigned i, elt, nelt = d->nelt;
30005 if (d->op0 != d->op1)
30009 for (i = 1; i < nelt; ++i)
30010 if (d->perm[i] != elt)
30013 return expand_vec_perm_broadcast_1 (d);
30016 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
30017 With all of the interface bits taken care of, perform the expansion
30018 in D and return true on success. */
30021 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
30023 /* Try a single instruction expansion. */
30024 if (expand_vec_perm_1 (d))
30027 /* Try sequences of two instructions. */
30029 if (expand_vec_perm_pshuflw_pshufhw (d))
30032 if (expand_vec_perm_palignr (d))
30035 if (expand_vec_perm_interleave2 (d))
30038 if (expand_vec_perm_broadcast (d))
30041 /* Try sequences of three instructions. */
30043 if (expand_vec_perm_pshufb2 (d))
30046 /* ??? Look for narrow permutations whose element orderings would
30047 allow the promotion to a wider mode. */
30049 /* ??? Look for sequences of interleave or a wider permute that place
30050 the data into the correct lanes for a half-vector shuffle like
30051 pshuf[lh]w or vpermilps. */
30053 /* ??? Look for sequences of interleave that produce the desired results.
30054 The combinatorics of punpck[lh] get pretty ugly... */
30056 if (expand_vec_perm_even_odd (d))
30062 /* Extract the values from the vector CST into the permutation array in D.
30063 Return 0 on error, 1 if all values from the permutation come from the
30064 first vector, 2 if all values from the second vector, and 3 otherwise. */
30067 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
30069 tree list = TREE_VECTOR_CST_ELTS (cst);
30070 unsigned i, nelt = d->nelt;
30073 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
30075 unsigned HOST_WIDE_INT e;
30077 if (!host_integerp (TREE_VALUE (list), 1))
30079 e = tree_low_cst (TREE_VALUE (list), 1);
30083 ret |= (e < nelt ? 1 : 2);
30086 gcc_assert (list == NULL);
30088 /* If all elements are from the second vector, fold them to the first. */
30090 for (i = 0; i < nelt; ++i)
30091 d->perm[i] -= nelt;
30097 ix86_expand_vec_perm_builtin (tree exp)
30099 struct expand_vec_perm_d d;
30100 tree arg0, arg1, arg2;
30102 arg0 = CALL_EXPR_ARG (exp, 0);
30103 arg1 = CALL_EXPR_ARG (exp, 1);
30104 arg2 = CALL_EXPR_ARG (exp, 2);
30106 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
30107 d.nelt = GET_MODE_NUNITS (d.vmode);
30108 d.testing_p = false;
30109 gcc_assert (VECTOR_MODE_P (d.vmode));
30111 if (TREE_CODE (arg2) != VECTOR_CST)
30113 error_at (EXPR_LOCATION (exp),
30114 "vector permutation requires vector constant");
30118 switch (extract_vec_perm_cst (&d, arg2))
30124 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
30128 if (!operand_equal_p (arg0, arg1, 0))
30130 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30131 d.op0 = force_reg (d.vmode, d.op0);
30132 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30133 d.op1 = force_reg (d.vmode, d.op1);
30137 /* The elements of PERM do not suggest that only the first operand
30138 is used, but both operands are identical. Allow easier matching
30139 of the permutation by folding the permutation into the single input vector. */
30142 unsigned i, nelt = d.nelt;
30143 for (i = 0; i < nelt; ++i)
30144 if (d.perm[i] >= nelt)
30150 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30151 d.op0 = force_reg (d.vmode, d.op0);
30156 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30157 d.op0 = force_reg (d.vmode, d.op0);
30162 d.target = gen_reg_rtx (d.vmode);
30163 if (ix86_expand_vec_perm_builtin_1 (&d))
30166 /* For compiler generated permutations, we should never get here, because
30167 the compiler should also be checking the ok hook. But since this is a
30168 builtin the user has access to, don't abort. */
30172 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
30175 sorry ("vector permutation (%d %d %d %d)",
30176 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
30179 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
30180 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30181 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
30184 sorry ("vector permutation "
30185 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
30186 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30187 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
30188 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
30189 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
30192 gcc_unreachable ();
30195 return CONST0_RTX (d.vmode);
30198 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
30201 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
30203 struct expand_vec_perm_d d;
30207 d.vmode = TYPE_MODE (vec_type);
30208 d.nelt = GET_MODE_NUNITS (d.vmode);
30209 d.testing_p = true;
30211 /* Given sufficient ISA support we can just return true here
30212 for selected vector modes. */
30213 if (GET_MODE_SIZE (d.vmode) == 16)
30215 /* All implementable with a single vpperm insn. */
30218 /* All implementable with 2 pshufb + 1 ior. */
30221 /* All implementable with shufpd or unpck[lh]pd. */
30226 vec_mask = extract_vec_perm_cst (&d, mask);
30228 /* This hook cannot be called in response to something that the
30229 user does (unlike the builtin expander), so we should never see
30230 an error generated from the extract. */
30231 gcc_assert (vec_mask > 0 && vec_mask <= 3);
30232 one_vec = (vec_mask != 3);
30234 /* Implementable with shufps or pshufd. */
30235 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
30238 /* Otherwise we have to go through the motions and see if we can
30239 figure out how to generate the requested permutation. */
30240 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
30241 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
30243 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
30246 ret = ix86_expand_vec_perm_builtin_1 (&d);
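
/* Added commentary: in the testing_p path above the raw registers are
   only placeholders that never reach the insn stream, and any RTL that
   ix86_expand_vec_perm_builtin_1 emits while probing for a strategy is
   captured by the start_sequence/end_sequence pair and discarded, so
   the hook answers "is this permutation implementable?" without side
   effects.  */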
void
ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
{
  struct expand_vec_perm_d d;
  unsigned i, nelt;

  d.target = targ;
  d.op0 = op0;
  d.op1 = op1;
  d.vmode = GET_MODE (targ);
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = false;

  for (i = 0; i < nelt; ++i)
    d.perm[i] = i * 2 + odd;

  /* We'll either be able to implement the permutation directly...  */
  if (expand_vec_perm_1 (&d))
    return;

  /* ... or we use the special-case patterns.  */
  expand_vec_perm_even_odd_1 (&d, odd);
}
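
/* Example (added commentary): for a V8HI target and ODD == 1, the loop
   above builds the selector { 1, 3, 5, 7, 9, 11, 13, 15 }, i.e. the
   odd-numbered halfwords of the 16-element concatenation OP0:OP1.  */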

/* Return the calling-ABI-specific va_list type node appropriate for
   FNDECL.  In 64-bit mode this depends on whether FNDECL uses the MS
   or the SysV calling convention.  */

static tree
ix86_fn_abi_va_list (tree fndecl)
{
  if (!TARGET_64BIT)
    return va_list_type_node;
  gcc_assert (fndecl != NULL_TREE);

  if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
    return ms_va_list_type_node;

  return sysv_va_list_type_node;
}
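
/* Illustrative sketch, not part of GCC: in 64-bit code the result of
   the hook above follows the ABI attribute of the declaration, e.g.

     void f (int n, ...) __attribute__ ((ms_abi));    ms_va_list_type_node
     void g (int n, ...);                             sysv_va_list_type_node

   while 32-bit code always sees the single traditional va_list type.  */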

/* Return the canonical va_list type specified by TYPE.  If there
   is no valid TYPE provided, return NULL_TREE.  */

static tree
ix86_canonical_va_list_type (tree type)
{
  tree wtype, htype;

  /* Resolve references and pointers to va_list type.  */
  if (INDIRECT_REF_P (type))
    type = TREE_TYPE (type);
  else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE (type)))
    type = TREE_TYPE (type);

  if (TARGET_64BIT)
    {
      wtype = va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
	  /* If va_list is an array type, the argument may have decayed
	     to a pointer type, e.g. by being passed to another function.
	     In that case, unwrap both types so that we can compare the
	     underlying records.  */
	  if (TREE_CODE (htype) == ARRAY_TYPE
	      || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
	return va_list_type_node;

      wtype = sysv_va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
	  /* If va_list is an array type, the argument may have decayed
	     to a pointer type, e.g. by being passed to another function.
	     In that case, unwrap both types so that we can compare the
	     underlying records.  */
	  if (TREE_CODE (htype) == ARRAY_TYPE
	      || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
	return sysv_va_list_type_node;

      wtype = ms_va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
	  /* If va_list is an array type, the argument may have decayed
	     to a pointer type, e.g. by being passed to another function.
	     In that case, unwrap both types so that we can compare the
	     underlying records.  */
	  if (TREE_CODE (htype) == ARRAY_TYPE
	      || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
	return ms_va_list_type_node;
      return NULL_TREE;
    }

  return std_canonical_va_list_type (type);
}
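
/* Added commentary: the array-decay handling above matters because the
   SysV va_list is a one-element array of records, so its type changes
   shape as it moves through a program:

     void use (va_list ap);      as a parameter, va_list decays to a
                                 pointer to the underlying record
     void f (int n, ...)
     {
       va_list ap;               here it is still the array type
       va_start (ap, n);
       use (ap);
       va_end (ap);
     }

   Both forms must canonicalize to the same va_list type node, hence
   the unwrap-and-compare of TYPE_MAIN_VARIANT above.  */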

/* Iterate through the target-specific builtin types for va_list.
   IDX denotes the iterator, *PTREE is set to the result type of
   the va_list builtin, and *PNAME to its internal type.
   Returns zero if there is no element for this index, otherwise
   IDX should be increased upon the next call.
   Note, do not iterate a base builtin's name like __builtin_va_list.
   Used from c_common_nodes_and_builtins.  */

static int
ix86_enum_va_list (int idx, const char **pname, tree *ptree)
{
  if (!TARGET_64BIT)
    return 0;

  switch (idx)
    {
    case 0:
      *ptree = ms_va_list_type_node;
      *pname = "__builtin_ms_va_list";
      return 1;
    case 1:
      *ptree = sysv_va_list_type_node;
      *pname = "__builtin_sysv_va_list";
      return 1;
    default:
      return 0;
    }
}
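
/* Added commentary: exposing both entries lets 64-bit user code name
   either ABI's va_list explicitly, independent of the translation
   unit's default calling convention:

     __builtin_ms_va_list ms_ap;
     __builtin_sysv_va_list sysv_ap;
*/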

/* Initialize the GCC target structure.  */
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ix86_return_in_memory

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
# undef TARGET_MERGE_DECL_ATTRIBUTES
# define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ix86_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL ix86_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ix86_expand_builtin

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  ix86_builtin_vectorized_function

#undef TARGET_VECTORIZE_BUILTIN_CONVERSION
#define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue

#undef TARGET_ENCODE_SECTION_INFO
#ifndef SUBTARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
#endif

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""
#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP ASM_BYTE

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
#ifdef ASM_QUAD
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  ia32_multipass_dfa_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
#endif

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START x86_file_start

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS	\
  (TARGET_DEFAULT			\
   | TARGET_SUBTARGET_DEFAULT		\
   | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT \
   | MASK_FUSED_MADD)

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION ix86_handle_option

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ix86_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST ix86_address_cost

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list

#undef TARGET_FN_ABI_VA_LIST
#define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list

#undef TARGET_CANONICAL_VA_LIST_TYPE
#define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
#undef TARGET_UPDATE_STACK_BOUNDARY
#define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
#undef TARGET_GET_DRAP_RTX
#define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_STATIC_CHAIN
#define TARGET_STATIC_CHAIN ix86_static_chain
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT ix86_trampoline_init

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
#endif

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE ix86_mangle_type

#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE ix86_function_value

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD ix86_secondary_reload

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  ix86_builtin_vectorization_cost
#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
  ix86_vectorize_builtin_vec_perm
#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
  ix86_vectorize_builtin_vec_perm_ok

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE ix86_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE ix86_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT ix86_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P ix86_can_inline_p

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p

#undef TARGET_IRA_COVER_CLASSES
#define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE ix86_can_eliminate

#undef TARGET_ASM_CODE_END
#define TARGET_ASM_CODE_END ix86_code_end

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-i386.h"