/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "basic-block.h"
#include "target-def.h"
#include "langhooks.h"
#include "tm-constrs.h"
#include "dwarf2out.h"
#include "sched-int.h"

static rtx legitimize_dllimport_symbol (rtx, bool);
#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif
/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode)                        \
  ((mode) == QImode ? 0                         \
   : (mode) == HImode ? 1                       \
   : (mode) == SImode ? 2                       \
   : (mode) == DImode ? 3                       \
   : 4)
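
/* Illustrative lookup (a sketch only; the real consumer is the RTX cost
   hook later in this file): the mult_init and divide arrays in struct
   processor_costs are indexed via MODE_INDEX, with any mode wider than
   DImode falling into the trailing "other" slot (index 4), roughly

     cost = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
                           + nbits * ix86_cost->mult_bit);  */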
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)

#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
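
/* How to read the stringop_algs initializers in the cost tables below
   (a sketch of the layout declared in i386.h): the first member is the
   algorithm used when the block size is unknown at compile time; it is
   followed by {max_size, algorithm} pairs for known sizes, with a
   max_size of -1 terminating the list.  For example,

     {libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}

   means: unknown size -> libcall, sizes up to 256 bytes -> rep-prefixed
   4-byte string ops, anything larger -> libcall.  */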
const
struct processor_costs ix86_size_cost = {/* costs for tuning for size */
  COSTS_N_BYTES (2),                    /* cost of an add instruction */
  COSTS_N_BYTES (3),                    /* cost of a lea instruction */
  COSTS_N_BYTES (2),                    /* variable shift costs */
  COSTS_N_BYTES (3),                    /* constant shift costs */
  {COSTS_N_BYTES (3),                   /* cost of starting multiply for QI */
   COSTS_N_BYTES (3),                   /* HI */
   COSTS_N_BYTES (3),                   /* SI */
   COSTS_N_BYTES (3),                   /* DI */
   COSTS_N_BYTES (5)},                  /* other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),                   /* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),                   /* HI */
   COSTS_N_BYTES (3),                   /* SI */
   COSTS_N_BYTES (3),                   /* DI */
   COSTS_N_BYTES (5)},                  /* other */
  COSTS_N_BYTES (3),                    /* cost of movsx */
  COSTS_N_BYTES (3),                    /* cost of movzx */
  0,                                    /* "large" insn */
  2,                                    /* MOVE_RATIO */
  2,                                    /* cost for loading QImode using movzbl */
  {2, 2, 2},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {2, 2, 2},                            /* cost of storing integer registers */
  2,                                    /* cost of reg,reg fld/fst */
  {2, 2, 2},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {2, 2, 2},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  3,                                    /* cost of moving MMX register */
  {3, 3},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {3, 3},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  3,                                    /* cost of moving SSE register */
  {3, 3, 3},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {3, 3, 3},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  3,                                    /* MMX or SSE register to integer */
  0,                                    /* size of l1 cache */
  0,                                    /* size of l2 cache */
  0,                                    /* size of prefetch block */
  0,                                    /* number of parallel prefetches */
  1,                                    /* Branch cost */
  COSTS_N_BYTES (2),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),                    /* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),                    /* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),                    /* cost of FABS instruction.  */
  COSTS_N_BYTES (2),                    /* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),                    /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  1,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  1,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = {    /* 386 specific costs */
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (1),                    /* cost of a lea instruction */
  COSTS_N_INSNS (3),                    /* variable shift costs */
  COSTS_N_INSNS (2),                    /* constant shift costs */
  {COSTS_N_INSNS (6),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (6),                   /* HI */
   COSTS_N_INSNS (6),                   /* SI */
   COSTS_N_INSNS (6),                   /* DI */
   COSTS_N_INSNS (6)},                  /* other */
  COSTS_N_INSNS (1),                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),                  /* HI */
   COSTS_N_INSNS (23),                  /* SI */
   COSTS_N_INSNS (23),                  /* DI */
   COSTS_N_INSNS (23)},                 /* other */
  COSTS_N_INSNS (3),                    /* cost of movsx */
  COSTS_N_INSNS (2),                    /* cost of movzx */
  15,                                   /* "large" insn */
  3,                                    /* MOVE_RATIO */
  4,                                    /* cost for loading QImode using movzbl */
  {2, 4, 2},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {2, 4, 2},                            /* cost of storing integer registers */
  2,                                    /* cost of reg,reg fld/fst */
  {8, 8, 8},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {8, 8, 8},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {4, 8},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {4, 8},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {4, 8, 16},                           /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {4, 8, 16},                           /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  3,                                    /* MMX or SSE register to integer */
  0,                                    /* size of l1 cache */
  0,                                    /* size of l2 cache */
  0,                                    /* size of prefetch block */
  0,                                    /* number of parallel prefetches */
  1,                                    /* Branch cost */
  COSTS_N_INSNS (23),                   /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),                   /* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),                   /* cost of FABS instruction.  */
  COSTS_N_INSNS (24),                   /* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),                  /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs i486_cost = {    /* 486 specific costs */
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (1),                    /* cost of a lea instruction */
  COSTS_N_INSNS (3),                    /* variable shift costs */
  COSTS_N_INSNS (2),                    /* constant shift costs */
  {COSTS_N_INSNS (12),                  /* cost of starting multiply for QI */
   COSTS_N_INSNS (12),                  /* HI */
   COSTS_N_INSNS (12),                  /* SI */
   COSTS_N_INSNS (12),                  /* DI */
   COSTS_N_INSNS (12)},                 /* other */
  1,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),                  /* HI */
   COSTS_N_INSNS (40),                  /* SI */
   COSTS_N_INSNS (40),                  /* DI */
   COSTS_N_INSNS (40)},                 /* other */
  COSTS_N_INSNS (3),                    /* cost of movsx */
  COSTS_N_INSNS (2),                    /* cost of movzx */
  15,                                   /* "large" insn */
  3,                                    /* MOVE_RATIO */
  4,                                    /* cost for loading QImode using movzbl */
  {2, 4, 2},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {2, 4, 2},                            /* cost of storing integer registers */
  2,                                    /* cost of reg,reg fld/fst */
  {8, 8, 8},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {8, 8, 8},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {4, 8},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {4, 8},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {4, 8, 16},                           /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {4, 8, 16},                           /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  3,                                    /* MMX or SSE register to integer */
  4,                                    /* size of l1 cache.  486 has 8kB cache
                                           shared for code and data, so 4kB is
                                           not really precise.  */
  4,                                    /* size of l2 cache */
  0,                                    /* size of prefetch block */
  0,                                    /* number of parallel prefetches */
  1,                                    /* Branch cost */
  COSTS_N_INSNS (8),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),                   /* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (3),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),                   /* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (1),                    /* cost of a lea instruction */
  COSTS_N_INSNS (4),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (11),                  /* cost of starting multiply for QI */
   COSTS_N_INSNS (11),                  /* HI */
   COSTS_N_INSNS (11),                  /* SI */
   COSTS_N_INSNS (11),                  /* DI */
   COSTS_N_INSNS (11)},                 /* other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),                  /* HI */
   COSTS_N_INSNS (25),                  /* SI */
   COSTS_N_INSNS (25),                  /* DI */
   COSTS_N_INSNS (25)},                 /* other */
  COSTS_N_INSNS (3),                    /* cost of movsx */
  COSTS_N_INSNS (2),                    /* cost of movzx */
  8,                                    /* "large" insn */
  6,                                    /* MOVE_RATIO */
  6,                                    /* cost for loading QImode using movzbl */
  {2, 4, 2},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {2, 4, 2},                            /* cost of storing integer registers */
  2,                                    /* cost of reg,reg fld/fst */
  {2, 2, 6},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {4, 4, 6},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  8,                                    /* cost of moving MMX register */
  {8, 8},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {8, 8},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {4, 8, 16},                           /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {4, 8, 16},                           /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  3,                                    /* MMX or SSE register to integer */
  8,                                    /* size of l1 cache.  */
  8,                                    /* size of l2 cache */
  0,                                    /* size of prefetch block */
  0,                                    /* number of parallel prefetches */
  2,                                    /* Branch cost */
  COSTS_N_INSNS (3),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),                   /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (1),                    /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (4),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),                   /* HI */
   COSTS_N_INSNS (4),                   /* SI */
   COSTS_N_INSNS (4),                   /* DI */
   COSTS_N_INSNS (4)},                  /* other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),                  /* HI */
   COSTS_N_INSNS (17),                  /* SI */
   COSTS_N_INSNS (17),                  /* DI */
   COSTS_N_INSNS (17)},                 /* other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  6,                                    /* MOVE_RATIO */
  2,                                    /* cost for loading QImode using movzbl */
  {4, 4, 4},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {2, 2, 2},                            /* cost of storing integer registers */
  2,                                    /* cost of reg,reg fld/fst */
  {2, 2, 6},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {4, 4, 6},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {2, 2},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {2, 2},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {2, 2, 8},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {2, 2, 8},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  3,                                    /* MMX or SSE register to integer */
  8,                                    /* size of l1 cache.  */
  256,                                  /* size of l2 cache */
  32,                                   /* size of prefetch block */
  6,                                    /* number of parallel prefetches */
  2,                                    /* Branch cost */
  COSTS_N_INSNS (3),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),                   /* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks an inline loop is still a
     noticeable win; for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb apparently has more expensive startup time in the
     CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
                        {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
                        {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (1),                    /* cost of a lea instruction */
  COSTS_N_INSNS (2),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (3),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),                   /* HI */
   COSTS_N_INSNS (7),                   /* SI */
   COSTS_N_INSNS (7),                   /* DI */
   COSTS_N_INSNS (7)},                  /* other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),                  /* HI */
   COSTS_N_INSNS (39),                  /* SI */
   COSTS_N_INSNS (39),                  /* DI */
   COSTS_N_INSNS (39)},                 /* other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  4,                                    /* MOVE_RATIO */
  1,                                    /* cost for loading QImode using movzbl */
  {1, 1, 1},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {1, 1, 1},                            /* cost of storing integer registers */
  1,                                    /* cost of reg,reg fld/fst */
  {1, 1, 1},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {4, 6, 6},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */

  1,                                    /* cost of moving MMX register */
  {1, 1},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {1, 1},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  1,                                    /* cost of moving SSE register */
  {1, 1, 1},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {1, 1, 1},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  1,                                    /* MMX or SSE register to integer */
  64,                                   /* size of l1 cache.  */
  128,                                  /* size of l2 cache.  */
  32,                                   /* size of prefetch block */
  1,                                    /* number of parallel prefetches */
  1,                                    /* Branch cost */
  COSTS_N_INSNS (6),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),                   /* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),                   /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (2),                    /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (3),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (3),                   /* HI */
   COSTS_N_INSNS (3),                   /* SI */
   COSTS_N_INSNS (3),                   /* DI */
   COSTS_N_INSNS (3)},                  /* other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),                  /* HI */
   COSTS_N_INSNS (18),                  /* SI */
   COSTS_N_INSNS (18),                  /* DI */
   COSTS_N_INSNS (18)},                 /* other */
  COSTS_N_INSNS (2),                    /* cost of movsx */
  COSTS_N_INSNS (2),                    /* cost of movzx */
  8,                                    /* "large" insn */
  4,                                    /* MOVE_RATIO */
  3,                                    /* cost for loading QImode using movzbl */
  {4, 5, 4},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {2, 3, 2},                            /* cost of storing integer registers */
  4,                                    /* cost of reg,reg fld/fst */
  {6, 6, 6},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {4, 4, 4},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {2, 2},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {2, 2},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {2, 2, 8},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {2, 2, 8},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  6,                                    /* MMX or SSE register to integer */
  32,                                   /* size of l1 cache.  */
  32,                                   /* size of l2 cache.  Some models
                                           have integrated l2 cache, but
                                           optimizing for k6 is not important
                                           enough to worry about that.  */
  32,                                   /* size of prefetch block */
  1,                                    /* number of parallel prefetches */
  1,                                    /* Branch cost */
  COSTS_N_INSNS (2),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),                   /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (2),                    /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (5),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (5),                   /* HI */
   COSTS_N_INSNS (5),                   /* SI */
   COSTS_N_INSNS (5),                   /* DI */
   COSTS_N_INSNS (5)},                  /* other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),                  /* HI */
   COSTS_N_INSNS (42),                  /* SI */
   COSTS_N_INSNS (74),                  /* DI */
   COSTS_N_INSNS (74)},                 /* other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  9,                                    /* MOVE_RATIO */
  4,                                    /* cost for loading QImode using movzbl */
  {3, 4, 3},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {3, 4, 3},                            /* cost of storing integer registers */
  4,                                    /* cost of reg,reg fld/fst */
  {4, 4, 12},                           /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {6, 6, 8},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {4, 4},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {4, 4},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {4, 4, 6},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {4, 4, 5},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  5,                                    /* MMX or SSE register to integer */
  64,                                   /* size of l1 cache.  */
  256,                                  /* size of l2 cache.  */
  64,                                   /* size of prefetch block */
  6,                                    /* number of parallel prefetches */
  5,                                    /* Branch cost */
  COSTS_N_INSNS (4),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),                   /* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with REP prefix (relative to loops)
     compared to K8.  Alignment becomes important after 8 bytes for memcpy and
     128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (2),                    /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (3),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),                   /* HI */
   COSTS_N_INSNS (3),                   /* SI */
   COSTS_N_INSNS (4),                   /* DI */
   COSTS_N_INSNS (5)},                  /* other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),                  /* HI */
   COSTS_N_INSNS (42),                  /* SI */
   COSTS_N_INSNS (74),                  /* DI */
   COSTS_N_INSNS (74)},                 /* other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  9,                                    /* MOVE_RATIO */
  4,                                    /* cost for loading QImode using movzbl */
  {3, 4, 3},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {3, 4, 3},                            /* cost of storing integer registers */
  4,                                    /* cost of reg,reg fld/fst */
  {4, 4, 12},                           /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {6, 6, 8},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {3, 3},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {4, 4},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {4, 3, 6},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {4, 4, 5},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  5,                                    /* MMX or SSE register to integer */
  64,                                   /* size of l1 cache.  */
  512,                                  /* size of l2 cache.  */
  64,                                   /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it is probably not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,                                  /* number of parallel prefetches */
  2,                                    /* Branch cost */
  COSTS_N_INSNS (4),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),                   /* cost of FSQRT instruction.  */
  /* K8 has an optimized REP instruction for medium-sized blocks, but for
     very small blocks it is better to use a loop.  For large blocks, a
     libcall can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,                                    /* scalar_stmt_cost.  */
  2,                                    /* scalar_load_cost.  */
  2,                                    /* scalar_store_cost.  */
  5,                                    /* vec_stmt_cost.  */
  0,                                    /* vec_to_scalar_cost.  */
  2,                                    /* scalar_to_vec_cost.  */
  2,                                    /* vec_align_load_cost.  */
  3,                                    /* vec_unalign_load_cost.  */
  3,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  2,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (2),                    /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (3),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),                   /* HI */
   COSTS_N_INSNS (3),                   /* SI */
   COSTS_N_INSNS (4),                   /* DI */
   COSTS_N_INSNS (5)},                  /* other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),                  /* HI */
   COSTS_N_INSNS (51),                  /* SI */
   COSTS_N_INSNS (83),                  /* DI */
   COSTS_N_INSNS (83)},                 /* other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  9,                                    /* MOVE_RATIO */
  4,                                    /* cost for loading QImode using movzbl */
  {3, 4, 3},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {3, 4, 3},                            /* cost of storing integer registers */
  4,                                    /* cost of reg,reg fld/fst */
  {4, 4, 12},                           /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {6, 6, 8},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {3, 3},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {4, 4},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {4, 4, 3},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {4, 4, 5},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  3,                                    /* MMX or SSE register to integer */
                                        /* On K8:
                                            MOVD reg64, xmmreg Double FSTORE 4
                                            MOVD reg32, xmmreg Double FSTORE 4
                                           On AMDFAM10:
                                            MOVD reg64, xmmreg Double FADD 3
                                                               1/1  1/1
                                            MOVD reg32, xmmreg Double FADD 3
                                                               1/1  1/1 */
  64,                                   /* size of l1 cache.  */
  512,                                  /* size of l2 cache.  */
  64,                                   /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it is probably not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,                                  /* number of parallel prefetches */
  2,                                    /* Branch cost */
  COSTS_N_INSNS (4),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),                   /* cost of FSQRT instruction.  */

  /* AMDFAM10 has an optimized REP instruction for medium-sized blocks, but
     for very small blocks it is better to use a loop.  For large blocks, a
     libcall can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,                                    /* scalar_stmt_cost.  */
  2,                                    /* scalar_load_cost.  */
  2,                                    /* scalar_store_cost.  */
  6,                                    /* vec_stmt_cost.  */
  0,                                    /* vec_to_scalar_cost.  */
  2,                                    /* scalar_to_vec_cost.  */
  2,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  2,                                    /* vec_store_cost.  */
  2,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs bdver1_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (2),                    /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (3),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),                   /* HI */
   COSTS_N_INSNS (3),                   /* SI */
   COSTS_N_INSNS (4),                   /* DI */
   COSTS_N_INSNS (5)},                  /* other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),                  /* HI */
   COSTS_N_INSNS (51),                  /* SI */
   COSTS_N_INSNS (83),                  /* DI */
   COSTS_N_INSNS (83)},                 /* other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  9,                                    /* MOVE_RATIO */
  4,                                    /* cost for loading QImode using movzbl */
  {3, 4, 3},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {3, 4, 3},                            /* cost of storing integer registers */
  4,                                    /* cost of reg,reg fld/fst */
  {4, 4, 12},                           /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {6, 6, 8},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {3, 3},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {4, 4},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {4, 4, 3},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {4, 4, 5},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  3,                                    /* MMX or SSE register to integer */
                                        /* On K8:
                                            MOVD reg64, xmmreg Double FSTORE 4
                                            MOVD reg32, xmmreg Double FSTORE 4
                                           On AMDFAM10:
                                            MOVD reg64, xmmreg Double FADD 3
                                                               1/1  1/1
                                            MOVD reg32, xmmreg Double FADD 3
                                                               1/1  1/1 */
  64,                                   /* size of l1 cache.  */
  1024,                                 /* size of l2 cache.  */
  64,                                   /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it is probably not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,                                  /* number of parallel prefetches */
  2,                                    /* Branch cost */
  COSTS_N_INSNS (4),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),                   /* cost of FSQRT instruction.  */

  /* BDVER1 has an optimized REP instruction for medium-sized blocks, but
     for very small blocks it is better to use a loop.  For large blocks, a
     libcall can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,                                    /* scalar_stmt_cost.  */
  2,                                    /* scalar_load_cost.  */
  2,                                    /* scalar_store_cost.  */
  6,                                    /* vec_stmt_cost.  */
  0,                                    /* vec_to_scalar_cost.  */
  2,                                    /* scalar_to_vec_cost.  */
  2,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  2,                                    /* vec_store_cost.  */
  2,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (3),                    /* cost of a lea instruction */
  COSTS_N_INSNS (4),                    /* variable shift costs */
  COSTS_N_INSNS (4),                    /* constant shift costs */
  {COSTS_N_INSNS (15),                  /* cost of starting multiply for QI */
   COSTS_N_INSNS (15),                  /* HI */
   COSTS_N_INSNS (15),                  /* SI */
   COSTS_N_INSNS (15),                  /* DI */
   COSTS_N_INSNS (15)},                 /* other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),                  /* HI */
   COSTS_N_INSNS (56),                  /* SI */
   COSTS_N_INSNS (56),                  /* DI */
   COSTS_N_INSNS (56)},                 /* other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  16,                                   /* "large" insn */
  6,                                    /* MOVE_RATIO */
  2,                                    /* cost for loading QImode using movzbl */
  {4, 5, 4},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {2, 3, 2},                            /* cost of storing integer registers */
  2,                                    /* cost of reg,reg fld/fst */
  {2, 2, 6},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {4, 4, 6},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {2, 2},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {2, 2},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  12,                                   /* cost of moving SSE register */
  {12, 12, 12},                         /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {2, 2, 8},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  10,                                   /* MMX or SSE register to integer */
  8,                                    /* size of l1 cache.  */
  256,                                  /* size of l2 cache.  */
  64,                                   /* size of prefetch block */
  6,                                    /* number of parallel prefetches */
  2,                                    /* Branch cost */
  COSTS_N_INSNS (5),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),                   /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
              {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (1),                    /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (10),                  /* cost of starting multiply for QI */
   COSTS_N_INSNS (10),                  /* HI */
   COSTS_N_INSNS (10),                  /* SI */
   COSTS_N_INSNS (10),                  /* DI */
   COSTS_N_INSNS (10)},                 /* other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),                  /* HI */
   COSTS_N_INSNS (66),                  /* SI */
   COSTS_N_INSNS (66),                  /* DI */
   COSTS_N_INSNS (66)},                 /* other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  16,                                   /* "large" insn */
  17,                                   /* MOVE_RATIO */
  4,                                    /* cost for loading QImode using movzbl */
  {4, 4, 4},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {4, 4, 4},                            /* cost of storing integer registers */
  3,                                    /* cost of reg,reg fld/fst */
  {12, 12, 12},                         /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {4, 4, 4},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  6,                                    /* cost of moving MMX register */
  {12, 12},                             /* cost of loading MMX registers
                                           in SImode and DImode */
  {12, 12},                             /* cost of storing MMX registers
                                           in SImode and DImode */
  6,                                    /* cost of moving SSE register */
  {12, 12, 12},                         /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {12, 12, 12},                         /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  8,                                    /* MMX or SSE register to integer */
  8,                                    /* size of l1 cache.  */
  1024,                                 /* size of l2 cache.  */
  128,                                  /* size of prefetch block */
  8,                                    /* number of parallel prefetches */
  1,                                    /* Branch cost */
  COSTS_N_INSNS (6),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (3),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),                   /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
              {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
              {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs core2_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,                /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (3),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (3),                   /* HI */
   COSTS_N_INSNS (3),                   /* SI */
   COSTS_N_INSNS (3),                   /* DI */
   COSTS_N_INSNS (3)},                  /* other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (22),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (22),                  /* HI */
   COSTS_N_INSNS (22),                  /* SI */
   COSTS_N_INSNS (22),                  /* DI */
   COSTS_N_INSNS (22)},                 /* other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  16,                                   /* MOVE_RATIO */
  2,                                    /* cost for loading QImode using movzbl */
  {6, 6, 6},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {4, 4, 4},                            /* cost of storing integer registers */
  2,                                    /* cost of reg,reg fld/fst */
  {6, 6, 6},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {4, 4, 4},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {6, 6},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {4, 4},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {6, 6, 6},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {4, 4, 4},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  2,                                    /* MMX or SSE register to integer */
  32,                                   /* size of l1 cache.  */
  2048,                                 /* size of l2 cache.  */
  128,                                  /* size of prefetch block */
  8,                                    /* number of parallel prefetches */
  3,                                    /* Branch cost */
  COSTS_N_INSNS (3),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (32),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (58),                   /* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs atom_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,                /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (3),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),                   /* HI */
   COSTS_N_INSNS (3),                   /* SI */
   COSTS_N_INSNS (4),                   /* DI */
   COSTS_N_INSNS (2)},                  /* other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),                  /* HI */
   COSTS_N_INSNS (42),                  /* SI */
   COSTS_N_INSNS (74),                  /* DI */
   COSTS_N_INSNS (74)},                 /* other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  17,                                   /* MOVE_RATIO */
  2,                                    /* cost for loading QImode using movzbl */
  {4, 4, 4},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {4, 4, 4},                            /* cost of storing integer registers */
  4,                                    /* cost of reg,reg fld/fst */
  {12, 12, 12},                         /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {6, 6, 8},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {8, 8},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {8, 8},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {8, 8, 8},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {8, 8, 8},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  5,                                    /* MMX or SSE register to integer */
  32,                                   /* size of l1 cache.  */
  256,                                  /* size of l2 cache.  */
  64,                                   /* size of prefetch block */
  6,                                    /* number of parallel prefetches */
  3,                                    /* Branch cost */
  COSTS_N_INSNS (8),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (8),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),                   /* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

/* Generic64 should produce code tuned for Nocona and K8.  */
static const
struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  /* On all chips taken into consideration lea is 2 cycles and more.  With
     this cost however our current implementation of synth_mult results in
     use of unnecessary temporary registers causing regression on several
     SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1,                /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (3),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),                   /* HI */
   COSTS_N_INSNS (3),                   /* SI */
   COSTS_N_INSNS (4),                   /* DI */
   COSTS_N_INSNS (2)},                  /* other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),                  /* HI */
   COSTS_N_INSNS (42),                  /* SI */
   COSTS_N_INSNS (74),                  /* DI */
   COSTS_N_INSNS (74)},                 /* other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  17,                                   /* MOVE_RATIO */
  4,                                    /* cost for loading QImode using movzbl */
  {4, 4, 4},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {4, 4, 4},                            /* cost of storing integer registers */
  4,                                    /* cost of reg,reg fld/fst */
  {12, 12, 12},                         /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {6, 6, 8},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {8, 8},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {8, 8},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {8, 8, 8},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {8, 8, 8},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  5,                                    /* MMX or SSE register to integer */
  32,                                   /* size of l1 cache.  */
  512,                                  /* size of l2 cache.  */
  64,                                   /* size of prefetch block */
  6,                                    /* number of parallel prefetches */
  /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
     value is increased to the perhaps more appropriate value of 5.  */
  3,                                    /* Branch cost */
  COSTS_N_INSNS (8),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (8),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),                   /* cost of FSQRT instruction.  */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

/* Generic32 should produce code tuned for PPro, Pentium4, Nocona,
   Athlon and K8.  */
static const
struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,                /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (3),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),                   /* HI */
   COSTS_N_INSNS (3),                   /* SI */
   COSTS_N_INSNS (4),                   /* DI */
   COSTS_N_INSNS (2)},                  /* other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),                  /* HI */
   COSTS_N_INSNS (42),                  /* SI */
   COSTS_N_INSNS (74),                  /* DI */
   COSTS_N_INSNS (74)},                 /* other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  17,                                   /* MOVE_RATIO */
  4,                                    /* cost for loading QImode using movzbl */
  {4, 4, 4},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {4, 4, 4},                            /* cost of storing integer registers */
  4,                                    /* cost of reg,reg fld/fst */
  {12, 12, 12},                         /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {6, 6, 8},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {8, 8},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {8, 8},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {8, 8, 8},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {8, 8, 8},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  5,                                    /* MMX or SSE register to integer */
  32,                                   /* size of l1 cache.  */
  256,                                  /* size of l2 cache.  */
  64,                                   /* size of prefetch block */
  6,                                    /* number of parallel prefetches */
  3,                                    /* Branch cost */
  COSTS_N_INSNS (8),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (8),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),                   /* cost of FSQRT instruction.  */
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

const struct processor_costs *ix86_cost = &pentium_cost;
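
/* The initializer above is only the compile-time default; a sketch of
   the later retargeting (the definitive code lives in the option-override
   logic of this file, and processor_target_table / ix86_tune are assumed
   names here, not guaranteed by this excerpt):

     ix86_cost = optimize_size ? &ix86_size_cost
                               : processor_target_table[ix86_tune].cost;  */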

/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
#define m_CORE2 (1<<PROCESSOR_CORE2)
#define m_ATOM (1<<PROCESSOR_ATOM)

#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
#define m_BDVER1 (1<<PROCESSOR_BDVER1)
#define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10 | m_BDVER1)

#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)

/* Generic instruction choice should be common subset of supported CPUs
   (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)

/* Feature tests against the various tunings.  */
unsigned char ix86_tune_features[X86_TUNE_LAST];
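
/* How these masks are consumed (an illustrative sketch, not the definitive
   code; ix86_tune, the -mtune processor enum, is assumed here): each entry
   of initial_ix86_tune_features below is a bitmask of the processors a
   feature is enabled for, and ix86_tune_features[] is filled in by testing
   the active processor's bit, roughly

     for (i = 0; i < X86_TUNE_LAST; i++)
       ix86_tune_features[i]
         = !!(initial_ix86_tune_features[i] & (1U << ix86_tune));

   after which the TARGET_* tuning macros read ix86_tune_features[].  */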
1380 /* Feature tests against the various tunings used to create ix86_tune_features
1381 based on the processor mask. */
1382 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1383 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1384 negatively, so enabling for Generic64 seems like good code size
1385 tradeoff. We can't enable it for 32bit generic because it does not
1386 work well with PPro base chips. */
1387 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,
1389 /* X86_TUNE_PUSH_MEMORY */
1390 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1391 | m_NOCONA | m_CORE2 | m_GENERIC,
1393 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1396 /* X86_TUNE_UNROLL_STRLEN */
1397 m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
1398 | m_CORE2 | m_GENERIC,
1400 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1401 m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
1403 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1404 on simulation result. But after P4 was made, no performance benefit
1405 was observed with branch hints. It also increases the code size.
1406 As a result, icc never generates branch hints. */
1409 /* X86_TUNE_DOUBLE_WITH_ADD */
1412 /* X86_TUNE_USE_SAHF */
1413 m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER1 | m_PENT4
1414 | m_NOCONA | m_CORE2 | m_GENERIC,
1416 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1417 partial dependencies. */
1418 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
1419 | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1421 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1422 register stalls on Generic32 compilation setting as well. However
1423 in current implementation the partial register stalls are not eliminated
1424 very well - they can be introduced via subregs synthesized by combine
1425 and can happen in caller/callee saving sequences. Because this option
1426 pays back little on PPro based chips and is in conflict with partial reg
1427 dependencies used by Athlon/P4 based chips, it is better to leave it off
1428 for generic32 for now. */
1431 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1432 m_CORE2 | m_GENERIC,
1434 /* X86_TUNE_USE_HIMODE_FIOP */
1435 m_386 | m_486 | m_K6_GEODE,
1437 /* X86_TUNE_USE_SIMODE_FIOP */
1438 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),
1440 /* X86_TUNE_USE_MOV0 */
1443 /* X86_TUNE_USE_CLTD */
1444 ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),
1446 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1449 /* X86_TUNE_SPLIT_LONG_MOVES */
1452 /* X86_TUNE_READ_MODIFY_WRITE */
1455 /* X86_TUNE_READ_MODIFY */
1458 /* X86_TUNE_PROMOTE_QIMODE */
1459 m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
1460 | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,
1462 /* X86_TUNE_FAST_PREFIX */
1463 ~(m_PENT | m_486 | m_386),
1465 /* X86_TUNE_SINGLE_STRINGOP */
1466 m_386 | m_PENT4 | m_NOCONA,
1468 /* X86_TUNE_QIMODE_MATH */
  /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
     register stalls.  Just like X86_TUNE_PARTIAL_REG_STALL, this option
     might be considered for Generic32 if our scheme for avoiding partial
     stalls were more effective.  */

  /* X86_TUNE_PROMOTE_QI_REGS */

  /* X86_TUNE_PROMOTE_HI_REGS */

  /* X86_TUNE_SINGLE_POP: Enable if single pop insn is preferred
     over esp addition.  */
  m_386 | m_486 | m_PENT | m_PPRO,

  /* X86_TUNE_DOUBLE_POP: Enable if double pop insn is preferred
     over esp addition.  */

  /* X86_TUNE_SINGLE_PUSH: Enable if single push insn is preferred
     over esp subtraction.  */
  m_386 | m_486 | m_PENT | m_K6_GEODE,

  /* X86_TUNE_DOUBLE_PUSH: Enable if double push insn is preferred
     over esp subtraction.  */
  m_PENT | m_K6_GEODE,

  /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
     for DFmode copies.  */
  ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
    | m_GENERIC | m_GEODE),

  /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
  m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
  /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
     conflict here between PPro/Pentium4-based chips that treat 128-bit
     SSE registers as single units and K8-based chips that divide SSE
     registers into two 64-bit halves.  This knob promotes all store
     destinations to be 128-bit so as to allow register renaming on
     128-bit SSE units, but usually results in one extra microop on
     64-bit SSE units.  Experimental results show that disabling this
     option on P4 brings over 20% SPECfp regression, while enabling it on
     K8 brings roughly 2.4% regression that can be partly masked by
     careful scheduling of moves.  */
  m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
  | m_AMDFAM10 | m_BDVER1,

  /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL */
  m_AMDFAM10 | m_BDVER1,

  /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL */

  /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */

  /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
     are resolved on SSE register parts instead of whole registers, so we may
     maintain just the lower part of scalar values in the proper format,
     leaving the upper part undefined.  */

  /* X86_TUNE_SSE_TYPELESS_STORES */

  /* X86_TUNE_SSE_LOAD0_BY_PXOR */
  m_PPRO | m_PENT4 | m_NOCONA,
  /* X86_TUNE_MEMORY_MISMATCH_STALL */
  m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_PROLOGUE_USING_MOVE */
  m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_EPILOGUE_USING_MOVE */
  m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SHIFT1 */

  /* X86_TUNE_USE_FFREEP */

  /* X86_TUNE_INTER_UNIT_MOVES */
  ~(m_AMD_MULTIPLE | m_GENERIC),

  /* X86_TUNE_INTER_UNIT_CONVERSIONS */
  ~(m_AMDFAM10 | m_BDVER1),
  /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
     than 4 branch instructions in the 16-byte window.  */
  m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
  | m_GENERIC,

  /* X86_TUNE_SCHEDULE */
  m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
  | m_GENERIC,
  /* X86_TUNE_USE_BT */
  m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_INCDEC */
  ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),

  /* X86_TUNE_PAD_RETURNS */
  m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,

  /* X86_TUNE_PAD_SHORT_FUNCTION: Pad short functions.  */

  /* X86_TUNE_EXT_80387_CONSTANTS */
  m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
  | m_CORE2 | m_GENERIC,
  /* X86_TUNE_SHORTEN_X87_SSE */

  /* X86_TUNE_AVOID_VECTOR_DECODE */

  /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for
     HImode and SImode multiplies, but the 386 and 486 do HImode multiplies
     faster.  */

  /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of a 32-bit constant and memory is
     a vector path on AMD machines.  */
  m_K8 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1,

  /* X86_TUNE_SLOW_IMUL_IMM8: Imul of an 8-bit constant is a vector path
     on AMD machines.  */
  m_K8 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1,

  /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
     than a MOV.  */

  /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
     but is one byte longer.  */

  /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
     operand that cannot be represented using a modRM byte.  The XOR
     replacement is long decoded, so this split helps here as well.  */

  /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
     from FP to FP.  */
  m_AMDFAM10 | m_GENERIC,

  /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
     from integer to FP.  */

  /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
     with a subsequent conditional jump instruction into a single
     compare-and-branch uop.  */

  /* X86_TUNE_OPT_AGU: Optimize for the Address Generation Unit.  This flag
     will impact LEA instruction selection.  */

  /* X86_TUNE_VECTORIZE_DOUBLE: Enable double precision vector
     instructions.  */
/* Feature tests against the various architecture variations.  */
unsigned char ix86_arch_features[X86_ARCH_LAST];

/* Feature tests against the various architecture variations, used to create
   ix86_arch_features based on the processor mask.  */
static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
  /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro.  */
  ~(m_386 | m_486 | m_PENT | m_K6),

  /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486.  */

  /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */

  /* X86_ARCH_XADD: Exchange and add was added for 80486.  */

  /* X86_ARCH_BSWAP: Byteswap was added for 80486.  */
static const unsigned int x86_accumulate_outgoing_args
  = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
    | m_GENERIC;
static const unsigned int x86_arch_always_fancy_math_387
  = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
    | m_NOCONA | m_CORE2 | m_GENERIC;

static enum stringop_alg stringop_alg = no_stringop;

/* In case the average insn count for a single function invocation is
   lower than this constant, emit fast (but longer) prologue and
   epilogue code.  */
#define FAST_PROLOGUE_INSN_COUNT 20
/* Names for the 8-bit (low), 8-bit (high), and 16-bit registers,
   respectively.  */
static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
static const char *const hi_reg_name[] = HI_REGISTER_NAMES;

/* Array of the smallest class containing reg number REGNO, indexed by
   REGNO.  Used by REGNO_REG_CLASS in i386.h.  */
enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
{
  /* ax, dx, cx, bx */
  AREG, DREG, CREG, BREG,
  /* si, di, bp, sp */
  SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
  FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
  FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
  /* flags, fpsr, fpcr, frame */
  NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
  SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  /* SSE REX registers */
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
};
/* The "default" register map used in 32-bit mode.  */

int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 4, 5,		/* general regs */
  12, 13, 14, 15, 16, 17, 18, 19,	/* fp regs */
  -1, -1, -1, -1, -1,			/* arg, flags, fpsr, fpcr, frame */
  21, 22, 23, 24, 25, 26, 27, 28,	/* SSE */
  29, 30, 31, 32, 33, 34, 35, 36,	/* MMX */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended SSE registers */
};
/* The "default" register map used in 64-bit mode.  */

int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 1, 2, 3, 4, 5, 6, 7,		/* general regs */
  33, 34, 35, 36, 37, 38, 39, 40,	/* fp regs */
  -1, -1, -1, -1, -1,			/* arg, flags, fpsr, fpcr, frame */
  17, 18, 19, 20, 21, 22, 23, 24,	/* SSE */
  41, 42, 43, 44, 45, 46, 47, 48,	/* MMX */
  8, 9, 10, 11, 12, 13, 14, 15,		/* extended integer registers */
  25, 26, 27, 28, 29, 30, 31, 32,	/* extended SSE registers */
};
/* Define the register numbers to be used in Dwarf debugging information.
   The SVR4 reference port C compiler uses the following register numbers
   in its Dwarf output code:
	0 for %eax (gcc regno = 0)
	1 for %ecx (gcc regno = 2)
	2 for %edx (gcc regno = 1)
	3 for %ebx (gcc regno = 3)
	4 for %esp (gcc regno = 7)
	5 for %ebp (gcc regno = 6)
	6 for %esi (gcc regno = 4)
	7 for %edi (gcc regno = 5)
   The following three DWARF register numbers are never generated by
   the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
   believes these numbers have these meanings.
	8 for %eip (no gcc equivalent)
	9 for %eflags (gcc regno = 17)
	10 for %trapno (no gcc equivalent)
   It is not at all clear how we should number the FP stack registers
   for the x86 architecture.  If the version of SDB on x86/svr4 were
   a bit less brain dead with respect to floating-point then we would
   have a precedent to follow with respect to DWARF register numbers
   for x86 FP registers, but the SDB on x86/svr4 is so completely
   broken with respect to FP registers that it is hardly worth thinking
   of it as something to strive for compatibility with.
   The version of x86/svr4 SDB I have at the moment does (partially)
   seem to believe that DWARF register number 11 is associated with
   the x86 register %st(0), but that's about all.  Higher DWARF
   register numbers don't seem to be associated with anything in
   particular, and even for DWARF regno 11, SDB only seems to
   understand that it should say that a variable lives in %st(0) (when
   asked via an `=' command) if we said it was in DWARF regno 11,
   but SDB still prints garbage when asked for the value of the
   variable in question (via a `/' command).
   (Also note that the labels SDB prints for various FP stack regs
   when doing an `x' command are all wrong.)
   Note that these problems generally don't affect the native SVR4
   C compiler because it doesn't allow the use of -O with -g and
   because when it is *not* optimizing, it allocates a memory
   location for each floating-point variable, and the memory
   location is what gets described in the DWARF AT_location
   attribute for the variable in question.
   Regardless of the severe mental illness of the x86/svr4 SDB, we
   do something sensible here and we use the following DWARF
   register numbers.  Note that these are all stack-top-relative
   numbers:
	11 for %st(0) (gcc regno = 8)
	12 for %st(1) (gcc regno = 9)
	13 for %st(2) (gcc regno = 10)
	14 for %st(3) (gcc regno = 11)
	15 for %st(4) (gcc regno = 12)
	16 for %st(5) (gcc regno = 13)
	17 for %st(6) (gcc regno = 14)
	18 for %st(7) (gcc regno = 15)  */
int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 5, 4,		/* general regs */
  11, 12, 13, 14, 15, 16, 17, 18,	/* fp regs */
  -1, 9, -1, -1, -1,			/* arg, flags, fpsr, fpcr, frame */
  21, 22, 23, 24, 25, 26, 27, 28,	/* SSE registers */
  29, 30, 31, 32, 33, 34, 35, 36,	/* MMX registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended SSE registers */
};
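/* Illustrative sketch (hypothetical helper, not part of the port): a GCC
   register number is turned into a DWARF number by indexing one of the
   maps above; the real lookups are wrapped by macros in i386.h, and which
   32-bit map applies depends on the target OS (the svr4 map is merely an
   example here).  */
static inline int
example_dwarf_regno (unsigned int regno)
{
  gcc_assert (regno < FIRST_PSEUDO_REGISTER);
  return TARGET_64BIT ? dbx64_register_map[regno]
		      : svr4_dbx_register_map[regno];
}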
/* Define parameter passing and return registers.  */

static int const x86_64_int_parameter_registers[6] =
{
  DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
};

static int const x86_64_ms_abi_int_parameter_registers[4] =
{
  CX_REG, DX_REG, R8_REG, R9_REG
};

static int const x86_64_int_return_registers[4] =
{
  AX_REG, DX_REG, DI_REG, SI_REG
};
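/* Illustrative sketch (hypothetical helper, not used by the port): the
   register holding the N-th integer argument under either 64-bit calling
   ABI, or -1 once arguments move to the stack; the MS ABI passes four
   integer arguments in registers where the SysV ABI passes six.  */
static inline int
example_int_parm_regno (enum calling_abi abi, int n)
{
  if (abi == MS_ABI)
    return n < 4 ? x86_64_ms_abi_int_parameter_registers[n] : -1;
  return n < 6 ? x86_64_int_parameter_registers[n] : -1;
}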
/* Define the structure for the machine field in struct function.  */

struct GTY(()) stack_local_entry {
  unsigned short mode;
  struct stack_local_entry *next;
};

/* Structure describing the stack frame layout.
   Stack grows downward:

   saved static chain			if ix86_static_chain_on_stack

   saved frame pointer			if frame_pointer_needed
					<- HARD_FRAME_POINTER
					<- sse_regs_save_offset
   [va_arg registers]		|
   [padding2]			| = to_allocate  */
struct ix86_frame
{
  int outgoing_arguments_size;
  HOST_WIDE_INT frame;

  /* The offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT hard_frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;
  HOST_WIDE_INT reg_save_offset;
  HOST_WIDE_INT sse_reg_save_offset;

  /* When save_regs_using_mov is set, emit the prologue using
     move instructions instead of push instructions.  */
  bool save_regs_using_mov;
};
/* Code model option.  */
enum cmodel ix86_cmodel;

enum asm_dialect ix86_asm_dialect = ASM_ATT;

enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;

/* Which unit we are generating floating point math for.  */
enum fpmath_unit ix86_fpmath;

/* Which cpu we are scheduling for.  */
enum attr_cpu ix86_schedule;

/* Which cpu we are optimizing for.  */
enum processor_type ix86_tune;

/* Which instruction set architecture to use.  */
enum processor_type ix86_arch;

/* True if the SSE prefetch instruction is not a NOOP.  */
int x86_prefetch_sse;

/* ix86_regparm_string as a number.  */
static int ix86_regparm;

/* -mstackrealign option.  */
extern int ix86_force_align_arg_pointer;
static const char ix86_force_align_arg_pointer_string[]
  = "force_align_arg_pointer";
static rtx (*ix86_gen_leave) (void);
static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
static rtx (*ix86_gen_allocate_stack_worker) (rtx, rtx);
static rtx (*ix86_gen_adjust_stack_and_probe) (rtx, rtx, rtx);
static rtx (*ix86_gen_probe_stack_range) (rtx, rtx, rtx);

/* Preferred alignment for stack boundary in bits.  */
unsigned int ix86_preferred_stack_boundary;

/* Alignment for incoming stack boundary in bits specified at
   command line.  */
static unsigned int ix86_user_incoming_stack_boundary;

/* Default alignment for incoming stack boundary in bits.  */
static unsigned int ix86_default_incoming_stack_boundary;

/* Alignment for incoming stack boundary in bits.  */
unsigned int ix86_incoming_stack_boundary;

/* The ABI used by the target.  */
enum calling_abi ix86_abi;

/* Values 1-5: see jump.c.  */
int ix86_branch_cost;

/* Calling-ABI-specific va_list type nodes.  */
static GTY(()) tree sysv_va_list_type_node;
static GTY(()) tree ms_va_list_type_node;

/* Variables that are this size or smaller are put in the data/bss
   or ldata/lbss sections.  */

int ix86_section_threshold = 65536;

/* Prefix built by ASM_GENERATE_INTERNAL_LABEL.  */
char internal_label_prefix[16];
int internal_label_prefix_len;

/* Fence to use after loop using movnt.  */
static GTY(()) tree x86_mfence;
/* Register class used for passing a given 64-bit part of the argument.
   These represent classes as documented by the PS ABI, with the exception
   of the SSESF and SSEDF classes, which are basically the SSE class:
   GCC will just use SF or DFmode moves instead of DImode moves to avoid
   reformatting penalties.

   Similarly we play games with the INTEGERSI_CLASS to use cheaper SImode
   moves whenever possible (the upper half does contain padding).  */
enum x86_64_reg_class
  {
    X86_64_INTEGER_CLASS,
    X86_64_INTEGERSI_CLASS,
    X86_64_COMPLEX_X87_CLASS,
  };
#define MAX_CLASSES 4

/* Table of the constants used by fldpi, fldln2, etc.  */
static REAL_VALUE_TYPE ext_80387_constants_table [5];
static bool ext_80387_constants_init = 0;
static struct machine_function * ix86_init_machine_status (void);
static rtx ix86_function_value (const_tree, const_tree, bool);
static bool ix86_function_value_regno_p (const unsigned int);
static rtx ix86_static_chain (const_tree, bool);
static int ix86_function_regparm (const_tree, const_tree);
static void ix86_compute_frame_layout (struct ix86_frame *);
static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
						 rtx, rtx, int);
static void ix86_add_new_builtins (int);
static rtx ix86_expand_vec_perm_builtin (tree);
static tree ix86_canonical_va_list_type (tree);
static void predict_jump (int);
static unsigned int split_stack_prologue_scratch_regno (void);
static bool i386_asm_output_addr_const_extra (FILE *, rtx);

enum ix86_function_specific_strings
{
  IX86_FUNCTION_SPECIFIC_ARCH,
  IX86_FUNCTION_SPECIFIC_TUNE,
  IX86_FUNCTION_SPECIFIC_FPMATH,
  IX86_FUNCTION_SPECIFIC_MAX
};

static char *ix86_target_string (int, int, const char *, const char *,
				 const char *, bool);
static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
static void ix86_function_specific_save (struct cl_target_option *);
static void ix86_function_specific_restore (struct cl_target_option *);
static void ix86_function_specific_print (FILE *, int,
					  struct cl_target_option *);
static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
static bool ix86_can_inline_p (tree, tree);
static void ix86_set_current_function (tree);
static unsigned int ix86_minimum_incoming_stack_boundary (bool);

static enum calling_abi ix86_function_abi (const_tree);
#ifndef SUBTARGET32_DEFAULT_CPU
#define SUBTARGET32_DEFAULT_CPU "i386"
#endif

/* The svr4 ABI for the i386 says that records and unions are returned
   in memory.  */
#ifndef DEFAULT_PCC_STRUCT_RETURN
#define DEFAULT_PCC_STRUCT_RETURN 1
#endif

/* Whether -mtune= or -march= were specified.  */
static int ix86_tune_defaulted;
static int ix86_arch_specified;

/* A mask of ix86_isa_flags that includes bit X if X
   was set or cleared on the command line.  */
static int ix86_isa_flags_explicit;
/* Define a set of ISAs which are available when a given ISA is
   enabled.  MMX and SSE ISAs are handled separately.  */

#define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
#define OPTION_MASK_ISA_3DNOW_SET \
  (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)

#define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
#define OPTION_MASK_ISA_SSE2_SET \
  (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
#define OPTION_MASK_ISA_SSE3_SET \
  (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
#define OPTION_MASK_ISA_SSSE3_SET \
  (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
#define OPTION_MASK_ISA_SSE4_1_SET \
  (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
#define OPTION_MASK_ISA_SSE4_2_SET \
  (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
#define OPTION_MASK_ISA_AVX_SET \
  (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
#define OPTION_MASK_ISA_FMA_SET \
  (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)

/* SSE4 includes both SSE4.1 and SSE4.2.  -msse4 should be the same
   as -msse4.2.  */
#define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET

#define OPTION_MASK_ISA_SSE4A_SET \
  (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
#define OPTION_MASK_ISA_FMA4_SET \
  (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
   | OPTION_MASK_ISA_AVX_SET)
#define OPTION_MASK_ISA_XOP_SET \
  (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
#define OPTION_MASK_ISA_LWP_SET \
  OPTION_MASK_ISA_LWP

/* AES and PCLMUL need SSE2 because they use XMM registers.  */
#define OPTION_MASK_ISA_AES_SET \
  (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
#define OPTION_MASK_ISA_PCLMUL_SET \
  (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)

#define OPTION_MASK_ISA_ABM_SET \
  (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)

#define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
#define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
#define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
#define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
#define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32

#define OPTION_MASK_ISA_FSGSBASE_SET OPTION_MASK_ISA_FSGSBASE
#define OPTION_MASK_ISA_RDRND_SET OPTION_MASK_ISA_RDRND
#define OPTION_MASK_ISA_F16C_SET \
  (OPTION_MASK_ISA_F16C | OPTION_MASK_ISA_AVX_SET)
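/* A hedged compile-time illustration (not in the original source): the
   _SET masks are transitive by macro expansion, so -msse4.1 alone pulls
   in the whole SSSE3/SSE3/SSE2/SSE chain.  The negative array size would
   make this fail to compile if the expansion ever changed.  */
extern char example_sse4_1_set_chain_check
  [OPTION_MASK_ISA_SSE4_1_SET
   == (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3
       | OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2
       | OPTION_MASK_ISA_SSE) ? 1 : -1];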
/* Define a set of ISAs which aren't available when a given ISA is
   disabled.  MMX and SSE ISAs are handled separately.  */

#define OPTION_MASK_ISA_MMX_UNSET \
  (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
#define OPTION_MASK_ISA_3DNOW_UNSET \
  (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
#define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A

#define OPTION_MASK_ISA_SSE_UNSET \
  (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
#define OPTION_MASK_ISA_SSE2_UNSET \
  (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
#define OPTION_MASK_ISA_SSE3_UNSET \
  (OPTION_MASK_ISA_SSE3 \
   | OPTION_MASK_ISA_SSSE3_UNSET \
   | OPTION_MASK_ISA_SSE4A_UNSET)
#define OPTION_MASK_ISA_SSSE3_UNSET \
  (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
#define OPTION_MASK_ISA_SSE4_1_UNSET \
  (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
#define OPTION_MASK_ISA_SSE4_2_UNSET \
  (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET)
#define OPTION_MASK_ISA_AVX_UNSET \
  (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
   | OPTION_MASK_ISA_FMA4_UNSET | OPTION_MASK_ISA_F16C_UNSET)
#define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA

/* SSE4 includes both SSE4.1 and SSE4.2.  -mno-sse4 should be the same
   as -mno-sse4.1.  */
#define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET

#define OPTION_MASK_ISA_SSE4A_UNSET \
  (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)

#define OPTION_MASK_ISA_FMA4_UNSET \
  (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
#define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
#define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP

#define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
#define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
#define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
#define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
#define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
#define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
#define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
#define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32

#define OPTION_MASK_ISA_FSGSBASE_UNSET OPTION_MASK_ISA_FSGSBASE
#define OPTION_MASK_ISA_RDRND_UNSET OPTION_MASK_ISA_RDRND
#define OPTION_MASK_ISA_F16C_UNSET OPTION_MASK_ISA_F16C
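/* A hedged compile-time illustration (not in the original source) of the
   inverse direction: the _UNSET chains propagate upward, so -mno-sse
   also drops everything built on SSE, AVX included.  */
extern char example_mno_sse_drops_avx_check
  [(OPTION_MASK_ISA_SSE_UNSET & OPTION_MASK_ISA_AVX) ? 1 : -1];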
/* Vectorization library interface and handlers.  */
static tree (*ix86_veclib_handler) (enum built_in_function, tree, tree);

static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);

/* Processor target table, indexed by processor number.  */
struct ptt
{
  const struct processor_costs *cost;	/* Processor costs */
  const int align_loop;			/* Default alignments.  */
  const int align_loop_max_skip;
  const int align_jump;
  const int align_jump_max_skip;
  const int align_func;
};
static const struct ptt processor_target_table[PROCESSOR_max] =
{
  {&i386_cost, 4, 3, 4, 3, 4},
  {&i486_cost, 16, 15, 16, 15, 16},
  {&pentium_cost, 16, 7, 16, 7, 16},
  {&pentiumpro_cost, 16, 15, 16, 10, 16},
  {&geode_cost, 0, 0, 0, 0, 0},
  {&k6_cost, 32, 7, 32, 7, 32},
  {&athlon_cost, 16, 7, 16, 7, 16},
  {&pentium4_cost, 0, 0, 0, 0, 0},
  {&k8_cost, 16, 7, 16, 7, 16},
  {&nocona_cost, 0, 0, 0, 0, 0},
  {&core2_cost, 16, 10, 16, 10, 16},
  {&generic32_cost, 16, 7, 16, 7, 16},
  {&generic64_cost, 16, 10, 16, 10, 16},
  {&amdfam10_cost, 32, 24, 32, 7, 32},
  {&bdver1_cost, 32, 24, 32, 7, 32},
  {&atom_cost, 16, 7, 16, 7, 16}
};
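/* Worked example (illustrative only): tuning for core2 selects the row
   {&core2_cost, 16, 10, 16, 10, 16}, i.e. 16-byte loop, jump and function
   alignment with a 10-byte maximum skip; ix86_option_override_internal
   below copies these into ix86_cost and the align_* variables.  */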
static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
/* Return true if a red-zone is in use.  */

static inline bool
ix86_using_red_zone (void)
{
  return TARGET_RED_ZONE && !TARGET_64BIT_MS_ABI;
}
/* Implement TARGET_HANDLE_OPTION.  */

static bool
ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
{
      ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_FSGSBASE_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_FSGSBASE_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_FSGSBASE_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_FSGSBASE_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_RDRND_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_RDRND_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_RDRND_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_RDRND_UNSET;

      ix86_isa_flags |= OPTION_MASK_ISA_F16C_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_F16C_SET;

      ix86_isa_flags &= ~OPTION_MASK_ISA_F16C_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_F16C_UNSET;
/* Return a string that documents the current -m options.  The caller is
   responsible for freeing the string.  */

static char *
ix86_target_string (int isa, int flags, const char *arch, const char *tune,
		    const char *fpmath, bool add_nl_p)
{
  struct ix86_target_opts
  {
    const char *option;		/* option string */
    int mask;			/* isa mask options */
  };

  /* This table is ordered so that options like -msse4.2 that imply
     preceding options will match those first.  */
  static struct ix86_target_opts isa_opts[] =
  {
    { "-m64",		OPTION_MASK_ISA_64BIT },
    { "-mfma4",		OPTION_MASK_ISA_FMA4 },
    { "-mfma",		OPTION_MASK_ISA_FMA },
    { "-mxop",		OPTION_MASK_ISA_XOP },
    { "-mlwp",		OPTION_MASK_ISA_LWP },
    { "-msse4a",	OPTION_MASK_ISA_SSE4A },
    { "-msse4.2",	OPTION_MASK_ISA_SSE4_2 },
    { "-msse4.1",	OPTION_MASK_ISA_SSE4_1 },
    { "-mssse3",	OPTION_MASK_ISA_SSSE3 },
    { "-msse3",		OPTION_MASK_ISA_SSE3 },
    { "-msse2",		OPTION_MASK_ISA_SSE2 },
    { "-msse",		OPTION_MASK_ISA_SSE },
    { "-m3dnow",	OPTION_MASK_ISA_3DNOW },
    { "-m3dnowa",	OPTION_MASK_ISA_3DNOW_A },
    { "-mmmx",		OPTION_MASK_ISA_MMX },
    { "-mabm",		OPTION_MASK_ISA_ABM },
    { "-mpopcnt",	OPTION_MASK_ISA_POPCNT },
    { "-mmovbe",	OPTION_MASK_ISA_MOVBE },
    { "-mcrc32",	OPTION_MASK_ISA_CRC32 },
    { "-maes",		OPTION_MASK_ISA_AES },
    { "-mpclmul",	OPTION_MASK_ISA_PCLMUL },
    { "-mfsgsbase",	OPTION_MASK_ISA_FSGSBASE },
    { "-mrdrnd",	OPTION_MASK_ISA_RDRND },
    { "-mf16c",		OPTION_MASK_ISA_F16C },
  };

  static struct ix86_target_opts flag_opts[] =
  {
    { "-m128bit-long-double",		MASK_128BIT_LONG_DOUBLE },
    { "-m80387",			MASK_80387 },
    { "-maccumulate-outgoing-args",	MASK_ACCUMULATE_OUTGOING_ARGS },
    { "-malign-double",			MASK_ALIGN_DOUBLE },
    { "-mcld",				MASK_CLD },
    { "-mfp-ret-in-387",		MASK_FLOAT_RETURNS },
    { "-mieee-fp",			MASK_IEEE_FP },
    { "-minline-all-stringops",		MASK_INLINE_ALL_STRINGOPS },
    { "-minline-stringops-dynamically",	MASK_INLINE_STRINGOPS_DYNAMICALLY },
    { "-mms-bitfields",			MASK_MS_BITFIELD_LAYOUT },
    { "-mno-align-stringops",		MASK_NO_ALIGN_STRINGOPS },
    { "-mno-fancy-math-387",		MASK_NO_FANCY_MATH_387 },
    { "-mno-push-args",			MASK_NO_PUSH_ARGS },
    { "-mno-red-zone",			MASK_NO_RED_ZONE },
    { "-momit-leaf-frame-pointer",	MASK_OMIT_LEAF_FRAME_POINTER },
    { "-mrecip",			MASK_RECIP },
    { "-mrtd",				MASK_RTD },
    { "-msseregparm",			MASK_SSEREGPARM },
    { "-mstack-arg-probe",		MASK_STACK_PROBE },
    { "-mtls-direct-seg-refs",		MASK_TLS_DIRECT_SEG_REFS },
    { "-mvect8-ret-in-mem",		MASK_VECT8_RETURNS },
    { "-m8bit-idiv",			MASK_USE_8BIT_IDIV },
  };

  const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];

  char isa_other[40];
  char target_other[40];
  unsigned num = 0;
  unsigned i, j;
  char *ret;
  char *ptr;
  size_t len;
  size_t line_len;
  size_t sep_len;
  memset (opts, '\0', sizeof (opts));

  /* Add -march= option.  */
  if (arch)
    {
      opts[num][0] = "-march=";
      opts[num++][1] = arch;
    }

  /* Add -mtune= option.  */
  if (tune)
    {
      opts[num][0] = "-mtune=";
      opts[num++][1] = tune;
    }

  /* Pick out the options in isa options.  */
  for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
    {
      if ((isa & isa_opts[i].mask) != 0)
	{
	  opts[num++][0] = isa_opts[i].option;
	  isa &= ~ isa_opts[i].mask;
	}
    }

  if (isa && add_nl_p)
    {
      opts[num++][0] = isa_other;
      sprintf (isa_other, "(other isa: %#x)", isa);
    }

  /* Add flag options.  */
  for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
    {
      if ((flags & flag_opts[i].mask) != 0)
	{
	  opts[num++][0] = flag_opts[i].option;
	  flags &= ~ flag_opts[i].mask;
	}
    }

  if (flags && add_nl_p)
    {
      opts[num++][0] = target_other;
      sprintf (target_other, "(other flags: %#x)", flags);
    }

  /* Add -fpmath= option.  */
  if (fpmath)
    {
      opts[num][0] = "-mfpmath=";
      opts[num++][1] = fpmath;
    }

  /* Any options?  */
  if (num == 0)
    return NULL;

  gcc_assert (num < ARRAY_SIZE (opts));
  /* Size the string.  */
  len = 0;
  sep_len = (add_nl_p) ? 3 : 1;
  for (i = 0; i < num; i++)
    {
      len += sep_len;
      for (j = 0; j < 2; j++)
	if (opts[i][j])
	  len += strlen (opts[i][j]);
    }

  /* Build the string.  */
  ret = ptr = (char *) xmalloc (len);
  line_len = 0;

  for (i = 0; i < num; i++)
    {
      size_t len2[2];

      for (j = 0; j < 2; j++)
	len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;

      if (i != 0)
	{
	  *ptr++ = ' ';
	  line_len++;

	  if (add_nl_p && line_len + len2[0] + len2[1] > 70)
	    {
	      *ptr++ = '\n';
	      line_len = 0;
	    }
	}

      for (j = 0; j < 2; j++)
	if (opts[i][j])
	  {
	    memcpy (ptr, opts[i][j], len2[j]);
	    ptr += len2[j];
	    line_len += len2[j];
	  }
    }

  *ptr = '\0';
  gcc_assert (ret + len >= ptr);

  return ret;
}
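/* Usage sketch (hypothetical, mirrors ix86_debug_options below): the
   returned buffer is heap-allocated and owned by the caller.  */
#if 0
  {
    char *opts = ix86_target_string (ix86_isa_flags, target_flags,
				     ix86_arch_string, ix86_tune_string,
				     ix86_fpmath_string, false);
    if (opts)
      free (opts);
  }
#endif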
/* Return TRUE if software prefetching is beneficial for the
   current CPU.  */

static bool
software_prefetching_beneficial_p (void)
{
  switch (ix86_tune)
    {
    case PROCESSOR_GEODE:
    case PROCESSOR_ATHLON:
    case PROCESSOR_AMDFAM10:
      return true;

    default:
      return false;
    }
}

/* Return true if profiling code should be emitted before the prologue,
   and false otherwise.
   Note: for x86, the "hotfix" variant is not supported (it is sorried).  */

static bool
ix86_profile_before_prologue (void)
{
  return flag_fentry != 0;
}

/* Function that is callable from the debugger to print the current
   options.  */

void
ix86_debug_options (void)
{
  char *opts = ix86_target_string (ix86_isa_flags, target_flags,
				   ix86_arch_string, ix86_tune_string,
				   ix86_fpmath_string, true);

  if (opts)
    {
      fprintf (stderr, "%s\n\n", opts);
      free (opts);
    }
  else
    fputs ("<no options>\n\n", stderr);
}
/* Override various settings based on options.  If MAIN_ARGS_P, the
   options are from the command line, otherwise they are from
   attribute(target).  */

static void
ix86_option_override_internal (bool main_args_p)
{
  int i;
  unsigned int ix86_arch_mask, ix86_tune_mask;
  const bool ix86_tune_specified = (ix86_tune_string != NULL);
  const char *prefix;
  const char *suffix;
  const char *sw;

  /* Comes from final.c -- no real reason to change it.  */
#define MAX_CODE_ALIGN 16

  enum pta_flags
    {
      PTA_SSE = 1 << 0,
      PTA_SSE2 = 1 << 1,
      PTA_SSE3 = 1 << 2,
      PTA_MMX = 1 << 3,
      PTA_PREFETCH_SSE = 1 << 4,
      PTA_3DNOW = 1 << 5,
      PTA_3DNOW_A = 1 << 6,
      PTA_64BIT = 1 << 7,
      PTA_SSSE3 = 1 << 8,
      PTA_CX16 = 1 << 9,
      PTA_POPCNT = 1 << 10,
      PTA_ABM = 1 << 11,
      PTA_SSE4A = 1 << 12,
      PTA_NO_SAHF = 1 << 13,
      PTA_SSE4_1 = 1 << 14,
      PTA_SSE4_2 = 1 << 15,
      PTA_AES = 1 << 16,
      PTA_PCLMUL = 1 << 17,
      PTA_AVX = 1 << 18,
      PTA_FMA = 1 << 19,
      PTA_MOVBE = 1 << 20,
      PTA_FMA4 = 1 << 21,
      PTA_XOP = 1 << 22,
      PTA_LWP = 1 << 23,
      PTA_FSGSBASE = 1 << 24,
      PTA_RDRND = 1 << 25,
      PTA_F16C = 1 << 26
    };

  static struct pta
    {
      const char *const name;		/* processor name or nickname.  */
      const enum processor_type processor;
      const enum attr_cpu schedule;
      const unsigned /*enum pta_flags*/ flags;
    }
  const processor_alias_table[] =
    {
2868 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2869 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2870 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2871 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2872 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2873 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2874 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2875 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2876 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2877 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2878 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2879 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2880 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2882 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2884 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2885 PTA_MMX | PTA_SSE | PTA_SSE2},
2886 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2887 PTA_MMX |PTA_SSE | PTA_SSE2},
2888 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2889 PTA_MMX | PTA_SSE | PTA_SSE2},
2890 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2891 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2892 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2893 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2894 | PTA_CX16 | PTA_NO_SAHF},
2895 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2896 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2897 | PTA_SSSE3 | PTA_CX16},
2898 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2899 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2900 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2901 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2902 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A |PTA_PREFETCH_SSE},
2903 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2904 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2905 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2906 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2907 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2908 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2909 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2910 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2911 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2912 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2913 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2914 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2915 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2916 {"x86-64", PROCESSOR_K8, CPU_K8,
2917 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2918 {"k8", PROCESSOR_K8, CPU_K8,
2919 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2920 | PTA_SSE2 | PTA_NO_SAHF},
2921 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2922 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2923 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2924 {"opteron", PROCESSOR_K8, CPU_K8,
2925 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2926 | PTA_SSE2 | PTA_NO_SAHF},
2927 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2928 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2929 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2930 {"athlon64", PROCESSOR_K8, CPU_K8,
2931 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2932 | PTA_SSE2 | PTA_NO_SAHF},
2933 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2934 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2935 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2936 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2937 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2938 | PTA_SSE2 | PTA_NO_SAHF},
2939 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2940 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2941 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2942 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2943 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2944 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2945 {"bdver1", PROCESSOR_BDVER1, CPU_BDVER1,
2946 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2947 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM
2948 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AES
2949 | PTA_PCLMUL | PTA_AVX | PTA_FMA4 | PTA_XOP | PTA_LWP},
2950 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2951 0 /* flags are only used for -march switch. */ },
2952 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2953 PTA_64BIT /* flags are only used for -march switch. */ },
2956 int const pta_size = ARRAY_SIZE (processor_alias_table);
  /* Set up prefix/suffix so the error messages refer to either the command
     line argument, or the attribute(target).  */
  if (main_args_p)
    {
      prefix = "-m";
      suffix = "";
      sw = "switch";
    }
  else
    {
      prefix = "option(\"";
      suffix = "\")";
      sw = "attribute";
    }

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
  SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif

  /* -fPIC is the default for x86_64.  */
  if (TARGET_MACHO && TARGET_64BIT)
    flag_pic = 2;

  /* Need to check -mtune=generic first.  */
  if (ix86_tune_string)
    {
      if (!strcmp (ix86_tune_string, "generic")
	  || !strcmp (ix86_tune_string, "i686")
	  /* As special support for cross compilers we read -mtune=native
	     as -mtune=generic.  With native compilers we won't see the
	     -mtune=native, as it was changed by the driver.  */
	  || !strcmp (ix86_tune_string, "native"))
	{
	  if (TARGET_64BIT)
	    ix86_tune_string = "generic64";
	  else
	    ix86_tune_string = "generic32";
	}
      /* If this call is for setting the option attribute, allow the
	 generic32/generic64 that was previously set.  */
      else if (!main_args_p
	       && (!strcmp (ix86_tune_string, "generic32")
		   || !strcmp (ix86_tune_string, "generic64")))
	;
      else if (!strncmp (ix86_tune_string, "generic", 7))
	error ("bad value (%s) for %stune=%s %s",
	       ix86_tune_string, prefix, suffix, sw);
      else if (!strcmp (ix86_tune_string, "x86-64"))
	warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated.  Use "
		 "%stune=k8%s or %stune=generic%s instead as appropriate.",
		 prefix, suffix, prefix, suffix, prefix, suffix);
    }
  else
    {
      if (ix86_arch_string)
	ix86_tune_string = ix86_arch_string;
      if (!ix86_tune_string)
	{
	  ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
	  ix86_tune_defaulted = 1;
	}

      /* ix86_tune_string is set to ix86_arch_string or defaulted.  We
	 need to use a sensible tune option.  */
      if (!strcmp (ix86_tune_string, "generic")
	  || !strcmp (ix86_tune_string, "x86-64")
	  || !strcmp (ix86_tune_string, "i686"))
	{
	  if (TARGET_64BIT)
	    ix86_tune_string = "generic64";
	  else
	    ix86_tune_string = "generic32";
	}
    }
  if (ix86_stringop_string)
    {
      if (!strcmp (ix86_stringop_string, "rep_byte"))
	stringop_alg = rep_prefix_1_byte;
      else if (!strcmp (ix86_stringop_string, "libcall"))
	stringop_alg = libcall;
      else if (!strcmp (ix86_stringop_string, "rep_4byte"))
	stringop_alg = rep_prefix_4_byte;
      else if (!strcmp (ix86_stringop_string, "rep_8byte")
	       && TARGET_64BIT)
	/* rep; movq isn't available in 32-bit code.  */
	stringop_alg = rep_prefix_8_byte;
      else if (!strcmp (ix86_stringop_string, "byte_loop"))
	stringop_alg = loop_1_byte;
      else if (!strcmp (ix86_stringop_string, "loop"))
	stringop_alg = loop;
      else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
	stringop_alg = unrolled_loop;
      else
	error ("bad value (%s) for %sstringop-strategy=%s %s",
	       ix86_stringop_string, prefix, suffix, sw);
    }

  if (!ix86_arch_string)
    ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
  else
    ix86_arch_specified = 1;
  /* Validate -mabi= value.  */
  if (ix86_abi_string)
    {
      if (strcmp (ix86_abi_string, "sysv") == 0)
	ix86_abi = SYSV_ABI;
      else if (strcmp (ix86_abi_string, "ms") == 0)
	ix86_abi = MS_ABI;
      else
	error ("unknown ABI (%s) for %sabi=%s %s",
	       ix86_abi_string, prefix, suffix, sw);
    }
  else
    ix86_abi = DEFAULT_ABI;

  if (ix86_cmodel_string != 0)
    {
      if (!strcmp (ix86_cmodel_string, "small"))
	ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
      else if (!strcmp (ix86_cmodel_string, "medium"))
	ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
      else if (!strcmp (ix86_cmodel_string, "large"))
	ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
      else if (flag_pic)
	error ("code model %s does not support PIC mode", ix86_cmodel_string);
      else if (!strcmp (ix86_cmodel_string, "32"))
	ix86_cmodel = CM_32;
      else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
	ix86_cmodel = CM_KERNEL;
      else
	error ("bad value (%s) for %scmodel=%s %s",
	       ix86_cmodel_string, prefix, suffix, sw);
    }
  else
    {
      /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
	 use of rip-relative addressing.  This eliminates fixups that
	 would otherwise be needed if this object is to be placed in a
	 DLL, and is essentially just as efficient as direct addressing.  */
      if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
	ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
      else if (TARGET_64BIT)
	ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
      else
	ix86_cmodel = CM_32;
    }
  if (ix86_asm_string != 0)
    {
      if (! TARGET_MACHO
	  && !strcmp (ix86_asm_string, "intel"))
	ix86_asm_dialect = ASM_INTEL;
      else if (!strcmp (ix86_asm_string, "att"))
	ix86_asm_dialect = ASM_ATT;
      else
	error ("bad value (%s) for %sasm=%s %s",
	       ix86_asm_string, prefix, suffix, sw);
    }

  if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
    error ("code model %qs not supported in the %s bit mode",
	   ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
  if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
    sorry ("%i-bit mode not compiled in",
	   (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
  for (i = 0; i < pta_size; i++)
    if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
      {
	ix86_schedule = processor_alias_table[i].schedule;
	ix86_arch = processor_alias_table[i].processor;
	/* Default cpu tuning to the architecture.  */
	ix86_tune = ix86_arch;

	if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
	  error ("CPU you selected does not support x86-64 "
		 "instruction set");

	if (processor_alias_table[i].flags & PTA_MMX
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
	  ix86_isa_flags |= OPTION_MASK_ISA_MMX;
	if (processor_alias_table[i].flags & PTA_3DNOW
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
	  ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
	if (processor_alias_table[i].flags & PTA_3DNOW_A
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
	  ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
	if (processor_alias_table[i].flags & PTA_SSE
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
	  ix86_isa_flags |= OPTION_MASK_ISA_SSE;
	if (processor_alias_table[i].flags & PTA_SSE2
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
	  ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
	if (processor_alias_table[i].flags & PTA_SSE3
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
	  ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
	if (processor_alias_table[i].flags & PTA_SSSE3
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
	  ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
	if (processor_alias_table[i].flags & PTA_SSE4_1
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
	  ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
	if (processor_alias_table[i].flags & PTA_SSE4_2
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
	  ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
	if (processor_alias_table[i].flags & PTA_AVX
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
	  ix86_isa_flags |= OPTION_MASK_ISA_AVX;
	if (processor_alias_table[i].flags & PTA_FMA
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
	  ix86_isa_flags |= OPTION_MASK_ISA_FMA;
	if (processor_alias_table[i].flags & PTA_SSE4A
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
	  ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
	if (processor_alias_table[i].flags & PTA_FMA4
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
	  ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
	if (processor_alias_table[i].flags & PTA_XOP
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
	  ix86_isa_flags |= OPTION_MASK_ISA_XOP;
	if (processor_alias_table[i].flags & PTA_LWP
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
	  ix86_isa_flags |= OPTION_MASK_ISA_LWP;
	if (processor_alias_table[i].flags & PTA_ABM
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
	  ix86_isa_flags |= OPTION_MASK_ISA_ABM;
	if (processor_alias_table[i].flags & PTA_CX16
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
	  ix86_isa_flags |= OPTION_MASK_ISA_CX16;
	if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
	  ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
	if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
	  ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
	if (processor_alias_table[i].flags & PTA_MOVBE
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
	  ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
	if (processor_alias_table[i].flags & PTA_AES
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
	  ix86_isa_flags |= OPTION_MASK_ISA_AES;
	if (processor_alias_table[i].flags & PTA_PCLMUL
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
	  ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
	if (processor_alias_table[i].flags & PTA_FSGSBASE
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FSGSBASE))
	  ix86_isa_flags |= OPTION_MASK_ISA_FSGSBASE;
	if (processor_alias_table[i].flags & PTA_RDRND
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_RDRND))
	  ix86_isa_flags |= OPTION_MASK_ISA_RDRND;
	if (processor_alias_table[i].flags & PTA_F16C
	    && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_F16C))
	  ix86_isa_flags |= OPTION_MASK_ISA_F16C;
	if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
	  x86_prefetch_sse = true;

	break;
      }

  if (!strcmp (ix86_arch_string, "generic"))
    error ("generic CPU can be used only for %stune=%s %s",
	   prefix, suffix, sw);
  else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
    error ("bad value (%s) for %sarch=%s %s",
	   ix86_arch_string, prefix, suffix, sw);

  ix86_arch_mask = 1u << ix86_arch;
  for (i = 0; i < X86_ARCH_LAST; ++i)
    ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
  for (i = 0; i < pta_size; i++)
    if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
      {
	ix86_schedule = processor_alias_table[i].schedule;
	ix86_tune = processor_alias_table[i].processor;
	if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
	  {
	    if (ix86_tune_defaulted)
	      {
		ix86_tune_string = "x86-64";
		for (i = 0; i < pta_size; i++)
		  if (! strcmp (ix86_tune_string,
				processor_alias_table[i].name))
		    break;
		ix86_schedule = processor_alias_table[i].schedule;
		ix86_tune = processor_alias_table[i].processor;
	      }
	    else
	      error ("CPU you selected does not support x86-64 "
		     "instruction set");
	  }

	/* Intel CPUs have always interpreted SSE prefetch instructions as
	   NOPs; so, we can enable SSE prefetch instructions even when
	   -mtune (rather than -march) points us to a processor that has them.
	   However, the VIA C3 gives a SIGILL, so we only do that for i686 and
	   higher processors.  */
	if (TARGET_CMOVE
	    && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
	  x86_prefetch_sse = true;

	break;
      }

  if (ix86_tune_specified && i == pta_size)
    error ("bad value (%s) for %stune=%s %s",
	   ix86_tune_string, prefix, suffix, sw);

  ix86_tune_mask = 1u << ix86_tune;
  for (i = 0; i < X86_TUNE_LAST; ++i)
    ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
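  /* Illustrative note (not in the original source): after this loop each
     tuning knob is a plain byte lookup; i386.h exposes the knobs through
     macros along the lines of
       #define TARGET_USE_LEAVE ix86_tune_features[X86_TUNE_USE_LEAVE]  */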
#ifndef USE_IX86_FRAME_POINTER
#define USE_IX86_FRAME_POINTER 0
#endif

  /* Set the default values for switches whose default depends on TARGET_64BIT
     in case they weren't overwritten by command line options.  */
  if (TARGET_64BIT)
    {
      if (flag_omit_frame_pointer == 2)
	flag_omit_frame_pointer = 1;
      if (flag_asynchronous_unwind_tables == 2)
	flag_asynchronous_unwind_tables = 1;
      if (flag_pcc_struct_return == 2)
	flag_pcc_struct_return = 0;
    }
  else
    {
      if (flag_omit_frame_pointer == 2)
	flag_omit_frame_pointer = !(USE_IX86_FRAME_POINTER || optimize_size);
      if (flag_asynchronous_unwind_tables == 2)
	flag_asynchronous_unwind_tables = !USE_IX86_FRAME_POINTER;
      if (flag_pcc_struct_return == 2)
	flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
    }

  if (optimize_size)
    ix86_cost = &ix86_size_cost;
  else
    ix86_cost = processor_target_table[ix86_tune].cost;
  /* Arrange to set up i386_stack_locals for all functions.  */
  init_machine_status = ix86_init_machine_status;

  /* Validate -mregparm= value.  */
  if (ix86_regparm_string)
    {
      if (TARGET_64BIT)
	warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
      i = atoi (ix86_regparm_string);
      if (i < 0 || i > REGPARM_MAX)
	error ("%sregparm=%d%s is not between 0 and %d",
	       prefix, i, suffix, REGPARM_MAX);
      else
	ix86_regparm = i;
    }
  if (TARGET_64BIT)
    ix86_regparm = REGPARM_MAX;
  /* If the user has provided any of the -malign-* options,
     warn and use that value only if -falign-* is not set.
     Remove this code in GCC 3.2 or later.  */
  if (ix86_align_loops_string)
    {
      warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
	       prefix, suffix, suffix);
      if (align_loops == 0)
	{
	  i = atoi (ix86_align_loops_string);
	  if (i < 0 || i > MAX_CODE_ALIGN)
	    error ("%salign-loops=%d%s is not between 0 and %d",
		   prefix, i, suffix, MAX_CODE_ALIGN);
	  else
	    align_loops = 1 << i;
	}
    }

  if (ix86_align_jumps_string)
    {
      warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
	       prefix, suffix, suffix);
      if (align_jumps == 0)
	{
	  i = atoi (ix86_align_jumps_string);
	  if (i < 0 || i > MAX_CODE_ALIGN)
	    error ("%salign-jumps=%d%s is not between 0 and %d",
		   prefix, i, suffix, MAX_CODE_ALIGN);
	  else
	    align_jumps = 1 << i;
	}
    }

  if (ix86_align_funcs_string)
    {
      warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
	       prefix, suffix, suffix);
      if (align_functions == 0)
	{
	  i = atoi (ix86_align_funcs_string);
	  if (i < 0 || i > MAX_CODE_ALIGN)
	    error ("%salign-functions=%d%s is not between 0 and %d",
		   prefix, i, suffix, MAX_CODE_ALIGN);
	  else
	    align_functions = 1 << i;
	}
    }
  /* Default align_* from the processor table.  */
  if (align_loops == 0)
    {
      align_loops = processor_target_table[ix86_tune].align_loop;
      align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
    }
  if (align_jumps == 0)
    {
      align_jumps = processor_target_table[ix86_tune].align_jump;
      align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
    }
  if (align_functions == 0)
    align_functions = processor_target_table[ix86_tune].align_func;
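  /* Worked example (illustrative): -malign-loops=4 above stores
     align_loops = 1 << 4 = 16, i.e. 16-byte loop alignment, matching the
     align_loop column most rows of processor_target_table supply.  */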
  /* Validate -mbranch-cost= value, or provide default.  */
  ix86_branch_cost = ix86_cost->branch_cost;
  if (ix86_branch_cost_string)
    {
      i = atoi (ix86_branch_cost_string);
      if (i < 0 || i > 5)
	error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
      else
	ix86_branch_cost = i;
    }

  if (ix86_section_threshold_string)
    {
      i = atoi (ix86_section_threshold_string);
      if (i < 0)
	error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
      else
	ix86_section_threshold = i;
    }
  if (ix86_tls_dialect_string)
    {
      if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
	ix86_tls_dialect = TLS_DIALECT_GNU;
      else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
	ix86_tls_dialect = TLS_DIALECT_GNU2;
      else
	error ("bad value (%s) for %stls-dialect=%s %s",
	       ix86_tls_dialect_string, prefix, suffix, sw);
    }

  if (ix87_precision_string)
    {
      i = atoi (ix87_precision_string);
      if (i != 32 && i != 64 && i != 80)
	error ("pc%d is not a valid precision setting (32, 64 or 80)", i);
    }

  if (TARGET_64BIT)
    {
      target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3429 /* Enable by default the SSE and MMX builtins. Do allow the user to
3430 explicitly disable any of these. In particular, disabling SSE and
3431 MMX for kernel code is extremely useful. */
3432 if (!ix86_arch_specified)
3434 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3435 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3438 warning (0, "%srtd%s is ignored in 64bit mode", prefix, suffix);
3442 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3444 if (!ix86_arch_specified)
3446 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3448 /* The i386 ABI does not specify a red zone. It still makes sense to use it
3449 when the programmer takes care to keep the stack from being destroyed. */
3450 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3451 target_flags |= MASK_NO_RED_ZONE;
3454 /* Keep nonleaf frame pointers. */
3455 if (flag_omit_frame_pointer)
3456 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3457 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3458 flag_omit_frame_pointer = 1;
3460 /* If we're doing fast math, we don't care about comparison order
3461 wrt NaNs. This lets us use a shorter comparison sequence. */
3462 if (flag_finite_math_only)
3463 target_flags &= ~MASK_IEEE_FP;
3465 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3466 since the insns won't need emulation. */
3467 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3468 target_flags &= ~MASK_NO_FANCY_MATH_387;
3470 /* Likewise, if the target doesn't have a 387, or we've specified
3471 software floating point, don't use 387 inline intrinsics. */
3473 target_flags |= MASK_NO_FANCY_MATH_387;
3475 /* Turn on MMX builtins for -msse. */
3478 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3479 x86_prefetch_sse = true;
3482 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3483 if (TARGET_SSE4_2 || TARGET_ABM)
3484 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3486 /* Validate -mpreferred-stack-boundary= value or default it to
3487 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3488 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3489 if (ix86_preferred_stack_boundary_string)
3491 i = atoi (ix86_preferred_stack_boundary_string);
3492 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3493 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3494 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3496 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
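/* A worked example of the computation above (illustrative only): with
   -mpreferred-stack-boundary=4 on 64-bit, i == 4, so the boundary
   becomes (1 << 4) * BITS_PER_UNIT == 16 * 8 == 128 bits, i.e. the
   16-byte stack alignment required by the x86-64 psABI.  */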
3499 /* Set the default value for -mstackrealign. */
3500 if (ix86_force_align_arg_pointer == -1)
3501 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3503 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3505 /* Validate -mincoming-stack-boundary= value or default it to
3506 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3507 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3508 if (ix86_incoming_stack_boundary_string)
3510 i = atoi (ix86_incoming_stack_boundary_string);
3511 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3512 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3513 i, TARGET_64BIT ? 4 : 2);
3516 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3517 ix86_incoming_stack_boundary
3518 = ix86_user_incoming_stack_boundary;
3522 /* Accept -msseregparm only if at least SSE support is enabled. */
3523 if (TARGET_SSEREGPARM
3525 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3527 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3528 if (ix86_fpmath_string != 0)
3530 if (! strcmp (ix86_fpmath_string, "387"))
3531 ix86_fpmath = FPMATH_387;
3532 else if (! strcmp (ix86_fpmath_string, "sse"))
3536 warning (0, "SSE instruction set disabled, using 387 arithmetic");
3537 ix86_fpmath = FPMATH_387;
3540 ix86_fpmath = FPMATH_SSE;
3542 else if (! strcmp (ix86_fpmath_string, "387,sse")
3543 || ! strcmp (ix86_fpmath_string, "387+sse")
3544 || ! strcmp (ix86_fpmath_string, "sse,387")
3545 || ! strcmp (ix86_fpmath_string, "sse+387")
3546 || ! strcmp (ix86_fpmath_string, "both"))
3550 warning (0, "SSE instruction set disabled, using 387 arithmetic");
3551 ix86_fpmath = FPMATH_387;
3553 else if (!TARGET_80387)
3555 warning (0, "387 instruction set disabled, using SSE arithmetic");
3556 ix86_fpmath = FPMATH_SSE;
3559 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3562 error ("bad value (%s) for %sfpmath=%s %s",
3563 ix86_fpmath_string, prefix, suffix, sw);
3566 /* If the i387 is disabled, then do not return values in it. */
3568 target_flags &= ~MASK_FLOAT_RETURNS;
3570 /* Use external vectorized library in vectorizing intrinsics. */
3571 if (ix86_veclibabi_string)
3573 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3574 ix86_veclib_handler = ix86_veclibabi_svml;
3575 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3576 ix86_veclib_handler = ix86_veclibabi_acml;
3578 error ("unknown vectorization library ABI type (%s) for "
3579 "%sveclibabi=%s %s", ix86_veclibabi_string,
3580 prefix, suffix, sw);
3583 if ((!USE_IX86_FRAME_POINTER
3584 || (x86_accumulate_outgoing_args & ix86_tune_mask))
3585 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3587 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3589 /* ??? Unwind info is not correct around the CFG unless either a frame
3590 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3591 unwind info generation to be aware of the CFG and propagating states around edges. */
3593 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3594 || flag_exceptions || flag_non_call_exceptions)
3595 && flag_omit_frame_pointer
3596 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3598 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3599 warning (0, "unwind tables currently require either a frame pointer "
3600 "or %saccumulate-outgoing-args%s for correctness",
3602 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3605 /* If stack probes are required, the space used for large function
3606 arguments on the stack must also be probed, so enable
3607 -maccumulate-outgoing-args so this happens in the prologue. */
3608 if (TARGET_STACK_PROBE
3609 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3611 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3612 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3613 "for correctness", prefix, suffix);
3614 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3617 /* For sane SSE instruction set generation we need the fcomi instruction.
3618 It is safe to enable all CMOVE instructions. */
3622 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3625 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3626 p = strchr (internal_label_prefix, 'X');
3627 internal_label_prefix_len = p - internal_label_prefix;
3631 /* When a scheduling description is not available, disable the scheduler pass
3632 so it won't slow down compilation and make x87 code slower. */
3633 if (!TARGET_SCHEDULE)
3634 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3636 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3637 set_param_value ("simultaneous-prefetches",
3638 ix86_cost->simultaneous_prefetches);
3639 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3640 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3641 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3642 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3643 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3644 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
3646 /* Enable software prefetching at -O3 for CPUs where prefetching is helpful. */
3647 if (flag_prefetch_loop_arrays < 0
3650 && software_prefetching_beneficial_p ())
3651 flag_prefetch_loop_arrays = 1;
3653 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3654 can be optimized to ap = __builtin_next_arg (0). */
3655 if (!TARGET_64BIT && !flag_split_stack)
3656 targetm.expand_builtin_va_start = NULL;
3660 ix86_gen_leave = gen_leave_rex64;
3661 ix86_gen_add3 = gen_adddi3;
3662 ix86_gen_sub3 = gen_subdi3;
3663 ix86_gen_sub3_carry = gen_subdi3_carry;
3664 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3665 ix86_gen_monitor = gen_sse3_monitor64;
3666 ix86_gen_andsp = gen_anddi3;
3667 ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_di;
3668 ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probedi;
3669 ix86_gen_probe_stack_range = gen_probe_stack_rangedi;
3673 ix86_gen_leave = gen_leave;
3674 ix86_gen_add3 = gen_addsi3;
3675 ix86_gen_sub3 = gen_subsi3;
3676 ix86_gen_sub3_carry = gen_subsi3_carry;
3677 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3678 ix86_gen_monitor = gen_sse3_monitor;
3679 ix86_gen_andsp = gen_andsi3;
3680 ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_si;
3681 ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probesi;
3682 ix86_gen_probe_stack_range = gen_probe_stack_rangesi;
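/* A minimal sketch of the dispatch idiom used above (variable names here
   are hypothetical): selecting the generator once at override time lets
   later expansion code emit word-size-correct RTL without testing
   TARGET_64BIT at every call site.

     rtx (*gen_add3) (rtx, rtx, rtx)
       = TARGET_64BIT ? gen_adddi3 : gen_addsi3;
     ...
     emit_insn (gen_add3 (dest, src1, src2));
*/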
3686 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3688 target_flags |= MASK_CLD & ~target_flags_explicit;
3691 if (!TARGET_64BIT && flag_pic)
3693 if (flag_fentry > 0)
3694 sorry ("-mfentry isn't supported for 32-bit in combination with -fpic");
3697 if (flag_fentry < 0)
3699 #if defined(PROFILE_BEFORE_PROLOGUE)
3706 /* Save the initial options in case the user does function-specific options. */
3708 target_option_default_node = target_option_current_node
3709 = build_target_option_node ();
3712 /* Implement the TARGET_OPTION_OVERRIDE hook. */
3715 ix86_option_override (void)
3717 ix86_option_override_internal (true);
3720 /* Update register usage after having seen the compiler flags. */
3723 ix86_conditional_register_usage (void)
3728 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3730 if (fixed_regs[i] > 1)
3731 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3732 if (call_used_regs[i] > 1)
3733 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3736 /* The PIC register, if it exists, is fixed. */
3737 j = PIC_OFFSET_TABLE_REGNUM;
3738 if (j != INVALID_REGNUM)
3739 fixed_regs[j] = call_used_regs[j] = 1;
3741 /* The MS_ABI changes the set of call-used registers. */
3742 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
3744 call_used_regs[SI_REG] = 0;
3745 call_used_regs[DI_REG] = 0;
3746 call_used_regs[XMM6_REG] = 0;
3747 call_used_regs[XMM7_REG] = 0;
3748 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3749 call_used_regs[i] = 0;
3752 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3753 other call-clobbered regs for 64-bit. */
3756 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3758 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3759 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3760 && call_used_regs[i])
3761 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3764 /* If MMX is disabled, squash the registers. */
3766 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3767 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3768 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3770 /* If SSE is disabled, squash the registers. */
3772 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3773 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3774 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3776 /* If the FPU is disabled, squash the registers. */
3777 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3778 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3779 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3780 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3782 /* If 32-bit, squash the 64-bit registers. */
3785 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3787 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3793 /* Save the current options */
3796 ix86_function_specific_save (struct cl_target_option *ptr)
3798 ptr->arch = ix86_arch;
3799 ptr->schedule = ix86_schedule;
3800 ptr->tune = ix86_tune;
3801 ptr->fpmath = ix86_fpmath;
3802 ptr->branch_cost = ix86_branch_cost;
3803 ptr->tune_defaulted = ix86_tune_defaulted;
3804 ptr->arch_specified = ix86_arch_specified;
3805 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3806 ptr->target_flags_explicit = target_flags_explicit;
3808 /* The fields are char but the variables are not; make sure the
3809 values fit in the fields. */
3810 gcc_assert (ptr->arch == ix86_arch);
3811 gcc_assert (ptr->schedule == ix86_schedule);
3812 gcc_assert (ptr->tune == ix86_tune);
3813 gcc_assert (ptr->fpmath == ix86_fpmath);
3814 gcc_assert (ptr->branch_cost == ix86_branch_cost);
3817 /* Restore the current options */
3820 ix86_function_specific_restore (struct cl_target_option *ptr)
3822 enum processor_type old_tune = ix86_tune;
3823 enum processor_type old_arch = ix86_arch;
3824 unsigned int ix86_arch_mask, ix86_tune_mask;
3827 ix86_arch = (enum processor_type) ptr->arch;
3828 ix86_schedule = (enum attr_cpu) ptr->schedule;
3829 ix86_tune = (enum processor_type) ptr->tune;
3830 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
3831 ix86_branch_cost = ptr->branch_cost;
3832 ix86_tune_defaulted = ptr->tune_defaulted;
3833 ix86_arch_specified = ptr->arch_specified;
3834 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3835 target_flags_explicit = ptr->target_flags_explicit;
3837 /* Recreate the arch feature tests if the arch changed */
3838 if (old_arch != ix86_arch)
3840 ix86_arch_mask = 1u << ix86_arch;
3841 for (i = 0; i < X86_ARCH_LAST; ++i)
3842 ix86_arch_features[i]
3843 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3846 /* Recreate the tune optimization tests */
3847 if (old_tune != ix86_tune)
3849 ix86_tune_mask = 1u << ix86_tune;
3850 for (i = 0; i < X86_TUNE_LAST; ++i)
3851 ix86_tune_features[i]
3852 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3856 /* Print the current options */
3859 ix86_function_specific_print (FILE *file, int indent,
3860 struct cl_target_option *ptr)
3863 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3864 NULL, NULL, NULL, false);
3866 fprintf (file, "%*sarch = %d (%s)\n",
3869 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3870 ? cpu_names[ptr->arch]
3873 fprintf (file, "%*stune = %d (%s)\n",
3876 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3877 ? cpu_names[ptr->tune]
3880 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3881 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3882 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3883 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3887 fprintf (file, "%*s%s\n", indent, "", target_string);
3888 free (target_string);
3893 /* Inner function to process the attribute((target(...))), take an argument and
3894 set the current options from the argument. If we have a list, recursively go over the list. */
3898 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3903 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3904 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3905 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3906 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3921 enum ix86_opt_type type;
3926 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3927 IX86_ATTR_ISA ("abm", OPT_mabm),
3928 IX86_ATTR_ISA ("aes", OPT_maes),
3929 IX86_ATTR_ISA ("avx", OPT_mavx),
3930 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3931 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3932 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3933 IX86_ATTR_ISA ("sse", OPT_msse),
3934 IX86_ATTR_ISA ("sse2", OPT_msse2),
3935 IX86_ATTR_ISA ("sse3", OPT_msse3),
3936 IX86_ATTR_ISA ("sse4", OPT_msse4),
3937 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3938 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3939 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3940 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3941 IX86_ATTR_ISA ("fma4", OPT_mfma4),
3942 IX86_ATTR_ISA ("xop", OPT_mxop),
3943 IX86_ATTR_ISA ("lwp", OPT_mlwp),
3944 IX86_ATTR_ISA ("fsgsbase", OPT_mfsgsbase),
3945 IX86_ATTR_ISA ("rdrnd", OPT_mrdrnd),
3946 IX86_ATTR_ISA ("f16c", OPT_mf16c),
3948 /* string options */
3949 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3950 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3951 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3954 IX86_ATTR_YES ("cld",
3958 IX86_ATTR_NO ("fancy-math-387",
3959 OPT_mfancy_math_387,
3960 MASK_NO_FANCY_MATH_387),
3962 IX86_ATTR_YES ("ieee-fp",
3966 IX86_ATTR_YES ("inline-all-stringops",
3967 OPT_minline_all_stringops,
3968 MASK_INLINE_ALL_STRINGOPS),
3970 IX86_ATTR_YES ("inline-stringops-dynamically",
3971 OPT_minline_stringops_dynamically,
3972 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3974 IX86_ATTR_NO ("align-stringops",
3975 OPT_mno_align_stringops,
3976 MASK_NO_ALIGN_STRINGOPS),
3978 IX86_ATTR_YES ("recip",
3984 /* If this is a list, recurse to get the options. */
3985 if (TREE_CODE (args) == TREE_LIST)
3989 for (; args; args = TREE_CHAIN (args))
3990 if (TREE_VALUE (args)
3991 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3997 else if (TREE_CODE (args) != STRING_CST)
4000 /* Handle multiple arguments separated by commas. */
4001 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
4003 while (next_optstr && *next_optstr != '\0')
4005 char *p = next_optstr;
4007 char *comma = strchr (next_optstr, ',');
4008 const char *opt_string;
4009 size_t len, opt_len;
4014 enum ix86_opt_type type = ix86_opt_unknown;
4020 len = comma - next_optstr;
4021 next_optstr = comma + 1;
4029 /* Recognize no-xxx. */
4030 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
4039 /* Find the option. */
4042 for (i = 0; i < ARRAY_SIZE (attrs); i++)
4044 type = attrs[i].type;
4045 opt_len = attrs[i].len;
4046 if (ch == attrs[i].string[0]
4047 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
4048 && memcmp (p, attrs[i].string, opt_len) == 0)
4051 mask = attrs[i].mask;
4052 opt_string = attrs[i].string;
4057 /* Process the option. */
4060 error ("attribute(target(\"%s\")) is unknown", orig_p);
4064 else if (type == ix86_opt_isa)
4065 ix86_handle_option (opt, p, opt_set_p);
4067 else if (type == ix86_opt_yes || type == ix86_opt_no)
4069 if (type == ix86_opt_no)
4070 opt_set_p = !opt_set_p;
4073 target_flags |= mask;
4075 target_flags &= ~mask;
4078 else if (type == ix86_opt_str)
4082 error ("option(\"%s\") was already specified", opt_string);
4086 p_strings[opt] = xstrdup (p + opt_len);
4096 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
4099 ix86_valid_target_attribute_tree (tree args)
4101 const char *orig_arch_string = ix86_arch_string;
4102 const char *orig_tune_string = ix86_tune_string;
4103 const char *orig_fpmath_string = ix86_fpmath_string;
4104 int orig_tune_defaulted = ix86_tune_defaulted;
4105 int orig_arch_specified = ix86_arch_specified;
4106 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
4109 struct cl_target_option *def
4110 = TREE_TARGET_OPTION (target_option_default_node);
4112 /* Process each of the options on the chain. */
4113 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
4116 /* If the changed options are different from the default, rerun
4117 ix86_option_override_internal, and then save the options away.
4118 The string options are attribute options, and will be undone
4119 when we copy the save structure. */
4120 if (ix86_isa_flags != def->ix86_isa_flags
4121 || target_flags != def->target_flags
4122 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
4123 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
4124 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
4126 /* If we are using the default tune= or arch=, undo the string assigned,
4127 and use the default. */
4128 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
4129 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
4130 else if (!orig_arch_specified)
4131 ix86_arch_string = NULL;
4133 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
4134 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
4135 else if (orig_tune_defaulted)
4136 ix86_tune_string = NULL;
4138 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
4139 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
4140 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
4141 else if (!TARGET_64BIT && TARGET_SSE)
4142 ix86_fpmath_string = "sse,387";
4144 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
4145 ix86_option_override_internal (false);
4147 /* Add any builtin functions with the new isa if any. */
4148 ix86_add_new_builtins (ix86_isa_flags);
4150 /* Save the current options unless we are validating options for #pragma. */
4152 t = build_target_option_node ();
4154 ix86_arch_string = orig_arch_string;
4155 ix86_tune_string = orig_tune_string;
4156 ix86_fpmath_string = orig_fpmath_string;
4158 /* Free up memory allocated to hold the strings */
4159 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
4160 if (option_strings[i])
4161 free (option_strings[i]);
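/* A hedged usage sketch for the attribute machinery above: the strings
   parsed by ix86_valid_target_attribute_inner_p come from declarations
   such as (function names are illustrative):

     int popcount_sse42 (unsigned int x)
       __attribute__ ((target ("sse4.2")));

     void *fast_copy (void *d, const void *s, unsigned int n)
       __attribute__ ((target ("arch=core2,no-align-stringops")));

   Each comma-separated token is matched against the attrs[] table; a
   "no-" prefix inverts the sense of the ix86_opt_yes/ix86_opt_no mask
   options.  */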
4167 /* Hook to validate attribute((target("string"))). */
4170 ix86_valid_target_attribute_p (tree fndecl,
4171 tree ARG_UNUSED (name),
4173 int ARG_UNUSED (flags))
4175 struct cl_target_option cur_target;
4177 tree old_optimize = build_optimization_node ();
4178 tree new_target, new_optimize;
4179 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
4181 /* If the function changed the optimization levels as well as setting target
4182 options, start with the optimizations specified. */
4183 if (func_optimize && func_optimize != old_optimize)
4184 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
4186 /* The target attributes may also change some optimization flags, so update
4187 the optimization options if necessary. */
4188 cl_target_option_save (&cur_target);
4189 new_target = ix86_valid_target_attribute_tree (args);
4190 new_optimize = build_optimization_node ();
4197 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
4199 if (old_optimize != new_optimize)
4200 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
4203 cl_target_option_restore (&cur_target);
4205 if (old_optimize != new_optimize)
4206 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
4212 /* Hook to determine if one function can safely inline another. */
4215 ix86_can_inline_p (tree caller, tree callee)
4218 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
4219 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
4221 /* If callee has no option attributes, then it is ok to inline. */
4225 /* If caller has no option attributes but callee does, then it is not ok to inline. */
4227 else if (!caller_tree)
4232 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
4233 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
4235 /* Callee's isa options should be a subset of the caller's, i.e. an SSE4
4236 function can inline an SSE2 function but not vice versa. */
4238 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
4239 != callee_opts->ix86_isa_flags)
4242 /* See if we have the same non-isa options. */
4243 else if (caller_opts->target_flags != callee_opts->target_flags)
4246 /* See if arch, tune, etc. are the same. */
4247 else if (caller_opts->arch != callee_opts->arch)
4250 else if (caller_opts->tune != callee_opts->tune)
4253 else if (caller_opts->fpmath != callee_opts->fpmath)
4256 else if (caller_opts->branch_cost != callee_opts->branch_cost)
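/* An illustrative example for the checks above (hypothetical code):

     int helper (int x) __attribute__ ((target ("sse4.2")));
     int caller_sse4 (int x) __attribute__ ((target ("sse4.2")));
     int caller_plain (int x);

   caller_sse4 may inline helper, since the callee's ISA flags are a
   subset of the caller's, while caller_plain may not: inlining would
   let SSE4.2 code leak into a function with no SSE4.2 guarantee.  */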
4267 /* Remember the last target of ix86_set_current_function. */
4268 static GTY(()) tree ix86_previous_fndecl;
4270 /* Establish appropriate back-end context for processing the function
4271 FNDECL. The argument might be NULL to indicate processing at top
4272 level, outside of any function scope. */
4274 ix86_set_current_function (tree fndecl)
4276 /* Only change the context if the function changes. This hook is called
4277 several times in the course of compiling a function, and we don't want to
4278 slow things down too much or call target_reinit when it isn't safe. */
4279 if (fndecl && fndecl != ix86_previous_fndecl)
4281 tree old_tree = (ix86_previous_fndecl
4282 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4285 tree new_tree = (fndecl
4286 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4289 ix86_previous_fndecl = fndecl;
4290 if (old_tree == new_tree)
4295 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
4301 struct cl_target_option *def
4302 = TREE_TARGET_OPTION (target_option_current_node);
4304 cl_target_option_restore (def);
4311 /* Return true if this goes in large data/bss. */
4314 ix86_in_large_data_p (tree exp)
4316 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4319 /* Functions are never large data. */
4320 if (TREE_CODE (exp) == FUNCTION_DECL)
4323 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4325 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4326 if (strcmp (section, ".ldata") == 0
4327 || strcmp (section, ".lbss") == 0)
4333 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4335 /* If this is an incomplete type with size 0, then we can't put it
4336 in data because it might be too big when completed. */
4337 if (!size || size > ix86_section_threshold)
4344 /* Switch to the appropriate section for output of DECL.
4345 DECL is either a `VAR_DECL' node or a constant of some sort.
4346 RELOC indicates whether forming the initial value of DECL requires
4347 link-time relocations. */
4349 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4353 x86_64_elf_select_section (tree decl, int reloc,
4354 unsigned HOST_WIDE_INT align)
4356 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4357 && ix86_in_large_data_p (decl))
4359 const char *sname = NULL;
4360 unsigned int flags = SECTION_WRITE;
4361 switch (categorize_decl_for_section (decl, reloc))
4366 case SECCAT_DATA_REL:
4367 sname = ".ldata.rel";
4369 case SECCAT_DATA_REL_LOCAL:
4370 sname = ".ldata.rel.local";
4372 case SECCAT_DATA_REL_RO:
4373 sname = ".ldata.rel.ro";
4375 case SECCAT_DATA_REL_RO_LOCAL:
4376 sname = ".ldata.rel.ro.local";
4380 flags |= SECTION_BSS;
4383 case SECCAT_RODATA_MERGE_STR:
4384 case SECCAT_RODATA_MERGE_STR_INIT:
4385 case SECCAT_RODATA_MERGE_CONST:
4389 case SECCAT_SRODATA:
4396 /* We don't split these for the medium model. Place them into
4397 default sections and hope for the best. */
4402 /* We might get called with string constants, but get_named_section
4403 doesn't like them as they are not DECLs. Also, we need to set
4404 flags in that case. */
4406 return get_section (sname, flags, NULL);
4407 return get_named_section (decl, sname, reloc);
4410 return default_elf_select_section (decl, reloc, align);
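/* An illustrative consequence of the section logic above: with
   -mcmodel=medium, an object whose size exceeds the
   -mlarge-data-threshold value, e.g. the hypothetical

     static char big_buffer[1 << 20];

   is placed in .ldata/.lbss instead of .data/.bss, keeping the
   normal sections within reach of 32-bit (small-model) relocations.  */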
4413 /* Build up a unique section name, expressed as a
4414 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4415 RELOC indicates whether the initial value of EXP requires
4416 link-time relocations. */
4418 static void ATTRIBUTE_UNUSED
4419 x86_64_elf_unique_section (tree decl, int reloc)
4421 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4422 && ix86_in_large_data_p (decl))
4424 const char *prefix = NULL;
4425 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4426 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4428 switch (categorize_decl_for_section (decl, reloc))
4431 case SECCAT_DATA_REL:
4432 case SECCAT_DATA_REL_LOCAL:
4433 case SECCAT_DATA_REL_RO:
4434 case SECCAT_DATA_REL_RO_LOCAL:
4435 prefix = one_only ? ".ld" : ".ldata";
4438 prefix = one_only ? ".lb" : ".lbss";
4441 case SECCAT_RODATA_MERGE_STR:
4442 case SECCAT_RODATA_MERGE_STR_INIT:
4443 case SECCAT_RODATA_MERGE_CONST:
4444 prefix = one_only ? ".lr" : ".lrodata";
4446 case SECCAT_SRODATA:
4453 /* We don't split these for the medium model. Place them into
4454 default sections and hope for the best. */
4459 const char *name, *linkonce;
4462 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4463 name = targetm.strip_name_encoding (name);
4465 /* If we're using one_only, then there needs to be a .gnu.linkonce
4466 prefix to the section name. */
4467 linkonce = one_only ? ".gnu.linkonce" : "";
4469 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4471 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4475 default_unique_section (decl, reloc);
4478 #ifdef COMMON_ASM_OP
4479 /* This says how to output assembler code to declare an
4480 uninitialized external linkage data object.
4482 For medium model x86-64 we need to use the .largecomm opcode for large objects. */
4485 x86_elf_aligned_common (FILE *file,
4486 const char *name, unsigned HOST_WIDE_INT size,
4489 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4490 && size > (unsigned int)ix86_section_threshold)
4491 fputs (".largecomm\t", file);
4493 fputs (COMMON_ASM_OP, file);
4494 assemble_name (file, name);
4495 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4496 size, align / BITS_PER_UNIT);
4500 /* Utility function for targets to use in implementing
4501 ASM_OUTPUT_ALIGNED_BSS. */
4504 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4505 const char *name, unsigned HOST_WIDE_INT size,
4508 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4509 && size > (unsigned int)ix86_section_threshold)
4510 switch_to_section (get_named_section (decl, ".lbss", 0));
4512 switch_to_section (bss_section);
4513 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4514 #ifdef ASM_DECLARE_OBJECT_NAME
4515 last_assemble_variable_decl = decl;
4516 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4518 /* The standard thing is just to output a label for the object. */
4519 ASM_OUTPUT_LABEL (file, name);
4520 #endif /* ASM_DECLARE_OBJECT_NAME */
4521 ASM_OUTPUT_SKIP (file, size ? size : 1);
4525 ix86_option_optimization (int level, int size ATTRIBUTE_UNUSED)
4527 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4528 make the problem of not having enough registers even worse. */
4529 #ifdef INSN_SCHEDULING
4531 flag_schedule_insns = 0;
4535 /* The Darwin libraries never set errno, so we might as well
4536 avoid calling them when that's the only reason we would. */
4537 flag_errno_math = 0;
4539 /* The default values of these switches depend on TARGET_64BIT,
4540 which is not known at this moment. Mark these values with 2 and
4541 let the user override them. If there is no command line
4542 option specifying them, we will set the defaults in
4543 ix86_option_override_internal. */
4545 flag_omit_frame_pointer = 2;
4547 /* For -O2 and beyond, turn on -fzee for x86_64 target. */
4551 flag_pcc_struct_return = 2;
4552 flag_asynchronous_unwind_tables = 2;
4553 flag_vect_cost_model = 1;
4554 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4555 SUBTARGET_OPTIMIZATION_OPTIONS;
4559 /* Decide whether we must probe the stack before any space allocation
4560 on this target. It's essentially TARGET_STACK_PROBE except when
4561 -fstack-check causes the stack to be already probed differently. */
4564 ix86_target_stack_probe (void)
4566 /* Do not probe the stack twice if static stack checking is enabled. */
4567 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
4570 return TARGET_STACK_PROBE;
4573 /* Decide whether we can make a sibling call to a function. DECL is the
4574 declaration of the function being targeted by the call and EXP is the
4575 CALL_EXPR representing the call. */
4578 ix86_function_ok_for_sibcall (tree decl, tree exp)
4580 tree type, decl_or_type;
4583 /* If we are generating position-independent code, we cannot sibcall
4584 optimize any indirect call, or a direct call to a global function,
4585 as the PLT requires %ebx be live. */
4586 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4589 /* If we need to align the outgoing stack, then sibcalling would
4590 unalign the stack, which may break the called function. */
4591 if (ix86_minimum_incoming_stack_boundary (true)
4592 < PREFERRED_STACK_BOUNDARY)
4597 decl_or_type = decl;
4598 type = TREE_TYPE (decl);
4602 /* We're looking at the CALL_EXPR, we need the type of the function. */
4603 type = CALL_EXPR_FN (exp); /* pointer expression */
4604 type = TREE_TYPE (type); /* pointer type */
4605 type = TREE_TYPE (type); /* function type */
4606 decl_or_type = type;
4609 /* Check that the return value locations are the same. Like
4610 if we are returning floats on the 80387 register stack, we cannot
4611 make a sibcall from a function that doesn't return a float to a
4612 function that does or, conversely, from a function that does return
4613 a float to a function that doesn't; the necessary stack adjustment
4614 would not be executed. This is also the place we notice
4615 differences in the return value ABI. Note that it is ok for one
4616 of the functions to have void return type as long as the return
4617 value of the other is passed in a register. */
4618 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4619 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4621 if (STACK_REG_P (a) || STACK_REG_P (b))
4623 if (!rtx_equal_p (a, b))
4626 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4628 else if (!rtx_equal_p (a, b))
4633 /* The SYSV ABI has more call-clobbered registers;
4634 disallow sibcalls from MS to SYSV. */
4635 if (cfun->machine->call_abi == MS_ABI
4636 && ix86_function_type_abi (type) == SYSV_ABI)
4641 /* If this call is indirect, we'll need to be able to use a
4642 call-clobbered register for the address of the target function.
4643 Make sure that all such registers are not used for passing
4644 parameters. Note that DLLIMPORT functions are indirect. */
4646 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4648 if (ix86_function_regparm (type, NULL) >= 3)
4650 /* ??? Need to count the actual number of registers to be used,
4651 not the possible number of registers. Fix later. */
4657 /* Otherwise okay. That also includes certain types of indirect calls. */
4661 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
4662 and "sseregparm" calling convention attributes;
4663 arguments as in struct attribute_spec.handler. */
4666 ix86_handle_cconv_attribute (tree *node, tree name,
4668 int flags ATTRIBUTE_UNUSED,
4671 if (TREE_CODE (*node) != FUNCTION_TYPE
4672 && TREE_CODE (*node) != METHOD_TYPE
4673 && TREE_CODE (*node) != FIELD_DECL
4674 && TREE_CODE (*node) != TYPE_DECL)
4676 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4678 *no_add_attrs = true;
4682 /* Can combine regparm with all attributes but fastcall. */
4683 if (is_attribute_p ("regparm", name))
4687 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4689 error ("fastcall and regparm attributes are not compatible");
4692 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4694 error ("regparm and thiscall attributes are not compatible");
4697 cst = TREE_VALUE (args);
4698 if (TREE_CODE (cst) != INTEGER_CST)
4700 warning (OPT_Wattributes,
4701 "%qE attribute requires an integer constant argument",
4703 *no_add_attrs = true;
4705 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4707 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4709 *no_add_attrs = true;
4717 /* Do not warn when emulating the MS ABI. */
4718 if ((TREE_CODE (*node) != FUNCTION_TYPE
4719 && TREE_CODE (*node) != METHOD_TYPE)
4720 || ix86_function_type_abi (*node) != MS_ABI)
4721 warning (OPT_Wattributes, "%qE attribute ignored",
4723 *no_add_attrs = true;
4727 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4728 if (is_attribute_p ("fastcall", name))
4730 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4732 error ("fastcall and cdecl attributes are not compatible");
4734 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4736 error ("fastcall and stdcall attributes are not compatible");
4738 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4740 error ("fastcall and regparm attributes are not compatible");
4742 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4744 error ("fastcall and thiscall attributes are not compatible");
4748 /* Can combine stdcall with fastcall (redundant), regparm and sseregparm. */
4750 else if (is_attribute_p ("stdcall", name))
4752 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4754 error ("stdcall and cdecl attributes are not compatible");
4756 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4758 error ("stdcall and fastcall attributes are not compatible");
4760 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4762 error ("stdcall and thiscall attributes are not compatible");
4766 /* Can combine cdecl with regparm and sseregparm. */
4767 else if (is_attribute_p ("cdecl", name))
4769 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4771 error ("stdcall and cdecl attributes are not compatible");
4773 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4775 error ("fastcall and cdecl attributes are not compatible");
4777 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4779 error ("cdecl and thiscall attributes are not compatible");
4782 else if (is_attribute_p ("thiscall", name))
4784 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
4785 warning (OPT_Wattributes, "%qE attribute is used for non-class method",
4787 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4789 error ("stdcall and thiscall attributes are not compatible");
4791 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4793 error ("fastcall and thiscall attributes are not compatible");
4795 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4797 error ("cdecl and thiscall attributes are not compatible");
4801 /* Can combine sseregparm with all attributes. */
4806 /* Return 0 if the attributes for two types are incompatible, 1 if they
4807 are compatible, and 2 if they are nearly compatible (which causes a
4808 warning to be generated). */
4811 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4813 /* Check for mismatch of non-default calling convention. */
4814 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4816 if (TREE_CODE (type1) != FUNCTION_TYPE
4817 && TREE_CODE (type1) != METHOD_TYPE)
4820 /* Check for mismatched fastcall/regparm types. */
4821 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4822 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4823 || (ix86_function_regparm (type1, NULL)
4824 != ix86_function_regparm (type2, NULL)))
4827 /* Check for mismatched sseregparm types. */
4828 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4829 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4832 /* Check for mismatched thiscall types. */
4833 if (!lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type1))
4834 != !lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type2)))
4837 /* Check for mismatched return types (cdecl vs stdcall). */
4838 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4839 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
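/* An illustrative mismatch caught by the checks above (hypothetical
   code, 32-bit):

     typedef __attribute__ ((stdcall)) int (*stdcall_fn) (int);
     typedef int (*cdecl_fn) (int);

   Converting between these pointer types compares the attribute lists
   via this hook and yields 0 (incompatible), since caller and callee
   would disagree about who pops the argument words.  */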
4845 /* Return the regparm value for a function with the indicated TYPE and DECL.
4846 DECL may be NULL when calling function indirectly
4847 or considering a libcall. */
4850 ix86_function_regparm (const_tree type, const_tree decl)
4856 return (ix86_function_type_abi (type) == SYSV_ABI
4857 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4859 regparm = ix86_regparm;
4860 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4863 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4867 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4870 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
4873 /* Use register calling convention for local functions when possible. */
4875 && TREE_CODE (decl) == FUNCTION_DECL
4877 && !(profile_flag && !flag_fentry))
4879 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4880 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
4883 int local_regparm, globals = 0, regno;
4885 /* Make sure no regparm register is taken by a
4886 fixed register variable. */
4887 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4888 if (fixed_regs[local_regparm])
4891 /* We don't want to use regparm(3) for nested functions as
4892 these use a static chain pointer in the third argument. */
4893 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
4896 /* In 32-bit mode save a register for the split stack. */
4897 if (!TARGET_64BIT && local_regparm == 3 && flag_split_stack)
4900 /* Each fixed register usage increases register pressure,
4901 so fewer registers should be used for argument passing.
4902 This functionality can be overridden by an explicit
4904 for (regno = 0; regno <= DI_REG; regno++)
4905 if (fixed_regs[regno])
4909 = globals < local_regparm ? local_regparm - globals : 0;
4911 if (local_regparm > regparm)
4912 regparm = local_regparm;
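/* An illustrative effect of the regparm computation above: on 32-bit,

     int f (int a, int b, int c) __attribute__ ((regparm (3)));

   receives a, b and c in %eax, %edx and %ecx rather than on the
   stack; the local-function path above may choose the same convention
   automatically when no register is pinned by a fixed-register
   variable.  */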
4919 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4920 DFmode (2) arguments in SSE registers for a function with the
4921 indicated TYPE and DECL. DECL may be NULL when calling function
4922 indirectly or considering a libcall. Otherwise return 0. */
4925 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4927 gcc_assert (!TARGET_64BIT);
4929 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4930 by the sseregparm attribute. */
4931 if (TARGET_SSEREGPARM
4932 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4939 error ("calling %qD with attribute sseregparm without "
4940 "SSE/SSE2 enabled", decl);
4942 error ("calling %qT with attribute sseregparm without "
4943 "SSE/SSE2 enabled", type);
4951 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4952 (and DFmode for SSE2) arguments in SSE registers. */
4953 if (decl && TARGET_SSE_MATH && optimize
4954 && !(profile_flag && !flag_fentry))
4956 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4957 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4959 return TARGET_SSE2 ? 2 : 1;
4965 /* Return true if EAX is live at the start of the function. Used by
4966 ix86_expand_prologue to determine if we need special help before
4967 calling allocate_stack_worker. */
4970 ix86_eax_live_at_start_p (void)
4972 /* Cheat. Don't bother working forward from ix86_function_regparm
4973 to the function type to whether an actual argument is located in
4974 eax. Instead just look at cfg info, which is still close enough
4975 to correct at this point. This gives false positives for broken
4976 functions that might use uninitialized data that happens to be
4977 allocated in eax, but who cares? */
4978 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4981 /* Value is the number of bytes of arguments automatically
4982 popped when returning from a subroutine call.
4983 FUNDECL is the declaration node of the function (as a tree),
4984 FUNTYPE is the data type of the function (as a tree),
4985 or for a library call it is an identifier node for the subroutine name.
4986 SIZE is the number of bytes of arguments passed on the stack.
4988 On the 80386, the RTD insn may be used to pop them if the number
4989 of args is fixed, but if the number is variable then the caller
4990 must pop them all. RTD can't be used for library calls now
4991 because the library is compiled with the Unix compiler.
4992 Use of RTD is a selectable option, since it is incompatible with
4993 standard Unix calling sequences. If the option is not selected,
4994 the caller must always pop the args.
4996 The attribute stdcall is equivalent to RTD on a per module basis. */
4999 ix86_return_pops_args (tree fundecl, tree funtype, int size)
5003 /* None of the 64-bit ABIs pop arguments. */
5007 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
5009 /* Cdecl functions override -mrtd, and never pop the stack. */
5010 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
5012 /* Stdcall and fastcall functions will pop the stack if not variable-argument. */
5014 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
5015 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype))
5016 || lookup_attribute ("thiscall", TYPE_ATTRIBUTES (funtype)))
5019 if (rtd && ! stdarg_p (funtype))
5023 /* Lose any fake structure return argument if it is passed on the stack. */
5024 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
5025 && !KEEP_AGGREGATE_RETURN_POINTER)
5027 int nregs = ix86_function_regparm (funtype, fundecl);
5029 return GET_MODE_SIZE (Pmode);
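/* An illustrative example of the rules above (32-bit, hypothetical):

     int __attribute__ ((stdcall)) f (int a, int b);

   is a fixed-argument stdcall function, so ix86_return_pops_args
   returns 8 and the callee ends with "ret $8"; a plain cdecl callee
   ends with "ret" and leaves the 8 bytes for the caller to pop.  */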
5035 /* Argument support functions. */
5037 /* Return true when a register may be used to pass function parameters. */
5039 ix86_function_arg_regno_p (int regno)
5042 const int *parm_regs;
5047 return (regno < REGPARM_MAX
5048 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
5050 return (regno < REGPARM_MAX
5051 || (TARGET_MMX && MMX_REGNO_P (regno)
5052 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
5053 || (TARGET_SSE && SSE_REGNO_P (regno)
5054 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
5059 if (SSE_REGNO_P (regno) && TARGET_SSE)
5064 if (TARGET_SSE && SSE_REGNO_P (regno)
5065 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
5069 /* TODO: The function should depend on current function ABI but
5070 builtins.c would need updating then. Therefore we use the default ABI. */
5073 /* RAX is used as hidden argument to va_arg functions. */
5074 if (ix86_abi == SYSV_ABI && regno == AX_REG)
5077 if (ix86_abi == MS_ABI)
5078 parm_regs = x86_64_ms_abi_int_parameter_registers;
5080 parm_regs = x86_64_int_parameter_registers;
5081 for (i = 0; i < (ix86_abi == MS_ABI
5082 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
5083 if (regno == parm_regs[i])
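/* For reference (per the x86-64 psABI): the SYSV integer parameter
   registers scanned above are RDI, RSI, RDX, RCX, R8 and R9, while
   the MS ABI uses only RCX, RDX, R8 and R9.  */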
5088 /* Return true if we do not know how to pass TYPE solely in registers. */
5091 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
5093 if (must_pass_in_stack_var_size_or_pad (mode, type))
5096 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
5097 The layout_type routine is crafty and tries to trick us into passing
5098 currently unsupported vector types on the stack by using TImode. */
5099 return (!TARGET_64BIT && mode == TImode
5100 && type && TREE_CODE (type) != VECTOR_TYPE);
5103 /* Return the size, in bytes, of the area reserved for arguments passed
5104 in registers for the function represented by fndecl, per the calling ABI used. */
5107 ix86_reg_parm_stack_space (const_tree fndecl)
5109 enum calling_abi call_abi = SYSV_ABI;
5110 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
5111 call_abi = ix86_function_abi (fndecl);
5113 call_abi = ix86_function_type_abi (fndecl);
5114 if (call_abi == MS_ABI)
5119 /* Returns value SYSV_ABI or MS_ABI, dependent on fntype, specifying the call ABI used. */
5122 ix86_function_type_abi (const_tree fntype)
5124 if (TARGET_64BIT && fntype != NULL)
5126 enum calling_abi abi = ix86_abi;
5127 if (abi == SYSV_ABI)
5129 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
5132 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
5140 ix86_function_ms_hook_prologue (const_tree fn)
5142 if (fn && lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fn)))
5144 if (decl_function_context (fn) != NULL_TREE)
5145 error_at (DECL_SOURCE_LOCATION (fn),
5146 "ms_hook_prologue is not compatible with nested function");
5153 static enum calling_abi
5154 ix86_function_abi (const_tree fndecl)
5158 return ix86_function_type_abi (TREE_TYPE (fndecl));
5161 /* Returns value SYSV_ABI or MS_ABI, dependent on cfun, specifying the call ABI used. */
5164 ix86_cfun_abi (void)
5166 if (! cfun || ! TARGET_64BIT)
5168 return cfun->machine->call_abi;
5171 /* Write the extra assembler code needed to declare a function properly. */
5174 ix86_asm_output_function_label (FILE *asm_out_file, const char *fname,
5177 bool is_ms_hook = ix86_function_ms_hook_prologue (decl);
5181 int i, filler_count = (TARGET_64BIT ? 32 : 16);
5182 unsigned int filler_cc = 0xcccccccc;
5184 for (i = 0; i < filler_count; i += 4)
5185 fprintf (asm_out_file, ASM_LONG " %#x\n", filler_cc);
5188 ASM_OUTPUT_LABEL (asm_out_file, fname);
5190 /* Output magic byte marker, if hot-patch attribute is set. */
5195 /* leaq [%rsp + 0], %rsp */
5196 asm_fprintf (asm_out_file, ASM_BYTE
5197 "0x48, 0x8d, 0xa4, 0x24, 0x00, 0x00, 0x00, 0x00\n");
5201 /* movl.s %edi, %edi
5203 movl.s %esp, %ebp */
5204 asm_fprintf (asm_out_file, ASM_BYTE
5205 "0x8b, 0xff, 0x55, 0x8b, 0xec\n");
5211 extern void init_regs (void);
5213 /* Implementation of the call ABI switching target hook. Sets the
5214 call register sets specific to FNDECL. See also CONDITIONAL_REGISTER_USAGE
5215 for more details. */
5217 ix86_call_abi_override (const_tree fndecl)
5219 if (fndecl == NULL_TREE)
5220 cfun->machine->call_abi = ix86_abi;
5222 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
5225 /* The MS and SYSV ABIs have different sets of call-used registers. Avoid
5226 expensive re-initialization via init_regs each time we switch function
5227 context, since this is needed only during RTL expansion. */
5229 ix86_maybe_switch_abi (void)
5232 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
5236 /* Initialize a variable CUM of type CUMULATIVE_ARGS
5237 for a call to a function whose data type is FNTYPE.
5238 For a library call, FNTYPE is 0. */
5241 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
5242 tree fntype, /* tree ptr for function decl */
5243 rtx libname, /* SYMBOL_REF of library name or 0 */
5246 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
5247 memset (cum, 0, sizeof (*cum));
5250 cum->call_abi = ix86_function_abi (fndecl);
5252 cum->call_abi = ix86_function_type_abi (fntype);
5253 /* Set up the number of registers to use for passing arguments. */
5255 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
5256 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
5257 "or subtarget optimization implying it");
5258 cum->nregs = ix86_regparm;
5261 cum->nregs = (cum->call_abi == SYSV_ABI
5262 ? X86_64_REGPARM_MAX
5263 : X86_64_MS_REGPARM_MAX);
5267 cum->sse_nregs = SSE_REGPARM_MAX;
5270 cum->sse_nregs = (cum->call_abi == SYSV_ABI
5271 ? X86_64_SSE_REGPARM_MAX
5272 : X86_64_MS_SSE_REGPARM_MAX);
5276 cum->mmx_nregs = MMX_REGPARM_MAX;
5277 cum->warn_avx = true;
5278 cum->warn_sse = true;
5279 cum->warn_mmx = true;
5281 /* Because types might mismatch between caller and callee, we need to
5282 use the actual type of the function for local calls.
5283 FIXME: cgraph_analyze can be told to actually record if a function uses
5284 va_start, so for local functions maybe_vaarg can be made more aggressive.
5286 FIXME: once the type system is fixed, we won't need this code anymore. */
5288 fntype = TREE_TYPE (fndecl);
5289 cum->maybe_vaarg = (fntype
5290 ? (!prototype_p (fntype) || stdarg_p (fntype))
5295 /* If there are variable arguments, then we won't pass anything
5296 in registers in 32-bit mode. */
5297 if (stdarg_p (fntype))
5308 /* Use ecx and edx registers if function has fastcall attribute,
5309 else look for regparm information. */
5312 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
5315 cum->fastcall = 1; /* Same first register as in fastcall. */
5317 else if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
5323 cum->nregs = ix86_function_regparm (fntype, fndecl);
5326 /* Set up the number of SSE registers used for passing SFmode
5327 and DFmode arguments. Warn for mismatching ABI. */
5328 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
5332 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
5333 But in the case of vector types, it is some vector mode.
5335 When we have only some of our vector isa extensions enabled, then there
5336 are some modes for which vector_mode_supported_p is false. For these
5337 modes, the generic vector support in gcc will choose some non-vector mode
5338 in order to implement the type. By computing the natural mode, we'll
5339 select the proper ABI location for the operand and not depend on whatever
5340 the middle-end decides to do with these vector types.
5342 The middle-end can't deal with vector types > 16 bytes. In this
5343 case, we return the original mode and warn of the ABI change if CUM isn't NULL. */
5346 static enum machine_mode
5347 type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
5349 enum machine_mode mode = TYPE_MODE (type);
5351 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
5353 HOST_WIDE_INT size = int_size_in_bytes (type);
5354 if ((size == 8 || size == 16 || size == 32)
5355 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5356 && TYPE_VECTOR_SUBPARTS (type) > 1)
5358 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5360 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5361 mode = MIN_MODE_VECTOR_FLOAT;
5363 mode = MIN_MODE_VECTOR_INT;
5365 /* Get the mode which has this inner mode and number of units. */
5366 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5367 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5368 && GET_MODE_INNER (mode) == innermode)
5370 if (size == 32 && !TARGET_AVX)
5372 static bool warnedavx;
5379 warning (0, "AVX vector argument without AVX "
5380 "enabled changes the ABI");
5382 return TYPE_MODE (type);
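/* An illustrative run of the search above: for the hypothetical type

     typedef float v4sf __attribute__ ((vector_size (16)));

   the inner mode is SFmode with 4 units, so the loop walks the
   MIN_MODE_VECTOR_FLOAT chain until it reaches V4SFmode, even when
   the middle-end had fallen back to TImode for the type.  */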
5395 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5396 this may not agree with the mode that the type system has chosen for the
5397 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5398 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5401 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5406 if (orig_mode != BLKmode)
5407 tmp = gen_rtx_REG (orig_mode, regno);
5410 tmp = gen_rtx_REG (mode, regno);
5411 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5412 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
5418 /* x86-64 register passing implementation. See the x86-64 ABI for details. The goal
5419 of this code is to classify each 8 bytes of the incoming argument by register
5420 class and assign registers accordingly. */
5422 /* Return the union class of CLASS1 and CLASS2.
5423 See the x86-64 PS ABI for details. */
5425 static enum x86_64_reg_class
5426 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5428 /* Rule #1: If both classes are equal, this is the resulting class. */
5429 if (class1 == class2)
5432 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is the other class. */
5434 if (class1 == X86_64_NO_CLASS)
5436 if (class2 == X86_64_NO_CLASS)
5439 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5440 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5441 return X86_64_MEMORY_CLASS;
5443 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5444 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5445 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5446 return X86_64_INTEGERSI_CLASS;
5447 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5448 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5449 return X86_64_INTEGER_CLASS;
5451 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class, MEMORY is used. */
5453 if (class1 == X86_64_X87_CLASS
5454 || class1 == X86_64_X87UP_CLASS
5455 || class1 == X86_64_COMPLEX_X87_CLASS
5456 || class2 == X86_64_X87_CLASS
5457 || class2 == X86_64_X87UP_CLASS
5458 || class2 == X86_64_COMPLEX_X87_CLASS)
5459 return X86_64_MEMORY_CLASS;
5461 /* Rule #6: Otherwise class SSE is used. */
5462 return X86_64_SSE_CLASS;
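/* A worked example of the merge rules above: for

     struct s { int i; float f; };

   the single 8-byte word contains an int (X86_64_INTEGERSI_CLASS)
   and a float (X86_64_SSESF_CLASS); rule #4 merges them into
   X86_64_INTEGERSI_CLASS, so the whole struct travels in one
   general-purpose register.  */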
5465 /* Classify the argument of type TYPE and mode MODE.
5466 CLASSES will be filled by the register class used to pass each word
5467 of the operand. The number of words is returned. In case the parameter
5468 should be passed in memory, 0 is returned. As a special case for zero
5469 sized containers, classes[0] will be NO_CLASS and 1 is returned.
5471 BIT_OFFSET is used internally for handling records and specifies the
5472 offset, in bits modulo 256, to avoid overflow cases.
5474 See the x86-64 PS ABI for details.
5478 classify_argument (enum machine_mode mode, const_tree type,
5479 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5481 HOST_WIDE_INT bytes =
5482 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5483 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5485 /* Variable sized entities are always passed/returned in memory. */
5489 if (mode != VOIDmode
5490 && targetm.calls.must_pass_in_stack (mode, type))
5493 if (type && AGGREGATE_TYPE_P (type))
5497 enum x86_64_reg_class subclasses[MAX_CLASSES];
5499 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5503 for (i = 0; i < words; i++)
5504 classes[i] = X86_64_NO_CLASS;
5506 /* Zero-sized arrays or structures are NO_CLASS. We return 0 to
5507 signal memory class, so handle it as a special case. */
5510 classes[0] = X86_64_NO_CLASS;
5514 /* Classify each field of record and merge classes. */
5515 switch (TREE_CODE (type))
5518 /* And now merge the fields of structure. */
5519 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5521 if (TREE_CODE (field) == FIELD_DECL)
5525 if (TREE_TYPE (field) == error_mark_node)
5528 /* Bitfields are always classified as integer. Handle them
5529 early, since later code would consider them to be
5530 misaligned integers. */
5531 if (DECL_BIT_FIELD (field))
5533 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5534 i < ((int_bit_position (field) + (bit_offset % 64))
5535 + tree_low_cst (DECL_SIZE (field), 0)
5538 merge_classes (X86_64_INTEGER_CLASS,
5545 type = TREE_TYPE (field);
5547 /* Flexible array member is ignored. */
5548 if (TYPE_MODE (type) == BLKmode
5549 && TREE_CODE (type) == ARRAY_TYPE
5550 && TYPE_SIZE (type) == NULL_TREE
5551 && TYPE_DOMAIN (type) != NULL_TREE
5552 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5557 if (!warned && warn_psabi)
5560 inform (input_location,
5561 "The ABI of passing struct with"
5562 " a flexible array member has"
5563 " changed in GCC 4.4");
5567 num = classify_argument (TYPE_MODE (type), type,
5569 (int_bit_position (field)
5570 + bit_offset) % 256);
5573 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5574 for (i = 0; i < num && (i + pos) < words; i++)
5576 merge_classes (subclasses[i], classes[i + pos]);
5583 /* Arrays are handled as small records. */
5586 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5587 TREE_TYPE (type), subclasses, bit_offset);
5591 /* The partial classes are now full classes. */
5592 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5593 subclasses[0] = X86_64_SSE_CLASS;
5594 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5595 && !((bit_offset % 64) == 0 && bytes == 4))
5596 subclasses[0] = X86_64_INTEGER_CLASS;
5598 for (i = 0; i < words; i++)
5599 classes[i] = subclasses[i % num];
5604 case QUAL_UNION_TYPE:
5605 /* Unions are similar to RECORD_TYPE but offset is always 0.
5607 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5609 if (TREE_CODE (field) == FIELD_DECL)
5613 if (TREE_TYPE (field) == error_mark_node)
5616 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5617 TREE_TYPE (field), subclasses,
5621 for (i = 0; i < num; i++)
5622 classes[i] = merge_classes (subclasses[i], classes[i]);
5633 /* When size > 16 bytes, if the first one isn't
5634 X86_64_SSE_CLASS or any other ones aren't
5635 X86_64_SSEUP_CLASS, everything should be passed in memory. */
5637 if (classes[0] != X86_64_SSE_CLASS)
5640 for (i = 1; i < words; i++)
5641 if (classes[i] != X86_64_SSEUP_CLASS)
5645 /* Final merger cleanup. */
5646 for (i = 0; i < words; i++)
5648 /* If one class is MEMORY, everything should be passed in memory. */
5650 if (classes[i] == X86_64_MEMORY_CLASS)
5653 /* The X86_64_SSEUP_CLASS should always be preceded by
5654 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5655 if (classes[i] == X86_64_SSEUP_CLASS
5656 && classes[i - 1] != X86_64_SSE_CLASS
5657 && classes[i - 1] != X86_64_SSEUP_CLASS)
5659 /* The first one should never be X86_64_SSEUP_CLASS. */
5660 gcc_assert (i != 0);
5661 classes[i] = X86_64_SSE_CLASS;
5664 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5665 everything should be passed in memory. */
5666 if (classes[i] == X86_64_X87UP_CLASS
5667 && (classes[i - 1] != X86_64_X87_CLASS))
5671 /* The first one should never be X86_64_X87UP_CLASS. */
5672 gcc_assert (i != 0);
5673 if (!warned && warn_psabi)
5676 inform (input_location,
5677 "The ABI of passing union with long double"
5678 " has changed in GCC 4.4");
5686 /* Compute the alignment needed. We align all types to natural boundaries,
5687 with the exception of XFmode, which is aligned to 64 bits. */
5688 if (mode != VOIDmode && mode != BLKmode)
5690 int mode_alignment = GET_MODE_BITSIZE (mode);
5693 mode_alignment = 128;
5694 else if (mode == XCmode)
5695 mode_alignment = 256;
5696 if (COMPLEX_MODE_P (mode))
5697 mode_alignment /= 2;
5698 /* Misaligned fields are always returned in memory. */
5699 if (bit_offset % mode_alignment)
5703 /* For V1xx modes, just use the base mode. */
5704 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
5705 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5706 mode = GET_MODE_INNER (mode);
5708 /* Classification of atomic types. */
5713 classes[0] = X86_64_SSE_CLASS;
5716 classes[0] = X86_64_SSE_CLASS;
5717 classes[1] = X86_64_SSEUP_CLASS;
5727 int size = (bit_offset % 64) + (int) GET_MODE_BITSIZE (mode);
5731 classes[0] = X86_64_INTEGERSI_CLASS;
5734 else if (size <= 64)
5736 classes[0] = X86_64_INTEGER_CLASS;
5739 else if (size <= 64+32)
5741 classes[0] = X86_64_INTEGER_CLASS;
5742 classes[1] = X86_64_INTEGERSI_CLASS;
5745 else if (size <= 64+64)
5747 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5755 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5759 /* OImode shouldn't be used directly. */
5764 if (!(bit_offset % 64))
5765 classes[0] = X86_64_SSESF_CLASS;
5767 classes[0] = X86_64_SSE_CLASS;
5770 classes[0] = X86_64_SSEDF_CLASS;
5773 classes[0] = X86_64_X87_CLASS;
5774 classes[1] = X86_64_X87UP_CLASS;
5777 classes[0] = X86_64_SSE_CLASS;
5778 classes[1] = X86_64_SSEUP_CLASS;
5781 classes[0] = X86_64_SSE_CLASS;
5782 if (!(bit_offset % 64))
5788 if (!warned && warn_psabi)
5791 inform (input_location,
5792 "The ABI of passing structure with complex float"
5793 " member has changed in GCC 4.4");
5795 classes[1] = X86_64_SSESF_CLASS;
5799 classes[0] = X86_64_SSEDF_CLASS;
5800 classes[1] = X86_64_SSEDF_CLASS;
5803 classes[0] = X86_64_COMPLEX_X87_CLASS;
5806 /* These modes are larger than 16 bytes. */
5814 classes[0] = X86_64_SSE_CLASS;
5815 classes[1] = X86_64_SSEUP_CLASS;
5816 classes[2] = X86_64_SSEUP_CLASS;
5817 classes[3] = X86_64_SSEUP_CLASS;
5825 classes[0] = X86_64_SSE_CLASS;
5826 classes[1] = X86_64_SSEUP_CLASS;
5834 classes[0] = X86_64_SSE_CLASS;
5840 gcc_assert (VECTOR_MODE_P (mode));
5845 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5847 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5848 classes[0] = X86_64_INTEGERSI_CLASS;
5850 classes[0] = X86_64_INTEGER_CLASS;
5851 classes[1] = X86_64_INTEGER_CLASS;
5852 return 1 + (bytes > 8);
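/* A worked example (hypothetical type, not part of this file):
   "struct { int i; float f; double d; }" spans two eightbytes. In the
   first, the INTEGERSI of I merges with the SSE of F (F sits at bit
   offset 32, so it is not SSESF) to give INTEGER by rule #4; the
   second holds only D and stays SSEDF. classify_argument therefore
   fills classes[] with { INTEGER, SSEDF } and returns 2, and
   examine_argument below reports one integer and one SSE register.  */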
5856 /* Examine the argument and return the number of registers required in each
5857 class. Return 0 iff the parameter should be passed in memory. */
5859 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5860 int *int_nregs, int *sse_nregs)
5862 enum x86_64_reg_class regclass[MAX_CLASSES];
5863 int n = classify_argument (mode, type, regclass, 0);
5869 for (n--; n >= 0; n--)
5870 switch (regclass[n])
5872 case X86_64_INTEGER_CLASS:
5873 case X86_64_INTEGERSI_CLASS:
5876 case X86_64_SSE_CLASS:
5877 case X86_64_SSESF_CLASS:
5878 case X86_64_SSEDF_CLASS:
5881 case X86_64_NO_CLASS:
5882 case X86_64_SSEUP_CLASS:
5884 case X86_64_X87_CLASS:
5885 case X86_64_X87UP_CLASS:
5889 case X86_64_COMPLEX_X87_CLASS:
5890 return in_return ? 2 : 0;
5891 case X86_64_MEMORY_CLASS:
5897 /* Construct the container for the argument used by the GCC interface. See
5898 FUNCTION_ARG for the detailed description. */
5901 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5902 const_tree type, int in_return, int nintregs, int nsseregs,
5903 const int *intreg, int sse_regno)
5905 /* The following static flags record whether each error below has already been issued. */
5906 static bool issued_sse_arg_error;
5907 static bool issued_sse_ret_error;
5908 static bool issued_x87_ret_error;
5910 enum machine_mode tmpmode;
5912 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5913 enum x86_64_reg_class regclass[MAX_CLASSES];
5917 int needed_sseregs, needed_intregs;
5918 rtx exp[MAX_CLASSES];
5921 n = classify_argument (mode, type, regclass, 0);
5924 if (!examine_argument (mode, type, in_return, &needed_intregs,
5927 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5930 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5931 some less clueful developer tries to use floating-point anyway. */
5932 if (needed_sseregs && !TARGET_SSE)
5936 if (!issued_sse_ret_error)
5938 error ("SSE register return with SSE disabled");
5939 issued_sse_ret_error = true;
5942 else if (!issued_sse_arg_error)
5944 error ("SSE register argument with SSE disabled");
5945 issued_sse_arg_error = true;
5950 /* Likewise, error if the ABI requires us to return values in the
5951 x87 registers and the user specified -mno-80387. */
5952 if (!TARGET_80387 && in_return)
5953 for (i = 0; i < n; i++)
5954 if (regclass[i] == X86_64_X87_CLASS
5955 || regclass[i] == X86_64_X87UP_CLASS
5956 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5958 if (!issued_x87_ret_error)
5960 error ("x87 register return with x87 disabled");
5961 issued_x87_ret_error = true;
5966 /* First construct simple cases. Avoid SCmode, since we want to use
5967 a single register to pass this type. */
5968 if (n == 1 && mode != SCmode)
5969 switch (regclass[0])
5971 case X86_64_INTEGER_CLASS:
5972 case X86_64_INTEGERSI_CLASS:
5973 return gen_rtx_REG (mode, intreg[0]);
5974 case X86_64_SSE_CLASS:
5975 case X86_64_SSESF_CLASS:
5976 case X86_64_SSEDF_CLASS:
5977 if (mode != BLKmode)
5978 return gen_reg_or_parallel (mode, orig_mode,
5979 SSE_REGNO (sse_regno));
5981 case X86_64_X87_CLASS:
5982 case X86_64_COMPLEX_X87_CLASS:
5983 return gen_rtx_REG (mode, FIRST_STACK_REG);
5984 case X86_64_NO_CLASS:
5985 /* Zero sized array, struct or class. */
5990 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5991 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5992 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5994 && regclass[0] == X86_64_SSE_CLASS
5995 && regclass[1] == X86_64_SSEUP_CLASS
5996 && regclass[2] == X86_64_SSEUP_CLASS
5997 && regclass[3] == X86_64_SSEUP_CLASS
5999 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
6002 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
6003 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
6004 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
6005 && regclass[1] == X86_64_INTEGER_CLASS
6006 && (mode == CDImode || mode == TImode || mode == TFmode)
6007 && intreg[0] + 1 == intreg[1])
6008 return gen_rtx_REG (mode, intreg[0]);
6010 /* Otherwise figure out the entries of the PARALLEL. */
6011 for (i = 0; i < n; i++)
6015 switch (regclass[i])
6017 case X86_64_NO_CLASS:
6019 case X86_64_INTEGER_CLASS:
6020 case X86_64_INTEGERSI_CLASS:
6021 /* Merge TImodes on aligned occasions here too. */
6022 if (i * 8 + 8 > bytes)
6023 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
6024 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
6028 /* We've requested 24 bytes we don't have a mode for. Use DImode. */
6029 if (tmpmode == BLKmode)
6031 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6032 gen_rtx_REG (tmpmode, *intreg),
6036 case X86_64_SSESF_CLASS:
6037 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6038 gen_rtx_REG (SFmode,
6039 SSE_REGNO (sse_regno)),
6043 case X86_64_SSEDF_CLASS:
6044 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6045 gen_rtx_REG (DFmode,
6046 SSE_REGNO (sse_regno)),
6050 case X86_64_SSE_CLASS:
6058 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
6068 && regclass[1] == X86_64_SSEUP_CLASS
6069 && regclass[2] == X86_64_SSEUP_CLASS
6070 && regclass[3] == X86_64_SSEUP_CLASS);
6077 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6078 gen_rtx_REG (tmpmode,
6079 SSE_REGNO (sse_regno)),
6088 /* Empty aligned struct, union or class. */
6092 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
6093 for (i = 0; i < nexps; i++)
6094 XVECEXP (ret, 0, i) = exp [i];
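/* For the two-eightbyte struct example above, the loop builds a
   container along these lines (register choices illustrative):

     (parallel [(expr_list (reg:DI di) (const_int 0))
                (expr_list (reg:DF xmm0) (const_int 8))])

   i.e. each EXPR_LIST pairs the register carrying a piece with the
   byte offset of that piece within the argument.  */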
6098 /* Update the data in CUM to advance over an argument of mode MODE
6099 and data type TYPE. (TYPE is null for libcalls where that information
6100 may not be available.) */
6103 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6104 const_tree type, HOST_WIDE_INT bytes,
6105 HOST_WIDE_INT words)
6121 cum->words += words;
6122 cum->nregs -= words;
6123 cum->regno += words;
6125 if (cum->nregs <= 0)
6133 /* OImode shouldn't be used directly. */
6137 if (cum->float_in_sse < 2)
6140 if (cum->float_in_sse < 1)
6157 if (!type || !AGGREGATE_TYPE_P (type))
6159 cum->sse_words += words;
6160 cum->sse_nregs -= 1;
6161 cum->sse_regno += 1;
6162 if (cum->sse_nregs <= 0)
6176 if (!type || !AGGREGATE_TYPE_P (type))
6178 cum->mmx_words += words;
6179 cum->mmx_nregs -= 1;
6180 cum->mmx_regno += 1;
6181 if (cum->mmx_nregs <= 0)
6192 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6193 const_tree type, HOST_WIDE_INT words, bool named)
6195 int int_nregs, sse_nregs;
6197 /* Unnamed 256bit vector mode parameters are passed on the stack. */
6198 if (!named && VALID_AVX256_REG_MODE (mode))
6201 if (examine_argument (mode, type, 0, &int_nregs, &sse_nregs)
6202 && sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
6204 cum->nregs -= int_nregs;
6205 cum->sse_nregs -= sse_nregs;
6206 cum->regno += int_nregs;
6207 cum->sse_regno += sse_nregs;
6211 int align = ix86_function_arg_boundary (mode, type) / BITS_PER_WORD;
6212 cum->words = (cum->words + align - 1) & ~(align - 1);
6213 cum->words += words;
6218 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
6219 HOST_WIDE_INT words)
6221 /* Otherwise, this should be passed indirectly. */
6222 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
6224 cum->words += words;
6232 /* Update the data in CUM to advance over an argument of mode MODE and
6233 data type TYPE. (TYPE is null for libcalls where that information
6234 may not be available.) */
6237 ix86_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6238 const_tree type, bool named)
6240 HOST_WIDE_INT bytes, words;
6242 if (mode == BLKmode)
6243 bytes = int_size_in_bytes (type);
6245 bytes = GET_MODE_SIZE (mode);
6246 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6249 mode = type_natural_mode (type, NULL);
6251 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6252 function_arg_advance_ms_64 (cum, bytes, words);
6253 else if (TARGET_64BIT)
6254 function_arg_advance_64 (cum, mode, type, words, named);
6256 function_arg_advance_32 (cum, mode, type, bytes, words);
6259 /* Define where to put the arguments to a function.
6260 Value is zero to push the argument on the stack,
6261 or a hard register in which to store the argument.
6263 MODE is the argument's machine mode.
6264 TYPE is the data type of the argument (as a tree).
6265 This is null for libcalls where that information may not be available.
6267 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6268 the preceding args and about the function being called.
6269 NAMED is nonzero if this argument is a named parameter
6270 (otherwise it is an extra parameter matching an ellipsis). */
6273 function_arg_32 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6274 enum machine_mode orig_mode, const_tree type,
6275 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
6277 static bool warnedsse, warnedmmx;
6279 /* Avoid the AL settings for the Unix64 ABI. */
6280 if (mode == VOIDmode)
6296 if (words <= cum->nregs)
6298 int regno = cum->regno;
6300 /* Fastcall allocates the first two DWORD (SImode) or
6301 smaller arguments to ECX and EDX if it isn't an aggregate type. */
6307 || (type && AGGREGATE_TYPE_P (type)))
6310 /* ECX, not EAX, is the first allocated register. */
6311 if (regno == AX_REG)
6314 return gen_rtx_REG (mode, regno);
6319 if (cum->float_in_sse < 2)
6322 if (cum->float_in_sse < 1)
6326 /* In 32bit, we pass TImode in xmm registers. */
6333 if (!type || !AGGREGATE_TYPE_P (type))
6335 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
6338 warning (0, "SSE vector argument without SSE enabled "
6342 return gen_reg_or_parallel (mode, orig_mode,
6343 cum->sse_regno + FIRST_SSE_REG);
6348 /* OImode shouldn't be used directly. */
6357 if (!type || !AGGREGATE_TYPE_P (type))
6360 return gen_reg_or_parallel (mode, orig_mode,
6361 cum->sse_regno + FIRST_SSE_REG);
6371 if (!type || !AGGREGATE_TYPE_P (type))
6373 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6376 warning (0, "MMX vector argument without MMX enabled "
6380 return gen_reg_or_parallel (mode, orig_mode,
6381 cum->mmx_regno + FIRST_MMX_REG);
6390 function_arg_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6391 enum machine_mode orig_mode, const_tree type, bool named)
6393 /* Handle a hidden AL argument containing the number of registers
6394 for varargs x86-64 functions. */
6395 if (mode == VOIDmode)
6396 return GEN_INT (cum->maybe_vaarg
6397 ? (cum->sse_nregs < 0
6398 ? X86_64_SSE_REGPARM_MAX
6413 /* Unnamed 256bit vector mode parameters are passed on the stack. */
6419 return construct_container (mode, orig_mode, type, 0, cum->nregs,
6421 &x86_64_int_parameter_registers [cum->regno],
6426 function_arg_ms_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6427 enum machine_mode orig_mode, bool named,
6428 HOST_WIDE_INT bytes)
6432 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
6433 We use the value -2 to specify that the current function call is MSABI. */
6434 if (mode == VOIDmode)
6435 return GEN_INT (-2);
6437 /* If we've run out of registers, it goes on the stack. */
6438 if (cum->nregs == 0)
6441 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6443 /* Only floating point modes are passed in anything but integer regs. */
6444 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6447 regno = cum->regno + FIRST_SSE_REG;
6452 /* Unnamed floating parameters are passed in both the
6453 SSE and integer registers. */
6454 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6455 t2 = gen_rtx_REG (mode, regno);
6456 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6457 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6458 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
6461 /* Handle aggregate types passed in registers. */
6462 if (orig_mode == BLKmode)
6464 if (bytes > 0 && bytes <= 8)
6465 mode = (bytes > 4 ? DImode : SImode);
6466 if (mode == BLKmode)
6470 return gen_reg_or_parallel (mode, orig_mode, regno);
6473 /* Return where to put the arguments to a function.
6474 Return zero to push the argument on the stack, or a hard register in which to store the argument.
6476 MODE is the argument's machine mode. TYPE is the data type of the
6477 argument. It is null for libcalls where that information may not be
6478 available. CUM gives information about the preceding args and about
6479 the function being called. NAMED is nonzero if this argument is a
6480 named parameter (otherwise it is an extra parameter matching an
6484 ix86_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
6485 const_tree type, bool named)
6487 enum machine_mode mode = omode;
6488 HOST_WIDE_INT bytes, words;
6490 if (mode == BLKmode)
6491 bytes = int_size_in_bytes (type);
6493 bytes = GET_MODE_SIZE (mode);
6494 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6496 /* To simplify the code below, represent vector types with a vector mode
6497 even if MMX/SSE are not active. */
6498 if (type && TREE_CODE (type) == VECTOR_TYPE)
6499 mode = type_natural_mode (type, cum);
6501 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6502 return function_arg_ms_64 (cum, mode, omode, named, bytes);
6503 else if (TARGET_64BIT)
6504 return function_arg_64 (cum, mode, omode, type, named);
6506 return function_arg_32 (cum, mode, omode, type, bytes, words);
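/* Putting the dispatch together for a hypothetical call
   "void f (int a, double b)": under the SYSV_ABI path A goes in %edi
   and B in %xmm0, while under MS_ABI A goes in %ecx and B in %xmm1,
   since the Windows convention numbers integer and SSE argument
   slots jointly.  */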
6509 /* A C expression that indicates when an argument must be passed by
6510 reference. If nonzero for an argument, a copy of that argument is
6511 made in memory and a pointer to the argument is passed instead of
6512 the argument itself. The pointer is passed in whatever way is
6513 appropriate for passing a pointer to that type. */
6516 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6517 enum machine_mode mode ATTRIBUTE_UNUSED,
6518 const_tree type, bool named ATTRIBUTE_UNUSED)
6520 /* See Windows x64 Software Convention. */
6521 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6523 int msize = (int) GET_MODE_SIZE (mode);
6526 /* Arrays are passed by reference. */
6527 if (TREE_CODE (type) == ARRAY_TYPE)
6530 if (AGGREGATE_TYPE_P (type))
6532 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6533 are passed by reference. */
6534 msize = int_size_in_bytes (type);
6538 /* __m128 is passed by reference. */
6540 case 1: case 2: case 4: case 8:
6546 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
6552 /* Return true when TYPE should be 128bit aligned for the 32bit argument passing ABI. */
6555 contains_aligned_value_p (const_tree type)
6557 enum machine_mode mode = TYPE_MODE (type);
6558 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6562 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6564 if (TYPE_ALIGN (type) < 128)
6567 if (AGGREGATE_TYPE_P (type))
6569 /* Walk the aggregates recursively. */
6570 switch (TREE_CODE (type))
6574 case QUAL_UNION_TYPE:
6578 /* Walk all the structure fields. */
6579 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6581 if (TREE_CODE (field) == FIELD_DECL
6582 && contains_aligned_value_p (TREE_TYPE (field)))
6589 /* Just in case some language passes arrays by value. */
6590 if (contains_aligned_value_p (TREE_TYPE (type)))
6601 /* Gives the alignment boundary, in bits, of an argument with the
6602 specified mode and type. */
6605 ix86_function_arg_boundary (enum machine_mode mode, const_tree type)
6610 /* Since the main variant type is used for the call, convert TYPE
6611 to its main variant. */
6612 type = TYPE_MAIN_VARIANT (type);
6613 align = TYPE_ALIGN (type);
6616 align = GET_MODE_ALIGNMENT (mode);
6617 if (align < PARM_BOUNDARY)
6618 align = PARM_BOUNDARY;
6619 /* In 32bit, only _Decimal128 and __float128 are aligned to their
6620 natural boundaries. */
6621 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
6623 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
6624 make an exception for SSE modes since these require 128bit alignment.
6627 The handling here differs from field_alignment. ICC aligns MMX
6628 arguments to 4 byte boundaries, while structure fields are aligned
6629 to 8 byte boundaries. */
6632 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6633 align = PARM_BOUNDARY;
6637 if (!contains_aligned_value_p (type))
6638 align = PARM_BOUNDARY;
6641 if (align > BIGGEST_ALIGNMENT)
6642 align = BIGGEST_ALIGNMENT;
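/* Example: on a 32bit target a plain double stays at the 4 byte
   PARM_BOUNDARY, while an SSE mode such as V4SF, or an aggregate
   containing one (see contains_aligned_value_p above), keeps its
   128bit boundary, subject to the BIGGEST_ALIGNMENT clamp.  */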
6646 /* Return true if N is a possible register number of function value. */
6649 ix86_function_value_regno_p (const unsigned int regno)
6656 case FIRST_FLOAT_REG:
6657 /* TODO: The function should depend on current function ABI but
6658 builtins.c would need updating then. Therefore we use the default ABI. */
6660 if (TARGET_64BIT && ix86_abi == MS_ABI)
6662 return TARGET_FLOAT_RETURNS_IN_80387;
6668 if (TARGET_MACHO || TARGET_64BIT)
6676 /* Define how to find the value returned by a function.
6677 VALTYPE is the data type of the value (as a tree).
6678 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6679 otherwise, FUNC is 0. */
6682 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6683 const_tree fntype, const_tree fn)
6687 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
6688 we normally prevent this case when mmx is not available. However
6689 some ABIs may require the result to be returned like DImode. */
6690 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6691 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
6693 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
6694 we prevent this case when sse is not available. However some ABIs
6695 may require the result to be returned like integer TImode. */
6696 else if (mode == TImode
6697 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6698 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6700 /* 32-byte vector modes in %ymm0. */
6701 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6702 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6704 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6705 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6706 regno = FIRST_FLOAT_REG;
6708 /* Most things go in %eax. */
6711 /* Override FP return register with %xmm0 for local functions when
6712 SSE math is enabled or for functions with sseregparm attribute. */
6713 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6715 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6716 if ((sse_level >= 1 && mode == SFmode)
6717 || (sse_level == 2 && mode == DFmode))
6718 regno = FIRST_SSE_REG;
6721 /* OImode shouldn't be used directly. */
6722 gcc_assert (mode != OImode);
6724 return gen_rtx_REG (orig_mode, regno);
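/* Example: on ia32 a function returning float normally uses %st(0);
   for a local function compiled with SSE math, or one carrying the
   sseregparm attribute, the override above selects %xmm0 instead,
   and an 8-byte vector result lands in %mm0 when MMX is enabled.  */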
6728 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6733 /* Handle libcalls, which don't provide a type node. */
6734 if (valtype == NULL)
6746 return gen_rtx_REG (mode, FIRST_SSE_REG);
6749 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6753 return gen_rtx_REG (mode, AX_REG);
6757 ret = construct_container (mode, orig_mode, valtype, 1,
6758 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6759 x86_64_int_return_registers, 0);
6761 /* For zero sized structures, construct_container returns NULL, but we
6762 need to keep the rest of the compiler happy by returning a meaningful value. */
6764 ret = gen_rtx_REG (orig_mode, AX_REG);
6770 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6772 unsigned int regno = AX_REG;
6776 switch (GET_MODE_SIZE (mode))
6779 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6780 && !COMPLEX_MODE_P (mode))
6781 regno = FIRST_SSE_REG;
6785 if (mode == SFmode || mode == DFmode)
6786 regno = FIRST_SSE_REG;
6792 return gen_rtx_REG (orig_mode, regno);
6796 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6797 enum machine_mode orig_mode, enum machine_mode mode)
6799 const_tree fn, fntype;
6802 if (fntype_or_decl && DECL_P (fntype_or_decl))
6803 fn = fntype_or_decl;
6804 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6806 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6807 return function_value_ms_64 (orig_mode, mode);
6808 else if (TARGET_64BIT)
6809 return function_value_64 (orig_mode, mode, valtype);
6811 return function_value_32 (orig_mode, mode, fntype, fn);
6815 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6816 bool outgoing ATTRIBUTE_UNUSED)
6818 enum machine_mode mode, orig_mode;
6820 orig_mode = TYPE_MODE (valtype);
6821 mode = type_natural_mode (valtype, NULL);
6822 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6826 ix86_libcall_value (enum machine_mode mode)
6828 return ix86_function_value_1 (NULL, NULL, mode, mode);
6831 /* Return true iff type is returned in memory. */
6833 static bool ATTRIBUTE_UNUSED
6834 return_in_memory_32 (const_tree type, enum machine_mode mode)
6838 if (mode == BLKmode)
6841 size = int_size_in_bytes (type);
6843 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6846 if (VECTOR_MODE_P (mode) || mode == TImode)
6848 /* User-created vectors small enough to fit in EAX. */
6852 /* MMX/3dNow values are returned in MM0,
6853 except when it doesn't exist or the ABI prescribes otherwise. */
6855 return !TARGET_MMX || TARGET_VECT8_RETURNS;
6857 /* SSE values are returned in XMM0, except when it doesn't exist. */
6861 /* AVX values are returned in YMM0, except when it doesn't exist. */
6872 /* OImode shouldn't be used directly. */
6873 gcc_assert (mode != OImode);
6878 static bool ATTRIBUTE_UNUSED
6879 return_in_memory_64 (const_tree type, enum machine_mode mode)
6881 int needed_intregs, needed_sseregs;
6882 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6885 static bool ATTRIBUTE_UNUSED
6886 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6888 HOST_WIDE_INT size = int_size_in_bytes (type);
6890 /* __m128 is returned in xmm0. */
6891 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6892 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6895 /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes. */
6896 return size != 1 && size != 2 && size != 4 && size != 8;
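/* Example: under MS_ABI a 16-byte __m128 result comes back in xmm0
   and an 8-byte struct in RAX, while a hypothetical 12-byte struct
   has a size other than 1, 2, 4 or 8 and is therefore returned in
   memory through a hidden pointer.  */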
6900 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6902 #ifdef SUBTARGET_RETURN_IN_MEMORY
6903 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6905 const enum machine_mode mode = type_natural_mode (type, NULL);
6909 if (ix86_function_type_abi (fntype) == MS_ABI)
6910 return return_in_memory_ms_64 (type, mode);
6912 return return_in_memory_64 (type, mode);
6915 return return_in_memory_32 (type, mode);
6919 /* When returning SSE vector types, we have a choice of either
6920 (1) being ABI incompatible with a -march switch, or
6921 (2) generating an error.
6922 Given no good solution, I think the safest thing is one warning.
6923 The user won't be able to use -Werror, but....
6925 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6926 called in response to actually generating a caller or callee that
6927 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6928 via aggregate_value_p for general type probing from tree-ssa. */
6931 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6933 static bool warnedsse, warnedmmx;
6935 if (!TARGET_64BIT && type)
6937 /* Look at the return type of the function, not the function type. */
6938 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6940 if (!TARGET_SSE && !warnedsse)
6943 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6946 warning (0, "SSE vector return without SSE enabled "
6951 if (!TARGET_MMX && !warnedmmx)
6953 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6956 warning (0, "MMX vector return without MMX enabled "
6966 /* Create the va_list data type. */
6968 /* Returns the calling convention specific va_list data type.
6969 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6972 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6974 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6976 /* For i386 we use a plain pointer to the argument area. */
6977 if (!TARGET_64BIT || abi == MS_ABI)
6978 return build_pointer_type (char_type_node);
6980 record = lang_hooks.types.make_type (RECORD_TYPE);
6981 type_decl = build_decl (BUILTINS_LOCATION,
6982 TYPE_DECL, get_identifier ("__va_list_tag"), record);
6984 f_gpr = build_decl (BUILTINS_LOCATION,
6985 FIELD_DECL, get_identifier ("gp_offset"),
6986 unsigned_type_node);
6987 f_fpr = build_decl (BUILTINS_LOCATION,
6988 FIELD_DECL, get_identifier ("fp_offset"),
6989 unsigned_type_node);
6990 f_ovf = build_decl (BUILTINS_LOCATION,
6991 FIELD_DECL, get_identifier ("overflow_arg_area"),
6993 f_sav = build_decl (BUILTINS_LOCATION,
6994 FIELD_DECL, get_identifier ("reg_save_area"),
6997 va_list_gpr_counter_field = f_gpr;
6998 va_list_fpr_counter_field = f_fpr;
7000 DECL_FIELD_CONTEXT (f_gpr) = record;
7001 DECL_FIELD_CONTEXT (f_fpr) = record;
7002 DECL_FIELD_CONTEXT (f_ovf) = record;
7003 DECL_FIELD_CONTEXT (f_sav) = record;
7005 TREE_CHAIN (record) = type_decl;
7006 TYPE_NAME (record) = type_decl;
7007 TYPE_FIELDS (record) = f_gpr;
7008 DECL_CHAIN (f_gpr) = f_fpr;
7009 DECL_CHAIN (f_fpr) = f_ovf;
7010 DECL_CHAIN (f_ovf) = f_sav;
7012 layout_type (record);
7014 /* The correct type is an array type of one element. */
7015 return build_array_type (record, build_index_type (size_zero_node));
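/* The record built above matches the familiar SysV va_list layout,
   sketched here in C for reference:

     typedef struct __va_list_tag {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __builtin_va_list[1];

   gp_offset and fp_offset index into reg_save_area, and
   overflow_arg_area walks the stack portion of the arguments.  */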
7018 /* Set up the builtin va_list data type and for 64-bit the additional
7019 calling convention specific va_list data types. */
7022 ix86_build_builtin_va_list (void)
7024 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
7026 /* Initialize abi specific va_list builtin types. */
7030 if (ix86_abi == MS_ABI)
7032 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
7033 if (TREE_CODE (t) != RECORD_TYPE)
7034 t = build_variant_type_copy (t);
7035 sysv_va_list_type_node = t;
7040 if (TREE_CODE (t) != RECORD_TYPE)
7041 t = build_variant_type_copy (t);
7042 sysv_va_list_type_node = t;
7044 if (ix86_abi != MS_ABI)
7046 t = ix86_build_builtin_va_list_abi (MS_ABI);
7047 if (TREE_CODE (t) != RECORD_TYPE)
7048 t = build_variant_type_copy (t);
7049 ms_va_list_type_node = t;
7054 if (TREE_CODE (t) != RECORD_TYPE)
7055 t = build_variant_type_copy (t);
7056 ms_va_list_type_node = t;
7063 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
7066 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
7072 /* GPR size of varargs save area. */
7073 if (cfun->va_list_gpr_size)
7074 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
7076 ix86_varargs_gpr_size = 0;
7078 /* FPR size of varargs save area. We don't need it if we don't pass
7079 anything in SSE registers. */
7080 if (TARGET_SSE && cfun->va_list_fpr_size)
7081 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
7083 ix86_varargs_fpr_size = 0;
7085 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
7088 save_area = frame_pointer_rtx;
7089 set = get_varargs_alias_set ();
7091 max = cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
7092 if (max > X86_64_REGPARM_MAX)
7093 max = X86_64_REGPARM_MAX;
7095 for (i = cum->regno; i < max; i++)
7097 mem = gen_rtx_MEM (Pmode,
7098 plus_constant (save_area, i * UNITS_PER_WORD));
7099 MEM_NOTRAP_P (mem) = 1;
7100 set_mem_alias_set (mem, set);
7101 emit_move_insn (mem, gen_rtx_REG (Pmode,
7102 x86_64_int_parameter_registers[i]));
7105 if (ix86_varargs_fpr_size)
7107 enum machine_mode smode;
7110 /* Now emit code to save SSE registers. The AX parameter contains the number
7111 of SSE parameter registers used to call this function, though all we
7112 actually check here is the zero/non-zero status. */
7114 label = gen_label_rtx ();
7115 test = gen_rtx_EQ (VOIDmode, gen_rtx_REG (QImode, AX_REG), const0_rtx);
7116 emit_jump_insn (gen_cbranchqi4 (test, XEXP (test, 0), XEXP (test, 1),
7119 /* ??? If !TARGET_SSE_TYPELESS_STORES, would we perform better if
7120 we used movdqa (i.e. TImode) instead? Perhaps even better would
7121 be if we could determine the real mode of the data, via a hook
7122 into pass_stdarg. Ignore all that for now. */
7124 if (crtl->stack_alignment_needed < GET_MODE_ALIGNMENT (smode))
7125 crtl->stack_alignment_needed = GET_MODE_ALIGNMENT (smode);
7127 max = cum->sse_regno + cfun->va_list_fpr_size / 16;
7128 if (max > X86_64_SSE_REGPARM_MAX)
7129 max = X86_64_SSE_REGPARM_MAX;
7131 for (i = cum->sse_regno; i < max; ++i)
7133 mem = plus_constant (save_area, i * 16 + ix86_varargs_gpr_size);
7134 mem = gen_rtx_MEM (smode, mem);
7135 MEM_NOTRAP_P (mem) = 1;
7136 set_mem_alias_set (mem, set);
7137 set_mem_align (mem, GET_MODE_ALIGNMENT (smode));
7139 emit_move_insn (mem, gen_rtx_REG (smode, SSE_REGNO (i)));
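/* The resulting register save area layout: the first
   X86_64_REGPARM_MAX * 8 bytes hold the integer registers saved
   above, and each SSE register is stored 16-byte aligned at offset
   ix86_varargs_gpr_size + i * 16, which is what the fp_offset
   computation in ix86_va_start below relies on.  */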
7147 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
7149 alias_set_type set = get_varargs_alias_set ();
7152 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
7156 mem = gen_rtx_MEM (Pmode,
7157 plus_constant (virtual_incoming_args_rtx,
7158 i * UNITS_PER_WORD));
7159 MEM_NOTRAP_P (mem) = 1;
7160 set_mem_alias_set (mem, set);
7162 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
7163 emit_move_insn (mem, reg);
7168 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7169 tree type, int *pretend_size ATTRIBUTE_UNUSED,
7172 CUMULATIVE_ARGS next_cum;
7175 /* This argument doesn't appear to be used anymore, which is good,
7176 because the old code here didn't suppress rtl generation. */
7177 gcc_assert (!no_rtl);
7182 fntype = TREE_TYPE (current_function_decl);
7184 /* For varargs, we do not want to skip the dummy va_dcl argument.
7185 For stdargs, we do want to skip the last named argument. */
7187 if (stdarg_p (fntype))
7188 ix86_function_arg_advance (&next_cum, mode, type, true);
7190 if (cum->call_abi == MS_ABI)
7191 setup_incoming_varargs_ms_64 (&next_cum);
7193 setup_incoming_varargs_64 (&next_cum);
7196 /* Check whether TYPE is a va_list implemented as a plain char pointer. */
7199 is_va_list_char_pointer (tree type)
7203 /* For 32-bit it is always true. */
7206 canonic = ix86_canonical_va_list_type (type);
7207 return (canonic == ms_va_list_type_node
7208 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
7211 /* Implement va_start. */
7214 ix86_va_start (tree valist, rtx nextarg)
7216 HOST_WIDE_INT words, n_gpr, n_fpr;
7217 tree f_gpr, f_fpr, f_ovf, f_sav;
7218 tree gpr, fpr, ovf, sav, t;
7223 if (flag_split_stack
7224 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
7226 unsigned int scratch_regno;
7228 /* When we are splitting the stack, we can't refer to the stack
7229 arguments using internal_arg_pointer, because they may be on
7230 the old stack. The split stack prologue will arrange to
7231 leave a pointer to the old stack arguments in a scratch
7232 register, which we here copy to a pseudo-register. The split
7233 stack prologue can't set the pseudo-register directly because
7234 it (the prologue) runs before any registers have been saved. */
7236 scratch_regno = split_stack_prologue_scratch_regno ();
7237 if (scratch_regno != INVALID_REGNUM)
7241 reg = gen_reg_rtx (Pmode);
7242 cfun->machine->split_stack_varargs_pointer = reg;
7245 emit_move_insn (reg, gen_rtx_REG (Pmode, scratch_regno));
7249 push_topmost_sequence ();
7250 emit_insn_after (seq, entry_of_function ());
7251 pop_topmost_sequence ();
7255 /* Only the 64bit target needs something special. */
7256 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7258 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
7259 std_expand_builtin_va_start (valist, nextarg);
7264 va_r = expand_expr (valist, NULL_RTX, VOIDmode, EXPAND_WRITE);
7265 next = expand_binop (ptr_mode, add_optab,
7266 cfun->machine->split_stack_varargs_pointer,
7267 crtl->args.arg_offset_rtx,
7268 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7269 convert_move (va_r, next, 0);
7274 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7275 f_fpr = DECL_CHAIN (f_gpr);
7276 f_ovf = DECL_CHAIN (f_fpr);
7277 f_sav = DECL_CHAIN (f_ovf);
7279 valist = build_simple_mem_ref (valist);
7280 TREE_TYPE (valist) = TREE_TYPE (sysv_va_list_type_node);
7281 /* The following should be folded into the MEM_REF offset. */
7282 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), unshare_expr (valist),
7284 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
7286 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
7288 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
7291 /* Count number of gp and fp argument registers used. */
7292 words = crtl->args.info.words;
7293 n_gpr = crtl->args.info.regno;
7294 n_fpr = crtl->args.info.sse_regno;
7296 if (cfun->va_list_gpr_size)
7298 type = TREE_TYPE (gpr);
7299 t = build2 (MODIFY_EXPR, type,
7300 gpr, build_int_cst (type, n_gpr * 8));
7301 TREE_SIDE_EFFECTS (t) = 1;
7302 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7305 if (TARGET_SSE && cfun->va_list_fpr_size)
7307 type = TREE_TYPE (fpr);
7308 t = build2 (MODIFY_EXPR, type, fpr,
7309 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
7310 TREE_SIDE_EFFECTS (t) = 1;
7311 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7314 /* Find the overflow area. */
7315 type = TREE_TYPE (ovf);
7316 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
7317 ovf_rtx = crtl->args.internal_arg_pointer;
7319 ovf_rtx = cfun->machine->split_stack_varargs_pointer;
7320 t = make_tree (type, ovf_rtx);
7322 t = build2 (POINTER_PLUS_EXPR, type, t,
7323 size_int (words * UNITS_PER_WORD));
7324 t = build2 (MODIFY_EXPR, type, ovf, t);
7325 TREE_SIDE_EFFECTS (t) = 1;
7326 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7328 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
7330 /* Find the register save area.
7331 The function prologue saves it right above the stack frame. */
7332 type = TREE_TYPE (sav);
7333 t = make_tree (type, frame_pointer_rtx);
7334 if (!ix86_varargs_gpr_size)
7335 t = build2 (POINTER_PLUS_EXPR, type, t,
7336 size_int (-8 * X86_64_REGPARM_MAX));
7337 t = build2 (MODIFY_EXPR, type, sav, t);
7338 TREE_SIDE_EFFECTS (t) = 1;
7339 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
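/* Example: for a hypothetical "void f (int n, ...)" under the SysV
   ABI, one GPR is consumed by N, so the code above sets gp_offset to
   8 and fp_offset to 8 * X86_64_REGPARM_MAX (48, no named SSE
   arguments), points overflow_arg_area at the first stack argument
   and reg_save_area at the area saved in the prologue.  */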
7343 /* Implement va_arg. */
7346 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7349 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
7350 tree f_gpr, f_fpr, f_ovf, f_sav;
7351 tree gpr, fpr, ovf, sav, t;
7353 tree lab_false, lab_over = NULL_TREE;
7358 enum machine_mode nat_mode;
7359 unsigned int arg_boundary;
7361 /* Only the 64bit target needs something special. */
7362 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7363 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7365 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7366 f_fpr = DECL_CHAIN (f_gpr);
7367 f_ovf = DECL_CHAIN (f_fpr);
7368 f_sav = DECL_CHAIN (f_ovf);
7370 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7371 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7372 valist = build_va_arg_indirect_ref (valist);
7373 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7374 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7375 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7377 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7379 type = build_pointer_type (type);
7380 size = int_size_in_bytes (type);
7381 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7383 nat_mode = type_natural_mode (type, NULL);
7392 /* Unnamed 256bit vector mode parameters are passed on the stack. */
7393 if (ix86_cfun_abi () == SYSV_ABI)
7400 container = construct_container (nat_mode, TYPE_MODE (type),
7401 type, 0, X86_64_REGPARM_MAX,
7402 X86_64_SSE_REGPARM_MAX, intreg,
7407 /* Pull the value out of the saved registers. */
7409 addr = create_tmp_var (ptr_type_node, "addr");
7413 int needed_intregs, needed_sseregs;
7415 tree int_addr, sse_addr;
7417 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7418 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7420 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7422 need_temp = (!REG_P (container)
7423 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7424 || TYPE_ALIGN (type) > 128));
7426 /* When passing a structure, verify that it forms a consecutive block
7427 in the register save area. If not, we need to do moves. */
7428 if (!need_temp && !REG_P (container))
7430 /* Verify that all registers are strictly consecutive. */
7431 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7435 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7437 rtx slot = XVECEXP (container, 0, i);
7438 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7439 || INTVAL (XEXP (slot, 1)) != i * 16)
7447 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7449 rtx slot = XVECEXP (container, 0, i);
7450 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7451 || INTVAL (XEXP (slot, 1)) != i * 8)
7463 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7464 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7467 /* First ensure that we fit completely in registers. */
7470 t = build_int_cst (TREE_TYPE (gpr),
7471 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7472 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7473 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7474 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7475 gimplify_and_add (t, pre_p);
7479 t = build_int_cst (TREE_TYPE (fpr),
7480 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7481 + X86_64_REGPARM_MAX * 8);
7482 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7483 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7484 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7485 gimplify_and_add (t, pre_p);
7488 /* Compute index to start of area used for integer regs. */
7491 /* int_addr = gpr + sav; */
7492 t = fold_convert (sizetype, gpr);
7493 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7494 gimplify_assign (int_addr, t, pre_p);
7498 /* sse_addr = fpr + sav; */
7499 t = fold_convert (sizetype, fpr);
7500 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7501 gimplify_assign (sse_addr, t, pre_p);
7505 int i, prev_size = 0;
7506 tree temp = create_tmp_var (type, "va_arg_tmp");
7509 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
7510 gimplify_assign (addr, t, pre_p);
7512 for (i = 0; i < XVECLEN (container, 0); i++)
7514 rtx slot = XVECEXP (container, 0, i);
7515 rtx reg = XEXP (slot, 0);
7516 enum machine_mode mode = GET_MODE (reg);
7522 tree dest_addr, dest;
7523 int cur_size = GET_MODE_SIZE (mode);
7525 if (prev_size + cur_size > size)
7527 cur_size = size - prev_size;
7528 mode = mode_for_size (cur_size * BITS_PER_UNIT, MODE_INT, 1);
7529 if (mode == BLKmode)
7532 piece_type = lang_hooks.types.type_for_mode (mode, 1);
7533 if (mode == GET_MODE (reg))
7534 addr_type = build_pointer_type (piece_type);
7536 addr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
7538 daddr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
7541 if (SSE_REGNO_P (REGNO (reg)))
7543 src_addr = sse_addr;
7544 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
7548 src_addr = int_addr;
7549 src_offset = REGNO (reg) * 8;
7551 src_addr = fold_convert (addr_type, src_addr);
7552 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
7553 size_int (src_offset));
7555 dest_addr = fold_convert (daddr_type, addr);
7556 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
7557 size_int (INTVAL (XEXP (slot, 1))));
7558 if (cur_size == GET_MODE_SIZE (mode))
7560 src = build_va_arg_indirect_ref (src_addr);
7561 dest = build_va_arg_indirect_ref (dest_addr);
7563 gimplify_assign (dest, src, pre_p);
7568 = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
7569 3, dest_addr, src_addr,
7570 size_int (cur_size));
7571 gimplify_and_add (copy, pre_p);
7573 prev_size += cur_size;
7579 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
7580 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7581 gimplify_assign (gpr, t, pre_p);
7586 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7587 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7588 gimplify_assign (fpr, t, pre_p);
7591 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7593 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7596 /* ... otherwise out of the overflow area. */
7598 /* When the caller aligns a parameter on the stack, a parameter whose
7599 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT will only be
7600 aligned at MAX_SUPPORTED_STACK_ALIGNMENT. Match the callee with
7601 the caller here. */
7602 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
7603 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7604 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7606 /* Care for on-stack alignment if needed. */
7607 if (arg_boundary <= 64 || size == 0)
7611 HOST_WIDE_INT align = arg_boundary / 8;
7612 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
7613 size_int (align - 1));
7614 t = fold_convert (sizetype, t);
7615 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7617 t = fold_convert (TREE_TYPE (ovf), t);
7620 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7621 gimplify_assign (addr, t, pre_p);
7623 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
7624 size_int (rsize * UNITS_PER_WORD));
7625 gimplify_assign (unshare_expr (ovf), t, pre_p);
7628 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7630 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
7631 addr = fold_convert (ptrtype, addr);
7634 addr = build_va_arg_indirect_ref (addr);
7635 return build_va_arg_indirect_ref (addr);
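/* For an integer argument needing one GPR, the fast path emitted
   above behaves roughly like this C sketch:

     if (ap->gp_offset >= 48)
       goto overflow;
     addr = ap->reg_save_area + ap->gp_offset;
     ap->gp_offset += 8;

   with the overflow branch instead reading from overflow_arg_area
   and advancing it by the rounded argument size.  */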
7638 /* Return true if OPNUM's MEM should be matched
7639 in movabs* patterns. */
7642 ix86_check_movabs (rtx insn, int opnum)
7646 set = PATTERN (insn);
7647 if (GET_CODE (set) == PARALLEL)
7648 set = XVECEXP (set, 0, 0);
7649 gcc_assert (GET_CODE (set) == SET);
7650 mem = XEXP (set, opnum);
7651 while (GET_CODE (mem) == SUBREG)
7652 mem = SUBREG_REG (mem);
7653 gcc_assert (MEM_P (mem));
7654 return volatile_ok || !MEM_VOLATILE_P (mem);
7657 /* Initialize the table of extra 80387 mathematical constants. */
7660 init_ext_80387_constants (void)
7662 static const char * cst[5] =
7664 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7665 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7666 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7667 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7668 "3.1415926535897932385128089594061862044", /* 4: fldpi */
7672 for (i = 0; i < 5; i++)
7674 real_from_string (&ext_80387_constants_table[i], cst[i]);
7675 /* Ensure each constant is rounded to XFmode precision. */
7676 real_convert (&ext_80387_constants_table[i],
7677 XFmode, &ext_80387_constants_table[i]);
7680 ext_80387_constants_init = 1;
7683 /* Return non-zero if the constant is something that
7684 can be loaded with a special instruction. */
7687 standard_80387_constant_p (rtx x)
7689 enum machine_mode mode = GET_MODE (x);
7693 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7696 if (x == CONST0_RTX (mode))
7698 if (x == CONST1_RTX (mode))
7701 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7703 /* For XFmode constants, try to find a special 80387 instruction when
7704 optimizing for size or on those CPUs that benefit from them. */
7706 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7710 if (! ext_80387_constants_init)
7711 init_ext_80387_constants ();
7713 for (i = 0; i < 5; i++)
7714 if (real_identical (&r, &ext_80387_constants_table[i]))
7718 /* A load of the constant -0.0 or -1.0 will be split into an
7719 fldz;fchs or fld1;fchs sequence. */
7720 if (real_isnegzero (&r))
7722 if (real_identical (&r, &dconstm1))
7728 /* Return the opcode of the special instruction to be used to load the constant X. */
7732 standard_80387_constant_opcode (rtx x)
7734 switch (standard_80387_constant_p (x))
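    /* A sketch of the expected mapping, following the return values of
       standard_80387_constant_p above: 1 -> fldz, 2 -> fld1, 3..7 ->
       fldlg2, fldln2, fldl2e, fldl2t and fldpi (the table order in
       init_ext_80387_constants), while 8 and 9 stand for -0.0 and -1.0,
       which are emitted as fldz or fld1 followed by fchs.  */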
7758 /* Return the CONST_DOUBLE representing the 80387 constant that is
7759 loaded by the specified special instruction. The argument IDX
7760 matches the return value from standard_80387_constant_p. */
7763 standard_80387_constant_rtx (int idx)
7767 if (! ext_80387_constants_init)
7768 init_ext_80387_constants ();
7784 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
7788 /* Return 1 if X is all 0s and 2 if X is all 1s
7789 in a supported SSE vector mode. */
7792 standard_sse_constant_p (rtx x)
7794 enum machine_mode mode = GET_MODE (x);
7796 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7798 if (vector_all_ones_operand (x, mode))
7814 /* Return the opcode of the special instruction to be used to load the constant X. */
7818 standard_sse_constant_opcode (rtx insn, rtx x)
7820 switch (standard_sse_constant_p (x))
7823 switch (get_attr_mode (insn))
7826 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7828 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7829 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7831 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7833 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7834 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7836 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7838 return "vxorps\t%x0, %x0, %x0";
7840 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7841 return "vxorps\t%x0, %x0, %x0";
7843 return "vxorpd\t%x0, %x0, %x0";
7845 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7846 return "vxorps\t%x0, %x0, %x0";
7848 return "vpxor\t%x0, %x0, %x0";
7853 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
7860 /* Returns true if OP contains a symbol reference. */
7863 symbolic_reference_mentioned_p (rtx op)
7868 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7871 fmt = GET_RTX_FORMAT (GET_CODE (op));
7872 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7878 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7879 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7883 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7890 /* Return true if it is appropriate to emit `ret' instructions in the
7891 body of a function. Do this only if the epilogue is simple, needing a
7892 couple of insns. Prior to reloading, we can't tell how many registers
7893 must be saved, so return false then. Return false if there is no frame
7894 marker to de-allocate. */
7897 ix86_can_use_return_insn_p (void)
7899 struct ix86_frame frame;
7901 if (! reload_completed || frame_pointer_needed)
7904 /* Don't allow more than 32k pop, since that's all we can do
7905 with one instruction. */
7906 if (crtl->args.pops_args && crtl->args.size >= 32768)
7909 ix86_compute_frame_layout (&frame);
7910 return (frame.stack_pointer_offset == UNITS_PER_WORD
7911 && (frame.nregs + frame.nsseregs) == 0);
7914 /* Value should be nonzero if functions must have frame pointers.
7915 Zero means the frame pointer need not be set up (and parms may
7916 be accessed via the stack pointer) in functions that seem suitable. */
7919 ix86_frame_pointer_required (void)
7921 /* If we accessed previous frames, then the generated code expects
7922 to be able to access the saved ebp value in our frame. */
7923 if (cfun->machine->accesses_prev_frame)
7926 /* Several x86 OSes need a frame pointer for other reasons,
7927 usually pertaining to setjmp. */
7928 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7931 /* In ix86_option_override_internal, TARGET_OMIT_LEAF_FRAME_POINTER
7932 turns off the frame pointer by default. Turn it back on now if
7933 we've not got a leaf function. */
7934 if (TARGET_OMIT_LEAF_FRAME_POINTER
7935 && (!current_function_is_leaf
7936 || ix86_current_function_calls_tls_descriptor))
7939 if (crtl->profile && !flag_fentry)
7945 /* Record that the current function accesses previous call frames. */
7948 ix86_setup_frame_addresses (void)
7950 cfun->machine->accesses_prev_frame = 1;
7953 #ifndef USE_HIDDEN_LINKONCE
7954 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7955 # define USE_HIDDEN_LINKONCE 1
7957 # define USE_HIDDEN_LINKONCE 0
7961 static int pic_labels_used;
7963 /* Fills in the label name that should be used for a pc thunk for
7964 the given register. */
7967 get_pc_thunk_name (char name[32], unsigned int regno)
7969 gcc_assert (!TARGET_64BIT);
7971 if (USE_HIDDEN_LINKONCE)
7972 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7974 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
7978 /* This function generates code for -fpic that loads %ebx with
7979 the return address of the caller and then returns. */
7982 ix86_code_end (void)
7987 for (regno = AX_REG; regno <= SP_REG; regno++)
7992 if (!(pic_labels_used & (1 << regno)))
7995 get_pc_thunk_name (name, regno);
7997 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
7998 get_identifier (name),
7999 build_function_type (void_type_node, void_list_node));
8000 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
8001 NULL_TREE, void_type_node);
8002 TREE_PUBLIC (decl) = 1;
8003 TREE_STATIC (decl) = 1;
8008 switch_to_section (darwin_sections[text_coal_section]);
8009 fputs ("\t.weak_definition\t", asm_out_file);
8010 assemble_name (asm_out_file, name);
8011 fputs ("\n\t.private_extern\t", asm_out_file);
8012 assemble_name (asm_out_file, name);
8013 putc ('\n', asm_out_file);
8014 ASM_OUTPUT_LABEL (asm_out_file, name);
8015 DECL_WEAK (decl) = 1;
8019 if (USE_HIDDEN_LINKONCE)
8021 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
8023 targetm.asm_out.unique_section (decl, 0);
8024 switch_to_section (get_named_section (decl, NULL, 0));
8026 targetm.asm_out.globalize_label (asm_out_file, name);
8027 fputs ("\t.hidden\t", asm_out_file);
8028 assemble_name (asm_out_file, name);
8029 putc ('\n', asm_out_file);
8030 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
8034 switch_to_section (text_section);
8035 ASM_OUTPUT_LABEL (asm_out_file, name);
8038 DECL_INITIAL (decl) = make_node (BLOCK);
8039 current_function_decl = decl;
8040 init_function_start (decl);
8041 first_function_block_is_cold = false;
8042 /* Make sure unwind info is emitted for the thunk if needed. */
8043 final_start_function (emit_barrier (), asm_out_file, 1);
8045 /* Pad stack IP move with 4 instructions (two NOPs count
8046 as one instruction). */
8047 if (TARGET_PAD_SHORT_FUNCTION)
8052 fputs ("\tnop\n", asm_out_file);
8055 xops[0] = gen_rtx_REG (Pmode, regno);
8056 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8057 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
8058 fputs ("\tret\n", asm_out_file);
8059 final_end_function ();
8060 init_insn_lengths ();
8061 free_after_compilation (cfun);
8063 current_function_decl = NULL;
8066 if (flag_split_stack)
8067 file_end_indicate_split_stack ();
8070 /* Emit code for the SET_GOT patterns. */
8073 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
8079 if (TARGET_VXWORKS_RTP && flag_pic)
8081 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
8082 xops[2] = gen_rtx_MEM (Pmode,
8083 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
8084 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
8086 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
8087 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
8088 an unadorned address. */
8089 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
8090 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
8091 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
8095 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
8097 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
8099 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
8102 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
8105 output_asm_insn ("call\t%a2", xops);
8106 #ifdef DWARF2_UNWIND_INFO
8107 /* The call to the next label acts as a push.  */
8108 if (dwarf2out_do_frame ())
8112 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8113 gen_rtx_PLUS (Pmode,
8116 RTX_FRAME_RELATED_P (insn) = 1;
8117 dwarf2out_frame_debug (insn, true);
8124 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
8125 is what will be referenced by the Mach-O PIC subsystem. */
8127 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
8130 targetm.asm_out.internal_label (asm_out_file, "L",
8131 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
8135 output_asm_insn ("pop%z0\t%0", xops);
8136 #ifdef DWARF2_UNWIND_INFO
8137 /* The pop is a pop and clobbers dest, but doesn't restore it
8138 for unwind info purposes. */
8139 if (dwarf2out_do_frame ())
8143 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
8144 dwarf2out_frame_debug (insn, true);
8145 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8146 gen_rtx_PLUS (Pmode,
8149 RTX_FRAME_RELATED_P (insn) = 1;
8150 dwarf2out_frame_debug (insn, true);
8159 get_pc_thunk_name (name, REGNO (dest));
8160 pic_labels_used |= 1 << REGNO (dest);
8162 #ifdef DWARF2_UNWIND_INFO
8163 /* Ensure all queued register saves are flushed before the
8165 if (dwarf2out_do_frame ())
8166 dwarf2out_flush_queued_reg_saves ();
8168 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
8169 xops[2] = gen_rtx_MEM (QImode, xops[2]);
8170 output_asm_insn ("call\t%X2", xops);
8171 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
8172 is what will be referenced by the Mach-O PIC subsystem. */
8175 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
8177 targetm.asm_out.internal_label (asm_out_file, "L",
8178 CODE_LABEL_NUMBER (label));
8185 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
8186 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
8188 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
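/* So for flag_pic without deep branch prediction, the pieces above
   combine into the classic ia32 get-PC idiom (a sketch in AT&T syntax,
   with %ebx as DEST and .L2 standing for the internal label):

       call  .L2
   .L2: popl  %ebx
        addl  $_GLOBAL_OFFSET_TABLE_+[.-.L2], %ebx  */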
8193 /* Generate a "push" pattern for input ARG.  */
8198 struct machine_function *m = cfun->machine;
8200 if (m->fs.cfa_reg == stack_pointer_rtx)
8201 m->fs.cfa_offset += UNITS_PER_WORD;
8202 m->fs.sp_offset += UNITS_PER_WORD;
8204 return gen_rtx_SET (VOIDmode,
8206 gen_rtx_PRE_DEC (Pmode,
8207 stack_pointer_rtx)),
8211 /* Generate a "pop" pattern for input ARG.  */
8216 return gen_rtx_SET (VOIDmode,
8219 gen_rtx_POST_INC (Pmode,
8220 stack_pointer_rtx)));
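/* In RTL terms the two helpers above are mirrors of each other:
   gen_push produces (set (mem (pre_dec sp)) arg) and gen_pop produces
   (set arg (mem (post_inc sp))), matching the hardware push and pop
   instructions.  */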
8223 /* Return >= 0 if there is an unused call-clobbered register available
8224 for the entire function. */
8227 ix86_select_alt_pic_regnum (void)
8229 if (current_function_is_leaf
8231 && !ix86_current_function_calls_tls_descriptor)
8234 /* Can't use the same register for both PIC and DRAP. */
8236 drap = REGNO (crtl->drap_reg);
8239 for (i = 2; i >= 0; --i)
8240 if (i != drap && !df_regs_ever_live_p (i))
8244 return INVALID_REGNUM;
8247 /* Return 1 if we need to save REGNO. */
8249 ix86_save_reg (unsigned int regno, int maybe_eh_return)
8251 if (pic_offset_table_rtx
8252 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
8253 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8255 || crtl->calls_eh_return
8256 || crtl->uses_const_pool))
8258 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
8263 if (crtl->calls_eh_return && maybe_eh_return)
8268 unsigned test = EH_RETURN_DATA_REGNO (i);
8269 if (test == INVALID_REGNUM)
8276 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
8279 return (df_regs_ever_live_p (regno)
8280 && !call_used_regs[regno]
8281 && !fixed_regs[regno]
8282 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
8285 /* Return number of saved general purpose registers.  */
8288 ix86_nsaved_regs (void)
8293 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8294 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8299 /* Return number of saved SSE registers.  */
8302 ix86_nsaved_sseregs (void)
8307 if (ix86_cfun_abi () != MS_ABI)
8309 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8310 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8315 /* Given FROM and TO register numbers, say whether this elimination is
8316 allowed. If stack alignment is needed, we can only replace argument
8317 pointer with hard frame pointer, or replace frame pointer with stack
8318 pointer. Otherwise, frame pointer elimination is automatically
8319 handled and all other eliminations are valid. */
8322 ix86_can_eliminate (const int from, const int to)
8324 if (stack_realign_fp)
8325 return ((from == ARG_POINTER_REGNUM
8326 && to == HARD_FRAME_POINTER_REGNUM)
8327 || (from == FRAME_POINTER_REGNUM
8328 && to == STACK_POINTER_REGNUM));
8330 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
8333 /* Return the offset between two registers, one to be eliminated, and the other
8334 its replacement, at the start of a routine. */
8337 ix86_initial_elimination_offset (int from, int to)
8339 struct ix86_frame frame;
8340 ix86_compute_frame_layout (&frame);
8342 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
8343 return frame.hard_frame_pointer_offset;
8344 else if (from == FRAME_POINTER_REGNUM
8345 && to == HARD_FRAME_POINTER_REGNUM)
8346 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
8349 gcc_assert (to == STACK_POINTER_REGNUM);
8351 if (from == ARG_POINTER_REGNUM)
8352 return frame.stack_pointer_offset;
8354 gcc_assert (from == FRAME_POINTER_REGNUM);
8355 return frame.stack_pointer_offset - frame.frame_pointer_offset;
8359 /* In a dynamically-aligned function, we can't know the offset from
8360 stack pointer to frame pointer, so we must ensure that setjmp
8361 eliminates fp against the hard fp (%ebp) rather than trying to
8362 index from %esp up to the top of the frame across a gap that is
8363 of unknown (at compile-time) size. */
8365 ix86_builtin_setjmp_frame_value (void)
8367 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
8370 /* On the x86 -fsplit-stack and -fstack-protector both use the same
8371 field in the TCB, so they cannot be used together.  */
8374 ix86_supports_split_stack (bool report ATTRIBUTE_UNUSED)
8378 #ifndef TARGET_THREAD_SPLIT_STACK_OFFSET
8380 error ("%<-fsplit-stack%> currently only supported on GNU/Linux");
8383 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
8386 error ("%<-fsplit-stack%> requires "
8387 "assembler support for CFI directives");
8395 /* When using -fsplit-stack, the allocation routines set a field in
8396 the TCB to the bottom of the stack plus this much space, measured
8397 in bytes.  */
8399 #define SPLIT_STACK_AVAILABLE 256
8401 /* Fill the ix86_frame structure with information about the frame of the
8402 currently computed function.  */
8404 ix86_compute_frame_layout (struct ix86_frame *frame)
8406 unsigned int stack_alignment_needed;
8407 HOST_WIDE_INT offset;
8408 unsigned int preferred_alignment;
8409 HOST_WIDE_INT size = get_frame_size ();
8410 HOST_WIDE_INT to_allocate;
8412 frame->nregs = ix86_nsaved_regs ();
8413 frame->nsseregs = ix86_nsaved_sseregs ();
8415 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
8416 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
8418 /* The MS ABI seems to require stack alignment to always be 16, except for
8419 function prologues and leaf functions.  */
8420 if ((ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
8421 && (!current_function_is_leaf || cfun->calls_alloca != 0
8422 || ix86_current_function_calls_tls_descriptor))
8424 preferred_alignment = 16;
8425 stack_alignment_needed = 16;
8426 crtl->preferred_stack_boundary = 128;
8427 crtl->stack_alignment_needed = 128;
8430 gcc_assert (!size || stack_alignment_needed);
8431 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
8432 gcc_assert (preferred_alignment <= stack_alignment_needed);
8434 /* During reload the number of registers saved can change.  Recompute
8435 the value as needed.  Do not recompute when the number of registers
8436 didn't change, as reload makes multiple calls to this function and
8437 does not expect the decision to change within a single iteration.  */
8438 if (!optimize_function_for_size_p (cfun)
8439 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
8441 int count = frame->nregs;
8442 struct cgraph_node *node = cgraph_node (current_function_decl);
8444 cfun->machine->use_fast_prologue_epilogue_nregs = count;
8445 /* The fast prologue uses move instead of push to save registers. This
8446 is significantly longer, but also executes faster as modern hardware
8447 can execute the moves in parallel, but can't do that for push/pop.
8449 Be careful about choosing which prologue to emit: when the function
8450 takes many instructions to execute, we may as well use the slow
8451 version; likewise when the function is known to be outside a hot spot
8452 (this is known with feedback only).  Weight the size of the function
8453 by the number of registers to save, as it is cheap to use one or two
8454 push instructions but very slow to use many of them.  */
8456 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
8457 if (node->frequency < NODE_FREQUENCY_NORMAL
8458 || (flag_branch_probabilities
8459 && node->frequency < NODE_FREQUENCY_HOT))
8460 cfun->machine->use_fast_prologue_epilogue = false;
8462 cfun->machine->use_fast_prologue_epilogue
8463 = !expensive_function_p (count);
8465 if (TARGET_PROLOGUE_USING_MOVE
8466 && cfun->machine->use_fast_prologue_epilogue)
8467 frame->save_regs_using_mov = true;
8469 frame->save_regs_using_mov = false;
8471 /* If static stack checking is enabled and done with probes, the registers
8472 need to be saved before allocating the frame. */
8473 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
8474 frame->save_regs_using_mov = false;
8476 /* Skip return address. */
8477 offset = UNITS_PER_WORD;
8479 /* Skip pushed static chain. */
8480 if (ix86_static_chain_on_stack)
8481 offset += UNITS_PER_WORD;
8483 /* Skip saved base pointer. */
8484 if (frame_pointer_needed)
8485 offset += UNITS_PER_WORD;
8487 frame->hard_frame_pointer_offset = offset;
8489 /* Register save area */
8490 offset += frame->nregs * UNITS_PER_WORD;
8491 frame->reg_save_offset = offset;
8493 /* Align and set SSE register save area. */
8494 if (frame->nsseregs)
8496 /* The only ABI that has saved SSE registers (Win64) also has a
8497 16-byte aligned default stack, and thus we don't need to be
8498 within the re-aligned local stack frame to save them. */
8499 gcc_assert (INCOMING_STACK_BOUNDARY >= 128);
8500 offset = (offset + 16 - 1) & -16;
8501 offset += frame->nsseregs * 16;
8503 frame->sse_reg_save_offset = offset;
8505 /* The re-aligned stack starts here. Values before this point are not
8506 directly comparable with values below this point. In order to make
8507 sure that no value happens to be the same before and after, force
8508 the alignment computation below to add a non-zero value. */
8509 if (stack_realign_fp)
8510 offset = (offset + stack_alignment_needed) & -stack_alignment_needed;
8513 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
8514 offset += frame->va_arg_size;
8516 /* Align start of frame for local function. */
8517 offset = (offset + stack_alignment_needed - 1) & -stack_alignment_needed;
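/* The expression (offset + align - 1) & -align rounds offset up to the
   next multiple of align, a power of two; e.g. an offset of 20 with
   16-byte alignment yields (20 + 15) & -16 = 32.  */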
8519 /* Frame pointer points here. */
8520 frame->frame_pointer_offset = offset;
8524 /* Add outgoing arguments area. Can be skipped if we eliminated
8525 all the function calls as dead code.
8526 Skipping is however impossible when the function calls alloca.  The
8527 alloca expander assumes that the last crtl->outgoing_args_size bytes
8528 of the stack frame are unused.  */
8529 if (ACCUMULATE_OUTGOING_ARGS
8530 && (!current_function_is_leaf || cfun->calls_alloca
8531 || ix86_current_function_calls_tls_descriptor))
8533 offset += crtl->outgoing_args_size;
8534 frame->outgoing_arguments_size = crtl->outgoing_args_size;
8537 frame->outgoing_arguments_size = 0;
8539 /* Align stack boundary.  Only needed if we're calling another function
8540 or using alloca.  */
8541 if (!current_function_is_leaf || cfun->calls_alloca
8542 || ix86_current_function_calls_tls_descriptor)
8543 offset = (offset + preferred_alignment - 1) & -preferred_alignment;
8545 /* We've reached the end of the stack frame.  */
8546 frame->stack_pointer_offset = offset;
8548 /* The size the prologue needs to allocate.  */
8549 to_allocate = offset - frame->sse_reg_save_offset;
8551 if ((!to_allocate && frame->nregs <= 1)
8552 || (TARGET_64BIT && to_allocate >= (HOST_WIDE_INT) 0x80000000))
8553 frame->save_regs_using_mov = false;
8555 if (ix86_using_red_zone ()
8556 && current_function_sp_is_unchanging
8557 && current_function_is_leaf
8558 && !ix86_current_function_calls_tls_descriptor)
8560 frame->red_zone_size = to_allocate;
8561 if (frame->save_regs_using_mov)
8562 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
8563 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
8564 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
8567 frame->red_zone_size = 0;
8568 frame->stack_pointer_offset -= frame->red_zone_size;
8571 /* This is semi-inlined memory_address_length, but simplified
8572 since we know that we're always dealing with reg+offset, and
8573 to avoid having to create and discard all that rtl. */
8576 choose_baseaddr_len (unsigned int regno, HOST_WIDE_INT offset)
8582 /* EBP and R13 cannot be encoded without an offset. */
8583 len = (regno == BP_REG || regno == R13_REG);
8585 else if (IN_RANGE (offset, -128, 127))
8588 /* ESP and R12 must be encoded with a SIB byte. */
8589 if (regno == SP_REG || regno == R12_REG)
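/* Worked examples of the encoding lengths computed here: (%esp) needs a
   SIB byte, so it costs one extra byte; -4(%ebp) fits in a one-byte
   disp8; and a displacement outside [-128, 127] needs a full four-byte
   disp32 whatever the base register.  */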
8595 /* Return an RTX that points to CFA_OFFSET within the stack frame.
8596 The valid base registers are taken from CFUN->MACHINE->FS. */
8599 choose_baseaddr (HOST_WIDE_INT cfa_offset)
8601 const struct machine_function *m = cfun->machine;
8602 rtx base_reg = NULL;
8603 HOST_WIDE_INT base_offset = 0;
8605 if (m->use_fast_prologue_epilogue)
8607 /* Choose the base register most likely to allow the most scheduling
8608 opportunities.  Generally FP is valid throughout the function,
8609 while DRAP must be reloaded within the epilogue. But choose either
8610 over the SP due to increased encoding size. */
8614 base_reg = hard_frame_pointer_rtx;
8615 base_offset = m->fs.fp_offset - cfa_offset;
8617 else if (m->fs.drap_valid)
8619 base_reg = crtl->drap_reg;
8620 base_offset = 0 - cfa_offset;
8622 else if (m->fs.sp_valid)
8624 base_reg = stack_pointer_rtx;
8625 base_offset = m->fs.sp_offset - cfa_offset;
8630 HOST_WIDE_INT toffset;
8633 /* Choose the base register with the smallest address encoding.
8634 With a tie, choose FP > DRAP > SP. */
8637 base_reg = stack_pointer_rtx;
8638 base_offset = m->fs.sp_offset - cfa_offset;
8639 len = choose_baseaddr_len (STACK_POINTER_REGNUM, base_offset);
8641 if (m->fs.drap_valid)
8643 toffset = 0 - cfa_offset;
8644 tlen = choose_baseaddr_len (REGNO (crtl->drap_reg), toffset);
8647 base_reg = crtl->drap_reg;
8648 base_offset = toffset;
8654 toffset = m->fs.fp_offset - cfa_offset;
8655 tlen = choose_baseaddr_len (HARD_FRAME_POINTER_REGNUM, toffset);
8658 base_reg = hard_frame_pointer_rtx;
8659 base_offset = toffset;
8664 gcc_assert (base_reg != NULL);
8666 return plus_constant (base_reg, base_offset);
8669 /* Emit code to save registers in the prologue. */
8672 ix86_emit_save_regs (void)
8677 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
8678 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8680 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
8681 RTX_FRAME_RELATED_P (insn) = 1;
8685 /* Emit a single register save at CFA - CFA_OFFSET. */
8688 ix86_emit_save_reg_using_mov (enum machine_mode mode, unsigned int regno,
8689 HOST_WIDE_INT cfa_offset)
8691 struct machine_function *m = cfun->machine;
8692 rtx reg = gen_rtx_REG (mode, regno);
8693 rtx mem, addr, base, insn;
8695 addr = choose_baseaddr (cfa_offset);
8696 mem = gen_frame_mem (mode, addr);
8698 /* For SSE saves, we need to indicate the 128-bit alignment. */
8699 set_mem_align (mem, GET_MODE_ALIGNMENT (mode));
8701 insn = emit_move_insn (mem, reg);
8702 RTX_FRAME_RELATED_P (insn) = 1;
8705 if (GET_CODE (base) == PLUS)
8706 base = XEXP (base, 0);
8707 gcc_checking_assert (REG_P (base));
8709 /* When saving registers into a re-aligned local stack frame, avoid
8710 any tricky guessing by dwarf2out. */
8711 if (m->fs.realigned)
8713 gcc_checking_assert (stack_realign_drap);
8715 if (regno == REGNO (crtl->drap_reg))
8717 /* A bit of a hack. We force the DRAP register to be saved in
8718 the re-aligned stack frame, which provides us with a copy
8719 of the CFA that will last past the prologue. Install it. */
8720 gcc_checking_assert (cfun->machine->fs.fp_valid);
8721 addr = plus_constant (hard_frame_pointer_rtx,
8722 cfun->machine->fs.fp_offset - cfa_offset);
8723 mem = gen_rtx_MEM (mode, addr);
8724 add_reg_note (insn, REG_CFA_DEF_CFA, mem);
8728 /* The frame pointer is a stable reference within the
8729 aligned frame. Use it. */
8730 gcc_checking_assert (cfun->machine->fs.fp_valid);
8731 addr = plus_constant (hard_frame_pointer_rtx,
8732 cfun->machine->fs.fp_offset - cfa_offset);
8733 mem = gen_rtx_MEM (mode, addr);
8734 add_reg_note (insn, REG_CFA_EXPRESSION,
8735 gen_rtx_SET (VOIDmode, mem, reg));
8739 /* The memory may not be relative to the current CFA register,
8740 which means that we may need to generate a new pattern for
8741 use by the unwind info. */
8742 else if (base != m->fs.cfa_reg)
8744 addr = plus_constant (m->fs.cfa_reg, m->fs.cfa_offset - cfa_offset);
8745 mem = gen_rtx_MEM (mode, addr);
8746 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (VOIDmode, mem, reg));
8750 /* Emit code to save registers using MOV insns.
8751 First register is stored at CFA - CFA_OFFSET. */
8753 ix86_emit_save_regs_using_mov (HOST_WIDE_INT cfa_offset)
8757 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8758 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8760 ix86_emit_save_reg_using_mov (Pmode, regno, cfa_offset);
8761 cfa_offset -= UNITS_PER_WORD;
8765 /* Emit code to save SSE registers using MOV insns.
8766 First register is stored at CFA - CFA_OFFSET. */
8768 ix86_emit_save_sse_regs_using_mov (HOST_WIDE_INT cfa_offset)
8772 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8773 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8775 ix86_emit_save_reg_using_mov (V4SFmode, regno, cfa_offset);
8780 static GTY(()) rtx queued_cfa_restores;
8782 /* Add a REG_CFA_RESTORE REG note to INSN, or queue it until the next
8783 stack manipulation insn.  The value is on the stack at CFA - CFA_OFFSET.
8784 Don't add the note if the previously saved value will be left untouched
8785 within the stack red zone until return, as unwinders can find the same
8786 value in the register and on the stack.  */
8789 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT cfa_offset)
8791 if (cfa_offset <= cfun->machine->fs.red_zone_offset)
8796 add_reg_note (insn, REG_CFA_RESTORE, reg);
8797 RTX_FRAME_RELATED_P (insn) = 1;
8801 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
8804 /* Add queued REG_CFA_RESTORE notes if any to INSN. */
8807 ix86_add_queued_cfa_restore_notes (rtx insn)
8810 if (!queued_cfa_restores)
8812 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
8814 XEXP (last, 1) = REG_NOTES (insn);
8815 REG_NOTES (insn) = queued_cfa_restores;
8816 queued_cfa_restores = NULL_RTX;
8817 RTX_FRAME_RELATED_P (insn) = 1;
8820 /* Expand prologue or epilogue stack adjustment.
8821 The pattern exists to put a dependency on all ebp-based memory accesses.
8822 STYLE should be negative if instructions should be marked as frame
8823 related, zero if the %r11 register is live and cannot be freely used,
8824 and positive otherwise.  */
8827 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
8828 int style, bool set_cfa)
8830 struct machine_function *m = cfun->machine;
8834 insn = gen_pro_epilogue_adjust_stack_si_add (dest, src, offset);
8835 else if (x86_64_immediate_operand (offset, DImode))
8836 insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, offset);
8840 /* r11 is used by indirect sibcall return as well; it is set before the
8841 epilogue and used after the epilogue.  */
8843 tmp = gen_rtx_REG (DImode, R11_REG);
8846 gcc_assert (src != hard_frame_pointer_rtx
8847 && dest != hard_frame_pointer_rtx);
8848 tmp = hard_frame_pointer_rtx;
8850 insn = emit_insn (gen_rtx_SET (DImode, tmp, offset));
8852 RTX_FRAME_RELATED_P (insn) = 1;
8854 insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, tmp);
8857 insn = emit_insn (insn);
8859 ix86_add_queued_cfa_restore_notes (insn);
8865 gcc_assert (m->fs.cfa_reg == src);
8866 m->fs.cfa_offset += INTVAL (offset);
8867 m->fs.cfa_reg = dest;
8869 r = gen_rtx_PLUS (Pmode, src, offset);
8870 r = gen_rtx_SET (VOIDmode, dest, r);
8871 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
8872 RTX_FRAME_RELATED_P (insn) = 1;
8875 RTX_FRAME_RELATED_P (insn) = 1;
8877 if (dest == stack_pointer_rtx)
8879 HOST_WIDE_INT ooffset = m->fs.sp_offset;
8880 bool valid = m->fs.sp_valid;
8882 if (src == hard_frame_pointer_rtx)
8884 valid = m->fs.fp_valid;
8885 ooffset = m->fs.fp_offset;
8887 else if (src == crtl->drap_reg)
8889 valid = m->fs.drap_valid;
8894 /* Else there are two possibilities: SP itself, which we set
8895 up as the default above.  Or EH_RETURN_STACKADJ_RTX, which is
8896 taken care of by hand along the eh_return path.  */
8897 gcc_checking_assert (src == stack_pointer_rtx
8898 || offset == const0_rtx);
8901 m->fs.sp_offset = ooffset - INTVAL (offset);
8902 m->fs.sp_valid = valid;
8906 /* Find an available register to be used as the dynamic realign argument
8907 pointer register.  Such a register will be written in the prologue and
8908 used at the beginning of the body, so it must not be
8909 1. a parameter passing register.
8910 2. the GOT pointer.
8911 We reuse the static-chain register if it is available.  Otherwise, we
8912 use DI for i386 and R13 for x86-64.  We chose R13 since it has a
8913 longer encoding.
8915 Return: the regno of the chosen register.  */
8918 find_drap_reg (void)
8920 tree decl = cfun->decl;
8924 /* Use R13 for a nested function or a function that needs a static
8925 chain.  Since a function with a tail call may use any caller-saved
8926 registers in the epilogue, the DRAP must not use a caller-saved
8927 register in that case.  */
8928 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8935 /* Use DI for a nested function or a function that needs a static
8936 chain.  Since a function with a tail call may use any caller-saved
8937 registers in the epilogue, the DRAP must not use a caller-saved
8938 register in that case.  */
8939 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8942 /* Reuse the static chain register if it isn't used for parameter
8943 passing.  */
8944 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
8945 && !lookup_attribute ("fastcall",
8946 TYPE_ATTRIBUTES (TREE_TYPE (decl)))
8947 && !lookup_attribute ("thiscall",
8948 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
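/* Judging from the constraints above: an ordinary 32-bit function ends
   up reusing %ecx as the DRAP, with %edi as the fallback when %ecx is
   taken by regparm, fastcall or thiscall argument passing; x86-64 uses
   R13 only in the nested-function/static-chain cases described.  */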
8955 /* Return minimum incoming stack alignment. */
8958 ix86_minimum_incoming_stack_boundary (bool sibcall)
8960 unsigned int incoming_stack_boundary;
8962 /* Prefer the one specified at command line. */
8963 if (ix86_user_incoming_stack_boundary)
8964 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
8965 /* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack boundary
8966 if -mstackrealign is used, this isn't a sibcall check, and the
8967 estimated stack alignment is 128 bits.  */
8970 && ix86_force_align_arg_pointer
8971 && crtl->stack_alignment_estimated == 128)
8972 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8974 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
8976 /* Incoming stack alignment can be changed on individual functions
8977 via force_align_arg_pointer attribute. We use the smallest
8978 incoming stack boundary. */
8979 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
8980 && lookup_attribute (ix86_force_align_arg_pointer_string,
8981 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
8982 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8984 /* The incoming stack frame has to be aligned at least at
8985 parm_stack_boundary. */
8986 if (incoming_stack_boundary < crtl->parm_stack_boundary)
8987 incoming_stack_boundary = crtl->parm_stack_boundary;
8989 /* The stack at the entry of main is aligned by the runtime.  We use
8990 the smallest incoming stack boundary.  */
8991 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
8992 && DECL_NAME (current_function_decl)
8993 && MAIN_NAME_P (DECL_NAME (current_function_decl))
8994 && DECL_FILE_SCOPE_P (current_function_decl))
8995 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
8997 return incoming_stack_boundary;
9000 /* Update incoming stack boundary and estimated stack alignment. */
9003 ix86_update_stack_boundary (void)
9005 ix86_incoming_stack_boundary
9006 = ix86_minimum_incoming_stack_boundary (false);
9008 /* x86_64 varargs need 16-byte stack alignment for the register save
9009 area.  */
9012 && crtl->stack_alignment_estimated < 128)
9013 crtl->stack_alignment_estimated = 128;
9016 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
9017 needed or an rtx for DRAP otherwise. */
9020 ix86_get_drap_rtx (void)
9022 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
9023 crtl->need_drap = true;
9025 if (stack_realign_drap)
9027 /* Assign DRAP to vDRAP and return vDRAP.  */
9028 unsigned int regno = find_drap_reg ();
9033 arg_ptr = gen_rtx_REG (Pmode, regno);
9034 crtl->drap_reg = arg_ptr;
9037 drap_vreg = copy_to_reg (arg_ptr);
9041 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
9044 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
9045 RTX_FRAME_RELATED_P (insn) = 1;
9053 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
9056 ix86_internal_arg_pointer (void)
9058 return virtual_incoming_args_rtx;
9061 struct scratch_reg {
9066 /* Return a short-lived scratch register for use on function entry.
9067 In 32-bit mode, it is valid only after the registers are saved
9068 in the prologue. This register must be released by means of
9069 release_scratch_register_on_entry once it is dead. */
9072 get_scratch_register_on_entry (struct scratch_reg *sr)
9080 /* We always use R11 in 64-bit mode. */
9085 tree decl = current_function_decl, fntype = TREE_TYPE (decl);
9087 = lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)) != NULL_TREE;
9088 bool static_chain_p = DECL_STATIC_CHAIN (decl);
9089 int regparm = ix86_function_regparm (fntype, decl);
9091 = crtl->drap_reg ? REGNO (crtl->drap_reg) : INVALID_REGNUM;
9093 /* 'fastcall' sets regparm to 2, uses ecx/edx for arguments and eax
9094 for the static chain register. */
9095 if ((regparm < 1 || (fastcall_p && !static_chain_p))
9096 && drap_regno != AX_REG)
9098 else if (regparm < 2 && drap_regno != DX_REG)
9100 /* ecx is the static chain register. */
9101 else if (regparm < 3 && !fastcall_p && !static_chain_p
9102 && drap_regno != CX_REG)
9104 else if (ix86_save_reg (BX_REG, true))
9106 /* esi is the static chain register. */
9107 else if (!(regparm == 3 && static_chain_p)
9108 && ix86_save_reg (SI_REG, true))
9110 else if (ix86_save_reg (DI_REG, true))
9114 regno = (drap_regno == AX_REG ? DX_REG : AX_REG);
9119 sr->reg = gen_rtx_REG (Pmode, regno);
9122 rtx insn = emit_insn (gen_push (sr->reg));
9123 RTX_FRAME_RELATED_P (insn) = 1;
9127 /* Release a scratch register obtained from the preceding function. */
9130 release_scratch_register_on_entry (struct scratch_reg *sr)
9134 rtx x, insn = emit_insn (gen_pop (sr->reg));
9136 /* The RTX_FRAME_RELATED_P mechanism doesn't know about pop. */
9137 RTX_FRAME_RELATED_P (insn) = 1;
9138 x = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (UNITS_PER_WORD));
9139 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
9140 add_reg_note (insn, REG_FRAME_RELATED_EXPR, x);
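/* The two helpers above are always used as a bracketing pair around
   probe emission, e.g.:

       struct scratch_reg sr;
       get_scratch_register_on_entry (&sr);
       ... emit probes using sr.reg ...
       release_scratch_register_on_entry (&sr);  */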
9144 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
9146 /* Emit code to adjust the stack pointer by SIZE bytes while probing it. */
9149 ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
9151 /* We skip the probe for the first interval + a small dope of 4 words and
9152 probe that many bytes past the specified size to maintain a protection
9153 area at the bottom of the stack.  */
9154 const int dope = 4 * UNITS_PER_WORD;
9155 rtx size_rtx = GEN_INT (size);
9157 /* See if we have a constant small number of probes to generate. If so,
9158 that's the easy case. The run-time loop is made up of 11 insns in the
9159 generic case while the compile-time loop is made up of 3+2*(n-1) insns
9160 for n # of intervals. */
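/* For instance, with a 4096-byte PROBE_INTERVAL (the value when
   STACK_CHECK_PROBE_INTERVAL_EXP has its usual value of 12), sizes up
   to 20 KB take the unrolled sequence below and anything larger falls
   through to the loop.  */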
9161 if (size <= 5 * PROBE_INTERVAL)
9163 HOST_WIDE_INT i, adjust;
9164 bool first_probe = true;
9166 /* Adjust SP and probe at PROBE_INTERVAL + N * PROBE_INTERVAL for
9167 values of N from 1 until it exceeds SIZE. If only one probe is
9168 needed, this will not generate any code. Then adjust and probe
9169 to PROBE_INTERVAL + SIZE. */
9170 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
9174 adjust = 2 * PROBE_INTERVAL + dope;
9175 first_probe = false;
9178 adjust = PROBE_INTERVAL;
9180 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9181 plus_constant (stack_pointer_rtx, -adjust)));
9182 emit_stack_probe (stack_pointer_rtx);
9186 adjust = size + PROBE_INTERVAL + dope;
9188 adjust = size + PROBE_INTERVAL - i;
9190 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9191 plus_constant (stack_pointer_rtx, -adjust)));
9192 emit_stack_probe (stack_pointer_rtx);
9194 /* Adjust back to account for the additional first interval. */
9195 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9196 plus_constant (stack_pointer_rtx,
9197 PROBE_INTERVAL + dope)));
9200 /* Otherwise, do the same as above, but in a loop. Note that we must be
9201 extra careful with variables wrapping around because we might be at
9202 the very top (or the very bottom) of the address space and we have
9203 to be able to handle this case properly; in particular, we use an
9204 equality test for the loop condition. */
9207 HOST_WIDE_INT rounded_size;
9208 struct scratch_reg sr;
9210 get_scratch_register_on_entry (&sr);
9213 /* Step 1: round SIZE to the previous multiple of the interval. */
9215 rounded_size = size & -PROBE_INTERVAL;
9218 /* Step 2: compute initial and final value of the loop counter. */
9220 /* SP = SP_0 + PROBE_INTERVAL. */
9221 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9222 plus_constant (stack_pointer_rtx,
9223 - (PROBE_INTERVAL + dope))));
9225 /* LAST_ADDR = SP_0 + PROBE_INTERVAL + ROUNDED_SIZE. */
9226 emit_move_insn (sr.reg, GEN_INT (-rounded_size));
9227 emit_insn (gen_rtx_SET (VOIDmode, sr.reg,
9228 gen_rtx_PLUS (Pmode, sr.reg,
9229 stack_pointer_rtx)));
9232 /* Step 3: the loop
9234 while (SP != LAST_ADDR)
9235 {
9236 SP = SP + PROBE_INTERVAL
9237 probe at SP
9238 }
9240 adjusts SP and probes to PROBE_INTERVAL + N * PROBE_INTERVAL for
9241 values of N from 1 until it is equal to ROUNDED_SIZE.  */
9243 emit_insn (ix86_gen_adjust_stack_and_probe (sr.reg, sr.reg, size_rtx));
9246 /* Step 4: adjust SP and probe at PROBE_INTERVAL + SIZE if we cannot
9247 assert at compile-time that SIZE is equal to ROUNDED_SIZE. */
9249 if (size != rounded_size)
9251 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9252 plus_constant (stack_pointer_rtx,
9253 rounded_size - size)));
9254 emit_stack_probe (stack_pointer_rtx);
9257 /* Adjust back to account for the additional first interval. */
9258 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9259 plus_constant (stack_pointer_rtx,
9260 PROBE_INTERVAL + dope)));
9262 release_scratch_register_on_entry (&sr);
9265 gcc_assert (cfun->machine->fs.cfa_reg != stack_pointer_rtx);
9266 cfun->machine->fs.sp_offset += size;
9268 /* Make sure nothing is scheduled before we are done. */
9269 emit_insn (gen_blockage ());
9272 /* Adjust the stack pointer up to REG while probing it. */
9275 output_adjust_stack_and_probe (rtx reg)
9277 static int labelno = 0;
9278 char loop_lab[32], end_lab[32];
9281 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
9282 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
9284 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
9286 /* Jump to END_LAB if SP == LAST_ADDR. */
9287 xops[0] = stack_pointer_rtx;
9289 output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
9290 fputs ("\tje\t", asm_out_file);
9291 assemble_name_raw (asm_out_file, end_lab);
9292 fputc ('\n', asm_out_file);
9294 /* SP = SP + PROBE_INTERVAL. */
9295 xops[1] = GEN_INT (PROBE_INTERVAL);
9296 output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
9299 xops[1] = const0_rtx;
9300 output_asm_insn ("or%z0\t{%1, (%0)|DWORD PTR [%0], %1}", xops);
9302 fprintf (asm_out_file, "\tjmp\t");
9303 assemble_name_raw (asm_out_file, loop_lab);
9304 fputc ('\n', asm_out_file);
9306 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
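/* Put together, the output is a loop along these lines (a sketch in
   AT&T syntax for 32-bit, where SCRATCH is the register holding
   LAST_ADDR):

   .LPSRL0: cmpl  %SCRATCH, %esp
            je    .LPSRE0
            subl  $PROBE_INTERVAL, %esp
            orl   $0, (%esp)
            jmp   .LPSRL0
   .LPSRE0:  */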
9311 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
9312 inclusive. These are offsets from the current stack pointer. */
9315 ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
9317 /* See if we have a constant small number of probes to generate. If so,
9318 that's the easy case. The run-time loop is made up of 7 insns in the
9319 generic case while the compile-time loop is made up of n insns for n #
9320 of intervals.  */
9321 if (size <= 7 * PROBE_INTERVAL)
9325 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
9326 it exceeds SIZE. If only one probe is needed, this will not
9327 generate any code. Then probe at FIRST + SIZE. */
9328 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
9329 emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + i)));
9331 emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + size)));
9334 /* Otherwise, do the same as above, but in a loop. Note that we must be
9335 extra careful with variables wrapping around because we might be at
9336 the very top (or the very bottom) of the address space and we have
9337 to be able to handle this case properly; in particular, we use an
9338 equality test for the loop condition. */
9341 HOST_WIDE_INT rounded_size, last;
9342 struct scratch_reg sr;
9344 get_scratch_register_on_entry (&sr);
9347 /* Step 1: round SIZE to the previous multiple of the interval. */
9349 rounded_size = size & -PROBE_INTERVAL;
9352 /* Step 2: compute initial and final value of the loop counter. */
9354 /* TEST_OFFSET = FIRST. */
9355 emit_move_insn (sr.reg, GEN_INT (-first));
9357 /* LAST_OFFSET = FIRST + ROUNDED_SIZE. */
9358 last = first + rounded_size;
9361 /* Step 3: the loop
9363 while (TEST_ADDR != LAST_ADDR)
9364 {
9365 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
9366 probe at TEST_ADDR
9367 }
9369 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
9370 until it is equal to ROUNDED_SIZE.  */
9372 emit_insn (ix86_gen_probe_stack_range (sr.reg, sr.reg, GEN_INT (-last)));
9375 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
9376 that SIZE is equal to ROUNDED_SIZE. */
9378 if (size != rounded_size)
9379 emit_stack_probe (plus_constant (gen_rtx_PLUS (Pmode,
9382 rounded_size - size));
9384 release_scratch_register_on_entry (&sr);
9387 /* Make sure nothing is scheduled before we are done. */
9388 emit_insn (gen_blockage ());
9391 /* Probe a range of stack addresses from REG to END, inclusive. These are
9392 offsets from the current stack pointer. */
9395 output_probe_stack_range (rtx reg, rtx end)
9397 static int labelno = 0;
9398 char loop_lab[32], end_lab[32];
9401 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
9402 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
9404 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
9406 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
9409 output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
9410 fputs ("\tje\t", asm_out_file);
9411 assemble_name_raw (asm_out_file, end_lab);
9412 fputc ('\n', asm_out_file);
9414 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
9415 xops[1] = GEN_INT (PROBE_INTERVAL);
9416 output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
9418 /* Probe at TEST_ADDR. */
9419 xops[0] = stack_pointer_rtx;
9421 xops[2] = const0_rtx;
9422 output_asm_insn ("or%z0\t{%2, (%0,%1)|DWORD PTR [%0+%1], %2}", xops);
9424 fprintf (asm_out_file, "\tjmp\t");
9425 assemble_name_raw (asm_out_file, loop_lab);
9426 fputc ('\n', asm_out_file);
9428 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
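/* Here the emitted loop is similar, but the probe indexes off the
   unchanged stack pointer (a sketch in AT&T syntax for 32-bit, with
   REG holding the negated test offset and END the negated last offset):

   .LPSRL1: cmpl  %END, %REG
            je    .LPSRE1
            subl  $PROBE_INTERVAL, %REG
            orl   $0, (%esp,%REG)
            jmp   .LPSRL1
   .LPSRE1:  */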
9433 /* Finalize the stack_realign_needed flag, which guides generation of the
9434 prologue/epilogue in the correct form.  */
9436 ix86_finalize_stack_realign_flags (void)
9438 /* Check if stack realignment is really needed after reload, and
9439 store the result in cfun.  */
9440 unsigned int incoming_stack_boundary
9441 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
9442 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
9443 unsigned int stack_realign = (incoming_stack_boundary
9444 < (current_function_is_leaf
9445 ? crtl->max_used_stack_slot_alignment
9446 : crtl->stack_alignment_needed));
9448 if (crtl->stack_realign_finalized)
9450 /* After stack_realign_needed is finalized, we can no longer
9451 update it.  */
9452 gcc_assert (crtl->stack_realign_needed == stack_realign);
9456 crtl->stack_realign_needed = stack_realign;
9457 crtl->stack_realign_finalized = true;
9461 /* Expand the prologue into a bunch of separate insns. */
9464 ix86_expand_prologue (void)
9466 struct machine_function *m = cfun->machine;
9469 struct ix86_frame frame;
9470 HOST_WIDE_INT allocate;
9471 bool int_registers_saved;
9473 ix86_finalize_stack_realign_flags ();
9475 /* DRAP should not coexist with stack_realign_fp */
9476 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
9478 memset (&m->fs, 0, sizeof (m->fs));
9480 /* Initialize CFA state for before the prologue. */
9481 m->fs.cfa_reg = stack_pointer_rtx;
9482 m->fs.cfa_offset = INCOMING_FRAME_SP_OFFSET;
9484 /* Track SP offset to the CFA. We continue tracking this after we've
9485 swapped the CFA register away from SP. In the case of re-alignment
9486 this is fudged; we're interested in offsets within the local frame.  */
9487 m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
9488 m->fs.sp_valid = true;
9490 ix86_compute_frame_layout (&frame);
9492 if (!TARGET_64BIT && ix86_function_ms_hook_prologue (current_function_decl))
9494 /* We should have already generated an error for any use of
9495 ms_hook on a nested function. */
9496 gcc_checking_assert (!ix86_static_chain_on_stack);
9498 /* Check if profiling is active and whether we shall use the profiling
9499 before prologue variant.  If so, emit a sorry.  */
9500 if (crtl->profile && flag_fentry != 0)
9501 sorry ("ms_hook_prologue attribute isn't compatible with -mfentry for 32-bit");
9503 /* In ix86_asm_output_function_label we emitted:
9504 8b ff movl.s %edi,%edi
9505 55 push %ebp
9506 8b ec movl.s %esp,%ebp
9508 This matches the hookable function prologue in Win32 API
9509 functions in Microsoft Windows XP Service Pack 2 and newer.
9510 Wine uses this to enable Windows apps to hook the Win32 API
9511 functions provided by Wine.
9513 What that means is that we've already set up the frame pointer. */
9515 if (frame_pointer_needed
9516 && !(crtl->drap_reg && crtl->stack_realign_needed))
9520 /* We've decided to use the frame pointer already set up.
9521 Describe this to the unwinder by pretending that both
9522 push and mov insns happen right here.
9524 Putting the unwind info here at the end of the ms_hook
9525 is done so that we can make absolutely certain we get
9526 the required byte sequence at the start of the function,
9527 rather than relying on an assembler that can produce
9528 the exact encoding required.
9530 However it does mean (in the unpatched case) that we have
9531 a 1 insn window where the asynchronous unwind info is
9532 incorrect. However, if we placed the unwind info at
9533 its correct location we would have incorrect unwind info
9534 in the patched case. Which is probably all moot since
9535 I don't expect Wine generates dwarf2 unwind info for the
9536 system libraries that use this feature. */
9538 insn = emit_insn (gen_blockage ());
9540 push = gen_push (hard_frame_pointer_rtx);
9541 mov = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
9543 RTX_FRAME_RELATED_P (push) = 1;
9544 RTX_FRAME_RELATED_P (mov) = 1;
9546 RTX_FRAME_RELATED_P (insn) = 1;
9547 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
9548 gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, push, mov)));
9550 /* Note that gen_push incremented m->fs.cfa_offset, even
9551 though we didn't emit the push insn here. */
9552 m->fs.cfa_reg = hard_frame_pointer_rtx;
9553 m->fs.fp_offset = m->fs.cfa_offset;
9554 m->fs.fp_valid = true;
9558 /* The frame pointer is not needed so pop %ebp again.
9559 This leaves us with a pristine state. */
9560 emit_insn (gen_pop (hard_frame_pointer_rtx));
9564 /* The first insn of a function that accepts its static chain on the
9565 stack is to push the register that would be filled in by a direct
9566 call. This insn will be skipped by the trampoline. */
9567 else if (ix86_static_chain_on_stack)
9569 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
9570 emit_insn (gen_blockage ());
9572 /* We don't want to interpret this push insn as a register save,
9573 only as a stack adjustment. The real copy of the register as
9574 a save will be done later, if needed. */
9575 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
9576 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
9577 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
9578 RTX_FRAME_RELATED_P (insn) = 1;
9581 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
9582 DRAP is needed and stack realignment is really needed after reload.  */
9583 if (stack_realign_drap)
9585 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
9587 /* Only need to push parameter pointer reg if it is caller saved. */
9588 if (!call_used_regs[REGNO (crtl->drap_reg)])
9590 /* Push arg pointer reg */
9591 insn = emit_insn (gen_push (crtl->drap_reg));
9592 RTX_FRAME_RELATED_P (insn) = 1;
9595 /* Grab the argument pointer. */
9596 t = plus_constant (stack_pointer_rtx, m->fs.sp_offset);
9597 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
9598 RTX_FRAME_RELATED_P (insn) = 1;
9599 m->fs.cfa_reg = crtl->drap_reg;
9600 m->fs.cfa_offset = 0;
9602 /* Align the stack. */
9603 insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
9605 GEN_INT (-align_bytes)));
9606 RTX_FRAME_RELATED_P (insn) = 1;
9608 /* Replicate the return address on the stack so that the return
9609 address can be reached via the (argp - 1) slot.  This is needed
9610 to implement the RETURN_ADDR_RTX macro and the intrinsic function
9611 expand_builtin_return_addr, etc.  */
9612 t = plus_constant (crtl->drap_reg, -UNITS_PER_WORD);
9613 t = gen_frame_mem (Pmode, t);
9614 insn = emit_insn (gen_push (t));
9615 RTX_FRAME_RELATED_P (insn) = 1;
9617 /* For the purposes of frame and register save area addressing,
9618 we've started over with a new frame. */
9619 m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
9620 m->fs.realigned = true;
9623 if (frame_pointer_needed && !m->fs.fp_valid)
9625 /* Note: AT&T enter does NOT have reversed args. Enter is probably
9626 slower on all targets. Also sdb doesn't like it. */
9627 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
9628 RTX_FRAME_RELATED_P (insn) = 1;
9630 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
9631 RTX_FRAME_RELATED_P (insn) = 1;
9633 if (m->fs.cfa_reg == stack_pointer_rtx)
9634 m->fs.cfa_reg = hard_frame_pointer_rtx;
9635 gcc_assert (m->fs.sp_offset == frame.hard_frame_pointer_offset);
9636 m->fs.fp_offset = m->fs.sp_offset;
9637 m->fs.fp_valid = true;
9640 int_registers_saved = (frame.nregs == 0);
9642 if (!int_registers_saved)
9644 /* If saving registers via PUSH, do so now. */
9645 if (!frame.save_regs_using_mov)
9647 ix86_emit_save_regs ();
9648 int_registers_saved = true;
9649 gcc_assert (m->fs.sp_offset == frame.reg_save_offset);
9652 /* When using the red zone, we may start register saving before allocating
9653 the stack frame, saving one cycle of the prologue.  However, avoid
9654 doing this if we have to probe the stack; at least on x86_64 the
9655 stack probe can turn into a call that clobbers a red zone location. */
9656 else if (ix86_using_red_zone ()
9657 && (! TARGET_STACK_PROBE
9658 || frame.stack_pointer_offset < CHECK_STACK_LIMIT))
9660 ix86_emit_save_regs_using_mov (frame.reg_save_offset);
9661 int_registers_saved = true;
9665 if (stack_realign_fp)
9667 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
9668 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
9670 /* The computation of the size of the re-aligned stack frame means
9671 that we must allocate the size of the register save area before
9672 performing the actual alignment. Otherwise we cannot guarantee
9673 that there's enough storage above the realignment point. */
9674 if (m->fs.sp_offset != frame.sse_reg_save_offset)
9675 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9676 GEN_INT (m->fs.sp_offset
9677 - frame.sse_reg_save_offset),
9680 /* Align the stack. */
9681 insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
9683 GEN_INT (-align_bytes)));
9685 /* For the purposes of register save area addressing, the stack
9686 pointer is no longer valid. As for the value of sp_offset,
9687 see ix86_compute_frame_layout, which we need to match in order
9688 to pass verification of stack_pointer_offset at the end. */
9689 m->fs.sp_offset = (m->fs.sp_offset + align_bytes) & -align_bytes;
9690 m->fs.sp_valid = false;
9693 allocate = frame.stack_pointer_offset - m->fs.sp_offset;
9695 if (flag_stack_usage)
9697 /* We start to count from ARG_POINTER. */
9698 HOST_WIDE_INT stack_size = frame.stack_pointer_offset;
9700 /* If it was realigned, take into account the fake frame. */
9701 if (stack_realign_drap)
9703 if (ix86_static_chain_on_stack)
9704 stack_size += UNITS_PER_WORD;
9706 if (!call_used_regs[REGNO (crtl->drap_reg)])
9707 stack_size += UNITS_PER_WORD;
9709 /* This over-estimates by 1 minimal-stack-alignment-unit but
9710 mitigates that by counting in the new return address slot. */
9711 current_function_dynamic_stack_size
9712 += crtl->stack_alignment_needed / BITS_PER_UNIT;
9715 current_function_static_stack_size = stack_size;
9718 /* The stack has already been decremented by the instruction calling us
9719 so we need to probe unconditionally to preserve the protection area. */
9720 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
9722 /* We expect the registers to be saved when probes are used. */
9723 gcc_assert (int_registers_saved);
9725 if (STACK_CHECK_MOVING_SP)
9727 ix86_adjust_stack_and_probe (allocate);
9732 HOST_WIDE_INT size = allocate;
9734 if (TARGET_64BIT && size >= (HOST_WIDE_INT) 0x80000000)
9735 size = 0x80000000 - STACK_CHECK_PROTECT - 1;
9737 if (TARGET_STACK_PROBE)
9738 ix86_emit_probe_stack_range (0, size + STACK_CHECK_PROTECT);
9740 ix86_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
9746 else if (!ix86_target_stack_probe ()
9747 || frame.stack_pointer_offset < CHECK_STACK_LIMIT)
9749 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9750 GEN_INT (-allocate), -1,
9751 m->fs.cfa_reg == stack_pointer_rtx);
9755 rtx eax = gen_rtx_REG (Pmode, AX_REG);
9757 rtx (*adjust_stack_insn)(rtx, rtx, rtx);
9759 bool eax_live = false;
9760 bool r10_live = false;
9763 r10_live = (DECL_STATIC_CHAIN (current_function_decl) != 0);
9764 if (!TARGET_64BIT_MS_ABI)
9765 eax_live = ix86_eax_live_at_start_p ();
9769 emit_insn (gen_push (eax));
9770 allocate -= UNITS_PER_WORD;
9774 r10 = gen_rtx_REG (Pmode, R10_REG);
9775 emit_insn (gen_push (r10));
9776 allocate -= UNITS_PER_WORD;
9779 emit_move_insn (eax, GEN_INT (allocate));
9780 emit_insn (ix86_gen_allocate_stack_worker (eax, eax));
9782 /* Use the fact that AX still contains ALLOCATE. */
9783 adjust_stack_insn = (TARGET_64BIT
9784 ? gen_pro_epilogue_adjust_stack_di_sub
9785 : gen_pro_epilogue_adjust_stack_si_sub);
9787 insn = emit_insn (adjust_stack_insn (stack_pointer_rtx,
9788 stack_pointer_rtx, eax));
9790 if (m->fs.cfa_reg == stack_pointer_rtx)
9792 m->fs.cfa_offset += allocate;
9794 RTX_FRAME_RELATED_P (insn) = 1;
9795 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9796 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9797 plus_constant (stack_pointer_rtx,
9800 m->fs.sp_offset += allocate;
9802 if (r10_live && eax_live)
9804 t = choose_baseaddr (m->fs.sp_offset - allocate);
9805 emit_move_insn (r10, gen_frame_mem (Pmode, t));
9806 t = choose_baseaddr (m->fs.sp_offset - allocate - UNITS_PER_WORD);
9807 emit_move_insn (eax, gen_frame_mem (Pmode, t));
9809 else if (eax_live || r10_live)
9811 t = choose_baseaddr (m->fs.sp_offset - allocate);
9812 emit_move_insn ((eax_live ? eax : r10), gen_frame_mem (Pmode, t));
9815 gcc_assert (m->fs.sp_offset == frame.stack_pointer_offset);
9817 if (!int_registers_saved)
9818 ix86_emit_save_regs_using_mov (frame.reg_save_offset);
9820 ix86_emit_save_sse_regs_using_mov (frame.sse_reg_save_offset);
9822 pic_reg_used = false;
9823 if (pic_offset_table_rtx
9824 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
9827 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
9829 if (alt_pic_reg_used != INVALID_REGNUM)
9830 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
9832 pic_reg_used = true;
9839 if (ix86_cmodel == CM_LARGE_PIC)
9841 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
9842 rtx label = gen_label_rtx ();
9844 LABEL_PRESERVE_P (label) = 1;
9845 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
9846 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
9847 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
9848 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
9849 pic_offset_table_rtx, tmp_reg));
9852 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
9855 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
9858 /* In the pic_reg_used case, make sure that the got load isn't deleted
9859 when mcount needs it.  A blockage to avoid call movement across the
9860 mcount call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
9861 note.  */
9862 if (crtl->profile && !flag_fentry && pic_reg_used)
9863 emit_insn (gen_prologue_use (pic_offset_table_rtx));
9865 if (crtl->drap_reg && !crtl->stack_realign_needed)
9867 /* vDRAP is set up, but after reload it turns out stack realignment
9868 isn't necessary; here we emit the prologue to set up DRAP
9869 without the stack realignment adjustment.  */
9870 t = choose_baseaddr (0);
9871 emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
9874 /* Prevent instructions from being scheduled into the register save push
9875 sequence when access to the red-zone area is done through the frame
9876 pointer.  The offset between the frame pointer and the stack pointer is
9877 calculated relative to the value of the stack pointer at the end of the
9878 function prologue, and moving instructions that access the red-zone area
9879 via the frame pointer inside the push sequence violates this assumption.  */
9880 if (frame_pointer_needed && frame.red_zone_size)
9881 emit_insn (gen_memory_blockage ());
9883 /* Emit cld instruction if stringops are used in the function. */
9884 if (TARGET_CLD && ix86_current_function_needs_cld)
9885 emit_insn (gen_cld ());
9888 /* Emit code to restore REG using a POP insn. */
9891 ix86_emit_restore_reg_using_pop (rtx reg)
9893 struct machine_function *m = cfun->machine;
9894 rtx insn = emit_insn (gen_pop (reg));
9896 ix86_add_cfa_restore_note (insn, reg, m->fs.sp_offset);
9897 m->fs.sp_offset -= UNITS_PER_WORD;
9899 if (m->fs.cfa_reg == crtl->drap_reg
9900 && REGNO (reg) == REGNO (crtl->drap_reg))
9902 /* Previously we'd represented the CFA as an expression
9903 like *(%ebp - 8). We've just popped that value from
9904 the stack, which means we need to reset the CFA to
9905 the drap register. This will remain until we restore
9906 the stack pointer. */
9907 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
9908 RTX_FRAME_RELATED_P (insn) = 1;
9910 /* This means that the DRAP register is valid for addressing too. */
9911 m->fs.drap_valid = true;
9915 if (m->fs.cfa_reg == stack_pointer_rtx)
9917 rtx x = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
9918 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
9919 add_reg_note (insn, REG_CFA_ADJUST_CFA, x);
9920 RTX_FRAME_RELATED_P (insn) = 1;
9922 m->fs.cfa_offset -= UNITS_PER_WORD;
9925 /* When the frame pointer is the CFA, and we pop it, we are
9926 swapping back to the stack pointer as the CFA. This happens
9927 for stack frames that don't allocate other data, so we assume
9928 the stack pointer is now pointing at the return address, i.e.
9929 the function entry state, which makes the offset one word.  */
9930 if (reg == hard_frame_pointer_rtx)
9932 m->fs.fp_valid = false;
9933 if (m->fs.cfa_reg == hard_frame_pointer_rtx)
9935 m->fs.cfa_reg = stack_pointer_rtx;
9936 m->fs.cfa_offset -= UNITS_PER_WORD;
9938 add_reg_note (insn, REG_CFA_DEF_CFA,
9939 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
9940 GEN_INT (m->fs.cfa_offset)));
9941 RTX_FRAME_RELATED_P (insn) = 1;
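/* Concretely: in a frame where %ebp was the CFA, the pop handled above
   leaves %esp pointing at the return address, so the new CFA becomes
   %esp + UNITS_PER_WORD (%esp + 4 on ia32).  */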
9946 /* Emit code to restore saved registers using POP insns. */
9949 ix86_emit_restore_regs_using_pop (void)
9953 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9954 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
9955 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno));
9958 /* Emit code and notes for the LEAVE instruction. */
9961 ix86_emit_leave (void)
9963 struct machine_function *m = cfun->machine;
9964 rtx insn = emit_insn (ix86_gen_leave ());
9966 ix86_add_queued_cfa_restore_notes (insn);
9968 gcc_assert (m->fs.fp_valid);
9969 m->fs.sp_valid = true;
9970 m->fs.sp_offset = m->fs.fp_offset - UNITS_PER_WORD;
9971 m->fs.fp_valid = false;
9973 if (m->fs.cfa_reg == hard_frame_pointer_rtx)
9975 m->fs.cfa_reg = stack_pointer_rtx;
9976 m->fs.cfa_offset = m->fs.sp_offset;
9978 add_reg_note (insn, REG_CFA_DEF_CFA,
9979 plus_constant (stack_pointer_rtx, m->fs.sp_offset));
9980 RTX_FRAME_RELATED_P (insn) = 1;
9981 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx,
9986 /* Emit code to restore saved registers using MOV insns.
9987 First register is restored from CFA - CFA_OFFSET. */
9989 ix86_emit_restore_regs_using_mov (HOST_WIDE_INT cfa_offset,
9990 int maybe_eh_return)
9992 struct machine_function *m = cfun->machine;
9995 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9996 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
9998 rtx reg = gen_rtx_REG (Pmode, regno);
10001 mem = choose_baseaddr (cfa_offset);
10002 mem = gen_frame_mem (Pmode, mem);
10003 insn = emit_move_insn (reg, mem);
10005 if (m->fs.cfa_reg == crtl->drap_reg && regno == REGNO (crtl->drap_reg))
10007 /* Previously we'd represented the CFA as an expression
10008 like *(%ebp - 8).  We've just loaded that value from
10009 the stack, which means we need to reset the CFA to
10010 the drap register. This will remain until we restore
10011 the stack pointer. */
10012 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
10013 RTX_FRAME_RELATED_P (insn) = 1;
10015 /* This means that the DRAP register is valid for addressing. */
10016 m->fs.drap_valid = true;
10019 ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);
10021 cfa_offset -= UNITS_PER_WORD;
10025 /* Emit code to restore saved registers using MOV insns.
10026 First register is restored from CFA - CFA_OFFSET. */
10028 ix86_emit_restore_sse_regs_using_mov (HOST_WIDE_INT cfa_offset,
10029 int maybe_eh_return)
10031 unsigned int regno;
10033 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10034 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
10036 rtx reg = gen_rtx_REG (V4SFmode, regno);
10039 mem = choose_baseaddr (cfa_offset);
10040 mem = gen_rtx_MEM (V4SFmode, mem);
10041 set_mem_align (mem, 128);
10042 emit_move_insn (reg, mem);
10044 ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);
10050 /* Restore function stack, frame, and registers. */
10053 ix86_expand_epilogue (int style)
10055 struct machine_function *m = cfun->machine;
10056 struct machine_frame_state frame_state_save = m->fs;
10057 struct ix86_frame frame;
10058 bool restore_regs_via_mov;
10061 ix86_finalize_stack_realign_flags ();
10062 ix86_compute_frame_layout (&frame);
10064 m->fs.sp_valid = (!frame_pointer_needed
10065 || (current_function_sp_is_unchanging
10066 && !stack_realign_fp));
10067 gcc_assert (!m->fs.sp_valid
10068 || m->fs.sp_offset == frame.stack_pointer_offset);
10070 /* The FP must be valid if the frame pointer is present. */
10071 gcc_assert (frame_pointer_needed == m->fs.fp_valid);
10072 gcc_assert (!m->fs.fp_valid
10073 || m->fs.fp_offset == frame.hard_frame_pointer_offset);
10075 /* We must have *some* valid pointer to the stack frame. */
10076 gcc_assert (m->fs.sp_valid || m->fs.fp_valid);
10078 /* The DRAP is never valid at this point. */
10079 gcc_assert (!m->fs.drap_valid);
10081 /* See the comment about red zone and frame
10082 pointer usage in ix86_expand_prologue. */
10083 if (frame_pointer_needed && frame.red_zone_size)
10084 emit_insn (gen_memory_blockage ());
10086 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
10087 gcc_assert (!using_drap || m->fs.cfa_reg == crtl->drap_reg);
10089 /* Determine the CFA offset of the end of the red-zone. */
10090 m->fs.red_zone_offset = 0;
10091 if (ix86_using_red_zone () && crtl->args.pops_args < 65536)
10093 /* The red-zone begins below the return address. */
10094 m->fs.red_zone_offset = RED_ZONE_SIZE + UNITS_PER_WORD;
10096 /* When the register save area is in the aligned portion of
10097 the stack, determine the maximum runtime displacement that
10098 matches up with the aligned frame. */
10099 if (stack_realign_drap)
10100 m->fs.red_zone_offset -= (crtl->stack_alignment_needed / BITS_PER_UNIT
10104 /* Special care must be taken for the normal return case of a function
10105 using eh_return: the eax and edx registers are marked as saved, but
10106 not restored along this path. Adjust the save location to match. */
10107 if (crtl->calls_eh_return && style != 2)
10108 frame.reg_save_offset -= 2 * UNITS_PER_WORD;
/* If we're only restoring one register and sp is not valid then
   use a move instruction to restore the register, since it's
   less work than reloading sp and popping the register.  */
10113 if (!m->fs.sp_valid && frame.nregs <= 1)
10114 restore_regs_via_mov = true;
10115 /* EH_RETURN requires the use of moves to function properly. */
10116 else if (crtl->calls_eh_return)
10117 restore_regs_via_mov = true;
10118 else if (TARGET_EPILOGUE_USING_MOVE
10119 && cfun->machine->use_fast_prologue_epilogue
10120 && (frame.nregs > 1
10121 || m->fs.sp_offset != frame.reg_save_offset))
10122 restore_regs_via_mov = true;
10123 else if (frame_pointer_needed
10125 && m->fs.sp_offset != frame.reg_save_offset)
10126 restore_regs_via_mov = true;
10127 else if (frame_pointer_needed
10128 && TARGET_USE_LEAVE
10129 && cfun->machine->use_fast_prologue_epilogue
10130 && frame.nregs == 1)
10131 restore_regs_via_mov = true;
10133 restore_regs_via_mov = false;
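/* In short: the chain above prefers MOV-based restores whenever the
   stack pointer cannot reach the save area, when eh_return is
   involved, or when popping would first require an extra stack
   adjustment that the tuning says is not worth it; otherwise plain
   pops win.  */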
10135 if (restore_regs_via_mov || frame.nsseregs)
10137 /* Ensure that the entire register save area is addressable via
10138 the stack pointer, if we will restore via sp. */
10140 && m->fs.sp_offset > 0x7fffffff
10141 && !(m->fs.fp_valid || m->fs.drap_valid)
10142 && (frame.nsseregs + frame.nregs) != 0)
10144 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10145 GEN_INT (m->fs.sp_offset
10146 - frame.sse_reg_save_offset),
10148 m->fs.cfa_reg == stack_pointer_rtx);
10152 /* If there are any SSE registers to restore, then we have to do it
10153 via moves, since there's obviously no pop for SSE regs. */
10154 if (frame.nsseregs)
10155 ix86_emit_restore_sse_regs_using_mov (frame.sse_reg_save_offset,
10158 if (restore_regs_via_mov)
10163 ix86_emit_restore_regs_using_mov (frame.reg_save_offset, style == 2);
10165 /* eh_return epilogues need %ecx added to the stack pointer. */
10168 rtx insn, sa = EH_RETURN_STACKADJ_RTX;
10170 /* Stack align doesn't work with eh_return. */
10171 gcc_assert (!stack_realign_drap);
/* Neither do regparm nested functions.  */
10173 gcc_assert (!ix86_static_chain_on_stack);
10175 if (frame_pointer_needed)
10177 t = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
10178 t = plus_constant (t, m->fs.fp_offset - UNITS_PER_WORD);
10179 emit_insn (gen_rtx_SET (VOIDmode, sa, t));
10181 t = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
10182 insn = emit_move_insn (hard_frame_pointer_rtx, t);
10184 /* Note that we use SA as a temporary CFA, as the return
10185 address is at the proper place relative to it. We
10186 pretend this happens at the FP restore insn because
10187 prior to this insn the FP would be stored at the wrong
10188 offset relative to SA, and after this insn we have no
10189 other reasonable register to use for the CFA. We don't
10190 bother resetting the CFA to the SP for the duration of
10191 the return insn. */
10192 add_reg_note (insn, REG_CFA_DEF_CFA,
10193 plus_constant (sa, UNITS_PER_WORD));
10194 ix86_add_queued_cfa_restore_notes (insn);
10195 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
10196 RTX_FRAME_RELATED_P (insn) = 1;
10198 m->fs.cfa_reg = sa;
10199 m->fs.cfa_offset = UNITS_PER_WORD;
10200 m->fs.fp_valid = false;
10202 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
10203 const0_rtx, style, false);
10207 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
10208 t = plus_constant (t, m->fs.sp_offset - UNITS_PER_WORD);
10209 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, t));
10210 ix86_add_queued_cfa_restore_notes (insn);
10212 gcc_assert (m->fs.cfa_reg == stack_pointer_rtx);
10213 if (m->fs.cfa_offset != UNITS_PER_WORD)
10215 m->fs.cfa_offset = UNITS_PER_WORD;
10216 add_reg_note (insn, REG_CFA_DEF_CFA,
10217 plus_constant (stack_pointer_rtx,
10219 RTX_FRAME_RELATED_P (insn) = 1;
10222 m->fs.sp_offset = UNITS_PER_WORD;
10223 m->fs.sp_valid = true;
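/* Net effect of the arithmetic above: the new stack pointer is
   (CFA - UNITS_PER_WORD) + EH_RETURN_STACKADJ, i.e. the saved return
   address slot displaced by the requested adjustment, so the final
   return transfers control with the stack already adjusted.  */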
10228 /* First step is to deallocate the stack frame so that we can
10229 pop the registers. */
10230 if (!m->fs.sp_valid)
10232 pro_epilogue_adjust_stack (stack_pointer_rtx, hard_frame_pointer_rtx,
10233 GEN_INT (m->fs.fp_offset
10234 - frame.reg_save_offset),
10237 else if (m->fs.sp_offset != frame.reg_save_offset)
10239 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10240 GEN_INT (m->fs.sp_offset
10241 - frame.reg_save_offset),
10243 m->fs.cfa_reg == stack_pointer_rtx);
10246 ix86_emit_restore_regs_using_pop ();
/* If we used a frame pointer and haven't already got rid of it,
   then do so now.  */
10251 if (m->fs.fp_valid)
10253 /* If the stack pointer is valid and pointing at the frame
10254 pointer store address, then we only need a pop. */
10255 if (m->fs.sp_valid && m->fs.sp_offset == frame.hard_frame_pointer_offset)
10256 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
10257 /* Leave results in shorter dependency chains on CPUs that are
10258 able to grok it fast. */
10259 else if (TARGET_USE_LEAVE
10260 || optimize_function_for_size_p (cfun)
10261 || !cfun->machine->use_fast_prologue_epilogue)
10262 ix86_emit_leave ();
10265 pro_epilogue_adjust_stack (stack_pointer_rtx,
10266 hard_frame_pointer_rtx,
10267 const0_rtx, style, !using_drap);
10268 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
10274 int param_ptr_offset = UNITS_PER_WORD;
10277 gcc_assert (stack_realign_drap);
10279 if (ix86_static_chain_on_stack)
10280 param_ptr_offset += UNITS_PER_WORD;
10281 if (!call_used_regs[REGNO (crtl->drap_reg)])
10282 param_ptr_offset += UNITS_PER_WORD;
10284 insn = emit_insn (gen_rtx_SET
10285 (VOIDmode, stack_pointer_rtx,
10286 gen_rtx_PLUS (Pmode,
10288 GEN_INT (-param_ptr_offset))));
10289 m->fs.cfa_reg = stack_pointer_rtx;
10290 m->fs.cfa_offset = param_ptr_offset;
10291 m->fs.sp_offset = param_ptr_offset;
10292 m->fs.realigned = false;
10294 add_reg_note (insn, REG_CFA_DEF_CFA,
10295 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10296 GEN_INT (param_ptr_offset)));
10297 RTX_FRAME_RELATED_P (insn) = 1;
10299 if (!call_used_regs[REGNO (crtl->drap_reg)])
10300 ix86_emit_restore_reg_using_pop (crtl->drap_reg);
10303 /* At this point the stack pointer must be valid, and we must have
10304 restored all of the registers. We may not have deallocated the
10305 entire stack frame. We've delayed this until now because it may
10306 be possible to merge the local stack deallocation with the
10307 deallocation forced by ix86_static_chain_on_stack. */
10308 gcc_assert (m->fs.sp_valid);
10309 gcc_assert (!m->fs.fp_valid);
10310 gcc_assert (!m->fs.realigned);
10311 if (m->fs.sp_offset != UNITS_PER_WORD)
10313 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10314 GEN_INT (m->fs.sp_offset - UNITS_PER_WORD),
10318 /* Sibcall epilogues don't want a return instruction. */
10321 m->fs = frame_state_save;
10325 if (crtl->args.pops_args && crtl->args.size)
10327 rtx popc = GEN_INT (crtl->args.pops_args);
10329 /* i386 can only pop 64K bytes. If asked to pop more, pop return
10330 address, do explicit add, and jump indirectly to the caller. */
10332 if (crtl->args.pops_args >= 65536)
10334 rtx ecx = gen_rtx_REG (SImode, CX_REG);
10337 /* There is no "pascal" calling convention in any 64bit ABI. */
10338 gcc_assert (!TARGET_64BIT);
10340 insn = emit_insn (gen_pop (ecx));
10341 m->fs.cfa_offset -= UNITS_PER_WORD;
10342 m->fs.sp_offset -= UNITS_PER_WORD;
10344 add_reg_note (insn, REG_CFA_ADJUST_CFA,
10345 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
10346 add_reg_note (insn, REG_CFA_REGISTER,
10347 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
10348 RTX_FRAME_RELATED_P (insn) = 1;
10350 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10352 emit_jump_insn (gen_return_indirect_internal (ecx));
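/* Schematically, the sequence emitted for this >64K case is
       popl  %ecx              ; pop the return address
       addl  $N, %esp          ; N == crtl->args.pops_args
       jmp   *%ecx
   which matches the effect of the "ret $N" used below when N fits
   in 16 bits.  */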
10355 emit_jump_insn (gen_return_pop_internal (popc));
10358 emit_jump_insn (gen_return_internal ());
10360 /* Restore the state back to the state from the prologue,
10361 so that it's correct for the next epilogue. */
10362 m->fs = frame_state_save;
10365 /* Reset from the function's potential modifications. */
10368 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
10369 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
10371 if (pic_offset_table_rtx)
10372 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
10374 /* Mach-O doesn't support labels at the end of objects, so if
10375 it looks like we might want one, insert a NOP. */
10377 rtx insn = get_last_insn ();
10380 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
10381 insn = PREV_INSN (insn);
10385 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
10386 fputs ("\tnop\n", file);
10392 /* Return a scratch register to use in the split stack prologue. The
split stack prologue is used for -fsplit-stack.  It consists of the
first instructions in the function, even before the regular prologue.
10395 The scratch register can be any caller-saved register which is not
10396 used for parameters or for the static chain. */
10398 static unsigned int
10399 split_stack_prologue_scratch_regno (void)
10408 is_fastcall = (lookup_attribute ("fastcall",
10409 TYPE_ATTRIBUTES (TREE_TYPE (cfun->decl)))
10411 regparm = ix86_function_regparm (TREE_TYPE (cfun->decl), cfun->decl);
10415 if (DECL_STATIC_CHAIN (cfun->decl))
sorry ("-fsplit-stack does not support fastcall with "
       "a nested function");
10419 return INVALID_REGNUM;
10423 else if (regparm < 3)
10425 if (!DECL_STATIC_CHAIN (cfun->decl))
sorry ("-fsplit-stack does not support 2 register "
       "parameters for a nested function");
10433 return INVALID_REGNUM;
10440 /* FIXME: We could make this work by pushing a register
10441 around the addition and comparison. */
10442 sorry ("-fsplit-stack does not support 3 register parameters");
10443 return INVALID_REGNUM;
/* A SYMBOL_REF for the function which allocates new stack space
   for -fsplit-stack.  */
10451 static GTY(()) rtx split_stack_fn;
10453 /* Handle -fsplit-stack. These are the first instructions in the
10454 function, even before the regular prologue. */
10457 ix86_expand_split_stack_prologue (void)
10459 struct ix86_frame frame;
10460 HOST_WIDE_INT allocate;
10462 rtx label, limit, current, jump_insn, allocate_rtx, call_insn, call_fusage;
10463 rtx scratch_reg = NULL_RTX;
10464 rtx varargs_label = NULL_RTX;
10466 gcc_assert (flag_split_stack && reload_completed);
10468 ix86_finalize_stack_realign_flags ();
10469 ix86_compute_frame_layout (&frame);
10470 allocate = frame.stack_pointer_offset - INCOMING_FRAME_SP_OFFSET;
10472 /* This is the label we will branch to if we have enough stack
10473 space. We expect the basic block reordering pass to reverse this
10474 branch if optimizing, so that we branch in the unlikely case. */
10475 label = gen_label_rtx ();
10477 /* We need to compare the stack pointer minus the frame size with
10478 the stack boundary in the TCB. The stack boundary always gives
10479 us SPLIT_STACK_AVAILABLE bytes, so if we need less than that we
10480 can compare directly. Otherwise we need to do an addition. */
10482 limit = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
10483 UNSPEC_STACK_CHECK);
10484 limit = gen_rtx_CONST (Pmode, limit);
10485 limit = gen_rtx_MEM (Pmode, limit);
10486 if (allocate < SPLIT_STACK_AVAILABLE)
10487 current = stack_pointer_rtx;
10490 unsigned int scratch_regno;
10493 /* We need a scratch register to hold the stack pointer minus
10494 the required frame size. Since this is the very start of the
10495 function, the scratch register can be any caller-saved
10496 register which is not used for parameters. */
10497 offset = GEN_INT (- allocate);
10498 scratch_regno = split_stack_prologue_scratch_regno ();
10499 if (scratch_regno == INVALID_REGNUM)
10501 scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
10502 if (!TARGET_64BIT || x86_64_immediate_operand (offset, Pmode))
10504 /* We don't use ix86_gen_add3 in this case because it will
10505 want to split to lea, but when not optimizing the insn
10506 will not be split after this point. */
10507 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
10508 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10513 emit_move_insn (scratch_reg, offset);
10514 emit_insn (gen_adddi3 (scratch_reg, scratch_reg,
10515 stack_pointer_rtx));
10517 current = scratch_reg;
10520 ix86_expand_branch (GEU, current, limit, label);
10521 jump_insn = get_last_insn ();
10522 JUMP_LABEL (jump_insn) = label;
10524 /* Mark the jump as very likely to be taken. */
10525 add_reg_note (jump_insn, REG_BR_PROB,
10526 GEN_INT (REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100));
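/* REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100 marks the branch as taken
   with 99% probability, i.e. we expect to skip the __morestack call
   in almost all executions.  */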
10528 /* Get more stack space. We pass in the desired stack space and the
10529 size of the arguments to copy to the new stack. In 32-bit mode
10530 we push the parameters; __morestack will return on a new stack
anyhow.  In 64-bit mode we pass the parameters in r10 and r11.  */
10533 allocate_rtx = GEN_INT (allocate);
10534 args_size = crtl->args.size >= 0 ? crtl->args.size : 0;
10535 call_fusage = NULL_RTX;
10540 reg = gen_rtx_REG (Pmode, R10_REG);
10542 /* If this function uses a static chain, it will be in %r10.
10543 Preserve it across the call to __morestack. */
10544 if (DECL_STATIC_CHAIN (cfun->decl))
10548 rax = gen_rtx_REG (Pmode, AX_REG);
10549 emit_move_insn (rax, reg);
10550 use_reg (&call_fusage, rax);
10553 emit_move_insn (reg, allocate_rtx);
10554 use_reg (&call_fusage, reg);
10555 reg = gen_rtx_REG (Pmode, R11_REG);
10556 emit_move_insn (reg, GEN_INT (args_size));
10557 use_reg (&call_fusage, reg);
10561 emit_insn (gen_push (GEN_INT (args_size)));
10562 emit_insn (gen_push (allocate_rtx));
10564 if (split_stack_fn == NULL_RTX)
10565 split_stack_fn = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
10566 call_insn = ix86_expand_call (NULL_RTX, gen_rtx_MEM (QImode, split_stack_fn),
10567 GEN_INT (UNITS_PER_WORD), constm1_rtx,
10569 add_function_usage_to (call_insn, call_fusage);
10571 /* In order to make call/return prediction work right, we now need
10572 to execute a return instruction. See
10573 libgcc/config/i386/morestack.S for the details on how this works.
10575 For flow purposes gcc must not see this as a return
10576 instruction--we need control flow to continue at the subsequent
10577 label. Therefore, we use an unspec. */
10578 gcc_assert (crtl->args.pops_args < 65536);
10579 emit_insn (gen_split_stack_return (GEN_INT (crtl->args.pops_args)));
10581 /* If we are in 64-bit mode and this function uses a static chain,
we saved %r10 in %rax before calling __morestack.  */
10583 if (TARGET_64BIT && DECL_STATIC_CHAIN (cfun->decl))
10584 emit_move_insn (gen_rtx_REG (Pmode, R10_REG),
10585 gen_rtx_REG (Pmode, AX_REG));
10587 /* If this function calls va_start, we need to store a pointer to
10588 the arguments on the old stack, because they may not have been
10589 all copied to the new stack. At this point the old stack can be
10590 found at the frame pointer value used by __morestack, because
10591 __morestack has set that up before calling back to us. Here we
10592 store that pointer in a scratch register, and in
10593 ix86_expand_prologue we store the scratch register in a stack
10595 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
10597 unsigned int scratch_regno;
10601 scratch_regno = split_stack_prologue_scratch_regno ();
10602 scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
10603 frame_reg = gen_rtx_REG (Pmode, BP_REG);
/* The stack as set up by __morestack looks like this.

   64-bit:
      fp -> old fp
            return address within this function
            return address of caller of this function
            stack arguments
   So we add three words to get to the stack arguments.

   32-bit:
      fp -> old fp
            return address within this function
            first argument to __morestack
            second argument to __morestack
            return address of caller of this function
            stack arguments
   So we add five words to get to the stack arguments.  */
10621 words = TARGET_64BIT ? 3 : 5;
10622 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
10623 gen_rtx_PLUS (Pmode, frame_reg,
10624 GEN_INT (words * UNITS_PER_WORD))));
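/* E.g. this typically assembles to "leaq 24(%rbp), %r11" in 64-bit
   mode (3 words of 8 bytes) or "leal 20(%ebp), %ecx" in 32-bit mode
   (5 words of 4 bytes); the scratch registers shown here are only
   illustrative.  */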
10626 varargs_label = gen_label_rtx ();
10627 emit_jump_insn (gen_jump (varargs_label));
10628 JUMP_LABEL (get_last_insn ()) = varargs_label;
10633 emit_label (label);
10634 LABEL_NUSES (label) = 1;
10636 /* If this function calls va_start, we now have to set the scratch
10637 register for the case where we do not call __morestack. In this
10638 case we need to set it based on the stack pointer. */
10639 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
10641 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
10642 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10643 GEN_INT (UNITS_PER_WORD))));
10645 emit_label (varargs_label);
10646 LABEL_NUSES (varargs_label) = 1;
10650 /* We may have to tell the dataflow pass that the split stack prologue
10651 is initializing a scratch register. */
10654 ix86_live_on_entry (bitmap regs)
10656 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
10658 gcc_assert (flag_split_stack);
10659 bitmap_set_bit (regs, split_stack_prologue_scratch_regno ());
10663 /* Extract the parts of an RTL expression that is a valid memory address
10664 for an instruction. Return 0 if the structure of the address is
10665 grossly off. Return -1 if the address contains ASHIFT, so it is not
strictly valid, but still used for computing the length of an lea
instruction.  */
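/* For example, the Pmode address
     (plus (plus (mult (reg I) (const_int 4)) (reg B)) (const_int 12))
   decomposes into base B, index I, scale 4 and disp 12, matching the
   assembler operand "12(%B,%I,4)".  */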
10669 ix86_decompose_address (rtx addr, struct ix86_address *out)
10671 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
10672 rtx base_reg, index_reg;
10673 HOST_WIDE_INT scale = 1;
10674 rtx scale_rtx = NULL_RTX;
10677 enum ix86_address_seg seg = SEG_DEFAULT;
10679 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
10681 else if (GET_CODE (addr) == PLUS)
10683 rtx addends[4], op;
10691 addends[n++] = XEXP (op, 1);
10694 while (GET_CODE (op) == PLUS);
10699 for (i = n; i >= 0; --i)
10702 switch (GET_CODE (op))
10707 index = XEXP (op, 0);
10708 scale_rtx = XEXP (op, 1);
10714 index = XEXP (op, 0);
10715 tmp = XEXP (op, 1);
10716 if (!CONST_INT_P (tmp))
10718 scale = INTVAL (tmp);
10719 if ((unsigned HOST_WIDE_INT) scale > 3)
10721 scale = 1 << scale;
10725 if (XINT (op, 1) == UNSPEC_TP
10726 && TARGET_TLS_DIRECT_SEG_REFS
10727 && seg == SEG_DEFAULT)
10728 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
10757 else if (GET_CODE (addr) == MULT)
10759 index = XEXP (addr, 0); /* index*scale */
10760 scale_rtx = XEXP (addr, 1);
10762 else if (GET_CODE (addr) == ASHIFT)
10764 /* We're called for lea too, which implements ashift on occasion. */
10765 index = XEXP (addr, 0);
10766 tmp = XEXP (addr, 1);
10767 if (!CONST_INT_P (tmp))
10769 scale = INTVAL (tmp);
10770 if ((unsigned HOST_WIDE_INT) scale > 3)
10772 scale = 1 << scale;
10776 disp = addr; /* displacement */
10778 /* Extract the integral value of scale. */
10781 if (!CONST_INT_P (scale_rtx))
10783 scale = INTVAL (scale_rtx);
10786 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
10787 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
10789 /* Avoid useless 0 displacement. */
10790 if (disp == const0_rtx && (base || index))
/* Allow arg pointer and stack pointer as index if there is no scaling.  */
10794 if (base_reg && index_reg && scale == 1
10795 && (index_reg == arg_pointer_rtx
10796 || index_reg == frame_pointer_rtx
10797 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
10800 tmp = base, base = index, index = tmp;
10801 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
/* Special case: %ebp cannot be encoded as a base without a displacement.
   Similarly %r13.  */
10808 && (base_reg == hard_frame_pointer_rtx
10809 || base_reg == frame_pointer_rtx
10810 || base_reg == arg_pointer_rtx
10811 || (REG_P (base_reg)
10812 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
10813 || REGNO (base_reg) == R13_REG))))
10816 /* Special case: on K6, [%esi] makes the instruction vector decoded.
10817 Avoid this by transforming to [%esi+0].
10818 Reload calls address legitimization without cfun defined, so we need
10819 to test cfun for being non-NULL. */
10820 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
10821 && base_reg && !index_reg && !disp
10822 && REG_P (base_reg) && REGNO (base_reg) == SI_REG)
10825 /* Special case: encode reg+reg instead of reg*2. */
10826 if (!base && index && scale == 2)
10827 base = index, base_reg = index_reg, scale = 1;
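/* The reg+reg form is preferred because an index with no base forces
   a 32-bit zero displacement in the ModR/M+SIB encoding, whereas
   (%reg,%reg) needs no displacement at all.  */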
10829 /* Special case: scaling cannot be encoded without base or displacement. */
10830 if (!base && !disp && index && scale != 1)
10834 out->index = index;
10836 out->scale = scale;
10842 /* Return cost of the memory address x.
10843 For i386, it is better to use a complex address than let gcc copy
10844 the address into a reg and make a new pseudo. But not if the address
requires two regs - that would mean more pseudos with longer
lifetimes.  */
10848 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
10850 struct ix86_address parts;
10852 int ok = ix86_decompose_address (x, &parts);
10856 if (parts.base && GET_CODE (parts.base) == SUBREG)
10857 parts.base = SUBREG_REG (parts.base);
10858 if (parts.index && GET_CODE (parts.index) == SUBREG)
10859 parts.index = SUBREG_REG (parts.index);
10861 /* Attempt to minimize number of registers in the address. */
10863 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
10865 && (!REG_P (parts.index)
10866 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
10870 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
10872 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
10873 && parts.base != parts.index)
/* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
   since its predecode logic can't detect the length of such instructions
   and it degenerates to vector decoding.  Increase the cost of such
   addresses here.  The penalty is minimally 2 cycles.  It may be
   worthwhile to split such addresses or even refuse them entirely.

   The following addressing modes are affected:
      [base+scale*index]
      [scale*index+disp]
      [base+index]

   The first and last cases may be avoidable by explicitly coding the zero
   into the memory address, but I don't have an AMD-K6 machine handy to
   check this theory.  */
10892 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
10893 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
10894 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
/* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
   this is used to form addresses to local data when -fPIC is in
   use.  */
10905 darwin_local_data_pic (rtx disp)
10907 return (GET_CODE (disp) == UNSPEC
10908 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
10911 /* Determine if a given RTX is a valid constant. We already know this
10912 satisfies CONSTANT_P. */
10915 legitimate_constant_p (rtx x)
10917 switch (GET_CODE (x))
10922 if (GET_CODE (x) == PLUS)
10924 if (!CONST_INT_P (XEXP (x, 1)))
10929 if (TARGET_MACHO && darwin_local_data_pic (x))
10932 /* Only some unspecs are valid as "constants". */
10933 if (GET_CODE (x) == UNSPEC)
10934 switch (XINT (x, 1))
10937 case UNSPEC_GOTOFF:
10938 case UNSPEC_PLTOFF:
10939 return TARGET_64BIT;
10941 case UNSPEC_NTPOFF:
10942 x = XVECEXP (x, 0, 0);
10943 return (GET_CODE (x) == SYMBOL_REF
10944 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
10945 case UNSPEC_DTPOFF:
10946 x = XVECEXP (x, 0, 0);
10947 return (GET_CODE (x) == SYMBOL_REF
10948 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
10953 /* We must have drilled down to a symbol. */
10954 if (GET_CODE (x) == LABEL_REF)
10956 if (GET_CODE (x) != SYMBOL_REF)
10961 /* TLS symbols are never valid. */
10962 if (SYMBOL_REF_TLS_MODEL (x))
10965 /* DLLIMPORT symbols are never valid. */
10966 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
10967 && SYMBOL_REF_DLLIMPORT_P (x))
10972 if (GET_MODE (x) == TImode
10973 && x != CONST0_RTX (TImode)
10979 if (!standard_sse_constant_p (x))
10986 /* Otherwise we handle everything else in the move patterns. */
10990 /* Determine if it's legal to put X into the constant pool. This
10991 is not possible for the address of thread-local symbols, which
10992 is checked above. */
10995 ix86_cannot_force_const_mem (rtx x)
10997 /* We can always put integral constants and vectors in memory. */
10998 switch (GET_CODE (x))
11008 return !legitimate_constant_p (x);
11012 /* Nonzero if the constant value X is a legitimate general operand
11013 when generating PIC code. It is given that flag_pic is on and
11014 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
11017 legitimate_pic_operand_p (rtx x)
11021 switch (GET_CODE (x))
11024 inner = XEXP (x, 0);
11025 if (GET_CODE (inner) == PLUS
11026 && CONST_INT_P (XEXP (inner, 1)))
11027 inner = XEXP (inner, 0);
11029 /* Only some unspecs are valid as "constants". */
11030 if (GET_CODE (inner) == UNSPEC)
11031 switch (XINT (inner, 1))
11034 case UNSPEC_GOTOFF:
11035 case UNSPEC_PLTOFF:
11036 return TARGET_64BIT;
11038 x = XVECEXP (inner, 0, 0);
11039 return (GET_CODE (x) == SYMBOL_REF
11040 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
11041 case UNSPEC_MACHOPIC_OFFSET:
11042 return legitimate_pic_address_disp_p (x);
11050 return legitimate_pic_address_disp_p (x);
/* Determine if a given CONST RTX is a valid memory displacement
   in PIC mode.  */
11061 legitimate_pic_address_disp_p (rtx disp)
11065 /* In 64bit mode we can allow direct addresses of symbols and labels
11066 when they are not dynamic symbols. */
11069 rtx op0 = disp, op1;
11071 switch (GET_CODE (disp))
11077 if (GET_CODE (XEXP (disp, 0)) != PLUS)
11079 op0 = XEXP (XEXP (disp, 0), 0);
11080 op1 = XEXP (XEXP (disp, 0), 1);
11081 if (!CONST_INT_P (op1)
11082 || INTVAL (op1) >= 16*1024*1024
11083 || INTVAL (op1) < -16*1024*1024)
11085 if (GET_CODE (op0) == LABEL_REF)
11087 if (GET_CODE (op0) != SYMBOL_REF)
11092 /* TLS references should always be enclosed in UNSPEC. */
11093 if (SYMBOL_REF_TLS_MODEL (op0))
11095 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
11096 && ix86_cmodel != CM_LARGE_PIC)
11104 if (GET_CODE (disp) != CONST)
11106 disp = XEXP (disp, 0);
/* It is unsafe to allow PLUS expressions.  This limits the allowed
   distance of GOT table references.  We should not need these anyway.  */
11112 if (GET_CODE (disp) != UNSPEC
11113 || (XINT (disp, 1) != UNSPEC_GOTPCREL
11114 && XINT (disp, 1) != UNSPEC_GOTOFF
11115 && XINT (disp, 1) != UNSPEC_PLTOFF))
11118 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
11119 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
11125 if (GET_CODE (disp) == PLUS)
11127 if (!CONST_INT_P (XEXP (disp, 1)))
11129 disp = XEXP (disp, 0);
11133 if (TARGET_MACHO && darwin_local_data_pic (disp))
11136 if (GET_CODE (disp) != UNSPEC)
11139 switch (XINT (disp, 1))
11144 /* We need to check for both symbols and labels because VxWorks loads
text labels with @GOT rather than @GOTOFF.  See gotoff_operand for
details.  */
11147 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
11148 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
11149 case UNSPEC_GOTOFF:
/* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
   While the ABI also specifies a 32bit relocation, we don't produce
   it in the small PIC model at all.  */
11153 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
11154 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
11156 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
11158 case UNSPEC_GOTTPOFF:
11159 case UNSPEC_GOTNTPOFF:
11160 case UNSPEC_INDNTPOFF:
11163 disp = XVECEXP (disp, 0, 0);
11164 return (GET_CODE (disp) == SYMBOL_REF
11165 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
11166 case UNSPEC_NTPOFF:
11167 disp = XVECEXP (disp, 0, 0);
11168 return (GET_CODE (disp) == SYMBOL_REF
11169 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
11170 case UNSPEC_DTPOFF:
11171 disp = XVECEXP (disp, 0, 0);
11172 return (GET_CODE (disp) == SYMBOL_REF
11173 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
11179 /* Recognizes RTL expressions that are valid memory addresses for an
11180 instruction. The MODE argument is the machine mode for the MEM
11181 expression that wants to use this address.
It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS
should convert common non-canonical forms to canonical form so that
they will be recognized.  */
11188 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
11189 rtx addr, bool strict)
11191 struct ix86_address parts;
11192 rtx base, index, disp;
11193 HOST_WIDE_INT scale;
11195 if (ix86_decompose_address (addr, &parts) <= 0)
11196 /* Decomposition failed. */
11200 index = parts.index;
11202 scale = parts.scale;
11204 /* Validate base register.
11206 Don't allow SUBREG's that span more than a word here. It can lead to spill
11207 failures when the base is one word out of a two word structure, which is
11208 represented internally as a DImode int. */
11216 else if (GET_CODE (base) == SUBREG
11217 && REG_P (SUBREG_REG (base))
11218 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
11220 reg = SUBREG_REG (base);
11222 /* Base is not a register. */
11225 if (GET_MODE (base) != Pmode)
11226 /* Base is not in Pmode. */
11229 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
11230 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
11231 /* Base is not valid. */
11235 /* Validate index register.
11237 Don't allow SUBREG's that span more than a word here -- same as above. */
11245 else if (GET_CODE (index) == SUBREG
11246 && REG_P (SUBREG_REG (index))
11247 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
11249 reg = SUBREG_REG (index);
11251 /* Index is not a register. */
11254 if (GET_MODE (index) != Pmode)
11255 /* Index is not in Pmode. */
11258 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
11259 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
11260 /* Index is not valid. */
11264 /* Validate scale factor. */
11268 /* Scale without index. */
11271 if (scale != 2 && scale != 4 && scale != 8)
11272 /* Scale is not a valid multiplier. */
11276 /* Validate displacement. */
11279 if (GET_CODE (disp) == CONST
11280 && GET_CODE (XEXP (disp, 0)) == UNSPEC
11281 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
11282 switch (XINT (XEXP (disp, 0), 1))
/* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
   used.  While the ABI also specifies 32bit relocations, we don't
   produce them at all and use IP relative addressing instead.  */
11288 case UNSPEC_GOTOFF:
11289 gcc_assert (flag_pic);
11291 goto is_legitimate_pic;
11293 /* 64bit address unspec. */
11296 case UNSPEC_GOTPCREL:
11297 gcc_assert (flag_pic);
11298 goto is_legitimate_pic;
11300 case UNSPEC_GOTTPOFF:
11301 case UNSPEC_GOTNTPOFF:
11302 case UNSPEC_INDNTPOFF:
11303 case UNSPEC_NTPOFF:
11304 case UNSPEC_DTPOFF:
11307 case UNSPEC_STACK_CHECK:
11308 gcc_assert (flag_split_stack);
11312 /* Invalid address unspec. */
11316 else if (SYMBOLIC_CONST (disp)
11320 && MACHOPIC_INDIRECT
11321 && !machopic_operand_p (disp)
11327 if (TARGET_64BIT && (index || base))
11329 /* foo@dtpoff(%rX) is ok. */
11330 if (GET_CODE (disp) != CONST
11331 || GET_CODE (XEXP (disp, 0)) != PLUS
11332 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
11333 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
11334 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
11335 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
11336 /* Non-constant pic memory reference. */
11339 else if (! legitimate_pic_address_disp_p (disp))
11340 /* Displacement is an invalid pic construct. */
11343 /* This code used to verify that a symbolic pic displacement
11344 includes the pic_offset_table_rtx register.
While this is a good idea, unfortunately these constructs may
be created by the "adds using lea" optimization for incorrect
code like:

  int a;
  int foo (int i)
  {
    return *(&a + i);
  }

This code is nonsensical, but results in addressing the
GOT table with a pic_offset_table_rtx base.  We can't
just refuse it easily, since it gets matched by the
"addsi3" pattern, which later gets split to an lea in the
case the output register differs from the input.  While this
could be handled by a separate addsi pattern for this case
that never results in an lea, disabling this test seems to be
the easier and correct fix for the crash.  */
11365 else if (GET_CODE (disp) != LABEL_REF
11366 && !CONST_INT_P (disp)
11367 && (GET_CODE (disp) != CONST
11368 || !legitimate_constant_p (disp))
11369 && (GET_CODE (disp) != SYMBOL_REF
11370 || !legitimate_constant_p (disp)))
11371 /* Displacement is not constant. */
11373 else if (TARGET_64BIT
11374 && !x86_64_immediate_operand (disp, VOIDmode))
11375 /* Displacement is out of range. */
11379 /* Everything looks valid. */
11383 /* Determine if a given RTX is a valid constant address. */
11386 constant_address_p (rtx x)
11388 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
11391 /* Return a unique alias set for the GOT. */
11393 static alias_set_type
11394 ix86_GOT_alias_set (void)
11396 static alias_set_type set = -1;
11398 set = new_alias_set ();
11402 /* Return a legitimate reference for ORIG (an address) using the
11403 register REG. If REG is 0, a new pseudo is generated.
11405 There are two types of references that must be handled:
11407 1. Global data references must load the address from the GOT, via
11408 the PIC reg. An insn is emitted to do this load, and the reg is
11411 2. Static data references, constant pool addresses, and code labels
11412 compute the address as an offset from the GOT, whose base is in
11413 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
11414 differentiate them from global data objects. The returned
11415 address is the PIC reg + an unspec constant.
11417 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
11418 reg also appears in the address. */
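/* Illustration for 32-bit PIC code: a global symbol FOO comes back as
   (mem (plus pic_reg (const (unspec [FOO] UNSPEC_GOT)))), i.e. the
   load "movl foo@GOT(%ebx), %reg", while a local symbol comes back as
   the direct sum pic_reg + foo@GOTOFF with no memory load.  */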
11421 legitimize_pic_address (rtx orig, rtx reg)
11424 rtx new_rtx = orig;
11428 if (TARGET_MACHO && !TARGET_64BIT)
11431 reg = gen_reg_rtx (Pmode);
11432 /* Use the generic Mach-O PIC machinery. */
11433 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
11437 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
11439 else if (TARGET_64BIT
11440 && ix86_cmodel != CM_SMALL_PIC
11441 && gotoff_operand (addr, Pmode))
11444 /* This symbol may be referenced via a displacement from the PIC
11445 base address (@GOTOFF). */
11447 if (reload_in_progress)
11448 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
11449 if (GET_CODE (addr) == CONST)
11450 addr = XEXP (addr, 0);
11451 if (GET_CODE (addr) == PLUS)
11453 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
11455 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
11458 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
11459 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
11461 tmpreg = gen_reg_rtx (Pmode);
11464 emit_move_insn (tmpreg, new_rtx);
11468 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
11469 tmpreg, 1, OPTAB_DIRECT);
11472 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
11474 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
11476 /* This symbol may be referenced via a displacement from the PIC
11477 base address (@GOTOFF). */
11479 if (reload_in_progress)
11480 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
11481 if (GET_CODE (addr) == CONST)
11482 addr = XEXP (addr, 0);
11483 if (GET_CODE (addr) == PLUS)
11485 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
11487 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
11490 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
11491 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
11492 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
11496 emit_move_insn (reg, new_rtx);
11500 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
11501 /* We can't use @GOTOFF for text labels on VxWorks;
11502 see gotoff_operand. */
11503 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
11505 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
11507 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
11508 return legitimize_dllimport_symbol (addr, true);
11509 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
11510 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
11511 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
11513 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
11514 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
11518 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
11520 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
11521 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
11522 new_rtx = gen_const_mem (Pmode, new_rtx);
11523 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
11526 reg = gen_reg_rtx (Pmode);
/* Use gen_movsi directly; otherwise the address is loaded into a
   register for CSE.  We don't want to CSE these addresses; instead
   we CSE addresses from the GOT table, so skip this.  */
11530 emit_insn (gen_movsi (reg, new_rtx));
11535 /* This symbol must be referenced via a load from the
11536 Global Offset Table (@GOT). */
11538 if (reload_in_progress)
11539 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
11540 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
11541 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
11543 new_rtx = force_reg (Pmode, new_rtx);
11544 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
11545 new_rtx = gen_const_mem (Pmode, new_rtx);
11546 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
11549 reg = gen_reg_rtx (Pmode);
11550 emit_move_insn (reg, new_rtx);
11556 if (CONST_INT_P (addr)
11557 && !x86_64_immediate_operand (addr, VOIDmode))
11561 emit_move_insn (reg, addr);
11565 new_rtx = force_reg (Pmode, addr);
11567 else if (GET_CODE (addr) == CONST)
11569 addr = XEXP (addr, 0);
11571 /* We must match stuff we generate before. Assume the only
11572 unspecs that can get here are ours. Not that we could do
11573 anything with them anyway.... */
11574 if (GET_CODE (addr) == UNSPEC
11575 || (GET_CODE (addr) == PLUS
11576 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
11578 gcc_assert (GET_CODE (addr) == PLUS);
11580 if (GET_CODE (addr) == PLUS)
11582 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
11584 /* Check first to see if this is a constant offset from a @GOTOFF
11585 symbol reference. */
11586 if (gotoff_operand (op0, Pmode)
11587 && CONST_INT_P (op1))
11591 if (reload_in_progress)
11592 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
11593 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
11595 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
11596 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
11597 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
11601 emit_move_insn (reg, new_rtx);
11607 if (INTVAL (op1) < -16*1024*1024
11608 || INTVAL (op1) >= 16*1024*1024)
11610 if (!x86_64_immediate_operand (op1, Pmode))
11611 op1 = force_reg (Pmode, op1);
11612 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
11618 base = legitimize_pic_address (XEXP (addr, 0), reg);
11619 new_rtx = legitimize_pic_address (XEXP (addr, 1),
11620 base == reg ? NULL_RTX : reg);
11622 if (CONST_INT_P (new_rtx))
11623 new_rtx = plus_constant (base, INTVAL (new_rtx));
11626 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
11628 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
11629 new_rtx = XEXP (new_rtx, 1);
11631 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
11639 /* Load the thread pointer. If TO_REG is true, force it into a register. */
11642 get_thread_pointer (int to_reg)
11646 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
11650 reg = gen_reg_rtx (Pmode);
11651 insn = gen_rtx_SET (VOIDmode, reg, tp);
11652 insn = emit_insn (insn);
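/* The UNSPEC_TP reference built here normally assembles to a
   segment-relative access such as "movl %gs:0, %reg" in 32-bit mode
   or "movq %fs:0, %reg" in 64-bit mode, matching the SEG_GS/SEG_FS
   handling in ix86_decompose_address.  */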
11657 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
11658 false if we expect this to be used for a memory address and true if
11659 we expect to load the address into a register. */
11662 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
11664 rtx dest, base, off, pic, tp;
11669 case TLS_MODEL_GLOBAL_DYNAMIC:
11670 dest = gen_reg_rtx (Pmode);
11671 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
11673 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
11675 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
11678 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
11679 insns = get_insns ();
11682 RTL_CONST_CALL_P (insns) = 1;
11683 emit_libcall_block (insns, dest, rax, x);
11685 else if (TARGET_64BIT && TARGET_GNU2_TLS)
11686 emit_insn (gen_tls_global_dynamic_64 (dest, x));
11688 emit_insn (gen_tls_global_dynamic_32 (dest, x));
11690 if (TARGET_GNU2_TLS)
11692 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
11694 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
11698 case TLS_MODEL_LOCAL_DYNAMIC:
11699 base = gen_reg_rtx (Pmode);
11700 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
11702 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
11704 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
11707 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
11708 insns = get_insns ();
11711 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
11712 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
11713 RTL_CONST_CALL_P (insns) = 1;
11714 emit_libcall_block (insns, base, rax, note);
11716 else if (TARGET_64BIT && TARGET_GNU2_TLS)
11717 emit_insn (gen_tls_local_dynamic_base_64 (base));
11719 emit_insn (gen_tls_local_dynamic_base_32 (base));
11721 if (TARGET_GNU2_TLS)
11723 rtx x = ix86_tls_module_base ();
11725 set_unique_reg_note (get_last_insn (), REG_EQUIV,
11726 gen_rtx_MINUS (Pmode, x, tp));
11729 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
11730 off = gen_rtx_CONST (Pmode, off);
11732 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
11734 if (TARGET_GNU2_TLS)
11736 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
11738 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
11743 case TLS_MODEL_INITIAL_EXEC:
11747 type = UNSPEC_GOTNTPOFF;
11751 if (reload_in_progress)
11752 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
11753 pic = pic_offset_table_rtx;
11754 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
11756 else if (!TARGET_ANY_GNU_TLS)
11758 pic = gen_reg_rtx (Pmode);
11759 emit_insn (gen_set_got (pic));
11760 type = UNSPEC_GOTTPOFF;
11765 type = UNSPEC_INDNTPOFF;
11768 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
11769 off = gen_rtx_CONST (Pmode, off);
11771 off = gen_rtx_PLUS (Pmode, pic, off);
11772 off = gen_const_mem (Pmode, off);
11773 set_mem_alias_set (off, ix86_GOT_alias_set ());
11775 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
11777 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
11778 off = force_reg (Pmode, off);
11779 return gen_rtx_PLUS (Pmode, base, off);
11783 base = get_thread_pointer (true);
11784 dest = gen_reg_rtx (Pmode);
11785 emit_insn (gen_subsi3 (dest, base, off));
11789 case TLS_MODEL_LOCAL_EXEC:
11790 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
11791 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
11792 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
11793 off = gen_rtx_CONST (Pmode, off);
11795 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
11797 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
11798 return gen_rtx_PLUS (Pmode, base, off);
11802 base = get_thread_pointer (true);
11803 dest = gen_reg_rtx (Pmode);
11804 emit_insn (gen_subsi3 (dest, base, off));
11809 gcc_unreachable ();
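/* As a rough guide to the code generated above on GNU/Linux targets:
   global-dynamic calls __tls_get_addr on the symbol's tlsgd slot,
   local-dynamic makes one such call for the module base and then adds
   x@dtpoff, initial-exec loads x@gottpoff from the GOT and adds the
   thread pointer, and local-exec folds down to thread pointer plus
   x@ntpoff with no call at all.  */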
/* Create or return the unique __imp_DECL dllimport symbol
   corresponding to symbol DECL.  */
11818 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
11819 htab_t dllimport_map;
11822 get_dllimport_decl (tree decl)
11824 struct tree_map *h, in;
11827 const char *prefix;
11828 size_t namelen, prefixlen;
11833 if (!dllimport_map)
11834 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
11836 in.hash = htab_hash_pointer (decl);
11837 in.base.from = decl;
11838 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
11839 h = (struct tree_map *) *loc;
11843 *loc = h = ggc_alloc_tree_map ();
11845 h->base.from = decl;
11846 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
11847 VAR_DECL, NULL, ptr_type_node);
11848 DECL_ARTIFICIAL (to) = 1;
11849 DECL_IGNORED_P (to) = 1;
11850 DECL_EXTERNAL (to) = 1;
11851 TREE_READONLY (to) = 1;
11853 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
11854 name = targetm.strip_name_encoding (name);
11855 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
11856 ? "*__imp_" : "*__imp__";
11857 namelen = strlen (name);
11858 prefixlen = strlen (prefix);
11859 imp_name = (char *) alloca (namelen + prefixlen + 1);
11860 memcpy (imp_name, prefix, prefixlen);
11861 memcpy (imp_name + prefixlen, name, namelen + 1);
11863 name = ggc_alloc_string (imp_name, namelen + prefixlen);
11864 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
11865 SET_SYMBOL_REF_DECL (rtl, to);
11866 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
11868 rtl = gen_const_mem (Pmode, rtl);
11869 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
11871 SET_DECL_RTL (to, rtl);
11872 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
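/* For example, a dllimport reference to a decl assembled as "foo"
   normally yields the symbol "*__imp__foo" (or "*__imp_foo" for
   fastcall names or targets with no user label prefix), wrapped in a
   const mem that designates the import table slot holding foo's real
   address.  */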
11877 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
11878 true if we require the result be a register. */
11881 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
11886 gcc_assert (SYMBOL_REF_DECL (symbol));
11887 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
11889 x = DECL_RTL (imp_decl);
11891 x = force_reg (Pmode, x);
11895 /* Try machine-dependent ways of modifying an illegitimate address
11896 to be legitimate. If we find one, return the new, valid address.
11897 This macro is used in only one place: `memory_address' in explow.c.
11899 OLDX is the address as it was before break_out_memory_refs was called.
11900 In some cases it is useful to look at this to decide what needs to be done.
11902 It is always safe for this macro to do nothing. It exists to recognize
11903 opportunities to optimize the output.
11905 For the 80386, we handle X+REG by loading X into a register R and
11906 using R+REG. R will go in a general reg and indexing will be used.
11907 However, if REG is a broken-out memory address or multiplication,
11908 nothing needs to be done because REG can certainly go in a general reg.
11910 When -fpic is used, special handling is needed for symbolic references.
11911 See comments by legitimize_pic_address in i386.c for details. */
11914 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
11915 enum machine_mode mode)
11920 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
11922 return legitimize_tls_address (x, (enum tls_model) log, false);
11923 if (GET_CODE (x) == CONST
11924 && GET_CODE (XEXP (x, 0)) == PLUS
11925 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
11926 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
11928 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
11929 (enum tls_model) log, false);
11930 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
11933 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
11935 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
11936 return legitimize_dllimport_symbol (x, true);
11937 if (GET_CODE (x) == CONST
11938 && GET_CODE (XEXP (x, 0)) == PLUS
11939 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
11940 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
11942 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
11943 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
11947 if (flag_pic && SYMBOLIC_CONST (x))
11948 return legitimize_pic_address (x, 0);
11950 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
11951 if (GET_CODE (x) == ASHIFT
11952 && CONST_INT_P (XEXP (x, 1))
11953 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
11956 log = INTVAL (XEXP (x, 1));
11957 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
11958 GEN_INT (1 << log));
11961 if (GET_CODE (x) == PLUS)
11963 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
11965 if (GET_CODE (XEXP (x, 0)) == ASHIFT
11966 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
11967 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
11970 log = INTVAL (XEXP (XEXP (x, 0), 1));
11971 XEXP (x, 0) = gen_rtx_MULT (Pmode,
11972 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
11973 GEN_INT (1 << log));
11976 if (GET_CODE (XEXP (x, 1)) == ASHIFT
11977 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
11978 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
11981 log = INTVAL (XEXP (XEXP (x, 1), 1));
11982 XEXP (x, 1) = gen_rtx_MULT (Pmode,
11983 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
11984 GEN_INT (1 << log));
11987 /* Put multiply first if it isn't already. */
11988 if (GET_CODE (XEXP (x, 1)) == MULT)
11990 rtx tmp = XEXP (x, 0);
11991 XEXP (x, 0) = XEXP (x, 1);
11996 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
11997 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
11998 created by virtual register instantiation, register elimination, and
11999 similar optimizations. */
12000 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
12003 x = gen_rtx_PLUS (Pmode,
12004 gen_rtx_PLUS (Pmode, XEXP (x, 0),
12005 XEXP (XEXP (x, 1), 0)),
12006 XEXP (XEXP (x, 1), 1));
/* Canonicalize
   (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
   into (plus (plus (mult (reg) (const)) (reg)) (const)).  */
12012 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
12013 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
12014 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
12015 && CONSTANT_P (XEXP (x, 1)))
12018 rtx other = NULL_RTX;
12020 if (CONST_INT_P (XEXP (x, 1)))
12022 constant = XEXP (x, 1);
12023 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
12025 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
12027 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
12028 other = XEXP (x, 1);
12036 x = gen_rtx_PLUS (Pmode,
12037 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
12038 XEXP (XEXP (XEXP (x, 0), 1), 0)),
12039 plus_constant (other, INTVAL (constant)));
12043 if (changed && ix86_legitimate_address_p (mode, x, false))
12046 if (GET_CODE (XEXP (x, 0)) == MULT)
12049 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
12052 if (GET_CODE (XEXP (x, 1)) == MULT)
12055 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
12059 && REG_P (XEXP (x, 1))
12060 && REG_P (XEXP (x, 0)))
12063 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
12066 x = legitimize_pic_address (x, 0);
12069 if (changed && ix86_legitimate_address_p (mode, x, false))
12072 if (REG_P (XEXP (x, 0)))
12074 rtx temp = gen_reg_rtx (Pmode);
12075 rtx val = force_operand (XEXP (x, 1), temp);
12077 emit_move_insn (temp, val);
12079 XEXP (x, 1) = temp;
12083 else if (REG_P (XEXP (x, 1)))
12085 rtx temp = gen_reg_rtx (Pmode);
12086 rtx val = force_operand (XEXP (x, 0), temp);
12088 emit_move_insn (temp, val);
12090 XEXP (x, 0) = temp;
12098 /* Print an integer constant expression in assembler syntax. Addition
12099 and subtraction are the only arithmetic that may appear in these
12100 expressions. FILE is the stdio stream to write to, X is the rtx, and
12101 CODE is the operand print code from the output string. */
12104 output_pic_addr_const (FILE *file, rtx x, int code)
12108 switch (GET_CODE (x))
12111 gcc_assert (flag_pic);
12116 if (TARGET_64BIT || ! TARGET_MACHO_BRANCH_ISLANDS)
12117 output_addr_const (file, x);
12120 const char *name = XSTR (x, 0);
12122 /* Mark the decl as referenced so that cgraph will
12123 output the function. */
12124 if (SYMBOL_REF_DECL (x))
12125 mark_decl_referenced (SYMBOL_REF_DECL (x));
12128 if (MACHOPIC_INDIRECT
12129 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
12130 name = machopic_indirection_name (x, /*stub_p=*/true);
12132 assemble_name (file, name);
12134 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
12135 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
12136 fputs ("@PLT", file);
12143 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
12144 assemble_name (asm_out_file, buf);
12148 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
12152 /* This used to output parentheses around the expression,
12153 but that does not work on the 386 (either ATT or BSD assembler). */
12154 output_pic_addr_const (file, XEXP (x, 0), code);
12158 if (GET_MODE (x) == VOIDmode)
12160 /* We can use %d if the number is <32 bits and positive. */
12161 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
12162 fprintf (file, "0x%lx%08lx",
12163 (unsigned long) CONST_DOUBLE_HIGH (x),
12164 (unsigned long) CONST_DOUBLE_LOW (x));
12166 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
12169 /* We can't handle floating point constants;
12170 TARGET_PRINT_OPERAND must handle them. */
12171 output_operand_lossage ("floating constant misused");
12175 /* Some assemblers need integer constants to appear first. */
12176 if (CONST_INT_P (XEXP (x, 0)))
12178 output_pic_addr_const (file, XEXP (x, 0), code);
12180 output_pic_addr_const (file, XEXP (x, 1), code);
12184 gcc_assert (CONST_INT_P (XEXP (x, 1)));
12185 output_pic_addr_const (file, XEXP (x, 1), code);
12187 output_pic_addr_const (file, XEXP (x, 0), code);
12193 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
12194 output_pic_addr_const (file, XEXP (x, 0), code);
12196 output_pic_addr_const (file, XEXP (x, 1), code);
12198 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
12202 if (XINT (x, 1) == UNSPEC_STACK_CHECK)
12204 bool f = i386_asm_output_addr_const_extra (file, x);
12209 gcc_assert (XVECLEN (x, 0) == 1);
12210 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
12211 switch (XINT (x, 1))
12214 fputs ("@GOT", file);
12216 case UNSPEC_GOTOFF:
12217 fputs ("@GOTOFF", file);
12219 case UNSPEC_PLTOFF:
12220 fputs ("@PLTOFF", file);
12222 case UNSPEC_GOTPCREL:
12223 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12224 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
12226 case UNSPEC_GOTTPOFF:
12227 /* FIXME: This might be @TPOFF in Sun ld too. */
12228 fputs ("@gottpoff", file);
12231 fputs ("@tpoff", file);
12233 case UNSPEC_NTPOFF:
12235 fputs ("@tpoff", file);
12237 fputs ("@ntpoff", file);
12239 case UNSPEC_DTPOFF:
12240 fputs ("@dtpoff", file);
12242 case UNSPEC_GOTNTPOFF:
12244 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12245 "@gottpoff(%rip)": "@gottpoff[rip]", file);
12247 fputs ("@gotntpoff", file);
12249 case UNSPEC_INDNTPOFF:
12250 fputs ("@indntpoff", file);
12253 case UNSPEC_MACHOPIC_OFFSET:
12255 machopic_output_function_base_name (file);
12259 output_operand_lossage ("invalid UNSPEC as operand");
12265 output_operand_lossage ("invalid expression as operand");
12269 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
12270 We need to emit DTP-relative relocations. */
12272 static void ATTRIBUTE_UNUSED
12273 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
12275 fputs (ASM_LONG, file);
12276 output_addr_const (file, x);
12277 fputs ("@dtpoff", file);
12283 fputs (", 0", file);
12286 gcc_unreachable ();
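/* So for SIZE 4 this emits ".long x@dtpoff", and for SIZE 8 it emits
   ".long x@dtpoff, 0", zero-padding the upper half of the 8-byte
   value.  */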
12290 /* Return true if X is a representation of the PIC register. This copes
12291 with calls from ix86_find_base_term, where the register might have
12292 been replaced by a cselib value. */
12295 ix86_pic_register_p (rtx x)
12297 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
12298 return (pic_offset_table_rtx
12299 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
12301 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
12304 /* In the name of slightly smaller debug output, and to cater to
12305 general assembler lossage, recognize PIC+GOTOFF and turn it back
12306 into a direct symbol reference.
12308 On Darwin, this is necessary to avoid a crash, because Darwin
12309 has a different PIC label for each routine but the DWARF debugging
12310 information is not associated with any particular routine, so it's
12311 necessary to remove references to the PIC label from RTL stored by
12312 the DWARF output code. */
12315 ix86_delegitimize_address (rtx x)
12317 rtx orig_x = delegitimize_mem_from_attrs (x);
12318 /* addend is NULL or some rtx if x is something+GOTOFF where
12319 something doesn't include the PIC register. */
12320 rtx addend = NULL_RTX;
12321 /* reg_addend is NULL or a multiple of some register. */
12322 rtx reg_addend = NULL_RTX;
12323 /* const_addend is NULL or a const_int. */
12324 rtx const_addend = NULL_RTX;
12325 /* This is the result, or NULL. */
12326 rtx result = NULL_RTX;
12335 if (GET_CODE (x) != CONST
12336 || GET_CODE (XEXP (x, 0)) != UNSPEC
12337 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
12338 || !MEM_P (orig_x))
12340 x = XVECEXP (XEXP (x, 0), 0, 0);
12341 if (GET_MODE (orig_x) != Pmode)
12342 return simplify_gen_subreg (GET_MODE (orig_x), x, Pmode, 0);
12346 if (GET_CODE (x) != PLUS
12347 || GET_CODE (XEXP (x, 1)) != CONST)
12350 if (ix86_pic_register_p (XEXP (x, 0)))
12351 /* %ebx + GOT/GOTOFF */
12353 else if (GET_CODE (XEXP (x, 0)) == PLUS)
12355 /* %ebx + %reg * scale + GOT/GOTOFF */
12356 reg_addend = XEXP (x, 0);
12357 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
12358 reg_addend = XEXP (reg_addend, 1);
12359 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
12360 reg_addend = XEXP (reg_addend, 0);
12363 reg_addend = NULL_RTX;
12364 addend = XEXP (x, 0);
12368 addend = XEXP (x, 0);
12370 x = XEXP (XEXP (x, 1), 0);
12371 if (GET_CODE (x) == PLUS
12372 && CONST_INT_P (XEXP (x, 1)))
12374 const_addend = XEXP (x, 1);
12378 if (GET_CODE (x) == UNSPEC
12379 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
12380 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
12381 result = XVECEXP (x, 0, 0);
12383 if (TARGET_MACHO && darwin_local_data_pic (x)
12384 && !MEM_P (orig_x))
12385 result = XVECEXP (x, 0, 0);
12391 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
12393 result = gen_rtx_PLUS (Pmode, reg_addend, result);
12396 /* If the rest of original X doesn't involve the PIC register, add
12397 addend and subtract pic_offset_table_rtx. This can happen e.g.
12399 leal (%ebx, %ecx, 4), %ecx
12401 movl foo@GOTOFF(%ecx), %edx
12402 in which case we return (%ecx - %ebx) + foo. */
12403 if (pic_offset_table_rtx)
12404 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
12405 pic_offset_table_rtx),
12410 if (GET_MODE (orig_x) != Pmode && MEM_P (orig_x))
12411 return simplify_gen_subreg (GET_MODE (orig_x), result, Pmode, 0);
12415 /* If X is a machine specific address (i.e. a symbol or label being
12416 referenced as a displacement from the GOT implemented using an
12417 UNSPEC), then return the base term. Otherwise return X. */
12420 ix86_find_base_term (rtx x)
12426 if (GET_CODE (x) != CONST)
12428 term = XEXP (x, 0);
12429 if (GET_CODE (term) == PLUS
12430 && (CONST_INT_P (XEXP (term, 1))
12431 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
12432 term = XEXP (term, 0);
12433 if (GET_CODE (term) != UNSPEC
12434 || XINT (term, 1) != UNSPEC_GOTPCREL)
12437 return XVECEXP (term, 0, 0);
12440 return ix86_delegitimize_address (x);
12444 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
12445 int fp, FILE *file)
12447 const char *suffix;
12449 if (mode == CCFPmode || mode == CCFPUmode)
12451 code = ix86_fp_compare_code_to_integer (code);
12455 code = reverse_condition (code);
12506 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
12510 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
12511 Those same assemblers have the same but opposite lossage on cmov. */
12512 if (mode == CCmode)
12513 suffix = fp ? "nbe" : "a";
12514 else if (mode == CCCmode)
12517 gcc_unreachable ();
12533 gcc_unreachable ();
12537 gcc_assert (mode == CCmode || mode == CCCmode);
12554 gcc_unreachable ();
12558 /* ??? As above. */
12559 gcc_assert (mode == CCmode || mode == CCCmode);
12560 suffix = fp ? "nb" : "ae";
12563 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
12567 /* ??? As above. */
12568 if (mode == CCmode)
12570 else if (mode == CCCmode)
12571 suffix = fp ? "nb" : "ae";
12573 gcc_unreachable ();
12576 suffix = fp ? "u" : "p";
12579 suffix = fp ? "nu" : "np";
12582 gcc_unreachable ();
12584 fputs (suffix, file);
12587 /* Print the name of register X to FILE based on its machine mode and number.
12588 If CODE is 'w', pretend the mode is HImode.
12589 If CODE is 'b', pretend the mode is QImode.
12590 If CODE is 'k', pretend the mode is SImode.
12591 If CODE is 'q', pretend the mode is DImode.
12592 If CODE is 'x', pretend the mode is V4SFmode.
12593 If CODE is 't', pretend the mode is V8SFmode.
12594 If CODE is 'h', pretend the reg is the 'high' byte register.
12595 If CODE is 'y', print "st(0)" instead of "st", if the reg is a stack op.
12596 If CODE is 'd', duplicate the operand for AVX instruction.
12600 print_reg (rtx x, int code, FILE *file)
12603 bool duplicated = code == 'd' && TARGET_AVX;
12605 gcc_assert (x == pc_rtx
12606 || (REGNO (x) != ARG_POINTER_REGNUM
12607 && REGNO (x) != FRAME_POINTER_REGNUM
12608 && REGNO (x) != FLAGS_REG
12609 && REGNO (x) != FPSR_REG
12610 && REGNO (x) != FPCR_REG));
12612 if (ASSEMBLER_DIALECT == ASM_ATT)
12617 gcc_assert (TARGET_64BIT);
12618 fputs ("rip", file);
12622 if (code == 'w' || MMX_REG_P (x))
12624 else if (code == 'b')
12626 else if (code == 'k')
12628 else if (code == 'q')
12630 else if (code == 'y')
12632 else if (code == 'h')
12634 else if (code == 'x')
12636 else if (code == 't')
12639 code = GET_MODE_SIZE (GET_MODE (x));
12641 /* Irritatingly, AMD extended registers use a different naming convention
12642 from the normal registers. */
12643 if (REX_INT_REG_P (x))
12645 gcc_assert (TARGET_64BIT);
12649 error ("extended registers have no high halves");
12652 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
12655 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
12658 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
12661 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
12664 error ("unsupported operand size for extended register");
12674 if (STACK_TOP_P (x))
12683 if (! ANY_FP_REG_P (x))
12684 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
12689 reg = hi_reg_name[REGNO (x)];
12692 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
12694 reg = qi_reg_name[REGNO (x)];
12697 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
12699 reg = qi_high_reg_name[REGNO (x)];
12704 gcc_assert (!duplicated);
12706 fputs (hi_reg_name[REGNO (x)] + 1, file);
12711 gcc_unreachable ();
12717 if (ASSEMBLER_DIALECT == ASM_ATT)
12718 fprintf (file, ", %%%s", reg);
12720 fprintf (file, ", %s", reg);
12724 /* Locate some local-dynamic symbol still in use by this function
12725 so that we can print its name in some tls_local_dynamic_base pattern. */
12729 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
12733 if (GET_CODE (x) == SYMBOL_REF
12734 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
12736 cfun->machine->some_ld_name = XSTR (x, 0);
12743 static const char *
12744 get_some_local_dynamic_name (void)
12748 if (cfun->machine->some_ld_name)
12749 return cfun->machine->some_ld_name;
12751 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
12752 if (NONDEBUG_INSN_P (insn)
12753 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
12754 return cfun->machine->some_ld_name;
12759 /* Meaning of CODE:
12760 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
12761 C -- print opcode suffix for set/cmov insn.
12762 c -- like C, but print reversed condition
12763 F,f -- likewise, but for floating-point.
12764 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
12766 R -- print the prefix for register names.
12767 z -- print the opcode suffix for the size of the current operand.
12768 Z -- likewise, with special suffixes for x87 instructions.
12769 * -- print a star (in certain assembler syntax)
12770 A -- print an absolute memory reference.
12771 w -- print the operand as if it's a "word" (HImode) even if it isn't.
12772 s -- print a shift double count, followed by the assembler's argument
12774 b -- print the QImode name of the register for the indicated operand.
12775 %b0 would print %al if operands[0] is reg 0.
12776 w -- likewise, print the HImode name of the register.
12777 k -- likewise, print the SImode name of the register.
12778 q -- likewise, print the DImode name of the register.
12779 x -- likewise, print the V4SFmode name of the register.
12780 t -- likewise, print the V8SFmode name of the register.
12781 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
12782 y -- print "st(0)" instead of "st" as a register.
12783 d -- print duplicated register operand for AVX instruction.
12784 D -- print condition for SSE cmp instruction.
12785 P -- if PIC, print an @PLT suffix.
12786 X -- don't print any sort of PIC '@' suffix for a symbol.
12787 & -- print some in-use local-dynamic symbol name.
12788 H -- print a memory address offset by 8; used for sse high-parts
12789 Y -- print condition for XOP pcom* instruction.
12790 + -- print a branch hint as 'cs' or 'ds' prefix
12791 ; -- print a semicolon (after prefixes, due to a bug in older gas).
12792 @ -- print the segment register of a thread base pointer load
12796 ix86_print_operand (FILE *file, rtx x, int code)
12803 if (ASSEMBLER_DIALECT == ASM_ATT)
12809 const char *name = get_some_local_dynamic_name ();
12811 output_operand_lossage ("'%%&' used without any "
12812 "local dynamic TLS references");
12814 assemble_name (file, name);
12819 switch (ASSEMBLER_DIALECT)
12826 /* Intel syntax. For absolute addresses, registers should not
12827 be surrounded by braces. */
12831 ix86_print_operand (file, x, 0);
12838 gcc_unreachable ();
12841 ix86_print_operand (file, x, 0);
12846 if (ASSEMBLER_DIALECT == ASM_ATT)
12851 if (ASSEMBLER_DIALECT == ASM_ATT)
12856 if (ASSEMBLER_DIALECT == ASM_ATT)
12861 if (ASSEMBLER_DIALECT == ASM_ATT)
12866 if (ASSEMBLER_DIALECT == ASM_ATT)
12871 if (ASSEMBLER_DIALECT == ASM_ATT)
12876 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
12878 /* Opcodes don't get size suffixes if using Intel syntax. */
12879 if (ASSEMBLER_DIALECT == ASM_INTEL)
12882 switch (GET_MODE_SIZE (GET_MODE (x)))
12901 output_operand_lossage
12902 ("invalid operand size for operand code '%c'", code);
12907 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
12909 (0, "non-integer operand used with operand code '%c'", code);
12913 /* 387 opcodes don't get size suffixes if using Intel syntax. */
12914 if (ASSEMBLER_DIALECT == ASM_INTEL)
12917 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
12919 switch (GET_MODE_SIZE (GET_MODE (x)))
12922 #ifdef HAVE_AS_IX86_FILDS
12932 #ifdef HAVE_AS_IX86_FILDQ
12935 fputs ("ll", file);
12943 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
12945 /* 387 opcodes don't get size suffixes
12946 if the operands are registers. */
12947 if (STACK_REG_P (x))
12950 switch (GET_MODE_SIZE (GET_MODE (x)))
12971 output_operand_lossage
12972 ("invalid operand type used with operand code '%c'", code);
12976 output_operand_lossage
12977 ("invalid operand size for operand code '%c'", code);
12994 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
12996 ix86_print_operand (file, x, 0);
12997 fputs (", ", file);
13002 /* A little bit of braindamage here. The SSE compare instructions
13003 use completely different names for the comparisons than the
13004 fp conditional moves do.
13007 switch (GET_CODE (x))
13010 fputs ("eq", file);
13013 fputs ("eq_us", file);
13016 fputs ("lt", file);
13019 fputs ("nge", file);
13022 fputs ("le", file);
13025 fputs ("ngt", file);
13028 fputs ("unord", file);
13031 fputs ("neq", file);
13034 fputs ("neq_oq", file);
13037 fputs ("ge", file);
13040 fputs ("nlt", file);
13043 fputs ("gt", file);
13046 fputs ("nle", file);
13049 fputs ("ord", file);
13052 output_operand_lossage ("operand is not a condition code, "
13053 "invalid operand code 'D'");
13059 switch (GET_CODE (x))
13063 fputs ("eq", file);
13067 fputs ("lt", file);
13071 fputs ("le", file);
13074 fputs ("unord", file);
13078 fputs ("neq", file);
13082 fputs ("nlt", file);
13086 fputs ("nle", file);
13089 fputs ("ord", file);
13092 output_operand_lossage ("operand is not a condition code, "
13093 "invalid operand code 'D'");
13099 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
13100 if (ASSEMBLER_DIALECT == ASM_ATT)
13102 switch (GET_MODE (x))
13104 case HImode: putc ('w', file); break;
13106 case SFmode: putc ('l', file); break;
13108 case DFmode: putc ('q', file); break;
13109 default: gcc_unreachable ();
13116 if (!COMPARISON_P (x))
13118 output_operand_lossage ("operand is neither a constant nor a "
13119 "condition code, invalid operand code "
13123 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
13126 if (!COMPARISON_P (x))
13128 output_operand_lossage ("operand is neither a constant nor a "
13129 "condition code, invalid operand code "
13133 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
13134 if (ASSEMBLER_DIALECT == ASM_ATT)
13137 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
13140 /* Like above, but reverse condition */
13142 /* Check to see if argument to %c is really a constant
13143 and not a condition code which needs to be reversed. */
13144 if (!COMPARISON_P (x))
13146 output_operand_lossage ("operand is neither a constant nor a "
13147 "condition code, invalid operand "
13151 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
13154 if (!COMPARISON_P (x))
13156 output_operand_lossage ("operand is neither a constant nor a "
13157 "condition code, invalid operand "
13161 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
13162 if (ASSEMBLER_DIALECT == ASM_ATT)
13165 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
13169 /* It doesn't actually matter what mode we use here, as we're
13170 only going to use this for printing. */
13171 x = adjust_address_nv (x, DImode, 8);
13179 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
13182 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
13185 int pred_val = INTVAL (XEXP (x, 0));
13187 if (pred_val < REG_BR_PROB_BASE * 45 / 100
13188 || pred_val > REG_BR_PROB_BASE * 55 / 100)
13190 int taken = pred_val > REG_BR_PROB_BASE / 2;
13191 int cputaken = final_forward_branch_p (current_output_insn) == 0;
13193 /* Emit hints only in the case that the default branch prediction
13194 heuristics would fail. */
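/* A sketch of the arithmetic, assuming the usual REG_BR_PROB_BASE of
   10000: a REG_BR_PROB note of 9000 lies outside the 45%-55% dead zone
   above and predicts the branch taken.  On a forward branch the CPU's
   static prediction is not-taken (cputaken == 0 below), so
   taken != cputaken and the "ds" (taken) hint is emitted.  */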
13195 if (taken != cputaken)
13197 /* We use 3e (DS) prefix for taken branches and
13198 2e (CS) prefix for not taken branches. */
13200 fputs ("ds ; ", file);
13202 fputs ("cs ; ", file);
13210 switch (GET_CODE (x))
13213 fputs ("neq", file);
13216 fputs ("eq", file);
13220 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
13224 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
13228 fputs ("le", file);
13232 fputs ("lt", file);
13235 fputs ("unord", file);
13238 fputs ("ord", file);
13241 fputs ("ueq", file);
13244 fputs ("nlt", file);
13247 fputs ("nle", file);
13250 fputs ("ule", file);
13253 fputs ("ult", file);
13256 fputs ("une", file);
13259 output_operand_lossage ("operand is not a condition code, "
13260 "invalid operand code 'Y'");
13266 #ifndef HAVE_AS_IX86_REP_LOCK_PREFIX
13272 if (ASSEMBLER_DIALECT == ASM_ATT)
13275 /* The kernel uses a different segment register for performance
13276 reasons; that way a system call does not have to trash the userspace
13277 segment register, which would be expensive. */
13278 if (TARGET_64BIT && ix86_cmodel != CM_KERNEL)
13279 fputs ("fs", file);
13281 fputs ("gs", file);
13285 output_operand_lossage ("invalid operand code '%c'", code);
13290 print_reg (x, code, file);
13292 else if (MEM_P (x))
13294 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
13295 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
13296 && GET_MODE (x) != BLKmode)
13299 switch (GET_MODE_SIZE (GET_MODE (x)))
13301 case 1: size = "BYTE"; break;
13302 case 2: size = "WORD"; break;
13303 case 4: size = "DWORD"; break;
13304 case 8: size = "QWORD"; break;
13305 case 12: size = "TBYTE"; break;
13307 if (GET_MODE (x) == XFmode)
13312 case 32: size = "YMMWORD"; break;
13314 gcc_unreachable ();
13317 /* Check for explicit size override (codes 'b', 'w' and 'k') */
13320 else if (code == 'w')
13322 else if (code == 'k')
13325 fputs (size, file);
13326 fputs (" PTR ", file);
13330 /* Avoid (%rip) for call operands. */
13331 if (CONSTANT_ADDRESS_P (x) && code == 'P'
13332 && !CONST_INT_P (x))
13333 output_addr_const (file, x);
13334 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
13335 output_operand_lossage ("invalid constraints for operand");
13337 output_address (x);
13340 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
13345 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
13346 REAL_VALUE_TO_TARGET_SINGLE (r, l);
13348 if (ASSEMBLER_DIALECT == ASM_ATT)
13350 /* Sign extend 32bit SFmode immediate to 8 bytes. */
13352 fprintf (file, "0x%08llx", (unsigned long long) (int) l);
13354 fprintf (file, "0x%08x", (unsigned int) l);
13357 /* These float cases don't actually occur as immediate operands. */
13358 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
13362 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
13363 fputs (dstr, file);
13366 else if (GET_CODE (x) == CONST_DOUBLE
13367 && GET_MODE (x) == XFmode)
13371 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
13372 fputs (dstr, file);
13377 /* We have patterns that allow zero sets of memory, for instance.
13378 In 64-bit mode, we should probably support all 8-byte vectors,
13379 since we can in fact encode that into an immediate. */
13380 if (GET_CODE (x) == CONST_VECTOR)
13382 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
13388 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
13390 if (ASSEMBLER_DIALECT == ASM_ATT)
13393 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
13394 || GET_CODE (x) == LABEL_REF)
13396 if (ASSEMBLER_DIALECT == ASM_ATT)
13399 fputs ("OFFSET FLAT:", file);
13402 if (CONST_INT_P (x))
13403 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
13405 output_pic_addr_const (file, x, code);
13407 output_addr_const (file, x);
13412 ix86_print_operand_punct_valid_p (unsigned char code)
13414 return (code == '@' || code == '*' || code == '+'
13415 || code == '&' || code == ';');
13418 /* Print a memory operand whose address is ADDR. */
13421 ix86_print_operand_address (FILE *file, rtx addr)
13423 struct ix86_address parts;
13424 rtx base, index, disp;
13426 int ok = ix86_decompose_address (addr, &parts);
13431 index = parts.index;
13433 scale = parts.scale;
13441 if (ASSEMBLER_DIALECT == ASM_ATT)
13443 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
13446 gcc_unreachable ();
13449 /* Use the one-byte-shorter RIP-relative addressing in 64-bit mode. */
13450 if (TARGET_64BIT && !base && !index)
13454 if (GET_CODE (disp) == CONST
13455 && GET_CODE (XEXP (disp, 0)) == PLUS
13456 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
13457 symbol = XEXP (XEXP (disp, 0), 0);
13459 if (GET_CODE (symbol) == LABEL_REF
13460 || (GET_CODE (symbol) == SYMBOL_REF
13461 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
13464 if (!base && !index)
13466 /* A displacement-only address requires special attention. */
13468 if (CONST_INT_P (disp))
13470 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
13471 fputs ("ds:", file);
13472 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
13475 output_pic_addr_const (file, disp, 0);
13477 output_addr_const (file, disp);
13481 if (ASSEMBLER_DIALECT == ASM_ATT)
13486 output_pic_addr_const (file, disp, 0);
13487 else if (GET_CODE (disp) == LABEL_REF)
13488 output_asm_label (disp);
13490 output_addr_const (file, disp);
13495 print_reg (base, 0, file);
13499 print_reg (index, 0, file);
13501 fprintf (file, ",%d", scale);
13507 rtx offset = NULL_RTX;
13511 /* Pull out the offset of a symbol; print any symbol itself. */
13512 if (GET_CODE (disp) == CONST
13513 && GET_CODE (XEXP (disp, 0)) == PLUS
13514 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
13516 offset = XEXP (XEXP (disp, 0), 1);
13517 disp = gen_rtx_CONST (VOIDmode,
13518 XEXP (XEXP (disp, 0), 0));
13522 output_pic_addr_const (file, disp, 0);
13523 else if (GET_CODE (disp) == LABEL_REF)
13524 output_asm_label (disp);
13525 else if (CONST_INT_P (disp))
13528 output_addr_const (file, disp);
13534 print_reg (base, 0, file);
13537 if (INTVAL (offset) >= 0)
13539 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
13543 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
13550 print_reg (index, 0, file);
13552 fprintf (file, "*%d", scale);
13559 /* Implementation of TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
13562 i386_asm_output_addr_const_extra (FILE *file, rtx x)
13566 if (GET_CODE (x) != UNSPEC)
13569 op = XVECEXP (x, 0, 0);
13570 switch (XINT (x, 1))
13572 case UNSPEC_GOTTPOFF:
13573 output_addr_const (file, op);
13574 /* FIXME: This might be @TPOFF in Sun ld. */
13575 fputs ("@gottpoff", file);
13578 output_addr_const (file, op);
13579 fputs ("@tpoff", file);
13581 case UNSPEC_NTPOFF:
13582 output_addr_const (file, op);
13584 fputs ("@tpoff", file);
13586 fputs ("@ntpoff", file);
13588 case UNSPEC_DTPOFF:
13589 output_addr_const (file, op);
13590 fputs ("@dtpoff", file);
13592 case UNSPEC_GOTNTPOFF:
13593 output_addr_const (file, op);
13595 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
13596 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
13598 fputs ("@gotntpoff", file);
13600 case UNSPEC_INDNTPOFF:
13601 output_addr_const (file, op);
13602 fputs ("@indntpoff", file);
13605 case UNSPEC_MACHOPIC_OFFSET:
13606 output_addr_const (file, op);
13608 machopic_output_function_base_name (file);
13612 case UNSPEC_STACK_CHECK:
13616 gcc_assert (flag_split_stack);
13618 #ifdef TARGET_THREAD_SPLIT_STACK_OFFSET
13619 offset = TARGET_THREAD_SPLIT_STACK_OFFSET;
13621 gcc_unreachable ();
13624 fprintf (file, "%s:%d", TARGET_64BIT ? "%fs" : "%gs", offset);
13635 /* Split one or more double-mode RTL references into pairs of half-mode
13636 references. The RTL can be REG, offsettable MEM, integer constant, or
13637 CONST_DOUBLE. "operands" is a pointer to an array of double-mode RTLs to
13638 split and "num" is its length. lo_half and hi_half are output arrays
13639 that parallel "operands". */
13642 split_double_mode (enum machine_mode mode, rtx operands[],
13643 int num, rtx lo_half[], rtx hi_half[])
13645 enum machine_mode half_mode;
13651 half_mode = DImode;
13654 half_mode = SImode;
13657 gcc_unreachable ();
13660 byte = GET_MODE_SIZE (half_mode);
13664 rtx op = operands[num];
13666 /* simplify_subreg refuses to split volatile memory addresses,
13667 but we still have to handle them. */
13670 lo_half[num] = adjust_address (op, half_mode, 0);
13671 hi_half[num] = adjust_address (op, half_mode, byte);
13675 lo_half[num] = simplify_gen_subreg (half_mode, op,
13676 GET_MODE (op) == VOIDmode
13677 ? mode : GET_MODE (op), 0);
13678 hi_half[num] = simplify_gen_subreg (half_mode, op,
13679 GET_MODE (op) == VOIDmode
13680 ? mode : GET_MODE (op), byte);
13685 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
13686 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
13687 is the expression of the binary operation. The output may either be
13688 emitted here, or returned to the caller, like all output_* functions.
13690 There is no guarantee that the operands have the same mode, as they
13691 might be within FLOAT or FLOAT_EXTEND expressions. */
13693 #ifndef SYSV386_COMPAT
13694 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
13695 wants to fix the assemblers because that causes incompatibility
13696 with gcc. No-one wants to fix gcc because that causes
13697 incompatibility with assemblers... You can use the option of
13698 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
13699 #define SYSV386_COMPAT 1
13703 output_387_binary_op (rtx insn, rtx *operands)
13705 static char buf[40];
13708 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
13710 #ifdef ENABLE_CHECKING
13711 /* Even if we do not want to check the inputs, this documents the input
13712 constraints, which helps in understanding the following code. */
13713 if (STACK_REG_P (operands[0])
13714 && ((REG_P (operands[1])
13715 && REGNO (operands[0]) == REGNO (operands[1])
13716 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
13717 || (REG_P (operands[2])
13718 && REGNO (operands[0]) == REGNO (operands[2])
13719 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
13720 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
13723 gcc_assert (is_sse);
13726 switch (GET_CODE (operands[3]))
13729 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
13730 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
13738 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
13739 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
13747 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
13748 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
13756 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
13757 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
13765 gcc_unreachable ();
13772 strcpy (buf, ssep);
13773 if (GET_MODE (operands[0]) == SFmode)
13774 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
13776 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
13780 strcpy (buf, ssep + 1);
13781 if (GET_MODE (operands[0]) == SFmode)
13782 strcat (buf, "ss\t{%2, %0|%0, %2}");
13784 strcat (buf, "sd\t{%2, %0|%0, %2}");
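/* For illustration, assuming ssep is "vadd" in the PLUS case above
   (the assignments live in the switch): an SFmode add becomes
   "vaddss\t{%2, %1, %0|%0, %1, %2}" under AVX and, via ssep + 1,
   the two-operand "addss\t{%2, %0|%0, %2}" otherwise.  */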
13790 switch (GET_CODE (operands[3]))
13794 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
13796 rtx temp = operands[2];
13797 operands[2] = operands[1];
13798 operands[1] = temp;
13801 /* Now we know operands[0] == operands[1]. */
13803 if (MEM_P (operands[2]))
13809 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
13811 if (STACK_TOP_P (operands[0]))
13812 /* How is it that we are storing to a dead operand[2]?
13813 Well, presumably operands[1] is dead too. We can't
13814 store the result to st(0) as st(0) gets popped on this
13815 instruction. Instead store to operands[2] (which I
13816 think has to be st(1)). st(1) will be popped later.
13817 gcc <= 2.8.1 didn't have this check and generated
13818 assembly code that the Unixware assembler rejected. */
13819 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
13821 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
13825 if (STACK_TOP_P (operands[0]))
13826 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
13828 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
13833 if (MEM_P (operands[1]))
13839 if (MEM_P (operands[2]))
13845 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
13848 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
13849 derived assemblers, confusingly reverse the direction of
13850 the operation for fsub{r} and fdiv{r} when the
13851 destination register is not st(0). The Intel assembler
13852 doesn't have this brain damage. Read !SYSV386_COMPAT to
13853 figure out what the hardware really does. */
13854 if (STACK_TOP_P (operands[0]))
13855 p = "{p\t%0, %2|rp\t%2, %0}";
13857 p = "{rp\t%2, %0|p\t%0, %2}";
13859 if (STACK_TOP_P (operands[0]))
13860 /* As above for fmul/fadd, we can't store to st(0). */
13861 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
13863 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
13868 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
13871 if (STACK_TOP_P (operands[0]))
13872 p = "{rp\t%0, %1|p\t%1, %0}";
13874 p = "{p\t%1, %0|rp\t%0, %1}";
13876 if (STACK_TOP_P (operands[0]))
13877 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
13879 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
13884 if (STACK_TOP_P (operands[0]))
13886 if (STACK_TOP_P (operands[1]))
13887 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
13889 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
13892 else if (STACK_TOP_P (operands[1]))
13895 p = "{\t%1, %0|r\t%0, %1}";
13897 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
13903 p = "{r\t%2, %0|\t%0, %2}";
13905 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
13911 gcc_unreachable ();
13918 /* Return the mode needed for ENTITY in the optimize_mode_switching pass. */
13921 ix86_mode_needed (int entity, rtx insn)
13923 enum attr_i387_cw mode;
13925 /* The mode UNINITIALIZED is used to store the control word after a
13926 function call or ASM pattern. The mode ANY specifies that the function
13927 has no requirements on the control word and makes no changes to the
13928 bits we are interested in. */
13931 || (NONJUMP_INSN_P (insn)
13932 && (asm_noperands (PATTERN (insn)) >= 0
13933 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
13934 return I387_CW_UNINITIALIZED;
13936 if (recog_memoized (insn) < 0)
13937 return I387_CW_ANY;
13939 mode = get_attr_i387_cw (insn);
13944 if (mode == I387_CW_TRUNC)
13949 if (mode == I387_CW_FLOOR)
13954 if (mode == I387_CW_CEIL)
13959 if (mode == I387_CW_MASK_PM)
13964 gcc_unreachable ();
13967 return I387_CW_ANY;
13970 /* Output code to initialize control word copies used by trunc?f?i and
13971 rounding patterns. CURRENT_MODE is set to the current control word,
13972 while NEW_MODE is set to the new control word. */
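/* For reference, in the standard x87 control word layout the
   rounding-control field occupies bits 11:10 (00 = to nearest,
   01 = down, 10 = up, 11 = toward zero) and bit 5 masks the precision
   exception.  That is where the masks below come from: 0x0c00 selects
   truncation, 0x0400 rounds down, 0x0800 rounds up, and 0x0020 sets
   the PM bit; the insv path writes the same RC values (0xc, 0x4, 0x8)
   directly into the high byte.  */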
13975 emit_i387_cw_initialization (int mode)
13977 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
13980 enum ix86_stack_slot slot;
13982 rtx reg = gen_reg_rtx (HImode);
13984 emit_insn (gen_x86_fnstcw_1 (stored_mode));
13985 emit_move_insn (reg, copy_rtx (stored_mode));
13987 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
13988 || optimize_function_for_size_p (cfun))
13992 case I387_CW_TRUNC:
13993 /* round toward zero (truncate) */
13994 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
13995 slot = SLOT_CW_TRUNC;
13998 case I387_CW_FLOOR:
13999 /* round down toward -oo */
14000 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
14001 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
14002 slot = SLOT_CW_FLOOR;
14006 /* round up toward +oo */
14007 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
14008 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
14009 slot = SLOT_CW_CEIL;
14012 case I387_CW_MASK_PM:
14013 /* mask precision exception for nearbyint() */
14014 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
14015 slot = SLOT_CW_MASK_PM;
14019 gcc_unreachable ();
14026 case I387_CW_TRUNC:
14027 /* round toward zero (truncate) */
14028 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
14029 slot = SLOT_CW_TRUNC;
14032 case I387_CW_FLOOR:
14033 /* round down toward -oo */
14034 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
14035 slot = SLOT_CW_FLOOR;
14039 /* round up toward +oo */
14040 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
14041 slot = SLOT_CW_CEIL;
14044 case I387_CW_MASK_PM:
14045 /* mask precision exception for nearbyint() */
14046 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
14047 slot = SLOT_CW_MASK_PM;
14051 gcc_unreachable ();
14055 gcc_assert (slot < MAX_386_STACK_LOCALS);
14057 new_mode = assign_386_stack_local (HImode, slot);
14058 emit_move_insn (new_mode, reg);
14061 /* Output code for INSN to convert a float to a signed int. OPERANDS
14062 are the insn operands. The output may be [HSD]Imode and the input
14063 operand may be [SDX]Fmode. */
14066 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
14068 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
14069 int dimode_p = GET_MODE (operands[0]) == DImode;
14070 int round_mode = get_attr_i387_cw (insn);
14072 /* Jump through a hoop or two for DImode, since the hardware has no
14073 non-popping instruction. We used to do this a different way, but
14074 that was somewhat fragile and broke with post-reload splitters. */
14075 if ((dimode_p || fisttp) && !stack_top_dies)
14076 output_asm_insn ("fld\t%y1", operands);
14078 gcc_assert (STACK_TOP_P (operands[1]));
14079 gcc_assert (MEM_P (operands[0]));
14080 gcc_assert (GET_MODE (operands[1]) != TFmode);
14083 output_asm_insn ("fisttp%Z0\t%0", operands);
14086 if (round_mode != I387_CW_ANY)
14087 output_asm_insn ("fldcw\t%3", operands);
14088 if (stack_top_dies || dimode_p)
14089 output_asm_insn ("fistp%Z0\t%0", operands);
14091 output_asm_insn ("fist%Z0\t%0", operands);
14092 if (round_mode != I387_CW_ANY)
14093 output_asm_insn ("fldcw\t%2", operands);
14099 /* Output code for x87 ffreep insn. The OPNO argument, which may only
14100 have the values zero or one, indicates the ffreep insn's operand
14101 from the OPERANDS array. */
14103 static const char *
14104 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
14106 if (TARGET_USE_FFREEP)
14107 #ifdef HAVE_AS_IX86_FFREEP
14108 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
14111 static char retval[32];
14112 int regno = REGNO (operands[opno]);
14114 gcc_assert (FP_REGNO_P (regno));
14116 regno -= FIRST_STACK_REG;
14118 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
14123 return opno ? "fstp\t%y1" : "fstp\t%y0";
14127 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
14128 should be used. UNORDERED_P is true when fucom should be used. */
14131 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
14133 int stack_top_dies;
14134 rtx cmp_op0, cmp_op1;
14135 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
14139 cmp_op0 = operands[0];
14140 cmp_op1 = operands[1];
14144 cmp_op0 = operands[1];
14145 cmp_op1 = operands[2];
14150 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
14151 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
14152 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
14153 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
14155 if (GET_MODE (operands[0]) == SFmode)
14157 return &ucomiss[TARGET_AVX ? 0 : 1];
14159 return &comiss[TARGET_AVX ? 0 : 1];
14162 return &ucomisd[TARGET_AVX ? 0 : 1];
14164 return &comisd[TARGET_AVX ? 0 : 1];
14167 gcc_assert (STACK_TOP_P (cmp_op0));
14169 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
14171 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
14173 if (stack_top_dies)
14175 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
14176 return output_387_ffreep (operands, 1);
14179 return "ftst\n\tfnstsw\t%0";
14182 if (STACK_REG_P (cmp_op1)
14184 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
14185 && REGNO (cmp_op1) != FIRST_STACK_REG)
14187 /* If both the top of the 387 stack and the other operand (also
14188 a stack register) die, then this must be an
14189 `fcompp' float compare. */
14193 /* There is no double-popping fcomi variant. Fortunately,
14194 eflags is immune to the fstp's cc clobbering. */
14196 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
14198 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
14199 return output_387_ffreep (operands, 0);
14204 return "fucompp\n\tfnstsw\t%0";
14206 return "fcompp\n\tfnstsw\t%0";
14211 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
14213 static const char * const alt[16] =
14215 "fcom%Z2\t%y2\n\tfnstsw\t%0",
14216 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
14217 "fucom%Z2\t%y2\n\tfnstsw\t%0",
14218 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
14220 "ficom%Z2\t%y2\n\tfnstsw\t%0",
14221 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
14225 "fcomi\t{%y1, %0|%0, %y1}",
14226 "fcomip\t{%y1, %0|%0, %y1}",
14227 "fucomi\t{%y1, %0|%0, %y1}",
14228 "fucomip\t{%y1, %0|%0, %y1}",
14239 mask = eflags_p << 3;
14240 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
14241 mask |= unordered_p << 1;
14242 mask |= stack_top_dies;
14244 gcc_assert (mask < 16);
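/* A worked instance of the encoding: comparing two stack registers
   with fcomi available (eflags_p == 1), an FP-mode cmp_op1, an ordered
   compare and a dying stack top gives mask = 8 | 0 | 0 | 1 = 9,
   selecting the popping "fcomip\t{%y1, %0|%0, %y1}" entry above.  */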
14253 ix86_output_addr_vec_elt (FILE *file, int value)
14255 const char *directive = ASM_LONG;
14259 directive = ASM_QUAD;
14261 gcc_assert (!TARGET_64BIT);
14264 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
14268 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
14270 const char *directive = ASM_LONG;
14273 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
14274 directive = ASM_QUAD;
14276 gcc_assert (!TARGET_64BIT);
14278 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
14279 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
14280 fprintf (file, "%s%s%d-%s%d\n",
14281 directive, LPREFIX, value, LPREFIX, rel);
14282 else if (HAVE_AS_GOTOFF_IN_DATA)
14283 fprintf (file, ASM_LONG "%s%d@GOTOFF\n", LPREFIX, value);
14285 else if (TARGET_MACHO)
14287 fprintf (file, ASM_LONG "%s%d-", LPREFIX, value);
14288 machopic_output_function_base_name (file);
14293 asm_fprintf (file, ASM_LONG "%U%s+[.-%s%d]\n",
14294 GOT_SYMBOL_NAME, LPREFIX, value);
14297 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
14301 ix86_expand_clear (rtx dest)
14305 /* We play register width games, which are only valid after reload. */
14306 gcc_assert (reload_completed);
14308 /* Avoid HImode and its attendant prefix byte. */
14309 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
14310 dest = gen_rtx_REG (SImode, REGNO (dest));
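/* Size intuition, from the standard ia32 encodings: "xorl %eax, %eax"
   is two bytes while "movl $0, %eax" is five, and a HImode
   "xorw %ax, %ax" would clear the same register but drag in the 0x66
   operand-size prefix; hence the widening to SImode above.  */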
14311 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
14313 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
14314 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
14316 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
14317 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
14323 /* X is an unchanging MEM. If it is a constant pool reference, return
14324 the constant pool rtx, else NULL. */
14327 maybe_get_pool_constant (rtx x)
14329 x = ix86_delegitimize_address (XEXP (x, 0));
14331 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
14332 return get_pool_constant (x);
14338 ix86_expand_move (enum machine_mode mode, rtx operands[])
14341 enum tls_model model;
14346 if (GET_CODE (op1) == SYMBOL_REF)
14348 model = SYMBOL_REF_TLS_MODEL (op1);
14351 op1 = legitimize_tls_address (op1, model, true);
14352 op1 = force_operand (op1, op0);
14356 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
14357 && SYMBOL_REF_DLLIMPORT_P (op1))
14358 op1 = legitimize_dllimport_symbol (op1, false);
14360 else if (GET_CODE (op1) == CONST
14361 && GET_CODE (XEXP (op1, 0)) == PLUS
14362 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
14364 rtx addend = XEXP (XEXP (op1, 0), 1);
14365 rtx symbol = XEXP (XEXP (op1, 0), 0);
14368 model = SYMBOL_REF_TLS_MODEL (symbol);
14370 tmp = legitimize_tls_address (symbol, model, true);
14371 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
14372 && SYMBOL_REF_DLLIMPORT_P (symbol))
14373 tmp = legitimize_dllimport_symbol (symbol, true);
14377 tmp = force_operand (tmp, NULL);
14378 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
14379 op0, 1, OPTAB_DIRECT);
14385 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
14387 if (TARGET_MACHO && !TARGET_64BIT)
14392 rtx temp = ((reload_in_progress
14393 || ((op0 && REG_P (op0))
14395 ? op0 : gen_reg_rtx (Pmode));
14396 op1 = machopic_indirect_data_reference (op1, temp);
14397 op1 = machopic_legitimize_pic_address (op1, mode,
14398 temp == op1 ? 0 : temp);
14400 else if (MACHOPIC_INDIRECT)
14401 op1 = machopic_indirect_data_reference (op1, 0);
14409 op1 = force_reg (Pmode, op1);
14410 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
14412 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
14413 op1 = legitimize_pic_address (op1, reg);
14422 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
14423 || !push_operand (op0, mode))
14425 op1 = force_reg (mode, op1);
14427 if (push_operand (op0, mode)
14428 && ! general_no_elim_operand (op1, mode))
14429 op1 = copy_to_mode_reg (mode, op1);
14431 /* Force large constants in 64bit compilation into a register
14432 to get them CSEed. */
14433 if (can_create_pseudo_p ()
14434 && (mode == DImode) && TARGET_64BIT
14435 && immediate_operand (op1, mode)
14436 && !x86_64_zext_immediate_operand (op1, VOIDmode)
14437 && !register_operand (op0, mode)
14439 op1 = copy_to_mode_reg (mode, op1);
14441 if (can_create_pseudo_p ()
14442 && FLOAT_MODE_P (mode)
14443 && GET_CODE (op1) == CONST_DOUBLE)
14445 /* If we are loading a floating point constant to a register,
14446 force the value to memory now, since we'll get better code
14447 out of the back end. */
14449 op1 = validize_mem (force_const_mem (mode, op1));
14450 if (!register_operand (op0, mode))
14452 rtx temp = gen_reg_rtx (mode);
14453 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
14454 emit_move_insn (op0, temp);
14460 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
14464 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
14466 rtx op0 = operands[0], op1 = operands[1];
14467 unsigned int align = GET_MODE_ALIGNMENT (mode);
14469 /* Force constants other than zero into memory. We do not know how
14470 the instructions used to build constants modify the upper 64 bits
14471 of the register; once we have that information we may be able
14472 to handle some of them more efficiently. */
14473 if (can_create_pseudo_p ()
14474 && register_operand (op0, mode)
14475 && (CONSTANT_P (op1)
14476 || (GET_CODE (op1) == SUBREG
14477 && CONSTANT_P (SUBREG_REG (op1))))
14478 && !standard_sse_constant_p (op1))
14479 op1 = validize_mem (force_const_mem (mode, op1));
14481 /* We need to check memory alignment for SSE modes since attributes
14482 can make operands unaligned. */
14483 if (can_create_pseudo_p ()
14484 && SSE_REG_MODE_P (mode)
14485 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
14486 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
14490 /* ix86_expand_vector_move_misalign() does not like constants ... */
14491 if (CONSTANT_P (op1)
14492 || (GET_CODE (op1) == SUBREG
14493 && CONSTANT_P (SUBREG_REG (op1))))
14494 op1 = validize_mem (force_const_mem (mode, op1));
14496 /* ... nor both arguments in memory. */
14497 if (!register_operand (op0, mode)
14498 && !register_operand (op1, mode))
14499 op1 = force_reg (mode, op1);
14501 tmp[0] = op0; tmp[1] = op1;
14502 ix86_expand_vector_move_misalign (mode, tmp);
14506 /* Make operand1 a register if it isn't already. */
14507 if (can_create_pseudo_p ()
14508 && !register_operand (op0, mode)
14509 && !register_operand (op1, mode))
14511 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
14515 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
14518 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
14519 straight to ix86_expand_vector_move. */
14520 /* Code generation for scalar reg-reg moves of single and double precision data:
14521 if (x86_sse_partial_reg_dependency == true || x86_sse_split_regs == true)
14525 if (x86_sse_partial_reg_dependency == true)
14530 Code generation for scalar loads of double precision data:
14531 if (x86_sse_split_regs == true)
14532 movlpd mem, reg (gas syntax)
14536 Code generation for unaligned packed loads of single precision data
14537 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
14538 if (x86_sse_unaligned_move_optimal)
14541 if (x86_sse_partial_reg_dependency == true)
14553 Code generation for unaligned packed loads of double precision data
14554 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
14555 if (x86_sse_unaligned_move_optimal)
14558 if (x86_sse_split_regs == true)
14571 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
14580 switch (GET_MODE_CLASS (mode))
14582 case MODE_VECTOR_INT:
14584 switch (GET_MODE_SIZE (mode))
14587 /* If we're optimizing for size, movups is the smallest. */
14588 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
14590 op0 = gen_lowpart (V4SFmode, op0);
14591 op1 = gen_lowpart (V4SFmode, op1);
14592 emit_insn (gen_avx_movups (op0, op1));
14595 op0 = gen_lowpart (V16QImode, op0);
14596 op1 = gen_lowpart (V16QImode, op1);
14597 emit_insn (gen_avx_movdqu (op0, op1));
14600 op0 = gen_lowpart (V32QImode, op0);
14601 op1 = gen_lowpart (V32QImode, op1);
14602 emit_insn (gen_avx_movdqu256 (op0, op1));
14605 gcc_unreachable ();
14608 case MODE_VECTOR_FLOAT:
14609 op0 = gen_lowpart (mode, op0);
14610 op1 = gen_lowpart (mode, op1);
14615 emit_insn (gen_avx_movups (op0, op1));
14618 emit_insn (gen_avx_movups256 (op0, op1));
14621 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
14623 op0 = gen_lowpart (V4SFmode, op0);
14624 op1 = gen_lowpart (V4SFmode, op1);
14625 emit_insn (gen_avx_movups (op0, op1));
14628 emit_insn (gen_avx_movupd (op0, op1));
14631 emit_insn (gen_avx_movupd256 (op0, op1));
14634 gcc_unreachable ();
14639 gcc_unreachable ();
14647 /* If we're optimizing for size, movups is the smallest. */
14648 if (optimize_insn_for_size_p ()
14649 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
14651 op0 = gen_lowpart (V4SFmode, op0);
14652 op1 = gen_lowpart (V4SFmode, op1);
14653 emit_insn (gen_sse_movups (op0, op1));
14657 /* ??? If we have typed data, then it would appear that using
14658 movdqu is the only way to get unaligned data loaded with integer type. */
14660 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
14662 op0 = gen_lowpart (V16QImode, op0);
14663 op1 = gen_lowpart (V16QImode, op1);
14664 emit_insn (gen_sse2_movdqu (op0, op1));
14668 if (TARGET_SSE2 && mode == V2DFmode)
14672 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
14674 op0 = gen_lowpart (V2DFmode, op0);
14675 op1 = gen_lowpart (V2DFmode, op1);
14676 emit_insn (gen_sse2_movupd (op0, op1));
14680 /* When SSE registers are split into halves, we can avoid
14681 writing to the top half twice. */
14682 if (TARGET_SSE_SPLIT_REGS)
14684 emit_clobber (op0);
14689 /* ??? Not sure about the best option for the Intel chips.
14690 The following would seem to satisfy; the register is
14691 entirely cleared, breaking the dependency chain. We
14692 then store to the upper half, with a dependency depth
14693 of one. A rumor has it that Intel recommends two movsd
14694 followed by an unpacklpd, but this is unconfirmed. And
14695 given that the dependency depth of the unpacklpd would
14696 still be one, I'm not sure why this would be better. */
14697 zero = CONST0_RTX (V2DFmode);
14700 m = adjust_address (op1, DFmode, 0);
14701 emit_insn (gen_sse2_loadlpd (op0, zero, m));
14702 m = adjust_address (op1, DFmode, 8);
14703 emit_insn (gen_sse2_loadhpd (op0, op0, m));
14707 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
14709 op0 = gen_lowpart (V4SFmode, op0);
14710 op1 = gen_lowpart (V4SFmode, op1);
14711 emit_insn (gen_sse_movups (op0, op1));
14715 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
14716 emit_move_insn (op0, CONST0_RTX (mode));
14718 emit_clobber (op0);
14720 if (mode != V4SFmode)
14721 op0 = gen_lowpart (V4SFmode, op0);
14722 m = adjust_address (op1, V2SFmode, 0);
14723 emit_insn (gen_sse_loadlps (op0, op0, m));
14724 m = adjust_address (op1, V2SFmode, 8);
14725 emit_insn (gen_sse_loadhps (op0, op0, m));
14728 else if (MEM_P (op0))
14730 /* If we're optimizing for size, movups is the smallest. */
14731 if (optimize_insn_for_size_p ()
14732 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
14734 op0 = gen_lowpart (V4SFmode, op0);
14735 op1 = gen_lowpart (V4SFmode, op1);
14736 emit_insn (gen_sse_movups (op0, op1));
14740 /* ??? Similar to above, only less clear because of quote
14741 typeless stores unquote. */
14742 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
14743 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
14745 op0 = gen_lowpart (V16QImode, op0);
14746 op1 = gen_lowpart (V16QImode, op1);
14747 emit_insn (gen_sse2_movdqu (op0, op1));
14751 if (TARGET_SSE2 && mode == V2DFmode)
14753 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
14755 op0 = gen_lowpart (V2DFmode, op0);
14756 op1 = gen_lowpart (V2DFmode, op1);
14757 emit_insn (gen_sse2_movupd (op0, op1));
14761 m = adjust_address (op0, DFmode, 0);
14762 emit_insn (gen_sse2_storelpd (m, op1));
14763 m = adjust_address (op0, DFmode, 8);
14764 emit_insn (gen_sse2_storehpd (m, op1));
14769 if (mode != V4SFmode)
14770 op1 = gen_lowpart (V4SFmode, op1);
14772 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
14774 op0 = gen_lowpart (V4SFmode, op0);
14775 emit_insn (gen_sse_movups (op0, op1));
14779 m = adjust_address (op0, V2SFmode, 0);
14780 emit_insn (gen_sse_storelps (m, op1));
14781 m = adjust_address (op0, V2SFmode, 8);
14782 emit_insn (gen_sse_storehps (m, op1));
14787 gcc_unreachable ();
14790 /* Expand a push in MODE. This is some mode for which we do not support
14791 proper push instructions, at least from the registers that we expect
14792 the value to live in. */
14795 ix86_expand_push (enum machine_mode mode, rtx x)
14799 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
14800 GEN_INT (-GET_MODE_SIZE (mode)),
14801 stack_pointer_rtx, 1, OPTAB_DIRECT);
14802 if (tmp != stack_pointer_rtx)
14803 emit_move_insn (stack_pointer_rtx, tmp);
14805 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
14807 /* When we push an operand onto stack, it has to be aligned at least
14808 at the function argument boundary. However since we don't have
14809 the argument type, we can't determine the actual argument boundary. */
14811 emit_move_insn (tmp, x);
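/* E.g. pushing a TImode value in 64-bit mode becomes, in effect, a
   "sub $16, %rsp" followed by an ordinary 16-byte move into (%rsp);
   the explicit stack-pointer adjustment stands in for the missing
   push instruction.  */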
14814 /* Helper function of ix86_fixup_binary_operands to canonicalize
14815 operand order. Returns true if the operands should be swapped. */
14818 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
14821 rtx dst = operands[0];
14822 rtx src1 = operands[1];
14823 rtx src2 = operands[2];
14825 /* If the operation is not commutative, we can't do anything. */
14826 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
14829 /* Highest priority is that src1 should match dst. */
14830 if (rtx_equal_p (dst, src1))
14832 if (rtx_equal_p (dst, src2))
14835 /* Next highest priority is that immediate constants come second. */
14836 if (immediate_operand (src2, mode))
14838 if (immediate_operand (src1, mode))
14841 /* Lowest priority is that memory references should come second. */
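/* A worked case: with code PLUS, dst == r1, src1 == const_int 5 and
   src2 == r1, the dst/src2 match above requests a swap, which both
   matches the destination to src1 and moves the immediate into the
   second slot.  */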
14851 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
14852 destination to use for the operation. If different from the true
14853 destination in operands[0], a copy operation will be required. */
14856 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
14859 rtx dst = operands[0];
14860 rtx src1 = operands[1];
14861 rtx src2 = operands[2];
14863 /* Canonicalize operand order. */
14864 if (ix86_swap_binary_operands_p (code, mode, operands))
14868 /* It is invalid to swap operands of different modes. */
14869 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
14876 /* The two source operands cannot both be in memory. */
14877 if (MEM_P (src1) && MEM_P (src2))
14879 /* Optimization: Only read from memory once. */
14880 if (rtx_equal_p (src1, src2))
14882 src2 = force_reg (mode, src2);
14886 src2 = force_reg (mode, src2);
14889 /* If the destination is memory, and we do not have matching source
14890 operands, do things in registers. */
14891 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
14892 dst = gen_reg_rtx (mode);
14894 /* Source 1 cannot be a constant. */
14895 if (CONSTANT_P (src1))
14896 src1 = force_reg (mode, src1);
14898 /* Source 1 cannot be a non-matching memory. */
14899 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
14900 src1 = force_reg (mode, src1);
14902 operands[1] = src1;
14903 operands[2] = src2;
14907 /* Similarly, but assume that the destination has already been
14908 set up properly. */
14911 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
14912 enum machine_mode mode, rtx operands[])
14914 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
14915 gcc_assert (dst == operands[0]);
14918 /* Attempt to expand a binary operator. Make the expansion closer to the
14919 actual machine than just general_operand, which would allow 3 separate
14920 memory references (one output, two input) in a single insn. */
14923 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
14926 rtx src1, src2, dst, op, clob;
14928 dst = ix86_fixup_binary_operands (code, mode, operands);
14929 src1 = operands[1];
14930 src2 = operands[2];
14932 /* Emit the instruction. */
14934 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
14935 if (reload_in_progress)
14937 /* Reload doesn't know about the flags register, and doesn't know that
14938 it doesn't want to clobber it. We can only do this with PLUS. */
14939 gcc_assert (code == PLUS);
14944 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
14945 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
14948 /* Fix up the destination if needed. */
14949 if (dst != operands[0])
14950 emit_move_insn (operands[0], dst);
14953 /* Return TRUE or FALSE depending on whether the binary operator meets the
14954 appropriate constraints. */
14957 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
14960 rtx dst = operands[0];
14961 rtx src1 = operands[1];
14962 rtx src2 = operands[2];
14964 /* The two source operands cannot both be in memory. */
14965 if (MEM_P (src1) && MEM_P (src2))
14968 /* Canonicalize operand order for commutative operators. */
14969 if (ix86_swap_binary_operands_p (code, mode, operands))
14976 /* If the destination is memory, we must have a matching source operand. */
14977 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
14980 /* Source 1 cannot be a constant. */
14981 if (CONSTANT_P (src1))
14984 /* Source 1 cannot be a non-matching memory. */
14985 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
14991 /* Attempt to expand a unary operator. Make the expansion closer to the
14992 actual machine than just general_operand, which would allow 2 separate
14993 memory references (one output, one input) in a single insn. */
14996 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
14999 int matching_memory;
15000 rtx src, dst, op, clob;
15005 /* If the destination is memory, and we do not have matching source
15006 operands, do things in registers. */
15007 matching_memory = 0;
15010 if (rtx_equal_p (dst, src))
15011 matching_memory = 1;
15013 dst = gen_reg_rtx (mode);
15016 /* When source operand is memory, destination must match. */
15017 if (MEM_P (src) && !matching_memory)
15018 src = force_reg (mode, src);
15020 /* Emit the instruction. */
15022 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
15023 if (reload_in_progress || code == NOT)
15025 /* Reload doesn't know about the flags register, and doesn't know that
15026 it doesn't want to clobber it. */
15027 gcc_assert (code == NOT);
15032 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
15033 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
15036 /* Fix up the destination if needed. */
15037 if (dst != operands[0])
15038 emit_move_insn (operands[0], dst);
15041 /* Split 32bit/64bit divmod with 8bit unsigned divmod if dividend and
15042 divisor are within the range [0-255]. */
15045 ix86_split_idivmod (enum machine_mode mode, rtx operands[],
15048 rtx end_label, qimode_label;
15049 rtx insn, div, mod;
15050 rtx scratch, tmp0, tmp1, tmp2;
15051 rtx (*gen_divmod4_1) (rtx, rtx, rtx, rtx);
15052 rtx (*gen_zero_extend) (rtx, rtx);
15053 rtx (*gen_test_ccno_1) (rtx, rtx);
15058 gen_divmod4_1 = signed_p ? gen_divmodsi4_1 : gen_udivmodsi4_1;
15059 gen_test_ccno_1 = gen_testsi_ccno_1;
15060 gen_zero_extend = gen_zero_extendqisi2;
15063 gen_divmod4_1 = signed_p ? gen_divmoddi4_1 : gen_udivmoddi4_1;
15064 gen_test_ccno_1 = gen_testdi_ccno_1;
15065 gen_zero_extend = gen_zero_extendqidi2;
15068 gcc_unreachable ();
15071 end_label = gen_label_rtx ();
15072 qimode_label = gen_label_rtx ();
15074 scratch = gen_reg_rtx (mode);
15076 /* Use 8bit unsigned divmod if dividend and divisor are within the
15077 range [0-255]. */
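/* A small worked case: for 200 / 7 the IOR of the operands is 0xcf,
   so the test against -0x100 (0xffffff00) sets ZF and the QImode path
   runs; the 8bit divide leaves the quotient 28 in AL and the
   remainder 4 in AH.  For, say, 300 / 7 the IOR has bit 8 set, ZF is
   clear, and the full-width divide runs instead.  */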
15078 emit_move_insn (scratch, operands[2]);
15079 scratch = expand_simple_binop (mode, IOR, scratch, operands[3],
15080 scratch, 1, OPTAB_DIRECT);
15081 emit_insn (gen_test_ccno_1 (scratch, GEN_INT (-0x100)));
15082 tmp0 = gen_rtx_REG (CCNOmode, FLAGS_REG);
15083 tmp0 = gen_rtx_EQ (VOIDmode, tmp0, const0_rtx);
15084 tmp0 = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp0,
15085 gen_rtx_LABEL_REF (VOIDmode, qimode_label),
15087 insn = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp0));
15088 predict_jump (REG_BR_PROB_BASE * 50 / 100);
15089 JUMP_LABEL (insn) = qimode_label;
15091 /* Generate the original signed/unsigned divmod. */
15092 div = gen_divmod4_1 (operands[0], operands[1],
15093 operands[2], operands[3]);
15096 /* Branch to the end. */
15097 emit_jump_insn (gen_jump (end_label));
15100 /* Generate 8bit unsigned divide. */
15101 emit_label (qimode_label);
15102 /* Don't use operands[0] for result of 8bit divide since not all
15103 registers support QImode ZERO_EXTRACT. */
15104 tmp0 = simplify_gen_subreg (HImode, scratch, mode, 0);
15105 tmp1 = simplify_gen_subreg (HImode, operands[2], mode, 0);
15106 tmp2 = simplify_gen_subreg (QImode, operands[3], mode, 0);
15107 emit_insn (gen_udivmodhiqi3 (tmp0, tmp1, tmp2));
15111 div = gen_rtx_DIV (SImode, operands[2], operands[3]);
15112 mod = gen_rtx_MOD (SImode, operands[2], operands[3]);
15116 div = gen_rtx_UDIV (SImode, operands[2], operands[3]);
15117 mod = gen_rtx_UMOD (SImode, operands[2], operands[3]);
15120 /* Extract remainder from AH. */
15121 tmp1 = gen_rtx_ZERO_EXTRACT (mode, tmp0, GEN_INT (8), GEN_INT (8));
15122 if (REG_P (operands[1]))
15123 insn = emit_move_insn (operands[1], tmp1);
/* Need a new scratch register since the old one holds the result
   of the 8bit divide.  */
15128 scratch = gen_reg_rtx (mode);
15129 emit_move_insn (scratch, tmp1);
15130 insn = emit_move_insn (operands[1], scratch);
15132 set_unique_reg_note (insn, REG_EQUAL, mod);
15134 /* Zero extend quotient from AL. */
15135 tmp1 = gen_lowpart (QImode, tmp0);
15136 insn = emit_insn (gen_zero_extend (operands[0], tmp1));
15137 set_unique_reg_note (insn, REG_EQUAL, div);
15139 emit_label (end_label);
15142 #define LEA_SEARCH_THRESHOLD 12
/* Search backward for a non-AGU definition of register number REGNO1
   or register number REGNO2 in INSN's basic block until we
   1. pass LEA_SEARCH_THRESHOLD instructions, or
   2. reach the BB boundary, or
   3. reach an AGU definition.
   Returns the distance between the non-AGU definition point and INSN.
   If there is no definition point, returns -1.  */
15153 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
15156 basic_block bb = BLOCK_FOR_INSN (insn);
15159 enum attr_type insn_type;
15161 if (insn != BB_HEAD (bb))
15163 rtx prev = PREV_INSN (insn);
15164 while (prev && distance < LEA_SEARCH_THRESHOLD)
15166 if (NONDEBUG_INSN_P (prev))
15169 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
15170 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
15171 && !DF_REF_IS_ARTIFICIAL (*def_rec)
15172 && (regno1 == DF_REF_REGNO (*def_rec)
15173 || regno2 == DF_REF_REGNO (*def_rec)))
15175 insn_type = get_attr_type (prev);
15176 if (insn_type != TYPE_LEA)
15180 if (prev == BB_HEAD (bb))
15182 prev = PREV_INSN (prev);
15186 if (distance < LEA_SEARCH_THRESHOLD)
15190 bool simple_loop = false;
15192 FOR_EACH_EDGE (e, ei, bb->preds)
15195 simple_loop = true;
15201 rtx prev = BB_END (bb);
15204 && distance < LEA_SEARCH_THRESHOLD)
15206 if (NONDEBUG_INSN_P (prev))
15209 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
15210 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
15211 && !DF_REF_IS_ARTIFICIAL (*def_rec)
15212 && (regno1 == DF_REF_REGNO (*def_rec)
15213 || regno2 == DF_REF_REGNO (*def_rec)))
15215 insn_type = get_attr_type (prev);
15216 if (insn_type != TYPE_LEA)
15220 prev = PREV_INSN (prev);
15228 /* get_attr_type may modify recog data. We want to make sure
15229 that recog data is valid for instruction INSN, on which
15230 distance_non_agu_define is called. INSN is unchanged here. */
15231 extract_insn_cached (insn);
/* Return the distance between INSN and the next insn that uses
   register number REGNO0 in a memory address.  Return -1 if no such
   use is found within LEA_SEARCH_THRESHOLD or if REGNO0 is set.  */
15240 distance_agu_use (unsigned int regno0, rtx insn)
15242 basic_block bb = BLOCK_FOR_INSN (insn);
15247 if (insn != BB_END (bb))
15249 rtx next = NEXT_INSN (insn);
15250 while (next && distance < LEA_SEARCH_THRESHOLD)
15252 if (NONDEBUG_INSN_P (next))
15256 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
15257 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
15258 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
15259 && regno0 == DF_REF_REGNO (*use_rec))
15261 /* Return DISTANCE if OP0 is used in memory
15262 address in NEXT. */
15266 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
15267 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
15268 && !DF_REF_IS_ARTIFICIAL (*def_rec)
15269 && regno0 == DF_REF_REGNO (*def_rec))
15271 /* Return -1 if OP0 is set in NEXT. */
15275 if (next == BB_END (bb))
15277 next = NEXT_INSN (next);
15281 if (distance < LEA_SEARCH_THRESHOLD)
15285 bool simple_loop = false;
15287 FOR_EACH_EDGE (e, ei, bb->succs)
15290 simple_loop = true;
15296 rtx next = BB_HEAD (bb);
15299 && distance < LEA_SEARCH_THRESHOLD)
15301 if (NONDEBUG_INSN_P (next))
15305 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
15306 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
15307 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
15308 && regno0 == DF_REF_REGNO (*use_rec))
15310 /* Return DISTANCE if OP0 is used in memory
15311 address in NEXT. */
15315 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
15316 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
15317 && !DF_REF_IS_ARTIFICIAL (*def_rec)
15318 && regno0 == DF_REF_REGNO (*def_rec))
15320 /* Return -1 if OP0 is set in NEXT. */
15325 next = NEXT_INSN (next);
/* Define this macro to tune LEA priority vs. ADD; it takes effect when
   there is a dilemma of choosing LEA or ADD.
   Negative value: ADD is preferred over LEA.
   Positive value: LEA is preferred over ADD.  */
15338 #define IX86_LEA_PRIORITY 2
/* Return true if it is ok to optimize an ADD operation to an LEA
   operation to avoid flag register consumption.  For most processors,
   ADD is faster than LEA.  For processors like ATOM, if the
   destination register of the LEA holds an actual address which will
   be used soon, LEA is better; otherwise ADD is better.  */
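/* For example (an illustrative case on an AGU-oriented target such as
   Atom; the register choices are hypothetical):

       lea    (%ecx,%edx), %eax     ; address computed on the AGU
       mov    (%eax), %ebx          ; result used as an address soon after

   keeps the computation on the AGU, whereas a result feeding ordinary
   ALU arithmetic is usually better produced by ADD.  */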
15347 ix86_lea_for_add_ok (rtx insn, rtx operands[])
15349 unsigned int regno0 = true_regnum (operands[0]);
15350 unsigned int regno1 = true_regnum (operands[1]);
15351 unsigned int regno2 = true_regnum (operands[2]);
15353 /* If a = b + c, (a!=b && a!=c), must use lea form. */
15354 if (regno0 != regno1 && regno0 != regno2)
15357 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
15361 int dist_define, dist_use;
15363 /* Return false if REGNO0 isn't used in memory address. */
15364 dist_use = distance_agu_use (regno0, insn);
15368 dist_define = distance_non_agu_define (regno1, regno2, insn);
15369 if (dist_define <= 0)
/* If this insn has both a backward non-AGU dependence and a forward
   AGU dependence, the one with the shorter distance takes effect.  */
15374 if ((dist_define + IX86_LEA_PRIORITY) < dist_use)
/* Return true if the destination reg of SET_BODY is the shift count
   of USE_BODY.  */
15385 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
15391 /* Retrieve destination of SET_BODY. */
15392 switch (GET_CODE (set_body))
15395 set_dest = SET_DEST (set_body);
15396 if (!set_dest || !REG_P (set_dest))
15400 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
15401 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
15409 /* Retrieve shift count of USE_BODY. */
15410 switch (GET_CODE (use_body))
15413 shift_rtx = XEXP (use_body, 1);
15416 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
15417 if (ix86_dep_by_shift_count_body (set_body,
15418 XVECEXP (use_body, 0, i)))
15426 && (GET_CODE (shift_rtx) == ASHIFT
15427 || GET_CODE (shift_rtx) == LSHIFTRT
15428 || GET_CODE (shift_rtx) == ASHIFTRT
15429 || GET_CODE (shift_rtx) == ROTATE
15430 || GET_CODE (shift_rtx) == ROTATERT))
15432 rtx shift_count = XEXP (shift_rtx, 1);
15434 /* Return true if shift count is dest of SET_BODY. */
15435 if (REG_P (shift_count)
15436 && true_regnum (set_dest) == true_regnum (shift_count))
/* Return true if the destination reg of SET_INSN is the shift count
   of USE_INSN.  */
15447 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
15449 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
15450 PATTERN (use_insn));
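/* For example (illustrative): with SET_INSN "movl %eax, %ecx" and
   USE_INSN "sall %cl, %edx", the destination of SET_INSN is the shift
   count register of USE_INSN, so the predicate above returns true.  */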
15453 /* Return TRUE or FALSE depending on whether the unary operator meets the
15454 appropriate constraints. */
15457 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
15458 enum machine_mode mode ATTRIBUTE_UNUSED,
15459 rtx operands[2] ATTRIBUTE_UNUSED)
/* If one of the operands is memory, source and destination must match.  */
15462 if ((MEM_P (operands[0])
15463 || MEM_P (operands[1]))
15464 && ! rtx_equal_p (operands[0], operands[1]))
15469 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
15470 are ok, keeping in mind the possible movddup alternative. */
15473 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
15475 if (MEM_P (operands[0]))
15476 return rtx_equal_p (operands[0], operands[1 + high]);
15477 if (MEM_P (operands[1]) && MEM_P (operands[2]))
15478 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
15482 /* Post-reload splitter for converting an SF or DFmode value in an
15483 SSE register into an unsigned SImode. */
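/* In effect this computes (a sketch in C terms; the actual sequence
   below is branchless, using a compare mask):

       if (x < 0x1.0p31)
         result = (unsigned int) (int) x;
       else
         result = ((unsigned int) (int) (x - 0x1.0p31)) ^ 0x80000000;  */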
15486 ix86_split_convert_uns_si_sse (rtx operands[])
15488 enum machine_mode vecmode;
15489 rtx value, large, zero_or_two31, input, two31, x;
15491 large = operands[1];
15492 zero_or_two31 = operands[2];
15493 input = operands[3];
15494 two31 = operands[4];
15495 vecmode = GET_MODE (large);
15496 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
15498 /* Load up the value into the low element. We must ensure that the other
15499 elements are valid floats -- zero is the easiest such value. */
15502 if (vecmode == V4SFmode)
15503 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
15505 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
15509 input = gen_rtx_REG (vecmode, REGNO (input));
15510 emit_move_insn (value, CONST0_RTX (vecmode));
15511 if (vecmode == V4SFmode)
15512 emit_insn (gen_sse_movss (value, value, input));
15514 emit_insn (gen_sse2_movsd (value, value, input));
15517 emit_move_insn (large, two31);
15518 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
15520 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
15521 emit_insn (gen_rtx_SET (VOIDmode, large, x));
15523 x = gen_rtx_AND (vecmode, zero_or_two31, large);
15524 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
15526 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
15527 emit_insn (gen_rtx_SET (VOIDmode, value, x));
15529 large = gen_rtx_REG (V4SImode, REGNO (large));
15530 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
15532 x = gen_rtx_REG (V4SImode, REGNO (value));
15533 if (vecmode == V4SFmode)
15534 emit_insn (gen_sse2_cvttps2dq (x, value));
15536 emit_insn (gen_sse2_cvttpd2dq (x, value));
15539 emit_insn (gen_xorv4si3 (value, value, large));
15542 /* Convert an unsigned DImode value into a DFmode, using only SSE.
15543 Expects the 64-bit DImode to be supplied in a pair of integral
15544 registers. Requires SSE2; will use SSE3 if available. For x86_32,
15545 -mfpmath=sse, !optimize_size only. */
15548 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
15550 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
15551 rtx int_xmm, fp_xmm;
15552 rtx biases, exponents;
15555 int_xmm = gen_reg_rtx (V4SImode);
15556 if (TARGET_INTER_UNIT_MOVES)
15557 emit_insn (gen_movdi_to_sse (int_xmm, input));
15558 else if (TARGET_SSE_SPLIT_REGS)
15560 emit_clobber (int_xmm);
15561 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
15565 x = gen_reg_rtx (V2DImode);
15566 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
15567 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
15570 x = gen_rtx_CONST_VECTOR (V4SImode,
15571 gen_rtvec (4, GEN_INT (0x43300000UL),
15572 GEN_INT (0x45300000UL),
15573 const0_rtx, const0_rtx));
15574 exponents = validize_mem (force_const_mem (V4SImode, x));
15576 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
15577 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
15579 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
15580 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
15581 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
15582 (0x1.0p84 + double(fp_value_hi_xmm)).
15583 Note these exponents differ by 32. */
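/* Worked example (illustrative): for the input 0x0000000200000005
   (= 2*2**32 + 5), the low juxtaposition yields 0x1.0p52 + 5.0 and
   the high one yields 0x1.0p84 + 2.0*0x1.0p32; after the bias
   subtraction below the halves become 5.0 and 2.0*0x1.0p32, which
   add up to the exact result.  */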
15585 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
15587 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
15588 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
15589 real_ldexp (&bias_lo_rvt, &dconst1, 52);
15590 real_ldexp (&bias_hi_rvt, &dconst1, 84);
15591 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
15592 x = const_double_from_real_value (bias_hi_rvt, DFmode);
15593 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
15594 biases = validize_mem (force_const_mem (V2DFmode, biases));
15595 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
15597 /* Add the upper and lower DFmode values together. */
15599 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
15602 x = copy_to_mode_reg (V2DFmode, fp_xmm);
15603 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
15604 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
15607 ix86_expand_vector_extract (false, target, fp_xmm, 0);
15610 /* Not used, but eases macroization of patterns. */
15612 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
15613 rtx input ATTRIBUTE_UNUSED)
15615 gcc_unreachable ();
15618 /* Convert an unsigned SImode value into a DFmode. Only currently used
15619 for SSE, but applicable anywhere. */
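/* The trick used below, in C terms (a sketch):

       double r = (double) (int) (x - 0x80000000u) + 0x1.0p31;

   i.e. bias the unsigned value into signed range, convert, then undo
   the bias in the FP domain, where the final addition is exact.  */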
15622 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
15624 REAL_VALUE_TYPE TWO31r;
15627 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
15628 NULL, 1, OPTAB_DIRECT);
15630 fp = gen_reg_rtx (DFmode);
15631 emit_insn (gen_floatsidf2 (fp, x));
15633 real_ldexp (&TWO31r, &dconst1, 31);
15634 x = const_double_from_real_value (TWO31r, DFmode);
15636 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
15638 emit_move_insn (target, x);
15641 /* Convert a signed DImode value into a DFmode. Only used for SSE in
15642 32-bit mode; otherwise we have a direct convert instruction. */
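/* A sketch of the decomposition used below, in C terms:

       r = (double) (int) hi32 * 0x1.0p32 + (double) (unsigned) lo32;

   The conversions and the multiply are exact; only the final addition
   can round.  */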
15645 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
15647 REAL_VALUE_TYPE TWO32r;
15648 rtx fp_lo, fp_hi, x;
15650 fp_lo = gen_reg_rtx (DFmode);
15651 fp_hi = gen_reg_rtx (DFmode);
15653 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
15655 real_ldexp (&TWO32r, &dconst1, 32);
15656 x = const_double_from_real_value (TWO32r, DFmode);
15657 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
15659 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
15661 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
15664 emit_move_insn (target, x);
15667 /* Convert an unsigned SImode value into a SFmode, using only SSE.
15668 For x86_32, -mfpmath=sse, !optimize_size only. */
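/* A sketch in C terms:

       float r = (float) (x >> 16) * 0x1.0p16f + (float) (x & 0xffff);

   Each 16-bit half converts exactly to SFmode, sidestepping the
   signedness problem of a full 32-bit convert.  */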
15670 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
15672 REAL_VALUE_TYPE ONE16r;
15673 rtx fp_hi, fp_lo, int_hi, int_lo, x;
15675 real_ldexp (&ONE16r, &dconst1, 16);
15676 x = const_double_from_real_value (ONE16r, SFmode);
15677 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
15678 NULL, 0, OPTAB_DIRECT);
15679 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
15680 NULL, 0, OPTAB_DIRECT);
15681 fp_hi = gen_reg_rtx (SFmode);
15682 fp_lo = gen_reg_rtx (SFmode);
15683 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
15684 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
15685 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
15687 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
15689 if (!rtx_equal_p (target, fp_hi))
15690 emit_move_insn (target, fp_hi);
15693 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
then replicate the value for all elements of the vector
register.  */
15698 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
15705 v = gen_rtvec (4, value, value, value, value);
15706 return gen_rtx_CONST_VECTOR (V4SImode, v);
15710 v = gen_rtvec (2, value, value);
15711 return gen_rtx_CONST_VECTOR (V2DImode, v);
15715 v = gen_rtvec (4, value, value, value, value);
15717 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
15718 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
15719 return gen_rtx_CONST_VECTOR (V4SFmode, v);
15723 v = gen_rtvec (2, value, value);
15725 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
15726 return gen_rtx_CONST_VECTOR (V2DFmode, v);
15729 gcc_unreachable ();
15733 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
15734 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
15735 for an SSE register. If VECT is true, then replicate the mask for
15736 all elements of the vector register. If INVERT is true, then create
15737 a mask excluding the sign bit. */
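/* For example (illustrative values): DFmode yields the bit pattern
   0x8000000000000000 (or 0x7fffffffffffffff when INVERT), SFmode
   yields 0x80000000 (or 0x7fffffff), optionally replicated across all
   vector elements.  */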
15740 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
15742 enum machine_mode vec_mode, imode;
15743 HOST_WIDE_INT hi, lo;
15748 /* Find the sign bit, sign extended to 2*HWI. */
15754 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
15755 lo = 0x80000000, hi = lo < 0;
15761 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
15762 if (HOST_BITS_PER_WIDE_INT >= 64)
15763 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
15765 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
15770 vec_mode = VOIDmode;
15771 if (HOST_BITS_PER_WIDE_INT >= 64)
15774 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
15781 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
15785 lo = ~lo, hi = ~hi;
15791 mask = immed_double_const (lo, hi, imode);
15793 vec = gen_rtvec (2, v, mask);
15794 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
15795 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
15802 gcc_unreachable ();
15806 lo = ~lo, hi = ~hi;
15808 /* Force this value into the low part of a fp vector constant. */
15809 mask = immed_double_const (lo, hi, imode);
15810 mask = gen_lowpart (mode, mask);
15812 if (vec_mode == VOIDmode)
15813 return force_reg (mode, mask);
15815 v = ix86_build_const_vector (mode, vect, mask);
15816 return force_reg (vec_mode, v);
15819 /* Generate code for floating point ABS or NEG. */
15822 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
15825 rtx mask, set, use, clob, dst, src;
15826 bool use_sse = false;
15827 bool vector_mode = VECTOR_MODE_P (mode);
15828 enum machine_mode elt_mode = mode;
15832 elt_mode = GET_MODE_INNER (mode);
15835 else if (mode == TFmode)
15837 else if (TARGET_SSE_MATH)
15838 use_sse = SSE_FLOAT_MODE_P (mode);
15840 /* NEG and ABS performed with SSE use bitwise mask operations.
15841 Create the appropriate mask now. */
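/* E.g. for DFmode (a sketch): NEG becomes dst = src ^ 0x8000000000000000
   and ABS becomes dst = src & 0x7fffffffffffffff.  */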
15843 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
15852 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
15853 set = gen_rtx_SET (VOIDmode, dst, set);
15858 set = gen_rtx_fmt_e (code, mode, src);
15859 set = gen_rtx_SET (VOIDmode, dst, set);
15862 use = gen_rtx_USE (VOIDmode, mask);
15863 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
15864 emit_insn (gen_rtx_PARALLEL (VOIDmode,
15865 gen_rtvec (3, set, use, clob)));
15872 /* Expand a copysign operation. Special case operand 0 being a constant. */
15875 ix86_expand_copysign (rtx operands[])
15877 enum machine_mode mode;
15878 rtx dest, op0, op1, mask, nmask;
15880 dest = operands[0];
15884 mode = GET_MODE (dest);
15886 if (GET_CODE (op0) == CONST_DOUBLE)
15888 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
15890 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
15891 op0 = simplify_unary_operation (ABS, mode, op0, mode);
15893 if (mode == SFmode || mode == DFmode)
15895 enum machine_mode vmode;
15897 vmode = mode == SFmode ? V4SFmode : V2DFmode;
15899 if (op0 == CONST0_RTX (mode))
15900 op0 = CONST0_RTX (vmode);
15903 rtx v = ix86_build_const_vector (mode, false, op0);
15905 op0 = force_reg (vmode, v);
15908 else if (op0 != CONST0_RTX (mode))
15909 op0 = force_reg (mode, op0);
15911 mask = ix86_build_signbit_mask (mode, 0, 0);
15913 if (mode == SFmode)
15914 copysign_insn = gen_copysignsf3_const;
15915 else if (mode == DFmode)
15916 copysign_insn = gen_copysigndf3_const;
15918 copysign_insn = gen_copysigntf3_const;
15920 emit_insn (copysign_insn (dest, op0, op1, mask));
15924 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
15926 nmask = ix86_build_signbit_mask (mode, 0, 1);
15927 mask = ix86_build_signbit_mask (mode, 0, 0);
15929 if (mode == SFmode)
15930 copysign_insn = gen_copysignsf3_var;
15931 else if (mode == DFmode)
15932 copysign_insn = gen_copysigndf3_var;
15934 copysign_insn = gen_copysigntf3_var;
15936 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
15940 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
15941 be a constant, and so has already been expanded into a vector constant. */
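/* In effect (a sketch): dest = (dest & signbit_mask) | abs_constant,
   i.e. the sign comes from the value already in DEST and the magnitude
   from the constant operand.  */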
15944 ix86_split_copysign_const (rtx operands[])
15946 enum machine_mode mode, vmode;
15947 rtx dest, op0, mask, x;
15949 dest = operands[0];
15951 mask = operands[3];
15953 mode = GET_MODE (dest);
15954 vmode = GET_MODE (mask);
15956 dest = simplify_gen_subreg (vmode, dest, mode, 0);
15957 x = gen_rtx_AND (vmode, dest, mask);
15958 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15960 if (op0 != CONST0_RTX (vmode))
15962 x = gen_rtx_IOR (vmode, dest, op0);
15963 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15967 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
15968 so we have to do two masks. */
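/* In effect (a sketch): dest = (op0 & ~signbit_mask) | (op1 & signbit_mask),
   where the alternatives below differ only in which registers are
   reused for the intermediate AND results.  */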
15971 ix86_split_copysign_var (rtx operands[])
15973 enum machine_mode mode, vmode;
15974 rtx dest, scratch, op0, op1, mask, nmask, x;
15976 dest = operands[0];
15977 scratch = operands[1];
15980 nmask = operands[4];
15981 mask = operands[5];
15983 mode = GET_MODE (dest);
15984 vmode = GET_MODE (mask);
15986 if (rtx_equal_p (op0, op1))
15988 /* Shouldn't happen often (it's useless, obviously), but when it does
15989 we'd generate incorrect code if we continue below. */
15990 emit_move_insn (dest, op0);
15994 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
15996 gcc_assert (REGNO (op1) == REGNO (scratch));
15998 x = gen_rtx_AND (vmode, scratch, mask);
15999 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
16002 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
16003 x = gen_rtx_NOT (vmode, dest);
16004 x = gen_rtx_AND (vmode, x, op0);
16005 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16009 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
16011 x = gen_rtx_AND (vmode, scratch, mask);
16013 else /* alternative 2,4 */
16015 gcc_assert (REGNO (mask) == REGNO (scratch));
16016 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
16017 x = gen_rtx_AND (vmode, scratch, op1);
16019 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
16021 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
16023 dest = simplify_gen_subreg (vmode, op0, mode, 0);
16024 x = gen_rtx_AND (vmode, dest, nmask);
16026 else /* alternative 3,4 */
16028 gcc_assert (REGNO (nmask) == REGNO (dest));
16030 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
16031 x = gen_rtx_AND (vmode, dest, op0);
16033 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16036 x = gen_rtx_IOR (vmode, dest, scratch);
16037 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16040 /* Return TRUE or FALSE depending on whether the first SET in INSN
16041 has source and destination with matching CC modes, and that the
16042 CC mode is at least as constrained as REQ_MODE. */
16045 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
16048 enum machine_mode set_mode;
16050 set = PATTERN (insn);
16051 if (GET_CODE (set) == PARALLEL)
16052 set = XVECEXP (set, 0, 0);
16053 gcc_assert (GET_CODE (set) == SET);
16054 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
16056 set_mode = GET_MODE (SET_DEST (set));
16060 if (req_mode != CCNOmode
16061 && (req_mode != CCmode
16062 || XEXP (SET_SRC (set), 1) != const0_rtx))
16066 if (req_mode == CCGCmode)
16070 if (req_mode == CCGOCmode || req_mode == CCNOmode)
16074 if (req_mode == CCZmode)
16085 gcc_unreachable ();
16088 return GET_MODE (SET_SRC (set)) == set_mode;
16091 /* Generate insn patterns to do an integer compare of OPERANDS. */
16094 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
16096 enum machine_mode cmpmode;
16099 cmpmode = SELECT_CC_MODE (code, op0, op1);
16100 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
16102 /* This is very simple, but making the interface the same as in the
16103 FP case makes the rest of the code easier. */
16104 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
16105 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
16107 /* Return the test that should be put into the flags user, i.e.
16108 the bcc, scc, or cmov instruction. */
16109 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
16112 /* Figure out whether to use ordered or unordered fp comparisons.
16113 Return the appropriate mode to use. */
16116 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
/* ??? In order to make all comparisons reversible, we do all comparisons
   non-trapping when compiling for IEEE.  Once gcc is able to distinguish
   all forms of trapping and nontrapping comparisons, we can make inequality
   comparisons trapping again, since that results in better code when using
   FCOM based compares.  */
16123 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
16127 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
16129 enum machine_mode mode = GET_MODE (op0);
16131 if (SCALAR_FLOAT_MODE_P (mode))
16133 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
16134 return ix86_fp_compare_mode (code);
16139 /* Only zero flag is needed. */
16140 case EQ: /* ZF=0 */
16141 case NE: /* ZF!=0 */
16143 /* Codes needing carry flag. */
16144 case GEU: /* CF=0 */
16145 case LTU: /* CF=1 */
16146 /* Detect overflow checks. They need just the carry flag. */
16147 if (GET_CODE (op0) == PLUS
16148 && rtx_equal_p (op1, XEXP (op0, 0)))
16152 case GTU: /* CF=0 & ZF=0 */
16153 case LEU: /* CF=1 | ZF=1 */
16154 /* Detect overflow checks. They need just the carry flag. */
16155 if (GET_CODE (op0) == MINUS
16156 && rtx_equal_p (op1, XEXP (op0, 0)))
16160 /* Codes possibly doable only with sign flag when
16161 comparing against zero. */
16162 case GE: /* SF=OF or SF=0 */
16163 case LT: /* SF<>OF or SF=1 */
16164 if (op1 == const0_rtx)
16167 /* For other cases Carry flag is not required. */
16169 /* Codes doable only with sign flag when comparing
16170 against zero, but we miss jump instruction for it
16171 so we need to use relational tests against overflow
16172 that thus needs to be zero. */
16173 case GT: /* ZF=0 & SF=OF */
16174 case LE: /* ZF=1 | SF<>OF */
16175 if (op1 == const0_rtx)
/* The strcmp pattern does (use flags), and combine may ask us for a
   proper mode.  */
16184 gcc_unreachable ();
16188 /* Return the fixed registers used for condition codes. */
16191 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
16198 /* If two condition code modes are compatible, return a condition code
mode which is compatible with both.  Otherwise, return
VOIDmode.  */
16202 static enum machine_mode
16203 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
16208 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
16211 if ((m1 == CCGCmode && m2 == CCGOCmode)
16212 || (m1 == CCGOCmode && m2 == CCGCmode))
16218 gcc_unreachable ();
/* These are only compatible with themselves, which we already
   know.  */
/* Return a comparison we can do that is equivalent to
   swap_condition (code), except possibly for orderedness.
   Never change orderedness if TARGET_IEEE_FP, returning
   UNKNOWN in that case if necessary.  */
16260 static enum rtx_code
16261 ix86_fp_swap_condition (enum rtx_code code)
16265 case GT: /* GTU - CF=0 & ZF=0 */
16266 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
16267 case GE: /* GEU - CF=0 */
16268 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
16269 case UNLT: /* LTU - CF=1 */
16270 return TARGET_IEEE_FP ? UNKNOWN : GT;
16271 case UNLE: /* LEU - CF=1 | ZF=1 */
16272 return TARGET_IEEE_FP ? UNKNOWN : GE;
16274 return swap_condition (code);
/* Return the cost of comparison CODE using the best strategy for
   performance.  All following functions use the number of instructions
   as a cost metric.  In the future this should be tweaked to compute
   bytes for optimize_size and take into account the performance of
   various instructions on various CPUs.  */
16284 ix86_fp_comparison_cost (enum rtx_code code)
16288 /* The cost of code using bit-twiddling on %ah. */
16305 arith_cost = TARGET_IEEE_FP ? 5 : 4;
16309 arith_cost = TARGET_IEEE_FP ? 6 : 4;
16312 gcc_unreachable ();
16315 switch (ix86_fp_comparison_strategy (code))
16317 case IX86_FPCMP_COMI:
16318 return arith_cost > 4 ? 3 : 2;
16319 case IX86_FPCMP_SAHF:
16320 return arith_cost > 4 ? 4 : 3;
/* Return the strategy to use for floating-point.  We assume that fcomi
   is always preferable where available, since that is also true when
   looking at size (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for
   fnstsw+test).  */
16330 enum ix86_fpcmp_strategy
16331 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
16333 /* Do fcomi/sahf based test when profitable. */
16336 return IX86_FPCMP_COMI;
16338 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
16339 return IX86_FPCMP_SAHF;
16341 return IX86_FPCMP_ARITH;
16344 /* Swap, force into registers, or otherwise massage the two operands
16345 to a fp comparison. The operands are updated in place; the new
16346 comparison code is returned. */
16348 static enum rtx_code
16349 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
16351 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
16352 rtx op0 = *pop0, op1 = *pop1;
16353 enum machine_mode op_mode = GET_MODE (op0);
16354 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
/* All of the unordered compare instructions only work on registers.
   The same is true of the fcomi compare instructions.  The XFmode
   compare instructions require registers except when comparing
   against zero or when converting operand 1 from fixed point to
   floating point.  */
16363 && (fpcmp_mode == CCFPUmode
16364 || (op_mode == XFmode
16365 && ! (standard_80387_constant_p (op0) == 1
16366 || standard_80387_constant_p (op1) == 1)
16367 && GET_CODE (op1) != FLOAT)
16368 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
16370 op0 = force_reg (op_mode, op0);
16371 op1 = force_reg (op_mode, op1);
16375 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
16376 things around if they appear profitable, otherwise force op0
16377 into a register. */
16379 if (standard_80387_constant_p (op0) == 0
16381 && ! (standard_80387_constant_p (op1) == 0
16384 enum rtx_code new_code = ix86_fp_swap_condition (code);
16385 if (new_code != UNKNOWN)
16388 tmp = op0, op0 = op1, op1 = tmp;
16394 op0 = force_reg (op_mode, op0);
16396 if (CONSTANT_P (op1))
16398 int tmp = standard_80387_constant_p (op1);
16400 op1 = validize_mem (force_const_mem (op_mode, op1));
16404 op1 = force_reg (op_mode, op1);
16407 op1 = force_reg (op_mode, op1);
16411 /* Try to rearrange the comparison to make it cheaper. */
16412 if (ix86_fp_comparison_cost (code)
16413 > ix86_fp_comparison_cost (swap_condition (code))
16414 && (REG_P (op1) || can_create_pseudo_p ()))
16417 tmp = op0, op0 = op1, op1 = tmp;
16418 code = swap_condition (code);
16420 op0 = force_reg (op_mode, op0);
/* Convert the comparison codes we use to represent FP comparisons to
   integer codes that will result in a proper branch.  Return UNKNOWN
   if no such code is available.  */
16433 ix86_fp_compare_code_to_integer (enum rtx_code code)
16462 /* Generate insn patterns to do a floating point compare of OPERANDS. */
16465 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
16467 enum machine_mode fpcmp_mode, intcmp_mode;
16470 fpcmp_mode = ix86_fp_compare_mode (code);
16471 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
16473 /* Do fcomi/sahf based test when profitable. */
16474 switch (ix86_fp_comparison_strategy (code))
16476 case IX86_FPCMP_COMI:
16477 intcmp_mode = fpcmp_mode;
16478 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
16479 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
16484 case IX86_FPCMP_SAHF:
16485 intcmp_mode = fpcmp_mode;
16486 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
16487 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
16491 scratch = gen_reg_rtx (HImode);
16492 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
16493 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
16496 case IX86_FPCMP_ARITH:
16497 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
16498 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
16499 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
16501 scratch = gen_reg_rtx (HImode);
16502 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
16504 /* In the unordered case, we have to check C2 for NaN's, which
16505 doesn't happen to work out to anything nice combination-wise.
16506 So do some bit twiddling on the value we've got in AH to come
16507 up with an appropriate set of condition codes. */
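/* Background for the magic constants below: after fnstsw, AH holds
   status-word bits 8-15, so there C0 = 0x01, C2 = 0x04 and C3 = 0x40;
   0x45 therefore tests C0|C2|C3 and 0x05 tests C0|C2.  An unordered
   (NaN) result sets C0, C2 and C3.  */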
16509 intcmp_mode = CCNOmode;
16514 if (code == GT || !TARGET_IEEE_FP)
16516 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
16521 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
16522 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
16523 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
16524 intcmp_mode = CCmode;
16530 if (code == LT && TARGET_IEEE_FP)
16532 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
16533 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
16534 intcmp_mode = CCmode;
16539 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
16545 if (code == GE || !TARGET_IEEE_FP)
16547 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
16552 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
16553 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
16559 if (code == LE && TARGET_IEEE_FP)
16561 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
16562 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
16563 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
16564 intcmp_mode = CCmode;
16569 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
16575 if (code == EQ && TARGET_IEEE_FP)
16577 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
16578 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
16579 intcmp_mode = CCmode;
16584 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
16590 if (code == NE && TARGET_IEEE_FP)
16592 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
16593 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
16599 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
16605 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
16609 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
16614 gcc_unreachable ();
16622 /* Return the test that should be put into the flags user, i.e.
16623 the bcc, scc, or cmov instruction. */
16624 return gen_rtx_fmt_ee (code, VOIDmode,
16625 gen_rtx_REG (intcmp_mode, FLAGS_REG),
16630 ix86_expand_compare (enum rtx_code code, rtx op0, rtx op1)
16634 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
16635 ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
16637 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
16639 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
16640 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
16643 ret = ix86_expand_int_compare (code, op0, op1);
16649 ix86_expand_branch (enum rtx_code code, rtx op0, rtx op1, rtx label)
16651 enum machine_mode mode = GET_MODE (op0);
16663 tmp = ix86_expand_compare (code, op0, op1);
16664 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
16665 gen_rtx_LABEL_REF (VOIDmode, label),
16667 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
16674 /* Expand DImode branch into multiple compare+branch. */
16676 rtx lo[2], hi[2], label2;
16677 enum rtx_code code1, code2, code3;
16678 enum machine_mode submode;
16680 if (CONSTANT_P (op0) && !CONSTANT_P (op1))
16682 tmp = op0, op0 = op1, op1 = tmp;
16683 code = swap_condition (code);
16686 split_double_mode (mode, &op0, 1, lo+0, hi+0);
16687 split_double_mode (mode, &op1, 1, lo+1, hi+1);
16689 submode = mode == DImode ? SImode : DImode;
16691 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
16692 avoid two branches. This costs one extra insn, so disable when
16693 optimizing for size. */
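/* I.e. (a == b) <=> (((hi(a) ^ hi(b)) | (lo(a) ^ lo(b))) == 0).  */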
16695 if ((code == EQ || code == NE)
16696 && (!optimize_insn_for_size_p ()
16697 || hi[1] == const0_rtx || lo[1] == const0_rtx))
16702 if (hi[1] != const0_rtx)
16703 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
16704 NULL_RTX, 0, OPTAB_WIDEN);
16707 if (lo[1] != const0_rtx)
16708 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
16709 NULL_RTX, 0, OPTAB_WIDEN);
16711 tmp = expand_binop (submode, ior_optab, xor1, xor0,
16712 NULL_RTX, 0, OPTAB_WIDEN);
16714 ix86_expand_branch (code, tmp, const0_rtx, label);
/* Otherwise, if we are doing a less-than or greater-than-or-equal
   comparison, op1 is a constant, and the low word is zero, then we
   can just examine the high word.  Similarly for low word -1 and
   less-than-or-equal or greater-than.  */
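/* For example: when lo(b) == 0, a < b reduces to hi(a) < hi(b), since
   the low words cannot change the outcome (an illustrative identity;
   the analogous low word -1 reasoning gives the LE/GT cases).  */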
16723 if (CONST_INT_P (hi[1]))
16726 case LT: case LTU: case GE: case GEU:
16727 if (lo[1] == const0_rtx)
16729 ix86_expand_branch (code, hi[0], hi[1], label);
16733 case LE: case LEU: case GT: case GTU:
16734 if (lo[1] == constm1_rtx)
16736 ix86_expand_branch (code, hi[0], hi[1], label);
16744 /* Otherwise, we need two or three jumps. */
16746 label2 = gen_label_rtx ();
16749 code2 = swap_condition (code);
16750 code3 = unsigned_condition (code);
16754 case LT: case GT: case LTU: case GTU:
16757 case LE: code1 = LT; code2 = GT; break;
16758 case GE: code1 = GT; code2 = LT; break;
16759 case LEU: code1 = LTU; code2 = GTU; break;
16760 case GEU: code1 = GTU; code2 = LTU; break;
16762 case EQ: code1 = UNKNOWN; code2 = NE; break;
16763 case NE: code2 = UNKNOWN; break;
16766 gcc_unreachable ();
16771 * if (hi(a) < hi(b)) goto true;
16772 * if (hi(a) > hi(b)) goto false;
16773 * if (lo(a) < lo(b)) goto true;
16777 if (code1 != UNKNOWN)
16778 ix86_expand_branch (code1, hi[0], hi[1], label);
16779 if (code2 != UNKNOWN)
16780 ix86_expand_branch (code2, hi[0], hi[1], label2);
16782 ix86_expand_branch (code3, lo[0], lo[1], label);
16784 if (code2 != UNKNOWN)
16785 emit_label (label2);
16790 gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC);
16795 /* Split branch based on floating point condition. */
16797 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
16798 rtx target1, rtx target2, rtx tmp, rtx pushed)
16803 if (target2 != pc_rtx)
16806 code = reverse_condition_maybe_unordered (code);
16811 condition = ix86_expand_fp_compare (code, op1, op2,
16814 /* Remove pushed operand from stack. */
16816 ix86_free_from_memory (GET_MODE (pushed));
16818 i = emit_jump_insn (gen_rtx_SET
16820 gen_rtx_IF_THEN_ELSE (VOIDmode,
16821 condition, target1, target2)));
16822 if (split_branch_probability >= 0)
16823 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
16827 ix86_expand_setcc (rtx dest, enum rtx_code code, rtx op0, rtx op1)
16831 gcc_assert (GET_MODE (dest) == QImode);
16833 ret = ix86_expand_compare (code, op0, op1);
16834 PUT_MODE (ret, QImode);
16835 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
16838 /* Expand comparison setting or clearing carry flag. Return true when
16839 successful and set pop for the operation. */
16841 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
16843 enum machine_mode mode =
16844 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
/* Do not handle double-mode compares, which go through a special path.  */
16847 if (mode == (TARGET_64BIT ? TImode : DImode))
16850 if (SCALAR_FLOAT_MODE_P (mode))
16852 rtx compare_op, compare_seq;
16854 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
/* Shortcut: the following common codes never translate
   into carry-flag compares.  */
16858 if (code == EQ || code == NE || code == UNEQ || code == LTGT
16859 || code == ORDERED || code == UNORDERED)
16862 /* These comparisons require zero flag; swap operands so they won't. */
16863 if ((code == GT || code == UNLE || code == LE || code == UNGT)
16864 && !TARGET_IEEE_FP)
16869 code = swap_condition (code);
/* Try to expand the comparison and verify that we end up with a
   carry-flag-based comparison.  This fails only when we decide to
   expand the comparison using arithmetic, which is not a common
   scenario.  */
16877 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
16878 compare_seq = get_insns ();
16881 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
16882 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
16883 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
16885 code = GET_CODE (compare_op);
16887 if (code != LTU && code != GEU)
16890 emit_insn (compare_seq);
16895 if (!INTEGRAL_MODE_P (mode))
16904 /* Convert a==0 into (unsigned)a<1. */
16907 if (op1 != const0_rtx)
16910 code = (code == EQ ? LTU : GEU);
16913 /* Convert a>b into b<a or a>=b-1. */
16916 if (CONST_INT_P (op1))
16918 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
/* Bail out on overflow.  We can still swap the operands, but that
   would force loading of the constant into a register.  */
16921 if (op1 == const0_rtx
16922 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
16924 code = (code == GTU ? GEU : LTU);
16931 code = (code == GTU ? LTU : GEU);
16935 /* Convert a>=0 into (unsigned)a<0x80000000. */
16938 if (mode == DImode || op1 != const0_rtx)
16940 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
16941 code = (code == LT ? GEU : LTU);
16945 if (mode == DImode || op1 != constm1_rtx)
16947 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
16948 code = (code == LE ? GEU : LTU);
/* Swapping operands may cause a constant to appear as the first operand.  */
16955 if (!nonimmediate_operand (op0, VOIDmode))
16957 if (!can_create_pseudo_p ())
16959 op0 = force_reg (mode, op0);
16961 *pop = ix86_expand_compare (code, op0, op1);
16962 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
16967 ix86_expand_int_movcc (rtx operands[])
16969 enum rtx_code code = GET_CODE (operands[1]), compare_code;
16970 rtx compare_seq, compare_op;
16971 enum machine_mode mode = GET_MODE (operands[0]);
16972 bool sign_bit_compare_p = false;
16973 rtx op0 = XEXP (operands[1], 0);
16974 rtx op1 = XEXP (operands[1], 1);
16977 compare_op = ix86_expand_compare (code, op0, op1);
16978 compare_seq = get_insns ();
16981 compare_code = GET_CODE (compare_op);
16983 if ((op1 == const0_rtx && (code == GE || code == LT))
16984 || (op1 == constm1_rtx && (code == GT || code == LE)))
16985 sign_bit_compare_p = true;
16987 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
16988 HImode insns, we'd be swallowed in word prefix ops. */
16990 if ((mode != HImode || TARGET_FAST_PREFIX)
16991 && (mode != (TARGET_64BIT ? TImode : DImode))
16992 && CONST_INT_P (operands[2])
16993 && CONST_INT_P (operands[3]))
16995 rtx out = operands[0];
16996 HOST_WIDE_INT ct = INTVAL (operands[2]);
16997 HOST_WIDE_INT cf = INTVAL (operands[3]);
16998 HOST_WIDE_INT diff;
/* Sign bit compares are better done using shifts than by using the
   sbb insn.  */
17003 if (sign_bit_compare_p
17004 || ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
17006 /* Detect overlap between destination and compare sources. */
17009 if (!sign_bit_compare_p)
17012 bool fpcmp = false;
17014 compare_code = GET_CODE (compare_op);
17016 flags = XEXP (compare_op, 0);
17018 if (GET_MODE (flags) == CCFPmode
17019 || GET_MODE (flags) == CCFPUmode)
17023 = ix86_fp_compare_code_to_integer (compare_code);
/* To simplify the rest of the code, restrict to the GEU case.  */
17027 if (compare_code == LTU)
17029 HOST_WIDE_INT tmp = ct;
17032 compare_code = reverse_condition (compare_code);
17033 code = reverse_condition (code);
17038 PUT_CODE (compare_op,
17039 reverse_condition_maybe_unordered
17040 (GET_CODE (compare_op)));
17042 PUT_CODE (compare_op,
17043 reverse_condition (GET_CODE (compare_op)));
17047 if (reg_overlap_mentioned_p (out, op0)
17048 || reg_overlap_mentioned_p (out, op1))
17049 tmp = gen_reg_rtx (mode);
17051 if (mode == DImode)
17052 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
17054 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
17055 flags, compare_op));
17059 if (code == GT || code == GE)
17060 code = reverse_condition (code);
17063 HOST_WIDE_INT tmp = ct;
17068 tmp = emit_store_flag (tmp, code, op0, op1, VOIDmode, 0, -1);
17081 tmp = expand_simple_binop (mode, PLUS,
17083 copy_rtx (tmp), 1, OPTAB_DIRECT);
17094 tmp = expand_simple_binop (mode, IOR,
17096 copy_rtx (tmp), 1, OPTAB_DIRECT);
17098 else if (diff == -1 && ct)
17108 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
17110 tmp = expand_simple_binop (mode, PLUS,
17111 copy_rtx (tmp), GEN_INT (cf),
17112 copy_rtx (tmp), 1, OPTAB_DIRECT);
17120 * andl cf - ct, dest
17130 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
17133 tmp = expand_simple_binop (mode, AND,
17135 gen_int_mode (cf - ct, mode),
17136 copy_rtx (tmp), 1, OPTAB_DIRECT);
17138 tmp = expand_simple_binop (mode, PLUS,
17139 copy_rtx (tmp), GEN_INT (ct),
17140 copy_rtx (tmp), 1, OPTAB_DIRECT);
17143 if (!rtx_equal_p (tmp, out))
17144 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
17151 enum machine_mode cmp_mode = GET_MODE (op0);
17154 tmp = ct, ct = cf, cf = tmp;
17157 if (SCALAR_FLOAT_MODE_P (cmp_mode))
17159 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
/* We may be reversing an unordered compare to a normal compare, which
   is not valid in general (we may convert a non-trapping condition
   to a trapping one); however, on i386 we currently emit all
   comparisons unordered.  */
17165 compare_code = reverse_condition_maybe_unordered (compare_code);
17166 code = reverse_condition_maybe_unordered (code);
17170 compare_code = reverse_condition (compare_code);
17171 code = reverse_condition (code);
17175 compare_code = UNKNOWN;
17176 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
17177 && CONST_INT_P (op1))
17179 if (op1 == const0_rtx
17180 && (code == LT || code == GE))
17181 compare_code = code;
17182 else if (op1 == constm1_rtx)
17186 else if (code == GT)
17191 /* Optimize dest = (op0 < 0) ? -1 : cf. */
17192 if (compare_code != UNKNOWN
17193 && GET_MODE (op0) == GET_MODE (out)
17194 && (cf == -1 || ct == -1))
/* If the lea code below could be used, only optimize
   if it results in a 2-insn sequence.  */
17199 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
17200 || diff == 3 || diff == 5 || diff == 9)
17201 || (compare_code == LT && ct == -1)
17202 || (compare_code == GE && cf == -1))
/* notl op1 (if necessary)
   sarl $31, op1
   orl cf, op1  */
17213 code = reverse_condition (code);
17216 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
17218 out = expand_simple_binop (mode, IOR,
17220 out, 1, OPTAB_DIRECT);
17221 if (out != operands[0])
17222 emit_move_insn (operands[0], out);
17229 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
17230 || diff == 3 || diff == 5 || diff == 9)
17231 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
17233 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
17239 * lea cf(dest*(ct-cf)),dest
17243 * This also catches the degenerate setcc-only case.
17249 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);
/* On x86_64 the lea instruction operates on Pmode, so we need
   to get the arithmetic done in the proper mode to match.  */
17255 tmp = copy_rtx (out);
17259 out1 = copy_rtx (out);
17260 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
17264 tmp = gen_rtx_PLUS (mode, tmp, out1);
17270 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
17273 if (!rtx_equal_p (tmp, out))
17276 out = force_operand (tmp, copy_rtx (out));
17278 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
17280 if (!rtx_equal_p (out, operands[0]))
17281 emit_move_insn (operands[0], copy_rtx (out));
* General case:                  Jumpful:
*   xorl dest,dest               cmpl op1, op2
*   cmpl op1, op2                movl ct, dest
*   setcc dest                   jcc 1f
*   decl dest                    movl cf, dest
*   andl (cf-ct),dest            1:
*   addl ct,dest
*
*   Size 20.                     Size 14.
17297 * This is reasonably steep, but branch mispredict costs are
17298 * high on modern cpus, so consider failing only if optimizing
17302 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
17303 && BRANCH_COST (optimize_insn_for_speed_p (),
17308 enum machine_mode cmp_mode = GET_MODE (op0);
17313 if (SCALAR_FLOAT_MODE_P (cmp_mode))
17315 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
/* We may be reversing an unordered compare to a normal compare,
   which is not valid in general (we may convert a non-trapping
   condition to a trapping one); however, on i386 we currently
   emit all comparisons unordered.  */
17321 code = reverse_condition_maybe_unordered (code);
17325 code = reverse_condition (code);
17326 if (compare_code != UNKNOWN)
17327 compare_code = reverse_condition (compare_code);
17331 if (compare_code != UNKNOWN)
/* notl op1 (if needed)
   sarl $31, op1
   andl (cf-ct), op1
   addl ct, op1

   For x < 0 (resp. x <= -1) there will be no notl,
   so if possible swap the constants to get rid of the
   complement.
   True/false will be -1/0 while the code below (store flag
   followed by decrement) is 0/-1, so the constants need
   to be exchanged once more.  */
17345 if (compare_code == GE || !cf)
17347 code = reverse_condition (code);
17352 HOST_WIDE_INT tmp = cf;
17357 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
17361 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);
17363 out = expand_simple_binop (mode, PLUS, copy_rtx (out),
17365 copy_rtx (out), 1, OPTAB_DIRECT);
17368 out = expand_simple_binop (mode, AND, copy_rtx (out),
17369 gen_int_mode (cf - ct, mode),
17370 copy_rtx (out), 1, OPTAB_DIRECT);
17372 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
17373 copy_rtx (out), 1, OPTAB_DIRECT);
17374 if (!rtx_equal_p (out, operands[0]))
17375 emit_move_insn (operands[0], copy_rtx (out));
17381 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
/* Try a few more things with specific constants and a variable.  */
17386 rtx var, orig_out, out, tmp;
17388 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
17391 /* If one of the two operands is an interesting constant, load a
17392 constant with the above and mask it in with a logical operation. */
17394 if (CONST_INT_P (operands[2]))
17397 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
17398 operands[3] = constm1_rtx, op = and_optab;
17399 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
17400 operands[3] = const0_rtx, op = ior_optab;
17404 else if (CONST_INT_P (operands[3]))
17407 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
17408 operands[2] = constm1_rtx, op = and_optab;
else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
17410 operands[2] = const0_rtx, op = ior_optab;
17417 orig_out = operands[0];
17418 tmp = gen_reg_rtx (mode);
17421 /* Recurse to get the constant loaded. */
17422 if (ix86_expand_int_movcc (operands) == 0)
17425 /* Mask in the interesting variable. */
17426 out = expand_binop (mode, op, var, tmp, orig_out, 0,
17428 if (!rtx_equal_p (out, orig_out))
17429 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
/* For comparison with the above:
   movl cf, dest
   movl ct, tmp
   cmpl op1, op2
   cmovcc tmp, dest

   Size 15.  */
17445 if (! nonimmediate_operand (operands[2], mode))
17446 operands[2] = force_reg (mode, operands[2]);
17447 if (! nonimmediate_operand (operands[3], mode))
17448 operands[3] = force_reg (mode, operands[3]);
17450 if (! register_operand (operands[2], VOIDmode)
17452 || ! register_operand (operands[3], VOIDmode)))
17453 operands[2] = force_reg (mode, operands[2]);
17456 && ! register_operand (operands[3], VOIDmode))
17457 operands[3] = force_reg (mode, operands[3]);
17459 emit_insn (compare_seq);
17460 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
17461 gen_rtx_IF_THEN_ELSE (mode,
17462 compare_op, operands[2],
17467 /* Swap, force into registers, or otherwise massage the two operands
17468 to an sse comparison with a mask result. Thus we differ a bit from
17469 ix86_prepare_fp_compare_args which expects to produce a flags result.
17471 The DEST operand exists to help determine whether to commute commutative
17472 operators. The POP0/POP1 operands are updated in place. The new
17473 comparison code is returned, or UNKNOWN if not implementable. */
17475 static enum rtx_code
17476 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
17477 rtx *pop0, rtx *pop1)
17485 /* We have no LTGT as an operator. We could implement it with
17486 NE & ORDERED, but this requires an extra temporary. It's
17487 not clear that it's worth it. */
17494 /* These are supported directly. */
17501 /* For commutative operators, try to canonicalize the destination
17502 operand to be first in the comparison - this helps reload to
17503 avoid extra moves. */
17504 if (!dest || !rtx_equal_p (dest, *pop1))
17512 /* These are not supported directly. Swap the comparison operands
17513 to transform into something that is supported. */
17517 code = swap_condition (code);
17521 gcc_unreachable ();
17527 /* Detect conditional moves that exactly match min/max operational
17528 semantics. Note that this is IEEE safe, as long as we don't
17529 interchange the operands.
17531 Returns FALSE if this conditional move doesn't match a MIN/MAX,
17532 and TRUE if the operation is successful and instructions are emitted. */
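/* E.g. the conditional move "x < y ? x : y" maps to minss/minsd (SMIN),
   which is IEEE-safe here precisely because the operand order of the
   comparison and of the move arms is preserved.  */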
17535 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
17536 rtx cmp_op1, rtx if_true, rtx if_false)
17538 enum machine_mode mode;
17544 else if (code == UNGE)
17547 if_true = if_false;
17553 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
17555 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
17560 mode = GET_MODE (dest);
17562 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
17563 but MODE may be a vector mode and thus not appropriate. */
17564 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
17566 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
17569 if_true = force_reg (mode, if_true);
17570 v = gen_rtvec (2, if_true, if_false);
17571 tmp = gen_rtx_UNSPEC (mode, v, u);
17575 code = is_min ? SMIN : SMAX;
17576 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
17579 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
17583 /* Expand an sse vector comparison. Return the register with the result. */
17586 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
17587 rtx op_true, rtx op_false)
17589 enum machine_mode mode = GET_MODE (dest);
17592 cmp_op0 = force_reg (mode, cmp_op0);
17593 if (!nonimmediate_operand (cmp_op1, mode))
17594 cmp_op1 = force_reg (mode, cmp_op1);
17597 || reg_overlap_mentioned_p (dest, op_true)
17598 || reg_overlap_mentioned_p (dest, op_false))
17599 dest = gen_reg_rtx (mode);
17601 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
17602 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17607 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
17608 operations. This is used for both scalar and vector conditional moves. */
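/* The general pattern is (a sketch):
   dest = (cmp & op_true) | (~cmp & op_false), with the single-AND
   shortcuts below when one arm is zero, and XOP's pcmov performing
   the whole selection in one instruction.  */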
17611 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
17613 enum machine_mode mode = GET_MODE (dest);
17616 if (op_false == CONST0_RTX (mode))
17618 op_true = force_reg (mode, op_true);
17619 x = gen_rtx_AND (mode, cmp, op_true);
17620 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17622 else if (op_true == CONST0_RTX (mode))
17624 op_false = force_reg (mode, op_false);
17625 x = gen_rtx_NOT (mode, cmp);
17626 x = gen_rtx_AND (mode, x, op_false);
17627 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17629 else if (TARGET_XOP)
17631 rtx pcmov = gen_rtx_SET (mode, dest,
17632 gen_rtx_IF_THEN_ELSE (mode, cmp,
17639 op_true = force_reg (mode, op_true);
17640 op_false = force_reg (mode, op_false);
17642 t2 = gen_reg_rtx (mode);
17644 t3 = gen_reg_rtx (mode);
17648 x = gen_rtx_AND (mode, op_true, cmp);
17649 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
17651 x = gen_rtx_NOT (mode, cmp);
17652 x = gen_rtx_AND (mode, x, op_false);
17653 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
17655 x = gen_rtx_IOR (mode, t3, t2);
17656 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17660 /* Expand a floating-point conditional move. Return true if successful. */
17663 ix86_expand_fp_movcc (rtx operands[])
17665 enum machine_mode mode = GET_MODE (operands[0]);
17666 enum rtx_code code = GET_CODE (operands[1]);
17667 rtx tmp, compare_op;
17668 rtx op0 = XEXP (operands[1], 0);
17669 rtx op1 = XEXP (operands[1], 1);
17671 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
17673 enum machine_mode cmode;
17675 /* Since we've no cmove for sse registers, don't force bad register
17676 allocation just to gain access to it. Deny movcc when the
17677 comparison mode doesn't match the move mode. */
17678 cmode = GET_MODE (op0);
17679 if (cmode == VOIDmode)
17680 cmode = GET_MODE (op1);
17684 code = ix86_prepare_sse_fp_compare_args (operands[0], code, &op0, &op1);
17685 if (code == UNKNOWN)
17688 if (ix86_expand_sse_fp_minmax (operands[0], code, op0, op1,
17689 operands[2], operands[3]))
17692 tmp = ix86_expand_sse_cmp (operands[0], code, op0, op1,
17693 operands[2], operands[3]);
17694 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
17698 /* The floating point conditional move instructions don't directly
17699 support conditions resulting from a signed integer comparison. */
17701 compare_op = ix86_expand_compare (code, op0, op1);
17702 if (!fcmov_comparison_operator (compare_op, VOIDmode))
17704 tmp = gen_reg_rtx (QImode);
17705 ix86_expand_setcc (tmp, code, op0, op1);
17707 compare_op = ix86_expand_compare (NE, tmp, const0_rtx);
17710 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
17711 gen_rtx_IF_THEN_ELSE (mode, compare_op,
17712 operands[2], operands[3])));
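/* Sketch (hypothetical helper, not GCC code) of the fallback above: the
   FCMOV variants test only CF, ZF and PF, so a signed comparison such as
   A < B (which needs SF/OF) is first reduced to a 0/1 value with setcc,
   after which a compare against zero gives FCMOV a condition it can use.  */
static double
signed_fp_select_sketch (int a, int b, double t, double f)
{
  unsigned char tmp = (a < b);	/* setl */
  return tmp != 0 ? t : f;	/* test; fcmovne */
}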
17717 /* Expand a floating-point vector conditional move; a vcond operation
17718 rather than a movcc operation. */
17721 ix86_expand_fp_vcond (rtx operands[])
17723 enum rtx_code code = GET_CODE (operands[3]);
17726 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
17727 &operands[4], &operands[5]);
17728 if (code == UNKNOWN)
17731 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
17732 operands[5], operands[1], operands[2]))
17735 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
17736 operands[1], operands[2]);
17737 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
17741 /* Expand a signed/unsigned integral vector conditional move. */
17744 ix86_expand_int_vcond (rtx operands[])
17746 enum machine_mode mode = GET_MODE (operands[0]);
17747 enum rtx_code code = GET_CODE (operands[3]);
17748 bool negate = false;
17751 cop0 = operands[4];
17752 cop1 = operands[5];
17754 /* XOP supports all of the comparisons on all vector int types. */
17757 /* Canonicalize the comparison to EQ, GT, GTU. */
17768 code = reverse_condition (code);
17774 code = reverse_condition (code);
17780 code = swap_condition (code);
17781 x = cop0, cop0 = cop1, cop1 = x;
17785 gcc_unreachable ();
17788 /* Only SSE4.1/SSE4.2 supports V2DImode. */
17789 if (mode == V2DImode)
17794 /* SSE4.1 supports EQ. */
17795 if (!TARGET_SSE4_1)
17801 /* SSE4.2 supports GT/GTU. */
17802 if (!TARGET_SSE4_2)
17807 gcc_unreachable ();
17811 /* Unsigned parallel compare is not supported by the hardware.
17812 Play some tricks to turn this into a signed comparison
17816 cop0 = force_reg (mode, cop0);
17824 rtx (*gen_sub3) (rtx, rtx, rtx);
17826 /* Subtract (-(INT MAX) - 1) from both operands to make
17828 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
17830 gen_sub3 = (mode == V4SImode
17831 ? gen_subv4si3 : gen_subv2di3);
17832 t1 = gen_reg_rtx (mode);
17833 emit_insn (gen_sub3 (t1, cop0, mask));
17835 t2 = gen_reg_rtx (mode);
17836 emit_insn (gen_sub3 (t2, cop1, mask));
17846 /* Perform a parallel unsigned saturating subtraction. */
17847 x = gen_reg_rtx (mode);
17848 emit_insn (gen_rtx_SET (VOIDmode, x,
17849 gen_rtx_US_MINUS (mode, cop0, cop1)));
17852 cop1 = CONST0_RTX (mode);
17858 gcc_unreachable ();
17863 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
17864 operands[1+negate], operands[2-negate]);
17866 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
17867 operands[2-negate]);
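/* Sketch of the bias trick used above for modes without an unsigned
   compare instruction: subtracting the sign-bit constant from both
   operands (equivalently, XORing it in) turns an unsigned comparison
   into the corresponding signed one.  The saturating-subtract variant
   above instead computes A >u B as NOT ((A -us B) == 0).  Hypothetical
   helper, scalar stand-in for one vector lane:  */
static int
gtu_via_signed_sketch (unsigned a, unsigned b)
{
  return (int) (a ^ 0x80000000u) > (int) (b ^ 0x80000000u);
}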
17871 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
17872 true if we should do zero extension, else sign extension. HIGH_P is
17873 true if we want the N/2 high elements, else the low elements. */
17876 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
17878 enum machine_mode imode = GET_MODE (operands[1]);
17879 rtx (*unpack)(rtx, rtx, rtx);
17886 unpack = gen_vec_interleave_highv16qi;
17888 unpack = gen_vec_interleave_lowv16qi;
17892 unpack = gen_vec_interleave_highv8hi;
17894 unpack = gen_vec_interleave_lowv8hi;
17898 unpack = gen_vec_interleave_highv4si;
17900 unpack = gen_vec_interleave_lowv4si;
17903 gcc_unreachable ();
17906 dest = gen_lowpart (imode, operands[0]);
17909 se = force_reg (imode, CONST0_RTX (imode));
17911 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
17912 operands[1], pc_rtx, pc_rtx);
17914 emit_insn (unpack (dest, operands[1], se));
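/* Sketch (hypothetical helper, scalar stand-in for one V16QI lane):
   widening a lane to HImode by interleaving with a "sign extension"
   byte SE, which the code above makes either constant zero (zero
   extension) or the all-ones result of the compare 0 > X (sign
   extension).  */
static short
widen_lane_sketch (signed char x, int unsigned_p)
{
  unsigned char se = (!unsigned_p && x < 0) ? 0xff : 0x00;
  return (short) ((se << 8) | (unsigned char) x);
}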
17917 /* This function performs the same task as ix86_expand_sse_unpack,
17918 but with SSE4.1 instructions. */
17921 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
17923 enum machine_mode imode = GET_MODE (operands[1]);
17924 rtx (*unpack)(rtx, rtx);
17931 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
17933 unpack = gen_sse4_1_sign_extendv8qiv8hi2;
17937 unpack = gen_sse4_1_zero_extendv4hiv4si2;
17939 unpack = gen_sse4_1_sign_extendv4hiv4si2;
17943 unpack = gen_sse4_1_zero_extendv2siv2di2;
17945 unpack = gen_sse4_1_sign_extendv2siv2di2;
17948 gcc_unreachable ();
17951 dest = operands[0];
17954 /* Shift higher 8 bytes to lower 8 bytes. */
17955 src = gen_reg_rtx (imode);
17956 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, src),
17957 gen_lowpart (V1TImode, operands[1]),
17963 emit_insn (unpack (dest, src));
17966 /* Expand conditional increment or decrement using adc/sbb instructions.
17967 The default case using setcc followed by the conditional move can be
17968 done by generic code. */
17970 ix86_expand_int_addcc (rtx operands[])
17972 enum rtx_code code = GET_CODE (operands[1]);
17974 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
17976 rtx val = const0_rtx;
17977 bool fpcmp = false;
17978 enum machine_mode mode;
17979 rtx op0 = XEXP (operands[1], 0);
17980 rtx op1 = XEXP (operands[1], 1);
17982 if (operands[3] != const1_rtx
17983 && operands[3] != constm1_rtx)
17985 if (!ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
17987 code = GET_CODE (compare_op);
17989 flags = XEXP (compare_op, 0);
17991 if (GET_MODE (flags) == CCFPmode
17992 || GET_MODE (flags) == CCFPUmode)
17995 code = ix86_fp_compare_code_to_integer (code);
18002 PUT_CODE (compare_op,
18003 reverse_condition_maybe_unordered
18004 (GET_CODE (compare_op)));
18006 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
18009 mode = GET_MODE (operands[0]);
18011 /* Construct either adc or sbb insn. */
18012 if ((code == LTU) == (operands[3] == constm1_rtx))
18017 insn = gen_subqi3_carry;
18020 insn = gen_subhi3_carry;
18023 insn = gen_subsi3_carry;
18026 insn = gen_subdi3_carry;
18029 gcc_unreachable ();
18037 insn = gen_addqi3_carry;
18040 insn = gen_addhi3_carry;
18043 insn = gen_addsi3_carry;
18046 insn = gen_adddi3_carry;
18049 gcc_unreachable ();
18052 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
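/* Sketch of the net effect of the sequence built above: a compare that
   leaves its result in the carry flag, followed by "adc OP0, 0" (or
   "sbb OP0, 0" for the decrement case), implements a branch-free
   conditional increment.  Hypothetical helper:  */
static unsigned
cond_inc_sketch (unsigned x, unsigned a, unsigned b)
{
  return x + (a < b ? 1 : 0);	/* cmp a, b; adc x, 0 */
}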
18058 /* Split operands 0 and 1 into half-mode parts. Similar to split_double_mode,
18059 but works for floating point parameters and non-offsettable memories.
18060 For pushes, it returns just stack offsets; the values will be saved
18061 in the right order. At most four parts are generated.
18064 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
18069 size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
18071 size = (GET_MODE_SIZE (mode) + 4) / 8;
18073 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
18074 gcc_assert (size >= 2 && size <= 4);
18076 /* Optimize constant pool reference to immediates. This is used by fp
18077 moves, that force all constants to memory to allow combining. */
18078 if (MEM_P (operand) && MEM_READONLY_P (operand))
18080 rtx tmp = maybe_get_pool_constant (operand);
18085 if (MEM_P (operand) && !offsettable_memref_p (operand))
18087 /* The only non-offsettable memories we handle are pushes. */
18088 int ok = push_operand (operand, VOIDmode);
18092 operand = copy_rtx (operand);
18093 PUT_MODE (operand, Pmode);
18094 parts[0] = parts[1] = parts[2] = parts[3] = operand;
18098 if (GET_CODE (operand) == CONST_VECTOR)
18100 enum machine_mode imode = int_mode_for_mode (mode);
18101 /* Caution: if we looked through a constant pool memory above,
18102 the operand may actually have a different mode now. That's
18103 ok, since we want to pun this all the way back to an integer. */
18104 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
18105 gcc_assert (operand != NULL);
18111 if (mode == DImode)
18112 split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
18117 if (REG_P (operand))
18119 gcc_assert (reload_completed);
18120 for (i = 0; i < size; i++)
18121 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
18123 else if (offsettable_memref_p (operand))
18125 operand = adjust_address (operand, SImode, 0);
18126 parts[0] = operand;
18127 for (i = 1; i < size; i++)
18128 parts[i] = adjust_address (operand, SImode, 4 * i);
18130 else if (GET_CODE (operand) == CONST_DOUBLE)
18135 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
18139 real_to_target (l, &r, mode);
18140 parts[3] = gen_int_mode (l[3], SImode);
18141 parts[2] = gen_int_mode (l[2], SImode);
18144 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
18145 parts[2] = gen_int_mode (l[2], SImode);
18148 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
18151 gcc_unreachable ();
18153 parts[1] = gen_int_mode (l[1], SImode);
18154 parts[0] = gen_int_mode (l[0], SImode);
18157 gcc_unreachable ();
18162 if (mode == TImode)
18163 split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
18164 if (mode == XFmode || mode == TFmode)
18166 enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
18167 if (REG_P (operand))
18169 gcc_assert (reload_completed);
18170 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
18171 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
18173 else if (offsettable_memref_p (operand))
18175 operand = adjust_address (operand, DImode, 0);
18176 parts[0] = operand;
18177 parts[1] = adjust_address (operand, upper_mode, 8);
18179 else if (GET_CODE (operand) == CONST_DOUBLE)
18184 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
18185 real_to_target (l, &r, mode);
18187 /* Do not use shift by 32 to avoid warning on 32bit systems. */
18188 if (HOST_BITS_PER_WIDE_INT >= 64)
18191 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
18192 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
18195 parts[0] = immed_double_const (l[0], l[1], DImode);
18197 if (upper_mode == SImode)
18198 parts[1] = gen_int_mode (l[2], SImode);
18199 else if (HOST_BITS_PER_WIDE_INT >= 64)
18202 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
18203 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
18206 parts[1] = immed_double_const (l[2], l[3], DImode);
18209 gcc_unreachable ();
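/* Sketch of the part decomposition for the common DImode case: the
   value is cut into 32-bit words, least significant first, matching
   what split_double_mode and the CONST_DOUBLE branch above produce.
   Hypothetical helper:  */
static void
split_di_sketch (unsigned long long v, unsigned *lo, unsigned *hi)
{
  *lo = (unsigned) (v & 0xffffffffu);
  *hi = (unsigned) (v >> 32);
}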
18216 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
18217 Return false when normal moves are needed; true when all required
18218 insns have been emitted. Operands 2-4 contain the input values
18219 in the correct order; operands 5-7 contain the output values.
18222 ix86_split_long_move (rtx operands[])
18227 int collisions = 0;
18228 enum machine_mode mode = GET_MODE (operands[0]);
18229 bool collisionparts[4];
18231 /* The DFmode expanders may ask us to move a double.
18232 For a 64-bit target this is a single move. By hiding the fact
18233 here we simplify the i386.md splitters.
18234 if (TARGET_64BIT && GET_MODE_SIZE (GET_MODE (operands[0])) == 8)
18236 /* Optimize constant pool reference to immediates. This is used by
18237 fp moves, that force all constants to memory to allow combining. */
18239 if (MEM_P (operands[1])
18240 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
18241 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
18242 operands[1] = get_pool_constant (XEXP (operands[1], 0));
18243 if (push_operand (operands[0], VOIDmode))
18245 operands[0] = copy_rtx (operands[0]);
18246 PUT_MODE (operands[0], Pmode);
18249 operands[0] = gen_lowpart (DImode, operands[0]);
18250 operands[1] = gen_lowpart (DImode, operands[1]);
18251 emit_move_insn (operands[0], operands[1]);
18255 /* The only non-offsettable memory we handle is a push. */
18256 if (push_operand (operands[0], VOIDmode))
18259 gcc_assert (!MEM_P (operands[0])
18260 || offsettable_memref_p (operands[0]));
18262 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
18263 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
18265 /* When emitting a push, take care with source operands on the stack. */
18266 if (push && MEM_P (operands[1])
18267 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
18269 rtx src_base = XEXP (part[1][nparts - 1], 0);
18271 /* Compensate for the stack decrement by 4. */
18272 if (!TARGET_64BIT && nparts == 3
18273 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
18274 src_base = plus_constant (src_base, 4);
18276 /* src_base refers to the stack pointer and is
18277 automatically decreased by emitted push. */
18278 for (i = 0; i < nparts; i++)
18279 part[1][i] = change_address (part[1][i],
18280 GET_MODE (part[1][i]), src_base);
18283 /* We need to do copy in the right order in case an address register
18284 of the source overlaps the destination. */
18285 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
18289 for (i = 0; i < nparts; i++)
18292 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
18293 if (collisionparts[i])
18297 /* Collision in the middle part can be handled by reordering. */
18298 if (collisions == 1 && nparts == 3 && collisionparts [1])
18300 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
18301 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
18303 else if (collisions == 1
18305 && (collisionparts [1] || collisionparts [2]))
18307 if (collisionparts [1])
18309 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
18310 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
18314 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
18315 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
18319 /* If there are more collisions, we can't handle it by reordering.
18320 Do an lea to the last part and use only one colliding move. */
18321 else if (collisions > 1)
18327 base = part[0][nparts - 1];
18329 /* Handle the case when the last part isn't valid for lea.
18330 Happens in 64-bit mode storing the 12-byte XFmode. */
18331 if (GET_MODE (base) != Pmode)
18332 base = gen_rtx_REG (Pmode, REGNO (base));
18334 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
18335 part[1][0] = replace_equiv_address (part[1][0], base);
18336 for (i = 1; i < nparts; i++)
18338 tmp = plus_constant (base, UNITS_PER_WORD * i);
18339 part[1][i] = replace_equiv_address (part[1][i], tmp);
18350 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
18351 emit_insn (gen_addsi3 (stack_pointer_rtx,
18352 stack_pointer_rtx, GEN_INT (-4)));
18353 emit_move_insn (part[0][2], part[1][2]);
18355 else if (nparts == 4)
18357 emit_move_insn (part[0][3], part[1][3]);
18358 emit_move_insn (part[0][2], part[1][2]);
18363 /* In 64-bit mode we don't have a 32-bit push available. If the operand
18364 is a register, that is OK; we will just use the larger counterpart. We
18365 also retype memories; these come from an attempt to avoid a REX prefix
18366 on moving the second half of a TFmode value.
18367 if (GET_MODE (part[1][1]) == SImode)
18369 switch (GET_CODE (part[1][1]))
18372 part[1][1] = adjust_address (part[1][1], DImode, 0);
18376 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
18380 gcc_unreachable ();
18383 if (GET_MODE (part[1][0]) == SImode)
18384 part[1][0] = part[1][1];
18387 emit_move_insn (part[0][1], part[1][1]);
18388 emit_move_insn (part[0][0], part[1][0]);
18392 /* Choose correct order to not overwrite the source before it is copied. */
18393 if ((REG_P (part[0][0])
18394 && REG_P (part[1][1])
18395 && (REGNO (part[0][0]) == REGNO (part[1][1])
18397 && REGNO (part[0][0]) == REGNO (part[1][2]))
18399 && REGNO (part[0][0]) == REGNO (part[1][3]))))
18401 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
18403 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
18405 operands[2 + i] = part[0][j];
18406 operands[6 + i] = part[1][j];
18411 for (i = 0; i < nparts; i++)
18413 operands[2 + i] = part[0][i];
18414 operands[6 + i] = part[1][i];
18418 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
18419 if (optimize_insn_for_size_p ())
18421 for (j = 0; j < nparts - 1; j++)
18422 if (CONST_INT_P (operands[6 + j])
18423 && operands[6 + j] != const0_rtx
18424 && REG_P (operands[2 + j]))
18425 for (i = j; i < nparts - 1; i++)
18426 if (CONST_INT_P (operands[7 + i])
18427 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
18428 operands[7 + i] = operands[2 + j];
18431 for (i = 0; i < nparts; i++)
18432 emit_move_insn (operands[2 + i], operands[6 + i]);
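/* Sketch of the ordering rule applied above: when the first destination
   part is a register that also feeds a remaining source part (or the
   source address), emitting the word moves in reverse keeps the source
   live until it has been consumed.  Hypothetical helper:  */
static void
move_parts_sketch (unsigned *dst, const unsigned *src, int n, int reverse)
{
  int i;
  if (reverse)
    for (i = n - 1; i >= 0; i--)
      dst[i] = src[i];
  else
    for (i = 0; i < n; i++)
      dst[i] = src[i];
}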
18437 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
18438 left shift by a constant, either using a single shift or
18439 a sequence of add instructions. */
18442 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
18444 rtx (*insn)(rtx, rtx, rtx);
18447 || (count * ix86_cost->add <= ix86_cost->shift_const
18448 && !optimize_insn_for_size_p ()))
18450 insn = mode == DImode ? gen_addsi3 : gen_adddi3;
18451 while (count-- > 0)
18452 emit_insn (insn (operand, operand, operand));
18456 insn = mode == DImode ? gen_ashlsi3 : gen_ashldi3;
18457 emit_insn (insn (operand, operand, GEN_INT (count)));
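/* Sketch: the add-based variant above relies on X + X == X << 1, so a
   shift by COUNT becomes COUNT self-additions when that is cheaper than
   one shift-by-constant on the target.  Hypothetical helper:  */
static unsigned
ashl_by_adds_sketch (unsigned x, int count)
{
  while (count-- > 0)
    x += x;
  return x;
}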
18462 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
18464 rtx (*gen_ashl3)(rtx, rtx, rtx);
18465 rtx (*gen_shld)(rtx, rtx, rtx);
18466 int half_width = GET_MODE_BITSIZE (mode) >> 1;
18468 rtx low[2], high[2];
18471 if (CONST_INT_P (operands[2]))
18473 split_double_mode (mode, operands, 2, low, high);
18474 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
18476 if (count >= half_width)
18478 emit_move_insn (high[0], low[1]);
18479 emit_move_insn (low[0], const0_rtx);
18481 if (count > half_width)
18482 ix86_expand_ashl_const (high[0], count - half_width, mode);
18486 gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;
18488 if (!rtx_equal_p (operands[0], operands[1]))
18489 emit_move_insn (operands[0], operands[1]);
18491 emit_insn (gen_shld (high[0], low[0], GEN_INT (count)));
18492 ix86_expand_ashl_const (low[0], count, mode);
18497 split_double_mode (mode, operands, 1, low, high);
18499 gen_ashl3 = mode == DImode ? gen_ashlsi3 : gen_ashldi3;
18501 if (operands[1] == const1_rtx)
18503 /* Assuming we've chosen QImode-capable registers, 1 << N
18504 can be done with two 32/64-bit shifts, no branches, no cmoves. */
18505 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
18507 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
18509 ix86_expand_clear (low[0]);
18510 ix86_expand_clear (high[0]);
18511 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (half_width)));
18513 d = gen_lowpart (QImode, low[0]);
18514 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
18515 s = gen_rtx_EQ (QImode, flags, const0_rtx);
18516 emit_insn (gen_rtx_SET (VOIDmode, d, s));
18518 d = gen_lowpart (QImode, high[0]);
18519 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
18520 s = gen_rtx_NE (QImode, flags, const0_rtx);
18521 emit_insn (gen_rtx_SET (VOIDmode, d, s));
18524 /* Otherwise, we can get the same results by manually performing
18525 a bit extract operation on bit 5/6, and then performing the two
18526 shifts. The two methods of getting 0/1 into low/high are exactly
18527 the same size. Avoiding the shift in the bit extract case helps
18528 pentium4 a bit; no one else seems to care much either way. */
18531 enum machine_mode half_mode;
18532 rtx (*gen_lshr3)(rtx, rtx, rtx);
18533 rtx (*gen_and3)(rtx, rtx, rtx);
18534 rtx (*gen_xor3)(rtx, rtx, rtx);
18535 HOST_WIDE_INT bits;
18538 if (mode == DImode)
18540 half_mode = SImode;
18541 gen_lshr3 = gen_lshrsi3;
18542 gen_and3 = gen_andsi3;
18543 gen_xor3 = gen_xorsi3;
18548 half_mode = DImode;
18549 gen_lshr3 = gen_lshrdi3;
18550 gen_and3 = gen_anddi3;
18551 gen_xor3 = gen_xordi3;
18555 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
18556 x = gen_rtx_ZERO_EXTEND (half_mode, operands[2]);
18558 x = gen_lowpart (half_mode, operands[2]);
18559 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
18561 emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (bits)));
18562 emit_insn (gen_and3 (high[0], high[0], const1_rtx));
18563 emit_move_insn (low[0], high[0]);
18564 emit_insn (gen_xor3 (low[0], low[0], const1_rtx));
18567 emit_insn (gen_ashl3 (low[0], low[0], operands[2]));
18568 emit_insn (gen_ashl3 (high[0], high[0], operands[2]));
18572 if (operands[1] == constm1_rtx)
18574 /* For -1 << N, we can avoid the shld instruction, because we
18575 know that we're shifting 0...31/63 ones into a -1. */
18576 emit_move_insn (low[0], constm1_rtx);
18577 if (optimize_insn_for_size_p ())
18578 emit_move_insn (high[0], low[0]);
18580 emit_move_insn (high[0], constm1_rtx);
18584 gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;
18586 if (!rtx_equal_p (operands[0], operands[1]))
18587 emit_move_insn (operands[0], operands[1]);
18589 split_double_mode (mode, operands, 1, low, high);
18590 emit_insn (gen_shld (high[0], low[0], operands[2]));
18593 emit_insn (gen_ashl3 (low[0], low[0], operands[2]));
18595 if (TARGET_CMOVE && scratch)
18597 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
18598 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
18600 ix86_expand_clear (scratch);
18601 emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch));
18605 rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
18606 = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;
18608 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
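/* Sketch (hypothetical helper) of the whole double-word left shift on a
   32-bit target, DImode split into two SImode halves; the count < 32
   case is the shld idiom emitted above, and the count >= 32 case is the
   half swap handled by the constant path and the adj patterns.  */
static void
ashl64_sketch (unsigned *lo, unsigned *hi, unsigned count)
{
  count &= 63;
  if (count >= 32)
    {
      *hi = *lo << (count - 32);	/* low half shifted into high */
      *lo = 0;
    }
  else if (count > 0)
    {
      *hi = (*hi << count) | (*lo >> (32 - count));	/* shld */
      *lo <<= count;
    }
}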
18613 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
18615 rtx (*gen_ashr3)(rtx, rtx, rtx)
18616 = mode == DImode ? gen_ashrsi3 : gen_ashrdi3;
18617 rtx (*gen_shrd)(rtx, rtx, rtx);
18618 int half_width = GET_MODE_BITSIZE (mode) >> 1;
18620 rtx low[2], high[2];
18623 if (CONST_INT_P (operands[2]))
18625 split_double_mode (mode, operands, 2, low, high);
18626 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
18628 if (count == GET_MODE_BITSIZE (mode) - 1)
18630 emit_move_insn (high[0], high[1]);
18631 emit_insn (gen_ashr3 (high[0], high[0],
18632 GEN_INT (half_width - 1)));
18633 emit_move_insn (low[0], high[0]);
18636 else if (count >= half_width)
18638 emit_move_insn (low[0], high[1]);
18639 emit_move_insn (high[0], low[0]);
18640 emit_insn (gen_ashr3 (high[0], high[0],
18641 GEN_INT (half_width - 1)));
18643 if (count > half_width)
18644 emit_insn (gen_ashr3 (low[0], low[0],
18645 GEN_INT (count - half_width)));
18649 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
18651 if (!rtx_equal_p (operands[0], operands[1]))
18652 emit_move_insn (operands[0], operands[1]);
18654 emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
18655 emit_insn (gen_ashr3 (high[0], high[0], GEN_INT (count)));
18660 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
18662 if (!rtx_equal_p (operands[0], operands[1]))
18663 emit_move_insn (operands[0], operands[1]);
18665 split_double_mode (mode, operands, 1, low, high);
18667 emit_insn (gen_shrd (low[0], high[0], operands[2]));
18668 emit_insn (gen_ashr3 (high[0], high[0], operands[2]));
18670 if (TARGET_CMOVE && scratch)
18672 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
18673 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
18675 emit_move_insn (scratch, high[0]);
18676 emit_insn (gen_ashr3 (scratch, scratch,
18677 GEN_INT (half_width - 1)));
18678 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
18683 rtx (*gen_x86_shift_adj_3)(rtx, rtx, rtx)
18684 = mode == DImode ? gen_x86_shiftsi_adj_3 : gen_x86_shiftdi_adj_3;
18686 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
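/* Companion sketch for the arithmetic right shift: the low half is
   refilled from the high half, and for counts of half-width or more the
   high half collapses to copies of the sign bit (the shift by
   half_width - 1 above).  Hypothetical helper; assumes arithmetic >>
   on signed ints, as GCC guarantees.  */
static void
ashr64_sketch (unsigned *lo, int *hi, unsigned count)
{
  count &= 63;
  if (count >= 32)
    {
      *lo = (unsigned) (*hi >> (count - 32));
      *hi >>= 31;			/* all copies of the sign bit */
    }
  else if (count > 0)
    {
      *lo = (*lo >> count) | ((unsigned) *hi << (32 - count));	/* shrd */
      *hi >>= count;
    }
}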
18692 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
18694 rtx (*gen_lshr3)(rtx, rtx, rtx)
18695 = mode == DImode ? gen_lshrsi3 : gen_lshrdi3;
18696 rtx (*gen_shrd)(rtx, rtx, rtx);
18697 int half_width = GET_MODE_BITSIZE (mode) >> 1;
18699 rtx low[2], high[2];
18702 if (CONST_INT_P (operands[2]))
18704 split_double_mode (mode, operands, 2, low, high);
18705 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
18707 if (count >= half_width)
18709 emit_move_insn (low[0], high[1]);
18710 ix86_expand_clear (high[0]);
18712 if (count > half_width)
18713 emit_insn (gen_lshr3 (low[0], low[0],
18714 GEN_INT (count - half_width)));
18718 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
18720 if (!rtx_equal_p (operands[0], operands[1]))
18721 emit_move_insn (operands[0], operands[1]);
18723 emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
18724 emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (count)));
18729 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
18731 if (!rtx_equal_p (operands[0], operands[1]))
18732 emit_move_insn (operands[0], operands[1]);
18734 split_double_mode (mode, operands, 1, low, high);
18736 emit_insn (gen_shrd (low[0], high[0], operands[2]));
18737 emit_insn (gen_lshr3 (high[0], high[0], operands[2]));
18739 if (TARGET_CMOVE && scratch)
18741 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
18742 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
18744 ix86_expand_clear (scratch);
18745 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
18750 rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
18751 = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;
18753 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
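/* Sketch (hypothetical helper) of what the x86_shift*_adj patterns used
   above fix up: shrd/shr only see the count modulo the half width, so
   when the half-width bit of the count is set, the halves still have to
   be swapped in afterwards (by cmove with adj_1, by a branch with
   adj_2).  Shown for the logical right shift:  */
static void
lshr64_adjust_sketch (unsigned *lo, unsigned *hi, unsigned count)
{
  if (count & 32)	/* the bit tested by the adj pattern */
    {
      *lo = *hi;	/* low half receives the shifted high half */
      *hi = 0;
    }
}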
18758 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
18760 predict_jump (int prob)
18762 rtx insn = get_last_insn ();
18763 gcc_assert (JUMP_P (insn));
18764 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
18767 /* Helper function for the string operations below. Test whether VARIABLE
18768 is aligned to VALUE bytes. If true, jump to the label. */
18770 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
18772 rtx label = gen_label_rtx ();
18773 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
18774 if (GET_MODE (variable) == DImode)
18775 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
18777 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
18778 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
18781 predict_jump (REG_BR_PROB_BASE * 50 / 100);
18783 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18787 /* Adjust COUNTREG by VALUE. */
18789 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
18791 rtx (*gen_add)(rtx, rtx, rtx)
18792 = GET_MODE (countreg) == DImode ? gen_adddi3 : gen_addsi3;
18794 emit_insn (gen_add (countreg, countreg, GEN_INT (-value)));
18797 /* Zero-extend the possibly-SImode EXP to a Pmode register. */
18799 ix86_zero_extend_to_Pmode (rtx exp)
18802 if (GET_MODE (exp) == VOIDmode)
18803 return force_reg (Pmode, exp);
18804 if (GET_MODE (exp) == Pmode)
18805 return copy_to_mode_reg (Pmode, exp);
18806 r = gen_reg_rtx (Pmode);
18807 emit_insn (gen_zero_extendsidi2 (r, exp));
18811 /* Divide COUNTREG by SCALE. */
18813 scale_counter (rtx countreg, int scale)
18819 if (CONST_INT_P (countreg))
18820 return GEN_INT (INTVAL (countreg) / scale);
18821 gcc_assert (REG_P (countreg));
18823 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
18824 GEN_INT (exact_log2 (scale)),
18825 NULL, 1, OPTAB_DIRECT);
18829 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
18830 DImode for constant loop counts. */
18832 static enum machine_mode
18833 counter_mode (rtx count_exp)
18835 if (GET_MODE (count_exp) != VOIDmode)
18836 return GET_MODE (count_exp);
18837 if (!CONST_INT_P (count_exp))
18839 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
18844 /* When SRCPTR is non-NULL, output a simple loop to move memory pointed
18845 to by SRCPTR to DESTPTR via chunks of MODE, unrolled UNROLL times;
18846 the overall size is COUNT, specified in bytes. When SRCPTR is NULL,
18847 output the equivalent loop to set memory by VALUE (supposed to be in MODE).
18849 The size is rounded down to a whole number of chunks moved at once.
18850 SRCMEM and DESTMEM provide MEM rtx to feed proper aliasing info. */
18854 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
18855 rtx destptr, rtx srcptr, rtx value,
18856 rtx count, enum machine_mode mode, int unroll,
18859 rtx out_label, top_label, iter, tmp;
18860 enum machine_mode iter_mode = counter_mode (count);
18861 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
18862 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
18868 top_label = gen_label_rtx ();
18869 out_label = gen_label_rtx ();
18870 iter = gen_reg_rtx (iter_mode);
18872 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
18873 NULL, 1, OPTAB_DIRECT);
18874 /* Those two should combine. */
18875 if (piece_size == const1_rtx)
18877 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
18879 predict_jump (REG_BR_PROB_BASE * 10 / 100);
18881 emit_move_insn (iter, const0_rtx);
18883 emit_label (top_label);
18885 tmp = convert_modes (Pmode, iter_mode, iter, true);
18886 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
18887 destmem = change_address (destmem, mode, x_addr);
18891 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
18892 srcmem = change_address (srcmem, mode, y_addr);
18894 /* When unrolling for chips that reorder memory reads and writes,
18895 we can save registers by using a single temporary.
18896 Using 4 temporaries is also overkill in 32-bit mode. */
18897 if (!TARGET_64BIT && 0)
18899 for (i = 0; i < unroll; i++)
18904 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
18906 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
18908 emit_move_insn (destmem, srcmem);
18914 gcc_assert (unroll <= 4);
18915 for (i = 0; i < unroll; i++)
18917 tmpreg[i] = gen_reg_rtx (mode);
18921 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
18923 emit_move_insn (tmpreg[i], srcmem);
18925 for (i = 0; i < unroll; i++)
18930 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
18932 emit_move_insn (destmem, tmpreg[i]);
18937 for (i = 0; i < unroll; i++)
18941 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
18942 emit_move_insn (destmem, value);
18945 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
18946 true, OPTAB_LIB_WIDEN);
18948 emit_move_insn (iter, tmp);
18950 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
18952 if (expected_size != -1)
18954 expected_size /= GET_MODE_SIZE (mode) * unroll;
18955 if (expected_size == 0)
18957 else if (expected_size > REG_BR_PROB_BASE)
18958 predict_jump (REG_BR_PROB_BASE - 1);
18960 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
18963 predict_jump (REG_BR_PROB_BASE * 80 / 100);
18964 iter = ix86_zero_extend_to_Pmode (iter);
18965 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
18966 true, OPTAB_LIB_WIDEN);
18967 if (tmp != destptr)
18968 emit_move_insn (destptr, tmp);
18971 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
18972 true, OPTAB_LIB_WIDEN);
18974 emit_move_insn (srcptr, tmp);
18976 emit_label (out_label);
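/* Sketch of the loop shape emitted above, for an assumed 4-byte chunk
   unrolled 4x: SIZE is COUNT rounded down to whole chunks (the
   piece_size_mask AND), the iterator advances by the chunk size, and
   the leftover tail is left for the epilogue.  Hypothetical helper:  */
static unsigned
copy_loop_sketch (unsigned char *dst, const unsigned char *src,
		  unsigned count)
{
  unsigned piece = 16;			/* 4 bytes * unroll of 4 */
  unsigned size = count & ~(piece - 1);	/* piece is a power of two */
  unsigned iter;
  for (iter = 0; iter < size; iter += piece)
    __builtin_memcpy (dst + iter, src + iter, piece);
  return size;				/* bytes actually copied */
}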
18979 /* Output "rep; mov" instruction.
18980 Arguments have the same meaning as for the previous function. */
18982 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
18983 rtx destptr, rtx srcptr,
18985 enum machine_mode mode)
18991 /* If the size is known, it is shorter to use rep movs. */
18992 if (mode == QImode && CONST_INT_P (count)
18993 && !(INTVAL (count) & 3))
18996 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
18997 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
18998 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
18999 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
19000 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
19001 if (mode != QImode)
19003 destexp = gen_rtx_ASHIFT (Pmode, countreg,
19004 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
19005 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
19006 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
19007 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
19008 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
19012 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
19013 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
19015 if (CONST_INT_P (count))
19017 count = GEN_INT (INTVAL (count)
19018 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
19019 destmem = shallow_copy_rtx (destmem);
19020 srcmem = shallow_copy_rtx (srcmem);
19021 set_mem_size (destmem, count);
19022 set_mem_size (srcmem, count);
19026 if (MEM_SIZE (destmem))
19027 set_mem_size (destmem, NULL_RTX);
19028 if (MEM_SIZE (srcmem))
19029 set_mem_size (srcmem, NULL_RTX);
19031 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
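/* Minimal stand-alone illustration (hypothetical helper, not GCC
   internals) of the instruction the gen_rep_mov pattern expands to,
   using GNU inline asm: count in the count register, source and
   destination in the string registers.  Byte variant shown; the code
   above may use the 4- or 8-byte form with the count scaled down
   accordingly.  */
static void
rep_movsb_sketch (void *dst, const void *src, unsigned long count)
{
  __asm__ __volatile__ ("rep movsb"
			: "+D" (dst), "+S" (src), "+c" (count)
			:
			: "memory");
}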
19035 /* Output "rep; stos" instruction.
19036 Arguments have the same meaning as for the previous function. */
19038 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
19039 rtx count, enum machine_mode mode,
19045 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
19046 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
19047 value = force_reg (mode, gen_lowpart (mode, value));
19048 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
19049 if (mode != QImode)
19051 destexp = gen_rtx_ASHIFT (Pmode, countreg,
19052 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
19053 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
19056 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
19057 if (orig_value == const0_rtx && CONST_INT_P (count))
19059 count = GEN_INT (INTVAL (count)
19060 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
19061 destmem = shallow_copy_rtx (destmem);
19062 set_mem_size (destmem, count);
19064 else if (MEM_SIZE (destmem))
19065 set_mem_size (destmem, NULL_RTX);
19066 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
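/* Companion illustration (hypothetical helper) for gen_rep_stos:
   rep stosb stores AL to the destination COUNT times.  As above, the
   wider 4-/8-byte forms are used with a scaled count when the size is
   a multiple of the chunk.  */
static void
rep_stosb_sketch (void *dst, unsigned char value, unsigned long count)
{
  __asm__ __volatile__ ("rep stosb"
			: "+D" (dst), "+c" (count)
			: "a" (value)
			: "memory");
}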
19070 emit_strmov (rtx destmem, rtx srcmem,
19071 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
19073 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
19074 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
19075 emit_insn (gen_strmov (destptr, dest, srcptr, src));
19078 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
19080 expand_movmem_epilogue (rtx destmem, rtx srcmem,
19081 rtx destptr, rtx srcptr, rtx count, int max_size)
19084 if (CONST_INT_P (count))
19086 HOST_WIDE_INT countval = INTVAL (count);
19089 if ((countval & 0x10) && max_size > 16)
19093 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
19094 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
19097 gcc_unreachable ();
19100 if ((countval & 0x08) && max_size > 8)
19103 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
19106 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
19107 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
19111 if ((countval & 0x04) && max_size > 4)
19113 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
19116 if ((countval & 0x02) && max_size > 2)
19118 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
19121 if ((countval & 0x01) && max_size > 1)
19123 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
19130 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
19131 count, 1, OPTAB_DIRECT);
19132 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
19133 count, QImode, 1, 4);
19137 /* When there are stringops, we can cheaply increase dest and src pointers.
19138 Otherwise we save code size by maintaining offset (zero is readily
19139 available from preceding rep operation) and using x86 addressing modes.
19141 if (TARGET_SINGLE_STRINGOP)
19145 rtx label = ix86_expand_aligntest (count, 4, true);
19146 src = change_address (srcmem, SImode, srcptr);
19147 dest = change_address (destmem, SImode, destptr);
19148 emit_insn (gen_strmov (destptr, dest, srcptr, src));
19149 emit_label (label);
19150 LABEL_NUSES (label) = 1;
19154 rtx label = ix86_expand_aligntest (count, 2, true);
19155 src = change_address (srcmem, HImode, srcptr);
19156 dest = change_address (destmem, HImode, destptr);
19157 emit_insn (gen_strmov (destptr, dest, srcptr, src));
19158 emit_label (label);
19159 LABEL_NUSES (label) = 1;
19163 rtx label = ix86_expand_aligntest (count, 1, true);
19164 src = change_address (srcmem, QImode, srcptr);
19165 dest = change_address (destmem, QImode, destptr);
19166 emit_insn (gen_strmov (destptr, dest, srcptr, src));
19167 emit_label (label);
19168 LABEL_NUSES (label) = 1;
19173 rtx offset = force_reg (Pmode, const0_rtx);
19178 rtx label = ix86_expand_aligntest (count, 4, true);
19179 src = change_address (srcmem, SImode, srcptr);
19180 dest = change_address (destmem, SImode, destptr);
19181 emit_move_insn (dest, src);
19182 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
19183 true, OPTAB_LIB_WIDEN);
19185 emit_move_insn (offset, tmp);
19186 emit_label (label);
19187 LABEL_NUSES (label) = 1;
19191 rtx label = ix86_expand_aligntest (count, 2, true);
19192 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
19193 src = change_address (srcmem, HImode, tmp);
19194 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
19195 dest = change_address (destmem, HImode, tmp);
19196 emit_move_insn (dest, src);
19197 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
19198 true, OPTAB_LIB_WIDEN);
19200 emit_move_insn (offset, tmp);
19201 emit_label (label);
19202 LABEL_NUSES (label) = 1;
19206 rtx label = ix86_expand_aligntest (count, 1, true);
19207 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
19208 src = change_address (srcmem, QImode, tmp);
19209 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
19210 dest = change_address (destmem, QImode, tmp);
19211 emit_move_insn (dest, src);
19212 emit_label (label);
19213 LABEL_NUSES (label) = 1;
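/* Sketch of the constant-count epilogue above: each set bit of the
   remaining count triggers exactly one move of that power-of-two size,
   largest first (like the 0x10/0x08/... tests), so at most one move per
   size is emitted.  Hypothetical helper for a remainder below 16:  */
static void
copy_tail_sketch (unsigned char *dst, const unsigned char *src,
		  unsigned count)
{
  unsigned off = 0;
  if (count & 8) { __builtin_memcpy (dst + off, src + off, 8); off += 8; }
  if (count & 4) { __builtin_memcpy (dst + off, src + off, 4); off += 4; }
  if (count & 2) { __builtin_memcpy (dst + off, src + off, 2); off += 2; }
  if (count & 1) dst[off] = src[off];
}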
19218 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
19220 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
19221 rtx count, int max_size)
19224 expand_simple_binop (counter_mode (count), AND, count,
19225 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
19226 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
19227 gen_lowpart (QImode, value), count, QImode,
19231 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
19233 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
19237 if (CONST_INT_P (count))
19239 HOST_WIDE_INT countval = INTVAL (count);
19242 if ((countval & 0x10) && max_size > 16)
19246 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
19247 emit_insn (gen_strset (destptr, dest, value));
19248 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
19249 emit_insn (gen_strset (destptr, dest, value));
19252 gcc_unreachable ();
19255 if ((countval & 0x08) && max_size > 8)
19259 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
19260 emit_insn (gen_strset (destptr, dest, value));
19264 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
19265 emit_insn (gen_strset (destptr, dest, value));
19266 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
19267 emit_insn (gen_strset (destptr, dest, value));
19271 if ((countval & 0x04) && max_size > 4)
19273 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
19274 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
19277 if ((countval & 0x02) && max_size > 2)
19279 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
19280 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
19283 if ((countval & 0x01) && max_size > 1)
19285 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
19286 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
19293 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
19298 rtx label = ix86_expand_aligntest (count, 16, true);
19301 dest = change_address (destmem, DImode, destptr);
19302 emit_insn (gen_strset (destptr, dest, value));
19303 emit_insn (gen_strset (destptr, dest, value));
19307 dest = change_address (destmem, SImode, destptr);
19308 emit_insn (gen_strset (destptr, dest, value));
19309 emit_insn (gen_strset (destptr, dest, value));
19310 emit_insn (gen_strset (destptr, dest, value));
19311 emit_insn (gen_strset (destptr, dest, value));
19313 emit_label (label);
19314 LABEL_NUSES (label) = 1;
19318 rtx label = ix86_expand_aligntest (count, 8, true);
19321 dest = change_address (destmem, DImode, destptr);
19322 emit_insn (gen_strset (destptr, dest, value));
19326 dest = change_address (destmem, SImode, destptr);
19327 emit_insn (gen_strset (destptr, dest, value));
19328 emit_insn (gen_strset (destptr, dest, value));
19330 emit_label (label);
19331 LABEL_NUSES (label) = 1;
19335 rtx label = ix86_expand_aligntest (count, 4, true);
19336 dest = change_address (destmem, SImode, destptr);
19337 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
19338 emit_label (label);
19339 LABEL_NUSES (label) = 1;
19343 rtx label = ix86_expand_aligntest (count, 2, true);
19344 dest = change_address (destmem, HImode, destptr);
19345 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
19346 emit_label (label);
19347 LABEL_NUSES (label) = 1;
19351 rtx label = ix86_expand_aligntest (count, 1, true);
19352 dest = change_address (destmem, QImode, destptr);
19353 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
19354 emit_label (label);
19355 LABEL_NUSES (label) = 1;
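/* Sketch of why gen_lowpart of VALUE is valid at every width above: the
   promoted store value used by the setmem expanders is the original
   byte replicated through the wider mode (done elsewhere in this file),
   so any narrower lowpart is the byte pattern again.  Hypothetical
   helper:  */
static unsigned
replicate_byte_sketch (unsigned char c)
{
  return c * 0x01010101u;	/* 0x2A -> 0x2A2A2A2A */
}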
19359 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN,
19360 to DESIRED_ALIGNMENT. */
19362 expand_movmem_prologue (rtx destmem, rtx srcmem,
19363 rtx destptr, rtx srcptr, rtx count,
19364 int align, int desired_alignment)
19366 if (align <= 1 && desired_alignment > 1)
19368 rtx label = ix86_expand_aligntest (destptr, 1, false);
19369 srcmem = change_address (srcmem, QImode, srcptr);
19370 destmem = change_address (destmem, QImode, destptr);
19371 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
19372 ix86_adjust_counter (count, 1);
19373 emit_label (label);
19374 LABEL_NUSES (label) = 1;
19376 if (align <= 2 && desired_alignment > 2)
19378 rtx label = ix86_expand_aligntest (destptr, 2, false);
19379 srcmem = change_address (srcmem, HImode, srcptr);
19380 destmem = change_address (destmem, HImode, destptr);
19381 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
19382 ix86_adjust_counter (count, 2);
19383 emit_label (label);
19384 LABEL_NUSES (label) = 1;
19386 if (align <= 4 && desired_alignment > 4)
19388 rtx label = ix86_expand_aligntest (destptr, 4, false);
19389 srcmem = change_address (srcmem, SImode, srcptr);
19390 destmem = change_address (destmem, SImode, destptr);
19391 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
19392 ix86_adjust_counter (count, 4);
19393 emit_label (label);
19394 LABEL_NUSES (label) = 1;
19396 gcc_assert (desired_alignment <= 8);
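/* Sketch of the runtime alignment prologue above: peel off a 1-, 2- and
   4-byte copy, each guarded by an alignment test of the destination,
   until DESIRED_ALIGN is reached; COUNT shrinks accordingly.
   Hypothetical helper:  */
static unsigned
align_dst_sketch (unsigned char **dst, const unsigned char **src,
		  unsigned count, unsigned desired_align)
{
  unsigned step;
  for (step = 1; step < desired_align; step <<= 1)
    if ((unsigned long) *dst & step)
      {
	__builtin_memcpy (*dst, *src, step);
	*dst += step;
	*src += step;
	count -= step;
      }
  return count;
}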
19399 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN;
19400 ALIGN_BYTES is how many bytes need to be copied. */
19402 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
19403 int desired_align, int align_bytes)
19406 rtx src_size, dst_size;
19408 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
19409 if (src_align_bytes >= 0)
19410 src_align_bytes = desired_align - src_align_bytes;
19411 src_size = MEM_SIZE (src);
19412 dst_size = MEM_SIZE (dst);
19413 if (align_bytes & 1)
19415 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
19416 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
19418 emit_insn (gen_strmov (destreg, dst, srcreg, src));
19420 if (align_bytes & 2)
19422 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
19423 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
19424 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
19425 set_mem_align (dst, 2 * BITS_PER_UNIT);
19426 if (src_align_bytes >= 0
19427 && (src_align_bytes & 1) == (align_bytes & 1)
19428 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
19429 set_mem_align (src, 2 * BITS_PER_UNIT);
19431 emit_insn (gen_strmov (destreg, dst, srcreg, src));
19433 if (align_bytes & 4)
19435 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
19436 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
19437 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
19438 set_mem_align (dst, 4 * BITS_PER_UNIT);
19439 if (src_align_bytes >= 0)
19441 unsigned int src_align = 0;
19442 if ((src_align_bytes & 3) == (align_bytes & 3))
19444 else if ((src_align_bytes & 1) == (align_bytes & 1))
19446 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
19447 set_mem_align (src, src_align * BITS_PER_UNIT);
19450 emit_insn (gen_strmov (destreg, dst, srcreg, src));
19452 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
19453 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
19454 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
19455 set_mem_align (dst, desired_align * BITS_PER_UNIT);
19456 if (src_align_bytes >= 0)
19458 unsigned int src_align = 0;
19459 if ((src_align_bytes & 7) == (align_bytes & 7))
19461 else if ((src_align_bytes & 3) == (align_bytes & 3))
19463 else if ((src_align_bytes & 1) == (align_bytes & 1))
19465 if (src_align > (unsigned int) desired_align)
19466 src_align = desired_align;
19467 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
19468 set_mem_align (src, src_align * BITS_PER_UNIT);
19471 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
19473 set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
19478 /* Store enough into DEST to align it, known to be aligned by ALIGN, to
19479 DESIRED_ALIGNMENT. */
19481 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
19482 int align, int desired_alignment)
19484 if (align <= 1 && desired_alignment > 1)
19486 rtx label = ix86_expand_aligntest (destptr, 1, false);
19487 destmem = change_address (destmem, QImode, destptr);
19488 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
19489 ix86_adjust_counter (count, 1);
19490 emit_label (label);
19491 LABEL_NUSES (label) = 1;
19493 if (align <= 2 && desired_alignment > 2)
19495 rtx label = ix86_expand_aligntest (destptr, 2, false);
19496 destmem = change_address (destmem, HImode, destptr);
19497 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
19498 ix86_adjust_counter (count, 2);
19499 emit_label (label);
19500 LABEL_NUSES (label) = 1;
19502 if (align <= 4 && desired_alignment > 4)
19504 rtx label = ix86_expand_aligntest (destptr, 4, false);
19505 destmem = change_address (destmem, SImode, destptr);
19506 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
19507 ix86_adjust_counter (count, 4);
19508 emit_label (label);
19509 LABEL_NUSES (label) = 1;
19511 gcc_assert (desired_alignment <= 8);
19514 /* Store enough into DST to align it, known to be aligned by ALIGN, to
19515 DESIRED_ALIGN; ALIGN_BYTES is how many bytes need to be stored. */
19517 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
19518 int desired_align, int align_bytes)
19521 rtx dst_size = MEM_SIZE (dst);
19522 if (align_bytes & 1)
19524 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
19526 emit_insn (gen_strset (destreg, dst,
19527 gen_lowpart (QImode, value)));
19529 if (align_bytes & 2)
19531 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
19532 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
19533 set_mem_align (dst, 2 * BITS_PER_UNIT);
19535 emit_insn (gen_strset (destreg, dst,
19536 gen_lowpart (HImode, value)));
19538 if (align_bytes & 4)
19540 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
19541 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
19542 set_mem_align (dst, 4 * BITS_PER_UNIT);
19544 emit_insn (gen_strset (destreg, dst,
19545 gen_lowpart (SImode, value)));
19547 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
19548 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
19549 set_mem_align (dst, desired_align * BITS_PER_UNIT);
19551 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
19555 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
19556 static enum stringop_alg
19557 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
19558 int *dynamic_check)
19560 const struct stringop_algs * algs;
19561 bool optimize_for_speed;
19562 /* Algorithms using the rep prefix want at least edi and ecx;
19563 additionally, memset wants eax and memcpy wants esi. Don't
19564 consider such algorithms if the user has appropriated those
19565 registers for their own purposes. */
19566 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
19568 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
19570 #define ALG_USABLE_P(alg) (rep_prefix_usable \
19571 || (alg != rep_prefix_1_byte \
19572 && alg != rep_prefix_4_byte \
19573 && alg != rep_prefix_8_byte))
19574 const struct processor_costs *cost;
19576 /* Even if the string operation call is cold, we still might spend a lot
19577 of time processing large blocks. */
19578 if (optimize_function_for_size_p (cfun)
19579 || (optimize_insn_for_size_p ()
19580 && expected_size != -1 && expected_size < 256))
19581 optimize_for_speed = false;
19583 optimize_for_speed = true;
19585 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
19587 *dynamic_check = -1;
19589 algs = &cost->memset[TARGET_64BIT != 0];
19591 algs = &cost->memcpy[TARGET_64BIT != 0];
19592 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
19593 return stringop_alg;
19594 /* rep; movq or rep; movl is the smallest variant. */
19595 else if (!optimize_for_speed)
19597 if (!count || (count & 3))
19598 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
19600 return rep_prefix_usable ? rep_prefix_4_byte : loop;
19602 /* Very tiny blocks are best handled via the loop, REP is expensive to set up.
19604 else if (expected_size != -1 && expected_size < 4)
19605 return loop_1_byte;
19606 else if (expected_size != -1)
19609 enum stringop_alg alg = libcall;
19610 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
19612 /* We get here if the algorithms that were not libcall-based
19613 were rep-prefix based and we are unable to use rep prefixes
19614 based on global register usage. Break out of the loop and
19615 use the heuristic below. */
19616 if (algs->size[i].max == 0)
19618 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
19620 enum stringop_alg candidate = algs->size[i].alg;
19622 if (candidate != libcall && ALG_USABLE_P (candidate))
19624 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
19625 last non-libcall inline algorithm. */
19626 if (TARGET_INLINE_ALL_STRINGOPS)
19628 /* When the current size is best to be copied by a libcall,
19629 but we are still forced to inline, run the heuristic below
19630 that will pick code for medium sized blocks. */
19631 if (alg != libcall)
19635 else if (ALG_USABLE_P (candidate))
19639 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
19641 /* When asked to inline the call anyway, try to pick a meaningful choice.
19642 We look for the maximal size of a block that is faster to copy by hand
19643 and take blocks of at most that size, guessing that the average size
19644 will be roughly half of the block.
19646 If this turns out to be bad, we might simply specify the preferred
19647 choice in ix86_costs. */
19648 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
19649 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
19652 enum stringop_alg alg;
19654 bool any_alg_usable_p = true;
19656 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
19658 enum stringop_alg candidate = algs->size[i].alg;
19659 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
19661 if (candidate != libcall && candidate
19662 && ALG_USABLE_P (candidate))
19663 max = algs->size[i].max;
19665 /* If there aren't any usable algorithms, then recursing on
19666 smaller sizes isn't going to find anything. Just return the
19667 simple byte-at-a-time copy loop. */
19668 if (!any_alg_usable_p)
19670 /* Pick something reasonable. */
19671 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
19672 *dynamic_check = 128;
19673 return loop_1_byte;
19677 alg = decide_alg (count, max / 2, memset, dynamic_check);
19678 gcc_assert (*dynamic_check == -1);
19679 gcc_assert (alg != libcall);
19680 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
19681 *dynamic_check = max;
19684 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
19685 #undef ALG_USABLE_P
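/* Sketch (hypothetical helper with made-up table values) of how the
   size table drives the choice above: with entries
   {{256, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}, an expected
   size of 1000 selects rep_prefix_4_byte -- the first entry whose MAX
   is -1 or at least the expected size wins.  */
static enum stringop_alg
pick_alg_sketch (const struct stringop_algs *algs, int expected_size)
{
  int i;
  for (i = 0; i < MAX_STRINGOP_ALGS; i++)
    if (algs->size[i].max == -1 || algs->size[i].max >= expected_size)
      return algs->size[i].alg;
  return libcall;
}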
19688 /* Decide on alignment. We know that the operand is already aligned to ALIGN
19689 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
19691 decide_alignment (int align,
19692 enum stringop_alg alg,
19695 int desired_align = 0;
19699 gcc_unreachable ();
19701 case unrolled_loop:
19702 desired_align = GET_MODE_SIZE (Pmode);
19704 case rep_prefix_8_byte:
19707 case rep_prefix_4_byte:
19708 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
19709 copying a whole cache line at once. */
19710 if (TARGET_PENTIUMPRO)
19715 case rep_prefix_1_byte:
19716 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
19717 copying a whole cache line at once. */
19718 if (TARGET_PENTIUMPRO)
19732 if (desired_align < align)
19733 desired_align = align;
19734 if (expected_size != -1 && expected_size < 4)
19735 desired_align = align;
19736 return desired_align;
19739 /* Return the smallest power of 2 greater than VAL. */
19741 smallest_pow2_greater_than (int val)
19742 {
19743 int ret = 1;
19744 while (ret <= val)
19745 ret <<= 1;
19746 return ret;
19747 }
19749 /* Expand string move (memcpy) operation. Use i386 string operations when
19750 profitable. expand_setmem contains similar code. The code depends upon
19751 architecture, block size and alignment, but always has the same
19754 1) Prologue guard: Conditional that jumps up to epilogues for small
19755 blocks that can be handled by epilogue alone. This is faster but
19756 also needed for correctness, since the prologue assumes the block is larger
19757 than the desired alignment.
19759 Optional dynamic check for size and libcall for large
19760 blocks is emitted here too, with -minline-stringops-dynamically.
19762 2) Prologue: copy first few bytes in order to get destination aligned
19763 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
19764 DESIRED_ALIGN, and up to DESIRED_ALIGN - ALIGN bytes can be copied.
19765 We emit either a jump tree on power of two sized blocks, or a byte loop.
19767 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
19768 with specified algorithm.
19770 4) Epilogue: code copying tail of the block that is too small to be
19771 handled by main body (or up to size guarded by prologue guard). */
19774 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
19775 rtx expected_align_exp, rtx expected_size_exp)
19781 rtx jump_around_label = NULL;
19782 HOST_WIDE_INT align = 1;
19783 unsigned HOST_WIDE_INT count = 0;
19784 HOST_WIDE_INT expected_size = -1;
19785 int size_needed = 0, epilogue_size_needed;
19786 int desired_align = 0, align_bytes = 0;
19787 enum stringop_alg alg;
19789 bool need_zero_guard = false;
19791 if (CONST_INT_P (align_exp))
19792 align = INTVAL (align_exp);
19793 /* i386 can do misaligned access at a reasonably increased cost. */
19794 if (CONST_INT_P (expected_align_exp)
19795 && INTVAL (expected_align_exp) > align)
19796 align = INTVAL (expected_align_exp);
19797 /* ALIGN is the minimum of destination and source alignment, but we care here
19798 just about destination alignment. */
19799 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
19800 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
19802 if (CONST_INT_P (count_exp))
19803 count = expected_size = INTVAL (count_exp);
19804 if (CONST_INT_P (expected_size_exp) && count == 0)
19805 expected_size = INTVAL (expected_size_exp);
19807 /* Make sure we don't need to care about overflow later on. */
19808 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
19811 /* Step 0: Decide on preferred algorithm, desired alignment and
19812 size of chunks to be copied by main loop. */
19814 alg = decide_alg (count, expected_size, false, &dynamic_check);
19815 desired_align = decide_alignment (align, alg, expected_size);
19817 if (!TARGET_ALIGN_STRINGOPS)
19818 align = desired_align;
19820 if (alg == libcall)
19822 gcc_assert (alg != no_stringop);
19824 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
19825 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
19826 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
19831 gcc_unreachable ();
19833 need_zero_guard = true;
19834 size_needed = GET_MODE_SIZE (Pmode);
19836 case unrolled_loop:
19837 need_zero_guard = true;
19838 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
19840 case rep_prefix_8_byte:
19843 case rep_prefix_4_byte:
19846 case rep_prefix_1_byte:
19850 need_zero_guard = true;
19855 epilogue_size_needed = size_needed;
19857 /* Step 1: Prologue guard. */
19859 /* Alignment code needs count to be in register. */
19860 if (CONST_INT_P (count_exp) && desired_align > align)
19862 if (INTVAL (count_exp) > desired_align
19863 && INTVAL (count_exp) > size_needed)
19866 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
19867 if (align_bytes <= 0)
19870 align_bytes = desired_align - align_bytes;
19872 if (align_bytes == 0)
19873 count_exp = force_reg (counter_mode (count_exp), count_exp);
19875 gcc_assert (desired_align >= 1 && align >= 1);
19877 /* Ensure that alignment prologue won't copy past end of block. */
19878 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
19880 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
19881 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
19882 Make sure it is a power of 2. */
19883 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
19887 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
19889 /* If main algorithm works on QImode, no epilogue is needed.
19890 For small sizes just don't align anything. */
19891 if (size_needed == 1)
19892 desired_align = align;
19899 label = gen_label_rtx ();
19900 emit_cmp_and_jump_insns (count_exp,
19901 GEN_INT (epilogue_size_needed),
19902 LTU, 0, counter_mode (count_exp), 1, label);
19903 if (expected_size == -1 || expected_size < epilogue_size_needed)
19904 predict_jump (REG_BR_PROB_BASE * 60 / 100);
19906 predict_jump (REG_BR_PROB_BASE * 20 / 100);
19910 /* Emit code to decide at runtime whether a library call or inline code should be used. */
19912 if (dynamic_check != -1)
19914 if (CONST_INT_P (count_exp))
19916 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
19918 emit_block_move_via_libcall (dst, src, count_exp, false);
19919 count_exp = const0_rtx;
19925 rtx hot_label = gen_label_rtx ();
19926 jump_around_label = gen_label_rtx ();
19927 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
19928 LEU, 0, GET_MODE (count_exp), 1, hot_label);
19929 predict_jump (REG_BR_PROB_BASE * 90 / 100);
19930 emit_block_move_via_libcall (dst, src, count_exp, false);
19931 emit_jump (jump_around_label);
19932 emit_label (hot_label);
19936 /* Step 2: Alignment prologue. */
19938 if (desired_align > align)
19940 if (align_bytes == 0)
19942 /* Except for the first move in epilogue, we no longer know
19943 the constant offset in aliasing info.  It doesn't seem worth
19944 the pain to maintain it for the first move, so throw away the info early.
19946 src = change_address (src, BLKmode, srcreg);
19947 dst = change_address (dst, BLKmode, destreg);
19948 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
19953 /* If we know how many bytes need to be stored before dst is
19954 sufficiently aligned, maintain aliasing info accurately. */
19955 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
19956 desired_align, align_bytes);
19957 count_exp = plus_constant (count_exp, -align_bytes);
19958 count -= align_bytes;
19960 if (need_zero_guard
19961 && (count < (unsigned HOST_WIDE_INT) size_needed
19962 || (align_bytes == 0
19963 && count < ((unsigned HOST_WIDE_INT) size_needed
19964 + desired_align - align))))
19966 /* It is possible that we copied enough so the main loop will not execute. */
19968 gcc_assert (size_needed > 1);
19969 if (label == NULL_RTX)
19970 label = gen_label_rtx ();
19971 emit_cmp_and_jump_insns (count_exp,
19972 GEN_INT (size_needed),
19973 LTU, 0, counter_mode (count_exp), 1, label);
19974 if (expected_size == -1
19975 || expected_size < (desired_align - align) / 2 + size_needed)
19976 predict_jump (REG_BR_PROB_BASE * 20 / 100);
19978 predict_jump (REG_BR_PROB_BASE * 60 / 100);
19981 if (label && size_needed == 1)
19983 emit_label (label);
19984 LABEL_NUSES (label) = 1;
19986 epilogue_size_needed = 1;
19988 else if (label == NULL_RTX)
19989 epilogue_size_needed = size_needed;
19991 /* Step 3: Main loop. */
19997 gcc_unreachable ();
19999 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
20000 count_exp, QImode, 1, expected_size);
20003 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
20004 count_exp, Pmode, 1, expected_size);
20006 case unrolled_loop:
20007 /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
20008 registers for 4 temporaries anyway. */
20009 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
20010 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
20013 case rep_prefix_8_byte:
20014 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
20017 case rep_prefix_4_byte:
20018 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
20021 case rep_prefix_1_byte:
20022 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
20026 /* Properly adjust the offset of src and dest memory for aliasing. */
20027 if (CONST_INT_P (count_exp))
20029 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
20030 (count / size_needed) * size_needed);
20031 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
20032 (count / size_needed) * size_needed);
20036 src = change_address (src, BLKmode, srcreg);
20037 dst = change_address (dst, BLKmode, destreg);
20040 /* Step 4: Epilogue to copy the remaining bytes. */
20044 /* When the main loop is done, COUNT_EXP might hold the original count,
20045 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
20046 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
20047 bytes.  Compensate if needed. */
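/* Worked example with assumed values: for count == 23 and size_needed == 8,
   the main loop handles 16 bytes and the epilogue is left with
   23 & (8 - 1) == 7 bytes. */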
20049 if (size_needed < epilogue_size_needed)
20052 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
20053 GEN_INT (size_needed - 1), count_exp, 1,
20055 if (tmp != count_exp)
20056 emit_move_insn (count_exp, tmp);
20058 emit_label (label);
20059 LABEL_NUSES (label) = 1;
20062 if (count_exp != const0_rtx && epilogue_size_needed > 1)
20063 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
20064 epilogue_size_needed);
20065 if (jump_around_label)
20066 emit_label (jump_around_label);
20070 /* Helper function for memset.  For QImode value 0xXY produce
20071 0xXYXYXYXY of the width specified by MODE.  This is essentially
20072 a multiplication by 0x01010101, but we can do slightly better than
20073 synth_mult by unwinding the sequence by hand on CPUs with slow multiply. */
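/* E.g., for MODE == SImode and VAL == 0x5A, the unwound sequence below
   computes 0x5A5A5A5A via r |= r << 8; r |= r << 16;
   (for DImode a further r |= r << 32 is emitted). */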
20076 promote_duplicated_reg (enum machine_mode mode, rtx val)
20078 enum machine_mode valmode = GET_MODE (val);
20080 int nops = mode == DImode ? 3 : 2;
20082 gcc_assert (mode == SImode || mode == DImode);
20083 if (val == const0_rtx)
20084 return copy_to_mode_reg (mode, const0_rtx);
20085 if (CONST_INT_P (val))
20087 HOST_WIDE_INT v = INTVAL (val) & 255;
/* Replicate the low byte into every byte position. */
v |= v << 8;
v |= v << 16;
20091 if (mode == DImode)
20092 v |= (v << 16) << 16;
20093 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
20096 if (valmode == VOIDmode)
20098 if (valmode != QImode)
20099 val = gen_lowpart (QImode, val);
20100 if (mode == QImode)
20102 if (!TARGET_PARTIAL_REG_STALL)
20104 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
20105 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
20106 <= (ix86_cost->shift_const + ix86_cost->add) * nops
20107 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
20109 rtx reg = convert_modes (mode, QImode, val, true);
20110 tmp = promote_duplicated_reg (mode, const1_rtx);
20111 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
20116 rtx reg = convert_modes (mode, QImode, val, true);
20118 if (!TARGET_PARTIAL_REG_STALL)
20119 if (mode == SImode)
20120 emit_insn (gen_movsi_insv_1 (reg, reg));
20122 emit_insn (gen_movdi_insv_1 (reg, reg));
20125 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
20126 NULL, 1, OPTAB_DIRECT);
20128 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
20130 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
20131 NULL, 1, OPTAB_DIRECT);
20132 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
20133 if (mode == SImode)
20135 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
20136 NULL, 1, OPTAB_DIRECT);
20137 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
20142 /* Duplicate value VAL using promote_duplicated_reg into maximal size that will
20143 be needed by main loop copying SIZE_NEEDED chunks and prologue getting
20144 alignment from ALIGN to DESIRED_ALIGN. */
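/* E.g., assuming TARGET_64BIT and ALIGN == DESIRED_ALIGN: size_needed == 8
   promotes VAL to DImode, 4 to SImode, and 2 to HImode. */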
20146 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
20151 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
20152 promoted_val = promote_duplicated_reg (DImode, val);
20153 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
20154 promoted_val = promote_duplicated_reg (SImode, val);
20155 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
20156 promoted_val = promote_duplicated_reg (HImode, val);
20158 promoted_val = val;
20160 return promoted_val;
20163 /* Expand string set (memset) operation.  Use i386 string operations when
20164 profitable.  See the ix86_expand_movmem comment for an explanation of the
20165 individual steps performed. */
20167 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
20168 rtx expected_align_exp, rtx expected_size_exp)
20173 rtx jump_around_label = NULL;
20174 HOST_WIDE_INT align = 1;
20175 unsigned HOST_WIDE_INT count = 0;
20176 HOST_WIDE_INT expected_size = -1;
20177 int size_needed = 0, epilogue_size_needed;
20178 int desired_align = 0, align_bytes = 0;
20179 enum stringop_alg alg;
20180 rtx promoted_val = NULL;
20181 bool force_loopy_epilogue = false;
20183 bool need_zero_guard = false;
20185 if (CONST_INT_P (align_exp))
20186 align = INTVAL (align_exp);
20187 /* i386 can do misaligned access at a reasonably increased cost. */
20188 if (CONST_INT_P (expected_align_exp)
20189 && INTVAL (expected_align_exp) > align)
20190 align = INTVAL (expected_align_exp);
20191 if (CONST_INT_P (count_exp))
20192 count = expected_size = INTVAL (count_exp);
20193 if (CONST_INT_P (expected_size_exp) && count == 0)
20194 expected_size = INTVAL (expected_size_exp);
20196 /* Make sure we don't need to care about overflow later on. */
20197 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
20200 /* Step 0: Decide on preferred algorithm, desired alignment and
20201 size of chunks to be copied by main loop. */
20203 alg = decide_alg (count, expected_size, true, &dynamic_check);
20204 desired_align = decide_alignment (align, alg, expected_size);
20206 if (!TARGET_ALIGN_STRINGOPS)
20207 align = desired_align;
20209 if (alg == libcall)
20211 gcc_assert (alg != no_stringop);
20213 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
20214 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
20219 gcc_unreachable ();
20221 need_zero_guard = true;
20222 size_needed = GET_MODE_SIZE (Pmode);
20224 case unrolled_loop:
20225 need_zero_guard = true;
20226 size_needed = GET_MODE_SIZE (Pmode) * 4;
20228 case rep_prefix_8_byte:
20231 case rep_prefix_4_byte:
20234 case rep_prefix_1_byte:
20238 need_zero_guard = true;
20242 epilogue_size_needed = size_needed;
20244 /* Step 1: Prologue guard. */
20246 /* Alignment code needs count to be in register. */
20247 if (CONST_INT_P (count_exp) && desired_align > align)
20249 if (INTVAL (count_exp) > desired_align
20250 && INTVAL (count_exp) > size_needed)
20253 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
20254 if (align_bytes <= 0)
20257 align_bytes = desired_align - align_bytes;
20259 if (align_bytes == 0)
20261 enum machine_mode mode = SImode;
20262 if (TARGET_64BIT && (count & ~0xffffffff))
20264 count_exp = force_reg (mode, count_exp);
20267 /* Do the cheap promotion to allow better CSE across the
20268 main loop and epilogue (i.e., one load of the big constant in
20269 front of all the code). */
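/* E.g., for memset (p, 0x2A, n) the promoted constant 0x2A2A2A2A is
   materialized once here and then shared by the prologue, the main loop
   and the epilogue, instead of being rebuilt on each path. */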
20270 if (CONST_INT_P (val_exp))
20271 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
20272 desired_align, align);
20273 /* Ensure that alignment prologue won't copy past end of block. */
20274 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
20276 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
20277 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
20278 Make sure it is power of 2. */
20279 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
20281 /* To improve performance of small blocks, we jump around the VAL
20282 promoting code.  This means that if the promoted VAL is not constant,
20283 we might not use it in the epilogue and have to use a byte loop instead.
20285 if (epilogue_size_needed > 2 && !promoted_val)
20286 force_loopy_epilogue = true;
20289 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
20291 /* If main algorithm works on QImode, no epilogue is needed.
20292 For small sizes just don't align anything. */
20293 if (size_needed == 1)
20294 desired_align = align;
20301 label = gen_label_rtx ();
20302 emit_cmp_and_jump_insns (count_exp,
20303 GEN_INT (epilogue_size_needed),
20304 LTU, 0, counter_mode (count_exp), 1, label);
20305 if (expected_size == -1 || expected_size <= epilogue_size_needed)
20306 predict_jump (REG_BR_PROB_BASE * 60 / 100);
20308 predict_jump (REG_BR_PROB_BASE * 20 / 100);
20311 if (dynamic_check != -1)
20313 rtx hot_label = gen_label_rtx ();
20314 jump_around_label = gen_label_rtx ();
20315 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
20316 LEU, 0, counter_mode (count_exp), 1, hot_label);
20317 predict_jump (REG_BR_PROB_BASE * 90 / 100);
20318 set_storage_via_libcall (dst, count_exp, val_exp, false);
20319 emit_jump (jump_around_label);
20320 emit_label (hot_label);
20323 /* Step 2: Alignment prologue. */
20325 /* Do the expensive promotion once we have branched off the small blocks. */
20327 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
20328 desired_align, align);
20329 gcc_assert (desired_align >= 1 && align >= 1);
20331 if (desired_align > align)
20333 if (align_bytes == 0)
20335 /* Except for the first move in epilogue, we no longer know
20336 the constant offset in aliasing info.  It doesn't seem worth
20337 the pain to maintain it for the first move, so throw away the info early.
20339 dst = change_address (dst, BLKmode, destreg);
20340 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
20345 /* If we know how many bytes need to be stored before dst is
20346 sufficiently aligned, maintain aliasing info accurately. */
20347 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
20348 desired_align, align_bytes);
20349 count_exp = plus_constant (count_exp, -align_bytes);
20350 count -= align_bytes;
20352 if (need_zero_guard
20353 && (count < (unsigned HOST_WIDE_INT) size_needed
20354 || (align_bytes == 0
20355 && count < ((unsigned HOST_WIDE_INT) size_needed
20356 + desired_align - align))))
20358 /* It is possible that we copied enough so the main loop will not execute. */
20360 gcc_assert (size_needed > 1);
20361 if (label == NULL_RTX)
20362 label = gen_label_rtx ();
20363 emit_cmp_and_jump_insns (count_exp,
20364 GEN_INT (size_needed),
20365 LTU, 0, counter_mode (count_exp), 1, label);
20366 if (expected_size == -1
20367 || expected_size < (desired_align - align) / 2 + size_needed)
20368 predict_jump (REG_BR_PROB_BASE * 20 / 100);
20370 predict_jump (REG_BR_PROB_BASE * 60 / 100);
20373 if (label && size_needed == 1)
20375 emit_label (label);
20376 LABEL_NUSES (label) = 1;
20378 promoted_val = val_exp;
20379 epilogue_size_needed = 1;
20381 else if (label == NULL_RTX)
20382 epilogue_size_needed = size_needed;
20384 /* Step 3: Main loop. */
20390 gcc_unreachable ();
20392 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
20393 count_exp, QImode, 1, expected_size);
20396 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
20397 count_exp, Pmode, 1, expected_size);
20399 case unrolled_loop:
20400 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
20401 count_exp, Pmode, 4, expected_size);
20403 case rep_prefix_8_byte:
20404 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
20407 case rep_prefix_4_byte:
20408 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
20411 case rep_prefix_1_byte:
20412 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
20416 /* Properly adjust the offset of the destination memory for aliasing. */
20417 if (CONST_INT_P (count_exp))
20418 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
20419 (count / size_needed) * size_needed);
20421 dst = change_address (dst, BLKmode, destreg);
20423 /* Step 4: Epilogue to copy the remaining bytes. */
20427 /* When the main loop is done, COUNT_EXP might hold the original count,
20428 while we want to set only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
20429 Epilogue code will actually set COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
20430 bytes.  Compensate if needed. */
20432 if (size_needed < epilogue_size_needed)
20435 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
20436 GEN_INT (size_needed - 1), count_exp, 1,
20438 if (tmp != count_exp)
20439 emit_move_insn (count_exp, tmp);
20441 emit_label (label);
20442 LABEL_NUSES (label) = 1;
20445 if (count_exp != const0_rtx && epilogue_size_needed > 1)
20447 if (force_loopy_epilogue)
20448 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
20449 epilogue_size_needed);
20451 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
20452 epilogue_size_needed);
20454 if (jump_around_label)
20455 emit_label (jump_around_label);
20459 /* Expand the appropriate insns for doing strlen if not just doing repnz; scasb.
20462 out = result, initialized with the start address
20463 align_rtx = alignment of the address.
20464 scratch = scratch register, initialized with the start address when
20465 not aligned, otherwise undefined
20467 This is just the body. It needs the initializations mentioned above and
20468 some address computing at the end. These things are done in i386.md. */
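
/* A rough sketch of the emitted control flow (an illustration, not the
   exact RTL):

     while ((uintptr_t) out & 3)          // align OUT, byte by byte
       if (*out == 0) goto done; else out++;
     do                                   // scan one word per iteration
       { word = *(unsigned int *) out; out += 4; }
     while (!((word - 0x01010101) & ~word & 0x80808080));
     // then step OUT back to the zero byte within the last word
   done: ...  */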
20471 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
20475 rtx align_2_label = NULL_RTX;
20476 rtx align_3_label = NULL_RTX;
20477 rtx align_4_label = gen_label_rtx ();
20478 rtx end_0_label = gen_label_rtx ();
20480 rtx tmpreg = gen_reg_rtx (SImode);
20481 rtx scratch = gen_reg_rtx (SImode);
20485 if (CONST_INT_P (align_rtx))
20486 align = INTVAL (align_rtx);
20488 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
20490 /* Is there a known alignment and is it less than 4? */
20493 rtx scratch1 = gen_reg_rtx (Pmode);
20494 emit_move_insn (scratch1, out);
20495 /* Is there a known alignment and is it not 2? */
20498 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
20499 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
20501 /* Leave just the 3 lower bits. */
20502 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
20503 NULL_RTX, 0, OPTAB_WIDEN);
20505 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
20506 Pmode, 1, align_4_label);
20507 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
20508 Pmode, 1, align_2_label);
20509 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
20510 Pmode, 1, align_3_label);
20514 /* Since the alignment is 2, we have to check 2 or 0 bytes;
20515 check whether it is aligned to a 4-byte boundary. */
20517 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
20518 NULL_RTX, 0, OPTAB_WIDEN);
20520 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
20521 Pmode, 1, align_4_label);
20524 mem = change_address (src, QImode, out);
20526 /* Now compare the bytes. */
20528 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
20529 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
20530 QImode, 1, end_0_label);
20532 /* Increment the address. */
20533 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
20535 /* Not needed with an alignment of 2 */
20538 emit_label (align_2_label);
20540 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
20543 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
20545 emit_label (align_3_label);
20548 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
20551 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
20554 /* Generate a loop to check 4 bytes at a time.  It is not a good idea to
20555 align this loop: it only bloats the code and does not help speed. */
20557 emit_label (align_4_label);
20559 mem = change_address (src, SImode, out);
20560 emit_move_insn (scratch, mem);
20561 emit_insn (ix86_gen_add3 (out, out, GEN_INT (4)));
20563 /* This formula yields a nonzero result iff one of the bytes is zero.
20564 This saves three branches inside the loop and many cycles. */
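/* Worked example (assumed value): scratch == 0x41420043 has a zero in its
   second-lowest byte; scratch - 0x01010101 == 0x4040ff42,
   ~scratch == 0xbebdffbc, and 0x4040ff42 & 0xbebdffbc & 0x80808080
   == 0x00008000, which is nonzero exactly because of that zero byte. */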
20566 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
20567 emit_insn (gen_one_cmplsi2 (scratch, scratch));
20568 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
20569 emit_insn (gen_andsi3 (tmpreg, tmpreg,
20570 gen_int_mode (0x80808080, SImode)));
20571 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
20576 rtx reg = gen_reg_rtx (SImode);
20577 rtx reg2 = gen_reg_rtx (Pmode);
20578 emit_move_insn (reg, tmpreg);
20579 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
20581 /* If zero is not in the first two bytes, move two bytes forward. */
20582 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
20583 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
20584 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
20585 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
20586 gen_rtx_IF_THEN_ELSE (SImode, tmp,
20589 /* Emit lea manually to avoid clobbering of flags. */
20590 emit_insn (gen_rtx_SET (SImode, reg2,
20591 gen_rtx_PLUS (Pmode, out, const2_rtx)));
20593 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
20594 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
20595 emit_insn (gen_rtx_SET (VOIDmode, out,
20596 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
20602 rtx end_2_label = gen_label_rtx ();
20603 /* Is zero in the first two bytes? */
20605 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
20606 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
20607 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
20608 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
20609 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
20611 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
20612 JUMP_LABEL (tmp) = end_2_label;
20614 /* Not in the first two. Move two bytes forward. */
20615 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
20616 emit_insn (ix86_gen_add3 (out, out, const2_rtx));
20618 emit_label (end_2_label);
20622 /* Avoid branch in fixing the byte. */
20623 tmpreg = gen_lowpart (QImode, tmpreg);
20624 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
20625 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
20626 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
20627 emit_insn (ix86_gen_sub3_carry (out, out, GEN_INT (3), tmp, cmp));
20629 emit_label (end_0_label);
20632 /* Expand strlen. */
20635 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
20637 rtx addr, scratch1, scratch2, scratch3, scratch4;
20639 /* The generic case of the strlen expander is long.  Avoid expanding it
20640 unless TARGET_INLINE_ALL_STRINGOPS. */
20642 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
20643 && !TARGET_INLINE_ALL_STRINGOPS
20644 && !optimize_insn_for_size_p ()
20645 && (!CONST_INT_P (align) || INTVAL (align) < 4))
20648 addr = force_reg (Pmode, XEXP (src, 0));
20649 scratch1 = gen_reg_rtx (Pmode);
20651 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
20652 && !optimize_insn_for_size_p ())
20654 /* Well, it seems that some optimizer does not combine a call like
20655 foo (strlen (bar), strlen (bar));
20656 when the move and the subtraction are done here.  It does calculate
20657 the length just once when these instructions are done inside of
20658 output_strlen_unroll ().  But I think that since &bar[strlen (bar)] is
20659 often used and I use one fewer register for the lifetime of
20660 output_strlen_unroll (), this is better. */
20662 emit_move_insn (out, addr);
20664 ix86_expand_strlensi_unroll_1 (out, src, align);
20666 /* strlensi_unroll_1 returns the address of the zero at the end of
20667 the string, like memchr(), so compute the length by subtracting
20668 the start address. */
20669 emit_insn (ix86_gen_sub3 (out, out, addr));
20675 /* Can't use this if the user has appropriated eax, ecx, or edi. */
20676 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
20679 scratch2 = gen_reg_rtx (Pmode);
20680 scratch3 = gen_reg_rtx (Pmode);
20681 scratch4 = force_reg (Pmode, constm1_rtx);
20683 emit_move_insn (scratch3, addr);
20684 eoschar = force_reg (QImode, eoschar);
20686 src = replace_equiv_address_nv (src, scratch3);
20688 /* If .md starts supporting :P, this can be done in .md. */
20689 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
20690 scratch4), UNSPEC_SCAS);
20691 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
20692 emit_insn (ix86_gen_one_cmpl2 (scratch2, scratch1));
20693 emit_insn (ix86_gen_add3 (out, scratch2, constm1_rtx));
20698 /* For a given symbol (function), construct code to compute the address of its
20699 PLT entry in the large x86-64 PIC model. */
20701 construct_plt_address (rtx symbol)
20703 rtx tmp = gen_reg_rtx (Pmode);
20704 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
20706 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
20707 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
20709 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
20710 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
20715 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
20717 rtx pop, int sibcall)
20719 rtx use = NULL, call;
20721 if (pop == const0_rtx)
20723 gcc_assert (!TARGET_64BIT || !pop);
20725 if (TARGET_MACHO && !TARGET_64BIT)
20728 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
20729 fnaddr = machopic_indirect_call_target (fnaddr);
20734 /* Static functions and indirect calls don't need the pic register. */
20735 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
20736 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
20737 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
20738 use_reg (&use, pic_offset_table_rtx);
20741 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
20743 rtx al = gen_rtx_REG (QImode, AX_REG);
20744 emit_move_insn (al, callarg2);
20745 use_reg (&use, al);
20748 if (ix86_cmodel == CM_LARGE_PIC
20750 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
20751 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
20752 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
20754 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
20755 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
20757 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
20758 fnaddr = gen_rtx_MEM (QImode, fnaddr);
20761 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
20763 call = gen_rtx_SET (VOIDmode, retval, call);
20766 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
20767 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
20768 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
20771 && ix86_cfun_abi () == MS_ABI
20772 && (!callarg2 || INTVAL (callarg2) != -2))
20774 /* We need to represent that SI and DI registers are clobbered by SYSV calls. */
20776 static int clobbered_registers[] = {
20777 XMM6_REG, XMM7_REG, XMM8_REG,
20778 XMM9_REG, XMM10_REG, XMM11_REG,
20779 XMM12_REG, XMM13_REG, XMM14_REG,
20780 XMM15_REG, SI_REG, DI_REG
20783 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
20784 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
20785 UNSPEC_MS_TO_SYSV_CALL);
20789 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
20790 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
20793 (SSE_REGNO_P (clobbered_registers[i])
20795 clobbered_registers[i]));
20797 call = gen_rtx_PARALLEL (VOIDmode,
20798 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
20802 call = emit_call_insn (call);
20804 CALL_INSN_FUNCTION_USAGE (call) = use;
20810 /* Clear stack slot assignments remembered from previous functions.
20811 This is called from INIT_EXPANDERS once before RTL is emitted for each function. */
20814 static struct machine_function *
20815 ix86_init_machine_status (void)
20817 struct machine_function *f;
20819 f = ggc_alloc_cleared_machine_function ();
20820 f->use_fast_prologue_epilogue_nregs = -1;
20821 f->tls_descriptor_call_expanded_p = 0;
20822 f->call_abi = ix86_abi;
20827 /* Return a MEM corresponding to a stack slot with mode MODE.
20828 Allocate a new slot if necessary.
20830 The RTL for a function can have several slots available: N is
20831 which slot to use. */
20834 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
20836 struct stack_local_entry *s;
20838 gcc_assert (n < MAX_386_STACK_LOCALS);
20840 /* Virtual slot is valid only before vregs are instantiated. */
20841 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
20843 for (s = ix86_stack_locals; s; s = s->next)
20844 if (s->mode == mode && s->n == n)
20845 return copy_rtx (s->rtl);
20847 s = ggc_alloc_stack_local_entry ();
20850 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
20852 s->next = ix86_stack_locals;
20853 ix86_stack_locals = s;
20857 /* Construct the SYMBOL_REF for the tls_get_addr function. */
20859 static GTY(()) rtx ix86_tls_symbol;
20861 ix86_tls_get_addr (void)
20864 if (!ix86_tls_symbol)
20866 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
20867 (TARGET_ANY_GNU_TLS
20869 ? "___tls_get_addr"
20870 : "__tls_get_addr");
20873 return ix86_tls_symbol;
20876 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
20878 static GTY(()) rtx ix86_tls_module_base_symbol;
20880 ix86_tls_module_base (void)
20883 if (!ix86_tls_module_base_symbol)
20885 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
20886 "_TLS_MODULE_BASE_");
20887 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
20888 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
20891 return ix86_tls_module_base_symbol;
20894 /* Calculate the length of the memory address in the instruction
20895 encoding. Does not include the one-byte modrm, opcode, or prefix. */
20898 memory_address_length (rtx addr)
20900 struct ix86_address parts;
20901 rtx base, index, disp;
20905 if (GET_CODE (addr) == PRE_DEC
20906 || GET_CODE (addr) == POST_INC
20907 || GET_CODE (addr) == PRE_MODIFY
20908 || GET_CODE (addr) == POST_MODIFY)
20911 ok = ix86_decompose_address (addr, &parts);
20914 if (parts.base && GET_CODE (parts.base) == SUBREG)
20915 parts.base = SUBREG_REG (parts.base);
20916 if (parts.index && GET_CODE (parts.index) == SUBREG)
20917 parts.index = SUBREG_REG (parts.index);
20920 index = parts.index;
20925 - esp as the base always wants an index,
20926 - ebp as the base always wants a displacement,
20927 - r12 as the base always wants an index,
20928 - r13 as the base always wants a displacement. */
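
/* Illustrative counts of the extra bytes this function returns:
   (%eax) -> 0; (%esp) -> 1 (SIB byte); (%ebp) -> 1 (disp8);
   4(%eax) -> 1 (disp8); a bare disp32 -> 4. */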
20930 /* Register Indirect. */
20931 if (base && !index && !disp)
20933 /* esp (for its index) and ebp (for its displacement) need
20934 the two-byte modrm form.  Similarly for r12 and r13 in 64-bit mode. */
20937 && (addr == arg_pointer_rtx
20938 || addr == frame_pointer_rtx
20939 || REGNO (addr) == SP_REG
20940 || REGNO (addr) == BP_REG
20941 || REGNO (addr) == R12_REG
20942 || REGNO (addr) == R13_REG))
20946 /* Direct Addressing.  In 64-bit mode mod 00 r/m 5
20947 is not disp32, but disp32(%rip), so for plain disp32 a
20948 SIB byte is needed, unless print_operand_address
20949 optimizes it into disp32(%rip) or (%rip) is implied. */
20951 else if (disp && !base && !index)
20958 if (GET_CODE (disp) == CONST)
20959 symbol = XEXP (disp, 0);
20960 if (GET_CODE (symbol) == PLUS
20961 && CONST_INT_P (XEXP (symbol, 1)))
20962 symbol = XEXP (symbol, 0);
20964 if (GET_CODE (symbol) != LABEL_REF
20965 && (GET_CODE (symbol) != SYMBOL_REF
20966 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
20967 && (GET_CODE (symbol) != UNSPEC
20968 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
20969 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
20976 /* Find the length of the displacement constant. */
20979 if (base && satisfies_constraint_K (disp))
20984 /* ebp always wants a displacement. Similarly r13. */
20985 else if (base && REG_P (base)
20986 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
20989 /* An index requires the two-byte modrm form.... */
20991 /* ...like esp (or r12), which always wants an index. */
20992 || base == arg_pointer_rtx
20993 || base == frame_pointer_rtx
20994 || (base && REG_P (base)
20995 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
21012 /* Compute default value for "length_immediate" attribute.  When SHORTFORM
21013 is set, expect that the insn has an 8-bit immediate alternative. */
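/* E.g., "add $4, %eax" fits the imm8 alternative, so with SHORTFORM the
   immediate contributes 1 byte; "add $1000, %eax" needs a full imm32 and
   contributes 4 bytes. */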
21015 ix86_attr_length_immediate_default (rtx insn, int shortform)
21019 extract_insn_cached (insn);
21020 for (i = recog_data.n_operands - 1; i >= 0; --i)
21021 if (CONSTANT_P (recog_data.operand[i]))
21023 enum attr_mode mode = get_attr_mode (insn);
21026 if (shortform && CONST_INT_P (recog_data.operand[i]))
21028 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
21035 ival = trunc_int_for_mode (ival, HImode);
21038 ival = trunc_int_for_mode (ival, SImode);
21043 if (IN_RANGE (ival, -128, 127))
21060 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
21065 fatal_insn ("unknown insn mode", insn);
21070 /* Compute default value for "length_address" attribute. */
21072 ix86_attr_length_address_default (rtx insn)
21076 if (get_attr_type (insn) == TYPE_LEA)
21078 rtx set = PATTERN (insn), addr;
21080 if (GET_CODE (set) == PARALLEL)
21081 set = XVECEXP (set, 0, 0);
21083 gcc_assert (GET_CODE (set) == SET);
21085 addr = SET_SRC (set);
21086 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
21088 if (GET_CODE (addr) == ZERO_EXTEND)
21089 addr = XEXP (addr, 0);
21090 if (GET_CODE (addr) == SUBREG)
21091 addr = SUBREG_REG (addr);
21094 return memory_address_length (addr);
21097 extract_insn_cached (insn);
21098 for (i = recog_data.n_operands - 1; i >= 0; --i)
21099 if (MEM_P (recog_data.operand[i]))
21101 constrain_operands_cached (reload_completed);
21102 if (which_alternative != -1)
21104 const char *constraints = recog_data.constraints[i];
21105 int alt = which_alternative;
21107 while (*constraints == '=' || *constraints == '+')
21110 while (*constraints++ != ',')
21112 /* Skip ignored operands. */
21113 if (*constraints == 'X')
21116 return memory_address_length (XEXP (recog_data.operand[i], 0));
21121 /* Compute default value for "length_vex" attribute.  It includes the
21122 2- or 3-byte VEX prefix and 1 opcode byte. */
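/* E.g., an insn in the 0f opcode map needing neither REX.W, REX.X nor
   REX.B fits the 2-byte (0xc5) prefix: 2 + 1 == 3.  Otherwise the 3-byte
   (0xc4) prefix is used: 3 + 1 == 4. */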
21125 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
21130 /* Only the 0f opcode map can use the 2-byte VEX prefix, and the VEX W bit
21131 requires the 3-byte VEX prefix. */
21132 if (!has_0f_opcode || has_vex_w)
21135 /* We can always use the 2-byte VEX prefix in 32-bit mode. */
21139 extract_insn_cached (insn);
21141 for (i = recog_data.n_operands - 1; i >= 0; --i)
21142 if (REG_P (recog_data.operand[i]))
21144 /* The REX.W bit requires the 3-byte VEX prefix. */
21145 if (GET_MODE (recog_data.operand[i]) == DImode
21146 && GENERAL_REG_P (recog_data.operand[i]))
21151 /* The REX.X or REX.B bits require the 3-byte VEX prefix. */
21152 if (MEM_P (recog_data.operand[i])
21153 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
21160 /* Return the maximum number of instructions a cpu can issue. */
21163 ix86_issue_rate (void)
21167 case PROCESSOR_PENTIUM:
21168 case PROCESSOR_ATOM:
21172 case PROCESSOR_PENTIUMPRO:
21173 case PROCESSOR_PENTIUM4:
21174 case PROCESSOR_ATHLON:
21176 case PROCESSOR_AMDFAM10:
21177 case PROCESSOR_NOCONA:
21178 case PROCESSOR_GENERIC32:
21179 case PROCESSOR_GENERIC64:
21180 case PROCESSOR_BDVER1:
21183 case PROCESSOR_CORE2:
21191 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads the flags
21192 set by DEP_INSN and nothing else set by DEP_INSN. */
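/* E.g., for the pair "cmp %eax, %ebx ; je label" the jump (INSN) reads
   only the flags, and the compare (DEP_INSN) sets nothing else that INSN
   uses, so this returns true. */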
21195 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
21199 /* Simplify the test for uninteresting insns. */
21200 if (insn_type != TYPE_SETCC
21201 && insn_type != TYPE_ICMOV
21202 && insn_type != TYPE_FCMOV
21203 && insn_type != TYPE_IBR)
21206 if ((set = single_set (dep_insn)) != 0)
21208 set = SET_DEST (set);
21211 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
21212 && XVECLEN (PATTERN (dep_insn), 0) == 2
21213 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
21214 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
21216 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
21217 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
21222 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
21225 /* This test is true if the dependent insn reads the flags but
21226 not any other potentially set register. */
21227 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
21230 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
21236 /* Return true iff USE_INSN has a memory address with operands set by SET_INSN. */
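/* E.g., in "add $4, %ebx ; mov (%ebx), %eax" the load address of USE_INSN
   depends on the add (SET_INSN), so this returns true; on Pentium such an
   address generation interlock costs an extra cycle. */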
21240 ix86_agi_dependent (rtx set_insn, rtx use_insn)
21243 extract_insn_cached (use_insn);
21244 for (i = recog_data.n_operands - 1; i >= 0; --i)
21245 if (MEM_P (recog_data.operand[i]))
21247 rtx addr = XEXP (recog_data.operand[i], 0);
21248 return modified_in_p (addr, set_insn) != 0;
21254 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
21256 enum attr_type insn_type, dep_insn_type;
21257 enum attr_memory memory;
21259 int dep_insn_code_number;
21261 /* Anti and output dependencies have zero cost on all CPUs. */
21262 if (REG_NOTE_KIND (link) != 0)
21265 dep_insn_code_number = recog_memoized (dep_insn);
21267 /* If we can't recognize the insns, we can't really do anything. */
21268 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
21271 insn_type = get_attr_type (insn);
21272 dep_insn_type = get_attr_type (dep_insn);
21276 case PROCESSOR_PENTIUM:
21277 /* Address Generation Interlock adds a cycle of latency. */
21278 if (insn_type == TYPE_LEA)
21280 rtx addr = PATTERN (insn);
21282 if (GET_CODE (addr) == PARALLEL)
21283 addr = XVECEXP (addr, 0, 0);
21285 gcc_assert (GET_CODE (addr) == SET);
21287 addr = SET_SRC (addr);
21288 if (modified_in_p (addr, dep_insn))
21291 else if (ix86_agi_dependent (dep_insn, insn))
21294 /* ??? Compares pair with jump/setcc. */
21295 if (ix86_flags_dependent (insn, dep_insn, insn_type))
21298 /* Floating point stores require value to be ready one cycle earlier. */
21299 if (insn_type == TYPE_FMOV
21300 && get_attr_memory (insn) == MEMORY_STORE
21301 && !ix86_agi_dependent (dep_insn, insn))
21305 case PROCESSOR_PENTIUMPRO:
21306 memory = get_attr_memory (insn);
21308 /* INT->FP conversion is expensive. */
21309 if (get_attr_fp_int_src (dep_insn))
21312 /* There is one cycle extra latency between an FP op and a store. */
21313 if (insn_type == TYPE_FMOV
21314 && (set = single_set (dep_insn)) != NULL_RTX
21315 && (set2 = single_set (insn)) != NULL_RTX
21316 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
21317 && MEM_P (SET_DEST (set2)))
21320 /* Show the ability of the reorder buffer to hide the latency of a load by
21321 executing it in parallel with the previous instruction when the
21322 previous instruction is not needed to compute the address. */
21323 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
21324 && !ix86_agi_dependent (dep_insn, insn))
21326 /* Claim that moves take one cycle, as the core can issue one load
21327 at a time and the next load can start a cycle later. */
21328 if (dep_insn_type == TYPE_IMOV
21329 || dep_insn_type == TYPE_FMOV)
21337 memory = get_attr_memory (insn);
21339 /* The esp dependency is resolved before the instruction is really finished. */
21341 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
21342 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
21345 /* INT->FP conversion is expensive. */
21346 if (get_attr_fp_int_src (dep_insn))
21349 /* Show the ability of the reorder buffer to hide the latency of a load by
21350 executing it in parallel with the previous instruction when the
21351 previous instruction is not needed to compute the address. */
21352 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
21353 && !ix86_agi_dependent (dep_insn, insn))
21355 /* Claim that moves take one cycle, as the core can issue one load
21356 at a time and the next load can start a cycle later. */
21357 if (dep_insn_type == TYPE_IMOV
21358 || dep_insn_type == TYPE_FMOV)
21367 case PROCESSOR_ATHLON:
21369 case PROCESSOR_AMDFAM10:
21370 case PROCESSOR_BDVER1:
21371 case PROCESSOR_ATOM:
21372 case PROCESSOR_GENERIC32:
21373 case PROCESSOR_GENERIC64:
21374 memory = get_attr_memory (insn);
21376 /* Show the ability of the reorder buffer to hide the latency of a load by
21377 executing it in parallel with the previous instruction when the
21378 previous instruction is not needed to compute the address. */
21379 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
21380 && !ix86_agi_dependent (dep_insn, insn))
21382 enum attr_unit unit = get_attr_unit (insn);
21385 /* Because of the difference between the length of the integer and
21386 floating unit pipeline preparation stages, the memory operands
21387 for floating point are cheaper.
21389 ??? For Athlon the difference is most probably 2. */
21390 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
21393 loadcost = TARGET_ATHLON ? 2 : 0;
21395 if (cost >= loadcost)
21408 /* How many alternative schedules to try.  This should be as wide as the
21409 scheduling freedom in the DFA, but no wider.  Making this value too
21410 large results in extra work for the scheduler. */
21413 ia32_multipass_dfa_lookahead (void)
21417 case PROCESSOR_PENTIUM:
21420 case PROCESSOR_PENTIUMPRO:
21430 /* Compute the alignment given to a constant that is being placed in memory.
21431 EXP is the constant and ALIGN is the alignment that the object would ordinarily have.
21433 The value of this function is used instead of that alignment to align the object. */
21437 ix86_constant_alignment (tree exp, int align)
21439 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
21440 || TREE_CODE (exp) == INTEGER_CST)
21442 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
21444 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
21447 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
21448 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
21449 return BITS_PER_WORD;
21454 /* Compute the alignment for a static variable.
21455 TYPE is the data type, and ALIGN is the alignment that
21456 the object would ordinarily have. The value of this function is used
21457 instead of that alignment to align the object. */
21460 ix86_data_alignment (tree type, int align)
21462 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
21464 if (AGGREGATE_TYPE_P (type)
21465 && TYPE_SIZE (type)
21466 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
21467 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
21468 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
21469 && align < max_align)
21472 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
21473 to a 16-byte boundary. */
21476 if (AGGREGATE_TYPE_P (type)
21477 && TYPE_SIZE (type)
21478 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
21479 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
21480 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
21484 if (TREE_CODE (type) == ARRAY_TYPE)
21486 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
21488 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
21491 else if (TREE_CODE (type) == COMPLEX_TYPE)
21494 if (TYPE_MODE (type) == DCmode && align < 64)
21496 if ((TYPE_MODE (type) == XCmode
21497 || TYPE_MODE (type) == TCmode) && align < 128)
21500 else if ((TREE_CODE (type) == RECORD_TYPE
21501 || TREE_CODE (type) == UNION_TYPE
21502 || TREE_CODE (type) == QUAL_UNION_TYPE)
21503 && TYPE_FIELDS (type))
21505 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
21507 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
21510 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
21511 || TREE_CODE (type) == INTEGER_TYPE)
21513 if (TYPE_MODE (type) == DFmode && align < 64)
21515 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
21522 /* Compute the alignment for a local variable or a stack slot. EXP is
21523 the data type or decl itself, MODE is the widest mode available and
21524 ALIGN is the alignment that the object would ordinarily have. The
21525 value of this macro is used instead of that alignment to align the
21529 ix86_local_alignment (tree exp, enum machine_mode mode,
21530 unsigned int align)
21534 if (exp && DECL_P (exp))
21536 type = TREE_TYPE (exp);
21545 /* Don't do dynamic stack realignment for long long objects with
21546 -mpreferred-stack-boundary=2. */
21549 && ix86_preferred_stack_boundary < 64
21550 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
21551 && (!type || !TYPE_USER_ALIGN (type))
21552 && (!decl || !DECL_USER_ALIGN (decl)))
21555 /* If TYPE is NULL, we are allocating a stack slot for caller-save
21556 register in MODE.  We will return the largest alignment of XF and DF. */
21560 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
21561 align = GET_MODE_ALIGNMENT (DFmode);
21565 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
21566 to a 16-byte boundary.  The exact wording is:
21568 An array uses the same alignment as its elements, except that a local or
21569 global array variable of length at least 16 bytes or
21570 a C99 variable-length array variable always has alignment of at least 16 bytes.
21572 This was added to allow use of aligned SSE instructions on arrays.  This
21573 rule is meant for static storage (where the compiler cannot do the analysis
21574 by itself).  We follow it for automatic variables only when convenient.
21575 We fully control everything in the function being compiled, and functions
21576 from other units cannot rely on the alignment.
21578 Exclude the va_list type.  It is the common case of a local array where
21579 we cannot benefit from the alignment. */
21580 if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
21583 if (AGGREGATE_TYPE_P (type)
21584 && (TYPE_MAIN_VARIANT (type)
21585 != TYPE_MAIN_VARIANT (va_list_type_node))
21586 && TYPE_SIZE (type)
21587 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
21588 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
21589 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
21592 if (TREE_CODE (type) == ARRAY_TYPE)
21594 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
21596 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
21599 else if (TREE_CODE (type) == COMPLEX_TYPE)
21601 if (TYPE_MODE (type) == DCmode && align < 64)
21603 if ((TYPE_MODE (type) == XCmode
21604 || TYPE_MODE (type) == TCmode) && align < 128)
21607 else if ((TREE_CODE (type) == RECORD_TYPE
21608 || TREE_CODE (type) == UNION_TYPE
21609 || TREE_CODE (type) == QUAL_UNION_TYPE)
21610 && TYPE_FIELDS (type))
21612 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
21614 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
21617 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
21618 || TREE_CODE (type) == INTEGER_TYPE)
21621 if (TYPE_MODE (type) == DFmode && align < 64)
21623 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
21629 /* Compute the minimum required alignment for dynamic stack realignment
21630 purposes for a local variable, parameter or a stack slot. EXP is
21631 the data type or decl itself, MODE is its mode and ALIGN is the
21632 alignment that the object would ordinarily have. */
21635 ix86_minimum_alignment (tree exp, enum machine_mode mode,
21636 unsigned int align)
21640 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
21643 if (exp && DECL_P (exp))
21645 type = TREE_TYPE (exp);
21654 /* Don't do dynamic stack realignment for long long objects with
21655 -mpreferred-stack-boundary=2. */
21656 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
21657 && (!type || !TYPE_USER_ALIGN (type))
21658 && (!decl || !DECL_USER_ALIGN (decl)))
21664 /* Find a location for the static chain incoming to a nested function.
21665 This is a register, unless all free registers are used by arguments. */
21668 ix86_static_chain (const_tree fndecl, bool incoming_p)
21672 if (!DECL_STATIC_CHAIN (fndecl))
21677 /* We always use R10 in 64-bit mode. */
21683 /* By default in 32-bit mode we use ECX to pass the static chain. */
21686 fntype = TREE_TYPE (fndecl);
21687 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
21689 /* Fastcall functions use ecx/edx for arguments, which leaves
21690 us with EAX for the static chain. */
21693 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
21695 /* Thiscall functions use ecx for arguments, which leaves
21696 us with EAX for the static chain. */
21699 else if (ix86_function_regparm (fntype, fndecl) == 3)
21701 /* For regparm 3, we have no free call-clobbered registers in
21702 which to store the static chain. In order to implement this,
21703 we have the trampoline push the static chain to the stack.
21704 However, we can't push a value below the return address when
21705 we call the nested function directly, so we have to use an
21706 alternate entry point. For this we use ESI, and have the
21707 alternate entry point push ESI, so that things appear the
21708 same once we're executing the nested function. */
21711 if (fndecl == current_function_decl)
21712 ix86_static_chain_on_stack = true;
21713 return gen_frame_mem (SImode,
21714 plus_constant (arg_pointer_rtx, -8));
21720 return gen_rtx_REG (Pmode, regno);
21723 /* Emit RTL insns to initialize the variable parts of a trampoline.
21724 FNDECL is the decl of the target address; M_TRAMP is a MEM for
21725 the trampoline, and CHAIN_VALUE is an RTX for the static chain
21726 to be passed to the target function. */
21729 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
21733 fnaddr = XEXP (DECL_RTL (fndecl), 0);
21740 /* Depending on the static chain location, either load a register
21741 with a constant, or push the constant to the stack. All of the
21742 instructions are the same size. */
21743 chain = ix86_static_chain (fndecl, true);
21746 if (REGNO (chain) == CX_REG)
21748 else if (REGNO (chain) == AX_REG)
21751 gcc_unreachable ();
21756 mem = adjust_address (m_tramp, QImode, 0);
21757 emit_move_insn (mem, gen_int_mode (opcode, QImode));
21759 mem = adjust_address (m_tramp, SImode, 1);
21760 emit_move_insn (mem, chain_value);
21762 /* Compute offset from the end of the jmp to the target function.
21763 In the case in which the trampoline stores the static chain on
21764 the stack, we need to skip the first insn which pushes the
21765 (call-saved) register static chain; this push is 1 byte. */
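/* Assumed byte layout of the resulting 10-byte 32-bit trampoline:
     0: 0xb9/0xb8 (mov $imm32 to %ecx/%eax) or 0x68 (push $imm32)
     1: <chain_value, 4 bytes>
     5: 0xe9 (jmp rel32)
     6: <disp, 4 bytes>
   With the push form, the displacement lands 1 byte past FNADDR, so the
   1-byte push at the function's alternate entry is skipped. */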
21766 disp = expand_binop (SImode, sub_optab, fnaddr,
21767 plus_constant (XEXP (m_tramp, 0),
21768 MEM_P (chain) ? 9 : 10),
21769 NULL_RTX, 1, OPTAB_DIRECT);
21771 mem = adjust_address (m_tramp, QImode, 5);
21772 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
21774 mem = adjust_address (m_tramp, SImode, 6);
21775 emit_move_insn (mem, disp);
21781 /* Load the function address into r11.  Try to load the address using
21782 the shorter movl instead of movabs.  We may want to support
21783 movq for kernel mode, but the kernel does not use trampolines at the moment. */
21785 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
21787 fnaddr = copy_to_mode_reg (DImode, fnaddr);
21789 mem = adjust_address (m_tramp, HImode, offset);
21790 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
21792 mem = adjust_address (m_tramp, SImode, offset + 2);
21793 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
21798 mem = adjust_address (m_tramp, HImode, offset);
21799 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
21801 mem = adjust_address (m_tramp, DImode, offset + 2);
21802 emit_move_insn (mem, fnaddr);
21806 /* Load static chain using movabs to r10. */
21807 mem = adjust_address (m_tramp, HImode, offset);
21808 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
21810 mem = adjust_address (m_tramp, DImode, offset + 2);
21811 emit_move_insn (mem, chain_value);
21814 /* Jump to r11; the last (unused) byte is a nop, only there to
21815 pad the write out to a single 32-bit store. */
21816 mem = adjust_address (m_tramp, SImode, offset);
21817 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
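/* In memory order the stored bytes are 0x49 0xff 0xe3 0x90,
   i.e. "rex.WB jmp *%r11" followed by the padding nop. */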
21820 gcc_assert (offset <= TRAMPOLINE_SIZE);
21823 #ifdef ENABLE_EXECUTE_STACK
21824 #ifdef CHECK_EXECUTE_STACK_ENABLED
21825 if (CHECK_EXECUTE_STACK_ENABLED)
21827 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
21828 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
21832 /* The following file contains several enumerations and data structures
21833 built from the definitions in i386-builtin-types.def. */
21835 #include "i386-builtin-types.inc"
21837 /* Table for the ix86 builtin non-function types. */
21838 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
21840 /* Retrieve an element from the above table, building some of
21841 the types lazily. */
21844 ix86_get_builtin_type (enum ix86_builtin_type tcode)
21846 unsigned int index;
21849 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
21851 type = ix86_builtin_type_tab[(int) tcode];
21855 gcc_assert (tcode > IX86_BT_LAST_PRIM);
21856 if (tcode <= IX86_BT_LAST_VECT)
21858 enum machine_mode mode;
21860 index = tcode - IX86_BT_LAST_PRIM - 1;
21861 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
21862 mode = ix86_builtin_type_vect_mode[index];
21864 type = build_vector_type_for_mode (itype, mode);
21870 index = tcode - IX86_BT_LAST_VECT - 1;
21871 if (tcode <= IX86_BT_LAST_PTR)
21872 quals = TYPE_UNQUALIFIED;
21874 quals = TYPE_QUAL_CONST;
21876 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
21877 if (quals != TYPE_UNQUALIFIED)
21878 itype = build_qualified_type (itype, quals);
21880 type = build_pointer_type (itype);
21883 ix86_builtin_type_tab[(int) tcode] = type;
21887 /* Table for the ix86 builtin function types. */
21888 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
21890 /* Retrieve an element from the above table, building some of
21891 the types lazily. */
21894 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
21898 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
21900 type = ix86_builtin_func_type_tab[(int) tcode];
21904 if (tcode <= IX86_BT_LAST_FUNC)
21906 unsigned start = ix86_builtin_func_start[(int) tcode];
21907 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
21908 tree rtype, atype, args = void_list_node;
21911 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
21912 for (i = after - 1; i > start; --i)
21914 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
21915 args = tree_cons (NULL, atype, args);
21918 type = build_function_type (rtype, args);
21922 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
21923 enum ix86_builtin_func_type icode;
21925 icode = ix86_builtin_func_alias_base[index];
21926 type = ix86_get_builtin_func_type (icode);
21929 ix86_builtin_func_type_tab[(int) tcode] = type;
21934 /* Codes for all the SSE/MMX builtins. */
21937 IX86_BUILTIN_ADDPS,
21938 IX86_BUILTIN_ADDSS,
21939 IX86_BUILTIN_DIVPS,
21940 IX86_BUILTIN_DIVSS,
21941 IX86_BUILTIN_MULPS,
21942 IX86_BUILTIN_MULSS,
21943 IX86_BUILTIN_SUBPS,
21944 IX86_BUILTIN_SUBSS,
21946 IX86_BUILTIN_CMPEQPS,
21947 IX86_BUILTIN_CMPLTPS,
21948 IX86_BUILTIN_CMPLEPS,
21949 IX86_BUILTIN_CMPGTPS,
21950 IX86_BUILTIN_CMPGEPS,
21951 IX86_BUILTIN_CMPNEQPS,
21952 IX86_BUILTIN_CMPNLTPS,
21953 IX86_BUILTIN_CMPNLEPS,
21954 IX86_BUILTIN_CMPNGTPS,
21955 IX86_BUILTIN_CMPNGEPS,
21956 IX86_BUILTIN_CMPORDPS,
21957 IX86_BUILTIN_CMPUNORDPS,
21958 IX86_BUILTIN_CMPEQSS,
21959 IX86_BUILTIN_CMPLTSS,
21960 IX86_BUILTIN_CMPLESS,
21961 IX86_BUILTIN_CMPNEQSS,
21962 IX86_BUILTIN_CMPNLTSS,
21963 IX86_BUILTIN_CMPNLESS,
21964 IX86_BUILTIN_CMPNGTSS,
21965 IX86_BUILTIN_CMPNGESS,
21966 IX86_BUILTIN_CMPORDSS,
21967 IX86_BUILTIN_CMPUNORDSS,
21969 IX86_BUILTIN_COMIEQSS,
21970 IX86_BUILTIN_COMILTSS,
21971 IX86_BUILTIN_COMILESS,
21972 IX86_BUILTIN_COMIGTSS,
21973 IX86_BUILTIN_COMIGESS,
21974 IX86_BUILTIN_COMINEQSS,
21975 IX86_BUILTIN_UCOMIEQSS,
21976 IX86_BUILTIN_UCOMILTSS,
21977 IX86_BUILTIN_UCOMILESS,
21978 IX86_BUILTIN_UCOMIGTSS,
21979 IX86_BUILTIN_UCOMIGESS,
21980 IX86_BUILTIN_UCOMINEQSS,
21982 IX86_BUILTIN_CVTPI2PS,
21983 IX86_BUILTIN_CVTPS2PI,
21984 IX86_BUILTIN_CVTSI2SS,
21985 IX86_BUILTIN_CVTSI642SS,
21986 IX86_BUILTIN_CVTSS2SI,
21987 IX86_BUILTIN_CVTSS2SI64,
21988 IX86_BUILTIN_CVTTPS2PI,
21989 IX86_BUILTIN_CVTTSS2SI,
21990 IX86_BUILTIN_CVTTSS2SI64,
21992 IX86_BUILTIN_MAXPS,
21993 IX86_BUILTIN_MAXSS,
21994 IX86_BUILTIN_MINPS,
21995 IX86_BUILTIN_MINSS,
21997 IX86_BUILTIN_LOADUPS,
21998 IX86_BUILTIN_STOREUPS,
21999 IX86_BUILTIN_MOVSS,
22001 IX86_BUILTIN_MOVHLPS,
22002 IX86_BUILTIN_MOVLHPS,
22003 IX86_BUILTIN_LOADHPS,
22004 IX86_BUILTIN_LOADLPS,
22005 IX86_BUILTIN_STOREHPS,
22006 IX86_BUILTIN_STORELPS,
22008 IX86_BUILTIN_MASKMOVQ,
22009 IX86_BUILTIN_MOVMSKPS,
22010 IX86_BUILTIN_PMOVMSKB,
22012 IX86_BUILTIN_MOVNTPS,
22013 IX86_BUILTIN_MOVNTQ,
22015 IX86_BUILTIN_LOADDQU,
22016 IX86_BUILTIN_STOREDQU,
22018 IX86_BUILTIN_PACKSSWB,
22019 IX86_BUILTIN_PACKSSDW,
22020 IX86_BUILTIN_PACKUSWB,
22022 IX86_BUILTIN_PADDB,
22023 IX86_BUILTIN_PADDW,
22024 IX86_BUILTIN_PADDD,
22025 IX86_BUILTIN_PADDQ,
22026 IX86_BUILTIN_PADDSB,
22027 IX86_BUILTIN_PADDSW,
22028 IX86_BUILTIN_PADDUSB,
22029 IX86_BUILTIN_PADDUSW,
22030 IX86_BUILTIN_PSUBB,
22031 IX86_BUILTIN_PSUBW,
22032 IX86_BUILTIN_PSUBD,
22033 IX86_BUILTIN_PSUBQ,
22034 IX86_BUILTIN_PSUBSB,
22035 IX86_BUILTIN_PSUBSW,
22036 IX86_BUILTIN_PSUBUSB,
22037 IX86_BUILTIN_PSUBUSW,
22039 IX86_BUILTIN_PAND,
22040 IX86_BUILTIN_PANDN,
22041 IX86_BUILTIN_POR,
22042 IX86_BUILTIN_PXOR,
22044 IX86_BUILTIN_PAVGB,
22045 IX86_BUILTIN_PAVGW,
22047 IX86_BUILTIN_PCMPEQB,
22048 IX86_BUILTIN_PCMPEQW,
22049 IX86_BUILTIN_PCMPEQD,
22050 IX86_BUILTIN_PCMPGTB,
22051 IX86_BUILTIN_PCMPGTW,
22052 IX86_BUILTIN_PCMPGTD,
22054 IX86_BUILTIN_PMADDWD,
22056 IX86_BUILTIN_PMAXSW,
22057 IX86_BUILTIN_PMAXUB,
22058 IX86_BUILTIN_PMINSW,
22059 IX86_BUILTIN_PMINUB,
22061 IX86_BUILTIN_PMULHUW,
22062 IX86_BUILTIN_PMULHW,
22063 IX86_BUILTIN_PMULLW,
22065 IX86_BUILTIN_PSADBW,
22066 IX86_BUILTIN_PSHUFW,
22068 IX86_BUILTIN_PSLLW,
22069 IX86_BUILTIN_PSLLD,
22070 IX86_BUILTIN_PSLLQ,
22071 IX86_BUILTIN_PSRAW,
22072 IX86_BUILTIN_PSRAD,
22073 IX86_BUILTIN_PSRLW,
22074 IX86_BUILTIN_PSRLD,
22075 IX86_BUILTIN_PSRLQ,
22076 IX86_BUILTIN_PSLLWI,
22077 IX86_BUILTIN_PSLLDI,
22078 IX86_BUILTIN_PSLLQI,
22079 IX86_BUILTIN_PSRAWI,
22080 IX86_BUILTIN_PSRADI,
22081 IX86_BUILTIN_PSRLWI,
22082 IX86_BUILTIN_PSRLDI,
22083 IX86_BUILTIN_PSRLQI,
22085 IX86_BUILTIN_PUNPCKHBW,
22086 IX86_BUILTIN_PUNPCKHWD,
22087 IX86_BUILTIN_PUNPCKHDQ,
22088 IX86_BUILTIN_PUNPCKLBW,
22089 IX86_BUILTIN_PUNPCKLWD,
22090 IX86_BUILTIN_PUNPCKLDQ,
22092 IX86_BUILTIN_SHUFPS,
22094 IX86_BUILTIN_RCPPS,
22095 IX86_BUILTIN_RCPSS,
22096 IX86_BUILTIN_RSQRTPS,
22097 IX86_BUILTIN_RSQRTPS_NR,
22098 IX86_BUILTIN_RSQRTSS,
22099 IX86_BUILTIN_RSQRTF,
22100 IX86_BUILTIN_SQRTPS,
22101 IX86_BUILTIN_SQRTPS_NR,
22102 IX86_BUILTIN_SQRTSS,
22104 IX86_BUILTIN_UNPCKHPS,
22105 IX86_BUILTIN_UNPCKLPS,
22107 IX86_BUILTIN_ANDPS,
22108 IX86_BUILTIN_ANDNPS,
22109 IX86_BUILTIN_ORPS,
22110 IX86_BUILTIN_XORPS,
22112 IX86_BUILTIN_EMMS,
22113 IX86_BUILTIN_LDMXCSR,
22114 IX86_BUILTIN_STMXCSR,
22115 IX86_BUILTIN_SFENCE,
22117 /* 3DNow! Original */
22118 IX86_BUILTIN_FEMMS,
22119 IX86_BUILTIN_PAVGUSB,
22120 IX86_BUILTIN_PF2ID,
22121 IX86_BUILTIN_PFACC,
22122 IX86_BUILTIN_PFADD,
22123 IX86_BUILTIN_PFCMPEQ,
22124 IX86_BUILTIN_PFCMPGE,
22125 IX86_BUILTIN_PFCMPGT,
22126 IX86_BUILTIN_PFMAX,
22127 IX86_BUILTIN_PFMIN,
22128 IX86_BUILTIN_PFMUL,
22129 IX86_BUILTIN_PFRCP,
22130 IX86_BUILTIN_PFRCPIT1,
22131 IX86_BUILTIN_PFRCPIT2,
22132 IX86_BUILTIN_PFRSQIT1,
22133 IX86_BUILTIN_PFRSQRT,
22134 IX86_BUILTIN_PFSUB,
22135 IX86_BUILTIN_PFSUBR,
22136 IX86_BUILTIN_PI2FD,
22137 IX86_BUILTIN_PMULHRW,
22139 /* 3DNow! Athlon Extensions */
22140 IX86_BUILTIN_PF2IW,
22141 IX86_BUILTIN_PFNACC,
22142 IX86_BUILTIN_PFPNACC,
22143 IX86_BUILTIN_PI2FW,
22144 IX86_BUILTIN_PSWAPDSI,
22145 IX86_BUILTIN_PSWAPDSF,
22147 /* SSE2 */
22148 IX86_BUILTIN_ADDPD,
22149 IX86_BUILTIN_ADDSD,
22150 IX86_BUILTIN_DIVPD,
22151 IX86_BUILTIN_DIVSD,
22152 IX86_BUILTIN_MULPD,
22153 IX86_BUILTIN_MULSD,
22154 IX86_BUILTIN_SUBPD,
22155 IX86_BUILTIN_SUBSD,
22157 IX86_BUILTIN_CMPEQPD,
22158 IX86_BUILTIN_CMPLTPD,
22159 IX86_BUILTIN_CMPLEPD,
22160 IX86_BUILTIN_CMPGTPD,
22161 IX86_BUILTIN_CMPGEPD,
22162 IX86_BUILTIN_CMPNEQPD,
22163 IX86_BUILTIN_CMPNLTPD,
22164 IX86_BUILTIN_CMPNLEPD,
22165 IX86_BUILTIN_CMPNGTPD,
22166 IX86_BUILTIN_CMPNGEPD,
22167 IX86_BUILTIN_CMPORDPD,
22168 IX86_BUILTIN_CMPUNORDPD,
22169 IX86_BUILTIN_CMPEQSD,
22170 IX86_BUILTIN_CMPLTSD,
22171 IX86_BUILTIN_CMPLESD,
22172 IX86_BUILTIN_CMPNEQSD,
22173 IX86_BUILTIN_CMPNLTSD,
22174 IX86_BUILTIN_CMPNLESD,
22175 IX86_BUILTIN_CMPORDSD,
22176 IX86_BUILTIN_CMPUNORDSD,
22178 IX86_BUILTIN_COMIEQSD,
22179 IX86_BUILTIN_COMILTSD,
22180 IX86_BUILTIN_COMILESD,
22181 IX86_BUILTIN_COMIGTSD,
22182 IX86_BUILTIN_COMIGESD,
22183 IX86_BUILTIN_COMINEQSD,
22184 IX86_BUILTIN_UCOMIEQSD,
22185 IX86_BUILTIN_UCOMILTSD,
22186 IX86_BUILTIN_UCOMILESD,
22187 IX86_BUILTIN_UCOMIGTSD,
22188 IX86_BUILTIN_UCOMIGESD,
22189 IX86_BUILTIN_UCOMINEQSD,
22191 IX86_BUILTIN_MAXPD,
22192 IX86_BUILTIN_MAXSD,
22193 IX86_BUILTIN_MINPD,
22194 IX86_BUILTIN_MINSD,
22196 IX86_BUILTIN_ANDPD,
22197 IX86_BUILTIN_ANDNPD,
22199 IX86_BUILTIN_XORPD,
22201 IX86_BUILTIN_SQRTPD,
22202 IX86_BUILTIN_SQRTSD,
22204 IX86_BUILTIN_UNPCKHPD,
22205 IX86_BUILTIN_UNPCKLPD,
22207 IX86_BUILTIN_SHUFPD,
22209 IX86_BUILTIN_LOADUPD,
22210 IX86_BUILTIN_STOREUPD,
22211 IX86_BUILTIN_MOVSD,
22213 IX86_BUILTIN_LOADHPD,
22214 IX86_BUILTIN_LOADLPD,
22216 IX86_BUILTIN_CVTDQ2PD,
22217 IX86_BUILTIN_CVTDQ2PS,
22219 IX86_BUILTIN_CVTPD2DQ,
22220 IX86_BUILTIN_CVTPD2PI,
22221 IX86_BUILTIN_CVTPD2PS,
22222 IX86_BUILTIN_CVTTPD2DQ,
22223 IX86_BUILTIN_CVTTPD2PI,
22225 IX86_BUILTIN_CVTPI2PD,
22226 IX86_BUILTIN_CVTSI2SD,
22227 IX86_BUILTIN_CVTSI642SD,
22229 IX86_BUILTIN_CVTSD2SI,
22230 IX86_BUILTIN_CVTSD2SI64,
22231 IX86_BUILTIN_CVTSD2SS,
22232 IX86_BUILTIN_CVTSS2SD,
22233 IX86_BUILTIN_CVTTSD2SI,
22234 IX86_BUILTIN_CVTTSD2SI64,
22236 IX86_BUILTIN_CVTPS2DQ,
22237 IX86_BUILTIN_CVTPS2PD,
22238 IX86_BUILTIN_CVTTPS2DQ,
22240 IX86_BUILTIN_MOVNTI,
22241 IX86_BUILTIN_MOVNTPD,
22242 IX86_BUILTIN_MOVNTDQ,
22244 IX86_BUILTIN_MOVQ128,
22246 /* SSE2 MMX */
22247 IX86_BUILTIN_MASKMOVDQU,
22248 IX86_BUILTIN_MOVMSKPD,
22249 IX86_BUILTIN_PMOVMSKB128,
22251 IX86_BUILTIN_PACKSSWB128,
22252 IX86_BUILTIN_PACKSSDW128,
22253 IX86_BUILTIN_PACKUSWB128,
22255 IX86_BUILTIN_PADDB128,
22256 IX86_BUILTIN_PADDW128,
22257 IX86_BUILTIN_PADDD128,
22258 IX86_BUILTIN_PADDQ128,
22259 IX86_BUILTIN_PADDSB128,
22260 IX86_BUILTIN_PADDSW128,
22261 IX86_BUILTIN_PADDUSB128,
22262 IX86_BUILTIN_PADDUSW128,
22263 IX86_BUILTIN_PSUBB128,
22264 IX86_BUILTIN_PSUBW128,
22265 IX86_BUILTIN_PSUBD128,
22266 IX86_BUILTIN_PSUBQ128,
22267 IX86_BUILTIN_PSUBSB128,
22268 IX86_BUILTIN_PSUBSW128,
22269 IX86_BUILTIN_PSUBUSB128,
22270 IX86_BUILTIN_PSUBUSW128,
22272 IX86_BUILTIN_PAND128,
22273 IX86_BUILTIN_PANDN128,
22274 IX86_BUILTIN_POR128,
22275 IX86_BUILTIN_PXOR128,
22277 IX86_BUILTIN_PAVGB128,
22278 IX86_BUILTIN_PAVGW128,
22280 IX86_BUILTIN_PCMPEQB128,
22281 IX86_BUILTIN_PCMPEQW128,
22282 IX86_BUILTIN_PCMPEQD128,
22283 IX86_BUILTIN_PCMPGTB128,
22284 IX86_BUILTIN_PCMPGTW128,
22285 IX86_BUILTIN_PCMPGTD128,
22287 IX86_BUILTIN_PMADDWD128,
22289 IX86_BUILTIN_PMAXSW128,
22290 IX86_BUILTIN_PMAXUB128,
22291 IX86_BUILTIN_PMINSW128,
22292 IX86_BUILTIN_PMINUB128,
22294 IX86_BUILTIN_PMULUDQ,
22295 IX86_BUILTIN_PMULUDQ128,
22296 IX86_BUILTIN_PMULHUW128,
22297 IX86_BUILTIN_PMULHW128,
22298 IX86_BUILTIN_PMULLW128,
22300 IX86_BUILTIN_PSADBW128,
22301 IX86_BUILTIN_PSHUFHW,
22302 IX86_BUILTIN_PSHUFLW,
22303 IX86_BUILTIN_PSHUFD,
22305 IX86_BUILTIN_PSLLDQI128,
22306 IX86_BUILTIN_PSLLWI128,
22307 IX86_BUILTIN_PSLLDI128,
22308 IX86_BUILTIN_PSLLQI128,
22309 IX86_BUILTIN_PSRAWI128,
22310 IX86_BUILTIN_PSRADI128,
22311 IX86_BUILTIN_PSRLDQI128,
22312 IX86_BUILTIN_PSRLWI128,
22313 IX86_BUILTIN_PSRLDI128,
22314 IX86_BUILTIN_PSRLQI128,
22316 IX86_BUILTIN_PSLLDQ128,
22317 IX86_BUILTIN_PSLLW128,
22318 IX86_BUILTIN_PSLLD128,
22319 IX86_BUILTIN_PSLLQ128,
22320 IX86_BUILTIN_PSRAW128,
22321 IX86_BUILTIN_PSRAD128,
22322 IX86_BUILTIN_PSRLW128,
22323 IX86_BUILTIN_PSRLD128,
22324 IX86_BUILTIN_PSRLQ128,
22326 IX86_BUILTIN_PUNPCKHBW128,
22327 IX86_BUILTIN_PUNPCKHWD128,
22328 IX86_BUILTIN_PUNPCKHDQ128,
22329 IX86_BUILTIN_PUNPCKHQDQ128,
22330 IX86_BUILTIN_PUNPCKLBW128,
22331 IX86_BUILTIN_PUNPCKLWD128,
22332 IX86_BUILTIN_PUNPCKLDQ128,
22333 IX86_BUILTIN_PUNPCKLQDQ128,
22335 IX86_BUILTIN_CLFLUSH,
22336 IX86_BUILTIN_MFENCE,
22337 IX86_BUILTIN_LFENCE,
22339 IX86_BUILTIN_BSRSI,
22340 IX86_BUILTIN_BSRDI,
22341 IX86_BUILTIN_RDPMC,
22342 IX86_BUILTIN_RDTSC,
22343 IX86_BUILTIN_RDTSCP,
22344 IX86_BUILTIN_ROLQI,
22345 IX86_BUILTIN_ROLHI,
22346 IX86_BUILTIN_RORQI,
22347 IX86_BUILTIN_RORHI,
22349 /* SSE3 */
22350 IX86_BUILTIN_ADDSUBPS,
22351 IX86_BUILTIN_HADDPS,
22352 IX86_BUILTIN_HSUBPS,
22353 IX86_BUILTIN_MOVSHDUP,
22354 IX86_BUILTIN_MOVSLDUP,
22355 IX86_BUILTIN_ADDSUBPD,
22356 IX86_BUILTIN_HADDPD,
22357 IX86_BUILTIN_HSUBPD,
22358 IX86_BUILTIN_LDDQU,
22360 IX86_BUILTIN_MONITOR,
22361 IX86_BUILTIN_MWAIT,
22363 /* SSSE3 */
22364 IX86_BUILTIN_PHADDW,
22365 IX86_BUILTIN_PHADDD,
22366 IX86_BUILTIN_PHADDSW,
22367 IX86_BUILTIN_PHSUBW,
22368 IX86_BUILTIN_PHSUBD,
22369 IX86_BUILTIN_PHSUBSW,
22370 IX86_BUILTIN_PMADDUBSW,
22371 IX86_BUILTIN_PMULHRSW,
22372 IX86_BUILTIN_PSHUFB,
22373 IX86_BUILTIN_PSIGNB,
22374 IX86_BUILTIN_PSIGNW,
22375 IX86_BUILTIN_PSIGND,
22376 IX86_BUILTIN_PALIGNR,
22377 IX86_BUILTIN_PABSB,
22378 IX86_BUILTIN_PABSW,
22379 IX86_BUILTIN_PABSD,
22381 IX86_BUILTIN_PHADDW128,
22382 IX86_BUILTIN_PHADDD128,
22383 IX86_BUILTIN_PHADDSW128,
22384 IX86_BUILTIN_PHSUBW128,
22385 IX86_BUILTIN_PHSUBD128,
22386 IX86_BUILTIN_PHSUBSW128,
22387 IX86_BUILTIN_PMADDUBSW128,
22388 IX86_BUILTIN_PMULHRSW128,
22389 IX86_BUILTIN_PSHUFB128,
22390 IX86_BUILTIN_PSIGNB128,
22391 IX86_BUILTIN_PSIGNW128,
22392 IX86_BUILTIN_PSIGND128,
22393 IX86_BUILTIN_PALIGNR128,
22394 IX86_BUILTIN_PABSB128,
22395 IX86_BUILTIN_PABSW128,
22396 IX86_BUILTIN_PABSD128,
22398 /* AMDFAM10 - SSE4A New Instructions. */
22399 IX86_BUILTIN_MOVNTSD,
22400 IX86_BUILTIN_MOVNTSS,
22401 IX86_BUILTIN_EXTRQI,
22402 IX86_BUILTIN_EXTRQ,
22403 IX86_BUILTIN_INSERTQI,
22404 IX86_BUILTIN_INSERTQ,
22406 /* SSE4.1 */
22407 IX86_BUILTIN_BLENDPD,
22408 IX86_BUILTIN_BLENDPS,
22409 IX86_BUILTIN_BLENDVPD,
22410 IX86_BUILTIN_BLENDVPS,
22411 IX86_BUILTIN_PBLENDVB128,
22412 IX86_BUILTIN_PBLENDW128,
22414 IX86_BUILTIN_DPPD,
22415 IX86_BUILTIN_DPPS,
22417 IX86_BUILTIN_INSERTPS128,
22419 IX86_BUILTIN_MOVNTDQA,
22420 IX86_BUILTIN_MPSADBW128,
22421 IX86_BUILTIN_PACKUSDW128,
22422 IX86_BUILTIN_PCMPEQQ,
22423 IX86_BUILTIN_PHMINPOSUW128,
22425 IX86_BUILTIN_PMAXSB128,
22426 IX86_BUILTIN_PMAXSD128,
22427 IX86_BUILTIN_PMAXUD128,
22428 IX86_BUILTIN_PMAXUW128,
22430 IX86_BUILTIN_PMINSB128,
22431 IX86_BUILTIN_PMINSD128,
22432 IX86_BUILTIN_PMINUD128,
22433 IX86_BUILTIN_PMINUW128,
22435 IX86_BUILTIN_PMOVSXBW128,
22436 IX86_BUILTIN_PMOVSXBD128,
22437 IX86_BUILTIN_PMOVSXBQ128,
22438 IX86_BUILTIN_PMOVSXWD128,
22439 IX86_BUILTIN_PMOVSXWQ128,
22440 IX86_BUILTIN_PMOVSXDQ128,
22442 IX86_BUILTIN_PMOVZXBW128,
22443 IX86_BUILTIN_PMOVZXBD128,
22444 IX86_BUILTIN_PMOVZXBQ128,
22445 IX86_BUILTIN_PMOVZXWD128,
22446 IX86_BUILTIN_PMOVZXWQ128,
22447 IX86_BUILTIN_PMOVZXDQ128,
22449 IX86_BUILTIN_PMULDQ128,
22450 IX86_BUILTIN_PMULLD128,
22452 IX86_BUILTIN_ROUNDPD,
22453 IX86_BUILTIN_ROUNDPS,
22454 IX86_BUILTIN_ROUNDSD,
22455 IX86_BUILTIN_ROUNDSS,
22457 IX86_BUILTIN_PTESTZ,
22458 IX86_BUILTIN_PTESTC,
22459 IX86_BUILTIN_PTESTNZC,
22461 IX86_BUILTIN_VEC_INIT_V2SI,
22462 IX86_BUILTIN_VEC_INIT_V4HI,
22463 IX86_BUILTIN_VEC_INIT_V8QI,
22464 IX86_BUILTIN_VEC_EXT_V2DF,
22465 IX86_BUILTIN_VEC_EXT_V2DI,
22466 IX86_BUILTIN_VEC_EXT_V4SF,
22467 IX86_BUILTIN_VEC_EXT_V4SI,
22468 IX86_BUILTIN_VEC_EXT_V8HI,
22469 IX86_BUILTIN_VEC_EXT_V2SI,
22470 IX86_BUILTIN_VEC_EXT_V4HI,
22471 IX86_BUILTIN_VEC_EXT_V16QI,
22472 IX86_BUILTIN_VEC_SET_V2DI,
22473 IX86_BUILTIN_VEC_SET_V4SF,
22474 IX86_BUILTIN_VEC_SET_V4SI,
22475 IX86_BUILTIN_VEC_SET_V8HI,
22476 IX86_BUILTIN_VEC_SET_V4HI,
22477 IX86_BUILTIN_VEC_SET_V16QI,
22479 IX86_BUILTIN_VEC_PACK_SFIX,
22481 /* SSE4.2 */
22482 IX86_BUILTIN_CRC32QI,
22483 IX86_BUILTIN_CRC32HI,
22484 IX86_BUILTIN_CRC32SI,
22485 IX86_BUILTIN_CRC32DI,
22487 IX86_BUILTIN_PCMPESTRI128,
22488 IX86_BUILTIN_PCMPESTRM128,
22489 IX86_BUILTIN_PCMPESTRA128,
22490 IX86_BUILTIN_PCMPESTRC128,
22491 IX86_BUILTIN_PCMPESTRO128,
22492 IX86_BUILTIN_PCMPESTRS128,
22493 IX86_BUILTIN_PCMPESTRZ128,
22494 IX86_BUILTIN_PCMPISTRI128,
22495 IX86_BUILTIN_PCMPISTRM128,
22496 IX86_BUILTIN_PCMPISTRA128,
22497 IX86_BUILTIN_PCMPISTRC128,
22498 IX86_BUILTIN_PCMPISTRO128,
22499 IX86_BUILTIN_PCMPISTRS128,
22500 IX86_BUILTIN_PCMPISTRZ128,
22502 IX86_BUILTIN_PCMPGTQ,
22504 /* AES instructions */
22505 IX86_BUILTIN_AESENC128,
22506 IX86_BUILTIN_AESENCLAST128,
22507 IX86_BUILTIN_AESDEC128,
22508 IX86_BUILTIN_AESDECLAST128,
22509 IX86_BUILTIN_AESIMC128,
22510 IX86_BUILTIN_AESKEYGENASSIST128,
22512 /* PCLMUL instruction */
22513 IX86_BUILTIN_PCLMULQDQ128,
22515 /* AVX */
22516 IX86_BUILTIN_ADDPD256,
22517 IX86_BUILTIN_ADDPS256,
22518 IX86_BUILTIN_ADDSUBPD256,
22519 IX86_BUILTIN_ADDSUBPS256,
22520 IX86_BUILTIN_ANDPD256,
22521 IX86_BUILTIN_ANDPS256,
22522 IX86_BUILTIN_ANDNPD256,
22523 IX86_BUILTIN_ANDNPS256,
22524 IX86_BUILTIN_BLENDPD256,
22525 IX86_BUILTIN_BLENDPS256,
22526 IX86_BUILTIN_BLENDVPD256,
22527 IX86_BUILTIN_BLENDVPS256,
22528 IX86_BUILTIN_DIVPD256,
22529 IX86_BUILTIN_DIVPS256,
22530 IX86_BUILTIN_DPPS256,
22531 IX86_BUILTIN_HADDPD256,
22532 IX86_BUILTIN_HADDPS256,
22533 IX86_BUILTIN_HSUBPD256,
22534 IX86_BUILTIN_HSUBPS256,
22535 IX86_BUILTIN_MAXPD256,
22536 IX86_BUILTIN_MAXPS256,
22537 IX86_BUILTIN_MINPD256,
22538 IX86_BUILTIN_MINPS256,
22539 IX86_BUILTIN_MULPD256,
22540 IX86_BUILTIN_MULPS256,
22541 IX86_BUILTIN_ORPD256,
22542 IX86_BUILTIN_ORPS256,
22543 IX86_BUILTIN_SHUFPD256,
22544 IX86_BUILTIN_SHUFPS256,
22545 IX86_BUILTIN_SUBPD256,
22546 IX86_BUILTIN_SUBPS256,
22547 IX86_BUILTIN_XORPD256,
22548 IX86_BUILTIN_XORPS256,
22549 IX86_BUILTIN_CMPSD,
22550 IX86_BUILTIN_CMPSS,
22551 IX86_BUILTIN_CMPPD,
22552 IX86_BUILTIN_CMPPS,
22553 IX86_BUILTIN_CMPPD256,
22554 IX86_BUILTIN_CMPPS256,
22555 IX86_BUILTIN_CVTDQ2PD256,
22556 IX86_BUILTIN_CVTDQ2PS256,
22557 IX86_BUILTIN_CVTPD2PS256,
22558 IX86_BUILTIN_CVTPS2DQ256,
22559 IX86_BUILTIN_CVTPS2PD256,
22560 IX86_BUILTIN_CVTTPD2DQ256,
22561 IX86_BUILTIN_CVTPD2DQ256,
22562 IX86_BUILTIN_CVTTPS2DQ256,
22563 IX86_BUILTIN_EXTRACTF128PD256,
22564 IX86_BUILTIN_EXTRACTF128PS256,
22565 IX86_BUILTIN_EXTRACTF128SI256,
22566 IX86_BUILTIN_VZEROALL,
22567 IX86_BUILTIN_VZEROUPPER,
22568 IX86_BUILTIN_VPERMILVARPD,
22569 IX86_BUILTIN_VPERMILVARPS,
22570 IX86_BUILTIN_VPERMILVARPD256,
22571 IX86_BUILTIN_VPERMILVARPS256,
22572 IX86_BUILTIN_VPERMILPD,
22573 IX86_BUILTIN_VPERMILPS,
22574 IX86_BUILTIN_VPERMILPD256,
22575 IX86_BUILTIN_VPERMILPS256,
22576 IX86_BUILTIN_VPERMIL2PD,
22577 IX86_BUILTIN_VPERMIL2PS,
22578 IX86_BUILTIN_VPERMIL2PD256,
22579 IX86_BUILTIN_VPERMIL2PS256,
22580 IX86_BUILTIN_VPERM2F128PD256,
22581 IX86_BUILTIN_VPERM2F128PS256,
22582 IX86_BUILTIN_VPERM2F128SI256,
22583 IX86_BUILTIN_VBROADCASTSS,
22584 IX86_BUILTIN_VBROADCASTSD256,
22585 IX86_BUILTIN_VBROADCASTSS256,
22586 IX86_BUILTIN_VBROADCASTPD256,
22587 IX86_BUILTIN_VBROADCASTPS256,
22588 IX86_BUILTIN_VINSERTF128PD256,
22589 IX86_BUILTIN_VINSERTF128PS256,
22590 IX86_BUILTIN_VINSERTF128SI256,
22591 IX86_BUILTIN_LOADUPD256,
22592 IX86_BUILTIN_LOADUPS256,
22593 IX86_BUILTIN_STOREUPD256,
22594 IX86_BUILTIN_STOREUPS256,
22595 IX86_BUILTIN_LDDQU256,
22596 IX86_BUILTIN_MOVNTDQ256,
22597 IX86_BUILTIN_MOVNTPD256,
22598 IX86_BUILTIN_MOVNTPS256,
22599 IX86_BUILTIN_LOADDQU256,
22600 IX86_BUILTIN_STOREDQU256,
22601 IX86_BUILTIN_MASKLOADPD,
22602 IX86_BUILTIN_MASKLOADPS,
22603 IX86_BUILTIN_MASKSTOREPD,
22604 IX86_BUILTIN_MASKSTOREPS,
22605 IX86_BUILTIN_MASKLOADPD256,
22606 IX86_BUILTIN_MASKLOADPS256,
22607 IX86_BUILTIN_MASKSTOREPD256,
22608 IX86_BUILTIN_MASKSTOREPS256,
22609 IX86_BUILTIN_MOVSHDUP256,
22610 IX86_BUILTIN_MOVSLDUP256,
22611 IX86_BUILTIN_MOVDDUP256,
22613 IX86_BUILTIN_SQRTPD256,
22614 IX86_BUILTIN_SQRTPS256,
22615 IX86_BUILTIN_SQRTPS_NR256,
22616 IX86_BUILTIN_RSQRTPS256,
22617 IX86_BUILTIN_RSQRTPS_NR256,
22619 IX86_BUILTIN_RCPPS256,
22621 IX86_BUILTIN_ROUNDPD256,
22622 IX86_BUILTIN_ROUNDPS256,
22624 IX86_BUILTIN_UNPCKHPD256,
22625 IX86_BUILTIN_UNPCKLPD256,
22626 IX86_BUILTIN_UNPCKHPS256,
22627 IX86_BUILTIN_UNPCKLPS256,
22629 IX86_BUILTIN_SI256_SI,
22630 IX86_BUILTIN_PS256_PS,
22631 IX86_BUILTIN_PD256_PD,
22632 IX86_BUILTIN_SI_SI256,
22633 IX86_BUILTIN_PS_PS256,
22634 IX86_BUILTIN_PD_PD256,
22636 IX86_BUILTIN_VTESTZPD,
22637 IX86_BUILTIN_VTESTCPD,
22638 IX86_BUILTIN_VTESTNZCPD,
22639 IX86_BUILTIN_VTESTZPS,
22640 IX86_BUILTIN_VTESTCPS,
22641 IX86_BUILTIN_VTESTNZCPS,
22642 IX86_BUILTIN_VTESTZPD256,
22643 IX86_BUILTIN_VTESTCPD256,
22644 IX86_BUILTIN_VTESTNZCPD256,
22645 IX86_BUILTIN_VTESTZPS256,
22646 IX86_BUILTIN_VTESTCPS256,
22647 IX86_BUILTIN_VTESTNZCPS256,
22648 IX86_BUILTIN_PTESTZ256,
22649 IX86_BUILTIN_PTESTC256,
22650 IX86_BUILTIN_PTESTNZC256,
22652 IX86_BUILTIN_MOVMSKPD256,
22653 IX86_BUILTIN_MOVMSKPS256,
22655 /* TFmode support builtins. */
22657 IX86_BUILTIN_HUGE_VALQ,
22658 IX86_BUILTIN_FABSQ,
22659 IX86_BUILTIN_COPYSIGNQ,
22661 /* Vectorizer support builtins. */
22662 IX86_BUILTIN_CPYSGNPS,
22663 IX86_BUILTIN_CPYSGNPD,
22665 IX86_BUILTIN_CVTUDQ2PS,
22667 IX86_BUILTIN_VEC_PERM_V2DF,
22668 IX86_BUILTIN_VEC_PERM_V4SF,
22669 IX86_BUILTIN_VEC_PERM_V2DI,
22670 IX86_BUILTIN_VEC_PERM_V4SI,
22671 IX86_BUILTIN_VEC_PERM_V8HI,
22672 IX86_BUILTIN_VEC_PERM_V16QI,
22673 IX86_BUILTIN_VEC_PERM_V2DI_U,
22674 IX86_BUILTIN_VEC_PERM_V4SI_U,
22675 IX86_BUILTIN_VEC_PERM_V8HI_U,
22676 IX86_BUILTIN_VEC_PERM_V16QI_U,
22677 IX86_BUILTIN_VEC_PERM_V4DF,
22678 IX86_BUILTIN_VEC_PERM_V8SF,
22680 /* FMA4 and XOP instructions. */
22681 IX86_BUILTIN_VFMADDSS,
22682 IX86_BUILTIN_VFMADDSD,
22683 IX86_BUILTIN_VFMADDPS,
22684 IX86_BUILTIN_VFMADDPD,
22685 IX86_BUILTIN_VFMSUBSS,
22686 IX86_BUILTIN_VFMSUBSD,
22687 IX86_BUILTIN_VFMSUBPS,
22688 IX86_BUILTIN_VFMSUBPD,
22689 IX86_BUILTIN_VFMADDSUBPS,
22690 IX86_BUILTIN_VFMADDSUBPD,
22691 IX86_BUILTIN_VFMSUBADDPS,
22692 IX86_BUILTIN_VFMSUBADDPD,
22693 IX86_BUILTIN_VFNMADDSS,
22694 IX86_BUILTIN_VFNMADDSD,
22695 IX86_BUILTIN_VFNMADDPS,
22696 IX86_BUILTIN_VFNMADDPD,
22697 IX86_BUILTIN_VFNMSUBSS,
22698 IX86_BUILTIN_VFNMSUBSD,
22699 IX86_BUILTIN_VFNMSUBPS,
22700 IX86_BUILTIN_VFNMSUBPD,
22701 IX86_BUILTIN_VFMADDPS256,
22702 IX86_BUILTIN_VFMADDPD256,
22703 IX86_BUILTIN_VFMSUBPS256,
22704 IX86_BUILTIN_VFMSUBPD256,
22705 IX86_BUILTIN_VFMADDSUBPS256,
22706 IX86_BUILTIN_VFMADDSUBPD256,
22707 IX86_BUILTIN_VFMSUBADDPS256,
22708 IX86_BUILTIN_VFMSUBADDPD256,
22709 IX86_BUILTIN_VFNMADDPS256,
22710 IX86_BUILTIN_VFNMADDPD256,
22711 IX86_BUILTIN_VFNMSUBPS256,
22712 IX86_BUILTIN_VFNMSUBPD256,
22714 IX86_BUILTIN_VPCMOV,
22715 IX86_BUILTIN_VPCMOV_V2DI,
22716 IX86_BUILTIN_VPCMOV_V4SI,
22717 IX86_BUILTIN_VPCMOV_V8HI,
22718 IX86_BUILTIN_VPCMOV_V16QI,
22719 IX86_BUILTIN_VPCMOV_V4SF,
22720 IX86_BUILTIN_VPCMOV_V2DF,
22721 IX86_BUILTIN_VPCMOV256,
22722 IX86_BUILTIN_VPCMOV_V4DI256,
22723 IX86_BUILTIN_VPCMOV_V8SI256,
22724 IX86_BUILTIN_VPCMOV_V16HI256,
22725 IX86_BUILTIN_VPCMOV_V32QI256,
22726 IX86_BUILTIN_VPCMOV_V8SF256,
22727 IX86_BUILTIN_VPCMOV_V4DF256,
22729 IX86_BUILTIN_VPPERM,
22731 IX86_BUILTIN_VPMACSSWW,
22732 IX86_BUILTIN_VPMACSWW,
22733 IX86_BUILTIN_VPMACSSWD,
22734 IX86_BUILTIN_VPMACSWD,
22735 IX86_BUILTIN_VPMACSSDD,
22736 IX86_BUILTIN_VPMACSDD,
22737 IX86_BUILTIN_VPMACSSDQL,
22738 IX86_BUILTIN_VPMACSSDQH,
22739 IX86_BUILTIN_VPMACSDQL,
22740 IX86_BUILTIN_VPMACSDQH,
22741 IX86_BUILTIN_VPMADCSSWD,
22742 IX86_BUILTIN_VPMADCSWD,
22744 IX86_BUILTIN_VPHADDBW,
22745 IX86_BUILTIN_VPHADDBD,
22746 IX86_BUILTIN_VPHADDBQ,
22747 IX86_BUILTIN_VPHADDWD,
22748 IX86_BUILTIN_VPHADDWQ,
22749 IX86_BUILTIN_VPHADDDQ,
22750 IX86_BUILTIN_VPHADDUBW,
22751 IX86_BUILTIN_VPHADDUBD,
22752 IX86_BUILTIN_VPHADDUBQ,
22753 IX86_BUILTIN_VPHADDUWD,
22754 IX86_BUILTIN_VPHADDUWQ,
22755 IX86_BUILTIN_VPHADDUDQ,
22756 IX86_BUILTIN_VPHSUBBW,
22757 IX86_BUILTIN_VPHSUBWD,
22758 IX86_BUILTIN_VPHSUBDQ,
22760 IX86_BUILTIN_VPROTB,
22761 IX86_BUILTIN_VPROTW,
22762 IX86_BUILTIN_VPROTD,
22763 IX86_BUILTIN_VPROTQ,
22764 IX86_BUILTIN_VPROTB_IMM,
22765 IX86_BUILTIN_VPROTW_IMM,
22766 IX86_BUILTIN_VPROTD_IMM,
22767 IX86_BUILTIN_VPROTQ_IMM,
22769 IX86_BUILTIN_VPSHLB,
22770 IX86_BUILTIN_VPSHLW,
22771 IX86_BUILTIN_VPSHLD,
22772 IX86_BUILTIN_VPSHLQ,
22773 IX86_BUILTIN_VPSHAB,
22774 IX86_BUILTIN_VPSHAW,
22775 IX86_BUILTIN_VPSHAD,
22776 IX86_BUILTIN_VPSHAQ,
22778 IX86_BUILTIN_VFRCZSS,
22779 IX86_BUILTIN_VFRCZSD,
22780 IX86_BUILTIN_VFRCZPS,
22781 IX86_BUILTIN_VFRCZPD,
22782 IX86_BUILTIN_VFRCZPS256,
22783 IX86_BUILTIN_VFRCZPD256,
22785 IX86_BUILTIN_VPCOMEQUB,
22786 IX86_BUILTIN_VPCOMNEUB,
22787 IX86_BUILTIN_VPCOMLTUB,
22788 IX86_BUILTIN_VPCOMLEUB,
22789 IX86_BUILTIN_VPCOMGTUB,
22790 IX86_BUILTIN_VPCOMGEUB,
22791 IX86_BUILTIN_VPCOMFALSEUB,
22792 IX86_BUILTIN_VPCOMTRUEUB,
22794 IX86_BUILTIN_VPCOMEQUW,
22795 IX86_BUILTIN_VPCOMNEUW,
22796 IX86_BUILTIN_VPCOMLTUW,
22797 IX86_BUILTIN_VPCOMLEUW,
22798 IX86_BUILTIN_VPCOMGTUW,
22799 IX86_BUILTIN_VPCOMGEUW,
22800 IX86_BUILTIN_VPCOMFALSEUW,
22801 IX86_BUILTIN_VPCOMTRUEUW,
22803 IX86_BUILTIN_VPCOMEQUD,
22804 IX86_BUILTIN_VPCOMNEUD,
22805 IX86_BUILTIN_VPCOMLTUD,
22806 IX86_BUILTIN_VPCOMLEUD,
22807 IX86_BUILTIN_VPCOMGTUD,
22808 IX86_BUILTIN_VPCOMGEUD,
22809 IX86_BUILTIN_VPCOMFALSEUD,
22810 IX86_BUILTIN_VPCOMTRUEUD,
22812 IX86_BUILTIN_VPCOMEQUQ,
22813 IX86_BUILTIN_VPCOMNEUQ,
22814 IX86_BUILTIN_VPCOMLTUQ,
22815 IX86_BUILTIN_VPCOMLEUQ,
22816 IX86_BUILTIN_VPCOMGTUQ,
22817 IX86_BUILTIN_VPCOMGEUQ,
22818 IX86_BUILTIN_VPCOMFALSEUQ,
22819 IX86_BUILTIN_VPCOMTRUEUQ,
22821 IX86_BUILTIN_VPCOMEQB,
22822 IX86_BUILTIN_VPCOMNEB,
22823 IX86_BUILTIN_VPCOMLTB,
22824 IX86_BUILTIN_VPCOMLEB,
22825 IX86_BUILTIN_VPCOMGTB,
22826 IX86_BUILTIN_VPCOMGEB,
22827 IX86_BUILTIN_VPCOMFALSEB,
22828 IX86_BUILTIN_VPCOMTRUEB,
22830 IX86_BUILTIN_VPCOMEQW,
22831 IX86_BUILTIN_VPCOMNEW,
22832 IX86_BUILTIN_VPCOMLTW,
22833 IX86_BUILTIN_VPCOMLEW,
22834 IX86_BUILTIN_VPCOMGTW,
22835 IX86_BUILTIN_VPCOMGEW,
22836 IX86_BUILTIN_VPCOMFALSEW,
22837 IX86_BUILTIN_VPCOMTRUEW,
22839 IX86_BUILTIN_VPCOMEQD,
22840 IX86_BUILTIN_VPCOMNED,
22841 IX86_BUILTIN_VPCOMLTD,
22842 IX86_BUILTIN_VPCOMLED,
22843 IX86_BUILTIN_VPCOMGTD,
22844 IX86_BUILTIN_VPCOMGED,
22845 IX86_BUILTIN_VPCOMFALSED,
22846 IX86_BUILTIN_VPCOMTRUED,
22848 IX86_BUILTIN_VPCOMEQQ,
22849 IX86_BUILTIN_VPCOMNEQ,
22850 IX86_BUILTIN_VPCOMLTQ,
22851 IX86_BUILTIN_VPCOMLEQ,
22852 IX86_BUILTIN_VPCOMGTQ,
22853 IX86_BUILTIN_VPCOMGEQ,
22854 IX86_BUILTIN_VPCOMFALSEQ,
22855 IX86_BUILTIN_VPCOMTRUEQ,
22857 /* LWP instructions. */
22858 IX86_BUILTIN_LLWPCB,
22859 IX86_BUILTIN_SLWPCB,
22860 IX86_BUILTIN_LWPVAL32,
22861 IX86_BUILTIN_LWPVAL64,
22862 IX86_BUILTIN_LWPINS32,
22863 IX86_BUILTIN_LWPINS64,
22865 IX86_BUILTIN_CLZS,
22867 /* FSGSBASE instructions. */
22868 IX86_BUILTIN_RDFSBASE32,
22869 IX86_BUILTIN_RDFSBASE64,
22870 IX86_BUILTIN_RDGSBASE32,
22871 IX86_BUILTIN_RDGSBASE64,
22872 IX86_BUILTIN_WRFSBASE32,
22873 IX86_BUILTIN_WRFSBASE64,
22874 IX86_BUILTIN_WRGSBASE32,
22875 IX86_BUILTIN_WRGSBASE64,
22877 /* RDRND instructions. */
22878 IX86_BUILTIN_RDRAND16,
22879 IX86_BUILTIN_RDRAND32,
22880 IX86_BUILTIN_RDRAND64,
22882 /* F16C instructions. */
22883 IX86_BUILTIN_CVTPH2PS,
22884 IX86_BUILTIN_CVTPH2PS256,
22885 IX86_BUILTIN_CVTPS2PH,
22886 IX86_BUILTIN_CVTPS2PH256,
22888 IX86_BUILTIN_MAX
22889 };
22891 /* Table for the ix86 builtin decls. */
22892 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
22894 /* Table of all of the builtin functions that are possible with different ISAs
22895 but are waiting to be built until a function is declared to use that
22896 ISA. */
22897 struct builtin_isa {
22898 const char *name; /* function name */
22899 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
22900 int isa; /* isa_flags this builtin is defined for */
22901 bool const_p; /* true if the declaration is constant */
22902 bool set_and_not_built_p;
22903 };
22905 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
22908 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Save the MASK
22909 of which isa_flags to use in the ix86_builtins_isa array. Stores the
22910 function decl in the ix86_builtins array. Returns the function decl or
22911 NULL_TREE, if the builtin was not added.
22913 If the front end has a special hook for builtin functions, delay adding
22914 builtin functions that aren't in the current ISA until the ISA is changed
22915 with function specific optimization. Doing so can save about 300K for the
22916 default compiler. When the builtin is expanded, check at that time whether
22917 it is valid.
22919 If the front end doesn't have a special hook, record all builtins, even
22920 those that aren't in the current ISA, in case the user uses function
22921 specific options for a different ISA, so that we don't get scope errors
22922 if a builtin is added in the middle of a function scope. */
22924 static inline tree
22925 def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
22926 enum ix86_builtins code)
22927 {
22928 tree decl = NULL_TREE;
22930 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
22931 {
22932 ix86_builtins_isa[(int) code].isa = mask;
22934 mask &= ~OPTION_MASK_ISA_64BIT;
22935 if (mask == 0
22936 || (mask & ix86_isa_flags) != 0
22937 || (lang_hooks.builtin_function
22938 == lang_hooks.builtin_function_ext_scope))
22940 {
22941 tree type = ix86_get_builtin_func_type (tcode);
22942 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
22943 NULL, NULL_TREE);
22944 ix86_builtins[(int) code] = decl;
22945 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
22946 }
22947 else
22948 {
22949 ix86_builtins[(int) code] = NULL_TREE;
22950 ix86_builtins_isa[(int) code].tcode = tcode;
22951 ix86_builtins_isa[(int) code].name = name;
22952 ix86_builtins_isa[(int) code].const_p = false;
22953 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
22954 }
22955 }
22957 return decl;
22958 }
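/* Minimal usage sketch (illustrative; the real registration calls live
   in the builtin-initialization code later in this file):

     def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
                  VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);

   If SSE is disabled and the front end has no ext-scope hook, the
   builtin is only recorded in ix86_builtins_isa and is materialized
   later by ix86_add_new_builtins once the ISA is turned on. */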
22960 /* Like def_builtin, but also marks the function decl "const". */
22962 static inline tree
22963 def_builtin_const (int mask, const char *name,
22964 enum ix86_builtin_func_type tcode, enum ix86_builtins code)
22965 {
22966 tree decl = def_builtin (mask, name, tcode, code);
22967 if (decl)
22968 TREE_READONLY (decl) = 1;
22969 else
22970 ix86_builtins_isa[(int) code].const_p = true;
22972 return decl;
22973 }
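/* Setting TREE_READONLY on the decl is the tree-level equivalent of
   __attribute__((const)): it tells the optimizers that the builtin
   computes its result purely from its operands, so duplicate calls can
   be CSEd. */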
22975 /* Add any new builtin functions for a given ISA that may not have been
22976 declared. This saves a bit of space compared to adding all of the
22977 declarations to the tree, even if we didn't use them. */
22979 static void
22980 ix86_add_new_builtins (int isa)
22981 {
22982 int i;
22984 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
22985 {
22986 if ((ix86_builtins_isa[i].isa & isa) != 0
22987 && ix86_builtins_isa[i].set_and_not_built_p)
22988 {
22989 tree decl, type;
22991 /* Don't define the builtin again. */
22992 ix86_builtins_isa[i].set_and_not_built_p = false;
22994 type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
22995 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
22996 type, i, BUILT_IN_MD, NULL,
22997 NULL_TREE);
22999 ix86_builtins[i] = decl;
23000 if (ix86_builtins_isa[i].const_p)
23001 TREE_READONLY (decl) = 1;
23002 }
23003 }
23004 }
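/* (Illustrative note: callers are expected to pass the isa_flags that
   were just enabled -- e.g. by a target attribute or pragma turning on
   an ISA that was off on the command line -- so the builtins deferred
   by def_builtin become visible in the new scope.) */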
23006 /* Bits for builtin_description.flag. */
23008 /* Set when we don't support the comparison natively, and should
23009 swap_comparison in order to support it. */
23010 #define BUILTIN_DESC_SWAP_OPERANDS 1
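/* For example, SSE has no native packed GT/GE compare: the bdesc_args
   entries below describe __builtin_ia32_cmpgtps with comparison LT and
   a *_SWAP function type, so the expander emits cmpltps with the two
   operands exchanged. */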
23012 struct builtin_description
23013 {
23014 const unsigned int mask;
23015 const enum insn_code icode;
23016 const char *const name;
23017 const enum ix86_builtins code;
23018 const enum rtx_code comparison;
23019 const int flag;
23020 };
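/* (Note: the FLAG field is overloaded by the tables below -- comparison
   entries may carry BUILTIN_DESC_* bits, the pcmpestr/pcmpistr tables
   store the flags-register mode being tested (e.g. (int) CCZmode), and
   the argument tables store the entry's ix86_builtin_func_type.) */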
23022 static const struct builtin_description bdesc_comi[] =
23023 {
23024 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
23025 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
23026 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
23027 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
23028 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
23029 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
23030 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
23031 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
23032 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
23033 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
23034 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
23035 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
23036 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
23037 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
23038 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
23039 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
23040 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
23041 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
23042 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
23043 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
23044 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
23045 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
23046 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
23047 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
23048 };
23050 static const struct builtin_description bdesc_pcmpestr[] =
23051 {
23052 /* SSE4.2 */
23053 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
23054 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
23055 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
23056 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
23057 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
23058 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
23059 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
23060 };
23062 static const struct builtin_description bdesc_pcmpistr[] =
23063 {
23064 /* SSE4.2 */
23065 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
23066 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
23067 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
23068 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
23069 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
23070 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
23071 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
23072 };
23074 /* Special builtins with variable number of arguments. */
23075 static const struct builtin_description bdesc_special_args[] =
23076 {
23077 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
23078 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
23080 /* MMX */
23081 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
23083 /* 3DNow! */
23084 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
23087 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
23088 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
23089 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
23091 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
23092 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
23093 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
23094 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
23096 /* SSE or 3DNow!A */
23097 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
23098 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
23100 /* SSE2 */
23101 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
23102 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
23103 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
23104 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
23105 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
23106 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
23107 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
23108 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
23109 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
23111 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
23112 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
23114 /* SSE3 */
23115 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
23117 /* SSE4.1 */
23118 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
23120 /* SSE4A */
23121 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
23122 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
23124 /* AVX */
23125 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
23126 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
23128 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
23129 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
23130 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
23131 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
23132 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
23134 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
23135 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
23136 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
23137 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
23138 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
23139 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
23140 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
23142 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
23143 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
23144 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
23146 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
23147 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
23148 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
23149 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
23150 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
23151 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
23152 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
23153 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
23155 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
23156 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
23157 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
23158 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
23159 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
23160 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
23162 /* FSGSBASE */
23163 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasesi, "__builtin_ia32_rdfsbase32", IX86_BUILTIN_RDFSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
23164 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasedi, "__builtin_ia32_rdfsbase64", IX86_BUILTIN_RDFSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
23165 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasesi, "__builtin_ia32_rdgsbase32", IX86_BUILTIN_RDGSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
23166 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasedi, "__builtin_ia32_rdgsbase64", IX86_BUILTIN_RDGSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
23167 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasesi, "__builtin_ia32_wrfsbase32", IX86_BUILTIN_WRFSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
23168 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasedi, "__builtin_ia32_wrfsbase64", IX86_BUILTIN_WRFSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },
23169 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasesi, "__builtin_ia32_wrgsbase32", IX86_BUILTIN_WRGSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
23170 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasedi, "__builtin_ia32_wrgsbase64", IX86_BUILTIN_WRGSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },
23172 /* RDRND */
23173 { OPTION_MASK_ISA_RDRND, CODE_FOR_rdrandhi, "__builtin_ia32_rdrand16", IX86_BUILTIN_RDRAND16, UNKNOWN, (int) UINT16_FTYPE_VOID },
23174 { OPTION_MASK_ISA_RDRND, CODE_FOR_rdrandsi, "__builtin_ia32_rdrand32", IX86_BUILTIN_RDRAND32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
23175 { OPTION_MASK_ISA_RDRND | OPTION_MASK_ISA_64BIT, CODE_FOR_rdranddi, "__builtin_ia32_rdrand64", IX86_BUILTIN_RDRAND64, UNKNOWN, (int) UINT64_FTYPE_VOID },
23176 };
23178 /* Builtins with variable number of arguments. */
23179 static const struct builtin_description bdesc_args[] =
23180 {
23181 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
23182 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
23183 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
23184 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
23185 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
23186 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
23187 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
23189 /* MMX */
23190 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
23191 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
23192 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
23193 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
23194 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
23195 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
23197 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
23198 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
23199 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
23200 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
23201 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
23202 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
23203 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
23204 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
23206 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
23207 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
23209 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
23210 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
23211 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
23212 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
23214 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
23215 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
23216 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
23217 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
23218 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
23219 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
23221 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
23222 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
23223 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
23224 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
23225 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI},
23226 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI},
23228 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
23229 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
23230 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
23232 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
23234 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
23235 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
23236 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
23237 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
23238 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
23239 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
23241 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
23242 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
23243 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
23244 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
23245 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
23246 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
23248 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
23249 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
23250 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
23251 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
23253 /* 3DNow! */
23254 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
23255 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
23256 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
23257 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
23259 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
23260 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
23261 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
23262 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
23263 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
23264 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
23265 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
23266 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
23267 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
23268 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
23269 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
23270 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
23271 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
23272 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
23273 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
23275 /* 3DNow!A */
23276 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
23277 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
23278 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
23279 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
23280 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
23281 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
23283 /* SSE */
23284 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
23285 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
23286 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
23287 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
23288 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
23289 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
23290 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
23291 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
23292 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
23293 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
23294 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
23295 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },

  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },

  /* SSE MMX or 3Dnow!A */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
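
  /* SSE2 */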
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
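
  /* SSE2 MMX */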
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
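
  /* SSE3 */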
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
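
  /* SSSE3 */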
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
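
  /* SSE4.1 */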
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
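
  /* SSE4.1 round and ptest, enabled by OPTION_MASK_ISA_ROUND.  */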
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
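
  /* SSE4.2 */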
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
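
  /* SSE4A */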
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
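
  /* AES */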
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
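
  /* PCLMUL */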
  { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
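
  /* AVX */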
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8si, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8sf, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v4df, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
23801 { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
23804 { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps, "__builtin_ia32_vcvtph2ps", IX86_BUILTIN_CVTPH2PS, UNKNOWN, (int) V4SF_FTYPE_V8HI },
23805 { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps256, "__builtin_ia32_vcvtph2ps256", IX86_BUILTIN_CVTPH2PS256, UNKNOWN, (int) V8SF_FTYPE_V8HI },
23806 { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph, "__builtin_ia32_vcvtps2ph", IX86_BUILTIN_CVTPS2PH, UNKNOWN, (int) V8HI_FTYPE_V4SF_INT },
23807 { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph256, "__builtin_ia32_vcvtps2ph256", IX86_BUILTIN_CVTPS2PH256, UNKNOWN, (int) V8HI_FTYPE_V8SF_INT },
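/* Editor's illustration (not part of the compiler): a user-level sketch of
   the F16C conversion builtins registered above, assuming the intrinsic
   wrappers that ship in a matching GCC's immintrin.h; compile with -mf16c.

     #include <immintrin.h>

     __m128
     half_roundtrip (__m128 x)
     {
       // Narrow each lane to 16-bit half precision, then widen back;
       // _mm_cvtps_ph wraps __builtin_ia32_vcvtps2ph and _mm_cvtph_ps
       // wraps __builtin_ia32_vcvtph2ps.
       return _mm_cvtph_ps (_mm_cvtps_ph (x, _MM_FROUND_TO_NEAREST_INT));
     }
*/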
23810 /* FMA4 and XOP. */
23811 #define MULTI_ARG_4_DF2_DI_I V2DF_FTYPE_V2DF_V2DF_V2DI_INT
23812 #define MULTI_ARG_4_DF2_DI_I1 V4DF_FTYPE_V4DF_V4DF_V4DI_INT
23813 #define MULTI_ARG_4_SF2_SI_I V4SF_FTYPE_V4SF_V4SF_V4SI_INT
23814 #define MULTI_ARG_4_SF2_SI_I1 V8SF_FTYPE_V8SF_V8SF_V8SI_INT
23815 #define MULTI_ARG_3_SF V4SF_FTYPE_V4SF_V4SF_V4SF
23816 #define MULTI_ARG_3_DF V2DF_FTYPE_V2DF_V2DF_V2DF
23817 #define MULTI_ARG_3_SF2 V8SF_FTYPE_V8SF_V8SF_V8SF
23818 #define MULTI_ARG_3_DF2 V4DF_FTYPE_V4DF_V4DF_V4DF
23819 #define MULTI_ARG_3_DI V2DI_FTYPE_V2DI_V2DI_V2DI
23820 #define MULTI_ARG_3_SI V4SI_FTYPE_V4SI_V4SI_V4SI
23821 #define MULTI_ARG_3_SI_DI V4SI_FTYPE_V4SI_V4SI_V2DI
23822 #define MULTI_ARG_3_HI V8HI_FTYPE_V8HI_V8HI_V8HI
23823 #define MULTI_ARG_3_HI_SI V8HI_FTYPE_V8HI_V8HI_V4SI
23824 #define MULTI_ARG_3_QI V16QI_FTYPE_V16QI_V16QI_V16QI
23825 #define MULTI_ARG_3_DI2 V4DI_FTYPE_V4DI_V4DI_V4DI
23826 #define MULTI_ARG_3_SI2 V8SI_FTYPE_V8SI_V8SI_V8SI
23827 #define MULTI_ARG_3_HI2 V16HI_FTYPE_V16HI_V16HI_V16HI
23828 #define MULTI_ARG_3_QI2 V32QI_FTYPE_V32QI_V32QI_V32QI
23829 #define MULTI_ARG_2_SF V4SF_FTYPE_V4SF_V4SF
23830 #define MULTI_ARG_2_DF V2DF_FTYPE_V2DF_V2DF
23831 #define MULTI_ARG_2_DI V2DI_FTYPE_V2DI_V2DI
23832 #define MULTI_ARG_2_SI V4SI_FTYPE_V4SI_V4SI
23833 #define MULTI_ARG_2_HI V8HI_FTYPE_V8HI_V8HI
23834 #define MULTI_ARG_2_QI V16QI_FTYPE_V16QI_V16QI
23835 #define MULTI_ARG_2_DI_IMM V2DI_FTYPE_V2DI_SI
23836 #define MULTI_ARG_2_SI_IMM V4SI_FTYPE_V4SI_SI
23837 #define MULTI_ARG_2_HI_IMM V8HI_FTYPE_V8HI_SI
23838 #define MULTI_ARG_2_QI_IMM V16QI_FTYPE_V16QI_SI
23839 #define MULTI_ARG_2_DI_CMP V2DI_FTYPE_V2DI_V2DI_CMP
23840 #define MULTI_ARG_2_SI_CMP V4SI_FTYPE_V4SI_V4SI_CMP
23841 #define MULTI_ARG_2_HI_CMP V8HI_FTYPE_V8HI_V8HI_CMP
23842 #define MULTI_ARG_2_QI_CMP V16QI_FTYPE_V16QI_V16QI_CMP
23843 #define MULTI_ARG_2_SF_TF V4SF_FTYPE_V4SF_V4SF_TF
23844 #define MULTI_ARG_2_DF_TF V2DF_FTYPE_V2DF_V2DF_TF
23845 #define MULTI_ARG_2_DI_TF V2DI_FTYPE_V2DI_V2DI_TF
23846 #define MULTI_ARG_2_SI_TF V4SI_FTYPE_V4SI_V4SI_TF
23847 #define MULTI_ARG_2_HI_TF V8HI_FTYPE_V8HI_V8HI_TF
23848 #define MULTI_ARG_2_QI_TF V16QI_FTYPE_V16QI_V16QI_TF
23849 #define MULTI_ARG_1_SF V4SF_FTYPE_V4SF
23850 #define MULTI_ARG_1_DF V2DF_FTYPE_V2DF
23851 #define MULTI_ARG_1_SF2 V8SF_FTYPE_V8SF
23852 #define MULTI_ARG_1_DF2 V4DF_FTYPE_V4DF
23853 #define MULTI_ARG_1_DI V2DI_FTYPE_V2DI
23854 #define MULTI_ARG_1_SI V4SI_FTYPE_V4SI
23855 #define MULTI_ARG_1_HI V8HI_FTYPE_V8HI
23856 #define MULTI_ARG_1_QI V16QI_FTYPE_V16QI
23857 #define MULTI_ARG_1_SI_DI V2DI_FTYPE_V4SI
23858 #define MULTI_ARG_1_HI_DI V2DI_FTYPE_V8HI
23859 #define MULTI_ARG_1_HI_SI V4SI_FTYPE_V8HI
23860 #define MULTI_ARG_1_QI_DI V2DI_FTYPE_V16QI
23861 #define MULTI_ARG_1_QI_SI V4SI_FTYPE_V16QI
23862 #define MULTI_ARG_1_QI_HI V8HI_FTYPE_V16QI
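/* Editor's note: the MULTI_ARG_* aliases encode the builtin shape as
   <number of operands>_<element mode>, with a trailing "2" for the 256-bit
   forms, "<src>_<dst>" pairs for the widening one-operand forms, and _IMM,
   _CMP and _TF tags for a final immediate, comparison, or PCOM_TRUE/FALSE
   operand.  For example:

     MULTI_ARG_3_SF2 == V8SF_FTYPE_V8SF_V8SF_V8SF
       // a builtin shaped like:  v8sf f (v8sf a, v8sf b, v8sf c);
     MULTI_ARG_1_QI_HI == V8HI_FTYPE_V16QI
       // one V16QI source operand producing a V8HI result
*/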
23864 static const struct builtin_description bdesc_multi_arg[] =
23866 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv4sf4, "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
23867 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv2df4, "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
23868 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4sf4, "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
23869 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv2df4, "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
23870 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv4sf4, "__builtin_ia32_vfmsubss", IX86_BUILTIN_VFMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
23871 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv2df4, "__builtin_ia32_vfmsubsd", IX86_BUILTIN_VFMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
23872 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4sf4, "__builtin_ia32_vfmsubps", IX86_BUILTIN_VFMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
23873 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv2df4, "__builtin_ia32_vfmsubpd", IX86_BUILTIN_VFMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
23875 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv4sf4, "__builtin_ia32_vfnmaddss", IX86_BUILTIN_VFNMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
23876 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv2df4, "__builtin_ia32_vfnmaddsd", IX86_BUILTIN_VFNMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
23877 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4sf4, "__builtin_ia32_vfnmaddps", IX86_BUILTIN_VFNMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
23878 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv2df4, "__builtin_ia32_vfnmaddpd", IX86_BUILTIN_VFNMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
23879 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv4sf4, "__builtin_ia32_vfnmsubss", IX86_BUILTIN_VFNMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
23880 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv2df4, "__builtin_ia32_vfnmsubsd", IX86_BUILTIN_VFNMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
23881 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4sf4, "__builtin_ia32_vfnmsubps", IX86_BUILTIN_VFNMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
23882 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv2df4, "__builtin_ia32_vfnmsubpd", IX86_BUILTIN_VFNMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
23884 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4sf4, "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
23885 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv2df4, "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
23886 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4sf4, "__builtin_ia32_vfmsubaddps", IX86_BUILTIN_VFMSUBADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
23887 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv2df4, "__builtin_ia32_vfmsubaddpd", IX86_BUILTIN_VFMSUBADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
23889 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv8sf4256, "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
23890 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4df4256, "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
23891 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv8sf4256, "__builtin_ia32_vfmsubps256", IX86_BUILTIN_VFMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
23892 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4df4256, "__builtin_ia32_vfmsubpd256", IX86_BUILTIN_VFMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
23894 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv8sf4256, "__builtin_ia32_vfnmaddps256", IX86_BUILTIN_VFNMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
23895 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4df4256, "__builtin_ia32_vfnmaddpd256", IX86_BUILTIN_VFNMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
23896 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv8sf4256, "__builtin_ia32_vfnmsubps256", IX86_BUILTIN_VFNMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
23897 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4df4256, "__builtin_ia32_vfnmsubpd256", IX86_BUILTIN_VFNMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
23899 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv8sf4, "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
23900 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4df4, "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
23901 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv8sf4, "__builtin_ia32_vfmsubaddps256", IX86_BUILTIN_VFMSUBADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
23902 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4df4, "__builtin_ia32_vfmsubaddpd256", IX86_BUILTIN_VFMSUBADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
23904 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
23905 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
23906 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
23907 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
23908 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi", IX86_BUILTIN_VPCMOV_V16QI, UNKNOWN, (int)MULTI_ARG_3_QI },
23909 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
23910 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },
23912 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
23913 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
23914 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
23915 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
23916 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
23917 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
23918 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
23920 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },
23922 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
23923 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
23924 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
23925 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
23926 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
23927 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
23928 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
23929 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
23930 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
23931 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
23932 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
23933 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
23935 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
23936 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
23937 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
23938 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
23939 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
23940 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
23941 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
23942 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
23943 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
23944 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
23945 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
23946 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
23947 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
23948 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
23949 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
23950 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },
23952 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
23953 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
23954 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
23955 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
23956 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2256, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
23957 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2256, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },
23959 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
23960 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
23961 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
23962 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
23963 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
23964 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
23965 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
23966 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
23967 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
23968 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
23969 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
23970 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
23971 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
23972 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
23973 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
23975 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
23976 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
23977 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
23978 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
23979 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
23980 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
23981 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
23983 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
23984 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
23985 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
23986 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
23987 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
23988 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
23989 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
23991 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
23992 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
23993 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
23994 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
23995 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
23996 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
23997 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
23999 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
24000 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
24001 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
24002 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
24003 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
24004 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
24005 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
24007 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
24008 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
24009 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
24010 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
24011 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
24012 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
24013 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
24015 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
24016 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
24017 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
24018 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
24019 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
24020 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
24021 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
24023 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
24024 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
24025 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
24026 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
24027 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
24028 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
24029 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
24031 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
24032 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
24033 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
24034 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
24035 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
24036 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
24037 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
24039 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
24040 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
24041 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
24042 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
24043 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub", IX86_BUILTIN_VPCOMFALSEUB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
24044 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw", IX86_BUILTIN_VPCOMFALSEUW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
24045 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud", IX86_BUILTIN_VPCOMFALSEUD, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
24046 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq", IX86_BUILTIN_VPCOMFALSEUQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
24048 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
24049 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
24050 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
24051 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
24052 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
24053 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
24054 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
24055 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
24057 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
24058 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
24059 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
24060 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
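/* Editor's illustration (not part of the compiler): the rtx_code column
   above selects the comparison each vpcom builtin performs.  A sketch of
   the user-level view, assuming the xopintrin.h wrappers of a matching
   GCC; compile with -mxop.

     #include <x86intrin.h>

     __m128i
     lt_bytes (__m128i a, __m128i b)
     {
       // _mm_comlt_epi8 wraps __builtin_ia32_vpcomltb, i.e. the LT row
       // of bdesc_multi_arg; each byte lane becomes all-ones or zero.
       return _mm_comlt_epi8 (a, b);
     }
*/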
24064 /* Set up all the MMX/SSE builtins, even builtins for instructions that are not
24065 in the current target ISA to allow the user to compile particular modules
24066 with different target-specific options that differ from the command-line options.  */
24069 ix86_init_mmx_sse_builtins (void)
24071 const struct builtin_description * d;
24072 enum ix86_builtin_func_type ftype;
24075 /* Add all special builtins with a variable number of operands.  */
24076 for (i = 0, d = bdesc_special_args;
24077 i < ARRAY_SIZE (bdesc_special_args);
24083 ftype = (enum ix86_builtin_func_type) d->flag;
24084 def_builtin (d->mask, d->name, ftype, d->code);
24087 /* Add all builtins with a variable number of operands.  */
24088 for (i = 0, d = bdesc_args;
24089 i < ARRAY_SIZE (bdesc_args);
24095 ftype = (enum ix86_builtin_func_type) d->flag;
24096 def_builtin_const (d->mask, d->name, ftype, d->code);
24099 /* pcmpestr[im] insns. */
24100 for (i = 0, d = bdesc_pcmpestr;
24101 i < ARRAY_SIZE (bdesc_pcmpestr);
24104 if (d->code == IX86_BUILTIN_PCMPESTRM128)
24105 ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
24107 ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
24108 def_builtin_const (d->mask, d->name, ftype, d->code);
24111 /* pcmpistr[im] insns. */
24112 for (i = 0, d = bdesc_pcmpistr;
24113 i < ARRAY_SIZE (bdesc_pcmpistr);
24116 if (d->code == IX86_BUILTIN_PCMPISTRM128)
24117 ftype = V16QI_FTYPE_V16QI_V16QI_INT;
24119 ftype = INT_FTYPE_V16QI_V16QI_INT;
24120 def_builtin_const (d->mask, d->name, ftype, d->code);
24123 /* comi/ucomi insns. */
24124 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24126 if (d->mask == OPTION_MASK_ISA_SSE2)
24127 ftype = INT_FTYPE_V2DF_V2DF;
24129 ftype = INT_FTYPE_V4SF_V4SF;
24130 def_builtin_const (d->mask, d->name, ftype, d->code);
24134 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
24135 VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
24136 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
24137 UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);
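/* Editor's illustration (not part of the compiler): reading and rewriting
   the MXCSR control/status register through the two builtins just defined;
   assumes -msse and the architectural FTZ/DAZ bit positions.

     void
     enable_ftz_daz (void)
     {
       unsigned int csr = __builtin_ia32_stmxcsr ();
       __builtin_ia32_ldmxcsr (csr | 0x8040);   // FTZ (bit 15) | DAZ (bit 6)
     }
*/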
24139 /* SSE or 3DNow!A */
24140 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
24141 "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
24142 IX86_BUILTIN_MASKMOVQ);
24145 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
24146 VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);
24148 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
24149 VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
24150 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
24151 VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);
24154 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
24155 VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
24156 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
24157 VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);
24160 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
24161 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
24162 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
24163 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
24164 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
24165 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
24166 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
24167 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
24168 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
24169 V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
24170 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
24171 V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);
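/* Editor's illustration (not part of the compiler): one AES round via the
   wmmintrin.h wrapper of the builtin defined above; compile with -maes.

     #include <wmmintrin.h>

     __m128i
     aes_round (__m128i state, __m128i round_key)
     {
       // _mm_aesenc_si128 wraps __builtin_ia32_aesenc128.
       return _mm_aesenc_si128 (state, round_key);
     }
*/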
24174 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
24175 V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);
24177 /* MMX access to the vec_init patterns. */
24178 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
24179 V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);
24181 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
24182 V4HI_FTYPE_HI_HI_HI_HI,
24183 IX86_BUILTIN_VEC_INIT_V4HI);
24185 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
24186 V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
24187 IX86_BUILTIN_VEC_INIT_V8QI);
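/* Editor's illustration (not part of the compiler): calling a vec_init
   builtin directly; the vector typedef mirrors the V2SI mode.  Assumes
   -mmmx.

     typedef int v2si __attribute__ ((vector_size (8)));

     v2si
     make_pair (int a, int b)
     {
       return __builtin_ia32_vec_init_v2si (a, b);
     }
*/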
24189 /* Access to the vec_extract patterns. */
24190 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
24191 DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
24192 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
24193 DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
24194 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
24195 FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
24196 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
24197 SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
24198 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
24199 HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);
24201 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
24202 "__builtin_ia32_vec_ext_v4hi",
24203 HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);
24205 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
24206 SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);
24208 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
24209 QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);
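/* Editor's illustration (not part of the compiler): extracting one lane
   through a vec_ext builtin; the element index must be a compile-time
   constant.  Assumes -msse.

     typedef float v4sf __attribute__ ((vector_size (16)));

     float
     third_lane (v4sf v)
     {
       return __builtin_ia32_vec_ext_v4sf (v, 2);
     }
*/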
24211 /* Access to the vec_set patterns. */
24212 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
24213 "__builtin_ia32_vec_set_v2di",
24214 V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);
24216 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
24217 V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);
24219 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
24220 V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);
24222 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
24223 V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);
24225 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
24226 "__builtin_ia32_vec_set_v4hi",
24227 V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);
24229 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
24230 V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
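/* Editor's illustration (not part of the compiler): the SSE2 intrinsic
   _mm_insert_epi16 is implemented on top of the vec_set builtin defined
   above (an assumption based on a matching GCC's emmintrin.h).

     #include <emmintrin.h>

     __m128i
     set_lane3 (__m128i v, short x)
     {
       // Wraps __builtin_ia32_vec_set_v8hi; the lane index is an immediate.
       return _mm_insert_epi16 (v, x, 3);
     }
*/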
24232 /* Add the FMA4 and XOP multi-arg builtin instructions.  */
24233 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
24238 ftype = (enum ix86_builtin_func_type) d->flag;
24239 def_builtin_const (d->mask, d->name, ftype, d->code);
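/* Editor's illustration (not part of the compiler): a user-level view of
   one entry of bdesc_multi_arg, assuming the fma4intrin.h wrappers of a
   matching GCC; compile with -mfma4.

     #include <x86intrin.h>

     __m128
     mul_add (__m128 a, __m128 b, __m128 c)
     {
       // _mm_macc_ps wraps __builtin_ia32_vfmaddps: a * b + c, fused.
       return _mm_macc_ps (a, b, c);
     }
*/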
24243 /* Internal method for ix86_init_builtins. */
24246 ix86_init_builtins_va_builtins_abi (void)
24248 tree ms_va_ref, sysv_va_ref;
24249 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
24250 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
24251 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
24252 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
24256 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
24257 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
24258 ms_va_ref = build_reference_type (ms_va_list_type_node);
24260 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
24263 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
24264 fnvoid_va_start_ms =
24265 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
24266 fnvoid_va_end_sysv =
24267 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
24268 fnvoid_va_start_sysv =
24269 build_varargs_function_type_list (void_type_node, sysv_va_ref,
24271 fnvoid_va_copy_ms =
24272 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
24274 fnvoid_va_copy_sysv =
24275 build_function_type_list (void_type_node, sysv_va_ref,
24276 sysv_va_ref, NULL_TREE);
24278 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
24279 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
24280 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
24281 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
24282 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
24283 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
24284 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
24285 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
24286 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
24287 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
24288 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
24289 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
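/* Editor's illustration (not part of the compiler): the builtins above give
   each function the va_* flavour matching its own ABI.  A sketch, assuming
   an x86-64 target where the ms_abi attribute is accepted and va_arg
   handles __builtin_ms_va_list.

     int __attribute__ ((ms_abi))
     sum_ints (int count, ...)
     {
       __builtin_ms_va_list ap;
       int i, total = 0;

       __builtin_ms_va_start (ap, count);
       for (i = 0; i < count; i++)
         total += __builtin_va_arg (ap, int);
       __builtin_ms_va_end (ap);
       return total;
     }
*/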
24293 ix86_init_builtin_types (void)
24295 tree float128_type_node, float80_type_node;
24297 /* The __float80 type. */
24298 float80_type_node = long_double_type_node;
24299 if (TYPE_MODE (float80_type_node) != XFmode)
24301 /* long double is not XFmode here; build a distinct type for __float80.  */
24302 float80_type_node = make_node (REAL_TYPE);
24304 TYPE_PRECISION (float80_type_node) = 80;
24305 layout_type (float80_type_node);
24307 lang_hooks.types.register_builtin_type (float80_type_node, "__float80");
24309 /* The __float128 type. */
24310 float128_type_node = make_node (REAL_TYPE);
24311 TYPE_PRECISION (float128_type_node) = 128;
24312 layout_type (float128_type_node);
24313 lang_hooks.types.register_builtin_type (float128_type_node, "__float128");
24315 /* This macro is built by i386-builtin-types.awk. */
24316 DEFINE_BUILTIN_PRIMITIVE_TYPES;
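/* Editor's illustration (not part of the compiler): once registered, the
   two extended types are usable directly from C; __float128 arithmetic is
   expanded to libgcc soft-fp calls where no hardware support exists.

     __float128 quad_double (__float128 x) { return x + x; }
     __float80  ext_halve   (__float80 x)  { return x / 2; }
*/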
24320 ix86_init_builtins (void)
24324 ix86_init_builtin_types ();
24326 /* TFmode support builtins. */
24327 def_builtin_const (0, "__builtin_infq",
24328 FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
24329 def_builtin_const (0, "__builtin_huge_valq",
24330 FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);
24332 /* We will expand them to normal calls if SSE2 isn't available, since
24333 they are used by libgcc.  */
24334 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
24335 t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
24336 BUILT_IN_MD, "__fabstf2", NULL_TREE);
24337 TREE_READONLY (t) = 1;
24338 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;
24340 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
24341 t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
24342 BUILT_IN_MD, "__copysigntf3", NULL_TREE);
24343 TREE_READONLY (t) = 1;
24344 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
24346 ix86_init_mmx_sse_builtins ();
24349 ix86_init_builtins_va_builtins_abi ();
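/* Editor's illustration (not part of the compiler): the TFmode builtins
   registered above, combined in the usual fabs/copysign way.

     __float128
     apply_sign (__float128 magnitude, __float128 sign)
     {
       return __builtin_copysignq (__builtin_fabsq (magnitude), sign);
     }
*/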
24352 /* Return the ix86 builtin for CODE. */
24355 ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
24357 if (code >= IX86_BUILTIN_MAX)
24358 return error_mark_node;
24360 return ix86_builtins[code];
24363 /* Errors in the source file can cause expand_expr to return const0_rtx
24364 where we expect a vector. To avoid crashing, use one of the vector
24365 clear instructions. */
24367 safe_vector_operand (rtx x, enum machine_mode mode)
24369 if (x == const0_rtx)
24370 x = CONST0_RTX (mode);
24374 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
24377 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
24380 tree arg0 = CALL_EXPR_ARG (exp, 0);
24381 tree arg1 = CALL_EXPR_ARG (exp, 1);
24382 rtx op0 = expand_normal (arg0);
24383 rtx op1 = expand_normal (arg1);
24384 enum machine_mode tmode = insn_data[icode].operand[0].mode;
24385 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
24386 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
24388 if (VECTOR_MODE_P (mode0))
24389 op0 = safe_vector_operand (op0, mode0);
24390 if (VECTOR_MODE_P (mode1))
24391 op1 = safe_vector_operand (op1, mode1);
24393 if (optimize || !target
24394 || GET_MODE (target) != tmode
24395 || !insn_data[icode].operand[0].predicate (target, tmode))
24396 target = gen_reg_rtx (tmode);
24398 if (GET_MODE (op1) == SImode && mode1 == TImode)
24400 rtx x = gen_reg_rtx (V4SImode);
24401 emit_insn (gen_sse2_loadd (x, op1));
24402 op1 = gen_lowpart (TImode, x);
24405 if (!insn_data[icode].operand[1].predicate (op0, mode0))
24406 op0 = copy_to_mode_reg (mode0, op0);
24407 if (!insn_data[icode].operand[2].predicate (op1, mode1))
24408 op1 = copy_to_mode_reg (mode1, op1);
24410 pat = GEN_FCN (icode) (target, op0, op1);
24419 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
24422 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
24423 enum ix86_builtin_func_type m_type,
24424 enum rtx_code sub_code)
24429 bool comparison_p = false;
24431 bool last_arg_constant = false;
24432 int num_memory = 0;
24435 enum machine_mode mode;
24438 enum machine_mode tmode = insn_data[icode].operand[0].mode;
24442 case MULTI_ARG_4_DF2_DI_I:
24443 case MULTI_ARG_4_DF2_DI_I1:
24444 case MULTI_ARG_4_SF2_SI_I:
24445 case MULTI_ARG_4_SF2_SI_I1:
24447 last_arg_constant = true;
24450 case MULTI_ARG_3_SF:
24451 case MULTI_ARG_3_DF:
24452 case MULTI_ARG_3_SF2:
24453 case MULTI_ARG_3_DF2:
24454 case MULTI_ARG_3_DI:
24455 case MULTI_ARG_3_SI:
24456 case MULTI_ARG_3_SI_DI:
24457 case MULTI_ARG_3_HI:
24458 case MULTI_ARG_3_HI_SI:
24459 case MULTI_ARG_3_QI:
24460 case MULTI_ARG_3_DI2:
24461 case MULTI_ARG_3_SI2:
24462 case MULTI_ARG_3_HI2:
24463 case MULTI_ARG_3_QI2:
24467 case MULTI_ARG_2_SF:
24468 case MULTI_ARG_2_DF:
24469 case MULTI_ARG_2_DI:
24470 case MULTI_ARG_2_SI:
24471 case MULTI_ARG_2_HI:
24472 case MULTI_ARG_2_QI:
24476 case MULTI_ARG_2_DI_IMM:
24477 case MULTI_ARG_2_SI_IMM:
24478 case MULTI_ARG_2_HI_IMM:
24479 case MULTI_ARG_2_QI_IMM:
24481 last_arg_constant = true;
24484 case MULTI_ARG_1_SF:
24485 case MULTI_ARG_1_DF:
24486 case MULTI_ARG_1_SF2:
24487 case MULTI_ARG_1_DF2:
24488 case MULTI_ARG_1_DI:
24489 case MULTI_ARG_1_SI:
24490 case MULTI_ARG_1_HI:
24491 case MULTI_ARG_1_QI:
24492 case MULTI_ARG_1_SI_DI:
24493 case MULTI_ARG_1_HI_DI:
24494 case MULTI_ARG_1_HI_SI:
24495 case MULTI_ARG_1_QI_DI:
24496 case MULTI_ARG_1_QI_SI:
24497 case MULTI_ARG_1_QI_HI:
24501 case MULTI_ARG_2_DI_CMP:
24502 case MULTI_ARG_2_SI_CMP:
24503 case MULTI_ARG_2_HI_CMP:
24504 case MULTI_ARG_2_QI_CMP:
24506 comparison_p = true;
24509 case MULTI_ARG_2_SF_TF:
24510 case MULTI_ARG_2_DF_TF:
24511 case MULTI_ARG_2_DI_TF:
24512 case MULTI_ARG_2_SI_TF:
24513 case MULTI_ARG_2_HI_TF:
24514 case MULTI_ARG_2_QI_TF:
24520 gcc_unreachable ();
24523 if (optimize || !target
24524 || GET_MODE (target) != tmode
24525 || !insn_data[icode].operand[0].predicate (target, tmode))
24526 target = gen_reg_rtx (tmode);
24528 gcc_assert (nargs <= 4);
24530 for (i = 0; i < nargs; i++)
24532 tree arg = CALL_EXPR_ARG (exp, i);
24533 rtx op = expand_normal (arg);
24534 int adjust = (comparison_p) ? 1 : 0;
24535 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
24537 if (last_arg_constant && i == nargs-1)
24539 if (!CONST_INT_P (op))
24541 error ("last argument must be an immediate");
24542 return gen_reg_rtx (tmode);
24547 if (VECTOR_MODE_P (mode))
24548 op = safe_vector_operand (op, mode);
24550 /* If we aren't optimizing, only allow one memory operand to be generated.  */
24552 if (memory_operand (op, mode))
24555 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
24558 || !insn_data[icode].operand[i+adjust+1].predicate (op, mode)
24560 op = force_reg (mode, op);
24564 args[i].mode = mode;
24570 pat = GEN_FCN (icode) (target, args[0].op);
24575 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
24576 GEN_INT ((int)sub_code));
24577 else if (! comparison_p)
24578 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
24581 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
24585 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
24590 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
24594 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op, args[3].op);
24598 gcc_unreachable ();
24608 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
24609 insns with vec_merge. */
24612 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
24616 tree arg0 = CALL_EXPR_ARG (exp, 0);
24617 rtx op1, op0 = expand_normal (arg0);
24618 enum machine_mode tmode = insn_data[icode].operand[0].mode;
24619 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
24621 if (optimize || !target
24622 || GET_MODE (target) != tmode
24623 || !insn_data[icode].operand[0].predicate (target, tmode))
24624 target = gen_reg_rtx (tmode);
24626 if (VECTOR_MODE_P (mode0))
24627 op0 = safe_vector_operand (op0, mode0);
24629 if ((optimize && !register_operand (op0, mode0))
24630 || !insn_data[icode].operand[1].predicate (op0, mode0))
24631 op0 = copy_to_mode_reg (mode0, op0);
24634 if (!insn_data[icode].operand[2].predicate (op1, mode0))
24635 op1 = copy_to_mode_reg (mode0, op1);
24637 pat = GEN_FCN (icode) (target, op0, op1);
24644 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
24647 ix86_expand_sse_compare (const struct builtin_description *d,
24648 tree exp, rtx target, bool swap)
24651 tree arg0 = CALL_EXPR_ARG (exp, 0);
24652 tree arg1 = CALL_EXPR_ARG (exp, 1);
24653 rtx op0 = expand_normal (arg0);
24654 rtx op1 = expand_normal (arg1);
24656 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
24657 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
24658 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
24659 enum rtx_code comparison = d->comparison;
24661 if (VECTOR_MODE_P (mode0))
24662 op0 = safe_vector_operand (op0, mode0);
24663 if (VECTOR_MODE_P (mode1))
24664 op1 = safe_vector_operand (op1, mode1);
24666 /* Swap operands if we have a comparison that isn't available in SSE.  */
24670 rtx tmp = gen_reg_rtx (mode1);
24671 emit_move_insn (tmp, op1);
24676 if (optimize || !target
24677 || GET_MODE (target) != tmode
24678 || !insn_data[d->icode].operand[0].predicate (target, tmode))
24679 target = gen_reg_rtx (tmode);
24681 if ((optimize && !register_operand (op0, mode0))
24682 || !insn_data[d->icode].operand[1].predicate (op0, mode0))
24683 op0 = copy_to_mode_reg (mode0, op0);
24684 if ((optimize && !register_operand (op1, mode1))
24685 || !insn_data[d->icode].operand[2].predicate (op1, mode1))
24686 op1 = copy_to_mode_reg (mode1, op1);
24688 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
24689 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
24696 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
24699 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
24703 tree arg0 = CALL_EXPR_ARG (exp, 0);
24704 tree arg1 = CALL_EXPR_ARG (exp, 1);
24705 rtx op0 = expand_normal (arg0);
24706 rtx op1 = expand_normal (arg1);
24707 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
24708 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
24709 enum rtx_code comparison = d->comparison;
24711 if (VECTOR_MODE_P (mode0))
24712 op0 = safe_vector_operand (op0, mode0);
24713 if (VECTOR_MODE_P (mode1))
24714 op1 = safe_vector_operand (op1, mode1);
24716 /* Swap operands if we have a comparison that isn't available in SSE.  */
24718 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
24725 target = gen_reg_rtx (SImode);
24726 emit_move_insn (target, const0_rtx);
24727 target = gen_rtx_SUBREG (QImode, target, 0);
24729 if ((optimize && !register_operand (op0, mode0))
24730 || !insn_data[d->icode].operand[0].predicate (op0, mode0))
24731 op0 = copy_to_mode_reg (mode0, op0);
24732 if ((optimize && !register_operand (op1, mode1))
24733 || !insn_data[d->icode].operand[1].predicate (op1, mode1))
24734 op1 = copy_to_mode_reg (mode1, op1);
24736 pat = GEN_FCN (d->icode) (op0, op1);
24740 emit_insn (gen_rtx_SET (VOIDmode,
24741 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
24742 gen_rtx_fmt_ee (comparison, QImode,
24746 return SUBREG_REG (target);
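/* Editor's illustration (not part of the compiler): a comi builtin as seen
   from user code, assuming the xmmintrin.h wrapper of a matching GCC;
   compile with -msse.

     #include <xmmintrin.h>

     int
     scalar_less (__m128 a, __m128 b)
     {
       // _mm_comilt_ss wraps __builtin_ia32_comilt; the expander above
       // materializes the flags comparison into a 0/1 result.
       return _mm_comilt_ss (a, b);
     }
*/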
24749 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
24752 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
24756 tree arg0 = CALL_EXPR_ARG (exp, 0);
24757 tree arg1 = CALL_EXPR_ARG (exp, 1);
24758 rtx op0 = expand_normal (arg0);
24759 rtx op1 = expand_normal (arg1);
24760 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
24761 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
24762 enum rtx_code comparison = d->comparison;
24764 if (VECTOR_MODE_P (mode0))
24765 op0 = safe_vector_operand (op0, mode0);
24766 if (VECTOR_MODE_P (mode1))
24767 op1 = safe_vector_operand (op1, mode1);
24769 target = gen_reg_rtx (SImode);
24770 emit_move_insn (target, const0_rtx);
24771 target = gen_rtx_SUBREG (QImode, target, 0);
24773 if ((optimize && !register_operand (op0, mode0))
24774 || !insn_data[d->icode].operand[0].predicate (op0, mode0))
24775 op0 = copy_to_mode_reg (mode0, op0);
24776 if ((optimize && !register_operand (op1, mode1))
24777 || !insn_data[d->icode].operand[1].predicate (op1, mode1))
24778 op1 = copy_to_mode_reg (mode1, op1);
24780 pat = GEN_FCN (d->icode) (op0, op1);
24784 emit_insn (gen_rtx_SET (VOIDmode,
24785 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
24786 gen_rtx_fmt_ee (comparison, QImode,
24790 return SUBREG_REG (target);
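/* Editor's illustration (not part of the compiler): the EQ rows of the
   ptest builtins correspond to the "testz" intrinsics (ZF after PTEST);
   assumes the smmintrin.h wrappers of a matching GCC and -msse4.1.

     #include <smmintrin.h>

     int
     all_masked_zero (__m128i mask, __m128i v)
     {
       // _mm_testz_si128 wraps __builtin_ia32_ptestz128.
       return _mm_testz_si128 (mask, v);
     }
*/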
24793 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
24796 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
24797 tree exp, rtx target)
24800 tree arg0 = CALL_EXPR_ARG (exp, 0);
24801 tree arg1 = CALL_EXPR_ARG (exp, 1);
24802 tree arg2 = CALL_EXPR_ARG (exp, 2);
24803 tree arg3 = CALL_EXPR_ARG (exp, 3);
24804 tree arg4 = CALL_EXPR_ARG (exp, 4);
24805 rtx scratch0, scratch1;
24806 rtx op0 = expand_normal (arg0);
24807 rtx op1 = expand_normal (arg1);
24808 rtx op2 = expand_normal (arg2);
24809 rtx op3 = expand_normal (arg3);
24810 rtx op4 = expand_normal (arg4);
24811 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
24813 tmode0 = insn_data[d->icode].operand[0].mode;
24814 tmode1 = insn_data[d->icode].operand[1].mode;
24815 modev2 = insn_data[d->icode].operand[2].mode;
24816 modei3 = insn_data[d->icode].operand[3].mode;
24817 modev4 = insn_data[d->icode].operand[4].mode;
24818 modei5 = insn_data[d->icode].operand[5].mode;
24819 modeimm = insn_data[d->icode].operand[6].mode;
24821 if (VECTOR_MODE_P (modev2))
24822 op0 = safe_vector_operand (op0, modev2);
24823 if (VECTOR_MODE_P (modev4))
24824 op2 = safe_vector_operand (op2, modev4);
24826 if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
24827 op0 = copy_to_mode_reg (modev2, op0);
24828 if (!insn_data[d->icode].operand[3].predicate (op1, modei3))
24829 op1 = copy_to_mode_reg (modei3, op1);
24830 if ((optimize && !register_operand (op2, modev4))
24831 || !insn_data[d->icode].operand[4].predicate (op2, modev4))
24832 op2 = copy_to_mode_reg (modev4, op2);
24833 if (!insn_data[d->icode].operand[5].predicate (op3, modei5))
24834 op3 = copy_to_mode_reg (modei5, op3);
24836 if (!insn_data[d->icode].operand[6].predicate (op4, modeimm))
24838 error ("the fifth argument must be a 8-bit immediate");
24842 if (d->code == IX86_BUILTIN_PCMPESTRI128)
24844 if (optimize || !target
24845 || GET_MODE (target) != tmode0
24846 || !insn_data[d->icode].operand[0].predicate (target, tmode0))
24847 target = gen_reg_rtx (tmode0);
24849 scratch1 = gen_reg_rtx (tmode1);
24851 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
24853 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
24855 if (optimize || !target
24856 || GET_MODE (target) != tmode1
24857 || !insn_data[d->icode].operand[1].predicate (target, tmode1))
24858 target = gen_reg_rtx (tmode1);
24860 scratch0 = gen_reg_rtx (tmode0);
24862 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
24866 gcc_assert (d->flag);
24868 scratch0 = gen_reg_rtx (tmode0);
24869 scratch1 = gen_reg_rtx (tmode1);
24871 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
24881 target = gen_reg_rtx (SImode);
24882 emit_move_insn (target, const0_rtx);
24883 target = gen_rtx_SUBREG (QImode, target, 0);
24886 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
24887 gen_rtx_fmt_ee (EQ, QImode,
24888 gen_rtx_REG ((enum machine_mode) d->flag,
24891 return SUBREG_REG (target);
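/* Editor's illustration (not part of the compiler): the five-operand shape
   handled above matches the explicit-length string-compare intrinsics;
   assumes the smmintrin.h wrappers of a matching GCC and -msse4.2.

     #include <smmintrin.h>

     int
     find_any_of (__m128i set, int set_len, __m128i text, int text_len)
     {
       // _mm_cmpestri wraps the pcmpestri builtin: returns the index of
       // the first byte of TEXT matching any byte of SET, or 16 if none.
       return _mm_cmpestri (set, set_len, text, text_len,
                            _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY);
     }
*/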
24898 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
24901 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
24902 tree exp, rtx target)
24905 tree arg0 = CALL_EXPR_ARG (exp, 0);
24906 tree arg1 = CALL_EXPR_ARG (exp, 1);
24907 tree arg2 = CALL_EXPR_ARG (exp, 2);
24908 rtx scratch0, scratch1;
24909 rtx op0 = expand_normal (arg0);
24910 rtx op1 = expand_normal (arg1);
24911 rtx op2 = expand_normal (arg2);
24912 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
24914 tmode0 = insn_data[d->icode].operand[0].mode;
24915 tmode1 = insn_data[d->icode].operand[1].mode;
24916 modev2 = insn_data[d->icode].operand[2].mode;
24917 modev3 = insn_data[d->icode].operand[3].mode;
24918 modeimm = insn_data[d->icode].operand[4].mode;
24920 if (VECTOR_MODE_P (modev2))
24921 op0 = safe_vector_operand (op0, modev2);
24922 if (VECTOR_MODE_P (modev3))
24923 op1 = safe_vector_operand (op1, modev3);
24925 if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
24926 op0 = copy_to_mode_reg (modev2, op0);
24927 if ((optimize && !register_operand (op1, modev3))
24928 || !insn_data[d->icode].operand[3].predicate (op1, modev3))
24929 op1 = copy_to_mode_reg (modev3, op1);
24931 if (!insn_data[d->icode].operand[4].predicate (op2, modeimm))
24933 error ("the third argument must be a 8-bit immediate");
24937 if (d->code == IX86_BUILTIN_PCMPISTRI128)
24939 if (optimize || !target
24940 || GET_MODE (target) != tmode0
24941 || !insn_data[d->icode].operand[0].predicate (target, tmode0))
24942 target = gen_reg_rtx (tmode0);
24944 scratch1 = gen_reg_rtx (tmode1);
24946 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
24948 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
24950 if (optimize || !target
24951 || GET_MODE (target) != tmode1
24952 || !insn_data[d->icode].operand[1].predicate (target, tmode1))
24953 target = gen_reg_rtx (tmode1);
24955 scratch0 = gen_reg_rtx (tmode0);
24957 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
24961 gcc_assert (d->flag);
24963 scratch0 = gen_reg_rtx (tmode0);
24964 scratch1 = gen_reg_rtx (tmode1);
24966 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
24976 target = gen_reg_rtx (SImode);
24977 emit_move_insn (target, const0_rtx);
24978 target = gen_rtx_SUBREG (QImode, target, 0);
24981 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
24982 gen_rtx_fmt_ee (EQ, QImode,
24983 gen_rtx_REG ((enum machine_mode) d->flag,
24986 return SUBREG_REG (target);
24992 /* Subroutine of ix86_expand_builtin to take care of insns with
24993 a variable number of operands.  */
24996 ix86_expand_args_builtin (const struct builtin_description *d,
24997 tree exp, rtx target)
24999 rtx pat, real_target;
25000 unsigned int i, nargs;
25001 unsigned int nargs_constant = 0;
25002 int num_memory = 0;
25006 enum machine_mode mode;
25008 bool last_arg_count = false;
25009 enum insn_code icode = d->icode;
25010 const struct insn_data_d *insn_p = &insn_data[icode];
25011 enum machine_mode tmode = insn_p->operand[0].mode;
25012 enum machine_mode rmode = VOIDmode;
25014 enum rtx_code comparison = d->comparison;
25016 switch ((enum ix86_builtin_func_type) d->flag)
25018 case INT_FTYPE_V8SF_V8SF_PTEST:
25019 case INT_FTYPE_V4DI_V4DI_PTEST:
25020 case INT_FTYPE_V4DF_V4DF_PTEST:
25021 case INT_FTYPE_V4SF_V4SF_PTEST:
25022 case INT_FTYPE_V2DI_V2DI_PTEST:
25023 case INT_FTYPE_V2DF_V2DF_PTEST:
25024 return ix86_expand_sse_ptest (d, exp, target);
25025 case FLOAT128_FTYPE_FLOAT128:
25026 case FLOAT_FTYPE_FLOAT:
25027 case INT_FTYPE_INT:
25028 case UINT64_FTYPE_INT:
25029 case UINT16_FTYPE_UINT16:
25030 case INT64_FTYPE_INT64:
25031 case INT64_FTYPE_V4SF:
25032 case INT64_FTYPE_V2DF:
25033 case INT_FTYPE_V16QI:
25034 case INT_FTYPE_V8QI:
25035 case INT_FTYPE_V8SF:
25036 case INT_FTYPE_V4DF:
25037 case INT_FTYPE_V4SF:
25038 case INT_FTYPE_V2DF:
25039 case V16QI_FTYPE_V16QI:
25040 case V8SI_FTYPE_V8SF:
25041 case V8SI_FTYPE_V4SI:
25042 case V8HI_FTYPE_V8HI:
25043 case V8HI_FTYPE_V16QI:
25044 case V8QI_FTYPE_V8QI:
25045 case V8SF_FTYPE_V8SF:
25046 case V8SF_FTYPE_V8SI:
25047 case V8SF_FTYPE_V4SF:
25048 case V8SF_FTYPE_V8HI:
25049 case V4SI_FTYPE_V4SI:
25050 case V4SI_FTYPE_V16QI:
25051 case V4SI_FTYPE_V4SF:
25052 case V4SI_FTYPE_V8SI:
25053 case V4SI_FTYPE_V8HI:
25054 case V4SI_FTYPE_V4DF:
25055 case V4SI_FTYPE_V2DF:
25056 case V4HI_FTYPE_V4HI:
25057 case V4DF_FTYPE_V4DF:
25058 case V4DF_FTYPE_V4SI:
25059 case V4DF_FTYPE_V4SF:
25060 case V4DF_FTYPE_V2DF:
25061 case V4SF_FTYPE_V4SF:
25062 case V4SF_FTYPE_V4SI:
25063 case V4SF_FTYPE_V8SF:
25064 case V4SF_FTYPE_V4DF:
25065 case V4SF_FTYPE_V8HI:
25066 case V4SF_FTYPE_V2DF:
25067 case V2DI_FTYPE_V2DI:
25068 case V2DI_FTYPE_V16QI:
25069 case V2DI_FTYPE_V8HI:
25070 case V2DI_FTYPE_V4SI:
25071 case V2DF_FTYPE_V2DF:
25072 case V2DF_FTYPE_V4SI:
25073 case V2DF_FTYPE_V4DF:
25074 case V2DF_FTYPE_V4SF:
25075 case V2DF_FTYPE_V2SI:
25076 case V2SI_FTYPE_V2SI:
25077 case V2SI_FTYPE_V4SF:
25078 case V2SI_FTYPE_V2SF:
25079 case V2SI_FTYPE_V2DF:
25080 case V2SF_FTYPE_V2SF:
25081 case V2SF_FTYPE_V2SI:
25084 case V4SF_FTYPE_V4SF_VEC_MERGE:
25085 case V2DF_FTYPE_V2DF_VEC_MERGE:
25086 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
25087 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
25088 case V16QI_FTYPE_V16QI_V16QI:
25089 case V16QI_FTYPE_V8HI_V8HI:
25090 case V8QI_FTYPE_V8QI_V8QI:
25091 case V8QI_FTYPE_V4HI_V4HI:
25092 case V8HI_FTYPE_V8HI_V8HI:
25093 case V8HI_FTYPE_V16QI_V16QI:
25094 case V8HI_FTYPE_V4SI_V4SI:
25095 case V8SF_FTYPE_V8SF_V8SF:
25096 case V8SF_FTYPE_V8SF_V8SI:
25097 case V4SI_FTYPE_V4SI_V4SI:
25098 case V4SI_FTYPE_V8HI_V8HI:
25099 case V4SI_FTYPE_V4SF_V4SF:
25100 case V4SI_FTYPE_V2DF_V2DF:
25101 case V4HI_FTYPE_V4HI_V4HI:
25102 case V4HI_FTYPE_V8QI_V8QI:
25103 case V4HI_FTYPE_V2SI_V2SI:
25104 case V4DF_FTYPE_V4DF_V4DF:
25105 case V4DF_FTYPE_V4DF_V4DI:
25106 case V4SF_FTYPE_V4SF_V4SF:
25107 case V4SF_FTYPE_V4SF_V4SI:
25108 case V4SF_FTYPE_V4SF_V2SI:
25109 case V4SF_FTYPE_V4SF_V2DF:
25110 case V4SF_FTYPE_V4SF_DI:
25111 case V4SF_FTYPE_V4SF_SI:
25112 case V2DI_FTYPE_V2DI_V2DI:
25113 case V2DI_FTYPE_V16QI_V16QI:
25114 case V2DI_FTYPE_V4SI_V4SI:
25115 case V2DI_FTYPE_V2DI_V16QI:
25116 case V2DI_FTYPE_V2DF_V2DF:
25117 case V2SI_FTYPE_V2SI_V2SI:
25118 case V2SI_FTYPE_V4HI_V4HI:
25119 case V2SI_FTYPE_V2SF_V2SF:
25120 case V2DF_FTYPE_V2DF_V2DF:
25121 case V2DF_FTYPE_V2DF_V4SF:
25122 case V2DF_FTYPE_V2DF_V2DI:
25123 case V2DF_FTYPE_V2DF_DI:
25124 case V2DF_FTYPE_V2DF_SI:
25125 case V2SF_FTYPE_V2SF_V2SF:
25126 case V1DI_FTYPE_V1DI_V1DI:
25127 case V1DI_FTYPE_V8QI_V8QI:
25128 case V1DI_FTYPE_V2SI_V2SI:
25129 if (comparison == UNKNOWN)
25130 return ix86_expand_binop_builtin (icode, exp, target);
25133 case V4SF_FTYPE_V4SF_V4SF_SWAP:
25134 case V2DF_FTYPE_V2DF_V2DF_SWAP:
25135 gcc_assert (comparison != UNKNOWN);
25139 case V8HI_FTYPE_V8HI_V8HI_COUNT:
25140 case V8HI_FTYPE_V8HI_SI_COUNT:
25141 case V4SI_FTYPE_V4SI_V4SI_COUNT:
25142 case V4SI_FTYPE_V4SI_SI_COUNT:
25143 case V4HI_FTYPE_V4HI_V4HI_COUNT:
25144 case V4HI_FTYPE_V4HI_SI_COUNT:
25145 case V2DI_FTYPE_V2DI_V2DI_COUNT:
25146 case V2DI_FTYPE_V2DI_SI_COUNT:
25147 case V2SI_FTYPE_V2SI_V2SI_COUNT:
25148 case V2SI_FTYPE_V2SI_SI_COUNT:
25149 case V1DI_FTYPE_V1DI_V1DI_COUNT:
25150 case V1DI_FTYPE_V1DI_SI_COUNT:
25152 last_arg_count = true;
25154 case UINT64_FTYPE_UINT64_UINT64:
25155 case UINT_FTYPE_UINT_UINT:
25156 case UINT_FTYPE_UINT_USHORT:
25157 case UINT_FTYPE_UINT_UCHAR:
25158 case UINT16_FTYPE_UINT16_INT:
25159 case UINT8_FTYPE_UINT8_INT:
25162 case V2DI_FTYPE_V2DI_INT_CONVERT:
25165 nargs_constant = 1;
25167 case V8HI_FTYPE_V8HI_INT:
25168 case V8HI_FTYPE_V8SF_INT:
25169 case V8HI_FTYPE_V4SF_INT:
25170 case V8SF_FTYPE_V8SF_INT:
25171 case V4SI_FTYPE_V4SI_INT:
25172 case V4SI_FTYPE_V8SI_INT:
25173 case V4HI_FTYPE_V4HI_INT:
25174 case V4DF_FTYPE_V4DF_INT:
25175 case V4SF_FTYPE_V4SF_INT:
25176 case V4SF_FTYPE_V8SF_INT:
25177 case V2DI_FTYPE_V2DI_INT:
25178 case V2DF_FTYPE_V2DF_INT:
25179 case V2DF_FTYPE_V4DF_INT:
25181 nargs_constant = 1;
25183 case V16QI_FTYPE_V16QI_V16QI_V16QI:
25184 case V8SF_FTYPE_V8SF_V8SF_V8SF:
25185 case V4DF_FTYPE_V4DF_V4DF_V4DF:
25186 case V4SF_FTYPE_V4SF_V4SF_V4SF:
25187 case V2DF_FTYPE_V2DF_V2DF_V2DF:
25190 case V16QI_FTYPE_V16QI_V16QI_INT:
25191 case V8HI_FTYPE_V8HI_V8HI_INT:
25192 case V8SI_FTYPE_V8SI_V8SI_INT:
25193 case V8SI_FTYPE_V8SI_V4SI_INT:
25194 case V8SF_FTYPE_V8SF_V8SF_INT:
25195 case V8SF_FTYPE_V8SF_V4SF_INT:
25196 case V4SI_FTYPE_V4SI_V4SI_INT:
25197 case V4DF_FTYPE_V4DF_V4DF_INT:
25198 case V4DF_FTYPE_V4DF_V2DF_INT:
25199 case V4SF_FTYPE_V4SF_V4SF_INT:
25200 case V2DI_FTYPE_V2DI_V2DI_INT:
25201 case V2DF_FTYPE_V2DF_V2DF_INT:
25203 nargs_constant = 1;
25205 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
25208 nargs_constant = 1;
25210 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
25213 nargs_constant = 1;
25215 case V2DI_FTYPE_V2DI_UINT_UINT:
25217 nargs_constant = 2;
25219 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
25220 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
25221 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
25222 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
25224 nargs_constant = 1;
25226 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
25228 nargs_constant = 2;
25231 gcc_unreachable ();
25234 gcc_assert (nargs <= ARRAY_SIZE (args));
25236 if (comparison != UNKNOWN)
25238 gcc_assert (nargs == 2);
25239 return ix86_expand_sse_compare (d, exp, target, swap);
25242 if (rmode == VOIDmode || rmode == tmode)
25246 || GET_MODE (target) != tmode
25247 || !insn_p->operand[0].predicate (target, tmode))
25248 target = gen_reg_rtx (tmode);
25249 real_target = target;
25253 target = gen_reg_rtx (rmode);
25254 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
25257 for (i = 0; i < nargs; i++)
25259 tree arg = CALL_EXPR_ARG (exp, i);
25260 rtx op = expand_normal (arg);
25261 enum machine_mode mode = insn_p->operand[i + 1].mode;
25262 bool match = insn_p->operand[i + 1].predicate (op, mode);
25264 if (last_arg_count && (i + 1) == nargs)
25266 /* SIMD shift insns take either an 8-bit immediate or a
25267 register as the count.  But builtin functions take int as the
25268 count.  If the count doesn't match, we put it in a register. */
25271 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
25272 if (!insn_p->operand[i + 1].predicate (op, mode))
25273 op = copy_to_reg (op);
25276 else if ((nargs - i) <= nargs_constant)
25281 case CODE_FOR_sse4_1_roundpd:
25282 case CODE_FOR_sse4_1_roundps:
25283 case CODE_FOR_sse4_1_roundsd:
25284 case CODE_FOR_sse4_1_roundss:
25285 case CODE_FOR_sse4_1_blendps:
25286 case CODE_FOR_avx_blendpd256:
25287 case CODE_FOR_avx_vpermilv4df:
25288 case CODE_FOR_avx_roundpd256:
25289 case CODE_FOR_avx_roundps256:
25290 error ("the last argument must be a 4-bit immediate");
25293 case CODE_FOR_sse4_1_blendpd:
25294 case CODE_FOR_avx_vpermilv2df:
25295 case CODE_FOR_xop_vpermil2v2df3:
25296 case CODE_FOR_xop_vpermil2v4sf3:
25297 case CODE_FOR_xop_vpermil2v4df3:
25298 case CODE_FOR_xop_vpermil2v8sf3:
25299 error ("the last argument must be a 2-bit immediate");
25302 case CODE_FOR_avx_vextractf128v4df:
25303 case CODE_FOR_avx_vextractf128v8sf:
25304 case CODE_FOR_avx_vextractf128v8si:
25305 case CODE_FOR_avx_vinsertf128v4df:
25306 case CODE_FOR_avx_vinsertf128v8sf:
25307 case CODE_FOR_avx_vinsertf128v8si:
25308 error ("the last argument must be a 1-bit immediate");
25311 case CODE_FOR_avx_cmpsdv2df3:
25312 case CODE_FOR_avx_cmpssv4sf3:
25313 case CODE_FOR_avx_cmppdv2df3:
25314 case CODE_FOR_avx_cmppsv4sf3:
25315 case CODE_FOR_avx_cmppdv4df3:
25316 case CODE_FOR_avx_cmppsv8sf3:
25317 error ("the last argument must be a 5-bit immediate");
25321 switch (nargs_constant)
25324 if ((nargs - i) == nargs_constant)
25326 error ("the next to last argument must be an 8-bit immediate");
25330 error ("the last argument must be an 8-bit immediate");
25333 gcc_unreachable ();
25340 if (VECTOR_MODE_P (mode))
25341 op = safe_vector_operand (op, mode);
25343 /* If we aren't optimizing, only allow one memory operand to be generated. */
25345 if (memory_operand (op, mode))
25348 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
25350 if (optimize || !match || num_memory > 1)
25351 op = copy_to_mode_reg (mode, op);
25355 op = copy_to_reg (op);
25356 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
25361 args[i].mode = mode;
25367 pat = GEN_FCN (icode) (real_target, args[0].op);
25370 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
25373 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
25377 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
25378 args[2].op, args[3].op);
25381 gcc_unreachable ();
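/* The immediate checks above reject, at compile time, calls whose last
   argument does not fit the insn's immediate operand.  A sketch:

     #include <smmintrin.h>
     __m128d r = _mm_round_pd (x, 42);

   CODE_FOR_sse4_1_roundpd takes a 4-bit immediate, so 42 is diagnosed
   with "the last argument must be a 4-bit immediate" rather than being
   silently truncated.  */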
25391 /* Subroutine of ix86_expand_builtin to take care of special insns
25392 with a variable number of operands. */
25395 ix86_expand_special_args_builtin (const struct builtin_description *d,
25396 tree exp, rtx target)
25400 unsigned int i, nargs, arg_adjust, memory;
25404 enum machine_mode mode;
25406 enum insn_code icode = d->icode;
25407 bool last_arg_constant = false;
25408 const struct insn_data_d *insn_p = &insn_data[icode];
25409 enum machine_mode tmode = insn_p->operand[0].mode;
25410 enum { load, store } klass;
25412 switch ((enum ix86_builtin_func_type) d->flag)
25414 case VOID_FTYPE_VOID:
25415 emit_insn (GEN_FCN (icode) (target));
25417 case VOID_FTYPE_UINT64:
25418 case VOID_FTYPE_UNSIGNED:
25424 case UINT64_FTYPE_VOID:
25425 case UNSIGNED_FTYPE_VOID:
25426 case UINT16_FTYPE_VOID:
25431 case UINT64_FTYPE_PUNSIGNED:
25432 case V2DI_FTYPE_PV2DI:
25433 case V32QI_FTYPE_PCCHAR:
25434 case V16QI_FTYPE_PCCHAR:
25435 case V8SF_FTYPE_PCV4SF:
25436 case V8SF_FTYPE_PCFLOAT:
25437 case V4SF_FTYPE_PCFLOAT:
25438 case V4DF_FTYPE_PCV2DF:
25439 case V4DF_FTYPE_PCDOUBLE:
25440 case V2DF_FTYPE_PCDOUBLE:
25441 case VOID_FTYPE_PVOID:
25446 case VOID_FTYPE_PV2SF_V4SF:
25447 case VOID_FTYPE_PV4DI_V4DI:
25448 case VOID_FTYPE_PV2DI_V2DI:
25449 case VOID_FTYPE_PCHAR_V32QI:
25450 case VOID_FTYPE_PCHAR_V16QI:
25451 case VOID_FTYPE_PFLOAT_V8SF:
25452 case VOID_FTYPE_PFLOAT_V4SF:
25453 case VOID_FTYPE_PDOUBLE_V4DF:
25454 case VOID_FTYPE_PDOUBLE_V2DF:
25455 case VOID_FTYPE_PULONGLONG_ULONGLONG:
25456 case VOID_FTYPE_PINT_INT:
25459 /* Reserve memory operand for target. */
25460 memory = ARRAY_SIZE (args);
25462 case V4SF_FTYPE_V4SF_PCV2SF:
25463 case V2DF_FTYPE_V2DF_PCDOUBLE:
25468 case V8SF_FTYPE_PCV8SF_V8SF:
25469 case V4DF_FTYPE_PCV4DF_V4DF:
25470 case V4SF_FTYPE_PCV4SF_V4SF:
25471 case V2DF_FTYPE_PCV2DF_V2DF:
25476 case VOID_FTYPE_PV8SF_V8SF_V8SF:
25477 case VOID_FTYPE_PV4DF_V4DF_V4DF:
25478 case VOID_FTYPE_PV4SF_V4SF_V4SF:
25479 case VOID_FTYPE_PV2DF_V2DF_V2DF:
25482 /* Reserve memory operand for target. */
25483 memory = ARRAY_SIZE (args);
25485 case VOID_FTYPE_UINT_UINT_UINT:
25486 case VOID_FTYPE_UINT64_UINT_UINT:
25487 case UCHAR_FTYPE_UINT_UINT_UINT:
25488 case UCHAR_FTYPE_UINT64_UINT_UINT:
25491 memory = ARRAY_SIZE (args);
25492 last_arg_constant = true;
25495 gcc_unreachable ();
25498 gcc_assert (nargs <= ARRAY_SIZE (args));
25500 if (klass == store)
25502 arg = CALL_EXPR_ARG (exp, 0);
25503 op = expand_normal (arg);
25504 gcc_assert (target == 0);
25506 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
25508 target = force_reg (tmode, op);
25516 || GET_MODE (target) != tmode
25517 || !insn_p->operand[0].predicate (target, tmode))
25518 target = gen_reg_rtx (tmode);
25521 for (i = 0; i < nargs; i++)
25523 enum machine_mode mode = insn_p->operand[i + 1].mode;
25526 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
25527 op = expand_normal (arg);
25528 match = insn_p->operand[i + 1].predicate (op, mode);
25530 if (last_arg_constant && (i + 1) == nargs)
25534 if (icode == CODE_FOR_lwp_lwpvalsi3
25535 || icode == CODE_FOR_lwp_lwpinssi3
25536 || icode == CODE_FOR_lwp_lwpvaldi3
25537 || icode == CODE_FOR_lwp_lwpinsdi3)
25538 error ("the last argument must be a 32-bit immediate");
25540 error ("the last argument must be an 8-bit immediate");
25548 /* This must be the memory operand. */
25549 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
25550 gcc_assert (GET_MODE (op) == mode
25551 || GET_MODE (op) == VOIDmode);
25555 /* This must be a register. */
25556 if (VECTOR_MODE_P (mode))
25557 op = safe_vector_operand (op, mode);
25559 gcc_assert (GET_MODE (op) == mode
25560 || GET_MODE (op) == VOIDmode);
25561 op = copy_to_mode_reg (mode, op);
25566 args[i].mode = mode;
25572 pat = GEN_FCN (icode) (target);
25575 pat = GEN_FCN (icode) (target, args[0].op);
25578 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
25581 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
25584 gcc_unreachable ();
25590 return klass == store ? 0 : target;
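/* Two sketches of the load/store classification above.  For a load
   such as

     __m128 v = _mm_loadu_ps (p);		(V4SF_FTYPE_PCFLOAT)

   the pointer argument becomes the memory source operand, while for a
   store such as

     _mm_storeu_ps (p, v);			(VOID_FTYPE_PFLOAT_V4SF)

   klass == store, the MEM built from arg 0 becomes the target, and the
   function returns 0 because the builtin produces no value.  */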
25593 /* Return the integer constant in ARG. Constrain it to be in the range
25594 of the subparts of VEC_TYPE; issue an error if not. */
25597 get_element_number (tree vec_type, tree arg)
25599 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
25601 if (!host_integerp (arg, 1)
25602 || (elt = tree_low_cst (arg, 1), elt > max))
25604 error ("selector must be an integer constant in the range 0..%wi", max);
25611 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
25612 ix86_expand_vector_init. We DO have language-level syntax for this, in
25613 the form of (type){ init-list }. Except that since we can't place emms
25614 instructions from inside the compiler, we can't allow the use of MMX
25615 registers unless the user explicitly asks for it. So we do *not* define
25616 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
25617 we have builtins invoked by mmintrin.h that give us license to emit
25618 these sorts of instructions. */
25621 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
25623 enum machine_mode tmode = TYPE_MODE (type);
25624 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
25625 int i, n_elt = GET_MODE_NUNITS (tmode);
25626 rtvec v = rtvec_alloc (n_elt);
25628 gcc_assert (VECTOR_MODE_P (tmode));
25629 gcc_assert (call_expr_nargs (exp) == n_elt);
25631 for (i = 0; i < n_elt; ++i)
25633 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
25634 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
25637 if (!target || !register_operand (target, tmode))
25638 target = gen_reg_rtx (tmode);
25640 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
25644 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
25645 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
25646 had a language-level syntax for referencing vector elements. */
25649 ix86_expand_vec_ext_builtin (tree exp, rtx target)
25651 enum machine_mode tmode, mode0;
25656 arg0 = CALL_EXPR_ARG (exp, 0);
25657 arg1 = CALL_EXPR_ARG (exp, 1);
25659 op0 = expand_normal (arg0);
25660 elt = get_element_number (TREE_TYPE (arg0), arg1);
25662 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
25663 mode0 = TYPE_MODE (TREE_TYPE (arg0));
25664 gcc_assert (VECTOR_MODE_P (mode0));
25666 op0 = force_reg (mode0, op0);
25668 if (optimize || !target || !register_operand (target, tmode))
25669 target = gen_reg_rtx (tmode);
25671 ix86_expand_vector_extract (true, target, op0, elt);
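/* E.g., a call like this sketch

     __v4si v = ...;
     int x = __builtin_ia32_vec_ext_v4si (v, 2);

   extracts element 2; a selector outside 0..3, or one that is not a
   compile-time constant, is diagnosed by get_element_number above.  */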
25676 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
25677 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
25678 a language-level syntax for referencing vector elements. */
25681 ix86_expand_vec_set_builtin (tree exp)
25683 enum machine_mode tmode, mode1;
25684 tree arg0, arg1, arg2;
25686 rtx op0, op1, target;
25688 arg0 = CALL_EXPR_ARG (exp, 0);
25689 arg1 = CALL_EXPR_ARG (exp, 1);
25690 arg2 = CALL_EXPR_ARG (exp, 2);
25692 tmode = TYPE_MODE (TREE_TYPE (arg0));
25693 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
25694 gcc_assert (VECTOR_MODE_P (tmode));
25696 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
25697 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
25698 elt = get_element_number (TREE_TYPE (arg0), arg2);
25700 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
25701 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
25703 op0 = force_reg (tmode, op0);
25704 op1 = force_reg (mode1, op1);
25706 /* OP0 is the source of these builtin functions and shouldn't be
25707 modified. Create a copy, use it and return it as target. */
25708 target = gen_reg_rtx (tmode);
25709 emit_move_insn (target, op0);
25710 ix86_expand_vector_set (true, target, op1, elt);
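/* Sketch: __builtin_ia32_vec_set_v8hi (v, x, 3) yields a copy of V
   with element 3 replaced by X; per the comment above, V itself is
   left untouched because the expander works on a fresh register.  */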
25715 /* Expand an expression EXP that calls a built-in function,
25716 with result going to TARGET if that's convenient
25717 (and in mode MODE if that's convenient).
25718 SUBTARGET may be used as the target for computing one of EXP's operands.
25719 IGNORE is nonzero if the value is to be ignored. */
25722 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
25723 enum machine_mode mode ATTRIBUTE_UNUSED,
25724 int ignore ATTRIBUTE_UNUSED)
25726 const struct builtin_description *d;
25728 enum insn_code icode;
25729 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
25730 tree arg0, arg1, arg2;
25731 rtx op0, op1, op2, pat;
25732 enum machine_mode mode0, mode1, mode2;
25733 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
25735 /* Determine whether the builtin function is available under the current ISA.
25736 Originally the builtin was not created if it wasn't applicable to the
25737 current ISA based on the command line switches. With function specific
25738 options, we need to check in the context of the function making the call
25739 whether it is supported. */
25740 if (ix86_builtins_isa[fcode].isa
25741 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
25743 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
25744 NULL, NULL, false);
25747 error ("%qE needs unknown isa option", fndecl);
25750 gcc_assert (opts != NULL);
25751 error ("%qE needs isa option %s", fndecl, opts);
25759 case IX86_BUILTIN_MASKMOVQ:
25760 case IX86_BUILTIN_MASKMOVDQU:
25761 icode = (fcode == IX86_BUILTIN_MASKMOVQ
25762 ? CODE_FOR_mmx_maskmovq
25763 : CODE_FOR_sse2_maskmovdqu);
25764 /* Note the arg order is different from the operand order. */
25765 arg1 = CALL_EXPR_ARG (exp, 0);
25766 arg2 = CALL_EXPR_ARG (exp, 1);
25767 arg0 = CALL_EXPR_ARG (exp, 2);
25768 op0 = expand_normal (arg0);
25769 op1 = expand_normal (arg1);
25770 op2 = expand_normal (arg2);
25771 mode0 = insn_data[icode].operand[0].mode;
25772 mode1 = insn_data[icode].operand[1].mode;
25773 mode2 = insn_data[icode].operand[2].mode;
25775 op0 = force_reg (Pmode, op0);
25776 op0 = gen_rtx_MEM (mode1, op0);
25778 if (!insn_data[icode].operand[0].predicate (op0, mode0))
25779 op0 = copy_to_mode_reg (mode0, op0);
25780 if (!insn_data[icode].operand[1].predicate (op1, mode1))
25781 op1 = copy_to_mode_reg (mode1, op1);
25782 if (!insn_data[icode].operand[2].predicate (op2, mode2))
25783 op2 = copy_to_mode_reg (mode2, op2);
25784 pat = GEN_FCN (icode) (op0, op1, op2);
25790 case IX86_BUILTIN_LDMXCSR:
25791 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
25792 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
25793 emit_move_insn (target, op0);
25794 emit_insn (gen_sse_ldmxcsr (target));
25797 case IX86_BUILTIN_STMXCSR:
25798 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
25799 emit_insn (gen_sse_stmxcsr (target));
25800 return copy_to_mode_reg (SImode, target);
25802 case IX86_BUILTIN_CLFLUSH:
25803 arg0 = CALL_EXPR_ARG (exp, 0);
25804 op0 = expand_normal (arg0);
25805 icode = CODE_FOR_sse2_clflush;
25806 if (!insn_data[icode].operand[0].predicate (op0, Pmode))
25807 op0 = copy_to_mode_reg (Pmode, op0);
25809 emit_insn (gen_sse2_clflush (op0));
25812 case IX86_BUILTIN_MONITOR:
25813 arg0 = CALL_EXPR_ARG (exp, 0);
25814 arg1 = CALL_EXPR_ARG (exp, 1);
25815 arg2 = CALL_EXPR_ARG (exp, 2);
25816 op0 = expand_normal (arg0);
25817 op1 = expand_normal (arg1);
25818 op2 = expand_normal (arg2);
25820 op0 = copy_to_mode_reg (Pmode, op0);
25822 op1 = copy_to_mode_reg (SImode, op1);
25824 op2 = copy_to_mode_reg (SImode, op2);
25825 emit_insn (ix86_gen_monitor (op0, op1, op2));
25828 case IX86_BUILTIN_MWAIT:
25829 arg0 = CALL_EXPR_ARG (exp, 0);
25830 arg1 = CALL_EXPR_ARG (exp, 1);
25831 op0 = expand_normal (arg0);
25832 op1 = expand_normal (arg1);
25834 op0 = copy_to_mode_reg (SImode, op0);
25836 op1 = copy_to_mode_reg (SImode, op1);
25837 emit_insn (gen_sse3_mwait (op0, op1));
25840 case IX86_BUILTIN_VEC_INIT_V2SI:
25841 case IX86_BUILTIN_VEC_INIT_V4HI:
25842 case IX86_BUILTIN_VEC_INIT_V8QI:
25843 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
25845 case IX86_BUILTIN_VEC_EXT_V2DF:
25846 case IX86_BUILTIN_VEC_EXT_V2DI:
25847 case IX86_BUILTIN_VEC_EXT_V4SF:
25848 case IX86_BUILTIN_VEC_EXT_V4SI:
25849 case IX86_BUILTIN_VEC_EXT_V8HI:
25850 case IX86_BUILTIN_VEC_EXT_V2SI:
25851 case IX86_BUILTIN_VEC_EXT_V4HI:
25852 case IX86_BUILTIN_VEC_EXT_V16QI:
25853 return ix86_expand_vec_ext_builtin (exp, target);
25855 case IX86_BUILTIN_VEC_SET_V2DI:
25856 case IX86_BUILTIN_VEC_SET_V4SF:
25857 case IX86_BUILTIN_VEC_SET_V4SI:
25858 case IX86_BUILTIN_VEC_SET_V8HI:
25859 case IX86_BUILTIN_VEC_SET_V4HI:
25860 case IX86_BUILTIN_VEC_SET_V16QI:
25861 return ix86_expand_vec_set_builtin (exp);
25863 case IX86_BUILTIN_VEC_PERM_V2DF:
25864 case IX86_BUILTIN_VEC_PERM_V4SF:
25865 case IX86_BUILTIN_VEC_PERM_V2DI:
25866 case IX86_BUILTIN_VEC_PERM_V4SI:
25867 case IX86_BUILTIN_VEC_PERM_V8HI:
25868 case IX86_BUILTIN_VEC_PERM_V16QI:
25869 case IX86_BUILTIN_VEC_PERM_V2DI_U:
25870 case IX86_BUILTIN_VEC_PERM_V4SI_U:
25871 case IX86_BUILTIN_VEC_PERM_V8HI_U:
25872 case IX86_BUILTIN_VEC_PERM_V16QI_U:
25873 case IX86_BUILTIN_VEC_PERM_V4DF:
25874 case IX86_BUILTIN_VEC_PERM_V8SF:
25875 return ix86_expand_vec_perm_builtin (exp);
25877 case IX86_BUILTIN_INFQ:
25878 case IX86_BUILTIN_HUGE_VALQ:
25880 REAL_VALUE_TYPE inf;
25884 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
25886 tmp = validize_mem (force_const_mem (mode, tmp));
25889 target = gen_reg_rtx (mode);
25891 emit_move_insn (target, tmp);
25895 case IX86_BUILTIN_LLWPCB:
25896 arg0 = CALL_EXPR_ARG (exp, 0);
25897 op0 = expand_normal (arg0);
25898 icode = CODE_FOR_lwp_llwpcb;
25899 if (!insn_data[icode].operand[0].predicate (op0, Pmode))
25900 op0 = copy_to_mode_reg (Pmode, op0);
25901 emit_insn (gen_lwp_llwpcb (op0));
25904 case IX86_BUILTIN_SLWPCB:
25905 icode = CODE_FOR_lwp_slwpcb;
25907 || !insn_data[icode].operand[0].predicate (target, Pmode))
25908 target = gen_reg_rtx (Pmode);
25909 emit_insn (gen_lwp_slwpcb (target));
25916 for (i = 0, d = bdesc_special_args;
25917 i < ARRAY_SIZE (bdesc_special_args);
25919 if (d->code == fcode)
25920 return ix86_expand_special_args_builtin (d, exp, target);
25922 for (i = 0, d = bdesc_args;
25923 i < ARRAY_SIZE (bdesc_args);
25925 if (d->code == fcode)
25928 case IX86_BUILTIN_FABSQ:
25929 case IX86_BUILTIN_COPYSIGNQ:
25931 /* Emit a normal call if SSE2 isn't available. */
25932 return expand_call (exp, target, ignore);
25934 return ix86_expand_args_builtin (d, exp, target);
25937 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
25938 if (d->code == fcode)
25939 return ix86_expand_sse_comi (d, exp, target);
25941 for (i = 0, d = bdesc_pcmpestr;
25942 i < ARRAY_SIZE (bdesc_pcmpestr);
25944 if (d->code == fcode)
25945 return ix86_expand_sse_pcmpestr (d, exp, target);
25947 for (i = 0, d = bdesc_pcmpistr;
25948 i < ARRAY_SIZE (bdesc_pcmpistr);
25950 if (d->code == fcode)
25951 return ix86_expand_sse_pcmpistr (d, exp, target);
25953 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
25954 if (d->code == fcode)
25955 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
25956 (enum ix86_builtin_func_type)
25957 d->flag, d->comparison);
25959 gcc_unreachable ();
25962 /* Returns a function decl for a vectorized version of the builtin function
25963 with builtin function code FN and the result vector type TYPE, or NULL_TREE
25964 if it is not available. */
25967 ix86_builtin_vectorized_function (tree fndecl, tree type_out,
25970 enum machine_mode in_mode, out_mode;
25972 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
25974 if (TREE_CODE (type_out) != VECTOR_TYPE
25975 || TREE_CODE (type_in) != VECTOR_TYPE
25976 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
25979 out_mode = TYPE_MODE (TREE_TYPE (type_out));
25980 out_n = TYPE_VECTOR_SUBPARTS (type_out);
25981 in_mode = TYPE_MODE (TREE_TYPE (type_in));
25982 in_n = TYPE_VECTOR_SUBPARTS (type_in);
25986 case BUILT_IN_SQRT:
25987 if (out_mode == DFmode && out_n == 2
25988 && in_mode == DFmode && in_n == 2)
25989 return ix86_builtins[IX86_BUILTIN_SQRTPD];
25992 case BUILT_IN_SQRTF:
25993 if (out_mode == SFmode && out_n == 4
25994 && in_mode == SFmode && in_n == 4)
25995 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
25998 case BUILT_IN_LRINT:
25999 if (out_mode == SImode && out_n == 4
26000 && in_mode == DFmode && in_n == 2)
26001 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
26004 case BUILT_IN_LRINTF:
26005 if (out_mode == SImode && out_n == 4
26006 && in_mode == SFmode && in_n == 4)
26007 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
26010 case BUILT_IN_COPYSIGN:
26011 if (out_mode == DFmode && out_n == 2
26012 && in_mode == DFmode && in_n == 2)
26013 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
26016 case BUILT_IN_COPYSIGNF:
26017 if (out_mode == SFmode && out_n == 4
26018 && in_mode == SFmode && in_n == 4)
26019 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
26026 /* Dispatch to a handler for a vectorization library. */
26027 if (ix86_veclib_handler)
26028 return ix86_veclib_handler ((enum built_in_function) fn, type_out,
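/* E.g., with -O3 -ffast-math and SSE2, a loop over double arrays such
   as this sketch

     for (i = 0; i < n; i++)
       out[i] = sqrt (in[i]);

   lets the vectorizer query BUILT_IN_SQRT with V2DF in and out, and
   the switch above hands back IX86_BUILTIN_SQRTPD so the loop body
   becomes one sqrtpd per two elements.  */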
26034 /* Handler for an SVML-style interface to
26035 a library with vectorized intrinsics. */
26038 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
26041 tree fntype, new_fndecl, args;
26044 enum machine_mode el_mode, in_mode;
26047 /* The SVML is suitable for unsafe math only. */
26048 if (!flag_unsafe_math_optimizations)
26051 el_mode = TYPE_MODE (TREE_TYPE (type_out));
26052 n = TYPE_VECTOR_SUBPARTS (type_out);
26053 in_mode = TYPE_MODE (TREE_TYPE (type_in));
26054 in_n = TYPE_VECTOR_SUBPARTS (type_in);
26055 if (el_mode != in_mode
26063 case BUILT_IN_LOG10:
26065 case BUILT_IN_TANH:
26067 case BUILT_IN_ATAN:
26068 case BUILT_IN_ATAN2:
26069 case BUILT_IN_ATANH:
26070 case BUILT_IN_CBRT:
26071 case BUILT_IN_SINH:
26073 case BUILT_IN_ASINH:
26074 case BUILT_IN_ASIN:
26075 case BUILT_IN_COSH:
26077 case BUILT_IN_ACOSH:
26078 case BUILT_IN_ACOS:
26079 if (el_mode != DFmode || n != 2)
26083 case BUILT_IN_EXPF:
26084 case BUILT_IN_LOGF:
26085 case BUILT_IN_LOG10F:
26086 case BUILT_IN_POWF:
26087 case BUILT_IN_TANHF:
26088 case BUILT_IN_TANF:
26089 case BUILT_IN_ATANF:
26090 case BUILT_IN_ATAN2F:
26091 case BUILT_IN_ATANHF:
26092 case BUILT_IN_CBRTF:
26093 case BUILT_IN_SINHF:
26094 case BUILT_IN_SINF:
26095 case BUILT_IN_ASINHF:
26096 case BUILT_IN_ASINF:
26097 case BUILT_IN_COSHF:
26098 case BUILT_IN_COSF:
26099 case BUILT_IN_ACOSHF:
26100 case BUILT_IN_ACOSF:
26101 if (el_mode != SFmode || n != 4)
26109 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
26111 if (fn == BUILT_IN_LOGF)
26112 strcpy (name, "vmlsLn4");
26113 else if (fn == BUILT_IN_LOG)
26114 strcpy (name, "vmldLn2");
26117 sprintf (name, "vmls%s", bname+10);
26118 name[strlen (name)-1] = '4';
26121 sprintf (name, "vmld%s2", bname+10);
26123 /* Convert to uppercase. */
26127 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
26128 args = TREE_CHAIN (args))
26132 fntype = build_function_type_list (type_out, type_in, NULL);
26134 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
26136 /* Build a function declaration for the vectorized function. */
26137 new_fndecl = build_decl (BUILTINS_LOCATION,
26138 FUNCTION_DECL, get_identifier (name), fntype);
26139 TREE_PUBLIC (new_fndecl) = 1;
26140 DECL_EXTERNAL (new_fndecl) = 1;
26141 DECL_IS_NOVOPS (new_fndecl) = 1;
26142 TREE_READONLY (new_fndecl) = 1;
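/* The mangling above yields, e.g., vmldLn2 for log and vmlsLn4 for
   logf, and (after the uppercasing step) vmldSin2 for sin or vmlsAtan4
   for atanf: "vmld"/"vmls" selects double/float and the trailing digit
   is the vector width.  */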
26147 /* Handler for an ACML-style interface to
26148 a library with vectorized intrinsics. */
26151 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
26153 char name[20] = "__vr.._";
26154 tree fntype, new_fndecl, args;
26157 enum machine_mode el_mode, in_mode;
26160 /* The ACML is 64-bit only and suitable for unsafe math only, as
26161 it does not correctly support parts of IEEE with the required
26162 precision such as denormals. */
26164 || !flag_unsafe_math_optimizations)
26167 el_mode = TYPE_MODE (TREE_TYPE (type_out));
26168 n = TYPE_VECTOR_SUBPARTS (type_out);
26169 in_mode = TYPE_MODE (TREE_TYPE (type_in));
26170 in_n = TYPE_VECTOR_SUBPARTS (type_in);
26171 if (el_mode != in_mode
26181 case BUILT_IN_LOG2:
26182 case BUILT_IN_LOG10:
26185 if (el_mode != DFmode
26190 case BUILT_IN_SINF:
26191 case BUILT_IN_COSF:
26192 case BUILT_IN_EXPF:
26193 case BUILT_IN_POWF:
26194 case BUILT_IN_LOGF:
26195 case BUILT_IN_LOG2F:
26196 case BUILT_IN_LOG10F:
26199 if (el_mode != SFmode
26208 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
26209 sprintf (name + 7, "%s", bname+10);
26212 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
26213 args = TREE_CHAIN (args))
26217 fntype = build_function_type_list (type_out, type_in, NULL);
26219 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
26221 /* Build a function declaration for the vectorized function. */
26222 new_fndecl = build_decl (BUILTINS_LOCATION,
26223 FUNCTION_DECL, get_identifier (name), fntype);
26224 TREE_PUBLIC (new_fndecl) = 1;
26225 DECL_EXTERNAL (new_fndecl) = 1;
26226 DECL_IS_NOVOPS (new_fndecl) = 1;
26227 TREE_READONLY (new_fndecl) = 1;
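/* The "__vr.._" template has its two dots filled in elsewhere in this
   function (presumably 'd'/'2' for the DFmode cases and 's'/'4' for
   the SFmode ones), so e.g. sin maps to __vrd2_sin and sinf to
   __vrs4_sinf in the ACML vector math library.  */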
26233 /* Returns a decl of a function that implements conversion of an integer vector
26234 into a floating-point vector, or vice-versa. DEST_TYPE and SRC_TYPE
26235 are the types involved when converting according to CODE.
26236 Return NULL_TREE if it is not available. */
26239 ix86_vectorize_builtin_conversion (unsigned int code,
26240 tree dest_type, tree src_type)
26248 switch (TYPE_MODE (src_type))
26251 switch (TYPE_MODE (dest_type))
26254 return (TYPE_UNSIGNED (src_type)
26255 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
26256 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
26258 return (TYPE_UNSIGNED (src_type)
26260 : ix86_builtins[IX86_BUILTIN_CVTDQ2PD256]);
26266 switch (TYPE_MODE (dest_type))
26269 return (TYPE_UNSIGNED (src_type)
26271 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
26280 case FIX_TRUNC_EXPR:
26281 switch (TYPE_MODE (dest_type))
26284 switch (TYPE_MODE (src_type))
26287 return (TYPE_UNSIGNED (dest_type)
26289 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ]);
26291 return (TYPE_UNSIGNED (dest_type)
26293 : ix86_builtins[IX86_BUILTIN_CVTTPD2DQ256]);
26300 switch (TYPE_MODE (src_type))
26303 return (TYPE_UNSIGNED (dest_type)
26305 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ256]);
26322 /* Returns a code for a target-specific builtin that implements
26323 the reciprocal of the function, or NULL_TREE if not available. */
26326 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
26327 bool sqrt ATTRIBUTE_UNUSED)
26329 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
26330 && flag_finite_math_only && !flag_trapping_math
26331 && flag_unsafe_math_optimizations))
26335 /* Machine dependent builtins. */
26338 /* Vectorized version of sqrt to rsqrt conversion. */
26339 case IX86_BUILTIN_SQRTPS_NR:
26340 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
26346 /* Normal builtins. */
26349 /* Sqrt to rsqrt conversion. */
26350 case BUILT_IN_SQRTF:
26351 return ix86_builtins[IX86_BUILTIN_RSQRTF];
26358 /* Helper for avx_vpermilps256_operand et al. This is also used by
26359 the expansion functions to turn the parallel back into a mask.
26360 The return value is 0 for no match and the imm8+1 for a match. */
26363 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
26365 unsigned i, nelt = GET_MODE_NUNITS (mode);
26367 unsigned char ipar[8];
26369 if (XVECLEN (par, 0) != (int) nelt)
26372 /* Validate that all of the elements are constants, and not totally
26373 out of range. Copy the data into an integral array to make the
26374 subsequent checks easier. */
26375 for (i = 0; i < nelt; ++i)
26377 rtx er = XVECEXP (par, 0, i);
26378 unsigned HOST_WIDE_INT ei;
26380 if (!CONST_INT_P (er))
26391 /* In the 256-bit DFmode case, we can only move elements within a 128-bit lane. */
26393 for (i = 0; i < 2; ++i)
26397 mask |= ipar[i] << i;
26399 for (i = 2; i < 4; ++i)
26403 mask |= (ipar[i] - 2) << i;
26408 /* In the 256-bit SFmode case, we have full freedom of movement
26409 within the low 128-bit lane, but the high 128-bit lane must
26410 mirror the exact same pattern. */
26411 for (i = 0; i < 4; ++i)
26412 if (ipar[i] + 4 != ipar[i + 4])
26419 /* In the 128-bit case, we have full freedom in the placement of
26420 the elements from the source operand. */
26421 for (i = 0; i < nelt; ++i)
26422 mask |= ipar[i] << (i * (nelt / 2));
26426 gcc_unreachable ();
26429 /* Make sure success has a non-zero value by adding one. */
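/* Worked example of the DFmode arm above: for V4DFmode, the parallel
   (1 0 3 2) gives mask = 1<<0 | 0<<1 | (3-2)<<2 | (2-2)<<3 = 0x5,
   i.e. "swap within each 128-bit lane", and the function returns
   0x5 + 1 = 6.  */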
26433 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
26434 the expansion functions to turn the parallel back into a mask.
26435 The return value is 0 for no match and the imm8+1 for a match. */
26438 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
26440 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
26442 unsigned char ipar[8];
26444 if (XVECLEN (par, 0) != (int) nelt)
26447 /* Validate that all of the elements are constants, and not totally
26448 out of range. Copy the data into an integral array to make the
26449 subsequent checks easier. */
26450 for (i = 0; i < nelt; ++i)
26452 rtx er = XVECEXP (par, 0, i);
26453 unsigned HOST_WIDE_INT ei;
26455 if (!CONST_INT_P (er))
26458 if (ei >= 2 * nelt)
26463 /* Validate that each half of the permute is a whole 128-bit half of one source operand. */
26464 for (i = 0; i < nelt2 - 1; ++i)
26465 if (ipar[i] + 1 != ipar[i + 1])
26467 for (i = nelt2; i < nelt - 1; ++i)
26468 if (ipar[i] + 1 != ipar[i + 1])
26471 /* Reconstruct the mask. */
26472 for (i = 0; i < 2; ++i)
26474 unsigned e = ipar[i * nelt2];
26478 mask |= e << (i * 4);
26481 /* Make sure success has a non-zero value by adding one. */
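/* Worked example: for V4DFmode, the parallel (0 1 6 7) passes the
   halves check; each selector is then divided down to a half index,
   giving 0/2 = 0 (low half of the first operand) and 6/2 = 3 (high
   half of the second), so mask = 0<<0 | 3<<4 = 0x30 and the function
   returns 0x31.  */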
26486 /* Store OPERAND to memory after reload is completed.  This means
26487 that we can't easily use assign_stack_local. */
26489 ix86_force_to_memory (enum machine_mode mode, rtx operand)
26493 gcc_assert (reload_completed);
26494 if (ix86_using_red_zone ())
26496 result = gen_rtx_MEM (mode,
26497 gen_rtx_PLUS (Pmode,
26499 GEN_INT (-RED_ZONE_SIZE)));
26500 emit_move_insn (result, operand);
26502 else if (TARGET_64BIT)
26508 operand = gen_lowpart (DImode, operand);
26512 gen_rtx_SET (VOIDmode,
26513 gen_rtx_MEM (DImode,
26514 gen_rtx_PRE_DEC (DImode,
26515 stack_pointer_rtx)),
26519 gcc_unreachable ();
26521 result = gen_rtx_MEM (mode, stack_pointer_rtx);
26530 split_double_mode (mode, &operand, 1, operands, operands + 1);
26532 gen_rtx_SET (VOIDmode,
26533 gen_rtx_MEM (SImode,
26534 gen_rtx_PRE_DEC (Pmode,
26535 stack_pointer_rtx)),
26538 gen_rtx_SET (VOIDmode,
26539 gen_rtx_MEM (SImode,
26540 gen_rtx_PRE_DEC (Pmode,
26541 stack_pointer_rtx)),
26546 /* Store HImodes as SImodes. */
26547 operand = gen_lowpart (SImode, operand);
26551 gen_rtx_SET (VOIDmode,
26552 gen_rtx_MEM (GET_MODE (operand),
26553 gen_rtx_PRE_DEC (SImode,
26554 stack_pointer_rtx)),
26558 gcc_unreachable ();
26560 result = gen_rtx_MEM (mode, stack_pointer_rtx);
26565 /* Free the operand from memory. */
26567 ix86_free_from_memory (enum machine_mode mode)
26569 if (!ix86_using_red_zone ())
26573 if (mode == DImode || TARGET_64BIT)
26577 /* Use LEA to deallocate stack space.  In peephole2 it will be converted
26578 to a pop or add instruction if registers are available. */
26579 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
26580 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
26585 /* Implement TARGET_IRA_COVER_CLASSES. If -mfpmath=sse, we prefer
26586 SSE_REGS to FLOAT_REGS if their costs for a pseudo are the same. */
26588 static const reg_class_t *
26589 i386_ira_cover_classes (void)
26591 static const reg_class_t sse_fpmath_classes[] = {
26592 GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
26594 static const reg_class_t no_sse_fpmath_classes[] = {
26595 GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
26598 return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
26601 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
26602 QImode must go into class Q_REGS.
26603 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
26604 movdf to do mem-to-mem moves through integer regs. */
26606 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
26608 enum machine_mode mode = GET_MODE (x);
26610 /* We're only allowed to return a subclass of CLASS. Many of the
26611 following checks fail for NO_REGS, so eliminate that early. */
26612 if (regclass == NO_REGS)
26615 /* All classes can load zeros. */
26616 if (x == CONST0_RTX (mode))
26619 /* Force constants into memory if we are loading a (nonzero) constant into
26620 an MMX or SSE register. This is because there are no MMX/SSE instructions
26621 to load from a constant. */
26623 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
26626 /* Prefer SSE regs only if we can use them for math. */
26627 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
26628 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
26630 /* Floating-point constants need more complex checks. */
26631 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
26633 /* General regs can load everything. */
26634 if (reg_class_subset_p (regclass, GENERAL_REGS))
26637 /* Floats can load 0 and 1 plus some others. Note that we eliminated
26638 zero above. We only want to wind up preferring 80387 registers if
26639 we plan on doing computation with them. */
26641 && standard_80387_constant_p (x))
26643 /* Limit class to non-sse. */
26644 if (regclass == FLOAT_SSE_REGS)
26646 if (regclass == FP_TOP_SSE_REGS)
26648 if (regclass == FP_SECOND_SSE_REGS)
26649 return FP_SECOND_REG;
26650 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
26657 /* Generally when we see PLUS here, it's the function invariant
26658 (plus soft-fp const_int), which can only be computed into general regs. */
26660 if (GET_CODE (x) == PLUS)
26661 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
26663 /* QImode constants are easy to load, but non-constant QImode data
26664 must go into Q_REGS. */
26665 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
26667 if (reg_class_subset_p (regclass, Q_REGS))
26669 if (reg_class_subset_p (Q_REGS, regclass))
26677 /* Discourage putting floating-point values in SSE registers unless
26678 SSE math is being used, and likewise for the 387 registers. */
26680 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
26682 enum machine_mode mode = GET_MODE (x);
26684 /* Restrict the output reload class to the register bank that we are doing
26685 math on. If we would like not to return a subset of CLASS, reject this
26686 alternative: if reload cannot do this, it will still use its choice. */
26687 mode = GET_MODE (x);
26688 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
26689 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
26691 if (X87_FLOAT_MODE_P (mode))
26693 if (regclass == FP_TOP_SSE_REGS)
26695 else if (regclass == FP_SECOND_SSE_REGS)
26696 return FP_SECOND_REG;
26698 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
26705 ix86_secondary_reload (bool in_p, rtx x, reg_class_t rclass,
26706 enum machine_mode mode,
26707 secondary_reload_info *sri ATTRIBUTE_UNUSED)
26709 /* QImode spills from non-QI registers require an
26710 intermediate register on 32-bit targets. */
26711 if (!in_p && mode == QImode && !TARGET_64BIT
26712 && (rclass == GENERAL_REGS
26713 || rclass == LEGACY_REGS
26714 || rclass == INDEX_REGS))
26723 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
26724 regno = true_regnum (x);
26726 /* Return Q_REGS if the operand is in memory. */
26734 /* Implement TARGET_CLASS_LIKELY_SPILLED_P. */
26737 ix86_class_likely_spilled_p (reg_class_t rclass)
26748 case SSE_FIRST_REG:
26750 case FP_SECOND_REG:
26760 /* If we are copying between general and FP registers, we need a memory
26761 location. The same is true for SSE and MMX registers.
26763 To optimize register_move_cost performance, allow inline variant.
26765 The macro can't work reliably when one of the CLASSES is a class containing
26766 registers from multiple units (SSE, MMX, integer).  We avoid this by never
26767 combining those units in a single alternative in the machine description.
26768 Ensure that this constraint holds to avoid unexpected surprises.
26770 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
26771 enforce these sanity checks. */
26774 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
26775 enum machine_mode mode, int strict)
26777 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
26778 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
26779 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
26780 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
26781 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
26782 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
26784 gcc_assert (!strict);
26788 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
26791 /* ??? This is a lie. We do have moves between mmx/general, and for
26792 mmx/sse2. But by saying we need secondary memory we discourage the
26793 register allocator from using the mmx registers unless needed. */
26794 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
26797 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
26799 /* SSE1 doesn't have any direct moves from other classes. */
26803 /* If the target says that inter-unit moves are more expensive
26804 than moving through memory, then don't generate them. */
26805 if (!TARGET_INTER_UNIT_MOVES)
26808 /* Between SSE and general, we have moves no larger than word size. */
26809 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
26817 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
26818 enum machine_mode mode, int strict)
26820 return inline_secondary_memory_needed (class1, class2, mode, strict);
26823 /* Return true if the registers in CLASS cannot represent the change from
26824 modes FROM to TO. */
26827 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
26828 enum reg_class regclass)
26833 /* x87 registers can't do subreg at all, as all values are reformatted
26834 to extended precision. */
26835 if (MAYBE_FLOAT_CLASS_P (regclass))
26838 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
26840 /* Vector registers do not support QI or HImode loads. If we don't
26841 disallow a change to these modes, reload will assume it's ok to
26842 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
26843 the vec_dupv4hi pattern. */
26844 if (GET_MODE_SIZE (from) < 4)
26847 /* Vector registers do not support subreg with nonzero offsets, which
26848 are otherwise valid for integer registers. Since we can't see
26849 whether we have a nonzero offset from here, prohibit all
26850 nonparadoxical subregs changing size. */
26851 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
26858 /* Return the cost of moving data of mode M between a
26859 register and memory. A value of 2 is the default; this cost is
26860 relative to those in `REGISTER_MOVE_COST'.
26862 This function is used extensively by register_move_cost, which is used to
26863 build tables at startup.  Make it inline in this case.
26864 When IN is 2, return the maximum of the in and out move cost.

26866 If moving between registers and memory is more expensive than
26867 between two registers, you should define this macro to express the
relative cost.

26870 Model also increased moving costs of QImode registers in non
Q_REGS classes. */
26874 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
26878 if (FLOAT_CLASS_P (regclass))
26896 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
26897 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
26899 if (SSE_CLASS_P (regclass))
26902 switch (GET_MODE_SIZE (mode))
26917 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
26918 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
26920 if (MMX_CLASS_P (regclass))
26923 switch (GET_MODE_SIZE (mode))
26935 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
26936 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
26938 switch (GET_MODE_SIZE (mode))
26941 if (Q_CLASS_P (regclass) || TARGET_64BIT)
26944 return ix86_cost->int_store[0];
26945 if (TARGET_PARTIAL_REG_DEPENDENCY
26946 && optimize_function_for_speed_p (cfun))
26947 cost = ix86_cost->movzbl_load;
26949 cost = ix86_cost->int_load[0];
26951 return MAX (cost, ix86_cost->int_store[0]);
26957 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
26959 return ix86_cost->movzbl_load;
26961 return ix86_cost->int_store[0] + 4;
26966 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
26967 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
26969 /* Compute the number of 32-bit moves needed.  TFmode is moved as XFmode. */
26970 if (mode == TFmode)
26973 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
26975 cost = ix86_cost->int_load[2];
26977 cost = ix86_cost->int_store[2];
26978 return (cost * (((int) GET_MODE_SIZE (mode)
26979 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
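/* E.g., on a 32-bit target (UNITS_PER_WORD == 4) a DImode value in
   GENERAL_REGS costs int_load[2] (or int_store[2]) scaled by
   (8 + 4 - 1) / 4 = 2 word-sized moves; TFmode is first widened to
   XFmode before the same scaling is applied.  */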
26984 ix86_memory_move_cost (enum machine_mode mode, reg_class_t regclass,
26987 return inline_memory_move_cost (mode, (enum reg_class) regclass, in ? 1 : 0);
26991 /* Return the cost of moving data from a register in class CLASS1 to
26992 one in class CLASS2.
26994 It is not required that the cost always equal 2 when FROM is the same as TO;
26995 on some machines it is expensive to move between registers if they are not
26996 general registers. */
26999 ix86_register_move_cost (enum machine_mode mode, reg_class_t class1_i,
27000 reg_class_t class2_i)
27002 enum reg_class class1 = (enum reg_class) class1_i;
27003 enum reg_class class2 = (enum reg_class) class2_i;
27005 /* In case we require secondary memory, compute the cost of the store
27006 followed by the load.  In order to avoid bad register allocation choices,
27007 we need this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
27009 if (inline_secondary_memory_needed (class1, class2, mode, 0))
27013 cost += inline_memory_move_cost (mode, class1, 2);
27014 cost += inline_memory_move_cost (mode, class2, 2);
27016 /* In the case of copying from a general purpose register we may emit
27017 multiple stores followed by a single load, causing a memory size
27018 mismatch stall.  Count this as an arbitrarily high cost of 20. */
27019 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
27022 /* In the case of FP/MMX moves, the registers actually overlap, and we
27023 have to switch modes in order to treat them differently. */
27024 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
27025 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
27031 /* Moves between the SSE/MMX and integer units are expensive. */
27032 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
27033 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
27035 /* ??? By keeping the returned value relatively high, we limit the number
27036 of moves between integer and MMX/SSE registers for all targets.
27037 Additionally, the high value prevents a problem with x86_modes_tieable_p(),
27038 where integer modes in MMX/SSE registers are not tieable
27039 because of missing QImode and HImode moves to, from or between
27040 MMX/SSE registers. */
27041 return MAX (8, ix86_cost->mmxsse_to_integer);
27043 if (MAYBE_FLOAT_CLASS_P (class1))
27044 return ix86_cost->fp_move;
27045 if (MAYBE_SSE_CLASS_P (class1))
27046 return ix86_cost->sse_move;
27047 if (MAYBE_MMX_CLASS_P (class1))
27048 return ix86_cost->mmx_move;
27052 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
27055 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
27057 /* Flags, and only flags, can hold CCmode values. */
27058 if (CC_REGNO_P (regno))
27059 return GET_MODE_CLASS (mode) == MODE_CC;
27060 if (GET_MODE_CLASS (mode) == MODE_CC
27061 || GET_MODE_CLASS (mode) == MODE_RANDOM
27062 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
27064 if (FP_REGNO_P (regno))
27065 return VALID_FP_MODE_P (mode);
27066 if (SSE_REGNO_P (regno))
27068 /* We implement the move patterns for all vector modes into and
27069 out of SSE registers, even when no operation instructions
27070 are available.  OImode move is available only when AVX is enabled. */
27072 return ((TARGET_AVX && mode == OImode)
27073 || VALID_AVX256_REG_MODE (mode)
27074 || VALID_SSE_REG_MODE (mode)
27075 || VALID_SSE2_REG_MODE (mode)
27076 || VALID_MMX_REG_MODE (mode)
27077 || VALID_MMX_REG_MODE_3DNOW (mode));
27079 if (MMX_REGNO_P (regno))
27081 /* We implement the move patterns for 3DNOW modes even in MMX mode,
27082 so if the register is available at all, then we can move data of
27083 the given mode into or out of it. */
27084 return (VALID_MMX_REG_MODE (mode)
27085 || VALID_MMX_REG_MODE_3DNOW (mode));
27088 if (mode == QImode)
27090 /* Take care with QImode values - they can be in non-QI regs,
27091 but then they do cause partial register stalls. */
27092 if (regno <= BX_REG || TARGET_64BIT)
27094 if (!TARGET_PARTIAL_REG_STALL)
27096 return reload_in_progress || reload_completed;
27098 /* We handle both integers and floats in the general purpose registers. */
27099 else if (VALID_INT_MODE_P (mode))
27101 else if (VALID_FP_MODE_P (mode))
27103 else if (VALID_DFP_MODE_P (mode))
27105 /* Lots of MMX code casts 8-byte vector modes to DImode.  If we then go
27106 on to use that value in smaller contexts, this can easily force a
27107 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
27108 supporting DImode, allow it. */
27109 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
27115 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
27116 tieable integer mode. */
27119 ix86_tieable_integer_mode_p (enum machine_mode mode)
27128 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
27131 return TARGET_64BIT;
27138 /* Return true if MODE1 is accessible in a register that can hold MODE2
27139 without copying. That is, all register classes that can hold MODE2
27140 can also hold MODE1. */
27143 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
27145 if (mode1 == mode2)
27148 if (ix86_tieable_integer_mode_p (mode1)
27149 && ix86_tieable_integer_mode_p (mode2))
27152 /* MODE2 being XFmode implies fp stack or general regs, which means we
27153 can tie any smaller floating point modes to it. Note that we do not
27154 tie this with TFmode. */
27155 if (mode2 == XFmode)
27156 return mode1 == SFmode || mode1 == DFmode;
27158 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
27159 that we can tie it with SFmode. */
27160 if (mode2 == DFmode)
27161 return mode1 == SFmode;
27163 /* If MODE2 is only appropriate for an SSE register, then tie with
27164 any other mode acceptable to SSE registers. */
27165 if (GET_MODE_SIZE (mode2) == 16
27166 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
27167 return (GET_MODE_SIZE (mode1) == 16
27168 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
27170 /* If MODE2 is appropriate for an MMX register, then tie
27171 with any other mode acceptable to MMX registers. */
27172 if (GET_MODE_SIZE (mode2) == 8
27173 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
27174 return (GET_MODE_SIZE (mode1) == 8
27175 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
27180 /* Compute a (partial) cost for rtx X. Return true if the complete
27181 cost has been computed, and false if subexpressions should be
27182 scanned. In either case, *TOTAL contains the cost result. */
27185 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
27187 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
27188 enum machine_mode mode = GET_MODE (x);
27189 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
27197 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
27199 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
27201 else if (flag_pic && SYMBOLIC_CONST (x)
27203 || (GET_CODE (x) != LABEL_REF
27204 && (GET_CODE (x) != SYMBOL_REF
27205 || !SYMBOL_REF_LOCAL_P (x)))))
27212 if (mode == VOIDmode)
27215 switch (standard_80387_constant_p (x))
27220 default: /* Other constants */
27225 /* Start with (MEM (SYMBOL_REF)), since that's where
27226 it'll probably end up. Add a penalty for size. */
27227 *total = (COSTS_N_INSNS (1)
27228 + (flag_pic != 0 && !TARGET_64BIT)
27229 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
27235 /* Zero extension is often completely free on x86_64, so make
27236 it as cheap as possible. */
27237 if (TARGET_64BIT && mode == DImode
27238 && GET_MODE (XEXP (x, 0)) == SImode)
27240 else if (TARGET_ZERO_EXTEND_WITH_AND)
27241 *total = cost->add;
27243 *total = cost->movzx;
27247 *total = cost->movsx;
27251 if (CONST_INT_P (XEXP (x, 1))
27252 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
27254 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
27257 *total = cost->add;
27260 if ((value == 2 || value == 3)
27261 && cost->lea <= cost->shift_const)
27263 *total = cost->lea;
27273 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
27275 if (CONST_INT_P (XEXP (x, 1)))
27277 if (INTVAL (XEXP (x, 1)) > 32)
27278 *total = cost->shift_const + COSTS_N_INSNS (2);
27280 *total = cost->shift_const * 2;
27284 if (GET_CODE (XEXP (x, 1)) == AND)
27285 *total = cost->shift_var * 2;
27287 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
27292 if (CONST_INT_P (XEXP (x, 1)))
27293 *total = cost->shift_const;
27295 *total = cost->shift_var;
27300 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
27302 /* ??? SSE scalar cost should be used here. */
27303 *total = cost->fmul;
27306 else if (X87_FLOAT_MODE_P (mode))
27308 *total = cost->fmul;
27311 else if (FLOAT_MODE_P (mode))
27313 /* ??? SSE vector cost should be used here. */
27314 *total = cost->fmul;
27319 rtx op0 = XEXP (x, 0);
27320 rtx op1 = XEXP (x, 1);
27322 if (CONST_INT_P (XEXP (x, 1)))
27324 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
27325 for (nbits = 0; value != 0; value &= value - 1)
27329 /* This is arbitrary. */
27332 /* Compute costs correctly for widening multiplication. */
27333 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
27334 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
27335 == GET_MODE_SIZE (mode))
27337 int is_mulwiden = 0;
27338 enum machine_mode inner_mode = GET_MODE (op0);
27340 if (GET_CODE (op0) == GET_CODE (op1))
27341 is_mulwiden = 1, op1 = XEXP (op1, 0);
27342 else if (CONST_INT_P (op1))
27344 if (GET_CODE (op0) == SIGN_EXTEND)
27345 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
27348 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
27352 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
27355 *total = (cost->mult_init[MODE_INDEX (mode)]
27356 + nbits * cost->mult_bit
27357 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
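/* E.g., for (mult:SI (reg:SI) (const_int 5)) the loop above computes
   nbits = 2 (two bits set in 5), so the estimate is
   mult_init[2] + 2 * mult_bit plus the costs of the two operands.  */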
27366 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
27367 /* ??? SSE cost should be used here. */
27368 *total = cost->fdiv;
27369 else if (X87_FLOAT_MODE_P (mode))
27370 *total = cost->fdiv;
27371 else if (FLOAT_MODE_P (mode))
27372 /* ??? SSE vector cost should be used here. */
27373 *total = cost->fdiv;
27375 *total = cost->divide[MODE_INDEX (mode)];
27379 if (GET_MODE_CLASS (mode) == MODE_INT
27380 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
27382 if (GET_CODE (XEXP (x, 0)) == PLUS
27383 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
27384 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
27385 && CONSTANT_P (XEXP (x, 1)))
27387 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
27388 if (val == 2 || val == 4 || val == 8)
27390 *total = cost->lea;
27391 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
27392 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
27393 outer_code, speed);
27394 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
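/* The case above matches a full address calculation, e.g.
   (plus (plus (mult reg2 (const_int 4)) reg1) (const_int 12))
   is a single "leal 12(%reg1,%reg2,4), %dst". */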
27398 else if (GET_CODE (XEXP (x, 0)) == MULT
27399 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
27401 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
27402 if (val == 2 || val == 4 || val == 8)
27404 *total = cost->lea;
27405 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
27406 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
27410 else if (GET_CODE (XEXP (x, 0)) == PLUS)
27412 *total = cost->lea;
27413 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
27414 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
27415 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
27422 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
27424 /* ??? SSE cost should be used here. */
27425 *total = cost->fadd;
27428 else if (X87_FLOAT_MODE_P (mode))
27430 *total = cost->fadd;
27433 else if (FLOAT_MODE_P (mode))
27435 /* ??? SSE vector cost should be used here. */
27436 *total = cost->fadd;
27444 if (!TARGET_64BIT && mode == DImode)
27446 *total = (cost->add * 2
27447 + (rtx_cost (XEXP (x, 0), outer_code, speed)
27448 << (GET_MODE (XEXP (x, 0)) != DImode))
27449 + (rtx_cost (XEXP (x, 1), outer_code, speed)
27450 << (GET_MODE (XEXP (x, 1)) != DImode)));
27456 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
27458 /* ??? SSE cost should be used here. */
27459 *total = cost->fchs;
27462 else if (X87_FLOAT_MODE_P (mode))
27464 *total = cost->fchs;
27467 else if (FLOAT_MODE_P (mode))
27469 /* ??? SSE vector cost should be used here. */
27470 *total = cost->fchs;
27476 if (!TARGET_64BIT && mode == DImode)
27477 *total = cost->add * 2;
27479 *total = cost->add;
27483 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
27484 && XEXP (XEXP (x, 0), 1) == const1_rtx
27485 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
27486 && XEXP (x, 1) == const0_rtx)
27488 /* This kind of construct is implemented using test[bwl].
27489 Treat it as if we had an AND. */
27490 *total = (cost->add
27491 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
27492 + rtx_cost (const1_rtx, outer_code, speed));
27498 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
27503 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
27504 /* ??? SSE cost should be used here. */
27505 *total = cost->fabs;
27506 else if (X87_FLOAT_MODE_P (mode))
27507 *total = cost->fabs;
27508 else if (FLOAT_MODE_P (mode))
27509 /* ??? SSE vector cost should be used here. */
27510 *total = cost->fabs;
27514 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
27515 /* ??? SSE cost should be used here. */
27516 *total = cost->fsqrt;
27517 else if (X87_FLOAT_MODE_P (mode))
27518 *total = cost->fsqrt;
27519 else if (FLOAT_MODE_P (mode))
27520 /* ??? SSE vector cost should be used here. */
27521 *total = cost->fsqrt;
27525 if (XINT (x, 1) == UNSPEC_TP)
27532 case VEC_DUPLICATE:
27533 /* ??? Assume all of these vector manipulation patterns are
27534 recognizable, in which case they all pretty much have the same cost. */
27536 *total = COSTS_N_INSNS (1);
27546 static int current_machopic_label_num;
27548 /* Given a symbol name and its associated stub, write out the
27549 definition of the stub. */
27552 machopic_output_stub (FILE *file, const char *symb, const char *stub)
27554 unsigned int length;
27555 char *binder_name, *symbol_name, lazy_ptr_name[32];
27556 int label = ++current_machopic_label_num;
27558 /* For 64-bit we shouldn't get here. */
27559 gcc_assert (!TARGET_64BIT);
27561 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
27562 symb = targetm.strip_name_encoding (symb);
27564 length = strlen (stub);
27565 binder_name = XALLOCAVEC (char, length + 32);
27566 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
27568 length = strlen (symb);
27569 symbol_name = XALLOCAVEC (char, length + 32);
27570 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
27572 sprintf (lazy_ptr_name, "L%d$lz", label);
27575 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
27577 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
27579 fprintf (file, "%s:\n", stub);
27580 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
27584 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
27585 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
27586 fprintf (file, "\tjmp\t*%%edx\n");
27589 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
27591 fprintf (file, "%s:\n", binder_name);
27595 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
27596 fputs ("\tpushl\t%eax\n", file);
27599 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
27601 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
27603 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
27604 fprintf (file, "%s:\n", lazy_ptr_name);
27605 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
27606 fprintf (file, ASM_LONG "%s\n", binder_name);
27608 #endif /* TARGET_MACHO */
27610 /* Order the registers for register allocator. */
27613 x86_order_regs_for_local_alloc (void)
27618 /* First allocate the local general purpose registers. */
27619 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
27620 if (GENERAL_REGNO_P (i) && call_used_regs[i])
27621 reg_alloc_order [pos++] = i;
27623 /* Global general purpose registers. */
27624 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
27625 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
27626 reg_alloc_order [pos++] = i;
27628 /* x87 registers come first in case we are doing FP math using them. */
27630 if (!TARGET_SSE_MATH)
27631 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
27632 reg_alloc_order [pos++] = i;
27634 /* SSE registers. */
27635 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
27636 reg_alloc_order [pos++] = i;
27637 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
27638 reg_alloc_order [pos++] = i;
27640 /* x87 registers. */
27641 if (TARGET_SSE_MATH)
27642 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
27643 reg_alloc_order [pos++] = i;
27645 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
27646 reg_alloc_order [pos++] = i;
27648 /* Initialize the rest of the array, as we do not allocate some registers at all. */
27650 while (pos < FIRST_PSEUDO_REGISTER)
27651 reg_alloc_order [pos++] = 0;
27654 /* Handle a "ms_abi" or "sysv_abi" attribute; arguments as in
27655 struct attribute_spec.handler. */
27657 ix86_handle_abi_attribute (tree *node, tree name,
27658 tree args ATTRIBUTE_UNUSED,
27659 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
27661 if (TREE_CODE (*node) != FUNCTION_TYPE
27662 && TREE_CODE (*node) != METHOD_TYPE
27663 && TREE_CODE (*node) != FIELD_DECL
27664 && TREE_CODE (*node) != TYPE_DECL)
27666 warning (OPT_Wattributes, "%qE attribute only applies to functions",
27668 *no_add_attrs = true;
27673 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
27675 *no_add_attrs = true;
27679 /* Can combine regparm with all attributes but fastcall. */
27680 if (is_attribute_p ("ms_abi", name))
27682 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
27684 error ("ms_abi and sysv_abi attributes are not compatible");
27689 else if (is_attribute_p ("sysv_abi", name))
27691 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
27693 error ("ms_abi and sysv_abi attributes are not compatible");
27702 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
27703 struct attribute_spec.handler. */
27705 ix86_handle_struct_attribute (tree *node, tree name,
27706 tree args ATTRIBUTE_UNUSED,
27707 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
27710 if (DECL_P (*node))
27712 if (TREE_CODE (*node) == TYPE_DECL)
27713 type = &TREE_TYPE (*node);
27718 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
27719 || TREE_CODE (*type) == UNION_TYPE)))
27721 warning (OPT_Wattributes, "%qE attribute ignored",
27723 *no_add_attrs = true;
27726 else if ((is_attribute_p ("ms_struct", name)
27727 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
27728 || ((is_attribute_p ("gcc_struct", name)
27729 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
27731 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
27733 *no_add_attrs = true;
27740 ix86_handle_fndecl_attribute (tree *node, tree name,
27741 tree args ATTRIBUTE_UNUSED,
27742 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
27744 if (TREE_CODE (*node) != FUNCTION_DECL)
27746 warning (OPT_Wattributes, "%qE attribute only applies to functions",
27748 *no_add_attrs = true;
27754 ix86_ms_bitfield_layout_p (const_tree record_type)
27756 return ((TARGET_MS_BITFIELD_LAYOUT
27757 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
27758 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
27761 /* Returns an expression indicating where the this parameter is
27762 located on entry to the FUNCTION. */
27765 x86_this_parameter (tree function)
27767 tree type = TREE_TYPE (function);
27768 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
27773 const int *parm_regs;
27775 if (ix86_function_type_abi (type) == MS_ABI)
27776 parm_regs = x86_64_ms_abi_int_parameter_registers;
27778 parm_regs = x86_64_int_parameter_registers;
27779 return gen_rtx_REG (DImode, parm_regs[aggr]);
27782 nregs = ix86_function_regparm (type, function);
27784 if (nregs > 0 && !stdarg_p (type))
27788 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
27789 regno = aggr ? DX_REG : CX_REG;
27790 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
27794 return gen_rtx_MEM (SImode,
27795 plus_constant (stack_pointer_rtx, 4));
27804 return gen_rtx_MEM (SImode,
27805 plus_constant (stack_pointer_rtx, 4));
27808 return gen_rtx_REG (SImode, regno);
27811 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
27814 /* Determine whether x86_output_mi_thunk can succeed. */
27817 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
27818 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
27819 HOST_WIDE_INT vcall_offset, const_tree function)
27821 /* 64-bit can handle anything. */
27825 /* For 32-bit, everything's fine if we have one free register. */
27826 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
27829 /* Need a free register for vcall_offset. */
27833 /* Need a free register for GOT references. */
27834 if (flag_pic && !targetm.binds_local_p (function))
27837 /* Otherwise ok. */
27841 /* Output the assembler code for a thunk function. THUNK_DECL is the
27842 declaration for the thunk function itself, FUNCTION is the decl for
27843 the target function. DELTA is an immediate constant offset to be
27844 added to THIS. If VCALL_OFFSET is nonzero, the word at
27845 *(*this + vcall_offset) should be added to THIS. */
27848 x86_output_mi_thunk (FILE *file,
27849 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
27850 HOST_WIDE_INT vcall_offset, tree function)
27853 rtx this_param = x86_this_parameter (function);
27856 /* Make sure unwind info is emitted for the thunk if needed. */
27857 final_start_function (emit_barrier (), file, 1);
27859 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
27860 pull it in now and let DELTA benefit. */
27861 if (REG_P (this_param))
27862 this_reg = this_param;
27863 else if (vcall_offset)
27865 /* Put the this parameter into %eax. */
27866 xops[0] = this_param;
27867 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
27868 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
27871 this_reg = NULL_RTX;
27873 /* Adjust the this parameter by a fixed constant. */
27876 xops[0] = GEN_INT (delta);
27877 xops[1] = this_reg ? this_reg : this_param;
27880 if (!x86_64_general_operand (xops[0], DImode))
27882 tmp = gen_rtx_REG (DImode, R10_REG);
27884 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
27886 xops[1] = this_param;
27888 if (x86_maybe_negate_const_int (&xops[0], DImode))
27889 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
27891 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
27893 else if (x86_maybe_negate_const_int (&xops[0], SImode))
27894 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
27896 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
27899 /* Adjust the this parameter by a value stored in the vtable. */
27903 tmp = gen_rtx_REG (DImode, R10_REG);
27906 int tmp_regno = CX_REG;
27907 if (lookup_attribute ("fastcall",
27908 TYPE_ATTRIBUTES (TREE_TYPE (function)))
27909 || lookup_attribute ("thiscall",
27910 TYPE_ATTRIBUTES (TREE_TYPE (function))))
27911 tmp_regno = AX_REG;
27912 tmp = gen_rtx_REG (SImode, tmp_regno);
27915 xops[0] = gen_rtx_MEM (Pmode, this_reg);
27917 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
27919 /* Adjust the this parameter. */
27920 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
27921 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
27923 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
27924 xops[0] = GEN_INT (vcall_offset);
27926 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
27927 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
27929 xops[1] = this_reg;
27930 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
27933 /* If necessary, drop THIS back to its stack slot. */
27934 if (this_reg && this_reg != this_param)
27936 xops[0] = this_reg;
27937 xops[1] = this_param;
27938 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
27941 xops[0] = XEXP (DECL_RTL (function), 0);
27944 if (!flag_pic || targetm.binds_local_p (function))
27945 output_asm_insn ("jmp\t%P0", xops);
27946 /* All thunks should be in the same object as their target,
27947 and thus binds_local_p should be true. */
27948 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
27949 gcc_unreachable ();
27952 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
27953 tmp = gen_rtx_CONST (Pmode, tmp);
27954 tmp = gen_rtx_MEM (QImode, tmp);
27956 output_asm_insn ("jmp\t%A0", xops);
27961 if (!flag_pic || targetm.binds_local_p (function))
27962 output_asm_insn ("jmp\t%P0", xops);
27967 rtx sym_ref = XEXP (DECL_RTL (function), 0);
27968 if (TARGET_MACHO_BRANCH_ISLANDS)
27969 sym_ref = (gen_rtx_SYMBOL_REF
27971 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
27972 tmp = gen_rtx_MEM (QImode, sym_ref);
27974 output_asm_insn ("jmp\t%0", xops);
27977 #endif /* TARGET_MACHO */
27979 tmp = gen_rtx_REG (SImode, CX_REG);
27980 output_set_got (tmp, NULL_RTX);
27983 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
27984 output_asm_insn ("jmp\t{*}%1", xops);
27987 final_end_function ();
27991 x86_file_start (void)
27993 default_file_start ();
27995 darwin_file_start ();
27997 if (X86_FILE_START_VERSION_DIRECTIVE)
27998 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
27999 if (X86_FILE_START_FLTUSED)
28000 fputs ("\t.global\t__fltused\n", asm_out_file);
28001 if (ix86_asm_dialect == ASM_INTEL)
28002 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
28006 x86_field_alignment (tree field, int computed)
28008 enum machine_mode mode;
28009 tree type = TREE_TYPE (field);
28011 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
28013 mode = TYPE_MODE (strip_array_types (type));
28014 if (mode == DFmode || mode == DCmode
28015 || GET_MODE_CLASS (mode) == MODE_INT
28016 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
28017 return MIN (32, computed);
28021 /* Output assembler code to FILE to increment profiler label # LABELNO
28022 for profiling a function entry. */
28024 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
28026 const char *mcount_name = (flag_fentry ? MCOUNT_NAME_BEFORE_PROLOGUE
28031 #ifndef NO_PROFILE_COUNTERS
28032 fprintf (file, "\tleaq\t%sP%d(%%rip),%%r11\n", LPREFIX, labelno);
28035 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
28036 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", mcount_name);
28038 fprintf (file, "\tcall\t%s\n", mcount_name);
28042 #ifndef NO_PROFILE_COUNTERS
28043 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
28046 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", mcount_name);
28050 #ifndef NO_PROFILE_COUNTERS
28051 fprintf (file, "\tmovl\t$%sP%d,%%" PROFILE_COUNT_REGISTER "\n",
28054 fprintf (file, "\tcall\t%s\n", mcount_name);
28058 /* We don't have exact information about the insn sizes, but we may assume
28059 quite safely that we are informed about all 1 byte insns and memory
28060 address sizes. This is enough to eliminate unnecessary padding in most cases. */
28064 min_insn_size (rtx insn)
28068 if (!INSN_P (insn) || !active_insn_p (insn))
28071 /* Discard alignments we've emitted, and jump instructions. */
28072 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
28073 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
28075 if (JUMP_TABLE_DATA_P (insn))
28078 /* Important case - calls are always 5 bytes.
28079 It is common to have many calls in a row. */
28081 && symbolic_reference_mentioned_p (PATTERN (insn))
28082 && !SIBLING_CALL_P (insn))
28084 len = get_attr_length (insn);
28088 /* For normal instructions we rely on get_attr_length being exact,
28089 with a few exceptions. */
28090 if (!JUMP_P (insn))
28092 enum attr_type type = get_attr_type (insn);
28097 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
28098 || asm_noperands (PATTERN (insn)) >= 0)
28105 /* Otherwise trust get_attr_length. */
28109 l = get_attr_length_address (insn);
28110 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
28119 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
28121 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16 byte window. */
28125 ix86_avoid_jump_mispredicts (void)
28127 rtx insn, start = get_insns ();
28128 int nbytes = 0, njumps = 0;
28131 /* Look for all minimal intervals of instructions containing 4 jumps.
28132 The intervals are bounded by START and INSN. NBYTES is the total
28133 size of instructions in the interval including INSN and not including
28134 START. When NBYTES is smaller than 16 bytes, it is possible
28135 that the end of START and INSN ends up in the same 16-byte page.
28137 The smallest offset in the page at which INSN can start is the case
28138 where START ends at offset 0. The offset of INSN is then
28139 NBYTES - sizeof (INSN), so we add a p2align to the 16-byte window with maxskip 15 - NBYTES + sizeof (INSN). */
28141 for (insn = start; insn; insn = NEXT_INSN (insn))
28145 if (LABEL_P (insn))
28147 int align = label_to_alignment (insn);
28148 int max_skip = label_to_max_skip (insn);
28152 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
28153 already in the current 16 byte page, because otherwise
28154 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
28155 bytes to reach 16 byte boundary. */
28157 || (align <= 3 && max_skip != (1 << align) - 1))
28160 fprintf (dump_file, "Label %i with max_skip %i\n",
28161 INSN_UID (insn), max_skip);
28164 while (nbytes + max_skip >= 16)
28166 start = NEXT_INSN (start);
28167 if ((JUMP_P (start)
28168 && GET_CODE (PATTERN (start)) != ADDR_VEC
28169 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
28171 njumps--, isjump = 1;
28174 nbytes -= min_insn_size (start);
28180 min_size = min_insn_size (insn);
28181 nbytes += min_size;
28183 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
28184 INSN_UID (insn), min_size);
28186 && GET_CODE (PATTERN (insn)) != ADDR_VEC
28187 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
28195 start = NEXT_INSN (start);
28196 if ((JUMP_P (start)
28197 && GET_CODE (PATTERN (start)) != ADDR_VEC
28198 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
28200 njumps--, isjump = 1;
28203 nbytes -= min_insn_size (start);
28205 gcc_assert (njumps >= 0);
28207 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
28208 INSN_UID (start), INSN_UID (insn), nbytes);
28210 if (njumps == 3 && isjump && nbytes < 16)
28212 int padsize = 15 - nbytes + min_insn_size (insn);
28215 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
28216 INSN_UID (insn), padsize);
28217 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
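/* A self-contained sketch of the sliding-window scan above, over a
   plain array of insn sizes and jump flags instead of the insn chain
   (hypothetical names, not compiled in; label and alignment handling
   omitted for brevity).  */
#if 0
#include <stdio.h>

static void
scan_jump_windows (const int *size, const char *is_jump, int n)
{
  int start = 0, nbytes = 0, njumps = 0, shrunk_past_jump = 0;

  for (int i = 0; i < n; i++)
    {
      nbytes += size[i];
      if (!is_jump[i])
	continue;
      njumps++;

      /* Shrink from the left until at most 3 jumps remain.  */
      while (njumps > 3)
	{
	  shrunk_past_jump = is_jump[start];
	  njumps -= is_jump[start];
	  nbytes -= size[start];
	  start++;
	}

      /* A 4th jump would land in the same 16-byte window: pad.  */
      if (njumps == 3 && shrunk_past_jump && nbytes < 16)
	printf ("pad jump %d by %d bytes\n", i, 15 - nbytes + size[i]);
    }
}
#endif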
28223 /* The AMD Athlon works faster
28224 when RET is not the destination of a conditional jump or directly preceded
28225 by another jump instruction. We avoid the penalty by inserting a NOP just
28226 before the RET instructions in such cases. */
28228 ix86_pad_returns (void)
28233 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
28235 basic_block bb = e->src;
28236 rtx ret = BB_END (bb);
28238 bool replace = false;
28240 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
28241 || optimize_bb_for_size_p (bb))
28243 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
28244 if (active_insn_p (prev) || LABEL_P (prev))
28246 if (prev && LABEL_P (prev))
28251 FOR_EACH_EDGE (e, ei, bb->preds)
28252 if (EDGE_FREQUENCY (e) && e->src->index >= 0
28253 && !(e->flags & EDGE_FALLTHRU))
28258 prev = prev_active_insn (ret);
28260 && ((JUMP_P (prev) && any_condjump_p (prev))
28263 /* Empty functions get a branch mispredict even when the jump destination
28264 is not visible to us. */
28265 if (!prev && !optimize_function_for_size_p (cfun))
28270 emit_jump_insn_before (gen_return_internal_long (), ret);
28276 /* Count the minimum number of instructions in BB. Return 4 if the
28277 number of instructions >= 4. */
28280 ix86_count_insn_bb (basic_block bb)
28283 int insn_count = 0;
28285 /* Count number of instructions in this block. Return 4 if the number
28286 of instructions >= 4. */
28287 FOR_BB_INSNS (bb, insn)
28289 /* This can only happen in exit blocks. */
28291 && GET_CODE (PATTERN (insn)) == RETURN)
28294 if (NONDEBUG_INSN_P (insn)
28295 && GET_CODE (PATTERN (insn)) != USE
28296 && GET_CODE (PATTERN (insn)) != CLOBBER)
28299 if (insn_count >= 4)
28308 /* Count the minimum number of instructions in code path in BB.
28309 Return 4 if the number of instructions >= 4. */
28312 ix86_count_insn (basic_block bb)
28316 int min_prev_count;
28318 /* Only bother counting instructions along paths with no
28319 more than 2 basic blocks between entry and exit. Given
28320 that BB has an edge to exit, determine if a predecessor
28321 of BB has an edge from entry. If so, compute the number
28322 of instructions in the predecessor block. If there
28323 happen to be multiple such blocks, compute the minimum. */
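/* For example, with the paths entry -> A -> BB -> exit and
   entry -> B -> BB -> exit, the value computed below is
   min (insns (A), insns (B)) + insns (BB), saturated at 4. */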
28324 min_prev_count = 4;
28325 FOR_EACH_EDGE (e, ei, bb->preds)
28328 edge_iterator prev_ei;
28330 if (e->src == ENTRY_BLOCK_PTR)
28332 min_prev_count = 0;
28335 FOR_EACH_EDGE (prev_e, prev_ei, e->src->preds)
28337 if (prev_e->src == ENTRY_BLOCK_PTR)
28339 int count = ix86_count_insn_bb (e->src);
28340 if (count < min_prev_count)
28341 min_prev_count = count;
28347 if (min_prev_count < 4)
28348 min_prev_count += ix86_count_insn_bb (bb);
28350 return min_prev_count;
28353 /* Pad short function to 4 instructions. */
28356 ix86_pad_short_function (void)
28361 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
28363 rtx ret = BB_END (e->src);
28364 if (JUMP_P (ret) && GET_CODE (PATTERN (ret)) == RETURN)
28366 int insn_count = ix86_count_insn (e->src);
28368 /* Pad short function. */
28369 if (insn_count < 4)
28373 /* Find epilogue. */
28376 while (insn && (!NOTE_P (insn) || NOTE_KIND (insn) != NOTE_INSN_EPILOGUE_BEG))
28377 insn = PREV_INSN (insn);
28382 /* Two NOPs are counted as one instruction. */
28383 insn_count = 2 * (4 - insn_count);
28384 emit_insn_before (gen_nops (GEN_INT (insn_count)), insn);
28390 /* Implement machine specific optimizations. We implement padding of returns
28391 for K8 CPUs and a pass to avoid 4 jumps in a single 16 byte window. */
28395 if (optimize && optimize_function_for_speed_p (cfun))
28397 if (TARGET_PAD_SHORT_FUNCTION)
28398 ix86_pad_short_function ();
28399 else if (TARGET_PAD_RETURNS)
28400 ix86_pad_returns ();
28401 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
28402 if (TARGET_FOUR_JUMP_LIMIT)
28403 ix86_avoid_jump_mispredicts ();
28408 /* Return nonzero when a QImode register that must be represented via a REX prefix is used. */
28411 x86_extended_QIreg_mentioned_p (rtx insn)
28414 extract_insn_cached (insn);
28415 for (i = 0; i < recog_data.n_operands; i++)
28416 if (REG_P (recog_data.operand[i])
28417 && REGNO (recog_data.operand[i]) > BX_REG)
28422 /* Return nonzero when P points to register encoded via REX prefix.
28423 Called via for_each_rtx. */
28425 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
28427 unsigned int regno;
28430 regno = REGNO (*p);
28431 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
28434 /* Return true when INSN mentions a register that must be encoded using a REX prefix. */
28437 x86_extended_reg_mentioned_p (rtx insn)
28439 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
28440 extended_reg_mentioned_1, NULL);
28443 /* If profitable, negate (without causing overflow) integer constant
28444 of mode MODE at location LOC. Return true in this case. */
28446 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
28450 if (!CONST_INT_P (*loc))
28456 /* DImode x86_64 constants must fit in 32 bits. */
28457 gcc_assert (x86_64_immediate_operand (*loc, mode));
28468 gcc_unreachable ();
28471 /* Avoid overflows. */
28472 if (mode_signbit_p (mode, *loc))
28475 val = INTVAL (*loc);
28477 /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
28478 Exceptions: -128 encodes smaller than 128, so swap sign and op. */
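/* E.g. (plus reg (const_int -4)) becomes "subl $4, %eax", while
   (plus reg (const_int -128)) is left as "addl $-128, %eax":
   -128 fits in a sign-extended imm8 but +128 would need imm32. */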
28479 if ((val < 0 && val != -128)
28482 *loc = GEN_INT (-val);
28489 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
28490 optabs would emit if we didn't have TFmode patterns. */
28493 x86_emit_floatuns (rtx operands[2])
28495 rtx neglab, donelab, i0, i1, f0, in, out;
28496 enum machine_mode mode, inmode;
28498 inmode = GET_MODE (operands[1]);
28499 gcc_assert (inmode == SImode || inmode == DImode);
28502 in = force_reg (inmode, operands[1]);
28503 mode = GET_MODE (out);
28504 neglab = gen_label_rtx ();
28505 donelab = gen_label_rtx ();
28506 f0 = gen_reg_rtx (mode);
28508 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
28510 expand_float (out, in, 0);
28512 emit_jump_insn (gen_jump (donelab));
28515 emit_label (neglab);
28517 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
28519 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
28521 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
28523 expand_float (f0, i0, 0);
28525 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
28527 emit_label (donelab);
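/* A scalar model of the sequence above (a sketch, not compiled in;
   the function name is hypothetical): inputs with the sign bit set
   are halved with the low bit kept sticky, converted signed, then
   doubled, which preserves correct rounding.  */
#if 0
static double
floatuns_sketch (unsigned long long u)
{
  if ((long long) u >= 0)
    return (double) (long long) u;		/* plain signed convert */

  unsigned long long i0 = (u >> 1) | (u & 1);	/* LSHIFTRT, AND, IOR */
  double f0 = (double) (long long) i0;		/* expand_float on i0 */
  return f0 + f0;				/* out = f0 + f0 */
}
#endif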
28530 /* AVX does not support 32-byte integer vector operations,
28531 thus the longest vector we are faced with is V16QImode. */
28532 #define MAX_VECT_LEN 16
28534 struct expand_vec_perm_d
28536 rtx target, op0, op1;
28537 unsigned char perm[MAX_VECT_LEN];
28538 enum machine_mode vmode;
28539 unsigned char nelt;
28543 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
28544 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
28546 /* Get a vector mode of the same size as the original but with elements
28547 twice as wide. This is only guaranteed to apply to integral vectors. */
28549 static inline enum machine_mode
28550 get_mode_wider_vector (enum machine_mode o)
28552 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
28553 enum machine_mode n = GET_MODE_WIDER_MODE (o);
28554 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
28555 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
28559 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
28560 with all elements equal to VAR. Return true if successful. */
28563 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
28564 rtx target, rtx val)
28587 /* First attempt to recognize VAL as-is. */
28588 dup = gen_rtx_VEC_DUPLICATE (mode, val);
28589 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
28590 if (recog_memoized (insn) < 0)
28593 /* If that fails, force VAL into a register. */
28596 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
28597 seq = get_insns ();
28600 emit_insn_before (seq, insn);
28602 ok = recog_memoized (insn) >= 0;
28611 if (TARGET_SSE || TARGET_3DNOW_A)
28615 val = gen_lowpart (SImode, val);
28616 x = gen_rtx_TRUNCATE (HImode, val);
28617 x = gen_rtx_VEC_DUPLICATE (mode, x);
28618 emit_insn (gen_rtx_SET (VOIDmode, target, x));
28631 struct expand_vec_perm_d dperm;
28635 memset (&dperm, 0, sizeof (dperm));
28636 dperm.target = target;
28637 dperm.vmode = mode;
28638 dperm.nelt = GET_MODE_NUNITS (mode);
28639 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
28641 /* Extend to SImode using a paradoxical SUBREG. */
28642 tmp1 = gen_reg_rtx (SImode);
28643 emit_move_insn (tmp1, gen_lowpart (SImode, val));
28645 /* Insert the SImode value as low element of a V4SImode vector. */
28646 tmp2 = gen_lowpart (V4SImode, dperm.op0);
28647 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
28649 ok = (expand_vec_perm_1 (&dperm)
28650 || expand_vec_perm_broadcast_1 (&dperm));
28662 /* Replicate the value once into the next wider mode and recurse. */
28664 enum machine_mode smode, wsmode, wvmode;
28667 smode = GET_MODE_INNER (mode);
28668 wvmode = get_mode_wider_vector (mode);
28669 wsmode = GET_MODE_INNER (wvmode);
28671 val = convert_modes (wsmode, smode, val, true);
28672 x = expand_simple_binop (wsmode, ASHIFT, val,
28673 GEN_INT (GET_MODE_BITSIZE (smode)),
28674 NULL_RTX, 1, OPTAB_LIB_WIDEN);
28675 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
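/* E.g. from QImode: the two binops above compute the HImode value
   (val << 8) | val; each recursion doubles the element width until a
   directly supported broadcast is reached. */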
28677 x = gen_lowpart (wvmode, target);
28678 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
28686 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
28687 rtx x = gen_reg_rtx (hvmode);
28689 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
28692 x = gen_rtx_VEC_CONCAT (mode, x, x);
28693 emit_insn (gen_rtx_SET (VOIDmode, target, x));
28702 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
28703 whose ONE_VAR element is VAR, and other elements are zero. Return true if successful. */
28707 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
28708 rtx target, rtx var, int one_var)
28710 enum machine_mode vsimode;
28713 bool use_vector_set = false;
28718 /* For SSE4.1, we normally use vector set. But if the second
28719 element is zero and inter-unit moves are OK, we use movq instead. */
28721 use_vector_set = (TARGET_64BIT
28723 && !(TARGET_INTER_UNIT_MOVES
28729 use_vector_set = TARGET_SSE4_1;
28732 use_vector_set = TARGET_SSE2;
28735 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
28742 use_vector_set = TARGET_AVX;
28745 /* Use ix86_expand_vector_set in 64bit mode only. */
28746 use_vector_set = TARGET_AVX && TARGET_64BIT;
28752 if (use_vector_set)
28754 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
28755 var = force_reg (GET_MODE_INNER (mode), var);
28756 ix86_expand_vector_set (mmx_ok, target, var, one_var);
28772 var = force_reg (GET_MODE_INNER (mode), var);
28773 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
28774 emit_insn (gen_rtx_SET (VOIDmode, target, x));
28779 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
28780 new_target = gen_reg_rtx (mode);
28782 new_target = target;
28783 var = force_reg (GET_MODE_INNER (mode), var);
28784 x = gen_rtx_VEC_DUPLICATE (mode, var);
28785 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
28786 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
28789 /* We need to shuffle the value to the correct position, so
28790 create a new pseudo to store the intermediate result. */
28792 /* With SSE2, we can use the integer shuffle insns. */
28793 if (mode != V4SFmode && TARGET_SSE2)
28795 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
28797 GEN_INT (one_var == 1 ? 0 : 1),
28798 GEN_INT (one_var == 2 ? 0 : 1),
28799 GEN_INT (one_var == 3 ? 0 : 1)));
28800 if (target != new_target)
28801 emit_move_insn (target, new_target);
28805 /* Otherwise convert the intermediate result to V4SFmode and
28806 use the SSE1 shuffle instructions. */
28807 if (mode != V4SFmode)
28809 tmp = gen_reg_rtx (V4SFmode);
28810 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
28815 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
28817 GEN_INT (one_var == 1 ? 0 : 1),
28818 GEN_INT (one_var == 2 ? 0+4 : 1+4),
28819 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
28821 if (mode != V4SFmode)
28822 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
28823 else if (tmp != target)
28824 emit_move_insn (target, tmp);
28826 else if (target != new_target)
28827 emit_move_insn (target, new_target);
28832 vsimode = V4SImode;
28838 vsimode = V2SImode;
28844 /* Zero extend the variable element to SImode and recurse. */
28845 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
28847 x = gen_reg_rtx (vsimode);
28848 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
28850 gcc_unreachable ();
28852 emit_move_insn (target, gen_lowpart (mode, x));
28860 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
28861 consisting of the values in VALS. It is known that all elements
28862 except ONE_VAR are constants. Return true if successful. */
28865 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
28866 rtx target, rtx vals, int one_var)
28868 rtx var = XVECEXP (vals, 0, one_var);
28869 enum machine_mode wmode;
28872 const_vec = copy_rtx (vals);
28873 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
28874 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
28882 /* For the two element vectors, it's just as easy to use
28883 the general case. */
28887 /* Use ix86_expand_vector_set in 64bit mode only. */
28910 /* There's no way to set one QImode entry easily. Combine
28911 the variable value with its adjacent constant value, and
28912 promote to an HImode set. */
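/* A sketch of the idea: for one_var == 5 in V16QImode, bytes 4 and 5
   form one little-endian HImode word; the variable byte is shifted
   into the high half, combined with constant byte 4, and the word is
   stored back at HImode index one_var >> 1 (= 2). */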
28913 x = XVECEXP (vals, 0, one_var ^ 1);
28916 var = convert_modes (HImode, QImode, var, true);
28917 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
28918 NULL_RTX, 1, OPTAB_LIB_WIDEN);
28919 x = GEN_INT (INTVAL (x) & 0xff);
28923 var = convert_modes (HImode, QImode, var, true);
28924 x = gen_int_mode (INTVAL (x) << 8, HImode);
28926 if (x != const0_rtx)
28927 var = expand_simple_binop (HImode, IOR, var, x, var,
28928 1, OPTAB_LIB_WIDEN);
28930 x = gen_reg_rtx (wmode);
28931 emit_move_insn (x, gen_lowpart (wmode, const_vec));
28932 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
28934 emit_move_insn (target, gen_lowpart (mode, x));
28941 emit_move_insn (target, const_vec);
28942 ix86_expand_vector_set (mmx_ok, target, var, one_var);
28946 /* A subroutine of ix86_expand_vector_init_general. Use vector
28947 concatenate to handle the most general case: all values variable,
28948 and none identical. */
28951 ix86_expand_vector_init_concat (enum machine_mode mode,
28952 rtx target, rtx *ops, int n)
28954 enum machine_mode cmode, hmode = VOIDmode;
28955 rtx first[8], second[4];
28995 gcc_unreachable ();
28998 if (!register_operand (ops[1], cmode))
28999 ops[1] = force_reg (cmode, ops[1]);
29000 if (!register_operand (ops[0], cmode))
29001 ops[0] = force_reg (cmode, ops[0]);
29002 emit_insn (gen_rtx_SET (VOIDmode, target,
29003 gen_rtx_VEC_CONCAT (mode, ops[0],
29023 gcc_unreachable ();
29039 gcc_unreachable ();
29044 /* FIXME: We process inputs backward to help RA. PR 36222. */
29047 for (; i > 0; i -= 2, j--)
29049 first[j] = gen_reg_rtx (cmode);
29050 v = gen_rtvec (2, ops[i - 1], ops[i]);
29051 ix86_expand_vector_init (false, first[j],
29052 gen_rtx_PARALLEL (cmode, v));
29058 gcc_assert (hmode != VOIDmode);
29059 for (i = j = 0; i < n; i += 2, j++)
29061 second[j] = gen_reg_rtx (hmode);
29062 ix86_expand_vector_init_concat (hmode, second [j],
29066 ix86_expand_vector_init_concat (mode, target, second, n);
29069 ix86_expand_vector_init_concat (mode, target, first, n);
29073 gcc_unreachable ();
29077 /* A subroutine of ix86_expand_vector_init_general. Use vector
29078 interleave to handle the most general case: all values variable,
29079 and none identical. */
29082 ix86_expand_vector_init_interleave (enum machine_mode mode,
29083 rtx target, rtx *ops, int n)
29085 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
29088 rtx (*gen_load_even) (rtx, rtx, rtx);
29089 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
29090 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
29095 gen_load_even = gen_vec_setv8hi;
29096 gen_interleave_first_low = gen_vec_interleave_lowv4si;
29097 gen_interleave_second_low = gen_vec_interleave_lowv2di;
29098 inner_mode = HImode;
29099 first_imode = V4SImode;
29100 second_imode = V2DImode;
29101 third_imode = VOIDmode;
29104 gen_load_even = gen_vec_setv16qi;
29105 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
29106 gen_interleave_second_low = gen_vec_interleave_lowv4si;
29107 inner_mode = QImode;
29108 first_imode = V8HImode;
29109 second_imode = V4SImode;
29110 third_imode = V2DImode;
29113 gcc_unreachable ();
29116 for (i = 0; i < n; i++)
29118 /* Extend the odd element to SImode using a paradoxical SUBREG. */
29119 op0 = gen_reg_rtx (SImode);
29120 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
29122 /* Insert the SImode value as low element of V4SImode vector. */
29123 op1 = gen_reg_rtx (V4SImode);
29124 op0 = gen_rtx_VEC_MERGE (V4SImode,
29125 gen_rtx_VEC_DUPLICATE (V4SImode,
29127 CONST0_RTX (V4SImode),
29129 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
29131 /* Cast the V4SImode vector back to a vector in the original mode. */
29132 op0 = gen_reg_rtx (mode);
29133 emit_move_insn (op0, gen_lowpart (mode, op1));
29135 /* Load even elements into the second position. */
29136 emit_insn (gen_load_even (op0,
29137 force_reg (inner_mode,
29141 /* Cast vector to FIRST_IMODE vector. */
29142 ops[i] = gen_reg_rtx (first_imode);
29143 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
29146 /* Interleave low FIRST_IMODE vectors. */
29147 for (i = j = 0; i < n; i += 2, j++)
29149 op0 = gen_reg_rtx (first_imode);
29150 emit_insn (gen_interleave_first_low (op0, ops[i], ops[i + 1]));
29152 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
29153 ops[j] = gen_reg_rtx (second_imode);
29154 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
29157 /* Interleave low SECOND_IMODE vectors. */
29158 switch (second_imode)
29161 for (i = j = 0; i < n / 2; i += 2, j++)
29163 op0 = gen_reg_rtx (second_imode);
29164 emit_insn (gen_interleave_second_low (op0, ops[i],
29167 /* Cast the SECOND_IMODE vector to the THIRD_IMODE vector. */
29169 ops[j] = gen_reg_rtx (third_imode);
29170 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
29172 second_imode = V2DImode;
29173 gen_interleave_second_low = gen_vec_interleave_lowv2di;
29177 op0 = gen_reg_rtx (second_imode);
29178 emit_insn (gen_interleave_second_low (op0, ops[0],
29181 /* Cast the SECOND_IMODE vector back to a vector in the original
29183 emit_insn (gen_rtx_SET (VOIDmode, target,
29184 gen_lowpart (mode, op0)));
29188 gcc_unreachable ();
29192 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
29193 all values variable, and none identical. */
29196 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
29197 rtx target, rtx vals)
29199 rtx ops[32], op0, op1;
29200 enum machine_mode half_mode = VOIDmode;
29207 if (!mmx_ok && !TARGET_SSE)
29219 n = GET_MODE_NUNITS (mode);
29220 for (i = 0; i < n; i++)
29221 ops[i] = XVECEXP (vals, 0, i);
29222 ix86_expand_vector_init_concat (mode, target, ops, n);
29226 half_mode = V16QImode;
29230 half_mode = V8HImode;
29234 n = GET_MODE_NUNITS (mode);
29235 for (i = 0; i < n; i++)
29236 ops[i] = XVECEXP (vals, 0, i);
29237 op0 = gen_reg_rtx (half_mode);
29238 op1 = gen_reg_rtx (half_mode);
29239 ix86_expand_vector_init_interleave (half_mode, op0, ops,
29241 ix86_expand_vector_init_interleave (half_mode, op1,
29242 &ops [n >> 1], n >> 2);
29243 emit_insn (gen_rtx_SET (VOIDmode, target,
29244 gen_rtx_VEC_CONCAT (mode, op0, op1)));
29248 if (!TARGET_SSE4_1)
29256 /* Don't use ix86_expand_vector_init_interleave if we can't
29257 move from GPR to SSE register directly. */
29258 if (!TARGET_INTER_UNIT_MOVES)
29261 n = GET_MODE_NUNITS (mode);
29262 for (i = 0; i < n; i++)
29263 ops[i] = XVECEXP (vals, 0, i);
29264 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
29272 gcc_unreachable ();
29276 int i, j, n_elts, n_words, n_elt_per_word;
29277 enum machine_mode inner_mode;
29278 rtx words[4], shift;
29280 inner_mode = GET_MODE_INNER (mode);
29281 n_elts = GET_MODE_NUNITS (mode);
29282 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
29283 n_elt_per_word = n_elts / n_words;
29284 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
29286 for (i = 0; i < n_words; ++i)
29288 rtx word = NULL_RTX;
29290 for (j = 0; j < n_elt_per_word; ++j)
29292 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
29293 elt = convert_modes (word_mode, inner_mode, elt, true);
29299 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
29300 word, 1, OPTAB_LIB_WIDEN);
29301 word = expand_simple_binop (word_mode, IOR, word, elt,
29302 word, 1, OPTAB_LIB_WIDEN);
29310 emit_move_insn (target, gen_lowpart (mode, words[0]));
29311 else if (n_words == 2)
29313 rtx tmp = gen_reg_rtx (mode);
29314 emit_clobber (tmp);
29315 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
29316 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
29317 emit_move_insn (target, tmp);
29319 else if (n_words == 4)
29321 rtx tmp = gen_reg_rtx (V4SImode);
29322 gcc_assert (word_mode == SImode);
29323 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
29324 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
29325 emit_move_insn (target, gen_lowpart (mode, tmp));
29328 gcc_unreachable ();
29332 /* Initialize vector TARGET via VALS. Suppress the use of MMX
29333 instructions unless MMX_OK is true. */
29336 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
29338 enum machine_mode mode = GET_MODE (target);
29339 enum machine_mode inner_mode = GET_MODE_INNER (mode);
29340 int n_elts = GET_MODE_NUNITS (mode);
29341 int n_var = 0, one_var = -1;
29342 bool all_same = true, all_const_zero = true;
29346 for (i = 0; i < n_elts; ++i)
29348 x = XVECEXP (vals, 0, i);
29349 if (!(CONST_INT_P (x)
29350 || GET_CODE (x) == CONST_DOUBLE
29351 || GET_CODE (x) == CONST_FIXED))
29352 n_var++, one_var = i;
29353 else if (x != CONST0_RTX (inner_mode))
29354 all_const_zero = false;
29355 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
29359 /* Constants are best loaded from the constant pool. */
29362 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
29366 /* If all values are identical, broadcast the value. */
29368 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
29369 XVECEXP (vals, 0, 0)))
29372 /* Values where only one field is non-constant are best loaded from
29373 the pool and overwritten via move later. */
29377 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
29378 XVECEXP (vals, 0, one_var),
29382 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
29386 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
29390 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
29392 enum machine_mode mode = GET_MODE (target);
29393 enum machine_mode inner_mode = GET_MODE_INNER (mode);
29394 enum machine_mode half_mode;
29395 bool use_vec_merge = false;
29397 static rtx (*gen_extract[6][2]) (rtx, rtx)
29399 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
29400 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
29401 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
29402 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
29403 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
29404 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
29406 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
29408 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
29409 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
29410 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
29411 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
29412 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
29413 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
29423 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
29424 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
29426 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
29428 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
29429 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
29435 use_vec_merge = TARGET_SSE4_1;
29443 /* For the two element vectors, we implement a VEC_CONCAT with
29444 the extraction of the other element. */
29446 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
29447 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
29450 op0 = val, op1 = tmp;
29452 op0 = tmp, op1 = val;
29454 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
29455 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
29460 use_vec_merge = TARGET_SSE4_1;
29467 use_vec_merge = true;
29471 /* tmp = target = A B C D */
29472 tmp = copy_to_reg (target);
29473 /* target = A A B B */
29474 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
29475 /* target = X A B B */
29476 ix86_expand_vector_set (false, target, val, 0);
29477 /* target = A X C D */
29478 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
29479 const1_rtx, const0_rtx,
29480 GEN_INT (2+4), GEN_INT (3+4)));
29484 /* tmp = target = A B C D */
29485 tmp = copy_to_reg (target);
29486 /* tmp = X B C D */
29487 ix86_expand_vector_set (false, tmp, val, 0);
29488 /* target = A B X D */
29489 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
29490 const0_rtx, const1_rtx,
29491 GEN_INT (0+4), GEN_INT (3+4)));
29495 /* tmp = target = A B C D */
29496 tmp = copy_to_reg (target);
29497 /* tmp = X B C D */
29498 ix86_expand_vector_set (false, tmp, val, 0);
29499 /* target = A B C X */
29500 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
29501 const0_rtx, const1_rtx,
29502 GEN_INT (2+4), GEN_INT (0+4)));
29506 gcc_unreachable ();
29511 use_vec_merge = TARGET_SSE4_1;
29515 /* Element 0 handled by vec_merge below. */
29518 use_vec_merge = true;
29524 /* With SSE2, use integer shuffles to swap element 0 and ELT,
29525 store into element 0, then shuffle them back. */
29529 order[0] = GEN_INT (elt);
29530 order[1] = const1_rtx;
29531 order[2] = const2_rtx;
29532 order[3] = GEN_INT (3);
29533 order[elt] = const0_rtx;
29535 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
29536 order[1], order[2], order[3]));
29538 ix86_expand_vector_set (false, target, val, 0);
29540 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
29541 order[1], order[2], order[3]));
29545 /* For SSE1, we have to reuse the V4SF code. */
29546 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
29547 gen_lowpart (SFmode, val), elt);
29552 use_vec_merge = TARGET_SSE2;
29555 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
29559 use_vec_merge = TARGET_SSE4_1;
29566 half_mode = V16QImode;
29572 half_mode = V8HImode;
29578 half_mode = V4SImode;
29584 half_mode = V2DImode;
29590 half_mode = V4SFmode;
29596 half_mode = V2DFmode;
29602 /* Compute offset. */
29606 gcc_assert (i <= 1);
29608 /* Extract the half. */
29609 tmp = gen_reg_rtx (half_mode);
29610 emit_insn (gen_extract[j][i] (tmp, target));
29612 /* Put val in tmp at elt. */
29613 ix86_expand_vector_set (false, tmp, val, elt);
29616 emit_insn (gen_insert[j][i] (target, target, tmp));
29625 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
29626 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
29627 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
29631 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
29633 emit_move_insn (mem, target);
29635 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
29636 emit_move_insn (tmp, val);
29638 emit_move_insn (target, mem);
29643 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
29645 enum machine_mode mode = GET_MODE (vec);
29646 enum machine_mode inner_mode = GET_MODE_INNER (mode);
29647 bool use_vec_extr = false;
29660 use_vec_extr = true;
29664 use_vec_extr = TARGET_SSE4_1;
29676 tmp = gen_reg_rtx (mode);
29677 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
29678 GEN_INT (elt), GEN_INT (elt),
29679 GEN_INT (elt+4), GEN_INT (elt+4)));
29683 tmp = gen_reg_rtx (mode);
29684 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
29688 gcc_unreachable ();
29691 use_vec_extr = true;
29696 use_vec_extr = TARGET_SSE4_1;
29710 tmp = gen_reg_rtx (mode);
29711 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
29712 GEN_INT (elt), GEN_INT (elt),
29713 GEN_INT (elt), GEN_INT (elt)));
29717 tmp = gen_reg_rtx (mode);
29718 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
29722 gcc_unreachable ();
29725 use_vec_extr = true;
29730 /* For SSE1, we have to reuse the V4SF code. */
29731 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
29732 gen_lowpart (V4SFmode, vec), elt);
29738 use_vec_extr = TARGET_SSE2;
29741 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
29745 use_vec_extr = TARGET_SSE4_1;
29749 /* ??? Could extract the appropriate HImode element and shift. */
29756 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
29757 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
29759 /* Let the rtl optimizers know about the zero extension performed. */
29760 if (inner_mode == QImode || inner_mode == HImode)
29762 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
29763 target = gen_lowpart (SImode, target);
29766 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
29770 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
29772 emit_move_insn (mem, vec);
29774 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
29775 emit_move_insn (target, tmp);
29779 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
29780 pattern to reduce; DEST is the destination; IN is the input vector. */
29783 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
29785 rtx tmp1, tmp2, tmp3;
29787 tmp1 = gen_reg_rtx (V4SFmode);
29788 tmp2 = gen_reg_rtx (V4SFmode);
29789 tmp3 = gen_reg_rtx (V4SFmode);
29791 emit_insn (gen_sse_movhlps (tmp1, in, in));
29792 emit_insn (fn (tmp2, tmp1, in));
29794 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
29795 const1_rtx, const1_rtx,
29796 GEN_INT (1+4), GEN_INT (1+4)));
29797 emit_insn (fn (dest, tmp2, tmp3));
29800 /* Target hook for scalar_mode_supported_p. */
29802 ix86_scalar_mode_supported_p (enum machine_mode mode)
29804 if (DECIMAL_FLOAT_MODE_P (mode))
29805 return default_decimal_float_supported_p ();
29806 else if (mode == TFmode)
29809 return default_scalar_mode_supported_p (mode);
29812 /* Implements target hook vector_mode_supported_p. */
29814 ix86_vector_mode_supported_p (enum machine_mode mode)
29816 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
29818 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
29820 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
29822 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
29824 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
29829 /* Target hook for c_mode_for_suffix. */
29830 static enum machine_mode
29831 ix86_c_mode_for_suffix (char suffix)
29841 /* Worker function for TARGET_MD_ASM_CLOBBERS.
29843 We do this in the new i386 backend to maintain source compatibility
29844 with the old cc0-based compiler. */
29847 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
29848 tree inputs ATTRIBUTE_UNUSED,
29851 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
29853 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
29858 /* Implements the target hook targetm.asm.encode_section_info. This
29859 is not used by netware. */
29861 static void ATTRIBUTE_UNUSED
29862 ix86_encode_section_info (tree decl, rtx rtl, int first)
29864 default_encode_section_info (decl, rtl, first);
29866 if (TREE_CODE (decl) == VAR_DECL
29867 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
29868 && ix86_in_large_data_p (decl))
29869 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
29872 /* Worker function for REVERSE_CONDITION. */
29875 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
29877 return (mode != CCFPmode && mode != CCFPUmode
29878 ? reverse_condition (code)
29879 : reverse_condition_maybe_unordered (code));
29882 /* Output code to perform an x87 FP register move, from OPERANDS[1] to OPERANDS[0]. */
29886 output_387_reg_move (rtx insn, rtx *operands)
29888 if (REG_P (operands[0]))
29890 if (REG_P (operands[1])
29891 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
29893 if (REGNO (operands[0]) == FIRST_STACK_REG)
29894 return output_387_ffreep (operands, 0);
29895 return "fstp\t%y0";
29897 if (STACK_TOP_P (operands[0]))
29898 return "fld%Z1\t%y1";
29901 else if (MEM_P (operands[0]))
29903 gcc_assert (REG_P (operands[1]));
29904 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
29905 return "fstp%Z0\t%y0";
29908 /* There is no non-popping store to memory for XFmode.
29909 So if we need one, follow the store with a load. */
29910 if (GET_MODE (operands[0]) == XFmode)
29911 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
29913 return "fst%Z0\t%y0";
29920 /* Output code to perform a conditional jump to LABEL, if C2 flag in
29921 FP status register is set. */
29924 ix86_emit_fp_unordered_jump (rtx label)
29926 rtx reg = gen_reg_rtx (HImode);
29929 emit_insn (gen_x86_fnstsw_1 (reg));
29931 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
29933 emit_insn (gen_x86_sahf_1 (reg));
29935 temp = gen_rtx_REG (CCmode, FLAGS_REG);
29936 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
29940 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
29942 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
29943 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
29946 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
29947 gen_rtx_LABEL_REF (VOIDmode, label),
29949 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
29951 emit_jump_insn (temp);
29952 predict_jump (REG_BR_PROB_BASE * 10 / 100);
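/* Assembly sketch of the two sequences emitted above:

	fnstsw	%ax			fnstsw	%ax
	sahf				testb	$0x04, %ah
	jp	label			jne	label

   C2 is bit 10 of the FP status word, i.e. bit 2 (mask 0x04) of %ah
   once FNSTSW has copied the word to %ax; SAHF moves %ah into EFLAGS,
   where that bit lands in PF. */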
29955 /* Output code to perform a log1p XFmode calculation. */
29957 void ix86_emit_i387_log1p (rtx op0, rtx op1)
29959 rtx label1 = gen_label_rtx ();
29960 rtx label2 = gen_label_rtx ();
29962 rtx tmp = gen_reg_rtx (XFmode);
29963 rtx tmp2 = gen_reg_rtx (XFmode);
29966 emit_insn (gen_absxf2 (tmp, op1));
29967 test = gen_rtx_GE (VOIDmode, tmp,
29968 CONST_DOUBLE_FROM_REAL_VALUE (
29969 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
29971 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
29973 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
29974 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
29975 emit_jump (label2);
29977 emit_label (label1);
29978 emit_move_insn (tmp, CONST1_RTX (XFmode));
29979 emit_insn (gen_addxf3 (tmp, op1, tmp));
29980 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
29981 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
29983 emit_label (label2);
29986 /* Output code to perform a Newton-Raphson approximation of a single precision
29987 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
29989 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
29991 rtx x0, x1, e0, e1, two;
29993 x0 = gen_reg_rtx (mode);
29994 e0 = gen_reg_rtx (mode);
29995 e1 = gen_reg_rtx (mode);
29996 x1 = gen_reg_rtx (mode);
29998 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
30000 if (VECTOR_MODE_P (mode))
30001 two = ix86_build_const_vector (SFmode, true, two);
30003 two = force_reg (mode, two);
30005 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
30007 /* x0 = rcp(b) estimate */
30008 emit_insn (gen_rtx_SET (VOIDmode, x0,
30009 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
30012 emit_insn (gen_rtx_SET (VOIDmode, e0,
30013 gen_rtx_MULT (mode, x0, a)));
30015 emit_insn (gen_rtx_SET (VOIDmode, e1,
30016 gen_rtx_MULT (mode, x0, b)));
30018 emit_insn (gen_rtx_SET (VOIDmode, x1,
30019 gen_rtx_MINUS (mode, two, e1)));
30020 /* res = e0 * x1 */
30021 emit_insn (gen_rtx_SET (VOIDmode, res,
30022 gen_rtx_MULT (mode, e0, x1)));
30025 /* Output code to perform a Newton-Raphson approximation of a
30026 single precision floating point [reciprocal] square root. */
30028 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
30031 rtx x0, e0, e1, e2, e3, mthree, mhalf;
30034 x0 = gen_reg_rtx (mode);
30035 e0 = gen_reg_rtx (mode);
30036 e1 = gen_reg_rtx (mode);
30037 e2 = gen_reg_rtx (mode);
30038 e3 = gen_reg_rtx (mode);
30040 real_from_integer (&r, VOIDmode, -3, -1, 0);
30041 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
30043 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
30044 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
30046 if (VECTOR_MODE_P (mode))
30048 mthree = ix86_build_const_vector (SFmode, true, mthree);
30049 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
30052 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
30053 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
30055 /* x0 = rsqrt(a) estimate */
30056 emit_insn (gen_rtx_SET (VOIDmode, x0,
30057 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
30060 /* If a == 0.0, filter out the infinity from the rsqrt estimate to prevent a NaN for sqrt(0.0). */
30065 zero = gen_reg_rtx (mode);
30066 mask = gen_reg_rtx (mode);
30068 zero = force_reg (mode, CONST0_RTX(mode));
30069 emit_insn (gen_rtx_SET (VOIDmode, mask,
30070 gen_rtx_NE (mode, zero, a)));
30072 emit_insn (gen_rtx_SET (VOIDmode, x0,
30073 gen_rtx_AND (mode, x0, mask)));
30077 emit_insn (gen_rtx_SET (VOIDmode, e0,
30078 gen_rtx_MULT (mode, x0, a)));
30080 emit_insn (gen_rtx_SET (VOIDmode, e1,
30081 gen_rtx_MULT (mode, e0, x0)));
30084 mthree = force_reg (mode, mthree);
30085 emit_insn (gen_rtx_SET (VOIDmode, e2,
30086 gen_rtx_PLUS (mode, e1, mthree)));
30088 mhalf = force_reg (mode, mhalf);
30090 /* e3 = -.5 * x0 */
30091 emit_insn (gen_rtx_SET (VOIDmode, e3,
30092 gen_rtx_MULT (mode, x0, mhalf)));
30094 /* e3 = -.5 * e0 */
30095 emit_insn (gen_rtx_SET (VOIDmode, e3,
30096 gen_rtx_MULT (mode, e0, mhalf)));
30097 /* ret = e2 * e3 */
30098 emit_insn (gen_rtx_SET (VOIDmode, res,
30099 gen_rtx_MULT (mode, e2, e3)));
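/* For illustration, the computation above as standalone scalar C, a
   sketch in which the hypothetical rsqrt() models the rsqrtss
   estimate:

     static float sw_rsqrt (float a)
     {
       float x0 = rsqrt (a);     // x0 ~= 1/sqrt(a)
       float e0 = x0 * a;        // e0 = a * x0
       float e1 = e0 * x0;       // e1 = a * x0 * x0
       float e2 = e1 - 3.0f;     // e2 = a * x0 * x0 - 3  (e1 + mthree)
       float e3 = x0 * -0.5f;    // e3 = -.5 * x0
       return e2 * e3;           // -.5 * x0 * (a * x0 * x0 - 3)
     }

   If x0 were exact, a * x0 * x0 - 3 would be -2 and the result would
   be exactly x0; the Newton-Raphson step squares the estimate error. */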
30102 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
30104 static void ATTRIBUTE_UNUSED
30105 i386_solaris_elf_named_section (const char *name, unsigned int flags,
30108 /* With Binutils 2.15, the "@unwind" marker must be specified on
30109 every occurrence of the ".eh_frame" section, not just the first one. */
30112 && strcmp (name, ".eh_frame") == 0)
30114 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
30115 flags & SECTION_WRITE ? "aw" : "a");
30118 default_elf_asm_named_section (name, flags, decl);
30121 /* Return the mangling of TYPE if it is an extended fundamental type. */
30123 static const char *
30124 ix86_mangle_type (const_tree type)
30126 type = TYPE_MAIN_VARIANT (type);
30128 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
30129 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
30132 switch (TYPE_MODE (type))
30135 /* __float128 is "g". */
30138 /* "long double" or __float80 is "e". */
30145 /* For 32-bit code we can save PIC register setup by using
30146 __stack_chk_fail_local hidden function instead of calling
30147 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
30148 register, so it is better to call __stack_chk_fail directly. */
30151 ix86_stack_protect_fail (void)
30153 return TARGET_64BIT
30154 ? default_external_stack_protect_fail ()
30155 : default_hidden_stack_protect_fail ();
30158 /* Select a format to encode pointers in exception handling data. CODE
30159 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
30160 true if the symbol may be affected by dynamic relocations.
30162 ??? All x86 object file formats are capable of representing this.
30163 After all, the relocation needed is the same as for the call insn.
30164 Whether or not a particular assembler allows us to enter such, I
30165 guess we'll have to see. */
30167 asm_preferred_eh_data_format (int code, int global)
30171 int type = DW_EH_PE_sdata8;
30173 || ix86_cmodel == CM_SMALL_PIC
30174 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
30175 type = DW_EH_PE_sdata4;
30176 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
30178 if (ix86_cmodel == CM_SMALL
30179 || (ix86_cmodel == CM_MEDIUM && code))
30180 return DW_EH_PE_udata4;
30181 return DW_EH_PE_absptr;
30184 /* Expand copysign from SIGN to the positive value ABS_VALUE
30185 storing in RESULT. If MASK is non-null, it shall be a mask to mask out the sign bit. */
30188 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
30190 enum machine_mode mode = GET_MODE (sign);
30191 rtx sgn = gen_reg_rtx (mode);
30192 if (mask == NULL_RTX)
30194 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
30195 if (!VECTOR_MODE_P (mode))
30197 /* We need to generate a scalar mode mask in this case. */
30198 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
30199 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
30200 mask = gen_reg_rtx (mode);
30201 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
30205 mask = gen_rtx_NOT (mode, mask);
30206 emit_insn (gen_rtx_SET (VOIDmode, sgn,
30207 gen_rtx_AND (mode, mask, sign)));
30208 emit_insn (gen_rtx_SET (VOIDmode, result,
30209 gen_rtx_IOR (mode, abs_value, sgn)));
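/* For illustration, the net effect of the mask arithmetic above, as
   plain C on the bit pattern of a float (a sketch assuming
   <stdint.h> and <string.h>; ABS_VALUE is assumed nonnegative, and
   the RTL version operates on whatever scalar or vector mode SIGN
   has):

     static float my_copysign_pos (float abs_value, float sign)
     {
       uint32_t a, s;
       memcpy (&a, &abs_value, sizeof a);
       memcpy (&s, &sign, sizeof s);
       s &= UINT32_C (0x80000000);    // sgn = sign & sign-bit mask
       a |= s;                        // result = abs_value | sgn
       memcpy (&abs_value, &a, sizeof a);
       return abs_value;
     }
*/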
30212 /* Expand fabs (OP0) and return a new rtx that holds the result. The
30213 mask for masking out the sign-bit is stored in *SMASK, if that is non-null. */
30216 ix86_expand_sse_fabs (rtx op0, rtx *smask)
30218 enum machine_mode mode = GET_MODE (op0);
30221 xa = gen_reg_rtx (mode);
30222 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
30223 if (!VECTOR_MODE_P (mode))
30225 /* We need to generate a scalar mode mask in this case. */
30226 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
30227 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
30228 mask = gen_reg_rtx (mode);
30229 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
30231 emit_insn (gen_rtx_SET (VOIDmode, xa,
30232 gen_rtx_AND (mode, op0, mask)));
30240 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
30241 swapping the operands if SWAP_OPERANDS is true. The expanded
30242 code is a forward jump to a newly created label in case the
30243 comparison is true. The generated label rtx is returned. */
30245 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
30246 bool swap_operands)
30257 label = gen_label_rtx ();
30258 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
30259 emit_insn (gen_rtx_SET (VOIDmode, tmp,
30260 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
30261 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
30262 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
30263 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
30264 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
30265 JUMP_LABEL (tmp) = label;
30270 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
30271 using comparison code CODE. Operands are swapped for the comparison if
30272 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
30274 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
30275 bool swap_operands)
30277 enum machine_mode mode = GET_MODE (op0);
30278 rtx mask = gen_reg_rtx (mode);
30287 if (mode == DFmode)
30288 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
30289 gen_rtx_fmt_ee (code, mode, op0, op1)));
30291 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
30292 gen_rtx_fmt_ee (code, mode, op0, op1)));
30297 /* Generate and return a rtx of mode MODE for 2**n where n is the number
30298 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
30300 ix86_gen_TWO52 (enum machine_mode mode)
30302 REAL_VALUE_TYPE TWO52r;
30305 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
30306 TWO52 = const_double_from_real_value (TWO52r, mode);
30307 TWO52 = force_reg (mode, TWO52);
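/* This constant enables the classic add/subtract rounding trick used
   by the expanders below: for 0 <= x < 2**52 in DFmode (2**23 in
   SFmode), x + 2**52 has no fraction bits left, so the addition
   rounds x to an integer in the current rounding mode and subtracting
   2**52 again recovers that integer exactly.  A plain C sketch,
   assuming round-to-nearest and that the compiler does not fold the
   arithmetic away:

     double two52 = 4503599627370496.0;   // 2**52
     double r = (x + two52) - two52;      // x rounded to nearest int
*/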
30312 /* Expand SSE sequence for computing lround from OP1 storing into OP0. */
30315 ix86_expand_lround (rtx op0, rtx op1)
30317 /* C code for the stuff we're doing below:
30318 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
30321 enum machine_mode mode = GET_MODE (op1);
30322 const struct real_format *fmt;
30323 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
30326 /* load nextafter (0.5, 0.0) */
30327 fmt = REAL_MODE_FORMAT (mode);
30328 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
30329 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
30331 /* adj = copysign (0.5, op1) */
30332 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
30333 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
30335 /* adj = op1 + adj */
30336 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
30338 /* op0 = (imode)adj */
30339 expand_fix (op0, adj, 0);
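/* Using the predecessor of 0.5, i.e. 0.5 - 2**(-p-1) with p the
   mantissa precision, instead of 0.5 itself avoids a double-rounding
   bug: for the largest representable value just below 0.5, adding an
   exact 0.5 would round up to 1.0 and truncate to 1 instead of 0.
   For illustration, the sequence as plain C (a sketch):

     double adj = copysign (nextafter (0.5, 0.0), op1);
     long res = (long) (op1 + adj);    // halfway cases round away
                                       // from zero, as lround wants
*/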
30342 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing into OPERAND0. */
30345 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
30347 /* C code for the stuff we're doing below (for do_floor):
30349 xi -= (double)xi > op1 ? 1 : 0;
30352 enum machine_mode fmode = GET_MODE (op1);
30353 enum machine_mode imode = GET_MODE (op0);
30354 rtx ireg, freg, label, tmp;
30356 /* reg = (long)op1 */
30357 ireg = gen_reg_rtx (imode);
30358 expand_fix (ireg, op1, 0);
30360 /* freg = (double)reg */
30361 freg = gen_reg_rtx (fmode);
30362 expand_float (freg, ireg, 0);
30364 /* ireg = (freg > op1) ? ireg - 1 : ireg */
30365 label = ix86_expand_sse_compare_and_jump (UNLE,
30366 freg, op1, !do_floor);
30367 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
30368 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
30369 emit_move_insn (ireg, tmp);
30371 emit_label (label);
30372 LABEL_NUSES (label) = 1;
30374 emit_move_insn (op0, ireg);
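/* For illustration, the floor case as plain C (a sketch; like the
   expander it ignores overflow and NaN, which the fast-math callers
   accept):

     long my_lfloor (double x)
     {
       long i = (long) x;       // truncates toward zero
       if ((double) i > x)      // only possible for negative x
         i -= 1;                // compensate the truncation
       return i;
     }

   The ceiling case instead tests (double) i < x and adds 1. */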
30377 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
30378 result in OPERAND0. */
30380 ix86_expand_rint (rtx operand0, rtx operand1)
30382 /* C code for the stuff we're doing below:
30383 xa = fabs (operand1);
30384 if (!isless (xa, 2**52))
30386 xa = xa + 2**52 - 2**52;
30387 return copysign (xa, operand1);
30389 enum machine_mode mode = GET_MODE (operand0);
30390 rtx res, xa, label, TWO52, mask;
30392 res = gen_reg_rtx (mode);
30393 emit_move_insn (res, operand1);
30395 /* xa = abs (operand1) */
30396 xa = ix86_expand_sse_fabs (res, &mask);
30398 /* if (!isless (xa, TWO52)) goto label; */
30399 TWO52 = ix86_gen_TWO52 (mode);
30400 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
30402 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
30403 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
30405 ix86_sse_copysign_to_positive (res, xa, res, mask);
30407 emit_label (label);
30408 LABEL_NUSES (label) = 1;
30410 emit_move_insn (operand0, res);
30413 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing into OPERAND0. */
30416 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
30418 /* C code for the stuff we expand below.
30419 double xa = fabs (x), x2;
30420 if (!isless (xa, TWO52))
30422 xa = xa + TWO52 - TWO52;
30423 x2 = copysign (xa, x);
30432 enum machine_mode mode = GET_MODE (operand0);
30433 rtx xa, TWO52, tmp, label, one, res, mask;
30435 TWO52 = ix86_gen_TWO52 (mode);
30437 /* Temporary for holding the result, initialized to the input
30438 operand to ease control flow. */
30439 res = gen_reg_rtx (mode);
30440 emit_move_insn (res, operand1);
30442 /* xa = abs (operand1) */
30443 xa = ix86_expand_sse_fabs (res, &mask);
30445 /* if (!isless (xa, TWO52)) goto label; */
30446 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
30448 /* xa = xa + TWO52 - TWO52; */
30449 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
30450 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
30452 /* xa = copysign (xa, operand1) */
30453 ix86_sse_copysign_to_positive (xa, xa, res, mask);
30455 /* generate 1.0 or -1.0 */
30456 one = force_reg (mode,
30457 const_double_from_real_value (do_floor
30458 ? dconst1 : dconstm1, mode));
30460 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
30461 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
30462 emit_insn (gen_rtx_SET (VOIDmode, tmp,
30463 gen_rtx_AND (mode, one, tmp)));
30464 /* We always need to subtract here to preserve signed zero. */
30465 tmp = expand_simple_binop (mode, MINUS,
30466 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
30467 emit_move_insn (res, tmp);
30469 emit_label (label);
30470 LABEL_NUSES (label) = 1;
30472 emit_move_insn (operand0, res);
30475 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing into OPERAND0. */
30478 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
30480 /* C code for the stuff we expand below.
30481 double xa = fabs (x), x2;
30482 if (!isless (xa, TWO52))
30484 x2 = (double)(long)x;
30491 if (HONOR_SIGNED_ZEROS (mode))
30492 return copysign (x2, x);
30495 enum machine_mode mode = GET_MODE (operand0);
30496 rtx xa, xi, TWO52, tmp, label, one, res, mask;
30498 TWO52 = ix86_gen_TWO52 (mode);
30500 /* Temporary for holding the result, initialized to the input
30501 operand to ease control flow. */
30502 res = gen_reg_rtx (mode);
30503 emit_move_insn (res, operand1);
30505 /* xa = abs (operand1) */
30506 xa = ix86_expand_sse_fabs (res, &mask);
30508 /* if (!isless (xa, TWO52)) goto label; */
30509 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
30511 /* xa = (double)(long)x */
30512 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
30513 expand_fix (xi, res, 0);
30514 expand_float (xa, xi, 0);
30517 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
30519 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
30520 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
30521 emit_insn (gen_rtx_SET (VOIDmode, tmp,
30522 gen_rtx_AND (mode, one, tmp)));
30523 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
30524 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
30525 emit_move_insn (res, tmp);
30527 if (HONOR_SIGNED_ZEROS (mode))
30528 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
30530 emit_label (label);
30531 LABEL_NUSES (label) = 1;
30533 emit_move_insn (operand0, res);
30536 /* Expand SSE sequence for computing round from OPERAND1 storing
30537 into OPERAND0. Sequence that works without relying on DImode truncation
30538 via cvttsd2siq, which is only available on 64-bit targets. */
30540 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
30542 /* C code for the stuff we expand below.
30543 double xa = fabs (x), xa2, x2;
30544 if (!isless (xa, TWO52))
30546 Using the absolute value and copying back sign makes
30547 -0.0 -> -0.0 correct.
30548 xa2 = xa + TWO52 - TWO52;
30553 else if (dxa > 0.5)
30555 x2 = copysign (xa2, x);
30558 enum machine_mode mode = GET_MODE (operand0);
30559 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
30561 TWO52 = ix86_gen_TWO52 (mode);
30563 /* Temporary for holding the result, initialized to the input
30564 operand to ease control flow. */
30565 res = gen_reg_rtx (mode);
30566 emit_move_insn (res, operand1);
30568 /* xa = abs (operand1) */
30569 xa = ix86_expand_sse_fabs (res, &mask);
30571 /* if (!isless (xa, TWO52)) goto label; */
30572 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
30574 /* xa2 = xa + TWO52 - TWO52; */
30575 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
30576 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
30578 /* dxa = xa2 - xa; */
30579 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
30581 /* generate 0.5, 1.0 and -0.5 */
30582 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
30583 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
30584 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
30588 tmp = gen_reg_rtx (mode);
30589 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
30590 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
30591 emit_insn (gen_rtx_SET (VOIDmode, tmp,
30592 gen_rtx_AND (mode, one, tmp)));
30593 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
30594 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
30595 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
30596 emit_insn (gen_rtx_SET (VOIDmode, tmp,
30597 gen_rtx_AND (mode, one, tmp)));
30598 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
30600 /* res = copysign (xa2, operand1) */
30601 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
30603 emit_label (label);
30604 LABEL_NUSES (label) = 1;
30606 emit_move_insn (operand0, res);
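/* For illustration, the compensation logic above as plain C (a
   sketch; two52 is the constant from ix86_gen_TWO52, and the two
   adjustments convert the round-to-nearest-even result into the
   round-half-away-from-zero result that round() requires):

     double my_round (double x)
     {
       double xa  = fabs (x);
       double xa2 = (xa + two52) - two52;  // nearest, ties to even
       double dxa = xa2 - xa;              // in [-0.5, 0.5]
       if (dxa > 0.5)   xa2 -= 1.0;        // rounded up too far
       if (dxa <= -0.5) xa2 += 1.0;        // halfway must go up
       return copysign (xa2, x);           // restore sign, keeps -0.0
     }
*/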
30609 /* Expand SSE sequence for computing trunc from OPERAND1 storing into OPERAND0. */
30612 ix86_expand_trunc (rtx operand0, rtx operand1)
30614 /* C code for SSE variant we expand below.
30615 double xa = fabs (x), x2;
30616 if (!isless (xa, TWO52))
30618 x2 = (double)(long)x;
30619 if (HONOR_SIGNED_ZEROS (mode))
30620 return copysign (x2, x);
30623 enum machine_mode mode = GET_MODE (operand0);
30624 rtx xa, xi, TWO52, label, res, mask;
30626 TWO52 = ix86_gen_TWO52 (mode);
30628 /* Temporary for holding the result, initialized to the input
30629 operand to ease control flow. */
30630 res = gen_reg_rtx (mode);
30631 emit_move_insn (res, operand1);
30633 /* xa = abs (operand1) */
30634 xa = ix86_expand_sse_fabs (res, &mask);
30636 /* if (!isless (xa, TWO52)) goto label; */
30637 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
30639 /* x = (double)(long)x */
30640 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
30641 expand_fix (xi, res, 0);
30642 expand_float (res, xi, 0);
30644 if (HONOR_SIGNED_ZEROS (mode))
30645 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
30647 emit_label (label);
30648 LABEL_NUSES (label) = 1;
30650 emit_move_insn (operand0, res);
30653 /* Expand SSE sequence for computing trunc from OPERAND1 storing into OPERAND0. */
30656 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
30658 enum machine_mode mode = GET_MODE (operand0);
30659 rtx xa, mask, TWO52, label, one, res, smask, tmp;
30661 /* C code for SSE variant we expand below.
30662 double xa = fabs (x), x2;
30663 if (!isless (xa, TWO52))
30665 xa2 = xa + TWO52 - TWO52;
30669 x2 = copysign (xa2, x);
30673 TWO52 = ix86_gen_TWO52 (mode);
30675 /* Temporary for holding the result, initialized to the input
30676 operand to ease control flow. */
30677 res = gen_reg_rtx (mode);
30678 emit_move_insn (res, operand1);
30680 /* xa = abs (operand1) */
30681 xa = ix86_expand_sse_fabs (res, &smask);
30683 /* if (!isless (xa, TWO52)) goto label; */
30684 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
30686 /* res = xa + TWO52 - TWO52; */
30687 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
30688 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
30689 emit_move_insn (res, tmp);
30692 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
30694 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
30695 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
30696 emit_insn (gen_rtx_SET (VOIDmode, mask,
30697 gen_rtx_AND (mode, mask, one)));
30698 tmp = expand_simple_binop (mode, MINUS,
30699 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
30700 emit_move_insn (res, tmp);
30702 /* res = copysign (res, operand1) */
30703 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
30705 emit_label (label);
30706 LABEL_NUSES (label) = 1;
30708 emit_move_insn (operand0, res);
30711 /* Expand SSE sequence for computing round from OPERAND1 storing into OPERAND0. */
30714 ix86_expand_round (rtx operand0, rtx operand1)
30716 /* C code for the stuff we're doing below:
30717 double xa = fabs (x);
30718 if (!isless (xa, TWO52))
30720 xa = (double)(long)(xa + nextafter (0.5, 0.0));
30721 return copysign (xa, x);
30723 enum machine_mode mode = GET_MODE (operand0);
30724 rtx res, TWO52, xa, label, xi, half, mask;
30725 const struct real_format *fmt;
30726 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
30728 /* Temporary for holding the result, initialized to the input
30729 operand to ease control flow. */
30730 res = gen_reg_rtx (mode);
30731 emit_move_insn (res, operand1);
30733 TWO52 = ix86_gen_TWO52 (mode);
30734 xa = ix86_expand_sse_fabs (res, &mask);
30735 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
30737 /* load nextafter (0.5, 0.0) */
30738 fmt = REAL_MODE_FORMAT (mode);
30739 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
30740 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
30742 /* xa = xa + 0.5 */
30743 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
30744 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
30746 /* xa = (double)(int64_t)xa */
30747 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
30748 expand_fix (xi, xa, 0);
30749 expand_float (xa, xi, 0);
30751 /* res = copysign (xa, operand1) */
30752 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
30754 emit_label (label);
30755 LABEL_NUSES (label) = 1;
30757 emit_move_insn (operand0, res);
30761 /* Table of valid machine attributes. */
30762 static const struct attribute_spec ix86_attribute_table[] =
30764 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
30765 /* Stdcall attribute says callee is responsible for popping arguments
30766 if they are not variable. */
30767 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
30768 /* Fastcall attribute says callee is responsible for popping arguments
30769 if they are not variable. */
30770 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
30771 /* Thiscall attribute says callee is responsible for popping arguments
30772 if they are not variable. */
30773 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
30774 /* Cdecl attribute says the callee is a normal C declaration */
30775 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
30776 /* Regparm attribute specifies how many integer arguments are to be
30777 passed in registers. */
30778 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
30779 /* Sseregparm attribute says we are using x86_64 calling conventions
30780 for FP arguments. */
30781 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
30782 /* force_align_arg_pointer says this function realigns the stack at entry. */
30783 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
30784 false, true, true, ix86_handle_cconv_attribute },
30785 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30786 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
30787 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
30788 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
30790 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
30791 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
30792 #ifdef SUBTARGET_ATTRIBUTE_TABLE
30793 SUBTARGET_ATTRIBUTE_TABLE,
30795 /* ms_abi and sysv_abi calling convention function attributes. */
30796 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
30797 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
30798 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute },
30800 { NULL, 0, 0, false, false, false, NULL }
30803 /* Implement targetm.vectorize.builtin_vectorization_cost. */
30805 ix86_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
30806 tree vectype ATTRIBUTE_UNUSED,
30807 int misalign ATTRIBUTE_UNUSED)
30809 switch (type_of_cost)
30812 return ix86_cost->scalar_stmt_cost;
30815 return ix86_cost->scalar_load_cost;
30818 return ix86_cost->scalar_store_cost;
30821 return ix86_cost->vec_stmt_cost;
30824 return ix86_cost->vec_align_load_cost;
30827 return ix86_cost->vec_store_cost;
30829 case vec_to_scalar:
30830 return ix86_cost->vec_to_scalar_cost;
30832 case scalar_to_vec:
30833 return ix86_cost->scalar_to_vec_cost;
30835 case unaligned_load:
30836 case unaligned_store:
30837 return ix86_cost->vec_unalign_load_cost;
30839 case cond_branch_taken:
30840 return ix86_cost->cond_taken_branch_cost;
30842 case cond_branch_not_taken:
30843 return ix86_cost->cond_not_taken_branch_cost;
30849 gcc_unreachable ();
30854 /* Implement targetm.vectorize.builtin_vec_perm. */
30857 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
30859 tree itype = TREE_TYPE (vec_type);
30860 bool u = TYPE_UNSIGNED (itype);
30861 enum machine_mode vmode = TYPE_MODE (vec_type);
30862 enum ix86_builtins fcode;
30863 bool ok = TARGET_SSE2;
30869 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
30872 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
30874 itype = ix86_get_builtin_type (IX86_BT_DI);
30879 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
30883 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
30885 itype = ix86_get_builtin_type (IX86_BT_SI);
30889 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
30892 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
30895 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
30898 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
30908 *mask_type = itype;
30909 return ix86_builtins[(int) fcode];
30912 /* Return a vector mode with twice as many elements as VMODE. */
30913 /* ??? Consider moving this to a table generated by genmodes.c. */
30915 static enum machine_mode
30916 doublesize_vector_mode (enum machine_mode vmode)
30920 case V2SFmode: return V4SFmode;
30921 case V1DImode: return V2DImode;
30922 case V2SImode: return V4SImode;
30923 case V4HImode: return V8HImode;
30924 case V8QImode: return V16QImode;
30926 case V2DFmode: return V4DFmode;
30927 case V4SFmode: return V8SFmode;
30928 case V2DImode: return V4DImode;
30929 case V4SImode: return V8SImode;
30930 case V8HImode: return V16HImode;
30931 case V16QImode: return V32QImode;
30933 case V4DFmode: return V8DFmode;
30934 case V8SFmode: return V16SFmode;
30935 case V4DImode: return V8DImode;
30936 case V8SImode: return V16SImode;
30937 case V16HImode: return V32HImode;
30938 case V32QImode: return V64QImode;
30941 gcc_unreachable ();
30945 /* Construct (set target (vec_select op0 (parallel perm))) and
30946 return true if that's a valid instruction in the active ISA. */
30949 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
30951 rtx rperm[MAX_VECT_LEN], x;
30954 for (i = 0; i < nelt; ++i)
30955 rperm[i] = GEN_INT (perm[i]);
30957 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
30958 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
30959 x = gen_rtx_SET (VOIDmode, target, x);
30962 if (recog_memoized (x) < 0)
30970 /* Similar, but generate a vec_concat from op0 and op1 as well. */
30973 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
30974 const unsigned char *perm, unsigned nelt)
30976 enum machine_mode v2mode;
30979 v2mode = doublesize_vector_mode (GET_MODE (op0));
30980 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
30981 return expand_vselect (target, x, perm, nelt);
30984 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
30985 in terms of blendp[sd] / pblendw / pblendvb. */
30988 expand_vec_perm_blend (struct expand_vec_perm_d *d)
30990 enum machine_mode vmode = d->vmode;
30991 unsigned i, mask, nelt = d->nelt;
30992 rtx target, op0, op1, x;
30994 if (!TARGET_SSE4_1 || d->op0 == d->op1)
30996 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
30999 /* This is a blend, not a permute. Elements must stay in their
31000 respective lanes. */
31001 for (i = 0; i < nelt; ++i)
31003 unsigned e = d->perm[i];
31004 if (!(e == i || e == i + nelt))
31011 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
31012 decision should be extracted elsewhere, so that we only try that
31013 sequence once all budget==3 options have been tried. */
31015 /* For bytes, see if bytes move in pairs so we can use pblendw with
31016 an immediate argument, rather than pblendvb with a vector argument. */
31017 if (vmode == V16QImode)
31019 bool pblendw_ok = true;
31020 for (i = 0; i < 16 && pblendw_ok; i += 2)
31021 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
31025 rtx rperm[16], vperm;
31027 for (i = 0; i < nelt; ++i)
31028 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
31030 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
31031 vperm = force_reg (V16QImode, vperm);
31033 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
31038 target = d->target;
31050 for (i = 0; i < nelt; ++i)
31051 mask |= (d->perm[i] >= nelt) << i;
31055 for (i = 0; i < 2; ++i)
31056 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
31060 for (i = 0; i < 4; ++i)
31061 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
31065 for (i = 0; i < 8; ++i)
31066 mask |= (d->perm[i * 2] >= 16) << i;
31070 target = gen_lowpart (vmode, target);
31071 op0 = gen_lowpart (vmode, op0);
31072 op1 = gen_lowpart (vmode, op1);
31076 gcc_unreachable ();
31079 /* This matches five different patterns with the different modes. */
31080 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
31081 x = gen_rtx_SET (VOIDmode, target, x);
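/* Worked example of the immediate construction above, V4SF case: for
   perm = { 0, 5, 2, 7 }, elements 1 and 3 select from op1 (indices
   >= nelt), so

     mask = 0;
     for (i = 0; i < 4; ++i)
       mask |= (perm[i] >= 4) << i;    // mask == 0xA

   which corresponds to blendps with immediate operand 0xA. */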
31087 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
31088 in terms of the variable form of vpermilps.
31090 Note that we will have already failed the immediate input vpermilps,
31091 which requires that the high and low part shuffle be identical; the
31092 variable form doesn't require that. */
31095 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
31097 rtx rperm[8], vperm;
31100 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
31103 /* We can only permute within the 128-bit lane. */
31104 for (i = 0; i < 8; ++i)
31106 unsigned e = d->perm[i];
31107 if (i < 4 ? e >= 4 : e < 4)
31114 for (i = 0; i < 8; ++i)
31116 unsigned e = d->perm[i];
31118 /* Within each 128-bit lane, the elements of op0 are numbered
31119 from 0 and the elements of op1 are numbered from 4. */
31125 rperm[i] = GEN_INT (e);
31128 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
31129 vperm = force_reg (V8SImode, vperm);
31130 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
31135 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
31136 in terms of pshufb or vpperm. */
31139 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
31141 unsigned i, nelt, eltsz;
31142 rtx rperm[16], vperm, target, op0, op1;
31144 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
31146 if (GET_MODE_SIZE (d->vmode) != 16)
31153 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
31155 for (i = 0; i < nelt; ++i)
31157 unsigned j, e = d->perm[i];
31158 for (j = 0; j < eltsz; ++j)
31159 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
31162 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
31163 vperm = force_reg (V16QImode, vperm);
31165 target = gen_lowpart (V16QImode, d->target);
31166 op0 = gen_lowpart (V16QImode, d->op0);
31167 if (d->op0 == d->op1)
31168 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
31171 op1 = gen_lowpart (V16QImode, d->op1);
31172 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
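/* Worked example of the control-vector construction above: for
   V4SImode (eltsz == 4) and perm = { 1, 0, 3, 2 }, each element
   index e expands to byte indices e*4 .. e*4+3, giving

     { 4,5,6,7,  0,1,2,3,  12,13,14,15,  8,9,10,11 }

   as the V16QImode constant fed to pshufb (or to vpperm in the
   two-operand case, where byte indices 16..31 select from op1). */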
31178 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
31179 in a single instruction. */
31182 expand_vec_perm_1 (struct expand_vec_perm_d *d)
31184 unsigned i, nelt = d->nelt;
31185 unsigned char perm2[MAX_VECT_LEN];
31187 /* Check plain VEC_SELECT first, because AVX has instructions that could
31188 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
31189 input where SEL+CONCAT may not. */
31190 if (d->op0 == d->op1)
31192 int mask = nelt - 1;
31194 for (i = 0; i < nelt; i++)
31195 perm2[i] = d->perm[i] & mask;
31197 if (expand_vselect (d->target, d->op0, perm2, nelt))
31200 /* There are plenty of patterns in sse.md that are written for
31201 SEL+CONCAT and are not replicated for a single op. Perhaps
31202 that should be changed, to avoid the nastiness here. */
31204 /* Recognize interleave style patterns, which means incrementing
31205 every other permutation operand. */
31206 for (i = 0; i < nelt; i += 2)
31208 perm2[i] = d->perm[i] & mask;
31209 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
31211 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
31214 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
31217 for (i = 0; i < nelt; i += 4)
31219 perm2[i + 0] = d->perm[i + 0] & mask;
31220 perm2[i + 1] = d->perm[i + 1] & mask;
31221 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
31222 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
31225 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
31230 /* Finally, try the fully general two operand permute. */
31231 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
31234 /* Recognize interleave style patterns with reversed operands. */
31235 if (d->op0 != d->op1)
31237 for (i = 0; i < nelt; ++i)
31239 unsigned e = d->perm[i];
31247 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
31251 /* Try the SSE4.1 blend variable merge instructions. */
31252 if (expand_vec_perm_blend (d))
31255 /* Try one of the AVX vpermil variable permutations. */
31256 if (expand_vec_perm_vpermil (d))
31259 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
31260 if (expand_vec_perm_pshufb (d))
31266 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
31267 in terms of a pair of pshuflw + pshufhw instructions. */
31270 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
31272 unsigned char perm2[MAX_VECT_LEN];
31276 if (d->vmode != V8HImode || d->op0 != d->op1)
31279 /* The two permutations only operate in 64-bit lanes. */
31280 for (i = 0; i < 4; ++i)
31281 if (d->perm[i] >= 4)
31283 for (i = 4; i < 8; ++i)
31284 if (d->perm[i] < 4)
31290 /* Emit the pshuflw. */
31291 memcpy (perm2, d->perm, 4);
31292 for (i = 4; i < 8; ++i)
31294 ok = expand_vselect (d->target, d->op0, perm2, 8);
31297 /* Emit the pshufhw. */
31298 memcpy (perm2 + 4, d->perm + 4, 4);
31299 for (i = 0; i < 4; ++i)
31301 ok = expand_vselect (d->target, d->target, perm2, 8);
31307 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
31308 the permutation using the SSSE3 palignr instruction. This succeeds
31309 when all of the elements in PERM fit within one vector and we merely
31310 need to shift them down so that a single vector permutation has a
31311 chance to succeed. */
31314 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
31316 unsigned i, nelt = d->nelt;
31321 /* Even with AVX, palignr only operates on 128-bit vectors. */
31322 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
31325 min = nelt, max = 0;
31326 for (i = 0; i < nelt; ++i)
31328 unsigned e = d->perm[i];
31334 if (min == 0 || max - min >= nelt)
31337 /* Given that we have SSSE3, we know we'll be able to implement the
31338 single operand permutation after the palignr with pshufb. */
31342 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
31343 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
31344 gen_lowpart (TImode, d->op1),
31345 gen_lowpart (TImode, d->op0), shift));
31347 d->op0 = d->op1 = d->target;
31350 for (i = 0; i < nelt; ++i)
31352 unsigned e = d->perm[i] - min;
31358 /* Test for the degenerate case where the alignment by itself
31359 produces the desired permutation. */
31363 ok = expand_vec_perm_1 (d);
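/* Worked example: for V8HImode with perm = { 3 4 5 6 7 8 9 10 }, the
   elements span both operands but fit in a single vector once
   shifted: min == 3, so the palignr above shifts the op1:op0 byte
   pair down by 3 * 16 == 48 bits, and the residual permutation
   becomes the identity { 0 1 2 3 4 5 6 7 }, i.e. the degenerate
   in-order case tested just above. */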
31369 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
31370 a two vector permutation into a single vector permutation by using
31371 an interleave operation to merge the vectors. */
31374 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
31376 struct expand_vec_perm_d dremap, dfinal;
31377 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
31378 unsigned contents, h1, h2, h3, h4;
31379 unsigned char remap[2 * MAX_VECT_LEN];
31383 if (d->op0 == d->op1)
31386 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
31387 lanes. We can use similar techniques with the vperm2f128 instruction,
31388 but it requires slightly different logic. */
31389 if (GET_MODE_SIZE (d->vmode) != 16)
31392 /* Examine from whence the elements come. */
31394 for (i = 0; i < nelt; ++i)
31395 contents |= 1u << d->perm[i];
31397 /* Split the two input vectors into 4 halves. */
31398 h1 = (1u << nelt2) - 1;
31403 memset (remap, 0xff, sizeof (remap));
31406 /* If the elements come from the low halves, use interleave low; similarly
31407 for interleave high. If the elements are from mismatched halves, we
31408 can use shufps for V4SF/V4SI or do a DImode shuffle. */
31409 if ((contents & (h1 | h3)) == contents)
31411 for (i = 0; i < nelt2; ++i)
31414 remap[i + nelt] = i * 2 + 1;
31415 dremap.perm[i * 2] = i;
31416 dremap.perm[i * 2 + 1] = i + nelt;
31419 else if ((contents & (h2 | h4)) == contents)
31421 for (i = 0; i < nelt2; ++i)
31423 remap[i + nelt2] = i * 2;
31424 remap[i + nelt + nelt2] = i * 2 + 1;
31425 dremap.perm[i * 2] = i + nelt2;
31426 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
31429 else if ((contents & (h1 | h4)) == contents)
31431 for (i = 0; i < nelt2; ++i)
31434 remap[i + nelt + nelt2] = i + nelt2;
31435 dremap.perm[i] = i;
31436 dremap.perm[i + nelt2] = i + nelt + nelt2;
31440 dremap.vmode = V2DImode;
31442 dremap.perm[0] = 0;
31443 dremap.perm[1] = 3;
31446 else if ((contents & (h2 | h3)) == contents)
31448 for (i = 0; i < nelt2; ++i)
31450 remap[i + nelt2] = i;
31451 remap[i + nelt] = i + nelt2;
31452 dremap.perm[i] = i + nelt2;
31453 dremap.perm[i + nelt2] = i + nelt;
31457 dremap.vmode = V2DImode;
31459 dremap.perm[0] = 1;
31460 dremap.perm[1] = 2;
31466 /* Use the remapping array set up above to move the elements from their
31467 swizzled locations into their final destinations. */
31469 for (i = 0; i < nelt; ++i)
31471 unsigned e = remap[d->perm[i]];
31472 gcc_assert (e < nelt);
31473 dfinal.perm[i] = e;
31475 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
31476 dfinal.op1 = dfinal.op0;
31477 dremap.target = dfinal.op0;
31479 /* Test if the final remap can be done with a single insn. For V4SFmode or
31480 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
31482 ok = expand_vec_perm_1 (&dfinal);
31483 seq = get_insns ();
31489 if (dremap.vmode != dfinal.vmode)
31491 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
31492 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
31493 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
31496 ok = expand_vec_perm_1 (&dremap);
31503 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
31504 permutation with two pshufb insns and an ior. We should have already
31505 failed all two instruction sequences. */
31508 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
31510 rtx rperm[2][16], vperm, l, h, op, m128;
31511 unsigned int i, nelt, eltsz;
31513 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
31515 gcc_assert (d->op0 != d->op1);
31518 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
31520 /* Generate two permutation masks. If the required element is within
31521 the given vector it is shuffled into the proper lane. If the required
31522 element is in the other vector, force a zero into the lane by setting
31523 bit 7 in the permutation mask. */
31524 m128 = GEN_INT (-128);
31525 for (i = 0; i < nelt; ++i)
31527 unsigned j, e = d->perm[i];
31528 unsigned which = (e >= nelt);
31532 for (j = 0; j < eltsz; ++j)
31534 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
31535 rperm[1-which][i*eltsz + j] = m128;
31539 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
31540 vperm = force_reg (V16QImode, vperm);
31542 l = gen_reg_rtx (V16QImode);
31543 op = gen_lowpart (V16QImode, d->op0);
31544 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
31546 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
31547 vperm = force_reg (V16QImode, vperm);
31549 h = gen_reg_rtx (V16QImode);
31550 op = gen_lowpart (V16QImode, d->op1);
31551 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
31553 op = gen_lowpart (V16QImode, d->target);
31554 emit_insn (gen_iorv16qi3 (op, l, h));
31559 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
31560 and extract-odd permutations. */
31563 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
31565 rtx t1, t2, t3, t4;
31570 t1 = gen_reg_rtx (V4DFmode);
31571 t2 = gen_reg_rtx (V4DFmode);
31573 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
31574 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
31575 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
31577 /* Now an unpck[lh]pd will produce the result required. */
31579 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
31581 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
31587 static const unsigned char perm1[8] = { 0, 2, 1, 3, 5, 6, 5, 7 };
31588 static const unsigned char perme[8] = { 0, 1, 8, 9, 4, 5, 12, 13 };
31589 static const unsigned char permo[8] = { 2, 3, 10, 11, 6, 7, 14, 15 };
31591 t1 = gen_reg_rtx (V8SFmode);
31592 t2 = gen_reg_rtx (V8SFmode);
31593 t3 = gen_reg_rtx (V8SFmode);
31594 t4 = gen_reg_rtx (V8SFmode);
31596 /* Shuffle within the 128-bit lanes to produce:
31597 { 0 2 1 3 4 6 5 7 } and { 8 a 9 b c e d f }. */
31598 expand_vselect (t1, d->op0, perm1, 8);
31599 expand_vselect (t2, d->op1, perm1, 8);
31601 /* Shuffle the lanes around to produce:
31602 { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }. */
31603 emit_insn (gen_avx_vperm2f128v8sf3 (t3, t1, t2, GEN_INT (0x20)));
31604 emit_insn (gen_avx_vperm2f128v8sf3 (t4, t1, t2, GEN_INT (0x31)));
31606 /* Now a vpermil2p will produce the result required. */
31607 /* ??? The vpermil2p requires a vector constant. Another option
31608 is a unpck[lh]ps to merge the two vectors to produce
31609 { 0 4 2 6 8 c a e } or { 1 5 3 7 9 d b f }. Then use another
31610 vpermilps to get the elements into the final order. */
31613 memcpy (d->perm, odd ? permo: perme, 8);
31614 expand_vec_perm_vpermil (d);
31622 /* These are always directly implementable by expand_vec_perm_1. */
31623 gcc_unreachable ();
31627 return expand_vec_perm_pshufb2 (d);
31630 /* We need 2*log2(N)-1 operations to achieve odd/even
31631 with interleave. */
31632 t1 = gen_reg_rtx (V8HImode);
31633 t2 = gen_reg_rtx (V8HImode);
31634 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
31635 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
31636 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
31637 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
31639 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
31641 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
31648 return expand_vec_perm_pshufb2 (d);
31651 t1 = gen_reg_rtx (V16QImode);
31652 t2 = gen_reg_rtx (V16QImode);
31653 t3 = gen_reg_rtx (V16QImode);
31654 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
31655 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
31656 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
31657 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
31658 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
31659 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
31661 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
31663 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
31669 gcc_unreachable ();
31675 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
31676 extract-even and extract-odd permutations. */
31679 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
31681 unsigned i, odd, nelt = d->nelt;
31684 if (odd != 0 && odd != 1)
31687 for (i = 1; i < nelt; ++i)
31688 if (d->perm[i] != 2 * i + odd)
31691 return expand_vec_perm_even_odd_1 (d, odd);
31694 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
31695 permutations. We assume that expand_vec_perm_1 has already failed. */
31698 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
31700 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
31701 enum machine_mode vmode = d->vmode;
31702 unsigned char perm2[4];
31710 /* These are special-cased in sse.md so that we can optionally
31711 use the vbroadcast instruction. They expand to two insns
31712 if the input happens to be in a register. */
31713 gcc_unreachable ();
31719 /* These are always implementable using standard shuffle patterns. */
31720 gcc_unreachable ();
31724 /* These can be implemented via interleave. We save one insn by
31725 stopping once we have promoted to V4SImode and then use pshufd. */
31728 optab otab = vec_interleave_low_optab;
31732 otab = vec_interleave_high_optab;
31737 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
31738 vmode = get_mode_wider_vector (vmode);
31739 op0 = gen_lowpart (vmode, op0);
31741 while (vmode != V4SImode);
31743 memset (perm2, elt, 4);
31744 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
31749 gcc_unreachable ();
31753 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
31754 broadcast permutations. */
31757 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
31759 unsigned i, elt, nelt = d->nelt;
31761 if (d->op0 != d->op1)
31765 for (i = 1; i < nelt; ++i)
31766 if (d->perm[i] != elt)
31769 return expand_vec_perm_broadcast_1 (d);
31772 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
31773 With all of the interface bits taken care of, perform the expansion
31774 in D and return true on success. */
31777 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
31779 /* Try a single instruction expansion. */
31780 if (expand_vec_perm_1 (d))
31783 /* Try sequences of two instructions. */
31785 if (expand_vec_perm_pshuflw_pshufhw (d))
31788 if (expand_vec_perm_palignr (d))
31791 if (expand_vec_perm_interleave2 (d))
31794 if (expand_vec_perm_broadcast (d))
31797 /* Try sequences of three instructions. */
31799 if (expand_vec_perm_pshufb2 (d))
31802 /* ??? Look for narrow permutations whose element orderings would
31803 allow the promotion to a wider mode. */
31805 /* ??? Look for sequences of interleave or a wider permute that place
31806 the data into the correct lanes for a half-vector shuffle like
31807 pshuf[lh]w or vpermilps. */
31809 /* ??? Look for sequences of interleave that produce the desired results.
31810 The combinatorics of punpck[lh] get pretty ugly... */
31812 if (expand_vec_perm_even_odd (d))
31818 /* Extract the values from the vector CST into the permutation array in D.
31819 Return 0 on error, 1 if all values from the permutation come from the
31820 first vector, 2 if all values from the second vector, and 3 otherwise. */
31823 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
31825 tree list = TREE_VECTOR_CST_ELTS (cst);
31826 unsigned i, nelt = d->nelt;
31829 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
31831 unsigned HOST_WIDE_INT e;
31833 if (!host_integerp (TREE_VALUE (list), 1))
31835 e = tree_low_cst (TREE_VALUE (list), 1);
31839 ret |= (e < nelt ? 1 : 2);
31842 gcc_assert (list == NULL);
31844 /* For all elements that come from the second vector, fold them into the first. */
31846 for (i = 0; i < nelt; ++i)
31847 d->perm[i] -= nelt;
31853 ix86_expand_vec_perm_builtin (tree exp)
31855 struct expand_vec_perm_d d;
31856 tree arg0, arg1, arg2;
31858 arg0 = CALL_EXPR_ARG (exp, 0);
31859 arg1 = CALL_EXPR_ARG (exp, 1);
31860 arg2 = CALL_EXPR_ARG (exp, 2);
31862 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
31863 d.nelt = GET_MODE_NUNITS (d.vmode);
31864 d.testing_p = false;
31865 gcc_assert (VECTOR_MODE_P (d.vmode));
31867 if (TREE_CODE (arg2) != VECTOR_CST)
31869 error_at (EXPR_LOCATION (exp),
31870 "vector permutation requires vector constant");
31874 switch (extract_vec_perm_cst (&d, arg2))
31880 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
31884 if (!operand_equal_p (arg0, arg1, 0))
31886 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
31887 d.op0 = force_reg (d.vmode, d.op0);
31888 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
31889 d.op1 = force_reg (d.vmode, d.op1);
31893 /* The elements of PERM do not suggest that only the first operand
31894 is used, but both operands are identical. Allow easier matching
31895 of the permutation by folding the permutation into the single input register. */
31898 unsigned i, nelt = d.nelt;
31899 for (i = 0; i < nelt; ++i)
31900 if (d.perm[i] >= nelt)
31906 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
31907 d.op0 = force_reg (d.vmode, d.op0);
31912 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
31913 d.op0 = force_reg (d.vmode, d.op0);
31918 d.target = gen_reg_rtx (d.vmode);
31919 if (ix86_expand_vec_perm_builtin_1 (&d))
31922 /* For compiler generated permutations, we should never get here, because
31923 the compiler should also be checking the ok hook. But since this is a
31924 builtin the user has access to, don't abort. */
31928 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
31931 sorry ("vector permutation (%d %d %d %d)",
31932 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
31935 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
31936 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
31937 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
31940 sorry ("vector permutation "
31941 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
31942 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
31943 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
31944 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
31945 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
31948 gcc_unreachable ();
31951 return CONST0_RTX (d.vmode);
31954 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
31957 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
31959 struct expand_vec_perm_d d;
31963 d.vmode = TYPE_MODE (vec_type);
31964 d.nelt = GET_MODE_NUNITS (d.vmode);
31965 d.testing_p = true;
31967 /* Given sufficient ISA support we can just return true here
31968 for selected vector modes. */
31969 if (GET_MODE_SIZE (d.vmode) == 16)
31971 /* All implementable with a single vpperm insn. */
31974 /* All implementable with 2 pshufb + 1 ior. */
31977 /* All implementable with shufpd or unpck[lh]pd. */
31982 vec_mask = extract_vec_perm_cst (&d, mask);
31984 /* This hook cannot be called in response to something that the
31985 user does (unlike the builtin expander) so we shouldn't ever see
31986 an error generated from the extract. */
31987 gcc_assert (vec_mask > 0 && vec_mask <= 3);
31988 one_vec = (vec_mask != 3);
31990 /* Implementable with shufps or pshufd. */
31991 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
31994 /* Otherwise we have to go through the motions and see if we can
31995 figure out how to generate the requested permutation. */
31996 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
31997 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
31999 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
32002 ret = ix86_expand_vec_perm_builtin_1 (&d);
32009 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
32011 struct expand_vec_perm_d d;
32017 d.vmode = GET_MODE (targ);
32018 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
32019 d.testing_p = false;
32021 for (i = 0; i < nelt; ++i)
32022 d.perm[i] = i * 2 + odd;
32024 /* We'll either be able to implement the permutation directly... */
32025 if (expand_vec_perm_1 (&d))
32028 /* ... or we use the special-case patterns. */
32029 expand_vec_perm_even_odd_1 (&d, odd);
32032 /* This function returns the calling-ABI-specific va_list type node,
32033 i.e. the va_list type specific to FNDECL. */
32036 ix86_fn_abi_va_list (tree fndecl)
32039 return va_list_type_node;
32040 gcc_assert (fndecl != NULL_TREE);
32042 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
32043 return ms_va_list_type_node;
32045 return sysv_va_list_type_node;
32048 /* Returns the canonical va_list type specified by TYPE. If there
32049 is no valid TYPE provided, it returns NULL_TREE. */
32052 ix86_canonical_va_list_type (tree type)
32056 /* Resolve references and pointers to va_list type. */
32057 if (TREE_CODE (type) == MEM_REF)
32058 type = TREE_TYPE (type);
32059 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE(type)))
32060 type = TREE_TYPE (type);
32061 else if (POINTER_TYPE_P (type) && TREE_CODE (TREE_TYPE (type)) == ARRAY_TYPE)
32062 type = TREE_TYPE (type);
32066 wtype = va_list_type_node;
32067 gcc_assert (wtype != NULL_TREE);
32069 if (TREE_CODE (wtype) == ARRAY_TYPE)
32071 /* If va_list is an array type, the argument may have decayed
32072 to a pointer type, e.g. by being passed to another function.
32073 In that case, unwrap both types so that we can compare the
32074 underlying records. */
32075 if (TREE_CODE (htype) == ARRAY_TYPE
32076 || POINTER_TYPE_P (htype))
32078 wtype = TREE_TYPE (wtype);
32079 htype = TREE_TYPE (htype);
32082 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
32083 return va_list_type_node;
32084 wtype = sysv_va_list_type_node;
32085 gcc_assert (wtype != NULL_TREE);
32087 if (TREE_CODE (wtype) == ARRAY_TYPE)
32089 /* If va_list is an array type, the argument may have decayed
32090 to a pointer type, e.g. by being passed to another function.
32091 In that case, unwrap both types so that we can compare the
32092 underlying records. */
32093 if (TREE_CODE (htype) == ARRAY_TYPE
32094 || POINTER_TYPE_P (htype))
32096 wtype = TREE_TYPE (wtype);
32097 htype = TREE_TYPE (htype);
32100 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
32101 return sysv_va_list_type_node;
32102 wtype = ms_va_list_type_node;
32103 gcc_assert (wtype != NULL_TREE);
32105 if (TREE_CODE (wtype) == ARRAY_TYPE)
32107 /* If va_list is an array type, the argument may have decayed
32108 to a pointer type, e.g. by being passed to another function.
32109 In that case, unwrap both types so that we can compare the
32110 underlying records. */
32111 if (TREE_CODE (htype) == ARRAY_TYPE
32112 || POINTER_TYPE_P (htype))
32114 wtype = TREE_TYPE (wtype);
32115 htype = TREE_TYPE (htype);
32118 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
32119 return ms_va_list_type_node;
32122 return std_canonical_va_list_type (type);
32125 /* Iterate through the target-specific builtin types for va_list.
32126 IDX denotes the iterator, *PTREE is set to the result type of
32127 the va_list builtin, and *PNAME to its internal type.
32128 Returns zero if there is no element for this index, otherwise
32129 IDX should be increased upon the next call.
32130 Note, do not iterate a base builtin's name like __builtin_va_list.
32131 Used from c_common_nodes_and_builtins. */
32134 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
32144 *ptree = ms_va_list_type_node;
32145 *pname = "__builtin_ms_va_list";
32149 *ptree = sysv_va_list_type_node;
32150 *pname = "__builtin_sysv_va_list";
32158 #undef TARGET_SCHED_DISPATCH
32159 #define TARGET_SCHED_DISPATCH has_dispatch
32160 #undef TARGET_SCHED_DISPATCH_DO
32161 #define TARGET_SCHED_DISPATCH_DO do_dispatch
32163 /* The size of the dispatch window is the total number of bytes of
32164 object code allowed in a window. */
32165 #define DISPATCH_WINDOW_SIZE 16
32167 /* Number of dispatch windows considered for scheduling. */
32168 #define MAX_DISPATCH_WINDOWS 3
32170 /* Maximum number of instructions in a window. */
32173 /* Maximum number of immediate operands in a window. */
32176 /* Maximum number of immediate bits allowed in a window. */
32177 #define MAX_IMM_SIZE 128
32179 /* Maximum number of 32 bit immediates allowed in a window. */
32180 #define MAX_IMM_32 4
32182 /* Maximum number of 64 bit immediates allowed in a window. */
32183 #define MAX_IMM_64 2
32185 /* Maximum total of loads or prefetches allowed in a window. */
32188 /* Maximum total of stores allowed in a window. */
32189 #define MAX_STORE 1
32195 /* Dispatch groups. Instructions that affect the mix in a dispatch window. */
32196 enum dispatch_group {
32211 /* Number of allowable groups in a dispatch window. It is an array
32212 indexed by the dispatch_group enum. 100 is used as a big number,
32213 because the number of these kinds of operations does not have any
32214 effect in the dispatch window, but we need them for other reasons in the table. */
32216 static unsigned int num_allowable_groups[disp_last] = {
32217 0, 2, 1, 1, 2, 4, 4, 2, 1, BIG, BIG
32220 char group_name[disp_last + 1][16] = {
32221 "disp_no_group", "disp_load", "disp_store", "disp_load_store",
32222 "disp_prefetch", "disp_imm", "disp_imm_32", "disp_imm_64",
32223 "disp_branch", "disp_cmp", "disp_jcc", "disp_last"
32226 /* Instruction path. */
32229 path_single, /* Single micro op. */
32230 path_double, /* Double micro op. */
32231 path_multi, /* Instructions with more than 2 micro ops. */
32235 /* sched_insn_info defines a window to the instructions scheduled in
32236 the basic block.  It contains a pointer to the insn_info table and
32237 the instruction scheduled.
32239 Windows are allocated for each basic block and are linked with
32240 the previous window.  */
32241 typedef struct sched_insn_info_s {
32242 rtx insn;
32243 enum dispatch_group group;
32244 enum insn_path path;
32245 int byte_len;
32246 int imm_bytes;
32247 } sched_insn_info;
32249 /* Linked list of dispatch windows.  This is a two-way list of
32250 dispatch windows of a basic block.  It contains information about
32251 the number of uops in the window and the total number of
32252 instructions and of bytes in the object code for this dispatch
32253 window.  */
32254 typedef struct dispatch_windows_s {
32255 int num_insn; /* Number of insn in the window. */
32256 int num_uops; /* Number of uops in the window. */
32257 int window_size; /* Number of bytes in the window. */
32258 int window_num; /* Window number, either 0 or 1.  */
32259 int num_imm; /* Number of immediates in the window.  */
32260 int num_imm_32; /* Number of 32 bit immediates in the window.  */
32261 int num_imm_64; /* Number of 64 bit immediates in the window.  */
32262 int imm_size; /* Total size in bytes of immediates in the window.  */
32263 int num_loads; /* Total memory loads in the window. */
32264 int num_stores; /* Total memory stores in the window. */
32265 int violation; /* Violation exists in window. */
32266 sched_insn_info *window; /* Pointer to the window. */
32267 struct dispatch_windows_s *next;
32268 struct dispatch_windows_s *prev;
32269 } dispatch_windows;
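/* Invariant sketched for clarity (an inference, not a quote from the
   source): dispatch_window_list always holds window 0, and
   dispatch_window_list->next, when non-NULL, is window 1.  Together
   the pair never exceeds 48 bytes of object code, which
   process_end_window below asserts.  */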
32271 /* Immediate values used in an insn.  */
32272 typedef struct imm_info_s
32273 {
32274 int imm;
32275 int imm32;
32276 int imm64;
32277 } imm_info;
32279 static dispatch_windows *dispatch_window_list;
32280 static dispatch_windows *dispatch_window_list1;
32282 /* Get dispatch group of insn. */
32284 static enum dispatch_group
32285 get_mem_group (rtx insn)
32287 enum attr_memory memory;
32289 if (INSN_CODE (insn) < 0)
32290 return disp_no_group;
32291 memory = get_attr_memory (insn);
32292 if (memory == MEMORY_STORE)
32293 return disp_store;
32295 if (memory == MEMORY_LOAD)
32296 return disp_load;
32298 if (memory == MEMORY_BOTH)
32299 return disp_load_store;
32301 return disp_no_group;
32304 /* Return true if insn is a compare instruction. */
32309 enum attr_type type;
32311 type = get_attr_type (insn);
32312 return (type == TYPE_TEST
32313 || type == TYPE_ICMP
32314 || type == TYPE_FCMP
32315 || GET_CODE (PATTERN (insn)) == COMPARE);
32318 /* Return true if a dispatch violation was encountered.  */
32321 dispatch_violation (void)
32323 if (dispatch_window_list->next)
32324 return dispatch_window_list->next->violation;
32325 return dispatch_window_list->violation;
32328 /* Return true if insn is a branch instruction. */
32331 is_branch (rtx insn)
32333 return (CALL_P (insn) || JUMP_P (insn));
32336 /* Return true if insn is a prefetch instruction. */
32339 is_prefetch (rtx insn)
32341 return NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == PREFETCH;
32344 /* This function initializes a dispatch window and the list container holding a
32345 pointer to the window. */
32348 init_window (int window_num)
32350 int i;
32351 dispatch_windows *new_list;
32353 if (window_num == 0)
32354 new_list = dispatch_window_list;
32355 else
32356 new_list = dispatch_window_list1;
32358 new_list->num_insn = 0;
32359 new_list->num_uops = 0;
32360 new_list->window_size = 0;
32361 new_list->next = NULL;
32362 new_list->prev = NULL;
32363 new_list->window_num = window_num;
32364 new_list->num_imm = 0;
32365 new_list->num_imm_32 = 0;
32366 new_list->num_imm_64 = 0;
32367 new_list->imm_size = 0;
32368 new_list->num_loads = 0;
32369 new_list->num_stores = 0;
32370 new_list->violation = false;
32372 for (i = 0; i < MAX_INSN; i++)
32374 new_list->window[i].insn = NULL;
32375 new_list->window[i].group = disp_no_group;
32376 new_list->window[i].path = no_path;
32377 new_list->window[i].byte_len = 0;
32378 new_list->window[i].imm_bytes = 0;
32383 /* This function allocates and initializes a dispatch window and the
32384 list container holding a pointer to the window. */
32386 static dispatch_windows *
32387 allocate_window (void)
32389 dispatch_windows *new_list = XNEW (struct dispatch_windows_s);
32390 new_list->window = XNEWVEC (struct sched_insn_info_s, MAX_INSN + 1);
32392 return new_list;
32395 /* This routine initializes the dispatch scheduling information. It
32396 initiates building dispatch scheduler tables and constructs the
32397 first dispatch window. */
32400 init_dispatch_sched (void)
32402 /* Allocate a dispatch list and a window. */
32403 dispatch_window_list = allocate_window ();
32404 dispatch_window_list1 = allocate_window ();
32405 init_window (0);
32406 init_window (1);
32409 /* This function returns true if a branch is detected.  End of a basic block
32410 does not have to be a branch, but here we assume only branches end a
32411 basic block.  */
32414 is_end_basic_block (enum dispatch_group group)
32416 return group == disp_branch;
32419 /* This function is called when the end of a window processing is reached. */
32422 process_end_window (void)
32424 gcc_assert (dispatch_window_list->num_insn <= MAX_INSN);
32425 if (dispatch_window_list->next)
32427 gcc_assert (dispatch_window_list1->num_insn <= MAX_INSN);
32428 gcc_assert (dispatch_window_list->window_size
32429 + dispatch_window_list1->window_size <= 48);
32430 init_window (1);
32432 init_window (0);
32435 /* Allocates a new dispatch window and adds it to WINDOW_LIST.
32436 WINDOW_NUM is either 0 or 1.  A maximum of two windows are generated
32437 for 48 bytes of instructions.  Note that these are not hardware
32438 dispatch windows, whose size is DISPATCH_WINDOW_SIZE.  */
32440 static dispatch_windows *
32441 allocate_next_window (int window_num)
32443 if (window_num == 0)
32445 if (dispatch_window_list->next)
32446 init_window (1);
32447 init_window (0);
32448 return dispatch_window_list;
32451 dispatch_window_list->next = dispatch_window_list1;
32452 dispatch_window_list1->prev = dispatch_window_list;
32454 return dispatch_window_list1;
32457 /* Increment the number of immediate operands of an instruction. */
32460 find_constant_1 (rtx *in_rtx, imm_info *imm_values)
32465 switch (GET_CODE (*in_rtx))
32467 case CONST:
32468 case SYMBOL_REF:
32469 case CONST_INT:
32470 (imm_values->imm)++;
32471 if (x86_64_immediate_operand (*in_rtx, SImode))
32472 (imm_values->imm32)++;
32473 else
32474 (imm_values->imm64)++;
32475 break;
32477 case CONST_DOUBLE:
32478 (imm_values->imm)++;
32479 (imm_values->imm64)++;
32480 break;
32482 case CODE_LABEL:
32483 if (LABEL_KIND (*in_rtx) == LABEL_NORMAL)
32485 (imm_values->imm)++;
32486 (imm_values->imm32)++;
32488 break;
32490 default:
32491 break;
32497 /* Compute number of immediate operands of an instruction. */
32500 find_constant (rtx in_rtx, imm_info *imm_values)
32502 for_each_rtx (INSN_P (in_rtx) ? &PATTERN (in_rtx) : &in_rtx,
32503 (rtx_function) find_constant_1, (void *) imm_values);
32506 /* Return total size of immediate operands of an instruction along with number
32507 of corresponding immediate operands.  It initializes its parameters to zero
32508 before calling FIND_CONSTANT.
32509 INSN is the input instruction.  IMM is the total of immediates.
32510 IMM32 is the number of 32 bit immediates.  IMM64 is the number of 64
32511 bit immediates.  */
32514 get_num_immediates (rtx insn, int *imm, int *imm32, int *imm64)
32516 imm_info imm_values = {0, 0, 0};
32518 find_constant (insn, &imm_values);
32519 *imm = imm_values.imm;
32520 *imm32 = imm_values.imm32;
32521 *imm64 = imm_values.imm64;
32522 return imm_values.imm32 * 4 + imm_values.imm64 * 8;
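/* Worked example (illustrative): for an insn carrying one 32-bit and
   one 64-bit immediate, the function stores *IMM = 2, *IMM32 = 1,
   *IMM64 = 1 and returns 1*4 + 1*8 = 12 bytes.  */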
32525 /* This function indicates if an operand of an instruction is an
32526 immediate.  */
32529 has_immediate (rtx insn)
32531 int num_imm_operand;
32532 int num_imm32_operand;
32533 int num_imm64_operand;
32536 return get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
32537 &num_imm64_operand);
32541 /* Return single or double path for instructions. */
32543 static enum insn_path
32544 get_insn_path (rtx insn)
32546 enum attr_amdfam10_decode path = get_attr_amdfam10_decode (insn);
32548 if ((int) path == 0)
32549 return path_single;
32551 if ((int) path == 1)
32552 return path_double;
32554 return path_multi;
32557 /* Return insn dispatch group. */
32559 static enum dispatch_group
32560 get_insn_group (rtx insn)
32562 enum dispatch_group group = get_mem_group (insn);
32564 if (group)
32565 return group;
32566 if (is_branch (insn))
32567 return disp_branch;
32569 if (is_cmp (insn))
32570 return disp_cmp;
32572 if (has_immediate (insn))
32573 return disp_imm;
32575 if (is_prefetch (insn))
32576 return disp_prefetch;
32578 return disp_no_group;
32581 /* Count number of GROUP restricted instructions in a dispatch
32582 window WINDOW_LIST. */
32585 count_num_restricted (rtx insn, dispatch_windows *window_list)
32587 enum dispatch_group group = get_insn_group (insn);
32588 int imm_size;
32589 int num_imm_operand;
32590 int num_imm32_operand;
32591 int num_imm64_operand;
32593 if (group == disp_no_group)
32594 return 0;
32596 if (group == disp_imm)
32598 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
32599 &num_imm64_operand);
32600 if (window_list->imm_size + imm_size > MAX_IMM_SIZE
32601 || num_imm_operand + window_list->num_imm > MAX_IMM
32602 || (num_imm32_operand > 0
32603 && (window_list->num_imm_32 + num_imm32_operand > MAX_IMM_32
32604 || window_list->num_imm_64 * 2 + num_imm32_operand > MAX_IMM_32))
32605 || (num_imm64_operand > 0
32606 && (window_list->num_imm_64 + num_imm64_operand > MAX_IMM_64
32607 || window_list->num_imm_32 + num_imm64_operand * 2 > MAX_IMM_32))
32608 || (window_list->imm_size + imm_size == MAX_IMM_SIZE
32609 && num_imm64_operand > 0
32610 && ((window_list->num_imm_64 > 0
32611 && window_list->num_insn >= 2)
32612 || window_list->num_insn >= 3)))
32613 return BIG;
32615 return 1;
32618 if ((group == disp_load_store
32619 && (window_list->num_loads >= MAX_LOAD
32620 || window_list->num_stores >= MAX_STORE))
32621 || ((group == disp_load
32622 || group == disp_prefetch)
32623 && window_list->num_loads >= MAX_LOAD)
32624 || (group == disp_store
32625 && window_list->num_stores >= MAX_STORE))
32626 return BIG;
32628 return 1;
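/* Note on the sentinel (added for clarity): BIG (100) exceeds every
   entry of num_allowable_groups, so returning it guarantees that the
   num_restrict > num_allowable_groups[group] test in
   fits_dispatch_window rejects the insn for this window.  */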
32631 /* This function returns true if insn satisfies dispatch rules on the
32632 last window scheduled. */
32635 fits_dispatch_window (rtx insn)
32637 dispatch_windows *window_list = dispatch_window_list;
32638 dispatch_windows *window_list_next = dispatch_window_list->next;
32639 unsigned int num_restrict;
32640 enum dispatch_group group = get_insn_group (insn);
32641 enum insn_path path = get_insn_path (insn);
32642 int sum;
32644 /* Make disp_cmp and disp_jcc get scheduled at the latest.  These
32645 instructions should be given the lowest priority in the
32646 scheduling process in the Haifa scheduler to make sure they will be
32647 scheduled in the same dispatch window as the reference to them.  */
32648 if (group == disp_jcc || group == disp_cmp)
32649 return false;
32651 /* Check nonrestricted.  */
32652 if (group == disp_no_group || group == disp_branch)
32653 return true;
32655 /* Get last dispatch window.  */
32656 if (window_list_next)
32657 window_list = window_list_next;
32659 if (window_list->window_num == 1)
32661 sum = window_list->prev->window_size + window_list->window_size;
32663 if (sum == 32
32664 || (min_insn_size (insn) + sum) >= 48)
32665 /* Window 1 is full.  Go for next window.  */
32666 return true;
32669 num_restrict = count_num_restricted (insn, window_list);
32671 if (num_restrict > num_allowable_groups[group])
32672 return false;
32674 /* See if it fits in the first window.  */
32675 if (window_list->window_num == 0)
32677 /* The first window should have only single and double path
32678 uops.  */
32679 if (path == path_double
32680 && (window_list->num_uops + 2) > MAX_INSN)
32681 return false;
32682 else if (path != path_single)
32683 return false;
32686 return true;
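/* Worked example (illustrative): with MAX_STORE == 1, a second store
   aimed at the current window makes count_num_restricted return BIG,
   which is larger than num_allowable_groups[disp_store] == 1, so the
   store fails the check above and is deferred to a fresh window.  */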
32688 /* Add an instruction INSN with NUM_UOPS micro-operations to the
32689 dispatch window WINDOW_LIST. */
32692 add_insn_window (rtx insn, dispatch_windows *window_list, int num_uops)
32694 int byte_len = min_insn_size (insn);
32695 int num_insn = window_list->num_insn;
32696 int imm_size;
32697 sched_insn_info *window = window_list->window;
32698 enum dispatch_group group = get_insn_group (insn);
32699 enum insn_path path = get_insn_path (insn);
32700 int num_imm_operand;
32701 int num_imm32_operand;
32702 int num_imm64_operand;
32704 if (!window_list->violation && group != disp_cmp
32705 && !fits_dispatch_window (insn))
32706 window_list->violation = true;
32708 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
32709 &num_imm64_operand);
32711 /* Initialize window with new instruction. */
32712 window[num_insn].insn = insn;
32713 window[num_insn].byte_len = byte_len;
32714 window[num_insn].group = group;
32715 window[num_insn].path = path;
32716 window[num_insn].imm_bytes = imm_size;
32718 window_list->window_size += byte_len;
32719 window_list->num_insn = num_insn + 1;
32720 window_list->num_uops = window_list->num_uops + num_uops;
32721 window_list->imm_size += imm_size;
32722 window_list->num_imm += num_imm_operand;
32723 window_list->num_imm_32 += num_imm32_operand;
32724 window_list->num_imm_64 += num_imm64_operand;
32726 if (group == disp_store)
32727 window_list->num_stores += 1;
32728 else if (group == disp_load
32729 || group == disp_prefetch)
32730 window_list->num_loads += 1;
32731 else if (group == disp_load_store)
32733 window_list->num_stores += 1;
32734 window_list->num_loads += 1;
32738 /* Adds a scheduled instruction, INSN, to the current dispatch window.
32739 If the total bytes of instructions or the number of instructions in
32740 the window exceed the allowable limits, it allocates a new window.  */
32743 add_to_dispatch_window (rtx insn)
32745 int byte_len;
32746 dispatch_windows *window_list;
32747 dispatch_windows *next_list;
32748 dispatch_windows *window0_list;
32749 enum insn_path path;
32750 enum dispatch_group insn_group;
32751 int insn_num_uops;
32752 int num_insn;
32753 int num_uops;
32754 int window_num;
32755 int sum;
32756 bool insn_fits;
32758 if (INSN_CODE (insn) < 0)
32759 return;
32761 byte_len = min_insn_size (insn);
32762 window_list = dispatch_window_list;
32763 next_list = window_list->next;
32764 path = get_insn_path (insn);
32765 insn_group = get_insn_group (insn);
32767 /* Get the last dispatch window.  */
32768 if (next_list)
32769 window_list = dispatch_window_list->next;
32771 if (path == path_single)
32772 insn_num_uops = 1;
32773 else if (path == path_double)
32774 insn_num_uops = 2;
32775 else
32776 insn_num_uops = (int) path;
32778 /* If current window is full, get a new window.
32779 Window number zero is full, if MAX_INSN uops are scheduled in it.
32780 Window number one is full, if window zero's bytes plus window
32781 one's bytes reach 32, or if the bytes of the new instruction added
32782 to the total make it greater than 48, or if it already has MAX_INSN
32783 instructions in it.  */
32784 num_insn = window_list->num_insn;
32785 num_uops = window_list->num_uops;
32786 window_num = window_list->window_num;
32787 insn_fits = fits_dispatch_window (insn);
32789 if (num_insn >= MAX_INSN
32790 || num_uops + insn_num_uops > MAX_INSN
32791 || !(insn_fits))
32793 window_num = ~window_num & 1;
32794 window_list = allocate_next_window (window_num);
32797 if (window_num == 0)
32799 add_insn_window (insn, window_list, insn_num_uops);
32800 if (window_list->num_insn >= MAX_INSN
32801 && insn_group == disp_branch)
32803 process_end_window ();
32804 return;
32807 else if (window_num == 1)
32809 window0_list = window_list->prev;
32810 sum = window0_list->window_size + window_list->window_size;
32811 if (sum == 32
32812 || (byte_len + sum) >= 48)
32814 process_end_window ();
32815 window_list = dispatch_window_list;
32818 add_insn_window (insn, window_list, insn_num_uops);
32820 else
32821 gcc_unreachable ();
32823 if (is_end_basic_block (insn_group))
32825 /* End of basic block is reached; do end-basic-block process.  */
32826 process_end_window ();
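/* Note added for clarity: the toggle "window_num = ~window_num & 1"
   used above flips between the two window numbers, since
   ~0 & 1 == 1 and ~1 & 1 == 0.  */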
32831 /* Print the dispatch window, WINDOW_NUM, to FILE. */
32833 DEBUG_FUNCTION static void
32834 debug_dispatch_window_file (FILE *file, int window_num)
32836 dispatch_windows *list;
32837 int i;
32839 if (window_num == 0)
32840 list = dispatch_window_list;
32842 list = dispatch_window_list1;
32844 fprintf (file, "Window #%d:\n", list->window_num);
32845 fprintf (file, " num_insn = %d, num_uops = %d, window_size = %d\n",
32846 list->num_insn, list->num_uops, list->window_size);
32847 fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
32848 list->num_imm, list->num_imm_32, list->num_imm_64, list->imm_size);
32850 fprintf (file, " num_loads = %d, num_stores = %d\n", list->num_loads,
32851 list->num_stores);
32852 fprintf (file, " insn info:\n");
32854 for (i = 0; i < MAX_INSN; i++)
32856 if (!list->window[i].insn)
32857 break;
32858 fprintf (file, " group[%d] = %s, insn[%d] = %p, path[%d] = %d byte_len[%d] = %d, imm_bytes[%d] = %d\n",
32859 i, group_name[list->window[i].group],
32860 i, (void *)list->window[i].insn,
32861 i, list->window[i].path,
32862 i, list->window[i].byte_len,
32863 i, list->window[i].imm_bytes);
32867 /* Print to stdout a dispatch window. */
32869 DEBUG_FUNCTION void
32870 debug_dispatch_window (int window_num)
32872 debug_dispatch_window_file (stdout, window_num);
32875 /* Print INSN dispatch information to FILE. */
32877 DEBUG_FUNCTION static void
32878 debug_insn_dispatch_info_file (FILE *file, rtx insn)
32880 int byte_len;
32881 enum insn_path path;
32882 enum dispatch_group group;
32883 int imm_size;
32884 int num_imm_operand;
32885 int num_imm32_operand;
32886 int num_imm64_operand;
32888 if (INSN_CODE (insn) < 0)
32889 return;
32891 byte_len = min_insn_size (insn);
32892 path = get_insn_path (insn);
32893 group = get_insn_group (insn);
32894 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
32895 &num_imm64_operand);
32897 fprintf (file, " insn info:\n");
32898 fprintf (file, " group = %s, path = %d, byte_len = %d\n",
32899 group_name[group], path, byte_len);
32900 fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
32901 num_imm_operand, num_imm32_operand, num_imm64_operand, imm_size);
32904 /* Print to STDERR the status of the ready list with respect to
32905 dispatch windows. */
32907 DEBUG_FUNCTION void
32908 debug_ready_dispatch (void)
32910 int i;
32911 int no_ready = number_in_ready ();
32913 fprintf (stdout, "Number of ready: %d\n", no_ready);
32915 for (i = 0; i < no_ready; i++)
32916 debug_insn_dispatch_info_file (stdout, get_ready_element (i));
32919 /* This routine is the driver of the dispatch scheduler. */
32922 do_dispatch (rtx insn, int mode)
32924 if (mode == DISPATCH_INIT)
32925 init_dispatch_sched ();
32926 else if (mode == ADD_TO_DISPATCH_WINDOW)
32927 add_to_dispatch_window (insn);
32930 /* Return TRUE if Dispatch Scheduling is supported. */
32933 has_dispatch (rtx insn, int action)
32935 if (ix86_tune == PROCESSOR_BDVER1 && flag_dispatch_scheduler)
32936 switch (action)
32938 default:
32939 return false;
32941 case IS_DISPATCH_ON:
32942 return true;
32945 case IS_CMP:
32946 return is_cmp (insn);
32948 case DISPATCH_VIOLATION:
32949 return dispatch_violation ();
32951 case FITS_DISPATCH_WINDOW:
32952 return fits_dispatch_window (insn);
32955 return false;
32958 /* ??? No autovectorization into MMX or 3DNOW until we can reliably
32959 place emms and femms instructions. */
32961 static unsigned int
32962 ix86_units_per_simd_word (enum machine_mode mode)
32964 /* Disable double precision vectorizer if needed. */
32965 if (mode == DFmode && !TARGET_VECTORIZE_DOUBLE)
32966 return UNITS_PER_WORD;
32968 #if 0
32969 /* FIXME: AVX has 32byte floating point vector operations and 16byte
32970 integer vector operations.  But vectorizer doesn't support
32971 different sizes for integer and floating point vectors.  We limit
32972 vector size to 16byte.  */
32973 if (TARGET_AVX)
32974 return (mode == DFmode || mode == SFmode) ? 32 : 16;
32975 else
32976 #endif
32977 return TARGET_SSE ? 16 : UNITS_PER_WORD;
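/* Worked example (illustrative): with SSE enabled this returns 16, so
   the vectorizer builds 16-byte vectors such as V4SF or V2DF; without
   SSE it returns UNITS_PER_WORD and vectorization degenerates to
   scalar width.  DFmode is additionally gated on
   TARGET_VECTORIZE_DOUBLE above.  */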
32980 /* Initialize the GCC target structure. */
32981 #undef TARGET_RETURN_IN_MEMORY
32982 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
32984 #undef TARGET_LEGITIMIZE_ADDRESS
32985 #define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address
32987 #undef TARGET_ATTRIBUTE_TABLE
32988 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
32989 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
32990 # undef TARGET_MERGE_DECL_ATTRIBUTES
32991 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
32992 #endif
32994 #undef TARGET_COMP_TYPE_ATTRIBUTES
32995 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
32997 #undef TARGET_INIT_BUILTINS
32998 #define TARGET_INIT_BUILTINS ix86_init_builtins
32999 #undef TARGET_BUILTIN_DECL
33000 #define TARGET_BUILTIN_DECL ix86_builtin_decl
33001 #undef TARGET_EXPAND_BUILTIN
33002 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
33004 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
33005 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
33006 ix86_builtin_vectorized_function
33008 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
33009 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
33011 #undef TARGET_BUILTIN_RECIPROCAL
33012 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
33014 #undef TARGET_ASM_FUNCTION_EPILOGUE
33015 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
33017 #undef TARGET_ENCODE_SECTION_INFO
33018 #ifndef SUBTARGET_ENCODE_SECTION_INFO
33019 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
33020 #else
33021 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
33022 #endif
33024 #undef TARGET_ASM_OPEN_PAREN
33025 #define TARGET_ASM_OPEN_PAREN ""
33026 #undef TARGET_ASM_CLOSE_PAREN
33027 #define TARGET_ASM_CLOSE_PAREN ""
33029 #undef TARGET_ASM_BYTE_OP
33030 #define TARGET_ASM_BYTE_OP ASM_BYTE
33032 #undef TARGET_ASM_ALIGNED_HI_OP
33033 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
33034 #undef TARGET_ASM_ALIGNED_SI_OP
33035 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
33036 #ifdef ASM_QUAD
33037 #undef TARGET_ASM_ALIGNED_DI_OP
33038 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
33039 #endif
33041 #undef TARGET_PROFILE_BEFORE_PROLOGUE
33042 #define TARGET_PROFILE_BEFORE_PROLOGUE ix86_profile_before_prologue
33044 #undef TARGET_ASM_UNALIGNED_HI_OP
33045 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
33046 #undef TARGET_ASM_UNALIGNED_SI_OP
33047 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
33048 #undef TARGET_ASM_UNALIGNED_DI_OP
33049 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
33051 #undef TARGET_PRINT_OPERAND
33052 #define TARGET_PRINT_OPERAND ix86_print_operand
33053 #undef TARGET_PRINT_OPERAND_ADDRESS
33054 #define TARGET_PRINT_OPERAND_ADDRESS ix86_print_operand_address
33055 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
33056 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P ix86_print_operand_punct_valid_p
33057 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
33058 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA i386_asm_output_addr_const_extra
33060 #undef TARGET_SCHED_ADJUST_COST
33061 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
33062 #undef TARGET_SCHED_ISSUE_RATE
33063 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
33064 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
33065 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
33066 ia32_multipass_dfa_lookahead
33068 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
33069 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
33072 #undef TARGET_HAVE_TLS
33073 #define TARGET_HAVE_TLS true
33075 #undef TARGET_CANNOT_FORCE_CONST_MEM
33076 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
33077 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
33078 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
33080 #undef TARGET_DELEGITIMIZE_ADDRESS
33081 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
33083 #undef TARGET_MS_BITFIELD_LAYOUT_P
33084 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
33086 #if TARGET_MACHO
33087 #undef TARGET_BINDS_LOCAL_P
33088 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
33089 #endif
33090 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
33091 #undef TARGET_BINDS_LOCAL_P
33092 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
33093 #endif
33095 #undef TARGET_ASM_OUTPUT_MI_THUNK
33096 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
33097 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
33098 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
33100 #undef TARGET_ASM_FILE_START
33101 #define TARGET_ASM_FILE_START x86_file_start
33103 #undef TARGET_DEFAULT_TARGET_FLAGS
33104 #define TARGET_DEFAULT_TARGET_FLAGS \
33105 (TARGET_DEFAULT \
33106 | TARGET_SUBTARGET_DEFAULT \
33107 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT \
33108 | MASK_FUSED_MADD)
33110 #undef TARGET_HANDLE_OPTION
33111 #define TARGET_HANDLE_OPTION ix86_handle_option
33113 #undef TARGET_OPTION_OVERRIDE
33114 #define TARGET_OPTION_OVERRIDE ix86_option_override
33115 #undef TARGET_OPTION_OPTIMIZATION
33116 #define TARGET_OPTION_OPTIMIZATION ix86_option_optimization
33118 #undef TARGET_REGISTER_MOVE_COST
33119 #define TARGET_REGISTER_MOVE_COST ix86_register_move_cost
33120 #undef TARGET_MEMORY_MOVE_COST
33121 #define TARGET_MEMORY_MOVE_COST ix86_memory_move_cost
33122 #undef TARGET_RTX_COSTS
33123 #define TARGET_RTX_COSTS ix86_rtx_costs
33124 #undef TARGET_ADDRESS_COST
33125 #define TARGET_ADDRESS_COST ix86_address_cost
33127 #undef TARGET_FIXED_CONDITION_CODE_REGS
33128 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
33129 #undef TARGET_CC_MODES_COMPATIBLE
33130 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
33132 #undef TARGET_MACHINE_DEPENDENT_REORG
33133 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
33135 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
33136 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value
33138 #undef TARGET_BUILD_BUILTIN_VA_LIST
33139 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
33141 #undef TARGET_ENUM_VA_LIST_P
33142 #define TARGET_ENUM_VA_LIST_P ix86_enum_va_list
33144 #undef TARGET_FN_ABI_VA_LIST
33145 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
33147 #undef TARGET_CANONICAL_VA_LIST_TYPE
33148 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
33150 #undef TARGET_EXPAND_BUILTIN_VA_START
33151 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
33153 #undef TARGET_MD_ASM_CLOBBERS
33154 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
33156 #undef TARGET_PROMOTE_PROTOTYPES
33157 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
33158 #undef TARGET_STRUCT_VALUE_RTX
33159 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
33160 #undef TARGET_SETUP_INCOMING_VARARGS
33161 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
33162 #undef TARGET_MUST_PASS_IN_STACK
33163 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
33164 #undef TARGET_FUNCTION_ARG_ADVANCE
33165 #define TARGET_FUNCTION_ARG_ADVANCE ix86_function_arg_advance
33166 #undef TARGET_FUNCTION_ARG
33167 #define TARGET_FUNCTION_ARG ix86_function_arg
33168 #undef TARGET_PASS_BY_REFERENCE
33169 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
33170 #undef TARGET_INTERNAL_ARG_POINTER
33171 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
33172 #undef TARGET_UPDATE_STACK_BOUNDARY
33173 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
33174 #undef TARGET_GET_DRAP_RTX
33175 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
33176 #undef TARGET_STRICT_ARGUMENT_NAMING
33177 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
33178 #undef TARGET_STATIC_CHAIN
33179 #define TARGET_STATIC_CHAIN ix86_static_chain
33180 #undef TARGET_TRAMPOLINE_INIT
33181 #define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
33182 #undef TARGET_RETURN_POPS_ARGS
33183 #define TARGET_RETURN_POPS_ARGS ix86_return_pops_args
33185 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
33186 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
33188 #undef TARGET_SCALAR_MODE_SUPPORTED_P
33189 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
33191 #undef TARGET_VECTOR_MODE_SUPPORTED_P
33192 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
33194 #undef TARGET_C_MODE_FOR_SUFFIX
33195 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
33197 #ifdef HAVE_AS_TLS
33198 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
33199 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
33200 #endif
33202 #ifdef SUBTARGET_INSERT_ATTRIBUTES
33203 #undef TARGET_INSERT_ATTRIBUTES
33204 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
33205 #endif
33207 #undef TARGET_MANGLE_TYPE
33208 #define TARGET_MANGLE_TYPE ix86_mangle_type
33210 #undef TARGET_STACK_PROTECT_FAIL
33211 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
33213 #undef TARGET_SUPPORTS_SPLIT_STACK
33214 #define TARGET_SUPPORTS_SPLIT_STACK ix86_supports_split_stack
33216 #undef TARGET_FUNCTION_VALUE
33217 #define TARGET_FUNCTION_VALUE ix86_function_value
33219 #undef TARGET_FUNCTION_VALUE_REGNO_P
33220 #define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p
33222 #undef TARGET_SECONDARY_RELOAD
33223 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
33225 #undef TARGET_CLASS_LIKELY_SPILLED_P
33226 #define TARGET_CLASS_LIKELY_SPILLED_P ix86_class_likely_spilled_p
33228 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
33229 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
33230 ix86_builtin_vectorization_cost
33231 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
33232 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
33233 ix86_vectorize_builtin_vec_perm
33234 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
33235 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
33236 ix86_vectorize_builtin_vec_perm_ok
33237 #undef TARGET_VECTORIZE_UNITS_PER_SIMD_WORD
33238 #define TARGET_VECTORIZE_UNITS_PER_SIMD_WORD \
33239 ix86_units_per_simd_word
33241 #undef TARGET_SET_CURRENT_FUNCTION
33242 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
33244 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
33245 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
33247 #undef TARGET_OPTION_SAVE
33248 #define TARGET_OPTION_SAVE ix86_function_specific_save
33250 #undef TARGET_OPTION_RESTORE
33251 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
33253 #undef TARGET_OPTION_PRINT
33254 #define TARGET_OPTION_PRINT ix86_function_specific_print
33256 #undef TARGET_CAN_INLINE_P
33257 #define TARGET_CAN_INLINE_P ix86_can_inline_p
33259 #undef TARGET_EXPAND_TO_RTL_HOOK
33260 #define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi
33262 #undef TARGET_LEGITIMATE_ADDRESS_P
33263 #define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p
33265 #undef TARGET_IRA_COVER_CLASSES
33266 #define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes
33268 #undef TARGET_FRAME_POINTER_REQUIRED
33269 #define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required
33271 #undef TARGET_CAN_ELIMINATE
33272 #define TARGET_CAN_ELIMINATE ix86_can_eliminate
33274 #undef TARGET_EXTRA_LIVE_ON_ENTRY
33275 #define TARGET_EXTRA_LIVE_ON_ENTRY ix86_live_on_entry
33277 #undef TARGET_ASM_CODE_END
33278 #define TARGET_ASM_CODE_END ix86_code_end
33280 struct gcc_target targetm = TARGET_INITIALIZER;
33282 #include "gt-i386.h"