1 /* Subroutines used for code generation on IBM RS/6000.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
5 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it
10 under the terms of the GNU General Public License as published
11 by the Free Software Foundation; either version 3, or (at your
12 option) any later version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT
15 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
17 License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
29 #include "hard-reg-set.h"
31 #include "insn-config.h"
32 #include "conditions.h"
33 #include "insn-attr.h"
43 #include "basic-block.h"
44 #include "integrate.h"
50 #include "target-def.h"
51 #include "langhooks.h"
53 #include "cfglayout.h"
54 #include "sched-int.h"
56 #include "tree-flow.h"
59 #include "tm-constrs.h"
61 #include "xcoffout.h" /* get declarations of xcoff_*_section_name */
64 #include "gstab.h" /* for N_SLINE */
67 #ifndef TARGET_NO_PROTOTYPE
68 #define TARGET_NO_PROTOTYPE 0
71 #define min(A,B) ((A) < (B) ? (A) : (B))
72 #define max(A,B) ((A) > (B) ? (A) : (B))
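/* Note that both macros evaluate their arguments more than once. */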
74 /* Structure used to define the rs6000 stack */
75 typedef struct rs6000_stack {
76 int first_gp_reg_save; /* first callee saved GP register used */
77 int first_fp_reg_save; /* first callee saved FP register used */
78 int first_altivec_reg_save; /* first callee saved AltiVec register used */
79 int lr_save_p; /* true if the link reg needs to be saved */
80 int cr_save_p; /* true if the CR reg needs to be saved */
81 unsigned int vrsave_mask; /* mask of vec registers to save */
82 int push_p; /* true if we need to allocate stack space */
83 int calls_p; /* true if the function makes any calls */
84 int world_save_p; /* true if we're saving *everything*:
85 r13-r31, cr, f14-f31, vrsave, v20-v31 */
86 enum rs6000_abi abi; /* which ABI to use */
87 int gp_save_offset; /* offset to save GP regs from initial SP */
88 int fp_save_offset; /* offset to save FP regs from initial SP */
89 int altivec_save_offset; /* offset to save AltiVec regs from initial SP */
90 int lr_save_offset; /* offset to save LR from initial SP */
91 int cr_save_offset; /* offset to save CR from initial SP */
92 int vrsave_save_offset; /* offset to save VRSAVE from initial SP */
93 int spe_gp_save_offset; /* offset to save spe 64-bit gprs */
94 int varargs_save_offset; /* offset to save the varargs registers */
95 int ehrd_offset; /* offset to EH return data */
96 int reg_size; /* register size (4 or 8) */
97 HOST_WIDE_INT vars_size; /* variable save area size */
98 int parm_size; /* outgoing parameter size */
99 int save_size; /* save area size */
100 int fixed_size; /* fixed size of stack frame */
101 int gp_size; /* size of saved GP registers */
102 int fp_size; /* size of saved FP registers */
103 int altivec_size; /* size of saved AltiVec registers */
104 int cr_size; /* size to hold CR if not in save_size */
105 int vrsave_size; /* size to hold VRSAVE if not in save_size */
106 int altivec_padding_size; /* size of altivec alignment padding if not in save_size */
108 int spe_gp_size; /* size of 64-bit GPR save size for SPE */
109 int spe_padding_size;
110 HOST_WIDE_INT total_size; /* total bytes allocated for stack */
111 int spe_64bit_regs_used;
114 /* A C structure for machine-specific, per-function data.
115 This is added to the cfun structure. */
116 typedef struct machine_function GTY(())
118 /* Flags if __builtin_return_address (n) with n >= 1 was used. */
119 int ra_needs_full_frame;
120 /* Some local-dynamic symbol. */
121 const char *some_ld_name;
122 /* Whether the instruction chain has been scanned already. */
123 int insn_chain_scanned_p;
124 /* Flags if __builtin_return_address (0) was used. */
126 /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
127 varargs save area. */
128 HOST_WIDE_INT varargs_save_offset;
129 /* Temporary stack slot to use for SDmode copies. This slot is
130 64-bits wide and is allocated early enough so that the offset
131 does not overflow the 16-bit load/store offset field. */
132 rtx sdmode_stack_slot;
135 /* Target cpu type */
137 enum processor_type rs6000_cpu;
138 struct rs6000_cpu_select rs6000_select[3] =
140 /* switch name, tune arch */
141 { (const char *)0, "--with-cpu=", 1, 1 },
142 { (const char *)0, "-mcpu=", 1, 1 },
143 { (const char *)0, "-mtune=", 1, 0 },
146 /* Always emit branch hint bits. */
147 static GTY(()) bool rs6000_always_hint;
149 /* Schedule instructions for group formation. */
150 static GTY(()) bool rs6000_sched_groups;
152 /* Align branch targets. */
153 static GTY(()) bool rs6000_align_branch_targets;
155 /* Support for -msched-costly-dep option. */
156 const char *rs6000_sched_costly_dep_str;
157 enum rs6000_dependence_cost rs6000_sched_costly_dep;
159 /* Support for -minsert-sched-nops option. */
160 const char *rs6000_sched_insert_nops_str;
161 enum rs6000_nop_insertion rs6000_sched_insert_nops;
163 /* Support targetm.vectorize.builtin_mask_for_load. */
164 static GTY(()) tree altivec_builtin_mask_for_load;
166 /* Size of long double. */
167 int rs6000_long_double_type_size;
169 /* IEEE quad extended precision long double. */
172 /* Nonzero to use AltiVec ABI. */
173 int rs6000_altivec_abi;
175 /* Nonzero if we want SPE SIMD instructions. */
178 /* Nonzero if we want SPE ABI extensions. */
181 /* Nonzero to use isel instructions. */
184 /* Nonzero if floating point operations are done in the GPRs. */
185 int rs6000_float_gprs = 0;
187 /* Nonzero if we want Darwin's struct-by-value-in-regs ABI. */
188 int rs6000_darwin64_abi;
190 /* Set to nonzero once AIX common-mode calls have been defined. */
191 static GTY(()) int common_mode_defined;
193 /* Save information from a "cmpxx" operation until the branch or scc is emitted. */
195 rtx rs6000_compare_op0, rs6000_compare_op1;
196 int rs6000_compare_fp_p;
198 /* Label number of label created for -mrelocatable, to call to so we can
199 get the address of the GOT section */
200 int rs6000_pic_labelno;
203 /* Which abi to adhere to */
204 const char *rs6000_abi_name;
206 /* Semantics of the small data area */
207 enum rs6000_sdata_type rs6000_sdata = SDATA_DATA;
209 /* Which small data model to use */
210 const char *rs6000_sdata_name = (char *)0;
212 /* Counter for labels which are to be placed in .fixup. */
213 int fixuplabelno = 0;
216 /* Bit size of immediate TLS offsets and string from which it is decoded. */
217 int rs6000_tls_size = 32;
218 const char *rs6000_tls_size_string;
220 /* ABI enumeration available for subtarget to use. */
221 enum rs6000_abi rs6000_current_abi;
223 /* Whether to use variant of AIX ABI for PowerPC64 Linux. */
227 const char *rs6000_debug_name;
228 int rs6000_debug_stack; /* debug stack applications */
229 int rs6000_debug_arg; /* debug argument handling */
231 /* Value is TRUE if register/mode pair is acceptable. */
232 bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
234 /* Built in types. */
236 tree rs6000_builtin_types[RS6000_BTI_MAX];
237 tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
239 const char *rs6000_traceback_name;
241 traceback_default = 0,
247 /* Flag to say the TOC is initialized */
249 char toc_label_name[10];
251 /* Cached value of rs6000_variable_issue. This is cached in
252 rs6000_variable_issue hook and returned from rs6000_sched_reorder2. */
253 static short cached_can_issue_more;
255 static GTY(()) section *read_only_data_section;
256 static GTY(()) section *private_data_section;
257 static GTY(()) section *read_only_private_data_section;
258 static GTY(()) section *sdata2_section;
259 static GTY(()) section *toc_section;
261 /* Control alignment for fields within structures. */
262 /* String from -malign-XXXXX. */
263 int rs6000_alignment_flags;
265 /* True for any options that were explicitly set. */
267 bool aix_struct_ret; /* True if -maix-struct-ret was used. */
268 bool alignment; /* True if -malign- was used. */
269 bool spe_abi; /* True if -mabi=spe/no-spe was used. */
270 bool altivec_abi; /* True if -mabi=altivec/no-altivec used. */
271 bool spe; /* True if -mspe= was used. */
272 bool float_gprs; /* True if -mfloat-gprs= was used. */
273 bool isel; /* True if -misel was used. */
274 bool long_double; /* True if -mlong-double- was used. */
275 bool ieee; /* True if -mabi=ieee/ibmlongdouble used. */
276 bool vrsave; /* True if -mvrsave was used. */
277 } rs6000_explicit_options;
279 struct builtin_description
281 /* mask is not const because we're going to alter it below. This
282 nonsense will go away when we rewrite the -march infrastructure
283 to give us more target flag bits. */
285 const enum insn_code icode;
286 const char *const name;
287 const enum rs6000_builtins code;
290 /* Target cpu costs. */
292 struct processor_costs {
293 const int mulsi; /* cost of SImode multiplication. */
294 const int mulsi_const; /* cost of SImode multiplication by constant. */
295 const int mulsi_const9; /* cost of SImode mult by short constant. */
296 const int muldi; /* cost of DImode multiplication. */
297 const int divsi; /* cost of SImode division. */
298 const int divdi; /* cost of DImode division. */
299 const int fp; /* cost of simple SFmode and DFmode insns. */
300 const int dmul; /* cost of DFmode multiplication (and fmadd). */
301 const int sdiv; /* cost of SFmode division (fdivs). */
302 const int ddiv; /* cost of DFmode division (fdiv). */
303 const int cache_line_size; /* cache line size in bytes. */
304 const int l1_cache_size; /* size of l1 cache, in kilobytes. */
305 const int l2_cache_size; /* size of l2 cache, in kilobytes. */
306 const int simultaneous_prefetches; /* number of parallel prefetch operations. */
310 const struct processor_costs *rs6000_cost;
312 /* Processor costs (relative to an add) */
314 /* Instruction size costs on 32bit processors. */
316 struct processor_costs size32_cost = {
317 COSTS_N_INSNS (1), /* mulsi */
318 COSTS_N_INSNS (1), /* mulsi_const */
319 COSTS_N_INSNS (1), /* mulsi_const9 */
320 COSTS_N_INSNS (1), /* muldi */
321 COSTS_N_INSNS (1), /* divsi */
322 COSTS_N_INSNS (1), /* divdi */
323 COSTS_N_INSNS (1), /* fp */
324 COSTS_N_INSNS (1), /* dmul */
325 COSTS_N_INSNS (1), /* sdiv */
326 COSTS_N_INSNS (1), /* ddiv */
333 /* Instruction size costs on 64bit processors. */
335 struct processor_costs size64_cost = {
336 COSTS_N_INSNS (1), /* mulsi */
337 COSTS_N_INSNS (1), /* mulsi_const */
338 COSTS_N_INSNS (1), /* mulsi_const9 */
339 COSTS_N_INSNS (1), /* muldi */
340 COSTS_N_INSNS (1), /* divsi */
341 COSTS_N_INSNS (1), /* divdi */
342 COSTS_N_INSNS (1), /* fp */
343 COSTS_N_INSNS (1), /* dmul */
344 COSTS_N_INSNS (1), /* sdiv */
345 COSTS_N_INSNS (1), /* ddiv */
352 /* Instruction costs on RIOS1 processors. */
354 struct processor_costs rios1_cost = {
355 COSTS_N_INSNS (5), /* mulsi */
356 COSTS_N_INSNS (4), /* mulsi_const */
357 COSTS_N_INSNS (3), /* mulsi_const9 */
358 COSTS_N_INSNS (5), /* muldi */
359 COSTS_N_INSNS (19), /* divsi */
360 COSTS_N_INSNS (19), /* divdi */
361 COSTS_N_INSNS (2), /* fp */
362 COSTS_N_INSNS (2), /* dmul */
363 COSTS_N_INSNS (19), /* sdiv */
364 COSTS_N_INSNS (19), /* ddiv */
365 128, /* cache line size */
371 /* Instruction costs on RIOS2 processors. */
373 struct processor_costs rios2_cost = {
374 COSTS_N_INSNS (2), /* mulsi */
375 COSTS_N_INSNS (2), /* mulsi_const */
376 COSTS_N_INSNS (2), /* mulsi_const9 */
377 COSTS_N_INSNS (2), /* muldi */
378 COSTS_N_INSNS (13), /* divsi */
379 COSTS_N_INSNS (13), /* divdi */
380 COSTS_N_INSNS (2), /* fp */
381 COSTS_N_INSNS (2), /* dmul */
382 COSTS_N_INSNS (17), /* sdiv */
383 COSTS_N_INSNS (17), /* ddiv */
384 256, /* cache line size */
390 /* Instruction costs on RS64A processors. */
392 struct processor_costs rs64a_cost = {
393 COSTS_N_INSNS (20), /* mulsi */
394 COSTS_N_INSNS (12), /* mulsi_const */
395 COSTS_N_INSNS (8), /* mulsi_const9 */
396 COSTS_N_INSNS (34), /* muldi */
397 COSTS_N_INSNS (65), /* divsi */
398 COSTS_N_INSNS (67), /* divdi */
399 COSTS_N_INSNS (4), /* fp */
400 COSTS_N_INSNS (4), /* dmul */
401 COSTS_N_INSNS (31), /* sdiv */
402 COSTS_N_INSNS (31), /* ddiv */
403 128, /* cache line size */
409 /* Instruction costs on MPCCORE processors. */
411 struct processor_costs mpccore_cost = {
412 COSTS_N_INSNS (2), /* mulsi */
413 COSTS_N_INSNS (2), /* mulsi_const */
414 COSTS_N_INSNS (2), /* mulsi_const9 */
415 COSTS_N_INSNS (2), /* muldi */
416 COSTS_N_INSNS (6), /* divsi */
417 COSTS_N_INSNS (6), /* divdi */
418 COSTS_N_INSNS (4), /* fp */
419 COSTS_N_INSNS (5), /* dmul */
420 COSTS_N_INSNS (10), /* sdiv */
421 COSTS_N_INSNS (17), /* ddiv */
422 32, /* cache line size */
428 /* Instruction costs on PPC403 processors. */
430 struct processor_costs ppc403_cost = {
431 COSTS_N_INSNS (4), /* mulsi */
432 COSTS_N_INSNS (4), /* mulsi_const */
433 COSTS_N_INSNS (4), /* mulsi_const9 */
434 COSTS_N_INSNS (4), /* muldi */
435 COSTS_N_INSNS (33), /* divsi */
436 COSTS_N_INSNS (33), /* divdi */
437 COSTS_N_INSNS (11), /* fp */
438 COSTS_N_INSNS (11), /* dmul */
439 COSTS_N_INSNS (11), /* sdiv */
440 COSTS_N_INSNS (11), /* ddiv */
441 32, /* cache line size */
447 /* Instruction costs on PPC405 processors. */
449 struct processor_costs ppc405_cost = {
450 COSTS_N_INSNS (5), /* mulsi */
451 COSTS_N_INSNS (4), /* mulsi_const */
452 COSTS_N_INSNS (3), /* mulsi_const9 */
453 COSTS_N_INSNS (5), /* muldi */
454 COSTS_N_INSNS (35), /* divsi */
455 COSTS_N_INSNS (35), /* divdi */
456 COSTS_N_INSNS (11), /* fp */
457 COSTS_N_INSNS (11), /* dmul */
458 COSTS_N_INSNS (11), /* sdiv */
459 COSTS_N_INSNS (11), /* ddiv */
460 32, /* cache line size */
466 /* Instruction costs on PPC440 processors. */
468 struct processor_costs ppc440_cost = {
469 COSTS_N_INSNS (3), /* mulsi */
470 COSTS_N_INSNS (2), /* mulsi_const */
471 COSTS_N_INSNS (2), /* mulsi_const9 */
472 COSTS_N_INSNS (3), /* muldi */
473 COSTS_N_INSNS (34), /* divsi */
474 COSTS_N_INSNS (34), /* divdi */
475 COSTS_N_INSNS (5), /* fp */
476 COSTS_N_INSNS (5), /* dmul */
477 COSTS_N_INSNS (19), /* sdiv */
478 COSTS_N_INSNS (33), /* ddiv */
479 32, /* cache line size */
485 /* Instruction costs on PPC601 processors. */
487 struct processor_costs ppc601_cost = {
488 COSTS_N_INSNS (5), /* mulsi */
489 COSTS_N_INSNS (5), /* mulsi_const */
490 COSTS_N_INSNS (5), /* mulsi_const9 */
491 COSTS_N_INSNS (5), /* muldi */
492 COSTS_N_INSNS (36), /* divsi */
493 COSTS_N_INSNS (36), /* divdi */
494 COSTS_N_INSNS (4), /* fp */
495 COSTS_N_INSNS (5), /* dmul */
496 COSTS_N_INSNS (17), /* sdiv */
497 COSTS_N_INSNS (31), /* ddiv */
498 32, /* cache line size */
504 /* Instruction costs on PPC603 processors. */
506 struct processor_costs ppc603_cost = {
507 COSTS_N_INSNS (5), /* mulsi */
508 COSTS_N_INSNS (3), /* mulsi_const */
509 COSTS_N_INSNS (2), /* mulsi_const9 */
510 COSTS_N_INSNS (5), /* muldi */
511 COSTS_N_INSNS (37), /* divsi */
512 COSTS_N_INSNS (37), /* divdi */
513 COSTS_N_INSNS (3), /* fp */
514 COSTS_N_INSNS (4), /* dmul */
515 COSTS_N_INSNS (18), /* sdiv */
516 COSTS_N_INSNS (33), /* ddiv */
517 32, /* cache line size */
523 /* Instruction costs on PPC604 processors. */
525 struct processor_costs ppc604_cost = {
526 COSTS_N_INSNS (4), /* mulsi */
527 COSTS_N_INSNS (4), /* mulsi_const */
528 COSTS_N_INSNS (4), /* mulsi_const9 */
529 COSTS_N_INSNS (4), /* muldi */
530 COSTS_N_INSNS (20), /* divsi */
531 COSTS_N_INSNS (20), /* divdi */
532 COSTS_N_INSNS (3), /* fp */
533 COSTS_N_INSNS (3), /* dmul */
534 COSTS_N_INSNS (18), /* sdiv */
535 COSTS_N_INSNS (32), /* ddiv */
536 32, /* cache line size */
542 /* Instruction costs on PPC604e processors. */
544 struct processor_costs ppc604e_cost = {
545 COSTS_N_INSNS (2), /* mulsi */
546 COSTS_N_INSNS (2), /* mulsi_const */
547 COSTS_N_INSNS (2), /* mulsi_const9 */
548 COSTS_N_INSNS (2), /* muldi */
549 COSTS_N_INSNS (20), /* divsi */
550 COSTS_N_INSNS (20), /* divdi */
551 COSTS_N_INSNS (3), /* fp */
552 COSTS_N_INSNS (3), /* dmul */
553 COSTS_N_INSNS (18), /* sdiv */
554 COSTS_N_INSNS (32), /* ddiv */
555 32, /* cache line size */
561 /* Instruction costs on PPC620 processors. */
563 struct processor_costs ppc620_cost = {
564 COSTS_N_INSNS (5), /* mulsi */
565 COSTS_N_INSNS (4), /* mulsi_const */
566 COSTS_N_INSNS (3), /* mulsi_const9 */
567 COSTS_N_INSNS (7), /* muldi */
568 COSTS_N_INSNS (21), /* divsi */
569 COSTS_N_INSNS (37), /* divdi */
570 COSTS_N_INSNS (3), /* fp */
571 COSTS_N_INSNS (3), /* dmul */
572 COSTS_N_INSNS (18), /* sdiv */
573 COSTS_N_INSNS (32), /* ddiv */
574 128, /* cache line size */
580 /* Instruction costs on PPC630 processors. */
582 struct processor_costs ppc630_cost = {
583 COSTS_N_INSNS (5), /* mulsi */
584 COSTS_N_INSNS (4), /* mulsi_const */
585 COSTS_N_INSNS (3), /* mulsi_const9 */
586 COSTS_N_INSNS (7), /* muldi */
587 COSTS_N_INSNS (21), /* divsi */
588 COSTS_N_INSNS (37), /* divdi */
589 COSTS_N_INSNS (3), /* fp */
590 COSTS_N_INSNS (3), /* dmul */
591 COSTS_N_INSNS (17), /* sdiv */
592 COSTS_N_INSNS (21), /* ddiv */
593 128, /* cache line size */
599 /* Instruction costs on Cell processor. */
600 /* COSTS_N_INSNS (1) ~ one add. */
602 struct processor_costs ppccell_cost = {
603 COSTS_N_INSNS (9/2)+2, /* mulsi */
604 COSTS_N_INSNS (6/2), /* mulsi_const */
605 COSTS_N_INSNS (6/2), /* mulsi_const9 */
606 COSTS_N_INSNS (15/2)+2, /* muldi */
607 COSTS_N_INSNS (38/2), /* divsi */
608 COSTS_N_INSNS (70/2), /* divdi */
609 COSTS_N_INSNS (10/2), /* fp */
610 COSTS_N_INSNS (10/2), /* dmul */
611 COSTS_N_INSNS (74/2), /* sdiv */
612 COSTS_N_INSNS (74/2), /* ddiv */
613 128, /* cache line size */
619 /* Instruction costs on PPC750 and PPC7400 processors. */
621 struct processor_costs ppc750_cost = {
622 COSTS_N_INSNS (5), /* mulsi */
623 COSTS_N_INSNS (3), /* mulsi_const */
624 COSTS_N_INSNS (2), /* mulsi_const9 */
625 COSTS_N_INSNS (5), /* muldi */
626 COSTS_N_INSNS (17), /* divsi */
627 COSTS_N_INSNS (17), /* divdi */
628 COSTS_N_INSNS (3), /* fp */
629 COSTS_N_INSNS (3), /* dmul */
630 COSTS_N_INSNS (17), /* sdiv */
631 COSTS_N_INSNS (31), /* ddiv */
632 32, /* cache line size */
638 /* Instruction costs on PPC7450 processors. */
640 struct processor_costs ppc7450_cost = {
641 COSTS_N_INSNS (4), /* mulsi */
642 COSTS_N_INSNS (3), /* mulsi_const */
643 COSTS_N_INSNS (3), /* mulsi_const9 */
644 COSTS_N_INSNS (4), /* muldi */
645 COSTS_N_INSNS (23), /* divsi */
646 COSTS_N_INSNS (23), /* divdi */
647 COSTS_N_INSNS (5), /* fp */
648 COSTS_N_INSNS (5), /* dmul */
649 COSTS_N_INSNS (21), /* sdiv */
650 COSTS_N_INSNS (35), /* ddiv */
651 32, /* cache line size */
657 /* Instruction costs on PPC8540 processors. */
659 struct processor_costs ppc8540_cost = {
660 COSTS_N_INSNS (4), /* mulsi */
661 COSTS_N_INSNS (4), /* mulsi_const */
662 COSTS_N_INSNS (4), /* mulsi_const9 */
663 COSTS_N_INSNS (4), /* muldi */
664 COSTS_N_INSNS (19), /* divsi */
665 COSTS_N_INSNS (19), /* divdi */
666 COSTS_N_INSNS (4), /* fp */
667 COSTS_N_INSNS (4), /* dmul */
668 COSTS_N_INSNS (29), /* sdiv */
669 COSTS_N_INSNS (29), /* ddiv */
670 32, /* cache line size */
673 1, /* prefetch streams */
676 /* Instruction costs on E300C2 and E300C3 cores. */
678 struct processor_costs ppce300c2c3_cost = {
679 COSTS_N_INSNS (4), /* mulsi */
680 COSTS_N_INSNS (4), /* mulsi_const */
681 COSTS_N_INSNS (4), /* mulsi_const9 */
682 COSTS_N_INSNS (4), /* muldi */
683 COSTS_N_INSNS (19), /* divsi */
684 COSTS_N_INSNS (19), /* divdi */
685 COSTS_N_INSNS (3), /* fp */
686 COSTS_N_INSNS (4), /* dmul */
687 COSTS_N_INSNS (18), /* sdiv */
688 COSTS_N_INSNS (33), /* ddiv */
692 1, /* prefetch streams */
695 /* Instruction costs on PPCE500MC processors. */
697 struct processor_costs ppce500mc_cost = {
698 COSTS_N_INSNS (4), /* mulsi */
699 COSTS_N_INSNS (4), /* mulsi_const */
700 COSTS_N_INSNS (4), /* mulsi_const9 */
701 COSTS_N_INSNS (4), /* muldi */
702 COSTS_N_INSNS (14), /* divsi */
703 COSTS_N_INSNS (14), /* divdi */
704 COSTS_N_INSNS (8), /* fp */
705 COSTS_N_INSNS (10), /* dmul */
706 COSTS_N_INSNS (36), /* sdiv */
707 COSTS_N_INSNS (66), /* ddiv */
708 64, /* cache line size */
711 1, /* prefetch streams */
714 /* Instruction costs on POWER4 and POWER5 processors. */
716 struct processor_costs power4_cost = {
717 COSTS_N_INSNS (3), /* mulsi */
718 COSTS_N_INSNS (2), /* mulsi_const */
719 COSTS_N_INSNS (2), /* mulsi_const9 */
720 COSTS_N_INSNS (4), /* muldi */
721 COSTS_N_INSNS (18), /* divsi */
722 COSTS_N_INSNS (34), /* divdi */
723 COSTS_N_INSNS (3), /* fp */
724 COSTS_N_INSNS (3), /* dmul */
725 COSTS_N_INSNS (17), /* sdiv */
726 COSTS_N_INSNS (17), /* ddiv */
727 128, /* cache line size */
730 8, /* prefetch streams */
733 /* Instruction costs on POWER6 processors. */
735 struct processor_costs power6_cost = {
736 COSTS_N_INSNS (8), /* mulsi */
737 COSTS_N_INSNS (8), /* mulsi_const */
738 COSTS_N_INSNS (8), /* mulsi_const9 */
739 COSTS_N_INSNS (8), /* muldi */
740 COSTS_N_INSNS (22), /* divsi */
741 COSTS_N_INSNS (28), /* divdi */
742 COSTS_N_INSNS (3), /* fp */
743 COSTS_N_INSNS (3), /* dmul */
744 COSTS_N_INSNS (13), /* sdiv */
745 COSTS_N_INSNS (16), /* ddiv */
746 128, /* cache line size */
749 16, /* prefetch streams */
753 static bool rs6000_function_ok_for_sibcall (tree, tree);
754 static const char *rs6000_invalid_within_doloop (const_rtx);
755 static rtx rs6000_generate_compare (enum rtx_code);
756 static void rs6000_emit_stack_tie (void);
757 static void rs6000_frame_related (rtx, rtx, HOST_WIDE_INT, rtx, rtx);
758 static bool spe_func_has_64bit_regs_p (void);
759 static void emit_frame_save (rtx, rtx, enum machine_mode, unsigned int,
761 static rtx gen_frame_mem_offset (enum machine_mode, rtx, int);
762 static void rs6000_emit_allocate_stack (HOST_WIDE_INT, int, int);
763 static unsigned rs6000_hash_constant (rtx);
764 static unsigned toc_hash_function (const void *);
765 static int toc_hash_eq (const void *, const void *);
766 static bool constant_pool_expr_p (rtx);
767 static bool legitimate_small_data_p (enum machine_mode, rtx);
768 static bool legitimate_lo_sum_address_p (enum machine_mode, rtx, int);
769 static struct machine_function * rs6000_init_machine_status (void);
770 static bool rs6000_assemble_integer (rtx, unsigned int, int);
771 static bool no_global_regs_above (int, bool);
772 #ifdef HAVE_GAS_HIDDEN
773 static void rs6000_assemble_visibility (tree, int);
775 static int rs6000_ra_ever_killed (void);
776 static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
777 static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
778 static bool rs6000_ms_bitfield_layout_p (const_tree);
779 static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
780 static void rs6000_eliminate_indexed_memrefs (rtx operands[2]);
781 static const char *rs6000_mangle_type (const_tree);
782 extern const struct attribute_spec rs6000_attribute_table[];
783 static void rs6000_set_default_type_attributes (tree);
784 static rtx rs6000_savres_routine_sym (rs6000_stack_t *, bool, bool, bool);
785 static void rs6000_emit_stack_reset (rs6000_stack_t *, rtx, rtx, int, bool);
786 static rtx rs6000_make_savres_rtx (rs6000_stack_t *, rtx, int,
787 enum machine_mode, bool, bool, bool);
788 static bool rs6000_reg_live_or_pic_offset_p (int);
789 static int rs6000_savres_strategy (rs6000_stack_t *, bool, int, int);
790 static void rs6000_restore_saved_cr (rtx, int);
791 static void rs6000_output_function_prologue (FILE *, HOST_WIDE_INT);
792 static void rs6000_output_function_epilogue (FILE *, HOST_WIDE_INT);
793 static void rs6000_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
795 static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
796 static bool rs6000_return_in_memory (const_tree, const_tree);
797 static void rs6000_file_start (void);
799 static int rs6000_elf_reloc_rw_mask (void);
800 static void rs6000_elf_asm_out_constructor (rtx, int);
801 static void rs6000_elf_asm_out_destructor (rtx, int);
802 static void rs6000_elf_end_indicate_exec_stack (void) ATTRIBUTE_UNUSED;
803 static void rs6000_elf_asm_init_sections (void);
804 static section *rs6000_elf_select_rtx_section (enum machine_mode, rtx,
805 unsigned HOST_WIDE_INT);
806 static void rs6000_elf_encode_section_info (tree, rtx, int)
809 static bool rs6000_use_blocks_for_constant_p (enum machine_mode, const_rtx);
810 static void rs6000_alloc_sdmode_stack_slot (void);
811 static void rs6000_instantiate_decls (void);
813 static void rs6000_xcoff_asm_output_anchor (rtx);
814 static void rs6000_xcoff_asm_globalize_label (FILE *, const char *);
815 static void rs6000_xcoff_asm_init_sections (void);
816 static int rs6000_xcoff_reloc_rw_mask (void);
817 static void rs6000_xcoff_asm_named_section (const char *, unsigned int, tree);
818 static section *rs6000_xcoff_select_section (tree, int,
819 unsigned HOST_WIDE_INT);
820 static void rs6000_xcoff_unique_section (tree, int);
821 static section *rs6000_xcoff_select_rtx_section
822 (enum machine_mode, rtx, unsigned HOST_WIDE_INT);
823 static const char * rs6000_xcoff_strip_name_encoding (const char *);
824 static unsigned int rs6000_xcoff_section_type_flags (tree, const char *, int);
825 static void rs6000_xcoff_file_start (void);
826 static void rs6000_xcoff_file_end (void);
828 static int rs6000_variable_issue (FILE *, int, rtx, int);
829 static bool rs6000_rtx_costs (rtx, int, int, int *, bool);
830 static int rs6000_adjust_cost (rtx, rtx, rtx, int);
831 static void rs6000_sched_init (FILE *, int, int);
832 static bool is_microcoded_insn (rtx);
833 static bool is_nonpipeline_insn (rtx);
834 static bool is_cracked_insn (rtx);
835 static bool is_branch_slot_insn (rtx);
836 static bool is_load_insn (rtx);
837 static rtx get_store_dest (rtx pat);
838 static bool is_store_insn (rtx);
839 static bool set_to_load_agen (rtx,rtx);
840 static bool adjacent_mem_locations (rtx,rtx);
841 static int rs6000_adjust_priority (rtx, int);
842 static int rs6000_issue_rate (void);
843 static bool rs6000_is_costly_dependence (dep_t, int, int);
844 static rtx get_next_active_insn (rtx, rtx);
845 static bool insn_terminates_group_p (rtx , enum group_termination);
846 static bool insn_must_be_first_in_group (rtx);
847 static bool insn_must_be_last_in_group (rtx);
848 static bool is_costly_group (rtx *, rtx);
849 static int force_new_group (int, FILE *, rtx *, rtx, bool *, int, int *);
850 static int redefine_groups (FILE *, int, rtx, rtx);
851 static int pad_groups (FILE *, int, rtx, rtx);
852 static void rs6000_sched_finish (FILE *, int);
853 static int rs6000_sched_reorder (FILE *, int, rtx *, int *, int);
854 static int rs6000_sched_reorder2 (FILE *, int, rtx *, int *, int);
855 static int rs6000_use_sched_lookahead (void);
856 static int rs6000_use_sched_lookahead_guard (rtx);
857 static void * rs6000_alloc_sched_context (void);
858 static void rs6000_init_sched_context (void *, bool);
859 static void rs6000_set_sched_context (void *);
860 static void rs6000_free_sched_context (void *);
861 static tree rs6000_builtin_reciprocal (unsigned int, bool, bool);
862 static tree rs6000_builtin_mask_for_load (void);
863 static tree rs6000_builtin_mul_widen_even (tree);
864 static tree rs6000_builtin_mul_widen_odd (tree);
865 static tree rs6000_builtin_conversion (enum tree_code, tree);
866 static tree rs6000_builtin_vec_perm (tree, tree *);
868 static void def_builtin (int, const char *, tree, int);
869 static bool rs6000_vector_alignment_reachable (const_tree, bool);
870 static void rs6000_init_builtins (void);
871 static rtx rs6000_expand_unop_builtin (enum insn_code, tree, rtx);
872 static rtx rs6000_expand_binop_builtin (enum insn_code, tree, rtx);
873 static rtx rs6000_expand_ternop_builtin (enum insn_code, tree, rtx);
874 static rtx rs6000_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
875 static void altivec_init_builtins (void);
876 static void rs6000_common_init_builtins (void);
877 static void rs6000_init_libfuncs (void);
879 static void paired_init_builtins (void);
880 static rtx paired_expand_builtin (tree, rtx, bool *);
881 static rtx paired_expand_lv_builtin (enum insn_code, tree, rtx);
882 static rtx paired_expand_stv_builtin (enum insn_code, tree);
883 static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
885 static void enable_mask_for_builtins (struct builtin_description *, int,
886 enum rs6000_builtins,
887 enum rs6000_builtins);
888 static tree build_opaque_vector_type (tree, int);
889 static void spe_init_builtins (void);
890 static rtx spe_expand_builtin (tree, rtx, bool *);
891 static rtx spe_expand_stv_builtin (enum insn_code, tree);
892 static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
893 static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
894 static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
895 static rs6000_stack_t *rs6000_stack_info (void);
896 static void debug_stack_info (rs6000_stack_t *);
898 static rtx altivec_expand_builtin (tree, rtx, bool *);
899 static rtx altivec_expand_ld_builtin (tree, rtx, bool *);
900 static rtx altivec_expand_st_builtin (tree, rtx, bool *);
901 static rtx altivec_expand_dst_builtin (tree, rtx, bool *);
902 static rtx altivec_expand_abs_builtin (enum insn_code, tree, rtx);
903 static rtx altivec_expand_predicate_builtin (enum insn_code,
904 const char *, tree, rtx);
905 static rtx altivec_expand_stv_builtin (enum insn_code, tree);
906 static rtx altivec_expand_vec_init_builtin (tree, tree, rtx);
907 static rtx altivec_expand_vec_set_builtin (tree);
908 static rtx altivec_expand_vec_ext_builtin (tree, rtx);
909 static int get_element_number (tree, tree);
910 static bool rs6000_handle_option (size_t, const char *, int);
911 static void rs6000_parse_tls_size_option (void);
912 static void rs6000_parse_yes_no_option (const char *, const char *, int *);
913 static int first_altivec_reg_to_save (void);
914 static unsigned int compute_vrsave_mask (void);
915 static void compute_save_world_info (rs6000_stack_t *info_ptr);
916 static void is_altivec_return_reg (rtx, void *);
917 static rtx generate_set_vrsave (rtx, rs6000_stack_t *, int);
918 int easy_vector_constant (rtx, enum machine_mode);
919 static bool rs6000_is_opaque_type (const_tree);
920 static rtx rs6000_dwarf_register_span (rtx);
921 static void rs6000_init_dwarf_reg_sizes_extra (tree);
922 static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
923 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
924 static rtx rs6000_tls_get_addr (void);
925 static rtx rs6000_got_sym (void);
926 static int rs6000_tls_symbol_ref_1 (rtx *, void *);
927 static const char *rs6000_get_some_local_dynamic_name (void);
928 static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
929 static rtx rs6000_complex_function_value (enum machine_mode);
930 static rtx rs6000_spe_function_arg (CUMULATIVE_ARGS *,
931 enum machine_mode, tree);
932 static void rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *,
934 static void rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *,
935 tree, HOST_WIDE_INT);
936 static void rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *,
939 static void rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *,
940 const_tree, HOST_WIDE_INT,
942 static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree, int, bool);
943 static rtx rs6000_mixed_function_arg (enum machine_mode, tree, int);
944 static void rs6000_move_block_from_reg (int regno, rtx x, int nregs);
945 static void setup_incoming_varargs (CUMULATIVE_ARGS *,
946 enum machine_mode, tree,
948 static bool rs6000_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
950 static int rs6000_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
952 static const char *invalid_arg_for_unprototyped_fn (const_tree, const_tree, const_tree);
954 static void macho_branch_islands (void);
955 static int no_previous_def (tree function_name);
956 static tree get_prev_label (tree function_name);
957 static void rs6000_darwin_file_start (void);
960 static tree rs6000_build_builtin_va_list (void);
961 static void rs6000_va_start (tree, rtx);
962 static tree rs6000_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
963 static bool rs6000_must_pass_in_stack (enum machine_mode, const_tree);
964 static bool rs6000_scalar_mode_supported_p (enum machine_mode);
965 static bool rs6000_vector_mode_supported_p (enum machine_mode);
966 static int get_vec_cmp_insn (enum rtx_code, enum machine_mode,
968 static rtx rs6000_emit_vector_compare (enum rtx_code, rtx, rtx,
970 static int get_vsel_insn (enum machine_mode);
971 static void rs6000_emit_vector_select (rtx, rtx, rtx, rtx);
972 static tree rs6000_stack_protect_fail (void);
974 const int INSN_NOT_AVAILABLE = -1;
975 static enum machine_mode rs6000_eh_return_filter_mode (void);
977 /* Hash table stuff for keeping track of TOC entries. */
979 struct toc_hash_struct GTY(())
981 /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
982 ASM_OUTPUT_SPECIAL_POOL_ENTRY_P. */
984 enum machine_mode key_mode;
988 static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;
990 /* Default register names. */
991 char rs6000_reg_names[][8] =
993 "0", "1", "2", "3", "4", "5", "6", "7",
994 "8", "9", "10", "11", "12", "13", "14", "15",
995 "16", "17", "18", "19", "20", "21", "22", "23",
996 "24", "25", "26", "27", "28", "29", "30", "31",
997 "0", "1", "2", "3", "4", "5", "6", "7",
998 "8", "9", "10", "11", "12", "13", "14", "15",
999 "16", "17", "18", "19", "20", "21", "22", "23",
1000 "24", "25", "26", "27", "28", "29", "30", "31",
1001 "mq", "lr", "ctr","ap",
1002 "0", "1", "2", "3", "4", "5", "6", "7",
1004 /* AltiVec registers. */
1005 "0", "1", "2", "3", "4", "5", "6", "7",
1006 "8", "9", "10", "11", "12", "13", "14", "15",
1007 "16", "17", "18", "19", "20", "21", "22", "23",
1008 "24", "25", "26", "27", "28", "29", "30", "31",
1010 /* SPE registers. */
1011 "spe_acc", "spefscr",
1012 /* Soft frame pointer. */
1016 #ifdef TARGET_REGNAMES
1017 static const char alt_reg_names[][8] =
1019 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
1020 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
1021 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
1022 "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
1023 "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
1024 "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
1025 "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
1026 "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
1027 "mq", "lr", "ctr", "ap",
1028 "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
1030 /* AltiVec registers. */
1031 "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
1032 "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
1033 "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
1034 "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
1036 /* SPE registers. */
1037 "spe_acc", "spefscr",
1038 /* Soft frame pointer. */
1043 #ifndef MASK_STRICT_ALIGN
1044 #define MASK_STRICT_ALIGN 0
1046 #ifndef TARGET_PROFILE_KERNEL
1047 #define TARGET_PROFILE_KERNEL 0
1050 /* The VRSAVE bitmask puts bit %v0 as the most significant bit. */
1051 #define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
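/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001. */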
1053 /* Initialize the GCC target structure. */
1054 #undef TARGET_ATTRIBUTE_TABLE
1055 #define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
1056 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
1057 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
1059 #undef TARGET_ASM_ALIGNED_DI_OP
1060 #define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
1062 /* Default unaligned ops are only provided for ELF. Find the ops needed
1063 for non-ELF systems. */
1064 #ifndef OBJECT_FORMAT_ELF
1066 /* For XCOFF. rs6000_assemble_integer will handle unaligned DIs on
1068 #undef TARGET_ASM_UNALIGNED_HI_OP
1069 #define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
1070 #undef TARGET_ASM_UNALIGNED_SI_OP
1071 #define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
1072 #undef TARGET_ASM_UNALIGNED_DI_OP
1073 #define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
1076 #undef TARGET_ASM_UNALIGNED_HI_OP
1077 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
1078 #undef TARGET_ASM_UNALIGNED_SI_OP
1079 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
1080 #undef TARGET_ASM_UNALIGNED_DI_OP
1081 #define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
1082 #undef TARGET_ASM_ALIGNED_DI_OP
1083 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
1087 /* This hook deals with fixups for relocatable code and DI-mode objects
1089 #undef TARGET_ASM_INTEGER
1090 #define TARGET_ASM_INTEGER rs6000_assemble_integer
1092 #ifdef HAVE_GAS_HIDDEN
1093 #undef TARGET_ASM_ASSEMBLE_VISIBILITY
1094 #define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
1097 #undef TARGET_HAVE_TLS
1098 #define TARGET_HAVE_TLS HAVE_AS_TLS
1100 #undef TARGET_CANNOT_FORCE_CONST_MEM
1101 #define TARGET_CANNOT_FORCE_CONST_MEM rs6000_tls_referenced_p
1103 #undef TARGET_ASM_FUNCTION_PROLOGUE
1104 #define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
1105 #undef TARGET_ASM_FUNCTION_EPILOGUE
1106 #define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue
1108 #undef TARGET_SCHED_VARIABLE_ISSUE
1109 #define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue
1111 #undef TARGET_SCHED_ISSUE_RATE
1112 #define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
1113 #undef TARGET_SCHED_ADJUST_COST
1114 #define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
1115 #undef TARGET_SCHED_ADJUST_PRIORITY
1116 #define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
1117 #undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
1118 #define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
1119 #undef TARGET_SCHED_INIT
1120 #define TARGET_SCHED_INIT rs6000_sched_init
1121 #undef TARGET_SCHED_FINISH
1122 #define TARGET_SCHED_FINISH rs6000_sched_finish
1123 #undef TARGET_SCHED_REORDER
1124 #define TARGET_SCHED_REORDER rs6000_sched_reorder
1125 #undef TARGET_SCHED_REORDER2
1126 #define TARGET_SCHED_REORDER2 rs6000_sched_reorder2
1128 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1129 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead
1131 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
1132 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard
1134 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
1135 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
1136 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
1137 #define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
1138 #undef TARGET_SCHED_SET_SCHED_CONTEXT
1139 #define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
1140 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
1141 #define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1143 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1144 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1145 #undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN
1146 #define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN rs6000_builtin_mul_widen_even
1147 #undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD
1148 #define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD rs6000_builtin_mul_widen_odd
1149 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
1150 #define TARGET_VECTORIZE_BUILTIN_CONVERSION rs6000_builtin_conversion
1151 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
1152 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM rs6000_builtin_vec_perm
1154 #undef TARGET_VECTOR_ALIGNMENT_REACHABLE
1155 #define TARGET_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1157 #undef TARGET_INIT_BUILTINS
1158 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1160 #undef TARGET_EXPAND_BUILTIN
1161 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1163 #undef TARGET_MANGLE_TYPE
1164 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1166 #undef TARGET_INIT_LIBFUNCS
1167 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1170 #undef TARGET_BINDS_LOCAL_P
1171 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1174 #undef TARGET_MS_BITFIELD_LAYOUT_P
1175 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1177 #undef TARGET_ASM_OUTPUT_MI_THUNK
1178 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1180 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1181 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1183 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1184 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1186 #undef TARGET_INVALID_WITHIN_DOLOOP
1187 #define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop
1189 #undef TARGET_RTX_COSTS
1190 #define TARGET_RTX_COSTS rs6000_rtx_costs
1191 #undef TARGET_ADDRESS_COST
1192 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
1194 #undef TARGET_VECTOR_OPAQUE_P
1195 #define TARGET_VECTOR_OPAQUE_P rs6000_is_opaque_type
1197 #undef TARGET_DWARF_REGISTER_SPAN
1198 #define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span
1200 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1201 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1203 /* On rs6000, function arguments are promoted, as are function return values. */
1205 #undef TARGET_PROMOTE_FUNCTION_ARGS
1206 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
1207 #undef TARGET_PROMOTE_FUNCTION_RETURN
1208 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
1210 #undef TARGET_RETURN_IN_MEMORY
1211 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1213 #undef TARGET_SETUP_INCOMING_VARARGS
1214 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1216 /* Always strict argument naming on rs6000. */
1217 #undef TARGET_STRICT_ARGUMENT_NAMING
1218 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1219 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1220 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1221 #undef TARGET_SPLIT_COMPLEX_ARG
1222 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1223 #undef TARGET_MUST_PASS_IN_STACK
1224 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1225 #undef TARGET_PASS_BY_REFERENCE
1226 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1227 #undef TARGET_ARG_PARTIAL_BYTES
1228 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1230 #undef TARGET_BUILD_BUILTIN_VA_LIST
1231 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1233 #undef TARGET_EXPAND_BUILTIN_VA_START
1234 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1236 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1237 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1239 #undef TARGET_EH_RETURN_FILTER_MODE
1240 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1242 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1243 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1245 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1246 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1248 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1249 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1251 #undef TARGET_HANDLE_OPTION
1252 #define TARGET_HANDLE_OPTION rs6000_handle_option
1254 #undef TARGET_DEFAULT_TARGET_FLAGS
1255 #define TARGET_DEFAULT_TARGET_FLAGS \
1258 #undef TARGET_STACK_PROTECT_FAIL
1259 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1261 /* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
1262 The PowerPC architecture requires only weak consistency among
1263 processors--that is, memory accesses between processors need not be
1264 sequentially consistent and memory accesses among processors can occur
1265 in any order. The ability to order memory accesses weakly provides
1266 opportunities for more efficient use of the system bus. Unless a
1267 dependency exists, the 604e allows read operations to precede store operations. */
1269 #undef TARGET_RELAXED_ORDERING
1270 #define TARGET_RELAXED_ORDERING true
1273 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1274 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1277 /* Use a 32-bit anchor range. This leads to sequences like:
1279 addis tmp,anchor,high
1282 where tmp itself acts as an anchor, and can be shared between
1283 accesses to the same 64k page. */
1284 #undef TARGET_MIN_ANCHOR_OFFSET
1285 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1286 #undef TARGET_MAX_ANCHOR_OFFSET
1287 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
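/* Illustrative sketch only (not from the original sources): the conventional
   way a 32-bit anchor-relative offset is split into the 16-bit high part fed
   to addis and the signed 16-bit low part used as the D-form displacement.
   The macro names are hypothetical and assume an arithmetic right shift. */
#if 0
#define ANCHOR_HIGH(OFF) (((OFF) + 0x8000) >> 16) /* addis immediate; the
   +0x8000 compensates for the low half being sign-extended by the access. */
#define ANCHOR_LOW(OFF) ((OFF) - (ANCHOR_HIGH (OFF) << 16)) /* displacement in
   [-0x8000, 0x7fff], so accesses within one 64k page share the addis. */
#endif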
1288 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1289 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1291 #undef TARGET_BUILTIN_RECIPROCAL
1292 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1294 #undef TARGET_EXPAND_TO_RTL_HOOK
1295 #define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot
1297 #undef TARGET_INSTANTIATE_DECLS
1298 #define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls
1300 struct gcc_target targetm = TARGET_INITIALIZER;
1303 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1306 rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
1308 /* The GPRs can hold any mode, but values bigger than one register
1309 cannot go past R31. */
1310 if (INT_REGNO_P (regno))
1311 return INT_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1);
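/* (With 32-bit registers, for example, a DImode value needs two GPRs, so the
   check above rejects r31 as a starting register.) */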
1313 /* The float registers can only hold floating modes and DImode.
1314 This excludes the 32-bit decimal float mode for now. */
1315 if (FP_REGNO_P (regno))
1317 ((SCALAR_FLOAT_MODE_P (mode)
1318 && (mode != TDmode || (regno % 2) == 0)
1319 && FP_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1))
1320 || (GET_MODE_CLASS (mode) == MODE_INT
1321 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1322 || (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1323 && PAIRED_VECTOR_MODE (mode)));
1325 /* The CR register can only hold CC modes. */
1326 if (CR_REGNO_P (regno))
1327 return GET_MODE_CLASS (mode) == MODE_CC;
1329 if (XER_REGNO_P (regno))
1330 return mode == PSImode;
1332 /* AltiVec only in AltiVec registers. */
1333 if (ALTIVEC_REGNO_P (regno))
1334 return ALTIVEC_VECTOR_MODE (mode);
1336 /* ...but GPRs can hold SIMD data on the SPE in one register. */
1337 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1340 /* We cannot put TImode anywhere except the general registers, and it must
1341 be able to fit within the register set. */
1343 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
1346 /* Initialize rs6000_hard_regno_mode_ok_p table. */
1348 rs6000_init_hard_regno_mode_ok (void)
1352 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
1353 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1354 if (rs6000_hard_regno_mode_ok (r, m))
1355 rs6000_hard_regno_mode_ok_p[m][r] = true;
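/* Caching the predicate here lets later register/mode queries be answered
   with a simple table lookup instead of recomputing
   rs6000_hard_regno_mode_ok. */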
1359 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
1362 darwin_rs6000_override_options (void)
1364 /* The Darwin ABI always includes AltiVec, can't be (validly) turned off. */
1366 rs6000_altivec_abi = 1;
1367 TARGET_ALTIVEC_VRSAVE = 1;
1368 if (DEFAULT_ABI == ABI_DARWIN)
1370 if (MACHO_DYNAMIC_NO_PIC_P)
1373 warning (0, "-mdynamic-no-pic overrides -fpic or -fPIC");
1376 else if (flag_pic == 1)
1381 if (TARGET_64BIT && ! TARGET_POWERPC64)
1383 target_flags |= MASK_POWERPC64;
1384 warning (0, "-m64 requires PowerPC64 architecture, enabling");
1388 rs6000_default_long_calls = 1;
1389 target_flags |= MASK_SOFT_FLOAT;
1392 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes AltiVec. */
1394 if (!flag_mkernel && !flag_apple_kext
1396 && ! (target_flags_explicit & MASK_ALTIVEC))
1397 target_flags |= MASK_ALTIVEC;
1399 /* Unless the user (not the configurer) has explicitly overridden
1400 it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
1401 G4 unless targeting the kernel. */
1404 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
1405 && ! (target_flags_explicit & MASK_ALTIVEC)
1406 && ! rs6000_select[1].string)
1408 target_flags |= MASK_ALTIVEC;
1413 /* If not otherwise specified by a target, make 'long double' equivalent to 'double'. */
1416 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
1417 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
1420 /* Override command line options. Mostly we process the processor
1421 type and sometimes adjust other TARGET_ options. */
1424 rs6000_override_options (const char *default_cpu)
1427 struct rs6000_cpu_select *ptr;
1430 /* Simplifications for entries below. */
1433 POWERPC_BASE_MASK = MASK_POWERPC | MASK_NEW_MNEMONICS,
1434 POWERPC_7400_MASK = POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_ALTIVEC
1437 /* This table occasionally claims that a processor does not support
1438 a particular feature even though it does, but the feature is slower
1439 than the alternative. Thus, it shouldn't be relied on as a
1440 complete description of the processor's support.
1442 Please keep this list in order, and don't forget to update the
1443 documentation in invoke.texi when adding a new processor or
1447 const char *const name; /* Canonical processor name. */
1448 const enum processor_type processor; /* Processor type enum value. */
1449 const int target_enable; /* Target flags to enable. */
1450 } const processor_target_table[]
1451 = {{"401", PROCESSOR_PPC403, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
1452 {"403", PROCESSOR_PPC403,
1453 POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_STRICT_ALIGN},
1454 {"405", PROCESSOR_PPC405,
1455 POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
1456 {"405fp", PROCESSOR_PPC405,
1457 POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
1458 {"440", PROCESSOR_PPC440,
1459 POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
1460 {"440fp", PROCESSOR_PPC440,
1461 POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
1462 {"464", PROCESSOR_PPC440,
1463 POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
1464 {"464fp", PROCESSOR_PPC440,
1465 POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
1466 {"505", PROCESSOR_MPCCORE, POWERPC_BASE_MASK},
1467 {"601", PROCESSOR_PPC601,
1468 MASK_POWER | POWERPC_BASE_MASK | MASK_MULTIPLE | MASK_STRING},
1469 {"602", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
1470 {"603", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
1471 {"603e", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
1472 {"604", PROCESSOR_PPC604, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
1473 {"604e", PROCESSOR_PPC604e, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
1474 {"620", PROCESSOR_PPC620,
1475 POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
1476 {"630", PROCESSOR_PPC630,
1477 POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
1478 {"740", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
1479 {"7400", PROCESSOR_PPC7400, POWERPC_7400_MASK},
1480 {"7450", PROCESSOR_PPC7450, POWERPC_7400_MASK},
1481 {"750", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
1482 {"801", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
1483 {"821", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
1484 {"823", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
1485 {"8540", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN},
1486 /* 8548 has a dummy entry for now. */
1487 {"8548", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN},
1488 {"e300c2", PROCESSOR_PPCE300C2, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
1489 {"e300c3", PROCESSOR_PPCE300C3, POWERPC_BASE_MASK},
1490 {"e500mc", PROCESSOR_PPCE500MC, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
1491 {"860", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
1492 {"970", PROCESSOR_POWER4,
1493 POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
1494 {"cell", PROCESSOR_CELL,
1495 POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
1496 {"common", PROCESSOR_COMMON, MASK_NEW_MNEMONICS},
1497 {"ec603e", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
1498 {"G3", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
1499 {"G4", PROCESSOR_PPC7450, POWERPC_7400_MASK},
1500 {"G5", PROCESSOR_POWER4,
1501 POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
1502 {"power", PROCESSOR_POWER, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
1503 {"power2", PROCESSOR_POWER,
1504 MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING},
1505 {"power3", PROCESSOR_PPC630,
1506 POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
1507 {"power4", PROCESSOR_POWER4,
1508 POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
1510 {"power5", PROCESSOR_POWER5,
1511 POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
1512 | MASK_MFCRF | MASK_POPCNTB},
1513 {"power5+", PROCESSOR_POWER5,
1514 POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
1515 | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND},
1516 {"power6", PROCESSOR_POWER6,
1517 POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
1518 | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP},
1519 {"power6x", PROCESSOR_POWER6,
1520 POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
1521 | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP
1523 {"power7", PROCESSOR_POWER5,
1524 POWERPC_7400_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_MFCRF
1525 | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP},
1526 {"powerpc", PROCESSOR_POWERPC, POWERPC_BASE_MASK},
1527 {"powerpc64", PROCESSOR_POWERPC64,
1528 POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
1529 {"rios", PROCESSOR_RIOS1, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
1530 {"rios1", PROCESSOR_RIOS1, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
1531 {"rios2", PROCESSOR_RIOS2,
1532 MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING},
1533 {"rsc", PROCESSOR_PPC601, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
1534 {"rsc1", PROCESSOR_PPC601, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
1535 {"rs64", PROCESSOR_RS64A,
1536 POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64}
1539 const size_t ptt_size = ARRAY_SIZE (processor_target_table);
1541 /* Some OSs don't support saving the high part of 64-bit registers on
1542 context switch. Other OSs don't support saving Altivec registers.
1543 On those OSs, we don't touch the MASK_POWERPC64 or MASK_ALTIVEC
1544 settings; if the user wants either, the user must explicitly specify
1545 them and we won't interfere with the user's specification. */
1548 POWER_MASKS = MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING,
1549 POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT | MASK_STRICT_ALIGN
1550 | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
1551 | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
1552 | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP)
1555 set_masks = POWER_MASKS | POWERPC_MASKS | MASK_SOFT_FLOAT;
1556 #ifdef OS_MISSING_POWERPC64
1557 if (OS_MISSING_POWERPC64)
1558 set_masks &= ~MASK_POWERPC64;
1560 #ifdef OS_MISSING_ALTIVEC
1561 if (OS_MISSING_ALTIVEC)
1562 set_masks &= ~MASK_ALTIVEC;
1565 /* Don't override by the processor default if given explicitly. */
1566 set_masks &= ~target_flags_explicit;
1568 /* Identify the processor type. */
1569 rs6000_select[0].string = default_cpu;
1570 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
1572 for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
1574 ptr = &rs6000_select[i];
1575 if (ptr->string != (char *)0 && ptr->string[0] != '\0')
1577 for (j = 0; j < ptt_size; j++)
1578 if (! strcmp (ptr->string, processor_target_table[j].name))
1580 if (ptr->set_tune_p)
1581 rs6000_cpu = processor_target_table[j].processor;
1583 if (ptr->set_arch_p)
1585 target_flags &= ~set_masks;
1586 target_flags |= (processor_target_table[j].target_enable
1593 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
1597 if ((TARGET_E500 || rs6000_cpu == PROCESSOR_PPCE500MC)
1598 && !rs6000_explicit_options.isel)
1601 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
1602 || rs6000_cpu == PROCESSOR_PPCE500MC)
1605 error ("AltiVec not supported in this target");
1607 error ("Spe not supported in this target");
1610 /* Disable cell micro code if we are optimizing for the cell
1611 and not optimizing for size. */
1612 if (rs6000_gen_cell_microcode == -1)
1613 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
1616 /* If we are optimizing big endian systems for space, use the load/store
1617 multiple and string instructions unless we are not generating Cell microcode. */
1619 if (BYTES_BIG_ENDIAN && optimize_size && !rs6000_gen_cell_microcode)
1620 target_flags |= ~target_flags_explicit & (MASK_MULTIPLE | MASK_STRING);
1622 /* Don't allow -mmultiple or -mstring on little endian systems
1623 unless the cpu is a 750, because the hardware doesn't support the
1624 instructions used in little endian mode, and causes an alignment
1625 trap. The 750 does not cause an alignment trap (except when the
1626 target is unaligned). */
1628 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
1630 if (TARGET_MULTIPLE)
1632 target_flags &= ~MASK_MULTIPLE;
1633 if ((target_flags_explicit & MASK_MULTIPLE) != 0)
1634 warning (0, "-mmultiple is not supported on little endian systems");
1639 target_flags &= ~MASK_STRING;
1640 if ((target_flags_explicit & MASK_STRING) != 0)
1641 warning (0, "-mstring is not supported on little endian systems");
1645 /* Set debug flags */
1646 if (rs6000_debug_name)
1648 if (! strcmp (rs6000_debug_name, "all"))
1649 rs6000_debug_stack = rs6000_debug_arg = 1;
1650 else if (! strcmp (rs6000_debug_name, "stack"))
1651 rs6000_debug_stack = 1;
1652 else if (! strcmp (rs6000_debug_name, "arg"))
1653 rs6000_debug_arg = 1;
1655 error ("unknown -mdebug-%s switch", rs6000_debug_name);
1658 if (rs6000_traceback_name)
1660 if (! strncmp (rs6000_traceback_name, "full", 4))
1661 rs6000_traceback = traceback_full;
1662 else if (! strncmp (rs6000_traceback_name, "part", 4))
1663 rs6000_traceback = traceback_part;
1664 else if (! strncmp (rs6000_traceback_name, "no", 2))
1665 rs6000_traceback = traceback_none;
1667 error ("unknown -mtraceback arg %qs; expecting %<full%>, %<partial%> or %<none%>",
1668 rs6000_traceback_name);
1671 if (!rs6000_explicit_options.long_double)
1672 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
1674 #ifndef POWERPC_LINUX
1675 if (!rs6000_explicit_options.ieee)
1676 rs6000_ieeequad = 1;
1679 /* Enable Altivec ABI for AIX -maltivec. */
1680 if (TARGET_XCOFF && TARGET_ALTIVEC)
1681 rs6000_altivec_abi = 1;
1683 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
1684 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
1685 be explicitly overridden in either case. */
1688 if (!rs6000_explicit_options.altivec_abi
1689 && (TARGET_64BIT || TARGET_ALTIVEC))
1690 rs6000_altivec_abi = 1;
1692 /* Enable VRSAVE for AltiVec ABI, unless explicitly overridden. */
1693 if (!rs6000_explicit_options.vrsave)
1694 TARGET_ALTIVEC_VRSAVE = rs6000_altivec_abi;
1697 /* Set the Darwin64 ABI as default for 64-bit Darwin. */
1698 if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
1700 rs6000_darwin64_abi = 1;
1702 darwin_one_byte_bool = 1;
1704 /* Default to natural alignment, for better performance. */
1705 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
1708 /* Place FP constants in the constant pool instead of the TOC
1709 if section anchors are enabled. */
1710 if (flag_section_anchors)
1711 TARGET_NO_FP_IN_TOC = 1;
1713 /* Handle -mtls-size option. */
1714 rs6000_parse_tls_size_option ();
1716 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1717 SUBTARGET_OVERRIDE_OPTIONS;
1719 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
1720 SUBSUBTARGET_OVERRIDE_OPTIONS;
1722 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
1723 SUB3TARGET_OVERRIDE_OPTIONS;
1726 if (TARGET_E500 || rs6000_cpu == PROCESSOR_PPCE500MC)
1728 /* The e500 and e500mc do not have string instructions, and we set
1729 MASK_STRING above when optimizing for size. */
1730 if ((target_flags & MASK_STRING) != 0)
1731 target_flags = target_flags & ~MASK_STRING;
1733 else if (rs6000_select[1].string != NULL)
1735 /* For the powerpc-eabispe configuration, we set all these by
1736 default, so let's unset them if we manually set another
1737 CPU that is not the E500. */
1738 if (!rs6000_explicit_options.spe_abi)
1740 if (!rs6000_explicit_options.spe)
1742 if (!rs6000_explicit_options.float_gprs)
1743 rs6000_float_gprs = 0;
1744 if (!rs6000_explicit_options.isel)
1748 /* Detect invalid option combinations with E500. */
1751 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
1752 && rs6000_cpu != PROCESSOR_POWER5
1753 && rs6000_cpu != PROCESSOR_POWER6
1754 && rs6000_cpu != PROCESSOR_CELL);
1755 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
1756 || rs6000_cpu == PROCESSOR_POWER5);
1757 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
1758 || rs6000_cpu == PROCESSOR_POWER5
1759 || rs6000_cpu == PROCESSOR_POWER6);
1761 rs6000_sched_restricted_insns_priority
1762 = (rs6000_sched_groups ? 1 : 0);
1764 /* Handle -msched-costly-dep option. */
1765 rs6000_sched_costly_dep
1766 = (rs6000_sched_groups ? store_to_load_dep_costly : no_dep_costly);
1768 if (rs6000_sched_costly_dep_str)
1770 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
1771 rs6000_sched_costly_dep = no_dep_costly;
1772 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
1773 rs6000_sched_costly_dep = all_deps_costly;
1774 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
1775 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
1776 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
1777 rs6000_sched_costly_dep = store_to_load_dep_costly;
1779 rs6000_sched_costly_dep = atoi (rs6000_sched_costly_dep_str);
1782 /* Handle -minsert-sched-nops option. */
1783 rs6000_sched_insert_nops
1784 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
1786 if (rs6000_sched_insert_nops_str)
1788 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
1789 rs6000_sched_insert_nops = sched_finish_none;
1790 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
1791 rs6000_sched_insert_nops = sched_finish_pad_groups;
1792 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
1793 rs6000_sched_insert_nops = sched_finish_regroup_exact;
1795 rs6000_sched_insert_nops = atoi (rs6000_sched_insert_nops_str);
1798 #ifdef TARGET_REGNAMES
1799 /* If the user desires alternate register names, copy in the
1800 alternate names now. */
1801 if (TARGET_REGNAMES)
1802 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
1805 /* Set aix_struct_return last, after the ABI is determined.
1806 If -maix-struct-return or -msvr4-struct-return was explicitly
1807 used, don't override with the ABI default. */
1808 if (!rs6000_explicit_options.aix_struct_ret)
1809 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
1811 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
1812 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
1815 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
1817 /* We can only guarantee the availability of DI pseudo-ops when
1818 assembling for 64-bit targets. */
1821 targetm.asm_out.aligned_op.di = NULL;
1822 targetm.asm_out.unaligned_op.di = NULL;
1825 /* Set branch target alignment, if not optimizing for size. */
1828 /* The Cell wants 8-byte alignment for dual issue. */
1829 if (rs6000_cpu == PROCESSOR_CELL)
1831 if (align_functions <= 0)
1832 align_functions = 8;
1833 if (align_jumps <= 0)
1835 if (align_loops <= 0)
1838 if (rs6000_align_branch_targets)
1840 if (align_functions <= 0)
1841 align_functions = 16;
1842 if (align_jumps <= 0)
1844 if (align_loops <= 0)
1847 if (align_jumps_max_skip <= 0)
1848 align_jumps_max_skip = 15;
1849 if (align_loops_max_skip <= 0)
1850 align_loops_max_skip = 15;
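/* These alignment values are only defaults: the <= 0 checks above mean
   that any positive value already present (e.g. from an explicit
   -falign-functions=N / -falign-jumps=N / -falign-loops=N) is left
   untouched.  */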
1853 /* Arrange to save and restore machine status around nested functions. */
1854 init_machine_status = rs6000_init_machine_status;
1856 /* We should always be splitting complex arguments, but we can't break
1857 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
1858 if (DEFAULT_ABI != ABI_AIX)
1859 targetm.calls.split_complex_arg = NULL;
1861 /* Initialize rs6000_cost with the appropriate target costs. */
1863 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
1867 case PROCESSOR_RIOS1:
1868 rs6000_cost = &rios1_cost;
1871 case PROCESSOR_RIOS2:
1872 rs6000_cost = &rios2_cost;
1875 case PROCESSOR_RS64A:
1876 rs6000_cost = &rs64a_cost;
1879 case PROCESSOR_MPCCORE:
1880 rs6000_cost = &mpccore_cost;
1883 case PROCESSOR_PPC403:
1884 rs6000_cost = &ppc403_cost;
1887 case PROCESSOR_PPC405:
1888 rs6000_cost = &ppc405_cost;
1891 case PROCESSOR_PPC440:
1892 rs6000_cost = &ppc440_cost;
1895 case PROCESSOR_PPC601:
1896 rs6000_cost = &ppc601_cost;
1899 case PROCESSOR_PPC603:
1900 rs6000_cost = &ppc603_cost;
1903 case PROCESSOR_PPC604:
1904 rs6000_cost = &ppc604_cost;
1907 case PROCESSOR_PPC604e:
1908 rs6000_cost = &ppc604e_cost;
1911 case PROCESSOR_PPC620:
1912 rs6000_cost = &ppc620_cost;
1915 case PROCESSOR_PPC630:
1916 rs6000_cost = &ppc630_cost;
1919 case PROCESSOR_CELL:
1920 rs6000_cost = &ppccell_cost;
1923 case PROCESSOR_PPC750:
1924 case PROCESSOR_PPC7400:
1925 rs6000_cost = &ppc750_cost;
1928 case PROCESSOR_PPC7450:
1929 rs6000_cost = &ppc7450_cost;
1932 case PROCESSOR_PPC8540:
1933 rs6000_cost = &ppc8540_cost;
1936 case PROCESSOR_PPCE300C2:
1937 case PROCESSOR_PPCE300C3:
1938 rs6000_cost = &ppce300c2c3_cost;
1941 case PROCESSOR_PPCE500MC:
1942 rs6000_cost = &ppce500mc_cost;
1945 case PROCESSOR_POWER4:
1946 case PROCESSOR_POWER5:
1947 rs6000_cost = &power4_cost;
1950 case PROCESSOR_POWER6:
1951 rs6000_cost = &power6_cost;
1958 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
1959 set_param_value ("simultaneous-prefetches",
1960 rs6000_cost->simultaneous_prefetches);
1961 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
1962 set_param_value ("l1-cache-size", rs6000_cost->l1_cache_size);
1963 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
1964 set_param_value ("l1-cache-line-size", rs6000_cost->cache_line_size);
1965 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
1966 set_param_value ("l2-cache-size", rs6000_cost->l2_cache_size);
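/* These --param defaults come from the cost table selected above and
   are applied only when the parameter was not already set explicitly
   (the PARAM_SET_P checks), so e.g. a --param l1-cache-size=... on the
   command line always wins.  */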
1968 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
1969 can be optimized to ap = __builtin_next_arg (0). */
1970 if (DEFAULT_ABI != ABI_V4)
1971 targetm.expand_builtin_va_start = NULL;
1973 /* Set up single/double float flags.
1974 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
1975 then set both flags. */
1976 if (TARGET_HARD_FLOAT && TARGET_FPRS
1977 && rs6000_single_float == 0 && rs6000_double_float == 0)
1978 rs6000_single_float = rs6000_double_float = 1;
1980 /* Reset single and double FP flags if target is E500. */
1983 rs6000_single_float = rs6000_double_float = 0;
1984 if (TARGET_E500_SINGLE)
1985 rs6000_single_float = 1;
1986 if (TARGET_E500_DOUBLE)
1987 rs6000_single_float = rs6000_double_float = 1;
1990 rs6000_init_hard_regno_mode_ok ();
1993 /* Implement targetm.vectorize.builtin_mask_for_load. */
1995 rs6000_builtin_mask_for_load (void)
1998 return altivec_builtin_mask_for_load;
2003 /* Implement targetm.vectorize.builtin_conversion.
2004 Returns a decl of a function that implements conversion of an integer vector
2005 into a floating-point vector, or vice-versa. TYPE is the type of the integer
2006 side of the conversion.
2007 Return NULL_TREE if it is not available. */
2009 rs6000_builtin_conversion (enum tree_code code, tree type)
2011 if (!TARGET_ALTIVEC)
2016 case FIX_TRUNC_EXPR:
2017 switch (TYPE_MODE (type))
2020 return TYPE_UNSIGNED (type)
2021 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VCTUXS]
2022 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VCTSXS];
2028 switch (TYPE_MODE (type))
2031 return TYPE_UNSIGNED (type)
2032 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VCFUX]
2033 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VCFSX];
2043 /* Implement targetm.vectorize.builtin_mul_widen_even. */
2045 rs6000_builtin_mul_widen_even (tree type)
2047 if (!TARGET_ALTIVEC)
2050 switch (TYPE_MODE (type))
2053 return TYPE_UNSIGNED (type)
2054 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULEUH]
2055 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULESH];
2058 return TYPE_UNSIGNED (type)
2059 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULEUB]
2060 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULESB];
2066 /* Implement targetm.vectorize.builtin_mul_widen_odd. */
2068 rs6000_builtin_mul_widen_odd (tree type)
2070 if (!TARGET_ALTIVEC)
2073 switch (TYPE_MODE (type))
2076 return TYPE_UNSIGNED (type)
2077 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOUH]
2078 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOSH];
2081 return TYPE_UNSIGNED (type)
2082 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOUB]
2083 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOSB];
2090 /* Return true iff a data reference of TYPE can reach vector alignment (16)
2091 after applying N iterations. This routine does not determine
2092 how many iterations are required to reach the desired alignment. */
2095 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
2102 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
2105 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
2115 /* Assuming that all other types are naturally aligned. CHECKME! */
2120 /* Implement targetm.vectorize.builtin_vec_perm. */
2122 rs6000_builtin_vec_perm (tree type, tree *mask_element_type)
2126 *mask_element_type = unsigned_char_type_node;
2128 switch (TYPE_MODE (type))
2131 d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_16QI];
2135 d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_8HI];
2139 d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_4SI];
2143 d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_4SF];
2154 /* Handle generic options of the form -mfoo=yes/no.
2155 NAME is the option name.
2156 VALUE is the option value.
2157 FLAG is a pointer to the flag in which to store 1 or 0, depending on
2158 whether the option value is 'yes' or 'no' respectively. */
2160 rs6000_parse_yes_no_option (const char *name, const char *value, int *flag)
2164 else if (!strcmp (value, "yes"))
2166 else if (!strcmp (value, "no"))
2169 error ("unknown -m%s= option specified: '%s'", name, value);
2172 /* Validate and record the size specified with the -mtls-size option. */
2175 rs6000_parse_tls_size_option (void)
2177 if (rs6000_tls_size_string == 0)
2179 else if (strcmp (rs6000_tls_size_string, "16") == 0)
2180 rs6000_tls_size = 16;
2181 else if (strcmp (rs6000_tls_size_string, "32") == 0)
2182 rs6000_tls_size = 32;
2183 else if (strcmp (rs6000_tls_size_string, "64") == 0)
2184 rs6000_tls_size = 64;
2186 error ("bad value %qs for -mtls-size switch", rs6000_tls_size_string);
2190 optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
2192 if (DEFAULT_ABI == ABI_DARWIN)
2193 /* The Darwin libraries never set errno, so we might as well
2194 avoid calling them when that's the only reason we would. */
2195 flag_errno_math = 0;
2197 /* Double growth factor to counter reduced min jump length. */
2198 set_param_value ("max-grow-copy-bb-insns", 16);
2200 /* Enable section anchors by default.
2201 Skip section anchors for Objective C and Objective C++
2202 until the front ends are fixed. */
2203 if (!TARGET_MACHO && lang_hooks.name[4] != 'O')
2204 flag_section_anchors = 2;
2207 static enum fpu_type_t
2208 rs6000_parse_fpu_option (const char *option)
2210 if (! strcmp ("none", option)) return FPU_NONE;
2211 if (! strcmp ("sp_lite", option)) return FPU_SF_LITE;
2212 if (! strcmp ("dp_lite", option)) return FPU_DF_LITE;
2213 if (! strcmp ("sp_full", option)) return FPU_SF_FULL;
2214 if (! strcmp ("dp_full", option)) return FPU_DF_FULL;
2215 error ("unknown value %s for -mfpu", option);
2219 /* Implement TARGET_HANDLE_OPTION. */
2222 rs6000_handle_option (size_t code, const char *arg, int value)
2224 enum fpu_type_t fpu_type = FPU_NONE;
2229 target_flags &= ~(MASK_POWER | MASK_POWER2
2230 | MASK_MULTIPLE | MASK_STRING);
2231 target_flags_explicit |= (MASK_POWER | MASK_POWER2
2232 | MASK_MULTIPLE | MASK_STRING);
2234 case OPT_mno_powerpc:
2235 target_flags &= ~(MASK_POWERPC | MASK_PPC_GPOPT
2236 | MASK_PPC_GFXOPT | MASK_POWERPC64);
2237 target_flags_explicit |= (MASK_POWERPC | MASK_PPC_GPOPT
2238 | MASK_PPC_GFXOPT | MASK_POWERPC64);
2241 target_flags &= ~MASK_MINIMAL_TOC;
2242 TARGET_NO_FP_IN_TOC = 0;
2243 TARGET_NO_SUM_IN_TOC = 0;
2244 target_flags_explicit |= MASK_MINIMAL_TOC;
2245 #ifdef TARGET_USES_SYSV4_OPT
2246 /* Note: V.4 no longer uses a normal TOC, so make -mfull-toc behave
2247 just the same as -mminimal-toc. */
2248 target_flags |= MASK_MINIMAL_TOC;
2249 target_flags_explicit |= MASK_MINIMAL_TOC;
2253 #ifdef TARGET_USES_SYSV4_OPT
2255 /* Make -mtoc behave like -mminimal-toc. */
2256 target_flags |= MASK_MINIMAL_TOC;
2257 target_flags_explicit |= MASK_MINIMAL_TOC;
2261 #ifdef TARGET_USES_AIX64_OPT
2266 target_flags |= MASK_POWERPC64 | MASK_POWERPC;
2267 target_flags |= ~target_flags_explicit & MASK_PPC_GFXOPT;
2268 target_flags_explicit |= MASK_POWERPC64 | MASK_POWERPC;
2271 #ifdef TARGET_USES_AIX64_OPT
2276 target_flags &= ~MASK_POWERPC64;
2277 target_flags_explicit |= MASK_POWERPC64;
2280 case OPT_minsert_sched_nops_:
2281 rs6000_sched_insert_nops_str = arg;
2284 case OPT_mminimal_toc:
2287 TARGET_NO_FP_IN_TOC = 0;
2288 TARGET_NO_SUM_IN_TOC = 0;
2295 target_flags |= (MASK_MULTIPLE | MASK_STRING);
2296 target_flags_explicit |= (MASK_MULTIPLE | MASK_STRING);
2303 target_flags |= (MASK_POWER | MASK_MULTIPLE | MASK_STRING);
2304 target_flags_explicit |= (MASK_POWER | MASK_MULTIPLE | MASK_STRING);
2308 case OPT_mpowerpc_gpopt:
2309 case OPT_mpowerpc_gfxopt:
2312 target_flags |= MASK_POWERPC;
2313 target_flags_explicit |= MASK_POWERPC;
2317 case OPT_maix_struct_return:
2318 case OPT_msvr4_struct_return:
2319 rs6000_explicit_options.aix_struct_ret = true;
2323 rs6000_explicit_options.vrsave = true;
2324 rs6000_parse_yes_no_option ("vrsave", arg, &(TARGET_ALTIVEC_VRSAVE));
2328 rs6000_explicit_options.isel = true;
2329 rs6000_isel = value;
2333 rs6000_explicit_options.isel = true;
2334 rs6000_parse_yes_no_option ("isel", arg, &(rs6000_isel));
2338 rs6000_explicit_options.spe = true;
2343 rs6000_explicit_options.spe = true;
2344 rs6000_parse_yes_no_option ("spe", arg, &(rs6000_spe));
2348 rs6000_debug_name = arg;
2351 #ifdef TARGET_USES_SYSV4_OPT
2353 rs6000_abi_name = arg;
2357 rs6000_sdata_name = arg;
2360 case OPT_mtls_size_:
2361 rs6000_tls_size_string = arg;
2364 case OPT_mrelocatable:
2367 target_flags |= MASK_MINIMAL_TOC;
2368 target_flags_explicit |= MASK_MINIMAL_TOC;
2369 TARGET_NO_FP_IN_TOC = 1;
2373 case OPT_mrelocatable_lib:
2376 target_flags |= MASK_RELOCATABLE | MASK_MINIMAL_TOC;
2377 target_flags_explicit |= MASK_RELOCATABLE | MASK_MINIMAL_TOC;
2378 TARGET_NO_FP_IN_TOC = 1;
2382 target_flags &= ~MASK_RELOCATABLE;
2383 target_flags_explicit |= MASK_RELOCATABLE;
2389 if (!strcmp (arg, "altivec"))
2391 rs6000_explicit_options.altivec_abi = true;
2392 rs6000_altivec_abi = 1;
2394 /* Enabling the AltiVec ABI turns off the SPE ABI. */
2397 else if (! strcmp (arg, "no-altivec"))
2399 rs6000_explicit_options.altivec_abi = true;
2400 rs6000_altivec_abi = 0;
2402 else if (! strcmp (arg, "spe"))
2404 rs6000_explicit_options.spe_abi = true;
2406 rs6000_altivec_abi = 0;
2407 if (!TARGET_SPE_ABI)
2408 error ("not configured for ABI: '%s'", arg);
2410 else if (! strcmp (arg, "no-spe"))
2412 rs6000_explicit_options.spe_abi = true;
2416 /* These are here for testing during development only; please do not
2417 document them in the manual. */
2418 else if (! strcmp (arg, "d64"))
2420 rs6000_darwin64_abi = 1;
2421 warning (0, "Using darwin64 ABI");
2423 else if (! strcmp (arg, "d32"))
2425 rs6000_darwin64_abi = 0;
2426 warning (0, "Using old darwin ABI");
2429 else if (! strcmp (arg, "ibmlongdouble"))
2431 rs6000_explicit_options.ieee = true;
2432 rs6000_ieeequad = 0;
2433 warning (0, "Using IBM extended precision long double");
2435 else if (! strcmp (arg, "ieeelongdouble"))
2437 rs6000_explicit_options.ieee = true;
2438 rs6000_ieeequad = 1;
2439 warning (0, "Using IEEE extended precision long double");
2444 error ("unknown ABI specified: '%s'", arg);
2450 rs6000_select[1].string = arg;
2454 rs6000_select[2].string = arg;
2457 case OPT_mtraceback_:
2458 rs6000_traceback_name = arg;
2461 case OPT_mfloat_gprs_:
2462 rs6000_explicit_options.float_gprs = true;
2463 if (! strcmp (arg, "yes") || ! strcmp (arg, "single"))
2464 rs6000_float_gprs = 1;
2465 else if (! strcmp (arg, "double"))
2466 rs6000_float_gprs = 2;
2467 else if (! strcmp (arg, "no"))
2468 rs6000_float_gprs = 0;
2471 error ("invalid option for -mfloat-gprs: '%s'", arg);
2476 case OPT_mlong_double_:
2477 rs6000_explicit_options.long_double = true;
2478 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
2479 if (value != 64 && value != 128)
2481 error ("unknown switch -mlong-double-%s", arg);
2482 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
2486 rs6000_long_double_type_size = value;
2489 case OPT_msched_costly_dep_:
2490 rs6000_sched_costly_dep_str = arg;
2494 rs6000_explicit_options.alignment = true;
2495 if (! strcmp (arg, "power"))
2497 /* On 64-bit Darwin, power alignment is ABI-incompatible with
2498 some C library functions, so warn about it. The flag may be
2499 useful for performance studies from time to time though, so
2500 don't disable it entirely. */
2501 if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
2502 warning (0, "-malign-power is not supported for 64-bit Darwin;"
2503 " it is incompatible with the installed C and C++ libraries");
2504 rs6000_alignment_flags = MASK_ALIGN_POWER;
2506 else if (! strcmp (arg, "natural"))
2507 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
2510 error ("unknown -malign-XXXXX option specified: '%s'", arg);
2515 case OPT_msingle_float:
2516 if (!TARGET_SINGLE_FPU)
2517 warning (0, "-msingle-float option equivalent to -mhard-float");
2518 /* -msingle-float implies -mno-double-float and TARGET_HARD_FLOAT. */
2519 rs6000_double_float = 0;
2520 target_flags &= ~MASK_SOFT_FLOAT;
2521 target_flags_explicit |= MASK_SOFT_FLOAT;
2524 case OPT_mdouble_float:
2525 /* -mdouble-float implies -msingle-float and TARGET_HARD_FLOAT. */
2526 rs6000_single_float = 1;
2527 target_flags &= ~MASK_SOFT_FLOAT;
2528 target_flags_explicit |= MASK_SOFT_FLOAT;
2531 case OPT_msimple_fpu:
2532 if (!TARGET_SINGLE_FPU)
2533 warning (0, "-msimple-fpu option ignored");
2536 case OPT_mhard_float:
2537 /* -mhard-float implies -msingle-float and -mdouble-float. */
2538 rs6000_single_float = rs6000_double_float = 1;
2541 case OPT_msoft_float:
2542 /* -msoft-float implies -mno-single-float and -mno-double-float. */
2543 rs6000_single_float = rs6000_double_float = 0;
2547 fpu_type = rs6000_parse_fpu_option (arg);
2548 if (fpu_type != FPU_NONE)
2549 /* If -mfpu is not none, then turn off SOFT_FLOAT, turn on HARD_FLOAT. */
2551 target_flags &= ~MASK_SOFT_FLOAT;
2552 target_flags_explicit |= MASK_SOFT_FLOAT;
2553 rs6000_xilinx_fpu = 1;
2554 if (fpu_type == FPU_SF_LITE || fpu_type == FPU_SF_FULL)
2555 rs6000_single_float = 1;
2556 if (fpu_type == FPU_DF_LITE || fpu_type == FPU_DF_FULL)
2557 rs6000_single_float = rs6000_double_float = 1;
2558 if (fpu_type == FPU_SF_LITE || fpu_type == FPU_DF_LITE)
2559 rs6000_simple_fpu = 1;
2563 /* -mfpu=none is equivalent to -msoft-float */
2564 target_flags |= MASK_SOFT_FLOAT;
2565 target_flags_explicit |= MASK_SOFT_FLOAT;
2566 rs6000_single_float = rs6000_double_float = 0;
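/* Summarizing the -mfpu= handling above: the sp_* variants enable only
   single-precision hardware float, the dp_* variants enable both single
   and double precision, the *_lite variants additionally select the
   simple FPU, and -mfpu=none falls back to software floating point.  */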
2573 /* Do anything needed at the start of the asm file. */
2576 rs6000_file_start (void)
2580 const char *start = buffer;
2581 struct rs6000_cpu_select *ptr;
2582 const char *default_cpu = TARGET_CPU_DEFAULT;
2583 FILE *file = asm_out_file;
2585 default_file_start ();
2587 #ifdef TARGET_BI_ARCH
2588 if ((TARGET_DEFAULT ^ target_flags) & MASK_64BIT)
2592 if (flag_verbose_asm)
2594 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
2595 rs6000_select[0].string = default_cpu;
2597 for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
2599 ptr = &rs6000_select[i];
2600 if (ptr->string != (char *)0 && ptr->string[0] != '\0')
2602 fprintf (file, "%s %s%s", start, ptr->name, ptr->string);
2607 if (PPC405_ERRATUM77)
2609 fprintf (file, "%s PPC405CR_ERRATUM77", start);
2613 #ifdef USING_ELFOS_H
2614 switch (rs6000_sdata)
2616 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
2617 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
2618 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
2619 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
2622 if (rs6000_sdata && g_switch_value)
2624 fprintf (file, "%s -G " HOST_WIDE_INT_PRINT_UNSIGNED, start,
2634 #ifdef HAVE_AS_GNU_ATTRIBUTE
2635 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
2637 fprintf (file, "\t.gnu_attribute 4, %d\n",
2638 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
2639 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
2641 fprintf (file, "\t.gnu_attribute 8, %d\n",
2642 (TARGET_ALTIVEC_ABI ? 2
2643 : TARGET_SPE_ABI ? 3
2645 fprintf (file, "\t.gnu_attribute 12, %d\n",
2646 aix_struct_return ? 2 : 1);
2651 if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
2653 switch_to_section (toc_section);
2654 switch_to_section (text_section);
2659 /* Return nonzero if this function is known to have a null epilogue. */
2662 direct_return (void)
2664 if (reload_completed)
2666 rs6000_stack_t *info = rs6000_stack_info ();
2668 if (info->first_gp_reg_save == 32
2669 && info->first_fp_reg_save == 64
2670 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
2671 && ! info->lr_save_p
2672 && ! info->cr_save_p
2673 && info->vrsave_mask == 0
2681 /* Return the number of instructions it takes to form a constant in an
2682 integer register. */
2685 num_insns_constant_wide (HOST_WIDE_INT value)
2687 /* signed constant loadable with {cal|addi} */
2688 if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
2691 /* constant loadable with {cau|addis} */
2692 else if ((value & 0xffff) == 0
2693 && (value >> 31 == -1 || value >> 31 == 0))
2696 #if HOST_BITS_PER_WIDE_INT == 64
2697 else if (TARGET_POWERPC64)
2699 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
2700 HOST_WIDE_INT high = value >> 31;
2702 if (high == 0 || high == -1)
2708 return num_insns_constant_wide (high) + 1;
2710 return (num_insns_constant_wide (high)
2711 + num_insns_constant_wide (low) + 1);
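/* Informally, for the costs above: any 16-bit signed value such as
   0x7fff takes one {cal|addi}; a value such as 0x12340000, whose low
   halfword is zero and which sign-extends from 32 bits, takes one
   {cau|addis}; a 32-bit value such as 0x12345678 takes the two-insn
   addis/addi (or ori) pair; and genuine 64-bit values are split into
   32-bit halves, each costed recursively, plus at least one more insn
   to shift and combine the halves.  */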
2720 num_insns_constant (rtx op, enum machine_mode mode)
2722 HOST_WIDE_INT low, high;
2724 switch (GET_CODE (op))
2727 #if HOST_BITS_PER_WIDE_INT == 64
2728 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
2729 && mask64_operand (op, mode))
2733 return num_insns_constant_wide (INTVAL (op));
2736 if (mode == SFmode || mode == SDmode)
2741 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
2742 if (DECIMAL_FLOAT_MODE_P (mode))
2743 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
2745 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
2746 return num_insns_constant_wide ((HOST_WIDE_INT) l);
2749 if (mode == VOIDmode || mode == DImode)
2751 high = CONST_DOUBLE_HIGH (op);
2752 low = CONST_DOUBLE_LOW (op);
2759 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
2760 if (DECIMAL_FLOAT_MODE_P (mode))
2761 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
2763 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
2764 high = l[WORDS_BIG_ENDIAN == 0];
2765 low = l[WORDS_BIG_ENDIAN != 0];
2769 return (num_insns_constant_wide (low)
2770 + num_insns_constant_wide (high));
2773 if ((high == 0 && low >= 0)
2774 || (high == -1 && low < 0))
2775 return num_insns_constant_wide (low);
2777 else if (mask64_operand (op, mode))
2781 return num_insns_constant_wide (high) + 1;
2784 return (num_insns_constant_wide (high)
2785 + num_insns_constant_wide (low) + 1);
2793 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
2794 If the mode of OP is MODE_VECTOR_INT, this simply returns the
2795 corresponding element of the vector, but for V4SFmode and V2SFmode,
2796 the corresponding "float" is interpreted as an SImode integer. */
2799 const_vector_elt_as_int (rtx op, unsigned int elt)
2801 rtx tmp = CONST_VECTOR_ELT (op, elt);
2802 if (GET_MODE (op) == V4SFmode
2803 || GET_MODE (op) == V2SFmode)
2804 tmp = gen_lowpart (SImode, tmp);
2805 return INTVAL (tmp);
2808 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
2809 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
2810 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
2811 all items are set to the same value and contain COPIES replicas of the
2812 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
2813 operand and the others are set to the value of the operand's msb. */
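/* Two informal examples of the STEP/COPIES encoding described above:
   the V8HImode constant { 0x0303, ..., 0x0303 } is a vspltisb of 3 seen
   as halfwords, i.e. COPIES == 2, while { 0, 5, 0, 5, 0, 5, 0, 5 } is a
   vspltisw of 5 seen as halfwords, i.e. STEP == 2 with the remaining
   elements holding the operand's most significant bit (here 0).  */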
2816 vspltis_constant (rtx op, unsigned step, unsigned copies)
2818 enum machine_mode mode = GET_MODE (op);
2819 enum machine_mode inner = GET_MODE_INNER (mode);
2822 unsigned nunits = GET_MODE_NUNITS (mode);
2823 unsigned bitsize = GET_MODE_BITSIZE (inner);
2824 unsigned mask = GET_MODE_MASK (inner);
2826 HOST_WIDE_INT val = const_vector_elt_as_int (op, nunits - 1);
2827 HOST_WIDE_INT splat_val = val;
2828 HOST_WIDE_INT msb_val = val > 0 ? 0 : -1;
2830 /* Construct the value to be splatted, if possible. If not, return 0. */
2831 for (i = 2; i <= copies; i *= 2)
2833 HOST_WIDE_INT small_val;
2835 small_val = splat_val >> bitsize;
2837 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
2839 splat_val = small_val;
2842 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
2843 if (EASY_VECTOR_15 (splat_val))
2846 /* Also check if we can splat, and then add the result to itself. Do so if
2847 the value is positive, or if the splat instruction is using OP's mode;
2848 for splat_val < 0, the splat and the add should use the same mode. */
2849 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
2850 && (splat_val >= 0 || (step == 1 && copies == 1)))
2856 /* Check if VAL is present in every STEP-th element, and the
2857 other elements are filled with its most significant bit. */
2858 for (i = 0; i < nunits - 1; ++i)
2860 HOST_WIDE_INT desired_val;
2861 if (((i + 1) & (step - 1)) == 0)
2864 desired_val = msb_val;
2866 if (desired_val != const_vector_elt_as_int (op, i))
2874 /* Return true if OP is of the given MODE and can be synthesized
2875 with a vspltisb, vspltish or vspltisw. */
2878 easy_altivec_constant (rtx op, enum machine_mode mode)
2880 unsigned step, copies;
2882 if (mode == VOIDmode)
2883 mode = GET_MODE (op);
2884 else if (mode != GET_MODE (op))
2887 /* Start with a vspltisw. */
2888 step = GET_MODE_NUNITS (mode) / 4;
2891 if (vspltis_constant (op, step, copies))
2894 /* Then try with a vspltish. */
2900 if (vspltis_constant (op, step, copies))
2903 /* And finally a vspltisb. */
2909 if (vspltis_constant (op, step, copies))
2915 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
2916 result is OP. Abort if it is not possible. */
2919 gen_easy_altivec_constant (rtx op)
2921 enum machine_mode mode = GET_MODE (op);
2922 int nunits = GET_MODE_NUNITS (mode);
2923 rtx last = CONST_VECTOR_ELT (op, nunits - 1);
2924 unsigned step = nunits / 4;
2925 unsigned copies = 1;
2927 /* Start with a vspltisw. */
2928 if (vspltis_constant (op, step, copies))
2929 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));
2931 /* Then try with a vspltish. */
2937 if (vspltis_constant (op, step, copies))
2938 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));
2940 /* And finally a vspltisb. */
2946 if (vspltis_constant (op, step, copies))
2947 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));
2953 output_vec_const_move (rtx *operands)
2956 enum machine_mode mode;
2961 mode = GET_MODE (dest);
2966 if (zero_constant (vec, mode))
2967 return "vxor %0,%0,%0";
2969 splat_vec = gen_easy_altivec_constant (vec);
2970 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
2971 operands[1] = XEXP (splat_vec, 0);
2972 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
2975 switch (GET_MODE (splat_vec))
2978 return "vspltisw %0,%1";
2981 return "vspltish %0,%1";
2984 return "vspltisb %0,%1";
2991 gcc_assert (TARGET_SPE);
2993 /* Vector constant 0 is handled as a splitter of V2SI, and in the
2994 pattern of V1DI, V4HI, and V2SF.
2996 FIXME: We should probably return # and add post reload
2997 splitters for these, but this way is so easy ;-). */
2998 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
2999 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
3000 operands[1] = CONST_VECTOR_ELT (vec, 0);
3001 operands[2] = CONST_VECTOR_ELT (vec, 1);
3003 return "li %0,%1\n\tevmergelo %0,%0,%0";
3005 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
3008 /* Initialize TARGET of vector PAIRED to VALS. */
3011 paired_expand_vector_init (rtx target, rtx vals)
3013 enum machine_mode mode = GET_MODE (target);
3014 int n_elts = GET_MODE_NUNITS (mode);
3016 rtx x, new_rtx, tmp, constant_op, op1, op2;
3019 for (i = 0; i < n_elts; ++i)
3021 x = XVECEXP (vals, 0, i);
3022 if (!CONSTANT_P (x))
3027 /* Load from constant pool. */
3028 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
3034 /* The vector is initialized only with non-constants. */
3035 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
3036 XVECEXP (vals, 0, 1));
3038 emit_move_insn (target, new_rtx);
3042 /* One field is non-constant and the other one is a constant. Load the
3043 constant from the constant pool and use the ps_merge instruction to
3044 construct the whole vector. */
3045 op1 = XVECEXP (vals, 0, 0);
3046 op2 = XVECEXP (vals, 0, 1);
3048 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
3050 tmp = gen_reg_rtx (GET_MODE (constant_op));
3051 emit_move_insn (tmp, constant_op);
3053 if (CONSTANT_P (op1))
3054 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
3056 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
3058 emit_move_insn (target, new_rtx);
3062 paired_expand_vector_move (rtx operands[])
3064 rtx op0 = operands[0], op1 = operands[1];
3066 emit_move_insn (op0, op1);
3069 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
3070 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
3071 operands for the relation operation COND. This is a recursive
3075 paired_emit_vector_compare (enum rtx_code rcode,
3076 rtx dest, rtx op0, rtx op1,
3077 rtx cc_op0, rtx cc_op1)
3079 rtx tmp = gen_reg_rtx (V2SFmode);
3080 rtx tmp1, max, min, equal_zero;
3082 gcc_assert (TARGET_PAIRED_FLOAT);
3083 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
3089 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
3093 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
3094 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
3098 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
3101 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
3104 tmp1 = gen_reg_rtx (V2SFmode);
3105 max = gen_reg_rtx (V2SFmode);
3106 min = gen_reg_rtx (V2SFmode);
3107 equal_zero = gen_reg_rtx (V2SFmode);
3109 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
3110 emit_insn (gen_selv2sf4
3111 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
3112 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
3113 emit_insn (gen_selv2sf4
3114 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
3115 emit_insn (gen_subv2sf3 (tmp1, min, max));
3116 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
3119 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
3122 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
3125 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
3128 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
3131 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
3140 /* Emit vector conditional expression.
3141 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
3142 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
3145 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
3146 rtx cond, rtx cc_op0, rtx cc_op1)
3148 enum rtx_code rcode = GET_CODE (cond);
3150 if (!TARGET_PAIRED_FLOAT)
3153 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
3158 /* Initialize vector TARGET to VALS. */
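/* A rough sketch of the strategies used below, ignoring the V4SFmode
   special-casing visible in the conditions: all-zero constants become a
   vxor of the register with itself, easy constants become a splat
   immediate, other constants are loaded from the constant pool, a
   vector whose elements are all the same variable is pushed through a
   stack slot and splatted, a vector with a single variable element is
   built as a constant and then patched via rs6000_expand_vector_set,
   and anything else is assembled in memory one element at a time.  */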
3161 rs6000_expand_vector_init (rtx target, rtx vals)
3163 enum machine_mode mode = GET_MODE (target);
3164 enum machine_mode inner_mode = GET_MODE_INNER (mode);
3165 int n_elts = GET_MODE_NUNITS (mode);
3166 int n_var = 0, one_var = -1;
3167 bool all_same = true, all_const_zero = true;
3171 for (i = 0; i < n_elts; ++i)
3173 x = XVECEXP (vals, 0, i);
3174 if (!CONSTANT_P (x))
3175 ++n_var, one_var = i;
3176 else if (x != CONST0_RTX (inner_mode))
3177 all_const_zero = false;
3179 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
3185 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
3186 if (mode != V4SFmode && all_const_zero)
3188 /* Zero register. */
3189 emit_insn (gen_rtx_SET (VOIDmode, target,
3190 gen_rtx_XOR (mode, target, target)));
3193 else if (mode != V4SFmode && easy_vector_constant (const_vec, mode))
3195 /* Splat immediate. */
3196 emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
3200 ; /* Splat vector element. */
3203 /* Load from constant pool. */
3204 emit_move_insn (target, const_vec);
3209 /* Store value to stack temp. Load vector element. Splat. */
3212 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
3213 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
3214 XVECEXP (vals, 0, 0));
3215 x = gen_rtx_UNSPEC (VOIDmode,
3216 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
3217 emit_insn (gen_rtx_PARALLEL (VOIDmode,
3219 gen_rtx_SET (VOIDmode,
3222 x = gen_rtx_VEC_SELECT (inner_mode, target,
3223 gen_rtx_PARALLEL (VOIDmode,
3224 gen_rtvec (1, const0_rtx)));
3225 emit_insn (gen_rtx_SET (VOIDmode, target,
3226 gen_rtx_VEC_DUPLICATE (mode, x)));
3230 /* One field is non-constant. Load constant then overwrite
3234 rtx copy = copy_rtx (vals);
3236 /* Load constant part of vector, substitute neighboring value for
3238 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
3239 rs6000_expand_vector_init (target, copy);
3241 /* Insert variable. */
3242 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
3246 /* Construct the vector in memory one field at a time
3247 and load the whole vector. */
3248 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
3249 for (i = 0; i < n_elts; i++)
3250 emit_move_insn (adjust_address_nv (mem, inner_mode,
3251 i * GET_MODE_SIZE (inner_mode)),
3252 XVECEXP (vals, 0, i));
3253 emit_move_insn (target, mem);
3256 /* Set field ELT of TARGET to VAL. */
3259 rs6000_expand_vector_set (rtx target, rtx val, int elt)
3261 enum machine_mode mode = GET_MODE (target);
3262 enum machine_mode inner_mode = GET_MODE_INNER (mode);
3263 rtx reg = gen_reg_rtx (mode);
3265 int width = GET_MODE_SIZE (inner_mode);
3268 /* Load single variable value. */
3269 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
3270 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
3271 x = gen_rtx_UNSPEC (VOIDmode,
3272 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
3273 emit_insn (gen_rtx_PARALLEL (VOIDmode,
3275 gen_rtx_SET (VOIDmode,
3279 /* Linear sequence. */
3280 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
3281 for (i = 0; i < 16; ++i)
3282 XVECEXP (mask, 0, i) = GEN_INT (i);
3284 /* Set permute mask to insert element into target. */
3285 for (i = 0; i < width; ++i)
3286 XVECEXP (mask, 0, elt*width + i)
3287 = GEN_INT (i + 0x10);
3288 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
3289 x = gen_rtx_UNSPEC (mode,
3290 gen_rtvec (3, target, reg,
3291 force_reg (V16QImode, x)),
3293 emit_insn (gen_rtx_SET (VOIDmode, target, x));
3296 /* Extract field ELT from VEC into TARGET. */
3299 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
3301 enum machine_mode mode = GET_MODE (vec);
3302 enum machine_mode inner_mode = GET_MODE_INNER (mode);
3305 /* Allocate mode-sized buffer. */
3306 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
3308 /* Add offset to field within buffer matching vector element. */
3309 mem = adjust_address_nv (mem, mode, elt * GET_MODE_SIZE (inner_mode));
3311 /* Store single field into mode-sized buffer. */
3312 x = gen_rtx_UNSPEC (VOIDmode,
3313 gen_rtvec (1, const0_rtx), UNSPEC_STVE);
3314 emit_insn (gen_rtx_PARALLEL (VOIDmode,
3316 gen_rtx_SET (VOIDmode,
3319 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
3322 /* Generates shifts and masks for a pair of rldicl or rldicr insns to
3323 implement ANDing by the mask IN. */
3325 build_mask64_2_operands (rtx in, rtx *out)
3327 #if HOST_BITS_PER_WIDE_INT >= 64
3328 unsigned HOST_WIDE_INT c, lsb, m1, m2;
3331 gcc_assert (GET_CODE (in) == CONST_INT);
3336 /* Assume c initially something like 0x00fff000000fffff. The idea
3337 is to rotate the word so that the middle ^^^^^^ group of zeros
3338 is at the MS end and can be cleared with an rldicl mask. We then
3339 rotate back and clear off the MS ^^ group of zeros with a
3341 c = ~c; /* c == 0xff000ffffff00000 */
3342 lsb = c & -c; /* lsb == 0x0000000000100000 */
3343 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
3344 c = ~c; /* c == 0x00fff000000fffff */
3345 c &= -lsb; /* c == 0x00fff00000000000 */
3346 lsb = c & -c; /* lsb == 0x0000100000000000 */
3347 c = ~c; /* c == 0xff000fffffffffff */
3348 c &= -lsb; /* c == 0xff00000000000000 */
3350 while ((lsb >>= 1) != 0)
3351 shift++; /* shift == 44 on exit from loop */
3352 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
3353 m1 = ~m1; /* m1 == 0x000000ffffffffff */
3354 m2 = ~c; /* m2 == 0x00ffffffffffffff */
3358 /* Assume c initially something like 0xff000f0000000000. The idea
3359 is to rotate the word so that the ^^^ middle group of zeros
3360 is at the LS end and can be cleared with an rldicr mask. We then
3361 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
3363 lsb = c & -c; /* lsb == 0x0000010000000000 */
3364 m2 = -lsb; /* m2 == 0xffffff0000000000 */
3365 c = ~c; /* c == 0x00fff0ffffffffff */
3366 c &= -lsb; /* c == 0x00fff00000000000 */
3367 lsb = c & -c; /* lsb == 0x0000100000000000 */
3368 c = ~c; /* c == 0xff000fffffffffff */
3369 c &= -lsb; /* c == 0xff00000000000000 */
3371 while ((lsb >>= 1) != 0)
3372 shift++; /* shift == 44 on exit from loop */
3373 m1 = ~c; /* m1 == 0x00ffffffffffffff */
3374 m1 >>= shift; /* m1 == 0x0000000000000fff */
3375 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
3378 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
3379 masks will be all 1's. We are guaranteed more than one transition. */
3380 out[0] = GEN_INT (64 - shift);
3381 out[1] = GEN_INT (m1);
3382 out[2] = GEN_INT (shift);
3383 out[3] = GEN_INT (m2);
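/* Informally: OUT[0] and OUT[1] become the rotate count and mask for
   the first rotate-and-mask insn of the pair, OUT[2] and OUT[3] the
   rotate count and mask for the second, so together the two insns
   perform the AND by IN that a single rldicl/rldicr cannot.  */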
3391 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
3394 invalid_e500_subreg (rtx op, enum machine_mode mode)
3396 if (TARGET_E500_DOUBLE)
3398 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
3399 subreg:TI and reg:TF. Decimal float modes are like integer
3400 modes (only low part of each register used) for this
3402 if (GET_CODE (op) == SUBREG
3403 && (mode == SImode || mode == DImode || mode == TImode
3404 || mode == DDmode || mode == TDmode)
3405 && REG_P (SUBREG_REG (op))
3406 && (GET_MODE (SUBREG_REG (op)) == DFmode
3407 || GET_MODE (SUBREG_REG (op)) == TFmode))
3410 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
3412 if (GET_CODE (op) == SUBREG
3413 && (mode == DFmode || mode == TFmode)
3414 && REG_P (SUBREG_REG (op))
3415 && (GET_MODE (SUBREG_REG (op)) == DImode
3416 || GET_MODE (SUBREG_REG (op)) == TImode
3417 || GET_MODE (SUBREG_REG (op)) == DDmode
3418 || GET_MODE (SUBREG_REG (op)) == TDmode))
3423 && GET_CODE (op) == SUBREG
3425 && REG_P (SUBREG_REG (op))
3426 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
3432 /* AIX increases natural record alignment to doubleword if the first
3433 field is an FP double while the FP fields remain word aligned. */
3436 rs6000_special_round_type_align (tree type, unsigned int computed,
3437 unsigned int specified)
3439 unsigned int align = MAX (computed, specified);
3440 tree field = TYPE_FIELDS (type);
3442 /* Skip all non-field decls. */
3443 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
3444 field = TREE_CHAIN (field);
3446 if (field != NULL && field != type)
3448 type = TREE_TYPE (field);
3449 while (TREE_CODE (type) == ARRAY_TYPE)
3450 type = TREE_TYPE (type);
3452 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
3453 align = MAX (align, 64);
3459 /* Darwin increases record alignment to the natural alignment of
3463 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
3464 unsigned int specified)
3466 unsigned int align = MAX (computed, specified);
3468 if (TYPE_PACKED (type))
3471 /* Find the first field, looking down into aggregates. */
3473 tree field = TYPE_FIELDS (type);
3474 /* Skip all non-field decls. */
3475 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
3476 field = TREE_CHAIN (field);
3479 type = TREE_TYPE (field);
3480 while (TREE_CODE (type) == ARRAY_TYPE)
3481 type = TREE_TYPE (type);
3482 } while (AGGREGATE_TYPE_P (type));
3484 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
3485 align = MAX (align, TYPE_ALIGN (type));
3490 /* Return 1 for an operand in small memory on V.4/eabi. */
3493 small_data_operand (rtx op ATTRIBUTE_UNUSED,
3494 enum machine_mode mode ATTRIBUTE_UNUSED)
3499 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
3502 if (DEFAULT_ABI != ABI_V4)
3505 /* Vector and float memory instructions have a limited offset on the
3506 SPE, so using a vector or float variable directly as an operand is
3509 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
3512 if (GET_CODE (op) == SYMBOL_REF)
3515 else if (GET_CODE (op) != CONST
3516 || GET_CODE (XEXP (op, 0)) != PLUS
3517 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
3518 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
3523 rtx sum = XEXP (op, 0);
3524 HOST_WIDE_INT summand;
3526 /* We have to be careful here, because it is the referenced address
3527 that must be 32k from _SDA_BASE_, not just the symbol. */
3528 summand = INTVAL (XEXP (sum, 1));
3529 if (summand < 0 || (unsigned HOST_WIDE_INT) summand > g_switch_value)
3532 sym_ref = XEXP (sum, 0);
3535 return SYMBOL_REF_SMALL_P (sym_ref);
3541 /* Return true if either operand is a general purpose register. */
3544 gpr_or_gpr_p (rtx op0, rtx op1)
3546 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
3547 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
3551 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address. */
3554 constant_pool_expr_p (rtx op)
3558 split_const (op, &base, &offset);
3559 return (GET_CODE (base) == SYMBOL_REF
3560 && CONSTANT_POOL_ADDRESS_P (base)
3561 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
3565 toc_relative_expr_p (rtx op)
3569 if (GET_CODE (op) != CONST)
3572 split_const (op, &base, &offset);
3573 return (GET_CODE (base) == UNSPEC
3574 && XINT (base, 1) == UNSPEC_TOCREL);
3578 legitimate_constant_pool_address_p (rtx x)
3581 && GET_CODE (x) == PLUS
3582 && GET_CODE (XEXP (x, 0)) == REG
3583 && (TARGET_MINIMAL_TOC || REGNO (XEXP (x, 0)) == TOC_REGISTER)
3584 && toc_relative_expr_p (XEXP (x, 1)));
3588 legitimate_small_data_p (enum machine_mode mode, rtx x)
3590 return (DEFAULT_ABI == ABI_V4
3591 && !flag_pic && !TARGET_TOC
3592 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
3593 && small_data_operand (x, mode));
3596 /* SPE offset addressing is limited to 5 bits' worth of doublewords. */
3597 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
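/* In other words, only bits 3..7 of the offset may be set, so the
   accepted offsets are the doubleword multiples 0, 8, 16, ..., 0xf8
   (248).  */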
3600 rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x, int strict)
3602 unsigned HOST_WIDE_INT offset, extra;
3604 if (GET_CODE (x) != PLUS)
3606 if (GET_CODE (XEXP (x, 0)) != REG)
3608 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
3610 if (legitimate_constant_pool_address_p (x))
3612 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
3615 offset = INTVAL (XEXP (x, 1));
3623 /* AltiVec vector modes. Only reg+reg addressing is valid and
3624 constant offset zero should not occur due to canonicalization. */
3631 /* Paired vector modes. Only reg+reg addressing is valid and
3632 constant offset zero should not occur due to canonicalization. */
3633 if (TARGET_PAIRED_FLOAT)
3635 /* SPE vector modes. */
3636 return SPE_CONST_OFFSET_OK (offset);
3639 if (TARGET_E500_DOUBLE)
3640 return SPE_CONST_OFFSET_OK (offset);
3644 /* On e500v2, we may have:
3646 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
3648 Which gets addressed with evldd instructions. */
3649 if (TARGET_E500_DOUBLE)
3650 return SPE_CONST_OFFSET_OK (offset);
3652 if (mode == DFmode || mode == DDmode || !TARGET_POWERPC64)
3654 else if (offset & 3)
3659 if (TARGET_E500_DOUBLE)
3660 return (SPE_CONST_OFFSET_OK (offset)
3661 && SPE_CONST_OFFSET_OK (offset + 8));
3665 if (mode == TFmode || mode == TDmode || !TARGET_POWERPC64)
3667 else if (offset & 3)
3678 return (offset < 0x10000) && (offset + extra < 0x10000);
3682 legitimate_indexed_address_p (rtx x, int strict)
3686 if (GET_CODE (x) != PLUS)
3692 /* Recognize the rtl generated by reload which we know will later be
3693 replaced with proper base and index regs. */
3695 && reload_in_progress
3696 && (REG_P (op0) || GET_CODE (op0) == PLUS)
3700 return (REG_P (op0) && REG_P (op1)
3701 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
3702 && INT_REG_OK_FOR_INDEX_P (op1, strict))
3703 || (INT_REG_OK_FOR_BASE_P (op1, strict)
3704 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
3708 legitimate_indirect_address_p (rtx x, int strict)
3710 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
3714 macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
3716 if (!TARGET_MACHO || !flag_pic
3717 || mode != SImode || GET_CODE (x) != MEM)
3721 if (GET_CODE (x) != LO_SUM)
3723 if (GET_CODE (XEXP (x, 0)) != REG)
3725 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
3729 return CONSTANT_P (x);
3733 legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
3735 if (GET_CODE (x) != LO_SUM)
3737 if (GET_CODE (XEXP (x, 0)) != REG)
3739 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
3741 /* Restrict addressing for DI because of our SUBREG hackery. */
3742 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
3743 || mode == DDmode || mode == TDmode
3748 if (TARGET_ELF || TARGET_MACHO)
3750 if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
3754 if (GET_MODE_NUNITS (mode) != 1)
3756 if (GET_MODE_BITSIZE (mode) > 64
3757 || (GET_MODE_BITSIZE (mode) > 32 && !TARGET_POWERPC64
3758 && !(TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
3759 && (mode == DFmode || mode == DDmode))))
3762 return CONSTANT_P (x);
3769 /* Try machine-dependent ways of modifying an illegitimate address
3770 to be legitimate. If we find one, return the new, valid address.
3771 This is used from only one place: `memory_address' in explow.c.
3773 OLDX is the address as it was before break_out_memory_refs was
3774 called. In some cases it is useful to look at this to decide what
3777 MODE is passed so that this function can use GO_IF_LEGITIMATE_ADDRESS.
3779 It is always safe for this function to do nothing. It exists to
3780 recognize opportunities to optimize the output.
3782 On RS/6000, first check for the sum of a register with a constant
3783 integer that is out of range. If so, generate code to add the
3784 constant with the low-order 16 bits masked to the register and force
3785 this result into another register (this can be done with `cau').
3786 Then generate an address of REG+(CONST&0xffff), allowing for the
3787 possibility of bit 16 being a one.
3789 Then check for the sum of a register and something not constant, try to
3790 load the other things into a register and return the sum. */
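/* A worked example of the reg+constant splitting done below: for
   (plus (reg) (const_int 0x12348)) the low part computed is 0x2348 and
   the high part 0x10000, i.e. an addis of 1 followed by a 0x2348
   displacement; for offset 0x18000 the low part sign-extends to
   -0x8000, so the high part becomes 0x20000 (addis of 2) and the final
   displacement is -0x8000.  */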
3793 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3794 enum machine_mode mode)
3796 if (GET_CODE (x) == SYMBOL_REF)
3798 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
3800 return rs6000_legitimize_tls_address (x, model);
3803 if (GET_CODE (x) == PLUS
3804 && GET_CODE (XEXP (x, 0)) == REG
3805 && GET_CODE (XEXP (x, 1)) == CONST_INT
3806 && (unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000) >= 0x10000
3807 && !(SPE_VECTOR_MODE (mode)
3808 || ALTIVEC_VECTOR_MODE (mode)
3809 || (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
3810 || mode == DImode || mode == DDmode
3811 || mode == TDmode))))
3813 HOST_WIDE_INT high_int, low_int;
3815 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
3816 high_int = INTVAL (XEXP (x, 1)) - low_int;
3817 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
3818 GEN_INT (high_int)), 0);
3819 return gen_rtx_PLUS (Pmode, sum, GEN_INT (low_int));
3821 else if (GET_CODE (x) == PLUS
3822 && GET_CODE (XEXP (x, 0)) == REG
3823 && GET_CODE (XEXP (x, 1)) != CONST_INT
3824 && GET_MODE_NUNITS (mode) == 1
3825 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
3827 || ((mode != DImode && mode != DFmode && mode != DDmode)
3828 || (TARGET_E500_DOUBLE && mode != DDmode)))
3829 && (TARGET_POWERPC64 || mode != DImode)
3834 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
3835 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
3837 else if (ALTIVEC_VECTOR_MODE (mode))
3841 /* Make sure both operands are registers. */
3842 if (GET_CODE (x) == PLUS)
3843 return gen_rtx_PLUS (Pmode, force_reg (Pmode, XEXP (x, 0)),
3844 force_reg (Pmode, XEXP (x, 1)));
3846 reg = force_reg (Pmode, x);
3849 else if (SPE_VECTOR_MODE (mode)
3850 || (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
3851 || mode == DDmode || mode == TDmode
3852 || mode == DImode)))
3856 /* We accept [reg + reg] and [reg + OFFSET]. */
3858 if (GET_CODE (x) == PLUS)
3860 rtx op1 = XEXP (x, 0);
3861 rtx op2 = XEXP (x, 1);
3864 op1 = force_reg (Pmode, op1);
3866 if (GET_CODE (op2) != REG
3867 && (GET_CODE (op2) != CONST_INT
3868 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
3869 || (GET_MODE_SIZE (mode) > 8
3870 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
3871 op2 = force_reg (Pmode, op2);
3873 /* We can't always do [reg + reg] for these, because [reg +
3874 reg + offset] is not a legitimate addressing mode. */
3875 y = gen_rtx_PLUS (Pmode, op1, op2);
3877 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
3878 return force_reg (Pmode, y);
3883 return force_reg (Pmode, x);
3889 && GET_CODE (x) != CONST_INT
3890 && GET_CODE (x) != CONST_DOUBLE
3892 && GET_MODE_NUNITS (mode) == 1
3893 && (GET_MODE_BITSIZE (mode) <= 32
3894 || ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
3895 && (mode == DFmode || mode == DDmode))))
3897 rtx reg = gen_reg_rtx (Pmode);
3898 emit_insn (gen_elf_high (reg, x));
3899 return gen_rtx_LO_SUM (Pmode, reg, x);
3901 else if (TARGET_MACHO && TARGET_32BIT && TARGET_NO_TOC
3904 && ! MACHO_DYNAMIC_NO_PIC_P
3906 && GET_CODE (x) != CONST_INT
3907 && GET_CODE (x) != CONST_DOUBLE
3909 && GET_MODE_NUNITS (mode) == 1
3910 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
3911 || (mode != DFmode && mode != DDmode))
3915 rtx reg = gen_reg_rtx (Pmode);
3916 emit_insn (gen_macho_high (reg, x));
3917 return gen_rtx_LO_SUM (Pmode, reg, x);
3920 && GET_CODE (x) == SYMBOL_REF
3921 && constant_pool_expr_p (x)
3922 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
3924 return create_TOC_reference (x);
3930 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
3931 We need to emit DTP-relative relocations. */
3934 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
3939 fputs ("\t.long\t", file);
3942 fputs (DOUBLE_INT_ASM_OP, file);
3947 output_addr_const (file, x);
3948 fputs ("@dtprel+0x8000", file);
3951 /* Construct the SYMBOL_REF for the tls_get_addr function. */
3953 static GTY(()) rtx rs6000_tls_symbol;
3955 rs6000_tls_get_addr (void)
3957 if (!rs6000_tls_symbol)
3958 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
3960 return rs6000_tls_symbol;
3963 /* Construct the SYMBOL_REF for TLS GOT references. */
3965 static GTY(()) rtx rs6000_got_symbol;
3967 rs6000_got_sym (void)
3969 if (!rs6000_got_symbol)
3971 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3972 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
3973 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
3976 return rs6000_got_symbol;
3979 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3980 this (thread-local) address. */
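/* Roughly, the cases below are: local-exec with -mtls-size=16 adds a
   16-bit tprel offset directly to the thread pointer (r13 on 64-bit,
   r2 on 32-bit); local-exec with -mtls-size=32 uses a tprel ha/lo pair;
   global-dynamic and local-dynamic call __tls_get_addr through the
   GOT/TOC pointer; and the final case ("IE, or 64-bit offset LE")
   loads a got tprel word and adds it to the thread pointer.  */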
3983 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
3987 dest = gen_reg_rtx (Pmode);
3988 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
3994 tlsreg = gen_rtx_REG (Pmode, 13);
3995 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
3999 tlsreg = gen_rtx_REG (Pmode, 2);
4000 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
4004 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
4008 tmp = gen_reg_rtx (Pmode);
4011 tlsreg = gen_rtx_REG (Pmode, 13);
4012 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
4016 tlsreg = gen_rtx_REG (Pmode, 2);
4017 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
4021 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
4023 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
4028 rtx r3, got, tga, tmp1, tmp2, eqv;
4030 /* We currently use relocations like @got@tlsgd for tls, which
4031 means the linker will handle allocation of tls entries, placing
4032 them in the .got section. So use a pointer to the .got section,
4033 not one to secondary TOC sections used by 64-bit -mminimal-toc,
4034 or to secondary GOT sections used by 32-bit -fPIC. */
4036 got = gen_rtx_REG (Pmode, 2);
4040 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
4043 rtx gsym = rs6000_got_sym ();
4044 got = gen_reg_rtx (Pmode);
4046 rs6000_emit_move (got, gsym, Pmode);
4052 tmp1 = gen_reg_rtx (Pmode);
4053 tmp2 = gen_reg_rtx (Pmode);
4054 tmp3 = gen_reg_rtx (Pmode);
4055 mem = gen_const_mem (Pmode, tmp1);
4057 first = emit_insn (gen_load_toc_v4_PIC_1b (gsym));
4058 emit_move_insn (tmp1,
4059 gen_rtx_REG (Pmode, LR_REGNO));
4060 emit_move_insn (tmp2, mem);
4061 emit_insn (gen_addsi3 (tmp3, tmp1, tmp2));
4062 last = emit_move_insn (got, tmp3);
4063 set_unique_reg_note (last, REG_EQUAL, gsym);
4068 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
4070 r3 = gen_rtx_REG (Pmode, 3);
4071 tga = rs6000_tls_get_addr ();
4073 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
4074 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
4075 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
4076 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
4077 else if (DEFAULT_ABI == ABI_V4)
4078 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
4083 insn = emit_call_insn (insn);
4084 RTL_CONST_CALL_P (insn) = 1;
4085 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r3);
4086 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
4087 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
4088 insn = get_insns ();
4090 emit_libcall_block (insn, dest, r3, addr);
4092 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
4094 r3 = gen_rtx_REG (Pmode, 3);
4095 tga = rs6000_tls_get_addr ();
4097 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
4098 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
4099 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
4100 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
4101 else if (DEFAULT_ABI == ABI_V4)
4102 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
4107 insn = emit_call_insn (insn);
4108 RTL_CONST_CALL_P (insn) = 1;
4109 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r3);
4110 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
4111 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
4112 insn = get_insns ();
4114 tmp1 = gen_reg_rtx (Pmode);
4115 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
4117 emit_libcall_block (insn, tmp1, r3, eqv);
4118 if (rs6000_tls_size == 16)
4121 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
4123 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
4125 else if (rs6000_tls_size == 32)
4127 tmp2 = gen_reg_rtx (Pmode);
4129 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
4131 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
4134 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
4136 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
4140 tmp2 = gen_reg_rtx (Pmode);
4142 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
4144 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
4146 insn = gen_rtx_SET (Pmode, dest,
4147 gen_rtx_PLUS (Pmode, tmp2, tmp1));
4153 /* IE, or 64-bit offset LE. */
4154 tmp2 = gen_reg_rtx (Pmode);
4156 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
4158 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
4161 insn = gen_tls_tls_64 (dest, tmp2, addr);
4163 insn = gen_tls_tls_32 (dest, tmp2, addr);
4171 /* Return 1 if X contains a thread-local symbol. */
4174 rs6000_tls_referenced_p (rtx x)
4176 if (! TARGET_HAVE_TLS)
4179 return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
4182 /* Return 1 if *X is a thread-local symbol. This is the same as
4183 rs6000_tls_symbol_ref except for the type of the unused argument. */
4186 rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
4188 return RS6000_SYMBOL_REF_TLS_P (*x);
4191 /* The convention appears to be to define this wherever it is used.
4192 With legitimize_reload_address now defined here, REG_MODE_OK_FOR_BASE_P
4193 is now used here. */
4194 #ifndef REG_MODE_OK_FOR_BASE_P
4195 #define REG_MODE_OK_FOR_BASE_P(REGNO, MODE) REG_OK_FOR_BASE_P (REGNO)
4198 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
4199 replace the input X, or the original X if no replacement is called for.
4200 The output parameter *WIN is 1 if the calling macro should goto WIN, 0 if it should not.
4203 For RS/6000, we wish to handle large displacements off a base
4204 register by splitting the addend across an addi/addis pair and the mem insn.
4205 This cuts the number of extra insns needed from 3 to 1.
4207 On Darwin, we use this to generate code for floating point constants.
4208 A movsf_low is generated so we wind up with 2 instructions rather than 3.
4209 The Darwin code is inside #if TARGET_MACHO because only then are the
4210 machopic_* functions defined. */
4212 rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
4213 int opnum, int type,
4214 int ind_levels ATTRIBUTE_UNUSED, int *win)
4216 /* We must recognize output that we have already generated ourselves. */
4217 if (GET_CODE (x) == PLUS
4218 && GET_CODE (XEXP (x, 0)) == PLUS
4219 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
4220 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4221 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4223 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4224 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4225 opnum, (enum reload_type)type);
4231 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
4232 && GET_CODE (x) == LO_SUM
4233 && GET_CODE (XEXP (x, 0)) == PLUS
4234 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
4235 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
4236 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
4237 && machopic_operand_p (XEXP (x, 1)))
4239 /* Result of previous invocation of this function on Darwin
4240 floating point constant. */
4241 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4242 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4243 opnum, (enum reload_type)type);
4249 /* Force ld/std non-word aligned offset into base register by wrapping in offset 0. */
4251 if (GET_CODE (x) == PLUS
4252 && GET_CODE (XEXP (x, 0)) == REG
4253 && REGNO (XEXP (x, 0)) < 32
4254 && REG_MODE_OK_FOR_BASE_P (XEXP (x, 0), mode)
4255 && GET_CODE (XEXP (x, 1)) == CONST_INT
4256 && (INTVAL (XEXP (x, 1)) & 3) != 0
4257 && !ALTIVEC_VECTOR_MODE (mode)
4258 && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
4259 && TARGET_POWERPC64)
4261 x = gen_rtx_PLUS (GET_MODE (x), x, GEN_INT (0));
4262 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4263 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4264 opnum, (enum reload_type) type);
4269 if (GET_CODE (x) == PLUS
4270 && GET_CODE (XEXP (x, 0)) == REG
4271 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
4272 && REG_MODE_OK_FOR_BASE_P (XEXP (x, 0), mode)
4273 && GET_CODE (XEXP (x, 1)) == CONST_INT
4274 && !SPE_VECTOR_MODE (mode)
4275 && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
4276 || mode == DDmode || mode == TDmode
4278 && !ALTIVEC_VECTOR_MODE (mode))
4280 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
4281 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
4283 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
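/* Split VAL into a sign-extended low 16 bits plus a high part whose low
16 bits are zero. For example, VAL = 0x18000 gives low = -0x8000 and
high = 0x20000, so high + low == VAL and each half fits its insn. */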
4285 /* Check for 32-bit overflow. */
4286 if (high + low != val)
4292 /* Reload the high part into a base reg; leave the low part
4293 in the mem directly. */
4295 x = gen_rtx_PLUS (GET_MODE (x),
4296 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
4300 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4301 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4302 opnum, (enum reload_type)type);
4307 if (GET_CODE (x) == SYMBOL_REF
4308 && !ALTIVEC_VECTOR_MODE (mode)
4309 && !SPE_VECTOR_MODE (mode)
4311 && DEFAULT_ABI == ABI_DARWIN
4312 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
4314 && DEFAULT_ABI == ABI_V4
4317 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
4318 The same goes for DImode without 64-bit gprs and DFmode and DDmode without fprs. */
4322 && (mode != DImode || TARGET_POWERPC64)
4323 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
4324 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
4329 rtx offset = machopic_gen_offset (x);
4330 x = gen_rtx_LO_SUM (GET_MODE (x),
4331 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
4332 gen_rtx_HIGH (Pmode, offset)), offset);
4336 x = gen_rtx_LO_SUM (GET_MODE (x),
4337 gen_rtx_HIGH (Pmode, x), x);
4339 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4340 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4341 opnum, (enum reload_type)type);
4346 /* Reload an offset address wrapped by an AND that represents the
4347 masking of the lower bits. Strip the outer AND and let reload
4348 convert the offset address into an indirect address. */
4350 && ALTIVEC_VECTOR_MODE (mode)
4351 && GET_CODE (x) == AND
4352 && GET_CODE (XEXP (x, 0)) == PLUS
4353 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
4354 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4355 && GET_CODE (XEXP (x, 1)) == CONST_INT
4356 && INTVAL (XEXP (x, 1)) == -16)
4364 && GET_CODE (x) == SYMBOL_REF
4365 && constant_pool_expr_p (x)
4366 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), mode))
4368 x = create_TOC_reference (x);
4376 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
4377 that is a valid memory address for an instruction.
4378 The MODE argument is the machine mode for the MEM expression
4379 that wants to use this address.
4381 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
4382 refers to a constant pool entry of an address (or the sum of it
4383 plus a constant), a short (16-bit signed) constant plus a register,
4384 the sum of two registers, or a register indirect, possibly with an
4385 auto-increment. For DFmode, DDmode and DImode with a constant plus
4386 register, we must ensure that both words are addressable or PowerPC64
4387 with offset word aligned.
4389 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
4390 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
4391 because adjacent memory cells are accessed by adding word-sized offsets
4392 during assembly output. */
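/* For instance, (mem:SI (reg:SI 9)), (mem:SI (plus:SI (reg:SI 9)
(const_int 40))) and (mem:SI (plus:SI (reg:SI 9) (reg:SI 10))) are all
legitimate SImode addresses here. */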
4394 rs6000_legitimate_address (enum machine_mode mode, rtx x, int reg_ok_strict)
4396 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
4398 && ALTIVEC_VECTOR_MODE (mode)
4399 && GET_CODE (x) == AND
4400 && GET_CODE (XEXP (x, 1)) == CONST_INT
4401 && INTVAL (XEXP (x, 1)) == -16)
4404 if (RS6000_SYMBOL_REF_TLS_P (x))
4406 if (legitimate_indirect_address_p (x, reg_ok_strict))
4408 if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
4409 && !ALTIVEC_VECTOR_MODE (mode)
4410 && !SPE_VECTOR_MODE (mode)
4413 /* Restrict addressing for DI because of our SUBREG hackery. */
4414 && !(TARGET_E500_DOUBLE
4415 && (mode == DFmode || mode == DDmode || mode == DImode))
4417 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
4419 if (legitimate_small_data_p (mode, x))
4421 if (legitimate_constant_pool_address_p (x))
4423 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
4425 && GET_CODE (x) == PLUS
4426 && GET_CODE (XEXP (x, 0)) == REG
4427 && (XEXP (x, 0) == virtual_stack_vars_rtx
4428 || XEXP (x, 0) == arg_pointer_rtx)
4429 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4431 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict))
4436 && ((TARGET_HARD_FLOAT && TARGET_FPRS)
4438 || (mode != DFmode && mode != DDmode)
4439 || (TARGET_E500_DOUBLE && mode != DDmode))
4440 && (TARGET_POWERPC64 || mode != DImode)
4441 && legitimate_indexed_address_p (x, reg_ok_strict))
4443 if (GET_CODE (x) == PRE_MODIFY
4447 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
4449 || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
4450 && (TARGET_POWERPC64 || mode != DImode)
4451 && !ALTIVEC_VECTOR_MODE (mode)
4452 && !SPE_VECTOR_MODE (mode)
4453 /* Restrict addressing for DI because of our SUBREG hackery. */
4454 && !(TARGET_E500_DOUBLE
4455 && (mode == DFmode || mode == DDmode || mode == DImode))
4457 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
4458 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1), reg_ok_strict)
4459 || legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict))
4460 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
4462 if (legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
4467 /* Go to LABEL if ADDR (a legitimate address expression)
4468 has an effect that depends on the machine mode it is used for.
4470 On the RS/6000 this is true of all integral offsets (since AltiVec
4471 modes don't allow them) and of any pre-increment or decrement address.
4473 ??? Except that due to conceptual problems in offsettable_address_p
4474 we can't really report the problems of integral offsets. So leave
4475 this assuming that the adjustable offset must be valid for the
4476 sub-words of a TFmode operand, which is what we had before. */
4479 rs6000_mode_dependent_address (rtx addr)
4481 switch (GET_CODE (addr))
4484 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
4486 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
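/* Mode dependent unless VAL + 12 still fits in a signed 16-bit
displacement, so that VAL, VAL+4, VAL+8 and VAL+12 are all usable
offsets. E.g. VAL = 0x7ff8 gives 0x7ff8 + 12 + 0x8000 = 0x10004, which
is >= 0x10000, so the address is mode dependent; VAL = 0x7ff0 is not. */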
4487 return val + 12 + 0x8000 >= 0x10000;
4494 /* Auto-increment cases are now treated generically in recog.c. */
4496 return TARGET_UPDATE;
4505 /* Implement FIND_BASE_TERM. */
4508 rs6000_find_base_term (rtx op)
4512 split_const (op, &base, &offset);
4513 if (GET_CODE (base) == UNSPEC)
4514 switch (XINT (base, 1))
4517 case UNSPEC_MACHOPIC_OFFSET:
4518 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
4519 for aliasing purposes. */
4520 return XVECEXP (base, 0, 0);
4526 /* More elaborate version of recog's offsettable_memref_p predicate
4527 that works around the ??? note of rs6000_mode_dependent_address.
4528 In particular it accepts
4530 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
4532 in 32-bit mode, that the recog predicate rejects. */
4535 rs6000_offsettable_memref_p (rtx op)
4540 /* First mimic offsettable_memref_p. */
4541 if (offsettable_address_p (1, GET_MODE (op), XEXP (op, 0)))
4544 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
4545 the latter predicate knows nothing about the mode of the memory
4546 reference and, therefore, assumes that it is the largest supported
4547 mode (TFmode). As a consequence, legitimate offsettable memory
4548 references are rejected. rs6000_legitimate_offset_address_p contains
4549 the correct logic for the PLUS case of rs6000_mode_dependent_address. */
4550 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0), 1);
4553 /* Return number of consecutive hard regs needed starting at reg REGNO
4554 to hold something of mode MODE.
4555 This is ordinarily the length in words of a value of mode MODE
4556 but can be less for certain modes in special long registers.
4558 For the SPE, GPRs are 64 bits but only 32 bits are visible in
4559 scalar instructions. The upper 32 bits are only available to the SPE.
4562 POWER and PowerPC GPRs hold 32 bits worth;
4563 PowerPC64 GPRs and FPRs hold 64 bits worth. */
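/* For example, DFmode (8 bytes) occupies one FPR but two 32-bit GPRs,
while a 16-byte AltiVec vector occupies a single AltiVec register. */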
4566 rs6000_hard_regno_nregs (int regno, enum machine_mode mode)
4568 if (FP_REGNO_P (regno))
4569 return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
4571 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
4572 return (GET_MODE_SIZE (mode) + UNITS_PER_SPE_WORD - 1) / UNITS_PER_SPE_WORD;
4574 if (ALTIVEC_REGNO_P (regno))
4576 (GET_MODE_SIZE (mode) + UNITS_PER_ALTIVEC_WORD - 1) / UNITS_PER_ALTIVEC_WORD;
4578 /* The value returned for SCmode in the E500 double case is 2 for
4579 ABI compatibility; storing an SCmode value in a single register
4580 would require function_arg and rs6000_spe_function_arg to handle
5581 SCmode so as to pass the value correctly in a pair of registers. */
4583 if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
4584 && !DECIMAL_FLOAT_MODE_P (mode))
4585 return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
4587 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4590 /* Change register usage conditional on target flags. */
4592 rs6000_conditional_register_usage (void)
4596 /* Set MQ register fixed (already call_used) if not POWER
4597 architecture (RIOS1, RIOS2, RSC, and PPC601) so that it will not be allocated. */
4602 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
4604 fixed_regs[13] = call_used_regs[13]
4605 = call_really_used_regs[13] = 1;
4607 /* Conditionally disable FPRs. */
4608 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
4609 for (i = 32; i < 64; i++)
4610 fixed_regs[i] = call_used_regs[i]
4611 = call_really_used_regs[i] = 1;
4613 /* The TOC register is not killed across calls in a way that is
4614 visible to the compiler. */
4615 if (DEFAULT_ABI == ABI_AIX)
4616 call_really_used_regs[2] = 0;
4618 if (DEFAULT_ABI == ABI_V4
4619 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
4621 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
4623 if (DEFAULT_ABI == ABI_V4
4624 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
4626 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
4627 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
4628 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
4630 if (DEFAULT_ABI == ABI_DARWIN
4631 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
4632 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
4633 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
4634 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
4636 if (TARGET_TOC && TARGET_MINIMAL_TOC)
4637 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
4638 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
4642 global_regs[SPEFSCR_REGNO] = 1;
4643 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
4644 registers in prologues and epilogues. We no longer use r14
4645 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
4646 pool for link-compatibility with older versions of GCC. Once
4647 "old" code has died out, we can return r14 to the allocation pool. */
4650 = call_used_regs[14]
4651 = call_really_used_regs[14] = 1;
4654 if (!TARGET_ALTIVEC)
4656 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
4657 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
4658 call_really_used_regs[VRSAVE_REGNO] = 1;
4662 global_regs[VSCR_REGNO] = 1;
4664 if (TARGET_ALTIVEC_ABI)
4666 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
4667 call_used_regs[i] = call_really_used_regs[i] = 1;
4669 /* AIX reserves VR20:31 in non-extended ABI mode. */
4671 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
4672 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
4676 /* Try to output insns to set TARGET equal to the constant C if it can
4677 be done in less than N insns. Do all computations in MODE.
4678 Returns the place where the output has been placed if it can be
4679 done and the insns have been emitted. If it would take more than N
4680 insns, zero is returned and no insns are emitted. */
4683 rs6000_emit_set_const (rtx dest, enum machine_mode mode,
4684 rtx source, int n ATTRIBUTE_UNUSED)
4686 rtx result, insn, set;
4687 HOST_WIDE_INT c0, c1;
4694 dest = gen_reg_rtx (mode);
4695 emit_insn (gen_rtx_SET (VOIDmode, dest, source));
4699 result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
4701 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
4702 GEN_INT (INTVAL (source)
4703 & (~ (HOST_WIDE_INT) 0xffff))));
4704 emit_insn (gen_rtx_SET (VOIDmode, dest,
4705 gen_rtx_IOR (SImode, copy_rtx (result),
4706 GEN_INT (INTVAL (source) & 0xffff))));
4711 switch (GET_CODE (source))
4714 c0 = INTVAL (source);
4719 #if HOST_BITS_PER_WIDE_INT >= 64
4720 c0 = CONST_DOUBLE_LOW (source);
4723 c0 = CONST_DOUBLE_LOW (source);
4724 c1 = CONST_DOUBLE_HIGH (source);
4732 result = rs6000_emit_set_long_const (dest, c0, c1);
4739 insn = get_last_insn ();
4740 set = single_set (insn);
4741 if (! CONSTANT_P (SET_SRC (set)))
4742 set_unique_reg_note (insn, REG_EQUAL, source);
4747 /* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
4748 fall back to a straightforward decomposition. We do this to avoid
4749 exponential run times encountered when looking for longer sequences
4750 with rs6000_emit_set_const. */
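/* For example, a 64-bit constant such as 0x123456789abcdef0 splits into
ud4..ud1 = 0x1234, 0x5678, 0x9abc, 0xdef0 and is built by loading the
sign-extended ud4, IORing in ud3, shifting left 32, then IORing in
ud2 << 16 and ud1 (lis/ori/sldi/oris/ori on PowerPC64). */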
4752 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
4754 if (!TARGET_POWERPC64)
4756 rtx operand1, operand2;
4758 operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
4760 operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
4762 emit_move_insn (operand1, GEN_INT (c1));
4763 emit_move_insn (operand2, GEN_INT (c2));
4767 HOST_WIDE_INT ud1, ud2, ud3, ud4;
4770 ud2 = (c1 & 0xffff0000) >> 16;
4771 #if HOST_BITS_PER_WIDE_INT >= 64
4775 ud4 = (c2 & 0xffff0000) >> 16;
4777 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
4778 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
4781 emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
4783 emit_move_insn (dest, GEN_INT (ud1));
4786 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
4787 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
4790 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
4793 emit_move_insn (dest, GEN_INT (ud2 << 16));
4795 emit_move_insn (copy_rtx (dest),
4796 gen_rtx_IOR (DImode, copy_rtx (dest),
4799 else if ((ud4 == 0xffff && (ud3 & 0x8000))
4800 || (ud4 == 0 && ! (ud3 & 0x8000)))
4803 emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
4806 emit_move_insn (dest, GEN_INT (ud3 << 16));
4809 emit_move_insn (copy_rtx (dest),
4810 gen_rtx_IOR (DImode, copy_rtx (dest),
4812 emit_move_insn (copy_rtx (dest),
4813 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
4816 emit_move_insn (copy_rtx (dest),
4817 gen_rtx_IOR (DImode, copy_rtx (dest),
4823 emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
4826 emit_move_insn (dest, GEN_INT (ud4 << 16));
4829 emit_move_insn (copy_rtx (dest),
4830 gen_rtx_IOR (DImode, copy_rtx (dest),
4833 emit_move_insn (copy_rtx (dest),
4834 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
4837 emit_move_insn (copy_rtx (dest),
4838 gen_rtx_IOR (DImode, copy_rtx (dest),
4839 GEN_INT (ud2 << 16)));
4841 emit_move_insn (copy_rtx (dest),
4842 gen_rtx_IOR (DImode, copy_rtx (dest), GEN_INT (ud1)));
4848 /* Helper for the following. Get rid of [r+r] memory refs
4849 in cases where it won't work (TImode, TFmode, TDmode). */
4852 rs6000_eliminate_indexed_memrefs (rtx operands[2])
4854 if (GET_CODE (operands[0]) == MEM
4855 && GET_CODE (XEXP (operands[0], 0)) != REG
4856 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0))
4857 && ! reload_in_progress)
4859 = replace_equiv_address (operands[0],
4860 copy_addr_to_reg (XEXP (operands[0], 0)));
4862 if (GET_CODE (operands[1]) == MEM
4863 && GET_CODE (XEXP (operands[1], 0)) != REG
4864 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0))
4865 && ! reload_in_progress)
4867 = replace_equiv_address (operands[1],
4868 copy_addr_to_reg (XEXP (operands[1], 0)));
4871 /* Emit a move from SOURCE to DEST in mode MODE. */
4873 rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
4877 operands[1] = source;
4879 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
4880 if (GET_CODE (operands[1]) == CONST_DOUBLE
4881 && ! FLOAT_MODE_P (mode)
4882 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
4884 /* FIXME. This should never happen. */
4885 /* Since it seems that it does, do the safe thing and convert it to a CONST_INT. */
4887 operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
4889 gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
4890 || FLOAT_MODE_P (mode)
4891 || ((CONST_DOUBLE_HIGH (operands[1]) != 0
4892 || CONST_DOUBLE_LOW (operands[1]) < 0)
4893 && (CONST_DOUBLE_HIGH (operands[1]) != -1
4894 || CONST_DOUBLE_LOW (operands[1]) >= 0)));
4896 /* Check if GCC is setting up a block move that will end up using FP
4897 registers as temporaries. We must make sure this is acceptable. */
4898 if (GET_CODE (operands[0]) == MEM
4899 && GET_CODE (operands[1]) == MEM
4901 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
4902 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
4903 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
4904 ? 32 : MEM_ALIGN (operands[0])))
4905 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
4907 : MEM_ALIGN (operands[1]))))
4908 && ! MEM_VOLATILE_P (operands [0])
4909 && ! MEM_VOLATILE_P (operands [1]))
4911 emit_move_insn (adjust_address (operands[0], SImode, 0),
4912 adjust_address (operands[1], SImode, 0));
4913 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
4914 adjust_address (copy_rtx (operands[1]), SImode, 4));
4918 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
4919 && !gpc_reg_operand (operands[1], mode))
4920 operands[1] = force_reg (mode, operands[1]);
4922 if (mode == SFmode && ! TARGET_POWERPC
4923 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
4924 && GET_CODE (operands[0]) == MEM)
4928 if (reload_in_progress || reload_completed)
4929 regnum = true_regnum (operands[1]);
4930 else if (GET_CODE (operands[1]) == REG)
4931 regnum = REGNO (operands[1]);
4935 /* If operands[1] is a register, on POWER it may have
4936 double-precision data in it, so truncate it to single precision. */
4938 if (FP_REGNO_P (regnum) || regnum >= FIRST_PSEUDO_REGISTER)
4941 newreg = (!can_create_pseudo_p () ? copy_rtx (operands[1])
4942 : gen_reg_rtx (mode));
4943 emit_insn (gen_aux_truncdfsf2 (newreg, operands[1]));
4944 operands[1] = newreg;
4948 /* Recognize the case where operand[1] is a reference to thread-local
4949 data and load its address to a register. */
4950 if (rs6000_tls_referenced_p (operands[1]))
4952 enum tls_model model;
4953 rtx tmp = operands[1];
4956 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
4958 addend = XEXP (XEXP (tmp, 0), 1);
4959 tmp = XEXP (XEXP (tmp, 0), 0);
4962 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
4963 model = SYMBOL_REF_TLS_MODEL (tmp);
4964 gcc_assert (model != 0);
4966 tmp = rs6000_legitimize_tls_address (tmp, model);
4969 tmp = gen_rtx_PLUS (mode, tmp, addend);
4970 tmp = force_operand (tmp, operands[0]);
4975 /* Handle the case where reload calls us with an invalid address. */
4976 if (reload_in_progress && mode == Pmode
4977 && (! general_operand (operands[1], mode)
4978 || ! nonimmediate_operand (operands[0], mode)))
4981 /* 128-bit constant floating-point values on Darwin should really be
4982 loaded as two parts. */
4983 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
4984 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
4986 /* DImode is used, not DFmode, because simplify_gen_subreg doesn't
4987 know how to get a DFmode SUBREG of a TFmode. */
4988 enum machine_mode imode = (TARGET_E500_DOUBLE ? DFmode : DImode);
4989 rs6000_emit_move (simplify_gen_subreg (imode, operands[0], mode, 0),
4990 simplify_gen_subreg (imode, operands[1], mode, 0),
4992 rs6000_emit_move (simplify_gen_subreg (imode, operands[0], mode,
4993 GET_MODE_SIZE (imode)),
4994 simplify_gen_subreg (imode, operands[1], mode,
4995 GET_MODE_SIZE (imode)),
5000 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
5001 cfun->machine->sdmode_stack_slot =
5002 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
5004 if (reload_in_progress
5006 && MEM_P (operands[0])
5007 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
5008 && REG_P (operands[1]))
5010 if (FP_REGNO_P (REGNO (operands[1])))
5012 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
5013 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
5014 emit_insn (gen_movsd_store (mem, operands[1]));
5016 else if (INT_REGNO_P (REGNO (operands[1])))
5018 rtx mem = adjust_address_nv (operands[0], mode, 4);
5019 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
5020 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
5026 if (reload_in_progress
5028 && REG_P (operands[0])
5029 && MEM_P (operands[1])
5030 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
5032 if (FP_REGNO_P (REGNO (operands[0])))
5034 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
5035 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
5036 emit_insn (gen_movsd_load (operands[0], mem));
5038 else if (INT_REGNO_P (REGNO (operands[0])))
5040 rtx mem = adjust_address_nv (operands[1], mode, 4);
5041 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
5042 emit_insn (gen_movsd_hardfloat (operands[0], mem));
5049 /* FIXME: In the long term, this switch statement should go away
5050 and be replaced by a sequence of tests based on things like GET_MODE_SIZE. */
5056 if (CONSTANT_P (operands[1])
5057 && GET_CODE (operands[1]) != CONST_INT)
5058 operands[1] = force_const_mem (mode, operands[1]);
5063 rs6000_eliminate_indexed_memrefs (operands);
5070 if (CONSTANT_P (operands[1])
5071 && ! easy_fp_constant (operands[1], mode))
5072 operands[1] = force_const_mem (mode, operands[1]);
5083 if (CONSTANT_P (operands[1])
5084 && !easy_vector_constant (operands[1], mode))
5085 operands[1] = force_const_mem (mode, operands[1]);
5090 /* Use default pattern for address of ELF small data */
5093 && DEFAULT_ABI == ABI_V4
5094 && (GET_CODE (operands[1]) == SYMBOL_REF
5095 || GET_CODE (operands[1]) == CONST)
5096 && small_data_operand (operands[1], mode))
5098 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
5102 if (DEFAULT_ABI == ABI_V4
5103 && mode == Pmode && mode == SImode
5104 && flag_pic == 1 && got_operand (operands[1], mode))
5106 emit_insn (gen_movsi_got (operands[0], operands[1]));
5110 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
5114 && CONSTANT_P (operands[1])
5115 && GET_CODE (operands[1]) != HIGH
5116 && GET_CODE (operands[1]) != CONST_INT)
5118 rtx target = (!can_create_pseudo_p ()
5120 : gen_reg_rtx (mode));
5122 /* If this is a function address on -mcall-aixdesc,
5123 convert it to the address of the descriptor. */
5124 if (DEFAULT_ABI == ABI_AIX
5125 && GET_CODE (operands[1]) == SYMBOL_REF
5126 && XSTR (operands[1], 0)[0] == '.')
5128 const char *name = XSTR (operands[1], 0);
5130 while (*name == '.')
5132 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
5133 CONSTANT_POOL_ADDRESS_P (new_ref)
5134 = CONSTANT_POOL_ADDRESS_P (operands[1]);
5135 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
5136 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
5137 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
5138 operands[1] = new_ref;
5141 if (DEFAULT_ABI == ABI_DARWIN)
5144 if (MACHO_DYNAMIC_NO_PIC_P)
5146 /* Take care of any required data indirection. */
5147 operands[1] = rs6000_machopic_legitimize_pic_address (
5148 operands[1], mode, operands[0]);
5149 if (operands[0] != operands[1])
5150 emit_insn (gen_rtx_SET (VOIDmode,
5151 operands[0], operands[1]));
5155 emit_insn (gen_macho_high (target, operands[1]));
5156 emit_insn (gen_macho_low (operands[0], target, operands[1]));
5160 emit_insn (gen_elf_high (target, operands[1]));
5161 emit_insn (gen_elf_low (operands[0], target, operands[1]));
5165 /* If this is a SYMBOL_REF that refers to a constant pool entry,
5166 and we have put it in the TOC, we just need to make a TOC-relative reference to it. */
5169 && GET_CODE (operands[1]) == SYMBOL_REF
5170 && constant_pool_expr_p (operands[1])
5171 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (operands[1]),
5172 get_pool_mode (operands[1])))
5174 operands[1] = create_TOC_reference (operands[1]);
5176 else if (mode == Pmode
5177 && CONSTANT_P (operands[1])
5178 && ((GET_CODE (operands[1]) != CONST_INT
5179 && ! easy_fp_constant (operands[1], mode))
5180 || (GET_CODE (operands[1]) == CONST_INT
5181 && num_insns_constant (operands[1], mode) > 2)
5182 || (GET_CODE (operands[0]) == REG
5183 && FP_REGNO_P (REGNO (operands[0]))))
5184 && GET_CODE (operands[1]) != HIGH
5185 && ! legitimate_constant_pool_address_p (operands[1])
5186 && ! toc_relative_expr_p (operands[1]))
5188 /* Emit a USE operation so that the constant isn't deleted if
5189 expensive optimizations are turned on because nobody
5190 references it. This should only be done for operands that
5191 contain SYMBOL_REFs with CONSTANT_POOL_ADDRESS_P set.
5192 This should not be done for operands that contain LABEL_REFs.
5193 For now, we just handle the obvious case. */
5194 if (GET_CODE (operands[1]) != LABEL_REF)
5195 emit_use (operands[1]);
5198 /* Darwin uses a special PIC legitimizer. */
5199 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
5202 rs6000_machopic_legitimize_pic_address (operands[1], mode,
5204 if (operands[0] != operands[1])
5205 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
5210 /* If we are to limit the number of things we put in the TOC and
5211 this is a symbol plus a constant we can add in one insn,
5212 just put the symbol in the TOC and add the constant. Don't do
5213 this if reload is in progress. */
5214 if (GET_CODE (operands[1]) == CONST
5215 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
5216 && GET_CODE (XEXP (operands[1], 0)) == PLUS
5217 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
5218 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
5219 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
5220 && ! side_effects_p (operands[0]))
5223 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
5224 rtx other = XEXP (XEXP (operands[1], 0), 1);
5226 sym = force_reg (mode, sym);
5228 emit_insn (gen_addsi3 (operands[0], sym, other));
5230 emit_insn (gen_adddi3 (operands[0], sym, other));
5234 operands[1] = force_const_mem (mode, operands[1]);
5237 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
5238 && constant_pool_expr_p (XEXP (operands[1], 0))
5239 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
5240 get_pool_constant (XEXP (operands[1], 0)),
5241 get_pool_mode (XEXP (operands[1], 0))))
5244 = gen_const_mem (mode,
5245 create_TOC_reference (XEXP (operands[1], 0)));
5246 set_mem_alias_set (operands[1], get_TOC_alias_set ());
5252 rs6000_eliminate_indexed_memrefs (operands);
5256 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5258 gen_rtx_SET (VOIDmode,
5259 operands[0], operands[1]),
5260 gen_rtx_CLOBBER (VOIDmode,
5261 gen_rtx_SCRATCH (SImode)))));
5270 /* Above, we may have called force_const_mem which may have returned
5271 an invalid address. If we can, fix this up; otherwise, reload will
5272 have to deal with it. */
5273 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
5274 operands[1] = validize_mem (operands[1]);
5277 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
5280 /* Nonzero if we can use a floating-point register to pass this arg. */
5281 #define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
5282 (SCALAR_FLOAT_MODE_P (MODE) \
5283 && (CUM)->fregno <= FP_ARG_MAX_REG \
5284 && TARGET_HARD_FLOAT && TARGET_FPRS)
5286 /* Nonzero if we can use an AltiVec register to pass this arg. */
5287 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED) \
5288 (ALTIVEC_VECTOR_MODE (MODE) \
5289 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
5290 && TARGET_ALTIVEC_ABI \
5293 /* Return a nonzero value to say to return the function value in
5294 memory, just as large structures are always returned. TYPE will be
5295 the data type of the value, and FNTYPE will be the type of the
5296 function doing the returning, or @code{NULL} for libcalls.
5298 The AIX ABI for the RS/6000 specifies that all structures are
5299 returned in memory. The Darwin ABI does the same. The SVR4 ABI
5300 specifies that structures <= 8 bytes are returned in r3/r4, but a
5301 draft put them in memory, and GCC used to implement the draft
5302 instead of the final standard. Therefore, aix_struct_return
5303 controls this instead of DEFAULT_ABI; V.4 targets needing backward
5304 compatibility can change DRAFT_V4_STRUCT_RET to override the
5305 default, and -m switches get the final word. See
5306 rs6000_override_options for more details.
5308 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
5309 long double support is enabled. These values are returned in memory.
5311 int_size_in_bytes returns -1 for variable size objects, which go in
5312 memory always. The cast to unsigned makes -1 > 8. */
5315 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5317 /* In the darwin64 abi, try to use registers for larger structs if possible. */
5319 if (rs6000_darwin64_abi
5320 && TREE_CODE (type) == RECORD_TYPE
5321 && int_size_in_bytes (type) > 0)
5323 CUMULATIVE_ARGS valcum;
5327 valcum.fregno = FP_ARG_MIN_REG;
5328 valcum.vregno = ALTIVEC_ARG_MIN_REG;
5329 /* Do a trial code generation as if this were going to be passed
5330 as an argument; if any part goes in memory, we return NULL. */
5331 valret = rs6000_darwin64_record_arg (&valcum, type, 1, true);
5334 /* Otherwise fall through to more conventional ABI rules. */
5337 if (AGGREGATE_TYPE_P (type)
5338 && (aix_struct_return
5339 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
5342 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
5343 modes only exist for GCC vector types if -maltivec. */
5344 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
5345 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
5348 /* Return synthetic vectors in memory. */
5349 if (TREE_CODE (type) == VECTOR_TYPE
5350 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
5352 static bool warned_for_return_big_vectors = false;
5353 if (!warned_for_return_big_vectors)
5355 warning (0, "GCC vector returned by reference: "
5356 "non-standard ABI extension with no compatibility guarantee");
5357 warned_for_return_big_vectors = true;
5362 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
5368 /* Initialize a variable CUM of type CUMULATIVE_ARGS
5369 for a call to a function whose data type is FNTYPE.
5370 For a library call, FNTYPE is 0.
5372 For incoming args we set the number of arguments in the prototype large
5373 so we never return a PARALLEL. */
5376 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
5377 rtx libname ATTRIBUTE_UNUSED, int incoming,
5378 int libcall, int n_named_args)
5380 static CUMULATIVE_ARGS zero_cumulative;
5382 *cum = zero_cumulative;
5384 cum->fregno = FP_ARG_MIN_REG;
5385 cum->vregno = ALTIVEC_ARG_MIN_REG;
5386 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
5387 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
5388 ? CALL_LIBCALL : CALL_NORMAL);
5389 cum->sysv_gregno = GP_ARG_MIN_REG;
5390 cum->stdarg = fntype
5391 && (TYPE_ARG_TYPES (fntype) != 0
5392 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5393 != void_type_node));
5395 cum->nargs_prototype = 0;
5396 if (incoming || cum->prototype)
5397 cum->nargs_prototype = n_named_args;
5399 /* Check for a longcall attribute. */
5400 if ((!fntype && rs6000_default_long_calls)
5402 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
5403 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
5404 cum->call_cookie |= CALL_LONG;
5406 if (TARGET_DEBUG_ARG)
5408 fprintf (stderr, "\ninit_cumulative_args:");
5411 tree ret_type = TREE_TYPE (fntype);
5412 fprintf (stderr, " ret code = %s,",
5413 tree_code_name[ (int)TREE_CODE (ret_type) ]);
5416 if (cum->call_cookie & CALL_LONG)
5417 fprintf (stderr, " longcall,");
5419 fprintf (stderr, " proto = %d, nargs = %d\n",
5420 cum->prototype, cum->nargs_prototype);
5425 && TARGET_ALTIVEC_ABI
5426 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
5428 error ("cannot return value in vector register because"
5429 " altivec instructions are disabled, use -maltivec"
5434 /* Return true if TYPE must be passed on the stack and not in registers. */
5437 rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
5439 if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
5440 return must_pass_in_stack_var_size (mode, type);
5442 return must_pass_in_stack_var_size_or_pad (mode, type);
5445 /* If defined, a C expression which determines whether, and in which
5446 direction, to pad out an argument with extra space. The value
5447 should be of type `enum direction': either `upward' to pad above
5448 the argument, `downward' to pad below, or `none' to inhibit
5451 For the AIX ABI structs are always stored left shifted in their argument slot. */
5455 function_arg_padding (enum machine_mode mode, const_tree type)
5457 #ifndef AGGREGATE_PADDING_FIXED
5458 #define AGGREGATE_PADDING_FIXED 0
5460 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
5461 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
5464 if (!AGGREGATE_PADDING_FIXED)
5466 /* GCC used to pass structures of the same size as integer types as
5467 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
5468 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
5469 passed padded downward, except that -mstrict-align further
5470 muddied the water in that multi-component structures of 2 and 4
5471 bytes in size were passed padded upward.
5473 The following arranges for best compatibility with previous
5474 versions of gcc, but removes the -mstrict-align dependency. */
5475 if (BYTES_BIG_ENDIAN)
5477 HOST_WIDE_INT size = 0;
5479 if (mode == BLKmode)
5481 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
5482 size = int_size_in_bytes (type);
5485 size = GET_MODE_SIZE (mode);
5487 if (size == 1 || size == 2 || size == 4)
5493 if (AGGREGATES_PAD_UPWARD_ALWAYS)
5495 if (type != 0 && AGGREGATE_TYPE_P (type))
5499 /* Fall back to the default. */
5500 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5503 /* If defined, a C expression that gives the alignment boundary, in bits,
5504 of an argument with the specified mode and type. If it is not defined,
5505 PARM_BOUNDARY is used for all arguments.
5507 V.4 wants long longs and doubles to be double word aligned. Just
5508 testing the mode size is a boneheaded way to do this as it means
5509 that other types such as complex int are also double word aligned.
5510 However, we're stuck with this because changing the ABI might break
5511 existing library interfaces.
5513 Doubleword align SPE vectors.
5514 Quadword align Altivec vectors.
5515 Quadword align large synthetic vector types. */
5518 function_arg_boundary (enum machine_mode mode, tree type)
5520 if (DEFAULT_ABI == ABI_V4
5521 && (GET_MODE_SIZE (mode) == 8
5522 || (TARGET_HARD_FLOAT
5524 && (mode == TFmode || mode == TDmode))))
5526 else if (SPE_VECTOR_MODE (mode)
5527 || (type && TREE_CODE (type) == VECTOR_TYPE
5528 && int_size_in_bytes (type) >= 8
5529 && int_size_in_bytes (type) < 16))
5531 else if (ALTIVEC_VECTOR_MODE (mode)
5532 || (type && TREE_CODE (type) == VECTOR_TYPE
5533 && int_size_in_bytes (type) >= 16))
5535 else if (rs6000_darwin64_abi && mode == BLKmode
5536 && type && TYPE_ALIGN (type) > 64)
5539 return PARM_BOUNDARY;
5542 /* For a function parm of MODE and TYPE, return the starting word in
5543 the parameter area. NWORDS of the parameter area are already used. */
5546 rs6000_parm_start (enum machine_mode mode, tree type, unsigned int nwords)
5549 unsigned int parm_offset;
5551 align = function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
5552 parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
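/* For example, under the V.4 ABI (parm_offset 2) a doubleword-aligned
argument (align 1) arriving after NWORDS = 3 starts at word
3 + (-(2 + 3) & 1) = 4, making parm_offset + 4 = 6 words, which is
doubleword aligned. */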
5553 return nwords + (-(parm_offset + nwords) & align);
5556 /* Compute the size (in words) of a function argument. */
5558 static unsigned long
5559 rs6000_arg_size (enum machine_mode mode, tree type)
5563 if (mode != BLKmode)
5564 size = GET_MODE_SIZE (mode);
5566 size = int_size_in_bytes (type);
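/* E.g. a 10-byte aggregate occupies (10 + 3) >> 2 = 3 words on 32-bit
targets and (10 + 7) >> 3 = 2 words on 64-bit targets. */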
5569 return (size + 3) >> 2;
5571 return (size + 7) >> 3;
5574 /* Use this to flush pending int fields. */
5577 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
5578 HOST_WIDE_INT bitpos)
5580 unsigned int startbit, endbit;
5581 int intregs, intoffset;
5582 enum machine_mode mode;
5584 if (cum->intoffset == -1)
5587 intoffset = cum->intoffset;
5588 cum->intoffset = -1;
5590 if (intoffset % BITS_PER_WORD != 0)
5592 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5594 if (mode == BLKmode)
5596 /* We couldn't find an appropriate mode, which happens,
5597 e.g., in packed structs when there are 3 bytes to load.
5598 Move intoffset back to the beginning of the word in this case. */
5600 intoffset = intoffset & -BITS_PER_WORD;
5604 startbit = intoffset & -BITS_PER_WORD;
5605 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5606 intregs = (endbit - startbit) / BITS_PER_WORD;
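/* E.g. with 64-bit words, intoffset = 32 and bitpos = 96 give
startbit = 0 and endbit = 128, so two GPRs are consumed. */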
5607 cum->words += intregs;
5610 /* The darwin64 ABI calls for us to recurse down through structs,
5611 looking for elements passed in registers. Unfortunately, we have
5612 to track int register count here also because of misalignments
5613 in powerpc alignment mode. */
5616 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
5618 HOST_WIDE_INT startbitpos)
5622 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
5623 if (TREE_CODE (f) == FIELD_DECL)
5625 HOST_WIDE_INT bitpos = startbitpos;
5626 tree ftype = TREE_TYPE (f);
5627 enum machine_mode mode;
5628 if (ftype == error_mark_node)
5630 mode = TYPE_MODE (ftype);
5632 if (DECL_SIZE (f) != 0
5633 && host_integerp (bit_position (f), 1))
5634 bitpos += int_bit_position (f);
5636 /* ??? FIXME: else assume zero offset. */
5638 if (TREE_CODE (ftype) == RECORD_TYPE)
5639 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
5640 else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
5642 rs6000_darwin64_record_arg_advance_flush (cum, bitpos);
5643 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
5644 cum->words += (GET_MODE_SIZE (mode) + 7) >> 3;
5646 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
5648 rs6000_darwin64_record_arg_advance_flush (cum, bitpos);
5652 else if (cum->intoffset == -1)
5653 cum->intoffset = bitpos;
5657 /* Update the data in CUM to advance over an argument
5658 of mode MODE and data type TYPE.
5659 (TYPE is null for libcalls where that information may not be available.)
5661 Note that for args passed by reference, function_arg will be called
5662 with MODE and TYPE set to that of the pointer to the arg, not the arg itself. */
5666 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5667 tree type, int named, int depth)
5671 /* Only tick off an argument if we're not recursing. */
5673 cum->nargs_prototype--;
5675 if (TARGET_ALTIVEC_ABI
5676 && (ALTIVEC_VECTOR_MODE (mode)
5677 || (type && TREE_CODE (type) == VECTOR_TYPE
5678 && int_size_in_bytes (type) == 16)))
5682 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
5685 if (!TARGET_ALTIVEC)
5686 error ("cannot pass argument in vector register because"
5687 " altivec instructions are disabled, use -maltivec"
5690 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
5691 even if it is going to be passed in a vector register.
5692 Darwin does the same for variable-argument functions. */
5693 if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
5694 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
5704 /* Vector parameters must be 16-byte aligned. This places
5705 them at 2 mod 4 in terms of words in 32-bit mode, since
5706 the parameter save area starts at offset 24 from the
5707 stack. In 64-bit mode, they just have to start on an
5708 even word, since the parameter save area is 16-byte
5709 aligned. Space for GPRs is reserved even if the argument
5710 will be passed in memory. */
5712 align = (2 - cum->words) & 3;
5714 align = cum->words & 1;
5715 cum->words += align + rs6000_arg_size (mode, type);
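/* E.g. in 32-bit mode with cum->words = 5, align = (2 - 5) & 3 = 1, so
the vector starts at word 6 (2 mod 4), i.e. 24 + 6*4 = 48 bytes from
the stack, which is 16-byte aligned. */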
5717 if (TARGET_DEBUG_ARG)
5719 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
5721 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
5722 cum->nargs_prototype, cum->prototype,
5723 GET_MODE_NAME (mode));
5727 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
5729 && cum->sysv_gregno <= GP_ARG_MAX_REG)
5732 else if (rs6000_darwin64_abi
5734 && TREE_CODE (type) == RECORD_TYPE
5735 && (size = int_size_in_bytes (type)) > 0)
5737 /* Variable sized types have size == -1 and are
5738 treated as if consisting entirely of ints.
5739 Pad to 16 byte boundary if needed. */
5740 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
5741 && (cum->words % 2) != 0)
5743 /* For varargs, we can just go up by the size of the struct. */
5745 cum->words += (size + 7) / 8;
5748 /* It is tempting to say int register count just goes up by
5749 sizeof(type)/8, but this is wrong in a case such as
5750 { int; double; int; } [powerpc alignment]. We have to
5751 grovel through the fields for these too. */
5753 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
5754 rs6000_darwin64_record_arg_advance_flush (cum,
5755 size * BITS_PER_UNIT);
5758 else if (DEFAULT_ABI == ABI_V4)
5760 if (TARGET_HARD_FLOAT && TARGET_FPRS
5761 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
5762 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
5763 || (mode == TFmode && !TARGET_IEEEQUAD)
5764 || mode == SDmode || mode == DDmode || mode == TDmode))
5766 /* _Decimal128 must use an even/odd register pair. This assumes
5767 that the register number is odd when fregno is odd. */
5768 if (mode == TDmode && (cum->fregno % 2) == 1)
5771 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
5772 <= FP_ARG_V4_MAX_REG)
5773 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
5776 cum->fregno = FP_ARG_V4_MAX_REG + 1;
5777 if (mode == DFmode || mode == TFmode
5778 || mode == DDmode || mode == TDmode)
5779 cum->words += cum->words & 1;
5780 cum->words += rs6000_arg_size (mode, type);
5785 int n_words = rs6000_arg_size (mode, type);
5786 int gregno = cum->sysv_gregno;
5788 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
5789 (r7,r8) or (r9,r10). As does any other 2 word item such
5790 as complex int due to a historical mistake. */
5792 gregno += (1 - gregno) & 1;
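/* E.g. gregno 4 is bumped to 5, so the item occupies (r5,r6). */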
5794 /* Multi-reg args are not split between registers and stack. */
5795 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
5797 /* Long long and SPE vectors are aligned on the stack.
5798 So are other 2 word items such as complex int due to
5799 a historical mistake. */
5801 cum->words += cum->words & 1;
5802 cum->words += n_words;
5805 /* Note: continuing to accumulate gregno past when we've started
5806 spilling to the stack indicates the fact that we've started
5807 spilling to the stack to expand_builtin_saveregs. */
5808 cum->sysv_gregno = gregno + n_words;
5811 if (TARGET_DEBUG_ARG)
5813 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
5814 cum->words, cum->fregno);
5815 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
5816 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
5817 fprintf (stderr, "mode = %4s, named = %d\n",
5818 GET_MODE_NAME (mode), named);
5823 int n_words = rs6000_arg_size (mode, type);
5824 int start_words = cum->words;
5825 int align_words = rs6000_parm_start (mode, type, start_words);
5827 cum->words = align_words + n_words;
5829 if (SCALAR_FLOAT_MODE_P (mode)
5830 && TARGET_HARD_FLOAT && TARGET_FPRS)
5832 /* _Decimal128 must be passed in an even/odd float register pair.
5833 This assumes that the register number is odd when fregno is odd. */
5835 if (mode == TDmode && (cum->fregno % 2) == 1)
5837 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
5840 if (TARGET_DEBUG_ARG)
5842 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
5843 cum->words, cum->fregno);
5844 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
5845 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
5846 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
5847 named, align_words - start_words, depth);
5853 spe_build_register_parallel (enum machine_mode mode, int gregno)
5860 r1 = gen_rtx_REG (DImode, gregno);
5861 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
5862 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
5866 r1 = gen_rtx_REG (DImode, gregno);
5867 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
5868 r3 = gen_rtx_REG (DImode, gregno + 2);
5869 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
5870 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
5873 r1 = gen_rtx_REG (DImode, gregno);
5874 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
5875 r3 = gen_rtx_REG (DImode, gregno + 2);
5876 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
5877 r5 = gen_rtx_REG (DImode, gregno + 4);
5878 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
5879 r7 = gen_rtx_REG (DImode, gregno + 6);
5880 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
5881 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
5888 /* Determine where to put a SIMD argument on the SPE. */
5890 rs6000_spe_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5893 int gregno = cum->sysv_gregno;
5895 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but doubles
5896 are passed and returned in a pair of GPRs for ABI compatibility. */
5897 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
5898 || mode == DCmode || mode == TCmode))
5900 int n_words = rs6000_arg_size (mode, type);
5902 /* Doubles go in an odd/even register pair (r5/r6, etc). */
5904 gregno += (1 - gregno) & 1;
5906 /* Multi-reg args are not split between registers and stack. */
5907 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
5910 return spe_build_register_parallel (mode, gregno);
5914 int n_words = rs6000_arg_size (mode, type);
5916 /* SPE vectors are put in odd registers. */
5917 if (n_words == 2 && (gregno & 1) == 0)
5920 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
5923 enum machine_mode m = SImode;
5925 r1 = gen_rtx_REG (m, gregno);
5926 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
5927 r2 = gen_rtx_REG (m, gregno + 1);
5928 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
5929 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
5936 if (gregno <= GP_ARG_MAX_REG)
5937 return gen_rtx_REG (mode, gregno);
5943 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
5944 structure between cum->intoffset and bitpos to integer registers. */
5947 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
5948 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
5950 enum machine_mode mode;
5952 unsigned int startbit, endbit;
5953 int this_regno, intregs, intoffset;
5956 if (cum->intoffset == -1)
5959 intoffset = cum->intoffset;
5960 cum->intoffset = -1;
5962 /* If this is the trailing part of a word, try to only load that
5963 much into the register. Otherwise load the whole register. Note
5964 that in the latter case we may pick up unwanted bits. It's not a
5965 problem at the moment but we may wish to revisit it. */
5967 if (intoffset % BITS_PER_WORD != 0)
5969 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5971 if (mode == BLKmode)
5973 /* We couldn't find an appropriate mode, which happens,
5974 e.g., in packed structs when there are 3 bytes to load.
5975 Move intoffset back to the beginning of the word in this case. */
5977 intoffset = intoffset & -BITS_PER_WORD;
5984 startbit = intoffset & -BITS_PER_WORD;
5985 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5986 intregs = (endbit - startbit) / BITS_PER_WORD;
5987 this_regno = cum->words + intoffset / BITS_PER_WORD;
5989 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
5992 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
5996 intoffset /= BITS_PER_UNIT;
5999 regno = GP_ARG_MIN_REG + this_regno;
6000 reg = gen_rtx_REG (mode, regno);
6002 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
6005 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
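/* Advance to the start of the next word, e.g. 5 -> 8 and 8 -> 16 when
UNITS_PER_WORD is 8. */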
6009 while (intregs > 0);
6012 /* Recursive workhorse for the following. */
6015 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
6016 HOST_WIDE_INT startbitpos, rtx rvec[],
6021 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
6022 if (TREE_CODE (f) == FIELD_DECL)
6024 HOST_WIDE_INT bitpos = startbitpos;
6025 tree ftype = TREE_TYPE (f);
6026 enum machine_mode mode;
6027 if (ftype == error_mark_node)
6029 mode = TYPE_MODE (ftype);
6031 if (DECL_SIZE (f) != 0
6032 && host_integerp (bit_position (f), 1))
6033 bitpos += int_bit_position (f);
6035 /* ??? FIXME: else assume zero offset. */
6037 if (TREE_CODE (ftype) == RECORD_TYPE)
6038 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
6039 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
6044 case SCmode: mode = SFmode; break;
6045 case DCmode: mode = DFmode; break;
6046 case TCmode: mode = TFmode; break;
6050 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
6052 = gen_rtx_EXPR_LIST (VOIDmode,
6053 gen_rtx_REG (mode, cum->fregno++),
6054 GEN_INT (bitpos / BITS_PER_UNIT));
6055 if (mode == TFmode || mode == TDmode)
6058 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
6060 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
6062 = gen_rtx_EXPR_LIST (VOIDmode,
6063 gen_rtx_REG (mode, cum->vregno++),
6064 GEN_INT (bitpos / BITS_PER_UNIT));
6066 else if (cum->intoffset == -1)
6067 cum->intoffset = bitpos;
6071 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
6072 the register(s) to be used for each field and subfield of a struct
6073 being passed by value, along with the offset of where the
6074 register's value may be found in the block. FP fields go in FP
6075 registers, vector fields go in vector registers, and everything
6076 else goes in int registers, packed as in memory.
6078 This code is also used for function return values. RETVAL indicates
6079 whether this is the case.
6081 Much of this is taken from the SPARC V9 port, which has a similar
6082 calling convention. */
6085 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
6086 int named, bool retval)
6088 rtx rvec[FIRST_PSEUDO_REGISTER];
6089 int k = 1, kbase = 1;
6090 HOST_WIDE_INT typesize = int_size_in_bytes (type);
6091 /* This is a copy; modifications are not visible to our caller. */
6092 CUMULATIVE_ARGS copy_cum = *orig_cum;
6093 CUMULATIVE_ARGS *cum = &copy_cum;
6095 /* Pad to 16 byte boundary if needed. */
6096 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
6097 && (cum->words % 2) != 0)
6104 /* Put entries into rvec[] for individual FP and vector fields, and
6105 for the chunks of memory that go in int regs. Note we start at
6106 element 1; 0 is reserved for an indication of using memory, and
6107 may or may not be filled in below. */
6108 rs6000_darwin64_record_arg_recurse (cum, type, 0, rvec, &k);
6109 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
6111 /* If any part of the struct went on the stack, put all of it there.
6112 This hack is because the generic code for
6113 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
6114 parts of the struct are not at the beginning. */
6118 return NULL_RTX; /* doesn't go in registers at all */
6120 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6122 if (k > 1 || cum->use_stack)
6123 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
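/* For illustration (a hypothetical example, assuming the struct is the
   first argument so FP and GP numbering start at f1 and r3): a darwin64
   struct { double d; int i; } produces roughly

     (parallel [(expr_list (reg:DF f1) (const_int 0))
                (expr_list (reg:DI r4) (const_int 8))])

   i.e. the double travels in an FP register at byte offset 0, and the
   trailing word containing the int is loaded into the GPR corresponding
   to byte offset 8 of the block. */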
6128 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
6131 rs6000_mixed_function_arg (enum machine_mode mode, tree type, int align_words)
6135 rtx rvec[GP_ARG_NUM_REG + 1];
6137 if (align_words >= GP_ARG_NUM_REG)
6140 n_units = rs6000_arg_size (mode, type);
6142 /* Optimize the simple case where the arg fits in one gpr, except in
6143 the case of BLKmode due to assign_parms assuming that registers are
6144 BITS_PER_WORD wide. */
6146 || (n_units == 1 && mode != BLKmode))
6147 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
6150 if (align_words + n_units > GP_ARG_NUM_REG)
6151 /* Not all of the arg fits in gprs. Say that it goes in memory too,
6152 using a magic NULL_RTX component.
6153 This is not strictly correct. Only some of the arg belongs in
6154 memory, not all of it. However, the normal scheme using
6155 function_arg_partial_nregs can result in unusual subregs, e.g.
6156 (subreg:SI (reg:DF) 4), which are not handled well. The code to
6157 store the whole arg to memory is often more efficient than code
6158 to store pieces, and we know that space is available in the right
6159 place for the whole arg. */
6160 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6165 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
6166 rtx off = GEN_INT (i++ * 4);
6167 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
6169 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
6171 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
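/* Worked example (illustrative): a DFmode argument with align_words == 7
   on the 32-bit ABI with 64-bit registers has n_units == 2 but only one
   parameter word left, so the result is roughly

     (parallel:DF [(expr_list (nil) (const_int 0))
                   (expr_list (reg:SI r10) (const_int 0))])

   i.e. the whole value is stored to memory, and the half that fits is
   also passed in the last GPR (GP_ARG_MIN_REG is r3, so word 7 is r10). */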
6174 /* Determine where to put an argument to a function.
6175 Value is zero to push the argument on the stack,
6176 or a hard register in which to store the argument.
6178 MODE is the argument's machine mode.
6179 TYPE is the data type of the argument (as a tree).
6180 This is null for libcalls where that information may not be available.
6182 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6183 the preceding args and about the function being called. It is
6184 not modified in this routine.
6185 NAMED is nonzero if this argument is a named parameter
6186 (otherwise it is an extra parameter matching an ellipsis).
6188 On RS/6000 the first eight words of non-FP are normally in registers
6189 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
6190 Under V.4, the first 8 FP args are in registers.
6192 If this is floating-point and no prototype is specified, we use
6193 both an FP and integer register (or possibly FP reg and stack). Library
6194 functions (when CALL_LIBCALL is set) always have the proper types for args,
6195 so we can pass the FP value just in one register. emit_library_call
6196 doesn't support PARALLEL anyway.
6198 Note that for args passed by reference, function_arg will be called
6199 with MODE and TYPE set to that of the pointer to the arg, not the arg itself. */
6203 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6204 tree type, int named)
6206 enum rs6000_abi abi = DEFAULT_ABI;
6208 /* Return a marker to indicate whether CR1 needs to set or clear the
6209 bit that V.4 uses to say fp args were passed in registers.
6210 Assume that we don't need the marker for software floating point,
6211 or compiler generated library calls. */
6212 if (mode == VOIDmode)
6215 && (cum->call_cookie & CALL_LIBCALL) == 0
6217 || (cum->nargs_prototype < 0
6218 && (cum->prototype || TARGET_NO_PROTOTYPE))))
6220 /* For the SPE, we need to crxor CR6 always. */
6222 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
6223 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
6224 return GEN_INT (cum->call_cookie
6225 | ((cum->fregno == FP_ARG_MIN_REG)
6226 ? CALL_V4_SET_FP_ARGS
6227 : CALL_V4_CLEAR_FP_ARGS));
6230 return GEN_INT (cum->call_cookie);
6233 if (rs6000_darwin64_abi && mode == BLKmode
6234 && TREE_CODE (type) == RECORD_TYPE)
6236 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, false);
6237 if (rslt != NULL_RTX)
6239 /* Else fall through to usual handling. */
6242 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
6243 if (TARGET_64BIT && ! cum->prototype)
6245 /* Vector parameters get passed in a vector register
6246 and also in GPRs or memory, in the absence of a prototype. */
6249 align_words = (cum->words + 1) & ~1;
6251 if (align_words >= GP_ARG_NUM_REG)
6257 slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
6259 return gen_rtx_PARALLEL (mode,
6261 gen_rtx_EXPR_LIST (VOIDmode,
6263 gen_rtx_EXPR_LIST (VOIDmode,
6264 gen_rtx_REG (mode, cum->vregno),
6268 return gen_rtx_REG (mode, cum->vregno);
6269 else if (TARGET_ALTIVEC_ABI
6270 && (ALTIVEC_VECTOR_MODE (mode)
6271 || (type && TREE_CODE (type) == VECTOR_TYPE
6272 && int_size_in_bytes (type) == 16)))
6274 if (named || abi == ABI_V4)
6278 /* Vector parameters to varargs functions under AIX or Darwin
6279 get passed in memory and possibly also in GPRs. */
6280 int align, align_words, n_words;
6281 enum machine_mode part_mode;
6283 /* Vector parameters must be 16-byte aligned. This places them at
6284 2 mod 4 in terms of words in 32-bit mode, since the parameter
6285 save area starts at offset 24 from the stack. In 64-bit mode,
6286 they just have to start on an even word, since the parameter
6287 save area is 16-byte aligned. */
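/* Worked example: with cum->words == 3 in 32-bit mode, align is
   (2 - 3) & 3 == 3 and align_words becomes 6, i.e. byte offset
   24 + 6*4 == 48 from the stack, which is 16-byte aligned. */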
6289 align = (2 - cum->words) & 3;
6291 align = cum->words & 1;
6292 align_words = cum->words + align;
6294 /* Out of registers? Memory, then. */
6295 if (align_words >= GP_ARG_NUM_REG)
6298 if (TARGET_32BIT && TARGET_POWERPC64)
6299 return rs6000_mixed_function_arg (mode, type, align_words);
6301 /* The vector value goes in GPRs. Only the part of the
6302 value in GPRs is reported here. */
6304 n_words = rs6000_arg_size (mode, type);
6305 if (align_words + n_words > GP_ARG_NUM_REG)
6306 /* Fortunately, there are only two possibilities: the value
6307 is either wholly in GPRs or half in GPRs and half not. */
6310 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
6313 else if (TARGET_SPE_ABI && TARGET_SPE
6314 && (SPE_VECTOR_MODE (mode)
6315 || (TARGET_E500_DOUBLE && (mode == DFmode
6318 || mode == TCmode))))
6319 return rs6000_spe_function_arg (cum, mode, type);
6321 else if (abi == ABI_V4)
6323 if (TARGET_HARD_FLOAT && TARGET_FPRS
6324 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
6325 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
6326 || (mode == TFmode && !TARGET_IEEEQUAD)
6327 || mode == SDmode || mode == DDmode || mode == TDmode))
6329 /* _Decimal128 must use an even/odd register pair. This assumes
6330 that the register number is odd when fregno is odd. */
6331 if (mode == TDmode && (cum->fregno % 2) == 1)
6334 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
6335 <= FP_ARG_V4_MAX_REG)
6336 return gen_rtx_REG (mode, cum->fregno);
6342 int n_words = rs6000_arg_size (mode, type);
6343 int gregno = cum->sysv_gregno;
6345 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
6346 (r7,r8) or (r9,r10), as is any other 2-word item such
6347 as complex int, due to a historical mistake. */
6349 gregno += (1 - gregno) & 1;
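/* (1 - gregno) & 1 is 1 exactly when gregno is even, so e.g. gregno 4
   is bumped to 5 (r5) while 3 (r3) is left alone; such args therefore
   always start in an odd-numbered GPR. */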
6351 /* Multi-reg args are not split between registers and stack. */
6352 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
6355 if (TARGET_32BIT && TARGET_POWERPC64)
6356 return rs6000_mixed_function_arg (mode, type,
6357 gregno - GP_ARG_MIN_REG);
6358 return gen_rtx_REG (mode, gregno);
6363 int align_words = rs6000_parm_start (mode, type, cum->words);
6365 /* _Decimal128 must be passed in an even/odd float register pair.
6366 This assumes that the register number is odd when fregno is odd. */
6367 if (mode == TDmode && (cum->fregno % 2) == 1)
6370 if (USE_FP_FOR_ARG_P (cum, mode, type))
6372 rtx rvec[GP_ARG_NUM_REG + 1];
6376 enum machine_mode fmode = mode;
6377 unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
6379 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
6381 /* Currently, we only ever need one reg here because complex
6382 doubles are split. */
6383 gcc_assert (cum->fregno == FP_ARG_MAX_REG
6384 && (fmode == TFmode || fmode == TDmode));
6386 /* Long double or _Decimal128 split over regs and memory. */
6387 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
6390 /* Do we also need to pass this arg in the parameter save area? */
6393 && (cum->nargs_prototype <= 0
6394 || (DEFAULT_ABI == ABI_AIX
6396 && align_words >= GP_ARG_NUM_REG)));
6398 if (!needs_psave && mode == fmode)
6399 return gen_rtx_REG (fmode, cum->fregno);
6404 /* Describe the part that goes in gprs or the stack.
6405 This piece must come first, before the fprs. */
6406 if (align_words < GP_ARG_NUM_REG)
6408 unsigned long n_words = rs6000_arg_size (mode, type);
6410 if (align_words + n_words > GP_ARG_NUM_REG
6411 || (TARGET_32BIT && TARGET_POWERPC64))
6413 /* If this is partially on the stack, then we only
6414 include the portion actually in registers here. */
6415 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
6418 if (align_words + n_words > GP_ARG_NUM_REG)
6419 /* Not all of the arg fits in gprs. Say that it
6420 goes in memory too, using a magic NULL_RTX
6421 component. Also see comment in
6422 rs6000_mixed_function_arg for why the normal
6423 function_arg_partial_nregs scheme doesn't work in this case. */
6425 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
6429 r = gen_rtx_REG (rmode,
6430 GP_ARG_MIN_REG + align_words);
6431 off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
6432 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
6434 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
6438 /* The whole arg fits in gprs. */
6439 r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
6440 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
6444 /* It's entirely in memory. */
6445 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6448 /* Describe where this piece goes in the fprs. */
6449 r = gen_rtx_REG (fmode, cum->fregno);
6450 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
6452 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
6454 else if (align_words < GP_ARG_NUM_REG)
6456 if (TARGET_32BIT && TARGET_POWERPC64)
6457 return rs6000_mixed_function_arg (mode, type, align_words);
6459 if (mode == BLKmode)
6462 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
6469 /* For an arg passed partly in registers and partly in memory, this is
6470 the number of bytes passed in registers. For args passed entirely in
6471 registers or entirely in memory, zero. When an arg is described by a
6472 PARALLEL, perhaps using more than one register type, this function
6473 returns the number of bytes used by the first element of the PARALLEL. */
6476 rs6000_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6477 tree type, bool named)
6482 if (DEFAULT_ABI == ABI_V4)
6485 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
6486 && cum->nargs_prototype >= 0)
6489 /* In this complicated case we just disable the partial_nregs code. */
6490 if (rs6000_darwin64_abi && mode == BLKmode
6491 && TREE_CODE (type) == RECORD_TYPE
6492 && int_size_in_bytes (type) > 0)
6495 align_words = rs6000_parm_start (mode, type, cum->words);
6497 if (USE_FP_FOR_ARG_P (cum, mode, type))
6499 /* If we are passing this arg in the fixed parameter save area
6500 (gprs or memory) as well as fprs, then this function should
6501 return the number of partial bytes passed in the parameter
6502 save area rather than partial bytes passed in fprs. */
6504 && (cum->nargs_prototype <= 0
6505 || (DEFAULT_ABI == ABI_AIX
6507 && align_words >= GP_ARG_NUM_REG)))
6509 else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
6510 > FP_ARG_MAX_REG + 1)
6511 ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
6512 else if (cum->nargs_prototype >= 0)
6516 if (align_words < GP_ARG_NUM_REG
6517 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
6518 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
6520 if (ret != 0 && TARGET_DEBUG_ARG)
6521 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
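/* Worked example (illustrative): on a 64-bit target, a 16-byte BLKmode
   argument starting at align_words == 7 has only one GPR left, so
   ret = (8 - 7) * 8 == 8 bytes are passed in registers and the rest
   goes in memory. */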
6526 /* A C expression that indicates when an argument must be passed by
6527 reference. If nonzero for an argument, a copy of that argument is
6528 made in memory and a pointer to the argument is passed instead of
6529 the argument itself. The pointer is passed in whatever way is
6530 appropriate for passing a pointer to that type.
6532 Under V.4, aggregates and long double are passed by reference.
6534 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
6535 reference unless the AltiVec vector extension ABI is in force.
6537 As an extension to all ABIs, variable sized types are passed by reference. */
6541 rs6000_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6542 enum machine_mode mode, const_tree type,
6543 bool named ATTRIBUTE_UNUSED)
6545 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
6547 if (TARGET_DEBUG_ARG)
6548 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
6555 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
6557 if (TARGET_DEBUG_ARG)
6558 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
6562 if (int_size_in_bytes (type) < 0)
6564 if (TARGET_DEBUG_ARG)
6565 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
6569 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
6570 modes only exist for GCC vector types if -maltivec. */
6571 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
6573 if (TARGET_DEBUG_ARG)
6574 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
6578 /* Pass synthetic vectors in memory. */
6579 if (TREE_CODE (type) == VECTOR_TYPE
6580 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
6582 static bool warned_for_pass_big_vectors = false;
6583 if (TARGET_DEBUG_ARG)
6584 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
6585 if (!warned_for_pass_big_vectors)
6587 warning (0, "GCC vector passed by reference: "
6588 "non-standard ABI extension with no compatibility guarantee");
6589 warned_for_pass_big_vectors = true;
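/* A minimal standalone sketch (not GCC code) that condenses the checks
   made by rs6000_pass_by_reference above. The struct and its flag names
   are hypothetical and simply mirror the conditions tested in the real
   function; it is shown only to summarize the rules. */

struct pass_by_ref_sketch
{
  int v4_abi;               /* DEFAULT_ABI == ABI_V4 */
  int ieeequad_long_double; /* TARGET_IEEEQUAD && mode == TFmode */
  int is_aggregate;         /* AGGREGATE_TYPE_P (type) */
  int variable_size;        /* int_size_in_bytes (type) < 0 */
  int altivec_mode_no_abi;  /* 32-bit AltiVec mode without the AltiVec ABI */
  int oversized_vector;     /* GCC vector type wider than the ABI allows */
};

static int
pass_by_reference_sketch (const struct pass_by_ref_sketch *a)
{
  if (a->v4_abi && a->ieeequad_long_double)
    return 1;   /* V.4 IEEE long double */
  if (a->v4_abi && a->is_aggregate)
    return 1;   /* V.4 aggregates */
  if (a->variable_size)
    return 1;   /* variable sized types */
  if (a->altivec_mode_no_abi)
    return 1;   /* AltiVec vectors without the AltiVec extension ABI */
  if (a->oversized_vector)
    return 1;   /* synthetic vectors go in memory */
  return 0;
}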
6598 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
6601 enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
6606 for (i = 0; i < nregs; i++)
6608 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
6609 if (reload_completed)
6611 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
6614 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
6615 i * GET_MODE_SIZE (reg_mode));
6618 tem = replace_equiv_address (tem, XEXP (tem, 0));
6622 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
6626 /* Perform any actions needed for a function that is receiving a
6627 variable number of arguments.
6631 MODE and TYPE are the mode and type of the current parameter.
6633 PRETEND_SIZE is a variable that should be set to the amount of stack
6634 that must be pushed by the prolog to pretend that our caller pushed it.
6637 Normally, this macro will push all remaining incoming registers on the
6638 stack and set PRETEND_SIZE to the length of the registers pushed. */
6641 setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6642 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6645 CUMULATIVE_ARGS next_cum;
6646 int reg_size = TARGET_32BIT ? 4 : 8;
6647 rtx save_area = NULL_RTX, mem;
6648 int first_reg_offset;
6651 /* Skip the last named argument. */
6653 function_arg_advance (&next_cum, mode, type, 1, 0);
6655 if (DEFAULT_ABI == ABI_V4)
6657 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
6661 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
6662 HOST_WIDE_INT offset = 0;
6664 /* Try to optimize the size of the varargs save area.
6665 The ABI requires that ap.reg_save_area is doubleword
6666 aligned, but we don't need to allocate space for all
6667 the bytes, only those to which we actually will save anything. */
6669 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
6670 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
6671 if (TARGET_HARD_FLOAT && TARGET_FPRS
6672 && next_cum.fregno <= FP_ARG_V4_MAX_REG
6673 && cfun->va_list_fpr_size)
6676 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
6677 * UNITS_PER_FP_WORD;
6678 if (cfun->va_list_fpr_size
6679 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
6680 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
6682 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
6683 * UNITS_PER_FP_WORD;
6687 offset = -((first_reg_offset * reg_size) & ~7);
6688 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
6690 gpr_reg_num = cfun->va_list_gpr_size;
6691 if (reg_size == 4 && (first_reg_offset & 1))
6694 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
6697 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
6699 - (int) (GP_ARG_NUM_REG * reg_size);
6701 if (gpr_size + fpr_size)
6704 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
6705 gcc_assert (GET_CODE (reg_save_area) == MEM);
6706 reg_save_area = XEXP (reg_save_area, 0);
6707 if (GET_CODE (reg_save_area) == PLUS)
6709 gcc_assert (XEXP (reg_save_area, 0)
6710 == virtual_stack_vars_rtx);
6711 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
6712 offset += INTVAL (XEXP (reg_save_area, 1));
6715 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
6718 cfun->machine->varargs_save_offset = offset;
6719 save_area = plus_constant (virtual_stack_vars_rtx, offset);
6724 first_reg_offset = next_cum.words;
6725 save_area = virtual_incoming_args_rtx;
6727 if (targetm.calls.must_pass_in_stack (mode, type))
6728 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
6731 set = get_varargs_alias_set ();
6732 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
6733 && cfun->va_list_gpr_size)
6735 int nregs = GP_ARG_NUM_REG - first_reg_offset;
6737 if (va_list_gpr_counter_field)
6739 /* V4 va_list_gpr_size counts number of registers needed. */
6740 if (nregs > cfun->va_list_gpr_size)
6741 nregs = cfun->va_list_gpr_size;
6745 /* char * va_list instead counts number of bytes needed. */
6746 if (nregs > cfun->va_list_gpr_size / reg_size)
6747 nregs = cfun->va_list_gpr_size / reg_size;
6750 mem = gen_rtx_MEM (BLKmode,
6751 plus_constant (save_area,
6752 first_reg_offset * reg_size));
6753 MEM_NOTRAP_P (mem) = 1;
6754 set_mem_alias_set (mem, set);
6755 set_mem_align (mem, BITS_PER_WORD);
6757 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
6761 /* Save FP registers if needed. */
6762 if (DEFAULT_ABI == ABI_V4
6763 && TARGET_HARD_FLOAT && TARGET_FPRS
6765 && next_cum.fregno <= FP_ARG_V4_MAX_REG
6766 && cfun->va_list_fpr_size)
6768 int fregno = next_cum.fregno, nregs;
6769 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
6770 rtx lab = gen_label_rtx ();
6771 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
6772 * UNITS_PER_FP_WORD);
6775 (gen_rtx_SET (VOIDmode,
6777 gen_rtx_IF_THEN_ELSE (VOIDmode,
6778 gen_rtx_NE (VOIDmode, cr1,
6780 gen_rtx_LABEL_REF (VOIDmode, lab),
6784 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
6785 fregno++, off += UNITS_PER_FP_WORD, nregs++)
6787 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
6789 plus_constant (save_area, off));
6790 MEM_NOTRAP_P (mem) = 1;
6791 set_mem_alias_set (mem, set);
6792 set_mem_align (mem, GET_MODE_ALIGNMENT (
6793 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
6794 ? DFmode : SFmode));
6795 emit_move_insn (mem, gen_rtx_REG (
6796 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
6797 ? DFmode : SFmode, fregno));
6804 /* Create the va_list data type. */
6807 rs6000_build_builtin_va_list (void)
6809 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
6811 /* For AIX, prefer 'char *' because that's what the system
6812 header files like. */
6813 if (DEFAULT_ABI != ABI_V4)
6814 return build_pointer_type (char_type_node);
6816 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6817 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
6819 f_gpr = build_decl (FIELD_DECL, get_identifier ("gpr"),
6820 unsigned_char_type_node);
6821 f_fpr = build_decl (FIELD_DECL, get_identifier ("fpr"),
6822 unsigned_char_type_node);
6823 /* Give the two bytes of padding a name, so that -Wpadded won't warn on every user file. */
6825 f_res = build_decl (FIELD_DECL, get_identifier ("reserved"),
6826 short_unsigned_type_node);
6827 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
6829 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
6832 va_list_gpr_counter_field = f_gpr;
6833 va_list_fpr_counter_field = f_fpr;
6835 DECL_FIELD_CONTEXT (f_gpr) = record;
6836 DECL_FIELD_CONTEXT (f_fpr) = record;
6837 DECL_FIELD_CONTEXT (f_res) = record;
6838 DECL_FIELD_CONTEXT (f_ovf) = record;
6839 DECL_FIELD_CONTEXT (f_sav) = record;
6841 TREE_CHAIN (record) = type_decl;
6842 TYPE_NAME (record) = type_decl;
6843 TYPE_FIELDS (record) = f_gpr;
6844 TREE_CHAIN (f_gpr) = f_fpr;
6845 TREE_CHAIN (f_fpr) = f_res;
6846 TREE_CHAIN (f_res) = f_ovf;
6847 TREE_CHAIN (f_ovf) = f_sav;
6849 layout_type (record);
6851 /* The correct type is an array type of one element. */
6852 return build_array_type (record, build_index_type (size_zero_node));
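/* For reference, the record built above is roughly equivalent to this
   C-level declaration (a sketch; the types of the two pointer fields are
   an assumption based on the SVR4 PowerPC ABI, since the lines declaring
   them are not shown here):

       typedef struct __va_list_tag
       {
         unsigned char gpr;           number of GP arg registers used so far
         unsigned char fpr;           number of FP arg registers used so far
         unsigned short reserved;     padding, named to quiet -Wpadded
         void *overflow_arg_area;     arguments passed on the stack
         void *reg_save_area;         registers dumped by the prologue
       } __va_list_tag;

   with va_list itself being an array of one such element. */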
6855 /* Implement va_start. */
6858 rs6000_va_start (tree valist, rtx nextarg)
6860 HOST_WIDE_INT words, n_gpr, n_fpr;
6861 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
6862 tree gpr, fpr, ovf, sav, t;
6864 /* Only SVR4 needs something special. */
6865 if (DEFAULT_ABI != ABI_V4)
6867 std_expand_builtin_va_start (valist, nextarg);
6871 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
6872 f_fpr = TREE_CHAIN (f_gpr);
6873 f_res = TREE_CHAIN (f_fpr);
6874 f_ovf = TREE_CHAIN (f_res);
6875 f_sav = TREE_CHAIN (f_ovf);
6877 valist = build_va_arg_indirect_ref (valist);
6878 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6879 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
6881 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
6883 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
6886 /* Count number of gp and fp argument registers used. */
6887 words = crtl->args.info.words;
6888 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
6890 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
6893 if (TARGET_DEBUG_ARG)
6894 fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
6895 HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
6896 words, n_gpr, n_fpr);
6898 if (cfun->va_list_gpr_size)
6900 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
6901 build_int_cst (NULL_TREE, n_gpr));
6902 TREE_SIDE_EFFECTS (t) = 1;
6903 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6906 if (cfun->va_list_fpr_size)
6908 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
6909 build_int_cst (NULL_TREE, n_fpr));
6910 TREE_SIDE_EFFECTS (t) = 1;
6911 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6914 /* Find the overflow area. */
6915 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
6917 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t,
6918 size_int (words * UNITS_PER_WORD));
6919 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
6920 TREE_SIDE_EFFECTS (t) = 1;
6921 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6923 /* If there were no va_arg invocations, don't set up the register save area. */
6925 if (!cfun->va_list_gpr_size
6926 && !cfun->va_list_fpr_size
6927 && n_gpr < GP_ARG_NUM_REG
6928 && n_fpr < FP_ARG_V4_MAX_REG)
6931 /* Find the register save area. */
6932 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
6933 if (cfun->machine->varargs_save_offset)
6934 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
6935 size_int (cfun->machine->varargs_save_offset));
6936 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
6937 TREE_SIDE_EFFECTS (t) = 1;
6938 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6941 /* Implement va_arg. */
6944 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6947 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
6948 tree gpr, fpr, ovf, sav, reg, t, u;
6949 int size, rsize, n_reg, sav_ofs, sav_scale;
6950 tree lab_false, lab_over, addr;
6952 tree ptrtype = build_pointer_type (type);
6956 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
6958 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
6959 return build_va_arg_indirect_ref (t);
6962 if (DEFAULT_ABI != ABI_V4)
6964 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
6966 tree elem_type = TREE_TYPE (type);
6967 enum machine_mode elem_mode = TYPE_MODE (elem_type);
6968 int elem_size = GET_MODE_SIZE (elem_mode);
6970 if (elem_size < UNITS_PER_WORD)
6972 tree real_part, imag_part;
6973 gimple_seq post = NULL;
6975 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
6977 /* Copy the value into a temporary, lest the formal temporary
6978 be reused out from under us. */
6979 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
6980 gimple_seq_add_seq (pre_p, post);
6982 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
6985 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
6989 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6992 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
6993 f_fpr = TREE_CHAIN (f_gpr);
6994 f_res = TREE_CHAIN (f_fpr);
6995 f_ovf = TREE_CHAIN (f_res);
6996 f_sav = TREE_CHAIN (f_ovf);
6998 valist = build_va_arg_indirect_ref (valist);
6999 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
7000 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
7002 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
7004 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
7007 size = int_size_in_bytes (type);
7008 rsize = (size + 3) / 4;
7011 if (TARGET_HARD_FLOAT && TARGET_FPRS
7012 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
7013 || (TARGET_DOUBLE_FLOAT
7014 && (TYPE_MODE (type) == DFmode
7015 || TYPE_MODE (type) == TFmode
7016 || TYPE_MODE (type) == SDmode
7017 || TYPE_MODE (type) == DDmode
7018 || TYPE_MODE (type) == TDmode))))
7020 /* FP args go in FP registers, if present. */
7022 n_reg = (size + 7) / 8;
7023 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
7024 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
7025 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
7030 /* Otherwise into GP registers. */
7039 /* Pull the value out of the saved registers.... */
7042 addr = create_tmp_var (ptr_type_node, "addr");
7043 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
7045 /* AltiVec vectors never go in registers when -mabi=altivec. */
7046 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
7050 lab_false = create_artificial_label ();
7051 lab_over = create_artificial_label ();
7053 /* Long long and SPE vectors are aligned in the registers.
7054 As is any other 2-gpr item, such as complex int, due to a
7055 historical mistake. */
7057 if (n_reg == 2 && reg == gpr)
7060 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
7061 build_int_cst (TREE_TYPE (reg), n_reg - 1));
7062 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
7063 unshare_expr (reg), u);
7065 /* _Decimal128 is passed in even/odd fpr pairs; the stored
7066 reg number is 0 for f1, so we want to make it odd. */
7067 else if (reg == fpr && TYPE_MODE (type) == TDmode)
7070 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
7071 build_int_cst (TREE_TYPE (reg), 1));
7072 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
7075 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
7076 t = build2 (GE_EXPR, boolean_type_node, u, t);
7077 u = build1 (GOTO_EXPR, void_type_node, lab_false);
7078 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
7079 gimplify_and_add (t, pre_p);
7083 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, size_int (sav_ofs));
7085 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
7086 build_int_cst (TREE_TYPE (reg), n_reg));
7087 u = fold_convert (sizetype, u);
7088 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
7089 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, u);
7091 /* _Decimal32 varargs are located in the second word of the 64-bit
7092 FP register for 32-bit binaries. */
7093 if (!TARGET_POWERPC64
7094 && TARGET_HARD_FLOAT && TARGET_FPRS
7095 && TYPE_MODE (type) == SDmode)
7096 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (size));
7098 gimplify_assign (addr, t, pre_p);
7100 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7102 stmt = gimple_build_label (lab_false);
7103 gimple_seq_add_stmt (pre_p, stmt);
7105 if ((n_reg == 2 && !regalign) || n_reg > 2)
7107 /* Ensure that we don't find any more args in regs.
7108 Alignment has taken care of the special cases. */
7109 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
7113 /* ... otherwise out of the overflow area. */
7115 /* Care for on-stack alignment if needed. */
7119 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (align - 1));
7120 t = fold_convert (sizetype, t);
7121 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7123 t = fold_convert (TREE_TYPE (ovf), t);
7125 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7127 gimplify_assign (unshare_expr (addr), t, pre_p);
7129 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (size));
7130 gimplify_assign (unshare_expr (ovf), t, pre_p);
7134 stmt = gimple_build_label (lab_over);
7135 gimple_seq_add_stmt (pre_p, stmt);
7138 if (STRICT_ALIGNMENT
7139 && (TYPE_ALIGN (type)
7140 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
7142 /* The value (of type complex double, for example) may not be
7143 aligned in memory in the saved registers, so copy via a
7144 temporary. (This is the same code as used for SPARC.) */
7145 tree tmp = create_tmp_var (type, "va_arg_tmp");
7146 tree dest_addr = build_fold_addr_expr (tmp);
7148 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
7149 3, dest_addr, addr, size_int (rsize * 4));
7151 gimplify_and_add (copy, pre_p);
7155 addr = fold_convert (ptrtype, addr);
7156 return build_va_arg_indirect_ref (addr);
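/* A minimal standalone sketch (not GCC code) of the address computation
   that the gimplified sequence above performs for the V.4 ABI. It is
   simplified: the register-pair alignment for two-register items and the
   _Decimal32 second-word adjustment are omitted, and all names here are
   illustrative only. */

static void *
va_arg_addr_sketch (unsigned char *reg_count, char *reg_save_area,
                    char **overflow_area, int n_reg, int sav_ofs,
                    int sav_scale, int size, int align)
{
  if (*reg_count + n_reg <= 8)
    {
      /* The value still fits in the register save area. */
      char *addr = reg_save_area + sav_ofs + *reg_count * sav_scale;
      *reg_count += n_reg;
      return addr;
    }

  /* Otherwise make sure no further args are taken from registers, then
     fetch the value from the overflow (stack) area, aligned upward. */
  *reg_count = 8;
  {
    unsigned long p = (unsigned long) *overflow_area;
    p = (p + align - 1) & -(unsigned long) align;
    *overflow_area = (char *) p + size;
    return (char *) p;
  }
}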
7162 def_builtin (int mask, const char *name, tree type, int code)
7164 if ((mask & target_flags) || TARGET_PAIRED_FLOAT)
7166 if (rs6000_builtin_decls[code])
7169 rs6000_builtin_decls[code] =
7170 add_builtin_function (name, type, code, BUILT_IN_MD,
7175 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
7177 static const struct builtin_description bdesc_3arg[] =
7179 { MASK_ALTIVEC, CODE_FOR_altivec_vmaddfp, "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP },
7180 { MASK_ALTIVEC, CODE_FOR_altivec_vmhaddshs, "__builtin_altivec_vmhaddshs", ALTIVEC_BUILTIN_VMHADDSHS },
7181 { MASK_ALTIVEC, CODE_FOR_altivec_vmhraddshs, "__builtin_altivec_vmhraddshs", ALTIVEC_BUILTIN_VMHRADDSHS },
7182 { MASK_ALTIVEC, CODE_FOR_altivec_vmladduhm, "__builtin_altivec_vmladduhm", ALTIVEC_BUILTIN_VMLADDUHM},
7183 { MASK_ALTIVEC, CODE_FOR_altivec_vmsumubm, "__builtin_altivec_vmsumubm", ALTIVEC_BUILTIN_VMSUMUBM },
7184 { MASK_ALTIVEC, CODE_FOR_altivec_vmsummbm, "__builtin_altivec_vmsummbm", ALTIVEC_BUILTIN_VMSUMMBM },
7185 { MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhm, "__builtin_altivec_vmsumuhm", ALTIVEC_BUILTIN_VMSUMUHM },
7186 { MASK_ALTIVEC, CODE_FOR_altivec_vmsumshm, "__builtin_altivec_vmsumshm", ALTIVEC_BUILTIN_VMSUMSHM },
7187 { MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhs, "__builtin_altivec_vmsumuhs", ALTIVEC_BUILTIN_VMSUMUHS },
7188 { MASK_ALTIVEC, CODE_FOR_altivec_vmsumshs, "__builtin_altivec_vmsumshs", ALTIVEC_BUILTIN_VMSUMSHS },
7189 { MASK_ALTIVEC, CODE_FOR_altivec_vnmsubfp, "__builtin_altivec_vnmsubfp", ALTIVEC_BUILTIN_VNMSUBFP },
7190 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4sf, "__builtin_altivec_vperm_4sf", ALTIVEC_BUILTIN_VPERM_4SF },
7191 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4si, "__builtin_altivec_vperm_4si", ALTIVEC_BUILTIN_VPERM_4SI },
7192 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v8hi, "__builtin_altivec_vperm_8hi", ALTIVEC_BUILTIN_VPERM_8HI },
7193 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v16qi, "__builtin_altivec_vperm_16qi", ALTIVEC_BUILTIN_VPERM_16QI },
7194 { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v4sf, "__builtin_altivec_vsel_4sf", ALTIVEC_BUILTIN_VSEL_4SF },
7195 { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v4si, "__builtin_altivec_vsel_4si", ALTIVEC_BUILTIN_VSEL_4SI },
7196 { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v8hi, "__builtin_altivec_vsel_8hi", ALTIVEC_BUILTIN_VSEL_8HI },
7197 { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v16qi, "__builtin_altivec_vsel_16qi", ALTIVEC_BUILTIN_VSEL_16QI },
7198 { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v16qi, "__builtin_altivec_vsldoi_16qi", ALTIVEC_BUILTIN_VSLDOI_16QI },
7199 { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v8hi, "__builtin_altivec_vsldoi_8hi", ALTIVEC_BUILTIN_VSLDOI_8HI },
7200 { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v4si, "__builtin_altivec_vsldoi_4si", ALTIVEC_BUILTIN_VSLDOI_4SI },
7201 { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v4sf, "__builtin_altivec_vsldoi_4sf", ALTIVEC_BUILTIN_VSLDOI_4SF },
7203 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_madd", ALTIVEC_BUILTIN_VEC_MADD },
7204 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_madds", ALTIVEC_BUILTIN_VEC_MADDS },
7205 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mladd", ALTIVEC_BUILTIN_VEC_MLADD },
7206 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mradds", ALTIVEC_BUILTIN_VEC_MRADDS },
7207 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_msum", ALTIVEC_BUILTIN_VEC_MSUM },
7208 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumshm", ALTIVEC_BUILTIN_VEC_VMSUMSHM },
7209 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumuhm", ALTIVEC_BUILTIN_VEC_VMSUMUHM },
7210 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsummbm", ALTIVEC_BUILTIN_VEC_VMSUMMBM },
7211 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumubm", ALTIVEC_BUILTIN_VEC_VMSUMUBM },
7212 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_msums", ALTIVEC_BUILTIN_VEC_MSUMS },
7213 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumshs", ALTIVEC_BUILTIN_VEC_VMSUMSHS },
7214 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumuhs", ALTIVEC_BUILTIN_VEC_VMSUMUHS },
7215 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_nmsub", ALTIVEC_BUILTIN_VEC_NMSUB },
7216 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_perm", ALTIVEC_BUILTIN_VEC_PERM },
7217 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sel", ALTIVEC_BUILTIN_VEC_SEL },
7219 { 0, CODE_FOR_paired_msub, "__builtin_paired_msub", PAIRED_BUILTIN_MSUB },
7220 { 0, CODE_FOR_paired_madd, "__builtin_paired_madd", PAIRED_BUILTIN_MADD },
7221 { 0, CODE_FOR_paired_madds0, "__builtin_paired_madds0", PAIRED_BUILTIN_MADDS0 },
7222 { 0, CODE_FOR_paired_madds1, "__builtin_paired_madds1", PAIRED_BUILTIN_MADDS1 },
7223 { 0, CODE_FOR_paired_nmsub, "__builtin_paired_nmsub", PAIRED_BUILTIN_NMSUB },
7224 { 0, CODE_FOR_paired_nmadd, "__builtin_paired_nmadd", PAIRED_BUILTIN_NMADD },
7225 { 0, CODE_FOR_paired_sum0, "__builtin_paired_sum0", PAIRED_BUILTIN_SUM0 },
7226 { 0, CODE_FOR_paired_sum1, "__builtin_paired_sum1", PAIRED_BUILTIN_SUM1 },
7227 { 0, CODE_FOR_selv2sf4, "__builtin_paired_selv2sf4", PAIRED_BUILTIN_SELV2SF4 },
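/* Example use of one of the ternary AltiVec builtins listed above, from
   user code compiled with -maltivec and <altivec.h> (the overloaded
   vec_madd maps onto __builtin_altivec_vmaddfp for vector float):

     vector float a, b, c;
     vector float d = vec_madd (a, b, c);   computes a*b + c elementwise
 */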
7230 /* DST operations: void foo (void *, const int, const char). */
7232 static const struct builtin_description bdesc_dst[] =
7234 { MASK_ALTIVEC, CODE_FOR_altivec_dst, "__builtin_altivec_dst", ALTIVEC_BUILTIN_DST },
7235 { MASK_ALTIVEC, CODE_FOR_altivec_dstt, "__builtin_altivec_dstt", ALTIVEC_BUILTIN_DSTT },
7236 { MASK_ALTIVEC, CODE_FOR_altivec_dstst, "__builtin_altivec_dstst", ALTIVEC_BUILTIN_DSTST },
7237 { MASK_ALTIVEC, CODE_FOR_altivec_dststt, "__builtin_altivec_dststt", ALTIVEC_BUILTIN_DSTSTT },
7239 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dst", ALTIVEC_BUILTIN_VEC_DST },
7240 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dstt", ALTIVEC_BUILTIN_VEC_DSTT },
7241 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dstst", ALTIVEC_BUILTIN_VEC_DSTST },
7242 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dststt", ALTIVEC_BUILTIN_VEC_DSTSTT }
7245 /* Simple binary operations: VECc = foo (VECa, VECb). */
7247 static struct builtin_description bdesc_2arg[] =
7249 { MASK_ALTIVEC, CODE_FOR_addv16qi3, "__builtin_altivec_vaddubm", ALTIVEC_BUILTIN_VADDUBM },
7250 { MASK_ALTIVEC, CODE_FOR_addv8hi3, "__builtin_altivec_vadduhm", ALTIVEC_BUILTIN_VADDUHM },
7251 { MASK_ALTIVEC, CODE_FOR_addv4si3, "__builtin_altivec_vadduwm", ALTIVEC_BUILTIN_VADDUWM },
7252 { MASK_ALTIVEC, CODE_FOR_addv4sf3, "__builtin_altivec_vaddfp", ALTIVEC_BUILTIN_VADDFP },
7253 { MASK_ALTIVEC, CODE_FOR_altivec_vaddcuw, "__builtin_altivec_vaddcuw", ALTIVEC_BUILTIN_VADDCUW },
7254 { MASK_ALTIVEC, CODE_FOR_altivec_vaddubs, "__builtin_altivec_vaddubs", ALTIVEC_BUILTIN_VADDUBS },
7255 { MASK_ALTIVEC, CODE_FOR_altivec_vaddsbs, "__builtin_altivec_vaddsbs", ALTIVEC_BUILTIN_VADDSBS },
7256 { MASK_ALTIVEC, CODE_FOR_altivec_vadduhs, "__builtin_altivec_vadduhs", ALTIVEC_BUILTIN_VADDUHS },
7257 { MASK_ALTIVEC, CODE_FOR_altivec_vaddshs, "__builtin_altivec_vaddshs", ALTIVEC_BUILTIN_VADDSHS },
7258 { MASK_ALTIVEC, CODE_FOR_altivec_vadduws, "__builtin_altivec_vadduws", ALTIVEC_BUILTIN_VADDUWS },
7259 { MASK_ALTIVEC, CODE_FOR_altivec_vaddsws, "__builtin_altivec_vaddsws", ALTIVEC_BUILTIN_VADDSWS },
7260 { MASK_ALTIVEC, CODE_FOR_andv4si3, "__builtin_altivec_vand", ALTIVEC_BUILTIN_VAND },
7261 { MASK_ALTIVEC, CODE_FOR_andcv4si3, "__builtin_altivec_vandc", ALTIVEC_BUILTIN_VANDC },
7262 { MASK_ALTIVEC, CODE_FOR_altivec_vavgub, "__builtin_altivec_vavgub", ALTIVEC_BUILTIN_VAVGUB },
7263 { MASK_ALTIVEC, CODE_FOR_altivec_vavgsb, "__builtin_altivec_vavgsb", ALTIVEC_BUILTIN_VAVGSB },
7264 { MASK_ALTIVEC, CODE_FOR_altivec_vavguh, "__builtin_altivec_vavguh", ALTIVEC_BUILTIN_VAVGUH },
7265 { MASK_ALTIVEC, CODE_FOR_altivec_vavgsh, "__builtin_altivec_vavgsh", ALTIVEC_BUILTIN_VAVGSH },
7266 { MASK_ALTIVEC, CODE_FOR_altivec_vavguw, "__builtin_altivec_vavguw", ALTIVEC_BUILTIN_VAVGUW },
7267 { MASK_ALTIVEC, CODE_FOR_altivec_vavgsw, "__builtin_altivec_vavgsw", ALTIVEC_BUILTIN_VAVGSW },
7268 { MASK_ALTIVEC, CODE_FOR_altivec_vcfux, "__builtin_altivec_vcfux", ALTIVEC_BUILTIN_VCFUX },
7269 { MASK_ALTIVEC, CODE_FOR_altivec_vcfsx, "__builtin_altivec_vcfsx", ALTIVEC_BUILTIN_VCFSX },
7270 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpbfp, "__builtin_altivec_vcmpbfp", ALTIVEC_BUILTIN_VCMPBFP },
7271 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequb, "__builtin_altivec_vcmpequb", ALTIVEC_BUILTIN_VCMPEQUB },
7272 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequh, "__builtin_altivec_vcmpequh", ALTIVEC_BUILTIN_VCMPEQUH },
7273 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequw, "__builtin_altivec_vcmpequw", ALTIVEC_BUILTIN_VCMPEQUW },
7274 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpeqfp, "__builtin_altivec_vcmpeqfp", ALTIVEC_BUILTIN_VCMPEQFP },
7275 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgefp, "__builtin_altivec_vcmpgefp", ALTIVEC_BUILTIN_VCMPGEFP },
7276 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtub, "__builtin_altivec_vcmpgtub", ALTIVEC_BUILTIN_VCMPGTUB },
7277 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsb, "__builtin_altivec_vcmpgtsb", ALTIVEC_BUILTIN_VCMPGTSB },
7278 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtuh, "__builtin_altivec_vcmpgtuh", ALTIVEC_BUILTIN_VCMPGTUH },
7279 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsh, "__builtin_altivec_vcmpgtsh", ALTIVEC_BUILTIN_VCMPGTSH },
7280 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtuw, "__builtin_altivec_vcmpgtuw", ALTIVEC_BUILTIN_VCMPGTUW },
7281 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsw, "__builtin_altivec_vcmpgtsw", ALTIVEC_BUILTIN_VCMPGTSW },
7282 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtfp, "__builtin_altivec_vcmpgtfp", ALTIVEC_BUILTIN_VCMPGTFP },
7283 { MASK_ALTIVEC, CODE_FOR_altivec_vctsxs, "__builtin_altivec_vctsxs", ALTIVEC_BUILTIN_VCTSXS },
7284 { MASK_ALTIVEC, CODE_FOR_altivec_vctuxs, "__builtin_altivec_vctuxs", ALTIVEC_BUILTIN_VCTUXS },
7285 { MASK_ALTIVEC, CODE_FOR_umaxv16qi3, "__builtin_altivec_vmaxub", ALTIVEC_BUILTIN_VMAXUB },
7286 { MASK_ALTIVEC, CODE_FOR_smaxv16qi3, "__builtin_altivec_vmaxsb", ALTIVEC_BUILTIN_VMAXSB },
7287 { MASK_ALTIVEC, CODE_FOR_umaxv8hi3, "__builtin_altivec_vmaxuh", ALTIVEC_BUILTIN_VMAXUH },
7288 { MASK_ALTIVEC, CODE_FOR_smaxv8hi3, "__builtin_altivec_vmaxsh", ALTIVEC_BUILTIN_VMAXSH },
7289 { MASK_ALTIVEC, CODE_FOR_umaxv4si3, "__builtin_altivec_vmaxuw", ALTIVEC_BUILTIN_VMAXUW },
7290 { MASK_ALTIVEC, CODE_FOR_smaxv4si3, "__builtin_altivec_vmaxsw", ALTIVEC_BUILTIN_VMAXSW },
7291 { MASK_ALTIVEC, CODE_FOR_smaxv4sf3, "__builtin_altivec_vmaxfp", ALTIVEC_BUILTIN_VMAXFP },
7292 { MASK_ALTIVEC, CODE_FOR_altivec_vmrghb, "__builtin_altivec_vmrghb", ALTIVEC_BUILTIN_VMRGHB },
7293 { MASK_ALTIVEC, CODE_FOR_altivec_vmrghh, "__builtin_altivec_vmrghh", ALTIVEC_BUILTIN_VMRGHH },
7294 { MASK_ALTIVEC, CODE_FOR_altivec_vmrghw, "__builtin_altivec_vmrghw", ALTIVEC_BUILTIN_VMRGHW },
7295 { MASK_ALTIVEC, CODE_FOR_altivec_vmrglb, "__builtin_altivec_vmrglb", ALTIVEC_BUILTIN_VMRGLB },
7296 { MASK_ALTIVEC, CODE_FOR_altivec_vmrglh, "__builtin_altivec_vmrglh", ALTIVEC_BUILTIN_VMRGLH },
7297 { MASK_ALTIVEC, CODE_FOR_altivec_vmrglw, "__builtin_altivec_vmrglw", ALTIVEC_BUILTIN_VMRGLW },
7298 { MASK_ALTIVEC, CODE_FOR_uminv16qi3, "__builtin_altivec_vminub", ALTIVEC_BUILTIN_VMINUB },
7299 { MASK_ALTIVEC, CODE_FOR_sminv16qi3, "__builtin_altivec_vminsb", ALTIVEC_BUILTIN_VMINSB },
7300 { MASK_ALTIVEC, CODE_FOR_uminv8hi3, "__builtin_altivec_vminuh", ALTIVEC_BUILTIN_VMINUH },
7301 { MASK_ALTIVEC, CODE_FOR_sminv8hi3, "__builtin_altivec_vminsh", ALTIVEC_BUILTIN_VMINSH },
7302 { MASK_ALTIVEC, CODE_FOR_uminv4si3, "__builtin_altivec_vminuw", ALTIVEC_BUILTIN_VMINUW },
7303 { MASK_ALTIVEC, CODE_FOR_sminv4si3, "__builtin_altivec_vminsw", ALTIVEC_BUILTIN_VMINSW },
7304 { MASK_ALTIVEC, CODE_FOR_sminv4sf3, "__builtin_altivec_vminfp", ALTIVEC_BUILTIN_VMINFP },
7305 { MASK_ALTIVEC, CODE_FOR_altivec_vmuleub, "__builtin_altivec_vmuleub", ALTIVEC_BUILTIN_VMULEUB },
7306 { MASK_ALTIVEC, CODE_FOR_altivec_vmulesb, "__builtin_altivec_vmulesb", ALTIVEC_BUILTIN_VMULESB },
7307 { MASK_ALTIVEC, CODE_FOR_altivec_vmuleuh, "__builtin_altivec_vmuleuh", ALTIVEC_BUILTIN_VMULEUH },
7308 { MASK_ALTIVEC, CODE_FOR_altivec_vmulesh, "__builtin_altivec_vmulesh", ALTIVEC_BUILTIN_VMULESH },
7309 { MASK_ALTIVEC, CODE_FOR_altivec_vmuloub, "__builtin_altivec_vmuloub", ALTIVEC_BUILTIN_VMULOUB },
7310 { MASK_ALTIVEC, CODE_FOR_altivec_vmulosb, "__builtin_altivec_vmulosb", ALTIVEC_BUILTIN_VMULOSB },
7311 { MASK_ALTIVEC, CODE_FOR_altivec_vmulouh, "__builtin_altivec_vmulouh", ALTIVEC_BUILTIN_VMULOUH },
7312 { MASK_ALTIVEC, CODE_FOR_altivec_vmulosh, "__builtin_altivec_vmulosh", ALTIVEC_BUILTIN_VMULOSH },
7313 { MASK_ALTIVEC, CODE_FOR_altivec_norv4si3, "__builtin_altivec_vnor", ALTIVEC_BUILTIN_VNOR },
7314 { MASK_ALTIVEC, CODE_FOR_iorv4si3, "__builtin_altivec_vor", ALTIVEC_BUILTIN_VOR },
7315 { MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum, "__builtin_altivec_vpkuhum", ALTIVEC_BUILTIN_VPKUHUM },
7316 { MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum, "__builtin_altivec_vpkuwum", ALTIVEC_BUILTIN_VPKUWUM },
7317 { MASK_ALTIVEC, CODE_FOR_altivec_vpkpx, "__builtin_altivec_vpkpx", ALTIVEC_BUILTIN_VPKPX },
7318 { MASK_ALTIVEC, CODE_FOR_altivec_vpkshss, "__builtin_altivec_vpkshss", ALTIVEC_BUILTIN_VPKSHSS },
7319 { MASK_ALTIVEC, CODE_FOR_altivec_vpkswss, "__builtin_altivec_vpkswss", ALTIVEC_BUILTIN_VPKSWSS },
7320 { MASK_ALTIVEC, CODE_FOR_altivec_vpkuhus, "__builtin_altivec_vpkuhus", ALTIVEC_BUILTIN_VPKUHUS },
7321 { MASK_ALTIVEC, CODE_FOR_altivec_vpkshus, "__builtin_altivec_vpkshus", ALTIVEC_BUILTIN_VPKSHUS },
7322 { MASK_ALTIVEC, CODE_FOR_altivec_vpkuwus, "__builtin_altivec_vpkuwus", ALTIVEC_BUILTIN_VPKUWUS },
7323 { MASK_ALTIVEC, CODE_FOR_altivec_vpkswus, "__builtin_altivec_vpkswus", ALTIVEC_BUILTIN_VPKSWUS },
7324 { MASK_ALTIVEC, CODE_FOR_altivec_vrlb, "__builtin_altivec_vrlb", ALTIVEC_BUILTIN_VRLB },
7325 { MASK_ALTIVEC, CODE_FOR_altivec_vrlh, "__builtin_altivec_vrlh", ALTIVEC_BUILTIN_VRLH },
7326 { MASK_ALTIVEC, CODE_FOR_altivec_vrlw, "__builtin_altivec_vrlw", ALTIVEC_BUILTIN_VRLW },
7327 { MASK_ALTIVEC, CODE_FOR_vashlv16qi3, "__builtin_altivec_vslb", ALTIVEC_BUILTIN_VSLB },
7328 { MASK_ALTIVEC, CODE_FOR_vashlv8hi3, "__builtin_altivec_vslh", ALTIVEC_BUILTIN_VSLH },
7329 { MASK_ALTIVEC, CODE_FOR_vashlv4si3, "__builtin_altivec_vslw", ALTIVEC_BUILTIN_VSLW },
7330 { MASK_ALTIVEC, CODE_FOR_altivec_vsl, "__builtin_altivec_vsl", ALTIVEC_BUILTIN_VSL },
7331 { MASK_ALTIVEC, CODE_FOR_altivec_vslo, "__builtin_altivec_vslo", ALTIVEC_BUILTIN_VSLO },
7332 { MASK_ALTIVEC, CODE_FOR_altivec_vspltb, "__builtin_altivec_vspltb", ALTIVEC_BUILTIN_VSPLTB },
7333 { MASK_ALTIVEC, CODE_FOR_altivec_vsplth, "__builtin_altivec_vsplth", ALTIVEC_BUILTIN_VSPLTH },
7334 { MASK_ALTIVEC, CODE_FOR_altivec_vspltw, "__builtin_altivec_vspltw", ALTIVEC_BUILTIN_VSPLTW },
7335 { MASK_ALTIVEC, CODE_FOR_vlshrv16qi3, "__builtin_altivec_vsrb", ALTIVEC_BUILTIN_VSRB },
7336 { MASK_ALTIVEC, CODE_FOR_vlshrv8hi3, "__builtin_altivec_vsrh", ALTIVEC_BUILTIN_VSRH },
7337 { MASK_ALTIVEC, CODE_FOR_vlshrv4si3, "__builtin_altivec_vsrw", ALTIVEC_BUILTIN_VSRW },
7338 { MASK_ALTIVEC, CODE_FOR_vashrv16qi3, "__builtin_altivec_vsrab", ALTIVEC_BUILTIN_VSRAB },
7339 { MASK_ALTIVEC, CODE_FOR_vashrv8hi3, "__builtin_altivec_vsrah", ALTIVEC_BUILTIN_VSRAH },
7340 { MASK_ALTIVEC, CODE_FOR_vashrv4si3, "__builtin_altivec_vsraw", ALTIVEC_BUILTIN_VSRAW },
7341 { MASK_ALTIVEC, CODE_FOR_altivec_vsr, "__builtin_altivec_vsr", ALTIVEC_BUILTIN_VSR },
7342 { MASK_ALTIVEC, CODE_FOR_altivec_vsro, "__builtin_altivec_vsro", ALTIVEC_BUILTIN_VSRO },
7343 { MASK_ALTIVEC, CODE_FOR_subv16qi3, "__builtin_altivec_vsububm", ALTIVEC_BUILTIN_VSUBUBM },
7344 { MASK_ALTIVEC, CODE_FOR_subv8hi3, "__builtin_altivec_vsubuhm", ALTIVEC_BUILTIN_VSUBUHM },
7345 { MASK_ALTIVEC, CODE_FOR_subv4si3, "__builtin_altivec_vsubuwm", ALTIVEC_BUILTIN_VSUBUWM },
7346 { MASK_ALTIVEC, CODE_FOR_subv4sf3, "__builtin_altivec_vsubfp", ALTIVEC_BUILTIN_VSUBFP },
7347 { MASK_ALTIVEC, CODE_FOR_altivec_vsubcuw, "__builtin_altivec_vsubcuw", ALTIVEC_BUILTIN_VSUBCUW },
7348 { MASK_ALTIVEC, CODE_FOR_altivec_vsububs, "__builtin_altivec_vsububs", ALTIVEC_BUILTIN_VSUBUBS },
7349 { MASK_ALTIVEC, CODE_FOR_altivec_vsubsbs, "__builtin_altivec_vsubsbs", ALTIVEC_BUILTIN_VSUBSBS },
7350 { MASK_ALTIVEC, CODE_FOR_altivec_vsubuhs, "__builtin_altivec_vsubuhs", ALTIVEC_BUILTIN_VSUBUHS },
7351 { MASK_ALTIVEC, CODE_FOR_altivec_vsubshs, "__builtin_altivec_vsubshs", ALTIVEC_BUILTIN_VSUBSHS },
7352 { MASK_ALTIVEC, CODE_FOR_altivec_vsubuws, "__builtin_altivec_vsubuws", ALTIVEC_BUILTIN_VSUBUWS },
7353 { MASK_ALTIVEC, CODE_FOR_altivec_vsubsws, "__builtin_altivec_vsubsws", ALTIVEC_BUILTIN_VSUBSWS },
7354 { MASK_ALTIVEC, CODE_FOR_altivec_vsum4ubs, "__builtin_altivec_vsum4ubs", ALTIVEC_BUILTIN_VSUM4UBS },
7355 { MASK_ALTIVEC, CODE_FOR_altivec_vsum4sbs, "__builtin_altivec_vsum4sbs", ALTIVEC_BUILTIN_VSUM4SBS },
7356 { MASK_ALTIVEC, CODE_FOR_altivec_vsum4shs, "__builtin_altivec_vsum4shs", ALTIVEC_BUILTIN_VSUM4SHS },
7357 { MASK_ALTIVEC, CODE_FOR_altivec_vsum2sws, "__builtin_altivec_vsum2sws", ALTIVEC_BUILTIN_VSUM2SWS },
7358 { MASK_ALTIVEC, CODE_FOR_altivec_vsumsws, "__builtin_altivec_vsumsws", ALTIVEC_BUILTIN_VSUMSWS },
7359 { MASK_ALTIVEC, CODE_FOR_xorv4si3, "__builtin_altivec_vxor", ALTIVEC_BUILTIN_VXOR },
7361 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_add", ALTIVEC_BUILTIN_VEC_ADD },
7362 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddfp", ALTIVEC_BUILTIN_VEC_VADDFP },
7363 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduwm", ALTIVEC_BUILTIN_VEC_VADDUWM },
7364 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduhm", ALTIVEC_BUILTIN_VEC_VADDUHM },
7365 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddubm", ALTIVEC_BUILTIN_VEC_VADDUBM },
7366 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_addc", ALTIVEC_BUILTIN_VEC_ADDC },
7367 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_adds", ALTIVEC_BUILTIN_VEC_ADDS },
7368 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddsws", ALTIVEC_BUILTIN_VEC_VADDSWS },
7369 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduws", ALTIVEC_BUILTIN_VEC_VADDUWS },
7370 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddshs", ALTIVEC_BUILTIN_VEC_VADDSHS },
7371 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduhs", ALTIVEC_BUILTIN_VEC_VADDUHS },
7372 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddsbs", ALTIVEC_BUILTIN_VEC_VADDSBS },
7373 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddubs", ALTIVEC_BUILTIN_VEC_VADDUBS },
7374 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_and", ALTIVEC_BUILTIN_VEC_AND },
7375 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_andc", ALTIVEC_BUILTIN_VEC_ANDC },
7376 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_avg", ALTIVEC_BUILTIN_VEC_AVG },
7377 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsw", ALTIVEC_BUILTIN_VEC_VAVGSW },
7378 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavguw", ALTIVEC_BUILTIN_VEC_VAVGUW },
7379 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsh", ALTIVEC_BUILTIN_VEC_VAVGSH },
7380 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavguh", ALTIVEC_BUILTIN_VEC_VAVGUH },
7381 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsb", ALTIVEC_BUILTIN_VEC_VAVGSB },
7382 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgub", ALTIVEC_BUILTIN_VEC_VAVGUB },
7383 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpb", ALTIVEC_BUILTIN_VEC_CMPB },
7384 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpeq", ALTIVEC_BUILTIN_VEC_CMPEQ },
7385 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpeqfp", ALTIVEC_BUILTIN_VEC_VCMPEQFP },
7386 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequw", ALTIVEC_BUILTIN_VEC_VCMPEQUW },
7387 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequh", ALTIVEC_BUILTIN_VEC_VCMPEQUH },
7388 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequb", ALTIVEC_BUILTIN_VEC_VCMPEQUB },
7389 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpge", ALTIVEC_BUILTIN_VEC_CMPGE },
7390 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpgt", ALTIVEC_BUILTIN_VEC_CMPGT },
7391 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtfp", ALTIVEC_BUILTIN_VEC_VCMPGTFP },
7392 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsw", ALTIVEC_BUILTIN_VEC_VCMPGTSW },
7393 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtuw", ALTIVEC_BUILTIN_VEC_VCMPGTUW },
7394 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsh", ALTIVEC_BUILTIN_VEC_VCMPGTSH },
7395 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtuh", ALTIVEC_BUILTIN_VEC_VCMPGTUH },
7396 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsb", ALTIVEC_BUILTIN_VEC_VCMPGTSB },
7397 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtub", ALTIVEC_BUILTIN_VEC_VCMPGTUB },
7398 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmple", ALTIVEC_BUILTIN_VEC_CMPLE },
7399 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmplt", ALTIVEC_BUILTIN_VEC_CMPLT },
7400 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_max", ALTIVEC_BUILTIN_VEC_MAX },
7401 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxfp", ALTIVEC_BUILTIN_VEC_VMAXFP },
7402 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsw", ALTIVEC_BUILTIN_VEC_VMAXSW },
7403 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxuw", ALTIVEC_BUILTIN_VEC_VMAXUW },
7404 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsh", ALTIVEC_BUILTIN_VEC_VMAXSH },
7405 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxuh", ALTIVEC_BUILTIN_VEC_VMAXUH },
7406 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsb", ALTIVEC_BUILTIN_VEC_VMAXSB },
7407 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxub", ALTIVEC_BUILTIN_VEC_VMAXUB },
7408 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mergeh", ALTIVEC_BUILTIN_VEC_MERGEH },
7409 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghw", ALTIVEC_BUILTIN_VEC_VMRGHW },
7410 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghh", ALTIVEC_BUILTIN_VEC_VMRGHH },
7411 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghb", ALTIVEC_BUILTIN_VEC_VMRGHB },
7412 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mergel", ALTIVEC_BUILTIN_VEC_MERGEL },
7413 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglw", ALTIVEC_BUILTIN_VEC_VMRGLW },
7414 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglh", ALTIVEC_BUILTIN_VEC_VMRGLH },
7415 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglb", ALTIVEC_BUILTIN_VEC_VMRGLB },
7416 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_min", ALTIVEC_BUILTIN_VEC_MIN },
7417 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminfp", ALTIVEC_BUILTIN_VEC_VMINFP },
7418 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsw", ALTIVEC_BUILTIN_VEC_VMINSW },
7419 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminuw", ALTIVEC_BUILTIN_VEC_VMINUW },
7420 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsh", ALTIVEC_BUILTIN_VEC_VMINSH },
7421 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminuh", ALTIVEC_BUILTIN_VEC_VMINUH },
7422 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsb", ALTIVEC_BUILTIN_VEC_VMINSB },
7423 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminub", ALTIVEC_BUILTIN_VEC_VMINUB },
7424 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mule", ALTIVEC_BUILTIN_VEC_MULE },
7425 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuleub", ALTIVEC_BUILTIN_VEC_VMULEUB },
7426 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulesb", ALTIVEC_BUILTIN_VEC_VMULESB },
7427 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuleuh", ALTIVEC_BUILTIN_VEC_VMULEUH },
7428 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulesh", ALTIVEC_BUILTIN_VEC_VMULESH },
7429 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mulo", ALTIVEC_BUILTIN_VEC_MULO },
7430 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulosh", ALTIVEC_BUILTIN_VEC_VMULOSH },
7431 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulouh", ALTIVEC_BUILTIN_VEC_VMULOUH },
7432 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulosb", ALTIVEC_BUILTIN_VEC_VMULOSB },
7433 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuloub", ALTIVEC_BUILTIN_VEC_VMULOUB },
7434 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_nor", ALTIVEC_BUILTIN_VEC_NOR },
7435 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_or", ALTIVEC_BUILTIN_VEC_OR },
7436 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_pack", ALTIVEC_BUILTIN_VEC_PACK },
7437 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuwum", ALTIVEC_BUILTIN_VEC_VPKUWUM },
7438 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuhum", ALTIVEC_BUILTIN_VEC_VPKUHUM },
7439 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packpx", ALTIVEC_BUILTIN_VEC_PACKPX },
7440 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packs", ALTIVEC_BUILTIN_VEC_PACKS },
7441 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkswss", ALTIVEC_BUILTIN_VEC_VPKSWSS },
7442 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuwus", ALTIVEC_BUILTIN_VEC_VPKUWUS },
7443 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkshss", ALTIVEC_BUILTIN_VEC_VPKSHSS },
7444 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuhus", ALTIVEC_BUILTIN_VEC_VPKUHUS },
7445 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packsu", ALTIVEC_BUILTIN_VEC_PACKSU },
7446 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkswus", ALTIVEC_BUILTIN_VEC_VPKSWUS },
7447 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkshus", ALTIVEC_BUILTIN_VEC_VPKSHUS },
7448 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_rl", ALTIVEC_BUILTIN_VEC_RL },
7449 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlw", ALTIVEC_BUILTIN_VEC_VRLW },
7450 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlh", ALTIVEC_BUILTIN_VEC_VRLH },
7451 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlb", ALTIVEC_BUILTIN_VEC_VRLB },
7452 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sl", ALTIVEC_BUILTIN_VEC_SL },
7453 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslw", ALTIVEC_BUILTIN_VEC_VSLW },
7454 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslh", ALTIVEC_BUILTIN_VEC_VSLH },
7455 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslb", ALTIVEC_BUILTIN_VEC_VSLB },
7456 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sll", ALTIVEC_BUILTIN_VEC_SLL },
7457 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_slo", ALTIVEC_BUILTIN_VEC_SLO },
7458 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sr", ALTIVEC_BUILTIN_VEC_SR },
7459 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrw", ALTIVEC_BUILTIN_VEC_VSRW },
7460 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrh", ALTIVEC_BUILTIN_VEC_VSRH },
7461 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrb", ALTIVEC_BUILTIN_VEC_VSRB },
7462 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sra", ALTIVEC_BUILTIN_VEC_SRA },
7463 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsraw", ALTIVEC_BUILTIN_VEC_VSRAW },
7464 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrah", ALTIVEC_BUILTIN_VEC_VSRAH },
7465 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrab", ALTIVEC_BUILTIN_VEC_VSRAB },
7466 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_srl", ALTIVEC_BUILTIN_VEC_SRL },
7467 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sro", ALTIVEC_BUILTIN_VEC_SRO },
7468 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sub", ALTIVEC_BUILTIN_VEC_SUB },
7469 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubfp", ALTIVEC_BUILTIN_VEC_VSUBFP },
7470 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuwm", ALTIVEC_BUILTIN_VEC_VSUBUWM },
7471 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuhm", ALTIVEC_BUILTIN_VEC_VSUBUHM },
7472 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsububm", ALTIVEC_BUILTIN_VEC_VSUBUBM },
7473 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_subc", ALTIVEC_BUILTIN_VEC_SUBC },
7474 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_subs", ALTIVEC_BUILTIN_VEC_SUBS },
7475 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubsws", ALTIVEC_BUILTIN_VEC_VSUBSWS },
7476 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuws", ALTIVEC_BUILTIN_VEC_VSUBUWS },
7477 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubshs", ALTIVEC_BUILTIN_VEC_VSUBSHS },
7478 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuhs", ALTIVEC_BUILTIN_VEC_VSUBUHS },
7479 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubsbs", ALTIVEC_BUILTIN_VEC_VSUBSBS },
7480 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsububs", ALTIVEC_BUILTIN_VEC_VSUBUBS },
7481 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sum4s", ALTIVEC_BUILTIN_VEC_SUM4S },
7482 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4shs", ALTIVEC_BUILTIN_VEC_VSUM4SHS },
7483 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4sbs", ALTIVEC_BUILTIN_VEC_VSUM4SBS },
7484 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4ubs", ALTIVEC_BUILTIN_VEC_VSUM4UBS },
7485 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sum2s", ALTIVEC_BUILTIN_VEC_SUM2S },
7486 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sums", ALTIVEC_BUILTIN_VEC_SUMS },
7487 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_xor", ALTIVEC_BUILTIN_VEC_XOR },
7489 { 0, CODE_FOR_divv2sf3, "__builtin_paired_divv2sf3", PAIRED_BUILTIN_DIVV2SF3 },
7490 { 0, CODE_FOR_addv2sf3, "__builtin_paired_addv2sf3", PAIRED_BUILTIN_ADDV2SF3 },
7491 { 0, CODE_FOR_subv2sf3, "__builtin_paired_subv2sf3", PAIRED_BUILTIN_SUBV2SF3 },
7492 { 0, CODE_FOR_mulv2sf3, "__builtin_paired_mulv2sf3", PAIRED_BUILTIN_MULV2SF3 },
7493 { 0, CODE_FOR_paired_muls0, "__builtin_paired_muls0", PAIRED_BUILTIN_MULS0 },
7494 { 0, CODE_FOR_paired_muls1, "__builtin_paired_muls1", PAIRED_BUILTIN_MULS1 },
7495 { 0, CODE_FOR_paired_merge00, "__builtin_paired_merge00", PAIRED_BUILTIN_MERGE00 },
7496 { 0, CODE_FOR_paired_merge01, "__builtin_paired_merge01", PAIRED_BUILTIN_MERGE01 },
7497 { 0, CODE_FOR_paired_merge10, "__builtin_paired_merge10", PAIRED_BUILTIN_MERGE10 },
7498 { 0, CODE_FOR_paired_merge11, "__builtin_paired_merge11", PAIRED_BUILTIN_MERGE11 },
7500 /* Place-holder. Leave as first SPE builtin. */
7501 { 0, CODE_FOR_spe_evaddw, "__builtin_spe_evaddw", SPE_BUILTIN_EVADDW },
7502 { 0, CODE_FOR_spe_evand, "__builtin_spe_evand", SPE_BUILTIN_EVAND },
7503 { 0, CODE_FOR_spe_evandc, "__builtin_spe_evandc", SPE_BUILTIN_EVANDC },
7504 { 0, CODE_FOR_spe_evdivws, "__builtin_spe_evdivws", SPE_BUILTIN_EVDIVWS },
7505 { 0, CODE_FOR_spe_evdivwu, "__builtin_spe_evdivwu", SPE_BUILTIN_EVDIVWU },
7506 { 0, CODE_FOR_spe_eveqv, "__builtin_spe_eveqv", SPE_BUILTIN_EVEQV },
7507 { 0, CODE_FOR_spe_evfsadd, "__builtin_spe_evfsadd", SPE_BUILTIN_EVFSADD },
7508 { 0, CODE_FOR_spe_evfsdiv, "__builtin_spe_evfsdiv", SPE_BUILTIN_EVFSDIV },
7509 { 0, CODE_FOR_spe_evfsmul, "__builtin_spe_evfsmul", SPE_BUILTIN_EVFSMUL },
7510 { 0, CODE_FOR_spe_evfssub, "__builtin_spe_evfssub", SPE_BUILTIN_EVFSSUB },
7511 { 0, CODE_FOR_spe_evmergehi, "__builtin_spe_evmergehi", SPE_BUILTIN_EVMERGEHI },
7512 { 0, CODE_FOR_spe_evmergehilo, "__builtin_spe_evmergehilo", SPE_BUILTIN_EVMERGEHILO },
7513 { 0, CODE_FOR_spe_evmergelo, "__builtin_spe_evmergelo", SPE_BUILTIN_EVMERGELO },
7514 { 0, CODE_FOR_spe_evmergelohi, "__builtin_spe_evmergelohi", SPE_BUILTIN_EVMERGELOHI },
7515 { 0, CODE_FOR_spe_evmhegsmfaa, "__builtin_spe_evmhegsmfaa", SPE_BUILTIN_EVMHEGSMFAA },
7516 { 0, CODE_FOR_spe_evmhegsmfan, "__builtin_spe_evmhegsmfan", SPE_BUILTIN_EVMHEGSMFAN },
7517 { 0, CODE_FOR_spe_evmhegsmiaa, "__builtin_spe_evmhegsmiaa", SPE_BUILTIN_EVMHEGSMIAA },
7518 { 0, CODE_FOR_spe_evmhegsmian, "__builtin_spe_evmhegsmian", SPE_BUILTIN_EVMHEGSMIAN },
7519 { 0, CODE_FOR_spe_evmhegumiaa, "__builtin_spe_evmhegumiaa", SPE_BUILTIN_EVMHEGUMIAA },
7520 { 0, CODE_FOR_spe_evmhegumian, "__builtin_spe_evmhegumian", SPE_BUILTIN_EVMHEGUMIAN },
7521 { 0, CODE_FOR_spe_evmhesmf, "__builtin_spe_evmhesmf", SPE_BUILTIN_EVMHESMF },
7522 { 0, CODE_FOR_spe_evmhesmfa, "__builtin_spe_evmhesmfa", SPE_BUILTIN_EVMHESMFA },
7523 { 0, CODE_FOR_spe_evmhesmfaaw, "__builtin_spe_evmhesmfaaw", SPE_BUILTIN_EVMHESMFAAW },
7524 { 0, CODE_FOR_spe_evmhesmfanw, "__builtin_spe_evmhesmfanw", SPE_BUILTIN_EVMHESMFANW },
7525 { 0, CODE_FOR_spe_evmhesmi, "__builtin_spe_evmhesmi", SPE_BUILTIN_EVMHESMI },
7526 { 0, CODE_FOR_spe_evmhesmia, "__builtin_spe_evmhesmia", SPE_BUILTIN_EVMHESMIA },
7527 { 0, CODE_FOR_spe_evmhesmiaaw, "__builtin_spe_evmhesmiaaw", SPE_BUILTIN_EVMHESMIAAW },
7528 { 0, CODE_FOR_spe_evmhesmianw, "__builtin_spe_evmhesmianw", SPE_BUILTIN_EVMHESMIANW },
7529 { 0, CODE_FOR_spe_evmhessf, "__builtin_spe_evmhessf", SPE_BUILTIN_EVMHESSF },
7530 { 0, CODE_FOR_spe_evmhessfa, "__builtin_spe_evmhessfa", SPE_BUILTIN_EVMHESSFA },
7531 { 0, CODE_FOR_spe_evmhessfaaw, "__builtin_spe_evmhessfaaw", SPE_BUILTIN_EVMHESSFAAW },
7532 { 0, CODE_FOR_spe_evmhessfanw, "__builtin_spe_evmhessfanw", SPE_BUILTIN_EVMHESSFANW },
7533 { 0, CODE_FOR_spe_evmhessiaaw, "__builtin_spe_evmhessiaaw", SPE_BUILTIN_EVMHESSIAAW },
7534 { 0, CODE_FOR_spe_evmhessianw, "__builtin_spe_evmhessianw", SPE_BUILTIN_EVMHESSIANW },
7535 { 0, CODE_FOR_spe_evmheumi, "__builtin_spe_evmheumi", SPE_BUILTIN_EVMHEUMI },
7536 { 0, CODE_FOR_spe_evmheumia, "__builtin_spe_evmheumia", SPE_BUILTIN_EVMHEUMIA },
7537 { 0, CODE_FOR_spe_evmheumiaaw, "__builtin_spe_evmheumiaaw", SPE_BUILTIN_EVMHEUMIAAW },
7538 { 0, CODE_FOR_spe_evmheumianw, "__builtin_spe_evmheumianw", SPE_BUILTIN_EVMHEUMIANW },
7539 { 0, CODE_FOR_spe_evmheusiaaw, "__builtin_spe_evmheusiaaw", SPE_BUILTIN_EVMHEUSIAAW },
7540 { 0, CODE_FOR_spe_evmheusianw, "__builtin_spe_evmheusianw", SPE_BUILTIN_EVMHEUSIANW },
7541 { 0, CODE_FOR_spe_evmhogsmfaa, "__builtin_spe_evmhogsmfaa", SPE_BUILTIN_EVMHOGSMFAA },
7542 { 0, CODE_FOR_spe_evmhogsmfan, "__builtin_spe_evmhogsmfan", SPE_BUILTIN_EVMHOGSMFAN },
7543 { 0, CODE_FOR_spe_evmhogsmiaa, "__builtin_spe_evmhogsmiaa", SPE_BUILTIN_EVMHOGSMIAA },
7544 { 0, CODE_FOR_spe_evmhogsmian, "__builtin_spe_evmhogsmian", SPE_BUILTIN_EVMHOGSMIAN },
7545 { 0, CODE_FOR_spe_evmhogumiaa, "__builtin_spe_evmhogumiaa", SPE_BUILTIN_EVMHOGUMIAA },
7546 { 0, CODE_FOR_spe_evmhogumian, "__builtin_spe_evmhogumian", SPE_BUILTIN_EVMHOGUMIAN },
7547 { 0, CODE_FOR_spe_evmhosmf, "__builtin_spe_evmhosmf", SPE_BUILTIN_EVMHOSMF },
7548 { 0, CODE_FOR_spe_evmhosmfa, "__builtin_spe_evmhosmfa", SPE_BUILTIN_EVMHOSMFA },
7549 { 0, CODE_FOR_spe_evmhosmfaaw, "__builtin_spe_evmhosmfaaw", SPE_BUILTIN_EVMHOSMFAAW },
7550 { 0, CODE_FOR_spe_evmhosmfanw, "__builtin_spe_evmhosmfanw", SPE_BUILTIN_EVMHOSMFANW },
7551 { 0, CODE_FOR_spe_evmhosmi, "__builtin_spe_evmhosmi", SPE_BUILTIN_EVMHOSMI },
7552 { 0, CODE_FOR_spe_evmhosmia, "__builtin_spe_evmhosmia", SPE_BUILTIN_EVMHOSMIA },
7553 { 0, CODE_FOR_spe_evmhosmiaaw, "__builtin_spe_evmhosmiaaw", SPE_BUILTIN_EVMHOSMIAAW },
7554 { 0, CODE_FOR_spe_evmhosmianw, "__builtin_spe_evmhosmianw", SPE_BUILTIN_EVMHOSMIANW },
7555 { 0, CODE_FOR_spe_evmhossf, "__builtin_spe_evmhossf", SPE_BUILTIN_EVMHOSSF },
7556 { 0, CODE_FOR_spe_evmhossfa, "__builtin_spe_evmhossfa", SPE_BUILTIN_EVMHOSSFA },
7557 { 0, CODE_FOR_spe_evmhossfaaw, "__builtin_spe_evmhossfaaw", SPE_BUILTIN_EVMHOSSFAAW },
7558 { 0, CODE_FOR_spe_evmhossfanw, "__builtin_spe_evmhossfanw", SPE_BUILTIN_EVMHOSSFANW },
7559 { 0, CODE_FOR_spe_evmhossiaaw, "__builtin_spe_evmhossiaaw", SPE_BUILTIN_EVMHOSSIAAW },
7560 { 0, CODE_FOR_spe_evmhossianw, "__builtin_spe_evmhossianw", SPE_BUILTIN_EVMHOSSIANW },
7561 { 0, CODE_FOR_spe_evmhoumi, "__builtin_spe_evmhoumi", SPE_BUILTIN_EVMHOUMI },
7562 { 0, CODE_FOR_spe_evmhoumia, "__builtin_spe_evmhoumia", SPE_BUILTIN_EVMHOUMIA },
7563 { 0, CODE_FOR_spe_evmhoumiaaw, "__builtin_spe_evmhoumiaaw", SPE_BUILTIN_EVMHOUMIAAW },
7564 { 0, CODE_FOR_spe_evmhoumianw, "__builtin_spe_evmhoumianw", SPE_BUILTIN_EVMHOUMIANW },
7565 { 0, CODE_FOR_spe_evmhousiaaw, "__builtin_spe_evmhousiaaw", SPE_BUILTIN_EVMHOUSIAAW },
7566 { 0, CODE_FOR_spe_evmhousianw, "__builtin_spe_evmhousianw", SPE_BUILTIN_EVMHOUSIANW },
7567 { 0, CODE_FOR_spe_evmwhsmf, "__builtin_spe_evmwhsmf", SPE_BUILTIN_EVMWHSMF },
7568 { 0, CODE_FOR_spe_evmwhsmfa, "__builtin_spe_evmwhsmfa", SPE_BUILTIN_EVMWHSMFA },
7569 { 0, CODE_FOR_spe_evmwhsmi, "__builtin_spe_evmwhsmi", SPE_BUILTIN_EVMWHSMI },
7570 { 0, CODE_FOR_spe_evmwhsmia, "__builtin_spe_evmwhsmia", SPE_BUILTIN_EVMWHSMIA },
7571 { 0, CODE_FOR_spe_evmwhssf, "__builtin_spe_evmwhssf", SPE_BUILTIN_EVMWHSSF },
7572 { 0, CODE_FOR_spe_evmwhssfa, "__builtin_spe_evmwhssfa", SPE_BUILTIN_EVMWHSSFA },
7573 { 0, CODE_FOR_spe_evmwhumi, "__builtin_spe_evmwhumi", SPE_BUILTIN_EVMWHUMI },
7574 { 0, CODE_FOR_spe_evmwhumia, "__builtin_spe_evmwhumia", SPE_BUILTIN_EVMWHUMIA },
7575 { 0, CODE_FOR_spe_evmwlsmiaaw, "__builtin_spe_evmwlsmiaaw", SPE_BUILTIN_EVMWLSMIAAW },
7576 { 0, CODE_FOR_spe_evmwlsmianw, "__builtin_spe_evmwlsmianw", SPE_BUILTIN_EVMWLSMIANW },
7577 { 0, CODE_FOR_spe_evmwlssiaaw, "__builtin_spe_evmwlssiaaw", SPE_BUILTIN_EVMWLSSIAAW },
7578 { 0, CODE_FOR_spe_evmwlssianw, "__builtin_spe_evmwlssianw", SPE_BUILTIN_EVMWLSSIANW },
7579 { 0, CODE_FOR_spe_evmwlumi, "__builtin_spe_evmwlumi", SPE_BUILTIN_EVMWLUMI },
7580 { 0, CODE_FOR_spe_evmwlumia, "__builtin_spe_evmwlumia", SPE_BUILTIN_EVMWLUMIA },
7581 { 0, CODE_FOR_spe_evmwlumiaaw, "__builtin_spe_evmwlumiaaw", SPE_BUILTIN_EVMWLUMIAAW },
7582 { 0, CODE_FOR_spe_evmwlumianw, "__builtin_spe_evmwlumianw", SPE_BUILTIN_EVMWLUMIANW },
7583 { 0, CODE_FOR_spe_evmwlusiaaw, "__builtin_spe_evmwlusiaaw", SPE_BUILTIN_EVMWLUSIAAW },
7584 { 0, CODE_FOR_spe_evmwlusianw, "__builtin_spe_evmwlusianw", SPE_BUILTIN_EVMWLUSIANW },
7585 { 0, CODE_FOR_spe_evmwsmf, "__builtin_spe_evmwsmf", SPE_BUILTIN_EVMWSMF },
7586 { 0, CODE_FOR_spe_evmwsmfa, "__builtin_spe_evmwsmfa", SPE_BUILTIN_EVMWSMFA },
7587 { 0, CODE_FOR_spe_evmwsmfaa, "__builtin_spe_evmwsmfaa", SPE_BUILTIN_EVMWSMFAA },
7588 { 0, CODE_FOR_spe_evmwsmfan, "__builtin_spe_evmwsmfan", SPE_BUILTIN_EVMWSMFAN },
7589 { 0, CODE_FOR_spe_evmwsmi, "__builtin_spe_evmwsmi", SPE_BUILTIN_EVMWSMI },
7590 { 0, CODE_FOR_spe_evmwsmia, "__builtin_spe_evmwsmia", SPE_BUILTIN_EVMWSMIA },
7591 { 0, CODE_FOR_spe_evmwsmiaa, "__builtin_spe_evmwsmiaa", SPE_BUILTIN_EVMWSMIAA },
7592 { 0, CODE_FOR_spe_evmwsmian, "__builtin_spe_evmwsmian", SPE_BUILTIN_EVMWSMIAN },
7593 { 0, CODE_FOR_spe_evmwssf, "__builtin_spe_evmwssf", SPE_BUILTIN_EVMWSSF },
7594 { 0, CODE_FOR_spe_evmwssfa, "__builtin_spe_evmwssfa", SPE_BUILTIN_EVMWSSFA },
7595 { 0, CODE_FOR_spe_evmwssfaa, "__builtin_spe_evmwssfaa", SPE_BUILTIN_EVMWSSFAA },
7596 { 0, CODE_FOR_spe_evmwssfan, "__builtin_spe_evmwssfan", SPE_BUILTIN_EVMWSSFAN },
7597 { 0, CODE_FOR_spe_evmwumi, "__builtin_spe_evmwumi", SPE_BUILTIN_EVMWUMI },
7598 { 0, CODE_FOR_spe_evmwumia, "__builtin_spe_evmwumia", SPE_BUILTIN_EVMWUMIA },
7599 { 0, CODE_FOR_spe_evmwumiaa, "__builtin_spe_evmwumiaa", SPE_BUILTIN_EVMWUMIAA },
7600 { 0, CODE_FOR_spe_evmwumian, "__builtin_spe_evmwumian", SPE_BUILTIN_EVMWUMIAN },
7601 { 0, CODE_FOR_spe_evnand, "__builtin_spe_evnand", SPE_BUILTIN_EVNAND },
7602 { 0, CODE_FOR_spe_evnor, "__builtin_spe_evnor", SPE_BUILTIN_EVNOR },
7603 { 0, CODE_FOR_spe_evor, "__builtin_spe_evor", SPE_BUILTIN_EVOR },
7604 { 0, CODE_FOR_spe_evorc, "__builtin_spe_evorc", SPE_BUILTIN_EVORC },
7605 { 0, CODE_FOR_spe_evrlw, "__builtin_spe_evrlw", SPE_BUILTIN_EVRLW },
7606 { 0, CODE_FOR_spe_evslw, "__builtin_spe_evslw", SPE_BUILTIN_EVSLW },
7607 { 0, CODE_FOR_spe_evsrws, "__builtin_spe_evsrws", SPE_BUILTIN_EVSRWS },
7608 { 0, CODE_FOR_spe_evsrwu, "__builtin_spe_evsrwu", SPE_BUILTIN_EVSRWU },
7609 { 0, CODE_FOR_spe_evsubfw, "__builtin_spe_evsubfw", SPE_BUILTIN_EVSUBFW },
7611 /* SPE binary operations expecting a 5-bit unsigned literal. */
7612 { 0, CODE_FOR_spe_evaddiw, "__builtin_spe_evaddiw", SPE_BUILTIN_EVADDIW },
7614 { 0, CODE_FOR_spe_evrlwi, "__builtin_spe_evrlwi", SPE_BUILTIN_EVRLWI },
7615 { 0, CODE_FOR_spe_evslwi, "__builtin_spe_evslwi", SPE_BUILTIN_EVSLWI },
7616 { 0, CODE_FOR_spe_evsrwis, "__builtin_spe_evsrwis", SPE_BUILTIN_EVSRWIS },
7617 { 0, CODE_FOR_spe_evsrwiu, "__builtin_spe_evsrwiu", SPE_BUILTIN_EVSRWIU },
7618 { 0, CODE_FOR_spe_evsubifw, "__builtin_spe_evsubifw", SPE_BUILTIN_EVSUBIFW },
7619 { 0, CODE_FOR_spe_evmwhssfaa, "__builtin_spe_evmwhssfaa", SPE_BUILTIN_EVMWHSSFAA },
7620 { 0, CODE_FOR_spe_evmwhssmaa, "__builtin_spe_evmwhssmaa", SPE_BUILTIN_EVMWHSSMAA },
7621 { 0, CODE_FOR_spe_evmwhsmfaa, "__builtin_spe_evmwhsmfaa", SPE_BUILTIN_EVMWHSMFAA },
7622 { 0, CODE_FOR_spe_evmwhsmiaa, "__builtin_spe_evmwhsmiaa", SPE_BUILTIN_EVMWHSMIAA },
7623 { 0, CODE_FOR_spe_evmwhusiaa, "__builtin_spe_evmwhusiaa", SPE_BUILTIN_EVMWHUSIAA },
7624 { 0, CODE_FOR_spe_evmwhumiaa, "__builtin_spe_evmwhumiaa", SPE_BUILTIN_EVMWHUMIAA },
7625 { 0, CODE_FOR_spe_evmwhssfan, "__builtin_spe_evmwhssfan", SPE_BUILTIN_EVMWHSSFAN },
7626 { 0, CODE_FOR_spe_evmwhssian, "__builtin_spe_evmwhssian", SPE_BUILTIN_EVMWHSSIAN },
7627 { 0, CODE_FOR_spe_evmwhsmfan, "__builtin_spe_evmwhsmfan", SPE_BUILTIN_EVMWHSMFAN },
7628 { 0, CODE_FOR_spe_evmwhsmian, "__builtin_spe_evmwhsmian", SPE_BUILTIN_EVMWHSMIAN },
7629 { 0, CODE_FOR_spe_evmwhusian, "__builtin_spe_evmwhusian", SPE_BUILTIN_EVMWHUSIAN },
7630 { 0, CODE_FOR_spe_evmwhumian, "__builtin_spe_evmwhumian", SPE_BUILTIN_EVMWHUMIAN },
7631 { 0, CODE_FOR_spe_evmwhgssfaa, "__builtin_spe_evmwhgssfaa", SPE_BUILTIN_EVMWHGSSFAA },
7632 { 0, CODE_FOR_spe_evmwhgsmfaa, "__builtin_spe_evmwhgsmfaa", SPE_BUILTIN_EVMWHGSMFAA },
7633 { 0, CODE_FOR_spe_evmwhgsmiaa, "__builtin_spe_evmwhgsmiaa", SPE_BUILTIN_EVMWHGSMIAA },
7634 { 0, CODE_FOR_spe_evmwhgumiaa, "__builtin_spe_evmwhgumiaa", SPE_BUILTIN_EVMWHGUMIAA },
7635 { 0, CODE_FOR_spe_evmwhgssfan, "__builtin_spe_evmwhgssfan", SPE_BUILTIN_EVMWHGSSFAN },
7636 { 0, CODE_FOR_spe_evmwhgsmfan, "__builtin_spe_evmwhgsmfan", SPE_BUILTIN_EVMWHGSMFAN },
7637 { 0, CODE_FOR_spe_evmwhgsmian, "__builtin_spe_evmwhgsmian", SPE_BUILTIN_EVMWHGSMIAN },
7638 { 0, CODE_FOR_spe_evmwhgumian, "__builtin_spe_evmwhgumian", SPE_BUILTIN_EVMWHGUMIAN },
7639 { 0, CODE_FOR_spe_brinc, "__builtin_spe_brinc", SPE_BUILTIN_BRINC },
7641 /* Place-holder. Leave as last binary SPE builtin. */
7642 { 0, CODE_FOR_xorv2si3, "__builtin_spe_evxor", SPE_BUILTIN_EVXOR }
7645 /* AltiVec predicates. */
7647 struct builtin_description_predicates
7648 {
7649 const unsigned int mask;
7650 const enum insn_code icode;
7651 const char *opcode;
7652 const char *const name;
7653 const enum rs6000_builtins code;
7654 };
7656 static const struct builtin_description_predicates bdesc_altivec_preds[] =
7658 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpbfp.", "__builtin_altivec_vcmpbfp_p", ALTIVEC_BUILTIN_VCMPBFP_P },
7659 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpeqfp.", "__builtin_altivec_vcmpeqfp_p", ALTIVEC_BUILTIN_VCMPEQFP_P },
7660 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpgefp.", "__builtin_altivec_vcmpgefp_p", ALTIVEC_BUILTIN_VCMPGEFP_P },
7661 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpgtfp.", "__builtin_altivec_vcmpgtfp_p", ALTIVEC_BUILTIN_VCMPGTFP_P },
7662 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpequw.", "__builtin_altivec_vcmpequw_p", ALTIVEC_BUILTIN_VCMPEQUW_P },
7663 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpgtsw.", "__builtin_altivec_vcmpgtsw_p", ALTIVEC_BUILTIN_VCMPGTSW_P },
7664 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpgtuw.", "__builtin_altivec_vcmpgtuw_p", ALTIVEC_BUILTIN_VCMPGTUW_P },
7665 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpgtuh.", "__builtin_altivec_vcmpgtuh_p", ALTIVEC_BUILTIN_VCMPGTUH_P },
7666 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpgtsh.", "__builtin_altivec_vcmpgtsh_p", ALTIVEC_BUILTIN_VCMPGTSH_P },
7667 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpequh.", "__builtin_altivec_vcmpequh_p", ALTIVEC_BUILTIN_VCMPEQUH_P },
7668 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpequb.", "__builtin_altivec_vcmpequb_p", ALTIVEC_BUILTIN_VCMPEQUB_P },
7669 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpgtsb.", "__builtin_altivec_vcmpgtsb_p", ALTIVEC_BUILTIN_VCMPGTSB_P },
7670 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpgtub.", "__builtin_altivec_vcmpgtub_p", ALTIVEC_BUILTIN_VCMPGTUB_P },
7672 { MASK_ALTIVEC, 0, NULL, "__builtin_vec_vcmpeq_p", ALTIVEC_BUILTIN_VCMPEQ_P },
7673 { MASK_ALTIVEC, 0, NULL, "__builtin_vec_vcmpgt_p", ALTIVEC_BUILTIN_VCMPGT_P },
7674 { MASK_ALTIVEC, 0, NULL, "__builtin_vec_vcmpge_p", ALTIVEC_BUILTIN_VCMPGE_P }
7677 /* SPE predicates. */
7678 static struct builtin_description bdesc_spe_predicates[] =
7680 /* Place-holder. Leave as first. */
7681 { 0, CODE_FOR_spe_evcmpeq, "__builtin_spe_evcmpeq", SPE_BUILTIN_EVCMPEQ },
7682 { 0, CODE_FOR_spe_evcmpgts, "__builtin_spe_evcmpgts", SPE_BUILTIN_EVCMPGTS },
7683 { 0, CODE_FOR_spe_evcmpgtu, "__builtin_spe_evcmpgtu", SPE_BUILTIN_EVCMPGTU },
7684 { 0, CODE_FOR_spe_evcmplts, "__builtin_spe_evcmplts", SPE_BUILTIN_EVCMPLTS },
7685 { 0, CODE_FOR_spe_evcmpltu, "__builtin_spe_evcmpltu", SPE_BUILTIN_EVCMPLTU },
7686 { 0, CODE_FOR_spe_evfscmpeq, "__builtin_spe_evfscmpeq", SPE_BUILTIN_EVFSCMPEQ },
7687 { 0, CODE_FOR_spe_evfscmpgt, "__builtin_spe_evfscmpgt", SPE_BUILTIN_EVFSCMPGT },
7688 { 0, CODE_FOR_spe_evfscmplt, "__builtin_spe_evfscmplt", SPE_BUILTIN_EVFSCMPLT },
7689 { 0, CODE_FOR_spe_evfststeq, "__builtin_spe_evfststeq", SPE_BUILTIN_EVFSTSTEQ },
7690 { 0, CODE_FOR_spe_evfststgt, "__builtin_spe_evfststgt", SPE_BUILTIN_EVFSTSTGT },
7691 /* Place-holder. Leave as last. */
7692 { 0, CODE_FOR_spe_evfststlt, "__builtin_spe_evfststlt", SPE_BUILTIN_EVFSTSTLT },
7695 /* SPE evsel predicates. */
7696 static struct builtin_description bdesc_spe_evsel[] =
7698 /* Place-holder. Leave as first. */
7699 { 0, CODE_FOR_spe_evcmpgts, "__builtin_spe_evsel_gts", SPE_BUILTIN_EVSEL_CMPGTS },
7700 { 0, CODE_FOR_spe_evcmpgtu, "__builtin_spe_evsel_gtu", SPE_BUILTIN_EVSEL_CMPGTU },
7701 { 0, CODE_FOR_spe_evcmplts, "__builtin_spe_evsel_lts", SPE_BUILTIN_EVSEL_CMPLTS },
7702 { 0, CODE_FOR_spe_evcmpltu, "__builtin_spe_evsel_ltu", SPE_BUILTIN_EVSEL_CMPLTU },
7703 { 0, CODE_FOR_spe_evcmpeq, "__builtin_spe_evsel_eq", SPE_BUILTIN_EVSEL_CMPEQ },
7704 { 0, CODE_FOR_spe_evfscmpgt, "__builtin_spe_evsel_fsgt", SPE_BUILTIN_EVSEL_FSCMPGT },
7705 { 0, CODE_FOR_spe_evfscmplt, "__builtin_spe_evsel_fslt", SPE_BUILTIN_EVSEL_FSCMPLT },
7706 { 0, CODE_FOR_spe_evfscmpeq, "__builtin_spe_evsel_fseq", SPE_BUILTIN_EVSEL_FSCMPEQ },
7707 { 0, CODE_FOR_spe_evfststgt, "__builtin_spe_evsel_fststgt", SPE_BUILTIN_EVSEL_FSTSTGT },
7708 { 0, CODE_FOR_spe_evfststlt, "__builtin_spe_evsel_fststlt", SPE_BUILTIN_EVSEL_FSTSTLT },
7709 /* Place-holder. Leave as last. */
7710 { 0, CODE_FOR_spe_evfststeq, "__builtin_spe_evsel_fststeq", SPE_BUILTIN_EVSEL_FSTSTEQ },
7713 /* PAIRED predicates. */
7714 static const struct builtin_description bdesc_paired_preds[] =
7716 /* Place-holder. Leave as first. */
7717 { 0, CODE_FOR_paired_cmpu0, "__builtin_paired_cmpu0", PAIRED_BUILTIN_CMPU0 },
7718 /* Place-holder. Leave as last. */
7719 { 0, CODE_FOR_paired_cmpu1, "__builtin_paired_cmpu1", PAIRED_BUILTIN_CMPU1 },
7722 /* ABS* operations. */
7724 static const struct builtin_description bdesc_abs[] =
7726 { MASK_ALTIVEC, CODE_FOR_absv4si2, "__builtin_altivec_abs_v4si", ALTIVEC_BUILTIN_ABS_V4SI },
7727 { MASK_ALTIVEC, CODE_FOR_absv8hi2, "__builtin_altivec_abs_v8hi", ALTIVEC_BUILTIN_ABS_V8HI },
7728 { MASK_ALTIVEC, CODE_FOR_absv4sf2, "__builtin_altivec_abs_v4sf", ALTIVEC_BUILTIN_ABS_V4SF },
7729 { MASK_ALTIVEC, CODE_FOR_absv16qi2, "__builtin_altivec_abs_v16qi", ALTIVEC_BUILTIN_ABS_V16QI },
7730 { MASK_ALTIVEC, CODE_FOR_altivec_abss_v4si, "__builtin_altivec_abss_v4si", ALTIVEC_BUILTIN_ABSS_V4SI },
7731 { MASK_ALTIVEC, CODE_FOR_altivec_abss_v8hi, "__builtin_altivec_abss_v8hi", ALTIVEC_BUILTIN_ABSS_V8HI },
7732 { MASK_ALTIVEC, CODE_FOR_altivec_abss_v16qi, "__builtin_altivec_abss_v16qi", ALTIVEC_BUILTIN_ABSS_V16QI }
7735 /* Simple unary operations: VECb = foo (unsigned literal) or VECb = foo (VECa). */
7738 static struct builtin_description bdesc_1arg[] =
7740 { MASK_ALTIVEC, CODE_FOR_altivec_vexptefp, "__builtin_altivec_vexptefp", ALTIVEC_BUILTIN_VEXPTEFP },
7741 { MASK_ALTIVEC, CODE_FOR_altivec_vlogefp, "__builtin_altivec_vlogefp", ALTIVEC_BUILTIN_VLOGEFP },
7742 { MASK_ALTIVEC, CODE_FOR_altivec_vrefp, "__builtin_altivec_vrefp", ALTIVEC_BUILTIN_VREFP },
7743 { MASK_ALTIVEC, CODE_FOR_altivec_vrfim, "__builtin_altivec_vrfim", ALTIVEC_BUILTIN_VRFIM },
7744 { MASK_ALTIVEC, CODE_FOR_altivec_vrfin, "__builtin_altivec_vrfin", ALTIVEC_BUILTIN_VRFIN },
7745 { MASK_ALTIVEC, CODE_FOR_altivec_vrfip, "__builtin_altivec_vrfip", ALTIVEC_BUILTIN_VRFIP },
7746 { MASK_ALTIVEC, CODE_FOR_ftruncv4sf2, "__builtin_altivec_vrfiz", ALTIVEC_BUILTIN_VRFIZ },
7747 { MASK_ALTIVEC, CODE_FOR_altivec_vrsqrtefp, "__builtin_altivec_vrsqrtefp", ALTIVEC_BUILTIN_VRSQRTEFP },
7748 { MASK_ALTIVEC, CODE_FOR_altivec_vspltisb, "__builtin_altivec_vspltisb", ALTIVEC_BUILTIN_VSPLTISB },
7749 { MASK_ALTIVEC, CODE_FOR_altivec_vspltish, "__builtin_altivec_vspltish", ALTIVEC_BUILTIN_VSPLTISH },
7750 { MASK_ALTIVEC, CODE_FOR_altivec_vspltisw, "__builtin_altivec_vspltisw", ALTIVEC_BUILTIN_VSPLTISW },
7751 { MASK_ALTIVEC, CODE_FOR_altivec_vupkhsb, "__builtin_altivec_vupkhsb", ALTIVEC_BUILTIN_VUPKHSB },
7752 { MASK_ALTIVEC, CODE_FOR_altivec_vupkhpx, "__builtin_altivec_vupkhpx", ALTIVEC_BUILTIN_VUPKHPX },
7753 { MASK_ALTIVEC, CODE_FOR_altivec_vupkhsh, "__builtin_altivec_vupkhsh", ALTIVEC_BUILTIN_VUPKHSH },
7754 { MASK_ALTIVEC, CODE_FOR_altivec_vupklsb, "__builtin_altivec_vupklsb", ALTIVEC_BUILTIN_VUPKLSB },
7755 { MASK_ALTIVEC, CODE_FOR_altivec_vupklpx, "__builtin_altivec_vupklpx", ALTIVEC_BUILTIN_VUPKLPX },
7756 { MASK_ALTIVEC, CODE_FOR_altivec_vupklsh, "__builtin_altivec_vupklsh", ALTIVEC_BUILTIN_VUPKLSH },
7758 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abs", ALTIVEC_BUILTIN_VEC_ABS },
7759 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abss", ALTIVEC_BUILTIN_VEC_ABSS },
7760 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_ceil", ALTIVEC_BUILTIN_VEC_CEIL },
7761 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_expte", ALTIVEC_BUILTIN_VEC_EXPTE },
7762 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_floor", ALTIVEC_BUILTIN_VEC_FLOOR },
7763 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_loge", ALTIVEC_BUILTIN_VEC_LOGE },
7764 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mtvscr", ALTIVEC_BUILTIN_VEC_MTVSCR },
7765 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_re", ALTIVEC_BUILTIN_VEC_RE },
7766 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_round", ALTIVEC_BUILTIN_VEC_ROUND },
7767 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_rsqrte", ALTIVEC_BUILTIN_VEC_RSQRTE },
7768 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_trunc", ALTIVEC_BUILTIN_VEC_TRUNC },
7769 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_unpackh", ALTIVEC_BUILTIN_VEC_UNPACKH },
7770 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhsh", ALTIVEC_BUILTIN_VEC_VUPKHSH },
7771 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhpx", ALTIVEC_BUILTIN_VEC_VUPKHPX },
7772 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhsb", ALTIVEC_BUILTIN_VEC_VUPKHSB },
7773 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_unpackl", ALTIVEC_BUILTIN_VEC_UNPACKL },
7774 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklpx", ALTIVEC_BUILTIN_VEC_VUPKLPX },
7775 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsh", ALTIVEC_BUILTIN_VEC_VUPKLSH },
7776 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsb", ALTIVEC_BUILTIN_VEC_VUPKLSB },
7778 /* The SPE unary builtins must start with SPE_BUILTIN_EVABS and
7779 end with SPE_BUILTIN_EVSUBFUSIAAW. */
7780 { 0, CODE_FOR_spe_evabs, "__builtin_spe_evabs", SPE_BUILTIN_EVABS },
7781 { 0, CODE_FOR_spe_evaddsmiaaw, "__builtin_spe_evaddsmiaaw", SPE_BUILTIN_EVADDSMIAAW },
7782 { 0, CODE_FOR_spe_evaddssiaaw, "__builtin_spe_evaddssiaaw", SPE_BUILTIN_EVADDSSIAAW },
7783 { 0, CODE_FOR_spe_evaddumiaaw, "__builtin_spe_evaddumiaaw", SPE_BUILTIN_EVADDUMIAAW },
7784 { 0, CODE_FOR_spe_evaddusiaaw, "__builtin_spe_evaddusiaaw", SPE_BUILTIN_EVADDUSIAAW },
7785 { 0, CODE_FOR_spe_evcntlsw, "__builtin_spe_evcntlsw", SPE_BUILTIN_EVCNTLSW },
7786 { 0, CODE_FOR_spe_evcntlzw, "__builtin_spe_evcntlzw", SPE_BUILTIN_EVCNTLZW },
7787 { 0, CODE_FOR_spe_evextsb, "__builtin_spe_evextsb", SPE_BUILTIN_EVEXTSB },
7788 { 0, CODE_FOR_spe_evextsh, "__builtin_spe_evextsh", SPE_BUILTIN_EVEXTSH },
7789 { 0, CODE_FOR_spe_evfsabs, "__builtin_spe_evfsabs", SPE_BUILTIN_EVFSABS },
7790 { 0, CODE_FOR_spe_evfscfsf, "__builtin_spe_evfscfsf", SPE_BUILTIN_EVFSCFSF },
7791 { 0, CODE_FOR_spe_evfscfsi, "__builtin_spe_evfscfsi", SPE_BUILTIN_EVFSCFSI },
7792 { 0, CODE_FOR_spe_evfscfuf, "__builtin_spe_evfscfuf", SPE_BUILTIN_EVFSCFUF },
7793 { 0, CODE_FOR_spe_evfscfui, "__builtin_spe_evfscfui", SPE_BUILTIN_EVFSCFUI },
7794 { 0, CODE_FOR_spe_evfsctsf, "__builtin_spe_evfsctsf", SPE_BUILTIN_EVFSCTSF },
7795 { 0, CODE_FOR_spe_evfsctsi, "__builtin_spe_evfsctsi", SPE_BUILTIN_EVFSCTSI },
7796 { 0, CODE_FOR_spe_evfsctsiz, "__builtin_spe_evfsctsiz", SPE_BUILTIN_EVFSCTSIZ },
7797 { 0, CODE_FOR_spe_evfsctuf, "__builtin_spe_evfsctuf", SPE_BUILTIN_EVFSCTUF },
7798 { 0, CODE_FOR_spe_evfsctui, "__builtin_spe_evfsctui", SPE_BUILTIN_EVFSCTUI },
7799 { 0, CODE_FOR_spe_evfsctuiz, "__builtin_spe_evfsctuiz", SPE_BUILTIN_EVFSCTUIZ },
7800 { 0, CODE_FOR_spe_evfsnabs, "__builtin_spe_evfsnabs", SPE_BUILTIN_EVFSNABS },
7801 { 0, CODE_FOR_spe_evfsneg, "__builtin_spe_evfsneg", SPE_BUILTIN_EVFSNEG },
7802 { 0, CODE_FOR_spe_evmra, "__builtin_spe_evmra", SPE_BUILTIN_EVMRA },
7803 { 0, CODE_FOR_negv2si2, "__builtin_spe_evneg", SPE_BUILTIN_EVNEG },
7804 { 0, CODE_FOR_spe_evrndw, "__builtin_spe_evrndw", SPE_BUILTIN_EVRNDW },
7805 { 0, CODE_FOR_spe_evsubfsmiaaw, "__builtin_spe_evsubfsmiaaw", SPE_BUILTIN_EVSUBFSMIAAW },
7806 { 0, CODE_FOR_spe_evsubfssiaaw, "__builtin_spe_evsubfssiaaw", SPE_BUILTIN_EVSUBFSSIAAW },
7807 { 0, CODE_FOR_spe_evsubfumiaaw, "__builtin_spe_evsubfumiaaw", SPE_BUILTIN_EVSUBFUMIAAW },
7809 /* Place-holder. Leave as last unary SPE builtin. */
7810 { 0, CODE_FOR_spe_evsubfusiaaw, "__builtin_spe_evsubfusiaaw", SPE_BUILTIN_EVSUBFUSIAAW },
7812 { 0, CODE_FOR_absv2sf2, "__builtin_paired_absv2sf2", PAIRED_BUILTIN_ABSV2SF2 },
7813 { 0, CODE_FOR_nabsv2sf2, "__builtin_paired_nabsv2sf2", PAIRED_BUILTIN_NABSV2SF2 },
7814 { 0, CODE_FOR_negv2sf2, "__builtin_paired_negv2sf2", PAIRED_BUILTIN_NEGV2SF2 },
7815 { 0, CODE_FOR_sqrtv2sf2, "__builtin_paired_sqrtv2sf2", PAIRED_BUILTIN_SQRTV2SF2 },
7816 { 0, CODE_FOR_resv2sf2, "__builtin_paired_resv2sf2", PAIRED_BUILTIN_RESV2SF2 }
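/* Expand a unary builtin: apply ICODE to the single argument of EXP and return the result in TARGET. The vector splat-immediate patterns additionally require a 5-bit signed literal argument. */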
7820 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
7823 tree arg0 = CALL_EXPR_ARG (exp, 0);
7824 rtx op0 = expand_normal (arg0);
7825 enum machine_mode tmode = insn_data[icode].operand[0].mode;
7826 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
7828 if (icode == CODE_FOR_nothing)
7829 /* Builtin not supported on this processor. */
7832 /* If we got invalid arguments, bail out before generating bad rtl. */
7833 if (arg0 == error_mark_node)
7836 if (icode == CODE_FOR_altivec_vspltisb
7837 || icode == CODE_FOR_altivec_vspltish
7838 || icode == CODE_FOR_altivec_vspltisw
7839 || icode == CODE_FOR_spe_evsplatfi
7840 || icode == CODE_FOR_spe_evsplati)
7842 /* Only allow 5-bit *signed* literals. */
7843 if (GET_CODE (op0) != CONST_INT
7844 || INTVAL (op0) > 15
7845 || INTVAL (op0) < -16)
7847 error ("argument 1 must be a 5-bit signed literal");
7853 || GET_MODE (target) != tmode
7854 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
7855 target = gen_reg_rtx (tmode);
7857 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
7858 op0 = copy_to_mode_reg (mode0, op0);
7860 pat = GEN_FCN (icode) (target, op0);
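/* Expand an AltiVec ABS* builtin: ICODE takes the target, the source operand and two scratch registers of the source mode. */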
7869 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
7871 rtx pat, scratch1, scratch2;
7872 tree arg0 = CALL_EXPR_ARG (exp, 0);
7873 rtx op0 = expand_normal (arg0);
7874 enum machine_mode tmode = insn_data[icode].operand[0].mode;
7875 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
7877 /* If we have invalid arguments, bail out before generating bad rtl. */
7878 if (arg0 == error_mark_node)
7882 || GET_MODE (target) != tmode
7883 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
7884 target = gen_reg_rtx (tmode);
7886 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
7887 op0 = copy_to_mode_reg (mode0, op0);
7889 scratch1 = gen_reg_rtx (mode0);
7890 scratch2 = gen_reg_rtx (mode0);
7892 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
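/* Expand a binary builtin: apply ICODE to the two arguments of EXP and return the result in TARGET. Patterns that take an immediate operand require a 5-bit unsigned literal as the second argument. */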
7901 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
7904 tree arg0 = CALL_EXPR_ARG (exp, 0);
7905 tree arg1 = CALL_EXPR_ARG (exp, 1);
7906 rtx op0 = expand_normal (arg0);
7907 rtx op1 = expand_normal (arg1);
7908 enum machine_mode tmode = insn_data[icode].operand[0].mode;
7909 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
7910 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
7912 if (icode == CODE_FOR_nothing)
7913 /* Builtin not supported on this processor. */
7916 /* If we got invalid arguments, bail out before generating bad rtl. */
7917 if (arg0 == error_mark_node || arg1 == error_mark_node)
7920 if (icode == CODE_FOR_altivec_vcfux
7921 || icode == CODE_FOR_altivec_vcfsx
7922 || icode == CODE_FOR_altivec_vctsxs
7923 || icode == CODE_FOR_altivec_vctuxs
7924 || icode == CODE_FOR_altivec_vspltb
7925 || icode == CODE_FOR_altivec_vsplth
7926 || icode == CODE_FOR_altivec_vspltw
7927 || icode == CODE_FOR_spe_evaddiw
7928 || icode == CODE_FOR_spe_evldd
7929 || icode == CODE_FOR_spe_evldh
7930 || icode == CODE_FOR_spe_evldw
7931 || icode == CODE_FOR_spe_evlhhesplat
7932 || icode == CODE_FOR_spe_evlhhossplat
7933 || icode == CODE_FOR_spe_evlhhousplat
7934 || icode == CODE_FOR_spe_evlwhe
7935 || icode == CODE_FOR_spe_evlwhos
7936 || icode == CODE_FOR_spe_evlwhou
7937 || icode == CODE_FOR_spe_evlwhsplat
7938 || icode == CODE_FOR_spe_evlwwsplat
7939 || icode == CODE_FOR_spe_evrlwi
7940 || icode == CODE_FOR_spe_evslwi
7941 || icode == CODE_FOR_spe_evsrwis
7942 || icode == CODE_FOR_spe_evsubifw
7943 || icode == CODE_FOR_spe_evsrwiu)
7945 /* Only allow 5-bit unsigned literals. */
7947 if (TREE_CODE (arg1) != INTEGER_CST
7948 || TREE_INT_CST_LOW (arg1) & ~0x1f)
7950 error ("argument 2 must be a 5-bit unsigned literal");
7956 || GET_MODE (target) != tmode
7957 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
7958 target = gen_reg_rtx (tmode);
7960 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
7961 op0 = copy_to_mode_reg (mode0, op0);
7962 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
7963 op1 = copy_to_mode_reg (mode1, op1);
7965 pat = GEN_FCN (icode) (target, op0, op1);
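/* Expand an AltiVec predicate builtin. OPCODE names the comparison instruction; the first argument of EXP selects which CR6 test is applied to produce the result in TARGET. */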
7974 altivec_expand_predicate_builtin (enum insn_code icode, const char *opcode,
7975 tree exp, rtx target)
7978 tree cr6_form = CALL_EXPR_ARG (exp, 0);
7979 tree arg0 = CALL_EXPR_ARG (exp, 1);
7980 tree arg1 = CALL_EXPR_ARG (exp, 2);
7981 rtx op0 = expand_normal (arg0);
7982 rtx op1 = expand_normal (arg1);
7983 enum machine_mode tmode = SImode;
7984 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
7985 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
7988 if (TREE_CODE (cr6_form) != INTEGER_CST)
7990 error ("argument 1 of __builtin_altivec_predicate must be a constant");
7994 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
7996 gcc_assert (mode0 == mode1);
7998 /* If we have invalid arguments, bail out before generating bad rtl. */
7999 if (arg0 == error_mark_node || arg1 == error_mark_node)
8003 || GET_MODE (target) != tmode
8004 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
8005 target = gen_reg_rtx (tmode);
8007 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
8008 op0 = copy_to_mode_reg (mode0, op0);
8009 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
8010 op1 = copy_to_mode_reg (mode1, op1);
8012 scratch = gen_reg_rtx (mode0);
8014 pat = GEN_FCN (icode) (scratch, op0, op1,
8015 gen_rtx_SYMBOL_REF (Pmode, opcode));
8020 /* The vec_any* and vec_all* predicates use the same opcodes for two
8021 different operations, but the bits in CR6 will be different
8022 depending on what information we want. So we have to play tricks
8023 with CR6 to get the right bits out.
8025 If you think this is disgusting, look at the specs for the
8026 AltiVec predicates. */
8028 switch (cr6_form_int)
8031 emit_insn (gen_cr6_test_for_zero (target));
8034 emit_insn (gen_cr6_test_for_zero_reverse (target));
8037 emit_insn (gen_cr6_test_for_lt (target));
8040 emit_insn (gen_cr6_test_for_lt_reverse (target));
8043 error ("argument 1 of __builtin_altivec_predicate is out of range");
8051 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
8054 tree arg0 = CALL_EXPR_ARG (exp, 0);
8055 tree arg1 = CALL_EXPR_ARG (exp, 1);
8056 enum machine_mode tmode = insn_data[icode].operand[0].mode;
8057 enum machine_mode mode0 = Pmode;
8058 enum machine_mode mode1 = Pmode;
8059 rtx op0 = expand_normal (arg0);
8060 rtx op1 = expand_normal (arg1);
8062 if (icode == CODE_FOR_nothing)
8063 /* Builtin not supported on this processor. */
8066 /* If we got invalid arguments, bail out before generating bad rtl. */
8067 if (arg0 == error_mark_node || arg1 == error_mark_node)
8071 || GET_MODE (target) != tmode
8072 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
8073 target = gen_reg_rtx (tmode);
8075 op1 = copy_to_mode_reg (mode1, op1);
8077 if (op0 == const0_rtx)
8079 addr = gen_rtx_MEM (tmode, op1);
8083 op0 = copy_to_mode_reg (mode0, op0);
8084 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
8087 pat = GEN_FCN (icode) (target, addr);
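/* Expand an AltiVec load builtin: form the address from the two arguments of EXP and load the result into TARGET. BLK selects a BLKmode memory reference instead of one in the result mode. */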
8097 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
8100 tree arg0 = CALL_EXPR_ARG (exp, 0);
8101 tree arg1 = CALL_EXPR_ARG (exp, 1);
8102 enum machine_mode tmode = insn_data[icode].operand[0].mode;
8103 enum machine_mode mode0 = Pmode;
8104 enum machine_mode mode1 = Pmode;
8105 rtx op0 = expand_normal (arg0);
8106 rtx op1 = expand_normal (arg1);
8108 if (icode == CODE_FOR_nothing)
8109 /* Builtin not supported on this processor. */
8112 /* If we got invalid arguments, bail out before generating bad rtl. */
8113 if (arg0 == error_mark_node || arg1 == error_mark_node)
8117 || GET_MODE (target) != tmode
8118 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
8119 target = gen_reg_rtx (tmode);
8121 op1 = copy_to_mode_reg (mode1, op1);
8123 if (op0 == const0_rtx)
8125 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
8129 op0 = copy_to_mode_reg (mode0, op0);
8130 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
8133 pat = GEN_FCN (icode) (target, addr);
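/* Expand an SPE store builtin: the value to store and the address operands of ICODE come from the three arguments of EXP. */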
8143 spe_expand_stv_builtin (enum insn_code icode, tree exp)
8145 tree arg0 = CALL_EXPR_ARG (exp, 0);
8146 tree arg1 = CALL_EXPR_ARG (exp, 1);
8147 tree arg2 = CALL_EXPR_ARG (exp, 2);
8148 rtx op0 = expand_normal (arg0);
8149 rtx op1 = expand_normal (arg1);
8150 rtx op2 = expand_normal (arg2);
8152 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
8153 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
8154 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
8156 /* Invalid arguments. Bail out before generating bad rtl. */
8157 if (arg0 == error_mark_node
8158 || arg1 == error_mark_node
8159 || arg2 == error_mark_node)
8162 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
8163 op0 = copy_to_mode_reg (mode2, op0);
8164 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
8165 op1 = copy_to_mode_reg (mode0, op1);
8166 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
8167 op2 = copy_to_mode_reg (mode1, op2);
8169 pat = GEN_FCN (icode) (op1, op2, op0);
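/* Expand a paired-single store builtin: store the first argument of EXP to the address formed from the other two arguments. */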
8176 paired_expand_stv_builtin (enum insn_code icode, tree exp)
8178 tree arg0 = CALL_EXPR_ARG (exp, 0);
8179 tree arg1 = CALL_EXPR_ARG (exp, 1);
8180 tree arg2 = CALL_EXPR_ARG (exp, 2);
8181 rtx op0 = expand_normal (arg0);
8182 rtx op1 = expand_normal (arg1);
8183 rtx op2 = expand_normal (arg2);
8185 enum machine_mode tmode = insn_data[icode].operand[0].mode;
8186 enum machine_mode mode1 = Pmode;
8187 enum machine_mode mode2 = Pmode;
8189 /* Invalid arguments. Bail out before generating bad rtl. */
8190 if (arg0 == error_mark_node
8191 || arg1 == error_mark_node
8192 || arg2 == error_mark_node)
8195 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
8196 op0 = copy_to_mode_reg (tmode, op0);
8198 op2 = copy_to_mode_reg (mode2, op2);
8200 if (op1 == const0_rtx)
8202 addr = gen_rtx_MEM (tmode, op2);
8206 op1 = copy_to_mode_reg (mode1, op1);
8207 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
8210 pat = GEN_FCN (icode) (addr, op0);
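/* Expand an AltiVec store builtin: store the first argument of EXP to the address formed from the other two arguments. */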
8217 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
8219 tree arg0 = CALL_EXPR_ARG (exp, 0);
8220 tree arg1 = CALL_EXPR_ARG (exp, 1);
8221 tree arg2 = CALL_EXPR_ARG (exp, 2);
8222 rtx op0 = expand_normal (arg0);
8223 rtx op1 = expand_normal (arg1);
8224 rtx op2 = expand_normal (arg2);
8226 enum machine_mode tmode = insn_data[icode].operand[0].mode;
8227 enum machine_mode mode1 = Pmode;
8228 enum machine_mode mode2 = Pmode;
8230 /* Invalid arguments. Bail out before generating bad rtl. */
8231 if (arg0 == error_mark_node
8232 || arg1 == error_mark_node
8233 || arg2 == error_mark_node)
8236 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
8237 op0 = copy_to_mode_reg (tmode, op0);
8239 op2 = copy_to_mode_reg (mode2, op2);
8241 if (op1 == const0_rtx)
8243 addr = gen_rtx_MEM (tmode, op2);
8247 op1 = copy_to_mode_reg (mode1, op1);
8248 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
8251 pat = GEN_FCN (icode) (addr, op0);
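/* Expand a ternary builtin: apply ICODE to the three arguments of EXP and return the result in TARGET. The vsldoi patterns require a 4-bit unsigned literal as the third argument. */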
8258 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
8261 tree arg0 = CALL_EXPR_ARG (exp, 0);
8262 tree arg1 = CALL_EXPR_ARG (exp, 1);
8263 tree arg2 = CALL_EXPR_ARG (exp, 2);
8264 rtx op0 = expand_normal (arg0);
8265 rtx op1 = expand_normal (arg1);
8266 rtx op2 = expand_normal (arg2);
8267 enum machine_mode tmode = insn_data[icode].operand[0].mode;
8268 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
8269 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
8270 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
8272 if (icode == CODE_FOR_nothing)
8273 /* Builtin not supported on this processor. */
8276 /* If we got invalid arguments, bail out before generating bad rtl. */
8277 if (arg0 == error_mark_node
8278 || arg1 == error_mark_node
8279 || arg2 == error_mark_node)
8282 if (icode == CODE_FOR_altivec_vsldoi_v4sf
8283 || icode == CODE_FOR_altivec_vsldoi_v4si
8284 || icode == CODE_FOR_altivec_vsldoi_v8hi
8285 || icode == CODE_FOR_altivec_vsldoi_v16qi)
8287 /* Only allow 4-bit unsigned literals. */
8289 if (TREE_CODE (arg2) != INTEGER_CST
8290 || TREE_INT_CST_LOW (arg2) & ~0xf)
8292 error ("argument 3 must be a 4-bit unsigned literal");
8298 || GET_MODE (target) != tmode
8299 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
8300 target = gen_reg_rtx (tmode);
8302 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
8303 op0 = copy_to_mode_reg (mode0, op0);
8304 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
8305 op1 = copy_to_mode_reg (mode1, op1);
8306 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
8307 op2 = copy_to_mode_reg (mode2, op2);
8309 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
8310 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
8312 pat = GEN_FCN (icode) (target, op0, op1, op2);
8320 /* Expand the lvx builtins. */
8322 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
8324 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8325 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8327 enum machine_mode tmode, mode0;
8329 enum insn_code icode;
8333 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
8334 icode = CODE_FOR_altivec_lvx_v16qi;
8336 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
8337 icode = CODE_FOR_altivec_lvx_v8hi;
8339 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
8340 icode = CODE_FOR_altivec_lvx_v4si;
8342 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
8343 icode = CODE_FOR_altivec_lvx_v4sf;
8352 arg0 = CALL_EXPR_ARG (exp, 0);
8353 op0 = expand_normal (arg0);
8354 tmode = insn_data[icode].operand[0].mode;
8355 mode0 = insn_data[icode].operand[1].mode;
8358 || GET_MODE (target) != tmode
8359 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
8360 target = gen_reg_rtx (tmode);
8362 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
8363 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
8365 pat = GEN_FCN (icode) (target, op0);
8372 /* Expand the stvx builtins. */
8374 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
8377 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8378 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8380 enum machine_mode mode0, mode1;
8382 enum insn_code icode;
8386 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
8387 icode = CODE_FOR_altivec_stvx_v16qi;
8389 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
8390 icode = CODE_FOR_altivec_stvx_v8hi;
8392 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
8393 icode = CODE_FOR_altivec_stvx_v4si;
8395 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
8396 icode = CODE_FOR_altivec_stvx_v4sf;
8403 arg0 = CALL_EXPR_ARG (exp, 0);
8404 arg1 = CALL_EXPR_ARG (exp, 1);
8405 op0 = expand_normal (arg0);
8406 op1 = expand_normal (arg1);
8407 mode0 = insn_data[icode].operand[0].mode;
8408 mode1 = insn_data[icode].operand[1].mode;
8410 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
8411 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
8412 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
8413 op1 = copy_to_mode_reg (mode1, op1);
8415 pat = GEN_FCN (icode) (op0, op1);
8423 /* Expand the dst builtins. */
8425 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
8428 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8429 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8430 tree arg0, arg1, arg2;
8431 enum machine_mode mode0, mode1, mode2;
8432 rtx pat, op0, op1, op2;
8433 const struct builtin_description *d;
8438 /* Handle DST variants. */
8440 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
8441 if (d->code == fcode)
8443 arg0 = CALL_EXPR_ARG (exp, 0);
8444 arg1 = CALL_EXPR_ARG (exp, 1);
8445 arg2 = CALL_EXPR_ARG (exp, 2);
8446 op0 = expand_normal (arg0);
8447 op1 = expand_normal (arg1);
8448 op2 = expand_normal (arg2);
8449 mode0 = insn_data[d->icode].operand[0].mode;
8450 mode1 = insn_data[d->icode].operand[1].mode;
8451 mode2 = insn_data[d->icode].operand[2].mode;
8453 /* Invalid arguments, bail out before generating bad rtl. */
8454 if (arg0 == error_mark_node
8455 || arg1 == error_mark_node
8456 || arg2 == error_mark_node)
8461 if (TREE_CODE (arg2) != INTEGER_CST
8462 || TREE_INT_CST_LOW (arg2) & ~0x3)
8464 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
8468 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
8469 op0 = copy_to_mode_reg (Pmode, op0);
8470 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
8471 op1 = copy_to_mode_reg (mode1, op1);
8473 pat = GEN_FCN (d->icode) (op0, op1, op2);
8483 /* Expand vec_init builtin. */
8485 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
8487 enum machine_mode tmode = TYPE_MODE (type);
8488 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
8489 int i, n_elt = GET_MODE_NUNITS (tmode);
8490 rtvec v = rtvec_alloc (n_elt);
8492 gcc_assert (VECTOR_MODE_P (tmode));
8493 gcc_assert (n_elt == call_expr_nargs (exp));
8495 for (i = 0; i < n_elt; ++i)
8497 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
8498 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
8501 if (!target || !register_operand (target, tmode))
8502 target = gen_reg_rtx (tmode);
8504 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
8508 /* Return the integer constant in ARG. Constrain it to be in the range
8509 of the subparts of VEC_TYPE; issue an error if not. */
8512 get_element_number (tree vec_type, tree arg)
8514 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
8516 if (!host_integerp (arg, 1)
8517 || (elt = tree_low_cst (arg, 1), elt > max))
8519 error ("selector must be an integer constant in the range 0..%wi", max);
8526 /* Expand vec_set builtin. */
8528 altivec_expand_vec_set_builtin (tree exp)
8530 enum machine_mode tmode, mode1;
8531 tree arg0, arg1, arg2;
8535 arg0 = CALL_EXPR_ARG (exp, 0);
8536 arg1 = CALL_EXPR_ARG (exp, 1);
8537 arg2 = CALL_EXPR_ARG (exp, 2);
8539 tmode = TYPE_MODE (TREE_TYPE (arg0));
8540 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
8541 gcc_assert (VECTOR_MODE_P (tmode));
8543 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
8544 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
8545 elt = get_element_number (TREE_TYPE (arg0), arg2);
8547 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
8548 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
8550 op0 = force_reg (tmode, op0);
8551 op1 = force_reg (mode1, op1);
8553 rs6000_expand_vector_set (op0, op1, elt);
8558 /* Expand vec_ext builtin. */
8560 altivec_expand_vec_ext_builtin (tree exp, rtx target)
8562 enum machine_mode tmode, mode0;
8567 arg0 = CALL_EXPR_ARG (exp, 0);
8568 arg1 = CALL_EXPR_ARG (exp, 1);
8570 op0 = expand_normal (arg0);
8571 elt = get_element_number (TREE_TYPE (arg0), arg1);
8573 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
8574 mode0 = TYPE_MODE (TREE_TYPE (arg0));
8575 gcc_assert (VECTOR_MODE_P (mode0));
8577 op0 = force_reg (mode0, op0);
8579 if (optimize || !target || !register_operand (target, tmode))
8580 target = gen_reg_rtx (tmode);
8582 rs6000_expand_vector_extract (target, op0, elt);
8587 /* Expand the builtin in EXP and store the result in TARGET. Store
8588 true in *EXPANDEDP if we found a builtin to expand. */
8590 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
8592 const struct builtin_description *d;
8593 const struct builtin_description_predicates *dp;
8595 enum insn_code icode;
8596 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8599 enum machine_mode tmode, mode0;
8600 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8602 if (fcode >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
8603 && fcode <= ALTIVEC_BUILTIN_OVERLOADED_LAST)
8606 error ("unresolved overload for Altivec builtin %qF", fndecl);
8610 target = altivec_expand_ld_builtin (exp, target, expandedp);
8614 target = altivec_expand_st_builtin (exp, target, expandedp);
8618 target = altivec_expand_dst_builtin (exp, target, expandedp);
8626 case ALTIVEC_BUILTIN_STVX:
8627 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx, exp);
8628 case ALTIVEC_BUILTIN_STVEBX:
8629 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
8630 case ALTIVEC_BUILTIN_STVEHX:
8631 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
8632 case ALTIVEC_BUILTIN_STVEWX:
8633 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
8634 case ALTIVEC_BUILTIN_STVXL:
8635 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);
8637 case ALTIVEC_BUILTIN_STVLX:
8638 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
8639 case ALTIVEC_BUILTIN_STVLXL:
8640 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
8641 case ALTIVEC_BUILTIN_STVRX:
8642 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
8643 case ALTIVEC_BUILTIN_STVRXL:
8644 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
8646 case ALTIVEC_BUILTIN_MFVSCR:
8647 icode = CODE_FOR_altivec_mfvscr;
8648 tmode = insn_data[icode].operand[0].mode;
8651 || GET_MODE (target) != tmode
8652 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
8653 target = gen_reg_rtx (tmode);
8655 pat = GEN_FCN (icode) (target);
8661 case ALTIVEC_BUILTIN_MTVSCR:
8662 icode = CODE_FOR_altivec_mtvscr;
8663 arg0 = CALL_EXPR_ARG (exp, 0);
8664 op0 = expand_normal (arg0);
8665 mode0 = insn_data[icode].operand[0].mode;
8667 /* If we got invalid arguments, bail out before generating bad rtl. */
8668 if (arg0 == error_mark_node)
8671 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
8672 op0 = copy_to_mode_reg (mode0, op0);
8674 pat = GEN_FCN (icode) (op0);
8679 case ALTIVEC_BUILTIN_DSSALL:
8680 emit_insn (gen_altivec_dssall ());
8683 case ALTIVEC_BUILTIN_DSS:
8684 icode = CODE_FOR_altivec_dss;
8685 arg0 = CALL_EXPR_ARG (exp, 0);
8687 op0 = expand_normal (arg0);
8688 mode0 = insn_data[icode].operand[0].mode;
8690 /* If we got invalid arguments, bail out before generating bad rtl. */
8691 if (arg0 == error_mark_node)
8694 if (TREE_CODE (arg0) != INTEGER_CST
8695 || TREE_INT_CST_LOW (arg0) & ~0x3)
8697 error ("argument to dss must be a 2-bit unsigned literal");
8701 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
8702 op0 = copy_to_mode_reg (mode0, op0);
8704 emit_insn (gen_altivec_dss (op0));
8707 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
8708 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
8709 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
8710 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
8711 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
8713 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
8714 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
8715 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
8716 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
8717 return altivec_expand_vec_set_builtin (exp);
8719 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
8720 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
8721 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
8722 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
8723 return altivec_expand_vec_ext_builtin (exp, target);
8730 /* Expand abs* operations. */
8732 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
8733 if (d->code == fcode)
8734 return altivec_expand_abs_builtin (d->icode, exp, target);
8736 /* Expand the AltiVec predicates. */
8737 dp = bdesc_altivec_preds;
8738 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, dp++)
8739 if (dp->code == fcode)
8740 return altivec_expand_predicate_builtin (dp->icode, dp->opcode,
8743 /* LV* are funky. We initialized them differently. */
8746 case ALTIVEC_BUILTIN_LVSL:
8747 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
8748 exp, target, false);
8749 case ALTIVEC_BUILTIN_LVSR:
8750 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
8751 exp, target, false);
8752 case ALTIVEC_BUILTIN_LVEBX:
8753 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
8754 exp, target, false);
8755 case ALTIVEC_BUILTIN_LVEHX:
8756 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
8757 exp, target, false);
8758 case ALTIVEC_BUILTIN_LVEWX:
8759 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
8760 exp, target, false);
8761 case ALTIVEC_BUILTIN_LVXL:
8762 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
8763 exp, target, false);
8764 case ALTIVEC_BUILTIN_LVX:
8765 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx,
8766 exp, target, false);
8767 case ALTIVEC_BUILTIN_LVLX:
8768 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
8770 case ALTIVEC_BUILTIN_LVLXL:
8771 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
8773 case ALTIVEC_BUILTIN_LVRX:
8774 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
8776 case ALTIVEC_BUILTIN_LVRXL:
8777 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
8788 /* Expand the builtin in EXP and store the result in TARGET. Store
8789 true in *EXPANDEDP if we found a builtin to expand. */
8791 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
8793 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8794 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8795 const struct builtin_description *d;
8802 case PAIRED_BUILTIN_STX:
8803 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
8804 case PAIRED_BUILTIN_LX:
8805 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
8811 /* Expand the paired predicates. */
8812 d = bdesc_paired_preds;
8813 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
8814 if (d->code == fcode)
8815 return paired_expand_predicate_builtin (d->icode, exp, target);
8821 /* Binops that need to be initialized manually, but can be expanded
8822 automagically by rs6000_expand_binop_builtin. */
8823 static struct builtin_description bdesc_2arg_spe[] =
8825 { 0, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
8826 { 0, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
8827 { 0, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
8828 { 0, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
8829 { 0, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
8830 { 0, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
8831 { 0, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
8832 { 0, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
8833 { 0, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
8834 { 0, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
8835 { 0, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
8836 { 0, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
8837 { 0, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
8838 { 0, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
8839 { 0, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
8840 { 0, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
8841 { 0, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
8842 { 0, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
8843 { 0, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
8844 { 0, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
8845 { 0, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
8846 { 0, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
8849 /* Expand the builtin in EXP and store the result in TARGET. Store
8850 true in *EXPANDEDP if we found a builtin to expand.
8852 This expands the SPE builtins that are not simple unary and binary operations. */
8855 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
8857 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8859 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8860 enum insn_code icode;
8861 enum machine_mode tmode, mode0;
8863 struct builtin_description *d;
8868 /* Syntax check for a 5-bit unsigned immediate. */
8871 case SPE_BUILTIN_EVSTDD:
8872 case SPE_BUILTIN_EVSTDH:
8873 case SPE_BUILTIN_EVSTDW:
8874 case SPE_BUILTIN_EVSTWHE:
8875 case SPE_BUILTIN_EVSTWHO:
8876 case SPE_BUILTIN_EVSTWWE:
8877 case SPE_BUILTIN_EVSTWWO:
8878 arg1 = CALL_EXPR_ARG (exp, 2);
8879 if (TREE_CODE (arg1) != INTEGER_CST
8880 || TREE_INT_CST_LOW (arg1) & ~0x1f)
8882 error ("argument 2 must be a 5-bit unsigned literal");
8890 /* The evsplat*i instructions are not quite generic. */
8893 case SPE_BUILTIN_EVSPLATFI:
8894 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
8896 case SPE_BUILTIN_EVSPLATI:
8897 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
8903 d = (struct builtin_description *) bdesc_2arg_spe;
8904 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
8905 if (d->code == fcode)
8906 return rs6000_expand_binop_builtin (d->icode, exp, target);
8908 d = (struct builtin_description *) bdesc_spe_predicates;
8909 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
8910 if (d->code == fcode)
8911 return spe_expand_predicate_builtin (d->icode, exp, target);
8913 d = (struct builtin_description *) bdesc_spe_evsel;
8914 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
8915 if (d->code == fcode)
8916 return spe_expand_evsel_builtin (d->icode, exp, target);
8920 case SPE_BUILTIN_EVSTDDX:
8921 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
8922 case SPE_BUILTIN_EVSTDHX:
8923 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
8924 case SPE_BUILTIN_EVSTDWX:
8925 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
8926 case SPE_BUILTIN_EVSTWHEX:
8927 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
8928 case SPE_BUILTIN_EVSTWHOX:
8929 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
8930 case SPE_BUILTIN_EVSTWWEX:
8931 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
8932 case SPE_BUILTIN_EVSTWWOX:
8933 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
8934 case SPE_BUILTIN_EVSTDD:
8935 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
8936 case SPE_BUILTIN_EVSTDH:
8937 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
8938 case SPE_BUILTIN_EVSTDW:
8939 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
8940 case SPE_BUILTIN_EVSTWHE:
8941 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
8942 case SPE_BUILTIN_EVSTWHO:
8943 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
8944 case SPE_BUILTIN_EVSTWWE:
8945 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
8946 case SPE_BUILTIN_EVSTWWO:
8947 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
8948 case SPE_BUILTIN_MFSPEFSCR:
8949 icode = CODE_FOR_spe_mfspefscr;
8950 tmode = insn_data[icode].operand[0].mode;
8953 || GET_MODE (target) != tmode
8954 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
8955 target = gen_reg_rtx (tmode);
8957 pat = GEN_FCN (icode) (target);
8962 case SPE_BUILTIN_MTSPEFSCR:
8963 icode = CODE_FOR_spe_mtspefscr;
8964 arg0 = CALL_EXPR_ARG (exp, 0);
8965 op0 = expand_normal (arg0);
8966 mode0 = insn_data[icode].operand[0].mode;
8968 if (arg0 == error_mark_node)
8971 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
8972 op0 = copy_to_mode_reg (mode0, op0);
8974 pat = GEN_FCN (icode) (op0);
8987 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
8989 rtx pat, scratch, tmp;
8990 tree form = CALL_EXPR_ARG (exp, 0);
8991 tree arg0 = CALL_EXPR_ARG (exp, 1);
8992 tree arg1 = CALL_EXPR_ARG (exp, 2);
8993 rtx op0 = expand_normal (arg0);
8994 rtx op1 = expand_normal (arg1);
8995 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
8996 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
9000 if (TREE_CODE (form) != INTEGER_CST)
9002 error ("argument 1 of __builtin_paired_predicate must be a constant");
9006 form_int = TREE_INT_CST_LOW (form);
9008 gcc_assert (mode0 == mode1);
9010 if (arg0 == error_mark_node || arg1 == error_mark_node)
9014 || GET_MODE (target) != SImode
9015 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
9016 target = gen_reg_rtx (SImode);
9017 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
9018 op0 = copy_to_mode_reg (mode0, op0);
9019 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
9020 op1 = copy_to_mode_reg (mode1, op1);
9022 scratch = gen_reg_rtx (CCFPmode);
9024 pat = GEN_FCN (icode) (scratch, op0, op1);
9046 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
9049 error ("argument 1 of __builtin_paired_predicate is out of range");
9053 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
9054 emit_move_insn (target, tmp);
9059 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
9061 rtx pat, scratch, tmp;
9062 tree form = CALL_EXPR_ARG (exp, 0);
9063 tree arg0 = CALL_EXPR_ARG (exp, 1);
9064 tree arg1 = CALL_EXPR_ARG (exp, 2);
9065 rtx op0 = expand_normal (arg0);
9066 rtx op1 = expand_normal (arg1);
9067 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9068 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
9072 if (TREE_CODE (form) != INTEGER_CST)
9074 error ("argument 1 of __builtin_spe_predicate must be a constant");
9078 form_int = TREE_INT_CST_LOW (form);
9080 gcc_assert (mode0 == mode1);
9082 if (arg0 == error_mark_node || arg1 == error_mark_node)
9086 || GET_MODE (target) != SImode
9087 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
9088 target = gen_reg_rtx (SImode);
9090 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9091 op0 = copy_to_mode_reg (mode0, op0);
9092 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
9093 op1 = copy_to_mode_reg (mode1, op1);
9095 scratch = gen_reg_rtx (CCmode);
9097 pat = GEN_FCN (icode) (scratch, op0, op1);
9102 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
9103 _lower_. We use one compare, but look in different bits of the
9104 CR for each variant.
9106 There are 2 elements in each SPE simd type (upper/lower). The CR
9107 bits are set as follows:
9109 BIT0 | BIT 1 | BIT 2 | BIT 3
9110 U | L | (U | L) | (U & L)
9112 So, for an "all" relationship, BIT 3 would be set.
9113 For an "any" relationship, BIT 2 would be set. Etc.
9115 Following traditional nomenclature, these bits map to:
9117      BIT0  | BIT 1  | BIT 2   | BIT 3
          LT    | GT     | EQ      | OV
9120      Later, we will generate rtl to look in the LT/GT/EQ/OV bits.
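
     As a worked example (illustrative, not part of the original
     comment): if the upper elements of the two operands satisfy the
     relation but the lower elements do not, then U = 1 and L = 0, so
     BIT 0 (LT, "upper") and BIT 2 (EQ, "any") are set, while BIT 1
     (GT, "lower") and BIT 3 (OV, "all") are clear.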
9125 /* All variant. OV bit. */
9127 /* We need to get to the OV bit, which is the ORDERED bit. We
9128 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
9129 that's ugly and will make validate_condition_mode die.
9130 So let's just use another pattern. */
9131 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
9133 /* Any variant. EQ bit. */
9137 /* Upper variant. LT bit. */
9141 /* Lower variant. GT bit. */
9146 error ("argument 1 of __builtin_spe_predicate is out of range");
9150 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
9151 emit_move_insn (target, tmp);
9156 /* The evsel builtins look like this:
9158 e = __builtin_spe_evsel_OP (a, b, c, d);
9162 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
9163 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
9167 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
9170 tree arg0 = CALL_EXPR_ARG (exp, 0);
9171 tree arg1 = CALL_EXPR_ARG (exp, 1);
9172 tree arg2 = CALL_EXPR_ARG (exp, 2);
9173 tree arg3 = CALL_EXPR_ARG (exp, 3);
9174 rtx op0 = expand_normal (arg0);
9175 rtx op1 = expand_normal (arg1);
9176 rtx op2 = expand_normal (arg2);
9177 rtx op3 = expand_normal (arg3);
9178 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9179 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
9181 gcc_assert (mode0 == mode1);
9183 if (arg0 == error_mark_node || arg1 == error_mark_node
9184 || arg2 == error_mark_node || arg3 == error_mark_node)
9188 || GET_MODE (target) != mode0
9189 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
9190 target = gen_reg_rtx (mode0);
9192 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9193 op0 = copy_to_mode_reg (mode0, op0);
9194 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
9195 op1 = copy_to_mode_reg (mode0, op1);
9196 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
9197 op2 = copy_to_mode_reg (mode0, op2);
9198 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
9199 op3 = copy_to_mode_reg (mode0, op3);
9201 /* Generate the compare. */
9202 scratch = gen_reg_rtx (CCmode);
9203 pat = GEN_FCN (icode) (scratch, op0, op1);
9208 if (mode0 == V2SImode)
9209 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
9211 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
9216 /* Expand an expression EXP that calls a built-in function,
9217 with result going to TARGET if that's convenient
9218 (and in mode MODE if that's convenient).
9219 SUBTARGET may be used as the target for computing one of EXP's operands.
9220 IGNORE is nonzero if the value is to be ignored. */
9223 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9224 enum machine_mode mode ATTRIBUTE_UNUSED,
9225 int ignore ATTRIBUTE_UNUSED)
9227 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9228 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9229 const struct builtin_description *d;
9234 if (fcode == RS6000_BUILTIN_RECIP)
9235 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
9237 if (fcode == RS6000_BUILTIN_RECIPF)
9238 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
9240 if (fcode == RS6000_BUILTIN_RSQRTF)
9241 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
9243 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_LOAD
9244 || fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
9246 int icode = (int) CODE_FOR_altivec_lvsr;
9247 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9248 enum machine_mode mode = insn_data[icode].operand[1].mode;
9252 gcc_assert (TARGET_ALTIVEC);
9254 arg = CALL_EXPR_ARG (exp, 0);
9255 gcc_assert (TREE_CODE (TREE_TYPE (arg)) == POINTER_TYPE);
9256 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
9257 addr = memory_address (mode, op);
9258 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
9262      /* For the load case we need to negate the address.  */
9263 op = gen_reg_rtx (GET_MODE (addr));
9264 emit_insn (gen_rtx_SET (VOIDmode, op,
9265 gen_rtx_NEG (GET_MODE (addr), addr)));
9267 op = gen_rtx_MEM (mode, op);
9270 || GET_MODE (target) != tmode
9271 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9272 target = gen_reg_rtx (tmode);
9274 /*pat = gen_altivec_lvsr (target, op);*/
9275 pat = GEN_FCN (icode) (target, op);
9283 /* FIXME: There's got to be a nicer way to handle this case than
9284 constructing a new CALL_EXPR. */
9285 if (fcode == ALTIVEC_BUILTIN_VCFUX
9286 || fcode == ALTIVEC_BUILTIN_VCFSX
9287 || fcode == ALTIVEC_BUILTIN_VCTUXS
9288 || fcode == ALTIVEC_BUILTIN_VCTSXS)
9290 if (call_expr_nargs (exp) == 1)
9291 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
9292 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
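          /* For example (a sketch): a one-argument call such as
             __builtin_altivec_vcfsx (v) is rewritten here into the
             two-argument form __builtin_altivec_vcfsx (v, 0).  */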
9297 ret = altivec_expand_builtin (exp, target, &success);
9304 ret = spe_expand_builtin (exp, target, &success);
9309 if (TARGET_PAIRED_FLOAT)
9311 ret = paired_expand_builtin (exp, target, &success);
9317 gcc_assert (TARGET_ALTIVEC || TARGET_SPE || TARGET_PAIRED_FLOAT);
9319 /* Handle simple unary operations. */
9320 d = (struct builtin_description *) bdesc_1arg;
9321 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
9322 if (d->code == fcode)
9323 return rs6000_expand_unop_builtin (d->icode, exp, target);
9325 /* Handle simple binary operations. */
9326 d = (struct builtin_description *) bdesc_2arg;
9327 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
9328 if (d->code == fcode)
9329 return rs6000_expand_binop_builtin (d->icode, exp, target);
9331 /* Handle simple ternary operations. */
9333 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
9334 if (d->code == fcode)
9335 return rs6000_expand_ternop_builtin (d->icode, exp, target);
9341 build_opaque_vector_type (tree node, int nunits)
9343 node = copy_node (node);
9344 TYPE_MAIN_VARIANT (node) = node;
9345 TYPE_CANONICAL (node) = node;
9346 return build_vector_type (node, nunits);
9350 rs6000_init_builtins (void)
9352 V2SI_type_node = build_vector_type (intSI_type_node, 2);
9353 V2SF_type_node = build_vector_type (float_type_node, 2);
9354 V4HI_type_node = build_vector_type (intHI_type_node, 4);
9355 V4SI_type_node = build_vector_type (intSI_type_node, 4);
9356 V4SF_type_node = build_vector_type (float_type_node, 4);
9357 V8HI_type_node = build_vector_type (intHI_type_node, 8);
9358 V16QI_type_node = build_vector_type (intQI_type_node, 16);
9360 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
9361 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
9362 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
9364 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
9365 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
9366 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
9367 opaque_V4SI_type_node = copy_node (V4SI_type_node);
9369 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
9370 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
9371 'vector unsigned short'. */
9373 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
9374 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
9375 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
9376 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
9378 long_integer_type_internal_node = long_integer_type_node;
9379 long_unsigned_type_internal_node = long_unsigned_type_node;
9380 intQI_type_internal_node = intQI_type_node;
9381 uintQI_type_internal_node = unsigned_intQI_type_node;
9382 intHI_type_internal_node = intHI_type_node;
9383 uintHI_type_internal_node = unsigned_intHI_type_node;
9384 intSI_type_internal_node = intSI_type_node;
9385 uintSI_type_internal_node = unsigned_intSI_type_node;
9386 float_type_internal_node = float_type_node;
9387 void_type_internal_node = void_type_node;
9389 (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
9390 get_identifier ("__bool char"),
9391 bool_char_type_node));
9392 (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
9393 get_identifier ("__bool short"),
9394 bool_short_type_node));
9395 (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
9396 get_identifier ("__bool int"),
9397 bool_int_type_node));
9398 (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
9399 get_identifier ("__pixel"),
9402 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
9403 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
9404 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
9405 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
9407 (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
9408 get_identifier ("__vector unsigned char"),
9409 unsigned_V16QI_type_node));
9410 (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
9411 get_identifier ("__vector signed char"),
9413 (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
9414 get_identifier ("__vector __bool char"),
9415 bool_V16QI_type_node));
9417 (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
9418 get_identifier ("__vector unsigned short"),
9419 unsigned_V8HI_type_node));
9420 (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
9421 get_identifier ("__vector signed short"),
9423 (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
9424 get_identifier ("__vector __bool short"),
9425 bool_V8HI_type_node));
9427 (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
9428 get_identifier ("__vector unsigned int"),
9429 unsigned_V4SI_type_node));
9430 (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
9431 get_identifier ("__vector signed int"),
9433 (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
9434 get_identifier ("__vector __bool int"),
9435 bool_V4SI_type_node));
9437 (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
9438 get_identifier ("__vector float"),
9440 (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
9441 get_identifier ("__vector __pixel"),
9442 pixel_V8HI_type_node));
9444 if (TARGET_PAIRED_FLOAT)
9445 paired_init_builtins ();
9447 spe_init_builtins ();
9449 altivec_init_builtins ();
9450 if (TARGET_ALTIVEC || TARGET_SPE || TARGET_PAIRED_FLOAT)
9451 rs6000_common_init_builtins ();
9452 if (TARGET_PPC_GFXOPT)
9454 tree ftype = build_function_type_list (float_type_node,
9458 def_builtin (MASK_PPC_GFXOPT, "__builtin_recipdivf", ftype,
9459 RS6000_BUILTIN_RECIPF);
9461 ftype = build_function_type_list (float_type_node,
9464 def_builtin (MASK_PPC_GFXOPT, "__builtin_rsqrtf", ftype,
9465 RS6000_BUILTIN_RSQRTF);
9469 tree ftype = build_function_type_list (double_type_node,
9473 def_builtin (MASK_POPCNTB, "__builtin_recipdiv", ftype,
9474 RS6000_BUILTIN_RECIP);
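  /* A hypothetical usage sketch of the estimate builtins defined
     above (names and argument counts per the def_builtin calls; the
     exact semantics of the expanded patterns are assumed):

         float  qf = __builtin_recipdivf (x, y);   approximates x / y
         float  rf = __builtin_rsqrtf (x);         approximates 1.0f / sqrtf (x)
         double qd = __builtin_recipdiv (a, b);    double-precision variant  */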
9479 /* AIX libm provides clog as __clog. */
9480 if (built_in_decls [BUILT_IN_CLOG])
9481 set_user_assembler_name (built_in_decls [BUILT_IN_CLOG], "__clog");
9484 #ifdef SUBTARGET_INIT_BUILTINS
9485 SUBTARGET_INIT_BUILTINS;
9489 /* Search through a set of builtins and enable the mask bits.
9490 DESC is an array of builtins.
9491 SIZE is the total number of builtins.
9492 START is the builtin enum at which to start.
9493 END is the builtin enum at which to end. */
9495 enable_mask_for_builtins (struct builtin_description *desc, int size,
9496 enum rs6000_builtins start,
9497 enum rs6000_builtins end)
9501 for (i = 0; i < size; ++i)
9502 if (desc[i].code == start)
9508 for (; i < size; ++i)
9510 /* Flip all the bits on. */
9511 desc[i].mask = target_flags;
9512 if (desc[i].code == end)
9518 spe_init_builtins (void)
9520 tree endlink = void_list_node;
9521 tree puint_type_node = build_pointer_type (unsigned_type_node);
9522 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
9523 struct builtin_description *d;
9526 tree v2si_ftype_4_v2si
9527 = build_function_type
9528 (opaque_V2SI_type_node,
9529 tree_cons (NULL_TREE, opaque_V2SI_type_node,
9530 tree_cons (NULL_TREE, opaque_V2SI_type_node,
9531 tree_cons (NULL_TREE, opaque_V2SI_type_node,
9532 tree_cons (NULL_TREE, opaque_V2SI_type_node,
9535 tree v2sf_ftype_4_v2sf
9536 = build_function_type
9537 (opaque_V2SF_type_node,
9538 tree_cons (NULL_TREE, opaque_V2SF_type_node,
9539 tree_cons (NULL_TREE, opaque_V2SF_type_node,
9540 tree_cons (NULL_TREE, opaque_V2SF_type_node,
9541 tree_cons (NULL_TREE, opaque_V2SF_type_node,
9544 tree int_ftype_int_v2si_v2si
9545 = build_function_type
9547 tree_cons (NULL_TREE, integer_type_node,
9548 tree_cons (NULL_TREE, opaque_V2SI_type_node,
9549 tree_cons (NULL_TREE, opaque_V2SI_type_node,
9552 tree int_ftype_int_v2sf_v2sf
9553 = build_function_type
9555 tree_cons (NULL_TREE, integer_type_node,
9556 tree_cons (NULL_TREE, opaque_V2SF_type_node,
9557 tree_cons (NULL_TREE, opaque_V2SF_type_node,
9560 tree void_ftype_v2si_puint_int
9561 = build_function_type (void_type_node,
9562 tree_cons (NULL_TREE, opaque_V2SI_type_node,
9563 tree_cons (NULL_TREE, puint_type_node,
9564 tree_cons (NULL_TREE,
9568 tree void_ftype_v2si_puint_char
9569 = build_function_type (void_type_node,
9570 tree_cons (NULL_TREE, opaque_V2SI_type_node,
9571 tree_cons (NULL_TREE, puint_type_node,
9572 tree_cons (NULL_TREE,
9576 tree void_ftype_v2si_pv2si_int
9577 = build_function_type (void_type_node,
9578 tree_cons (NULL_TREE, opaque_V2SI_type_node,
9579 tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
9580 tree_cons (NULL_TREE,
9584 tree void_ftype_v2si_pv2si_char
9585 = build_function_type (void_type_node,
9586 tree_cons (NULL_TREE, opaque_V2SI_type_node,
9587 tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
9588 tree_cons (NULL_TREE,
        tree void_ftype_int
9593      = build_function_type (void_type_node,
9594 tree_cons (NULL_TREE, integer_type_node, endlink));
        tree int_ftype_void
9597      = build_function_type (integer_type_node, endlink);
9599 tree v2si_ftype_pv2si_int
9600 = build_function_type (opaque_V2SI_type_node,
9601 tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
9602 tree_cons (NULL_TREE, integer_type_node,
9605 tree v2si_ftype_puint_int
9606 = build_function_type (opaque_V2SI_type_node,
9607 tree_cons (NULL_TREE, puint_type_node,
9608 tree_cons (NULL_TREE, integer_type_node,
9611 tree v2si_ftype_pushort_int
9612 = build_function_type (opaque_V2SI_type_node,
9613 tree_cons (NULL_TREE, pushort_type_node,
9614 tree_cons (NULL_TREE, integer_type_node,
9617 tree v2si_ftype_signed_char
9618 = build_function_type (opaque_V2SI_type_node,
9619 tree_cons (NULL_TREE, signed_char_type_node,
9622 /* The initialization of the simple binary and unary builtins is
9623 done in rs6000_common_init_builtins, but we have to enable the
9624 mask bits here manually because we have run out of `target_flags'
9625 bits. We really need to redesign this mask business. */
9627 enable_mask_for_builtins ((struct builtin_description *) bdesc_2arg,
9628 ARRAY_SIZE (bdesc_2arg),
9631 enable_mask_for_builtins ((struct builtin_description *) bdesc_1arg,
9632 ARRAY_SIZE (bdesc_1arg),
9634 SPE_BUILTIN_EVSUBFUSIAAW);
9635 enable_mask_for_builtins ((struct builtin_description *) bdesc_spe_predicates,
9636 ARRAY_SIZE (bdesc_spe_predicates),
9637 SPE_BUILTIN_EVCMPEQ,
9638 SPE_BUILTIN_EVFSTSTLT);
9639 enable_mask_for_builtins ((struct builtin_description *) bdesc_spe_evsel,
9640 ARRAY_SIZE (bdesc_spe_evsel),
9641 SPE_BUILTIN_EVSEL_CMPGTS,
9642 SPE_BUILTIN_EVSEL_FSTSTEQ);
9644 (*lang_hooks.decls.pushdecl)
9645 (build_decl (TYPE_DECL, get_identifier ("__ev64_opaque__"),
9646 opaque_V2SI_type_node));
9648 /* Initialize irregular SPE builtins. */
9650 def_builtin (target_flags, "__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
9651 def_builtin (target_flags, "__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
9652 def_builtin (target_flags, "__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
9653 def_builtin (target_flags, "__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
9654 def_builtin (target_flags, "__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
9655 def_builtin (target_flags, "__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
9656 def_builtin (target_flags, "__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
9657 def_builtin (target_flags, "__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
9658 def_builtin (target_flags, "__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
9659 def_builtin (target_flags, "__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
9660 def_builtin (target_flags, "__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
9661 def_builtin (target_flags, "__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
9662 def_builtin (target_flags, "__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
9663 def_builtin (target_flags, "__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
9664 def_builtin (target_flags, "__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
9665 def_builtin (target_flags, "__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
9666 def_builtin (target_flags, "__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
9667 def_builtin (target_flags, "__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
9670 def_builtin (target_flags, "__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
9671 def_builtin (target_flags, "__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
9672 def_builtin (target_flags, "__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
9673 def_builtin (target_flags, "__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
9674 def_builtin (target_flags, "__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
9675 def_builtin (target_flags, "__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
9676 def_builtin (target_flags, "__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
9677 def_builtin (target_flags, "__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
9678 def_builtin (target_flags, "__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
9679 def_builtin (target_flags, "__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
9680 def_builtin (target_flags, "__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
9681 def_builtin (target_flags, "__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
9682 def_builtin (target_flags, "__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
9683 def_builtin (target_flags, "__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
9684 def_builtin (target_flags, "__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
9685 def_builtin (target_flags, "__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
9686 def_builtin (target_flags, "__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
9687 def_builtin (target_flags, "__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
9688 def_builtin (target_flags, "__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
9689 def_builtin (target_flags, "__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
9690 def_builtin (target_flags, "__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
9691 def_builtin (target_flags, "__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
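
  /* A hypothetical usage sketch of the irregular SPE builtins defined
     above (types per the v2si_ftype_* / void_ftype_* lists built
     earlier in this function; the literal offset arguments are assumed
     to be small unsigned constants, cf. the 5-bit check in
     spe_expand_builtin):

         __ev64_opaque__ v, *pv;
         unsigned int *pu;

         v = __builtin_spe_evldd (pv, 8);
         v = __builtin_spe_evlwhsplat (pu, 4);
         __builtin_spe_evstdd (v, pv, 8);
         __builtin_spe_mtspefscr (__builtin_spe_mfspefscr ());  */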
9694 d = (struct builtin_description *) bdesc_spe_predicates;
9695 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
9699 switch (insn_data[d->icode].operand[1].mode)
9702 type = int_ftype_int_v2si_v2si;
9705 type = int_ftype_int_v2sf_v2sf;
9711 def_builtin (d->mask, d->name, type, d->code);
9714 /* Evsel predicates. */
9715 d = (struct builtin_description *) bdesc_spe_evsel;
9716 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
9720 switch (insn_data[d->icode].operand[1].mode)
9723 type = v2si_ftype_4_v2si;
9726 type = v2sf_ftype_4_v2sf;
9732 def_builtin (d->mask, d->name, type, d->code);
9737 paired_init_builtins (void)
9739 const struct builtin_description *d;
9741 tree endlink = void_list_node;
9743 tree int_ftype_int_v2sf_v2sf
9744 = build_function_type
9746 tree_cons (NULL_TREE, integer_type_node,
9747 tree_cons (NULL_TREE, V2SF_type_node,
9748 tree_cons (NULL_TREE, V2SF_type_node,
9750 tree pcfloat_type_node =
9751 build_pointer_type (build_qualified_type
9752 (float_type_node, TYPE_QUAL_CONST));
9754 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
9755 long_integer_type_node,
9758 tree void_ftype_v2sf_long_pcfloat =
9759 build_function_type_list (void_type_node,
9761 long_integer_type_node,
9766 def_builtin (0, "__builtin_paired_lx", v2sf_ftype_long_pcfloat,
9770 def_builtin (0, "__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
9771 PAIRED_BUILTIN_STX);
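
  /* A hypothetical usage sketch of the two builtins above (argument
     order per v2sf_ftype_long_pcfloat and void_ftype_v2sf_long_pcfloat;
     the two-float vector type is written with the generic vector_size
     attribute as an assumption):

         typedef float v2sf __attribute__ ((vector_size (8)));
         const float *src;  float *dst;
         v2sf v = __builtin_paired_lx (0, src);
         __builtin_paired_stx (v, 0, dst);  */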
9774 d = bdesc_paired_preds;
9775 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
9779 switch (insn_data[d->icode].operand[1].mode)
9782 type = int_ftype_int_v2sf_v2sf;
9788 def_builtin (d->mask, d->name, type, d->code);
9793 altivec_init_builtins (void)
9795 const struct builtin_description *d;
9796 const struct builtin_description_predicates *dp;
9800 tree pfloat_type_node = build_pointer_type (float_type_node);
9801 tree pint_type_node = build_pointer_type (integer_type_node);
9802 tree pshort_type_node = build_pointer_type (short_integer_type_node);
9803 tree pchar_type_node = build_pointer_type (char_type_node);
9805 tree pvoid_type_node = build_pointer_type (void_type_node);
9807 tree pcfloat_type_node = build_pointer_type (build_qualified_type (float_type_node, TYPE_QUAL_CONST));
9808 tree pcint_type_node = build_pointer_type (build_qualified_type (integer_type_node, TYPE_QUAL_CONST));
9809 tree pcshort_type_node = build_pointer_type (build_qualified_type (short_integer_type_node, TYPE_QUAL_CONST));
9810 tree pcchar_type_node = build_pointer_type (build_qualified_type (char_type_node, TYPE_QUAL_CONST));
9812 tree pcvoid_type_node = build_pointer_type (build_qualified_type (void_type_node, TYPE_QUAL_CONST));
9814 tree int_ftype_opaque
9815 = build_function_type_list (integer_type_node,
9816 opaque_V4SI_type_node, NULL_TREE);
9817 tree opaque_ftype_opaque
9818 = build_function_type (integer_type_node,
9820 tree opaque_ftype_opaque_int
9821 = build_function_type_list (opaque_V4SI_type_node,
9822 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
9823 tree opaque_ftype_opaque_opaque_int
9824 = build_function_type_list (opaque_V4SI_type_node,
9825 opaque_V4SI_type_node, opaque_V4SI_type_node,
9826 integer_type_node, NULL_TREE);
9827 tree int_ftype_int_opaque_opaque
9828 = build_function_type_list (integer_type_node,
9829 integer_type_node, opaque_V4SI_type_node,
9830 opaque_V4SI_type_node, NULL_TREE);
9831 tree int_ftype_int_v4si_v4si
9832 = build_function_type_list (integer_type_node,
9833 integer_type_node, V4SI_type_node,
9834 V4SI_type_node, NULL_TREE);
9835 tree v4sf_ftype_pcfloat
9836 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
9837 tree void_ftype_pfloat_v4sf
9838 = build_function_type_list (void_type_node,
9839 pfloat_type_node, V4SF_type_node, NULL_TREE);
9840 tree v4si_ftype_pcint
9841 = build_function_type_list (V4SI_type_node, pcint_type_node, NULL_TREE);
9842 tree void_ftype_pint_v4si
9843 = build_function_type_list (void_type_node,
9844 pint_type_node, V4SI_type_node, NULL_TREE);
9845 tree v8hi_ftype_pcshort
9846 = build_function_type_list (V8HI_type_node, pcshort_type_node, NULL_TREE);
9847 tree void_ftype_pshort_v8hi
9848 = build_function_type_list (void_type_node,
9849 pshort_type_node, V8HI_type_node, NULL_TREE);
9850 tree v16qi_ftype_pcchar
9851 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
9852 tree void_ftype_pchar_v16qi
9853 = build_function_type_list (void_type_node,
9854 pchar_type_node, V16QI_type_node, NULL_TREE);
9855 tree void_ftype_v4si
9856 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
9857 tree v8hi_ftype_void
9858 = build_function_type (V8HI_type_node, void_list_node);
9859 tree void_ftype_void
9860 = build_function_type (void_type_node, void_list_node);
        tree void_ftype_int
9862      = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
9864 tree opaque_ftype_long_pcvoid
9865 = build_function_type_list (opaque_V4SI_type_node,
9866 long_integer_type_node, pcvoid_type_node, NULL_TREE);
9867 tree v16qi_ftype_long_pcvoid
9868 = build_function_type_list (V16QI_type_node,
9869 long_integer_type_node, pcvoid_type_node, NULL_TREE);
9870 tree v8hi_ftype_long_pcvoid
9871 = build_function_type_list (V8HI_type_node,
9872 long_integer_type_node, pcvoid_type_node, NULL_TREE);
9873 tree v4si_ftype_long_pcvoid
9874 = build_function_type_list (V4SI_type_node,
9875 long_integer_type_node, pcvoid_type_node, NULL_TREE);
9877 tree void_ftype_opaque_long_pvoid
9878 = build_function_type_list (void_type_node,
9879 opaque_V4SI_type_node, long_integer_type_node,
9880 pvoid_type_node, NULL_TREE);
9881 tree void_ftype_v4si_long_pvoid
9882 = build_function_type_list (void_type_node,
9883 V4SI_type_node, long_integer_type_node,
9884 pvoid_type_node, NULL_TREE);
9885 tree void_ftype_v16qi_long_pvoid
9886 = build_function_type_list (void_type_node,
9887 V16QI_type_node, long_integer_type_node,
9888 pvoid_type_node, NULL_TREE);
9889 tree void_ftype_v8hi_long_pvoid
9890 = build_function_type_list (void_type_node,
9891 V8HI_type_node, long_integer_type_node,
9892 pvoid_type_node, NULL_TREE);
9893 tree int_ftype_int_v8hi_v8hi
9894 = build_function_type_list (integer_type_node,
9895 integer_type_node, V8HI_type_node,
9896 V8HI_type_node, NULL_TREE);
9897 tree int_ftype_int_v16qi_v16qi
9898 = build_function_type_list (integer_type_node,
9899 integer_type_node, V16QI_type_node,
9900 V16QI_type_node, NULL_TREE);
9901 tree int_ftype_int_v4sf_v4sf
9902 = build_function_type_list (integer_type_node,
9903 integer_type_node, V4SF_type_node,
9904 V4SF_type_node, NULL_TREE);
9905 tree v4si_ftype_v4si
9906 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
9907 tree v8hi_ftype_v8hi
9908 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
9909 tree v16qi_ftype_v16qi
9910 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
9911 tree v4sf_ftype_v4sf
9912 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
9913 tree void_ftype_pcvoid_int_int
9914 = build_function_type_list (void_type_node,
9915 pcvoid_type_node, integer_type_node,
9916 integer_type_node, NULL_TREE);
9918 def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4sf", v4sf_ftype_pcfloat,
9919 ALTIVEC_BUILTIN_LD_INTERNAL_4sf);
9920 def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_4sf", void_ftype_pfloat_v4sf,
9921 ALTIVEC_BUILTIN_ST_INTERNAL_4sf);
9922 def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4si", v4si_ftype_pcint,
9923 ALTIVEC_BUILTIN_LD_INTERNAL_4si);
9924 def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_4si", void_ftype_pint_v4si,
9925 ALTIVEC_BUILTIN_ST_INTERNAL_4si);
9926 def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_8hi", v8hi_ftype_pcshort,
9927 ALTIVEC_BUILTIN_LD_INTERNAL_8hi);
9928 def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_8hi", void_ftype_pshort_v8hi,
9929 ALTIVEC_BUILTIN_ST_INTERNAL_8hi);
9930 def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_16qi", v16qi_ftype_pcchar,
9931 ALTIVEC_BUILTIN_LD_INTERNAL_16qi);
9932 def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_16qi", void_ftype_pchar_v16qi,
9933 ALTIVEC_BUILTIN_ST_INTERNAL_16qi);
9934 def_builtin (MASK_ALTIVEC, "__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
9935 def_builtin (MASK_ALTIVEC, "__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
9936 def_builtin (MASK_ALTIVEC, "__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
9937 def_builtin (MASK_ALTIVEC, "__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
9938 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
9939 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
9940 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
9941 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
9942 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
9943 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
9944 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
9945 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
9946 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
9947 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
9948 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
9949 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
9950 def_builtin (MASK_ALTIVEC, "__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
9951 def_builtin (MASK_ALTIVEC, "__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
9952 def_builtin (MASK_ALTIVEC, "__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
9953 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
9954 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
9955 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
9956 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
9957 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
9958 def_builtin (MASK_ALTIVEC, "__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
9959 def_builtin (MASK_ALTIVEC, "__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
9960 def_builtin (MASK_ALTIVEC, "__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
9961 def_builtin (MASK_ALTIVEC, "__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
9962 def_builtin (MASK_ALTIVEC, "__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
9963 def_builtin (MASK_ALTIVEC, "__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
9965 if (rs6000_cpu == PROCESSOR_CELL)
9967 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
9968 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
9969 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
9970 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
9972 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
9973 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
9974 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
9975 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
9977 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
9978 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
9979 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
9980 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
9982 def_builtin (MASK_ALTIVEC, "__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
9983 def_builtin (MASK_ALTIVEC, "__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
9984 def_builtin (MASK_ALTIVEC, "__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
9985 def_builtin (MASK_ALTIVEC, "__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
9987 def_builtin (MASK_ALTIVEC, "__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
9988 def_builtin (MASK_ALTIVEC, "__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
9989 def_builtin (MASK_ALTIVEC, "__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
9991 def_builtin (MASK_ALTIVEC, "__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
9992 def_builtin (MASK_ALTIVEC, "__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
9993 def_builtin (MASK_ALTIVEC, "__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
9994 def_builtin (MASK_ALTIVEC, "__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
9995 def_builtin (MASK_ALTIVEC, "__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
9996 def_builtin (MASK_ALTIVEC, "__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
9997 def_builtin (MASK_ALTIVEC, "__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
9998 def_builtin (MASK_ALTIVEC, "__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
9999 def_builtin (MASK_ALTIVEC, "__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
10000 def_builtin (MASK_ALTIVEC, "__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
10001 def_builtin (MASK_ALTIVEC, "__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
10002 def_builtin (MASK_ALTIVEC, "__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
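
  /* A hypothetical usage sketch of the overloaded __builtin_vec_*
     entry points above; the opaque argument and result types are
     assumed to be resolved to concrete vector types by the overload
     handling elsewhere in the back end:

         __vector signed int a, b;
         int *ptr;
         a = __builtin_vec_ld (0, ptr);         opaque_ftype_long_pcvoid
         b = __builtin_vec_sld (a, a, 4);       opaque_ftype_opaque_opaque_int
         __builtin_vec_st (b, 0, ptr);          void_ftype_opaque_long_pvoid  */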
10004 /* Add the DST variants. */
10006 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
10007 def_builtin (d->mask, d->name, void_ftype_pcvoid_int_int, d->code);
10009 /* Initialize the predicates. */
10010 dp = bdesc_altivec_preds;
10011 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, dp++)
10013 enum machine_mode mode1;
10015 bool is_overloaded = dp->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
10016 && dp->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
10021 mode1 = insn_data[dp->icode].operand[1].mode;
10026 type = int_ftype_int_opaque_opaque;
10029 type = int_ftype_int_v4si_v4si;
10032 type = int_ftype_int_v8hi_v8hi;
10035 type = int_ftype_int_v16qi_v16qi;
10038 type = int_ftype_int_v4sf_v4sf;
10041 gcc_unreachable ();
10044 def_builtin (dp->mask, dp->name, type, dp->code);
10047 /* Initialize the abs* operators. */
10049 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
10051 enum machine_mode mode0;
10054 mode0 = insn_data[d->icode].operand[0].mode;
10059 type = v4si_ftype_v4si;
10062 type = v8hi_ftype_v8hi;
10065 type = v16qi_ftype_v16qi;
10068 type = v4sf_ftype_v4sf;
10071 gcc_unreachable ();
10074 def_builtin (d->mask, d->name, type, d->code);
10077 if (TARGET_ALTIVEC)
10081 /* Initialize target builtin that implements
10082 targetm.vectorize.builtin_mask_for_load. */
10084 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
10085 v16qi_ftype_long_pcvoid,
10086 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
10087 BUILT_IN_MD, NULL, NULL_TREE);
10088 TREE_READONLY (decl) = 1;
10089 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
10090 altivec_builtin_mask_for_load = decl;
10093 /* Access to the vec_init patterns. */
10094 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
10095 integer_type_node, integer_type_node,
10096 integer_type_node, NULL_TREE);
10097 def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v4si", ftype,
10098 ALTIVEC_BUILTIN_VEC_INIT_V4SI);
10100 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
10101 short_integer_type_node,
10102 short_integer_type_node,
10103 short_integer_type_node,
10104 short_integer_type_node,
10105 short_integer_type_node,
10106 short_integer_type_node,
10107 short_integer_type_node, NULL_TREE);
10108 def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v8hi", ftype,
10109 ALTIVEC_BUILTIN_VEC_INIT_V8HI);
10111 ftype = build_function_type_list (V16QI_type_node, char_type_node,
10112 char_type_node, char_type_node,
10113 char_type_node, char_type_node,
10114 char_type_node, char_type_node,
10115 char_type_node, char_type_node,
10116 char_type_node, char_type_node,
10117 char_type_node, char_type_node,
10118 char_type_node, char_type_node,
10119 char_type_node, NULL_TREE);
10120 def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v16qi", ftype,
10121 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
10123 ftype = build_function_type_list (V4SF_type_node, float_type_node,
10124 float_type_node, float_type_node,
10125 float_type_node, NULL_TREE);
10126 def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v4sf", ftype,
10127 ALTIVEC_BUILTIN_VEC_INIT_V4SF);
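
  /* A hypothetical usage sketch of the vec_init builtins defined
     above (element counts and types per the ftype lists):

         __vector signed int vi = __builtin_vec_init_v4si (1, 2, 3, 4);
         __vector float vf = __builtin_vec_init_v4sf (1.0f, 2.0f, 3.0f, 4.0f);  */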
10129 /* Access to the vec_set patterns. */
10130 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
10132 integer_type_node, NULL_TREE);
10133 def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v4si", ftype,
10134 ALTIVEC_BUILTIN_VEC_SET_V4SI);
10136 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
10138 integer_type_node, NULL_TREE);
10139 def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v8hi", ftype,
10140 ALTIVEC_BUILTIN_VEC_SET_V8HI);
10142  ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
10144 integer_type_node, NULL_TREE);
10145 def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v16qi", ftype,
10146 ALTIVEC_BUILTIN_VEC_SET_V16QI);
10148 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
10150 integer_type_node, NULL_TREE);
10151 def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v4sf", ftype,
10152 ALTIVEC_BUILTIN_VEC_SET_V4SF);
10154 /* Access to the vec_extract patterns. */
10155 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
10156 integer_type_node, NULL_TREE);
10157 def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v4si", ftype,
10158 ALTIVEC_BUILTIN_VEC_EXT_V4SI);
10160 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
10161 integer_type_node, NULL_TREE);
10162 def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v8hi", ftype,
10163 ALTIVEC_BUILTIN_VEC_EXT_V8HI);
10165 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
10166 integer_type_node, NULL_TREE);
10167 def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v16qi", ftype,
10168 ALTIVEC_BUILTIN_VEC_EXT_V16QI);
10170 ftype = build_function_type_list (float_type_node, V4SF_type_node,
10171 integer_type_node, NULL_TREE);
10172 def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v4sf", ftype,
10173 ALTIVEC_BUILTIN_VEC_EXT_V4SF);
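
  /* A hypothetical usage sketch of the vec_set / vec_ext builtins
     above (vector first, then the new value, then the element index,
     per the ftype lists):

         __vector signed int v = __builtin_vec_init_v4si (1, 2, 3, 4);
         v = __builtin_vec_set_v4si (v, 42, 0);    replace element 0
         int e = __builtin_vec_ext_v4si (v, 3);    read back element 3  */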
10177 rs6000_common_init_builtins (void)
10179 const struct builtin_description *d;
10182 tree v2sf_ftype_v2sf_v2sf_v2sf
10183 = build_function_type_list (V2SF_type_node,
10184 V2SF_type_node, V2SF_type_node,
10185 V2SF_type_node, NULL_TREE);
10187 tree v4sf_ftype_v4sf_v4sf_v16qi
10188 = build_function_type_list (V4SF_type_node,
10189 V4SF_type_node, V4SF_type_node,
10190 V16QI_type_node, NULL_TREE);
10191 tree v4si_ftype_v4si_v4si_v16qi
10192 = build_function_type_list (V4SI_type_node,
10193 V4SI_type_node, V4SI_type_node,
10194 V16QI_type_node, NULL_TREE);
10195 tree v8hi_ftype_v8hi_v8hi_v16qi
10196 = build_function_type_list (V8HI_type_node,
10197 V8HI_type_node, V8HI_type_node,
10198 V16QI_type_node, NULL_TREE);
10199 tree v16qi_ftype_v16qi_v16qi_v16qi
10200 = build_function_type_list (V16QI_type_node,
10201 V16QI_type_node, V16QI_type_node,
10202 V16QI_type_node, NULL_TREE);
10203 tree v4si_ftype_int
10204 = build_function_type_list (V4SI_type_node, integer_type_node, NULL_TREE);
10205 tree v8hi_ftype_int
10206 = build_function_type_list (V8HI_type_node, integer_type_node, NULL_TREE);
10207 tree v16qi_ftype_int
10208 = build_function_type_list (V16QI_type_node, integer_type_node, NULL_TREE);
10209 tree v8hi_ftype_v16qi
10210 = build_function_type_list (V8HI_type_node, V16QI_type_node, NULL_TREE);
10211 tree v4sf_ftype_v4sf
10212 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
10214 tree v2si_ftype_v2si_v2si
10215 = build_function_type_list (opaque_V2SI_type_node,
10216 opaque_V2SI_type_node,
10217 opaque_V2SI_type_node, NULL_TREE);
10219 tree v2sf_ftype_v2sf_v2sf_spe
10220 = build_function_type_list (opaque_V2SF_type_node,
10221 opaque_V2SF_type_node,
10222 opaque_V2SF_type_node, NULL_TREE);
10224 tree v2sf_ftype_v2sf_v2sf
10225 = build_function_type_list (V2SF_type_node,
10227 V2SF_type_node, NULL_TREE);
10230 tree v2si_ftype_int_int
10231 = build_function_type_list (opaque_V2SI_type_node,
10232 integer_type_node, integer_type_node,
10235 tree opaque_ftype_opaque
10236 = build_function_type_list (opaque_V4SI_type_node,
10237 opaque_V4SI_type_node, NULL_TREE);
10239 tree v2si_ftype_v2si
10240 = build_function_type_list (opaque_V2SI_type_node,
10241 opaque_V2SI_type_node, NULL_TREE);
10243 tree v2sf_ftype_v2sf_spe
10244 = build_function_type_list (opaque_V2SF_type_node,
10245 opaque_V2SF_type_node, NULL_TREE);
10247 tree v2sf_ftype_v2sf
10248 = build_function_type_list (V2SF_type_node,
10249 V2SF_type_node, NULL_TREE);
10251 tree v2sf_ftype_v2si
10252 = build_function_type_list (opaque_V2SF_type_node,
10253 opaque_V2SI_type_node, NULL_TREE);
10255 tree v2si_ftype_v2sf
10256 = build_function_type_list (opaque_V2SI_type_node,
10257 opaque_V2SF_type_node, NULL_TREE);
10259 tree v2si_ftype_v2si_char
10260 = build_function_type_list (opaque_V2SI_type_node,
10261 opaque_V2SI_type_node,
10262 char_type_node, NULL_TREE);
10264 tree v2si_ftype_int_char
10265 = build_function_type_list (opaque_V2SI_type_node,
10266 integer_type_node, char_type_node, NULL_TREE);
10268 tree v2si_ftype_char
10269 = build_function_type_list (opaque_V2SI_type_node,
10270 char_type_node, NULL_TREE);
10272 tree int_ftype_int_int
10273 = build_function_type_list (integer_type_node,
10274 integer_type_node, integer_type_node,
10277 tree opaque_ftype_opaque_opaque
10278 = build_function_type_list (opaque_V4SI_type_node,
10279 opaque_V4SI_type_node, opaque_V4SI_type_node, NULL_TREE);
10280 tree v4si_ftype_v4si_v4si
10281 = build_function_type_list (V4SI_type_node,
10282 V4SI_type_node, V4SI_type_node, NULL_TREE);
10283 tree v4sf_ftype_v4si_int
10284 = build_function_type_list (V4SF_type_node,
10285 V4SI_type_node, integer_type_node, NULL_TREE);
10286 tree v4si_ftype_v4sf_int
10287 = build_function_type_list (V4SI_type_node,
10288 V4SF_type_node, integer_type_node, NULL_TREE);
10289 tree v4si_ftype_v4si_int
10290 = build_function_type_list (V4SI_type_node,
10291 V4SI_type_node, integer_type_node, NULL_TREE);
10292 tree v8hi_ftype_v8hi_int
10293 = build_function_type_list (V8HI_type_node,
10294 V8HI_type_node, integer_type_node, NULL_TREE);
10295 tree v16qi_ftype_v16qi_int
10296 = build_function_type_list (V16QI_type_node,
10297 V16QI_type_node, integer_type_node, NULL_TREE);
10298 tree v16qi_ftype_v16qi_v16qi_int
10299 = build_function_type_list (V16QI_type_node,
10300 V16QI_type_node, V16QI_type_node,
10301 integer_type_node, NULL_TREE);
10302 tree v8hi_ftype_v8hi_v8hi_int
10303 = build_function_type_list (V8HI_type_node,
10304 V8HI_type_node, V8HI_type_node,
10305 integer_type_node, NULL_TREE);
10306 tree v4si_ftype_v4si_v4si_int
10307 = build_function_type_list (V4SI_type_node,
10308 V4SI_type_node, V4SI_type_node,
10309 integer_type_node, NULL_TREE);
10310 tree v4sf_ftype_v4sf_v4sf_int
10311 = build_function_type_list (V4SF_type_node,
10312 V4SF_type_node, V4SF_type_node,
10313 integer_type_node, NULL_TREE);
10314 tree v4sf_ftype_v4sf_v4sf
10315 = build_function_type_list (V4SF_type_node,
10316 V4SF_type_node, V4SF_type_node, NULL_TREE);
10317 tree opaque_ftype_opaque_opaque_opaque
10318 = build_function_type_list (opaque_V4SI_type_node,
10319 opaque_V4SI_type_node, opaque_V4SI_type_node,
10320 opaque_V4SI_type_node, NULL_TREE);
10321 tree v4sf_ftype_v4sf_v4sf_v4si
10322 = build_function_type_list (V4SF_type_node,
10323 V4SF_type_node, V4SF_type_node,
10324 V4SI_type_node, NULL_TREE);
10325 tree v4sf_ftype_v4sf_v4sf_v4sf
10326 = build_function_type_list (V4SF_type_node,
10327 V4SF_type_node, V4SF_type_node,
10328 V4SF_type_node, NULL_TREE);
10329 tree v4si_ftype_v4si_v4si_v4si
10330 = build_function_type_list (V4SI_type_node,
10331 V4SI_type_node, V4SI_type_node,
10332 V4SI_type_node, NULL_TREE);
10333 tree v8hi_ftype_v8hi_v8hi
10334 = build_function_type_list (V8HI_type_node,
10335 V8HI_type_node, V8HI_type_node, NULL_TREE);
10336 tree v8hi_ftype_v8hi_v8hi_v8hi
10337 = build_function_type_list (V8HI_type_node,
10338 V8HI_type_node, V8HI_type_node,
10339 V8HI_type_node, NULL_TREE);
10340 tree v4si_ftype_v8hi_v8hi_v4si
10341 = build_function_type_list (V4SI_type_node,
10342 V8HI_type_node, V8HI_type_node,
10343 V4SI_type_node, NULL_TREE);
10344 tree v4si_ftype_v16qi_v16qi_v4si
10345 = build_function_type_list (V4SI_type_node,
10346 V16QI_type_node, V16QI_type_node,
10347 V4SI_type_node, NULL_TREE);
10348 tree v16qi_ftype_v16qi_v16qi
10349 = build_function_type_list (V16QI_type_node,
10350 V16QI_type_node, V16QI_type_node, NULL_TREE);
10351 tree v4si_ftype_v4sf_v4sf
10352 = build_function_type_list (V4SI_type_node,
10353 V4SF_type_node, V4SF_type_node, NULL_TREE);
10354 tree v8hi_ftype_v16qi_v16qi
10355 = build_function_type_list (V8HI_type_node,
10356 V16QI_type_node, V16QI_type_node, NULL_TREE);
10357 tree v4si_ftype_v8hi_v8hi
10358 = build_function_type_list (V4SI_type_node,
10359 V8HI_type_node, V8HI_type_node, NULL_TREE);
10360 tree v8hi_ftype_v4si_v4si
10361 = build_function_type_list (V8HI_type_node,
10362 V4SI_type_node, V4SI_type_node, NULL_TREE);
10363 tree v16qi_ftype_v8hi_v8hi
10364 = build_function_type_list (V16QI_type_node,
10365 V8HI_type_node, V8HI_type_node, NULL_TREE);
10366 tree v4si_ftype_v16qi_v4si
10367 = build_function_type_list (V4SI_type_node,
10368 V16QI_type_node, V4SI_type_node, NULL_TREE);
10369 tree v4si_ftype_v16qi_v16qi
10370 = build_function_type_list (V4SI_type_node,
10371 V16QI_type_node, V16QI_type_node, NULL_TREE);
10372 tree v4si_ftype_v8hi_v4si
10373 = build_function_type_list (V4SI_type_node,
10374 V8HI_type_node, V4SI_type_node, NULL_TREE);
10375 tree v4si_ftype_v8hi
10376 = build_function_type_list (V4SI_type_node, V8HI_type_node, NULL_TREE);
10377 tree int_ftype_v4si_v4si
10378 = build_function_type_list (integer_type_node,
10379 V4SI_type_node, V4SI_type_node, NULL_TREE);
10380 tree int_ftype_v4sf_v4sf
10381 = build_function_type_list (integer_type_node,
10382 V4SF_type_node, V4SF_type_node, NULL_TREE);
10383 tree int_ftype_v16qi_v16qi
10384 = build_function_type_list (integer_type_node,
10385 V16QI_type_node, V16QI_type_node, NULL_TREE);
10386 tree int_ftype_v8hi_v8hi
10387 = build_function_type_list (integer_type_node,
10388 V8HI_type_node, V8HI_type_node, NULL_TREE);
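
  /* For illustration (a sketch, not taken from the descriptor tables):
     the loops below choose a signature from the insn operand modes, so
     a bdesc_3arg entry whose pattern has all four operands in V4SFmode,
     e.g. the AltiVec multiply-add builtin, is assumed to end up
     registered with the v4sf_ftype_v4sf_v4sf_v4sf type built above.  */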
10390 /* Add the simple ternary operators. */
10392 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
10394 enum machine_mode mode0, mode1, mode2, mode3;
10396 bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
10397 && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
10408 if (d->name == 0 || d->icode == CODE_FOR_nothing)
10411 mode0 = insn_data[d->icode].operand[0].mode;
10412 mode1 = insn_data[d->icode].operand[1].mode;
10413 mode2 = insn_data[d->icode].operand[2].mode;
10414 mode3 = insn_data[d->icode].operand[3].mode;
10417 /* When all four are of the same mode. */
10418 if (mode0 == mode1 && mode1 == mode2 && mode2 == mode3)
10423 type = opaque_ftype_opaque_opaque_opaque;
10426 type = v4si_ftype_v4si_v4si_v4si;
10429 type = v4sf_ftype_v4sf_v4sf_v4sf;
10432 type = v8hi_ftype_v8hi_v8hi_v8hi;
10435 type = v16qi_ftype_v16qi_v16qi_v16qi;
10438 type = v2sf_ftype_v2sf_v2sf_v2sf;
10441 gcc_unreachable ();
10444 else if (mode0 == mode1 && mode1 == mode2 && mode3 == V16QImode)
10449 type = v4si_ftype_v4si_v4si_v16qi;
10452 type = v4sf_ftype_v4sf_v4sf_v16qi;
10455 type = v8hi_ftype_v8hi_v8hi_v16qi;
10458 type = v16qi_ftype_v16qi_v16qi_v16qi;
10461 gcc_unreachable ();
10464 else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode
10465 && mode3 == V4SImode)
10466 type = v4si_ftype_v16qi_v16qi_v4si;
10467 else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode
10468 && mode3 == V4SImode)
10469 type = v4si_ftype_v8hi_v8hi_v4si;
10470 else if (mode0 == V4SFmode && mode1 == V4SFmode && mode2 == V4SFmode
10471 && mode3 == V4SImode)
10472 type = v4sf_ftype_v4sf_v4sf_v4si;
10474 /* vchar, vchar, vchar, 4-bit literal. */
10475 else if (mode0 == V16QImode && mode1 == mode0 && mode2 == mode0
10476 && mode3 == QImode)
10477 type = v16qi_ftype_v16qi_v16qi_int;
10479 /* vshort, vshort, vshort, 4-bit literal. */
10480 else if (mode0 == V8HImode && mode1 == mode0 && mode2 == mode0
10481 && mode3 == QImode)
10482 type = v8hi_ftype_v8hi_v8hi_int;
10484 /* vint, vint, vint, 4-bit literal. */
10485 else if (mode0 == V4SImode && mode1 == mode0 && mode2 == mode0
10486 && mode3 == QImode)
10487 type = v4si_ftype_v4si_v4si_int;
10489 /* vfloat, vfloat, vfloat, 4-bit literal. */
10490 else if (mode0 == V4SFmode && mode1 == mode0 && mode2 == mode0
10491 && mode3 == QImode)
10492 type = v4sf_ftype_v4sf_v4sf_int;
10495 gcc_unreachable ();
10497 def_builtin (d->mask, d->name, type, d->code);
10500 /* Add the simple binary operators. */
10501 d = (struct builtin_description *) bdesc_2arg;
10502 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
10504 enum machine_mode mode0, mode1, mode2;
10506 bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
10507 && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
10517 if (d->name == 0 || d->icode == CODE_FOR_nothing)
10520 mode0 = insn_data[d->icode].operand[0].mode;
10521 mode1 = insn_data[d->icode].operand[1].mode;
10522 mode2 = insn_data[d->icode].operand[2].mode;
10525 /* When all three operands are of the same mode. */
10526 if (mode0 == mode1 && mode1 == mode2)
10531 type = opaque_ftype_opaque_opaque;
10534 type = v4sf_ftype_v4sf_v4sf;
10537 type = v4si_ftype_v4si_v4si;
10540 type = v16qi_ftype_v16qi_v16qi;
10543 type = v8hi_ftype_v8hi_v8hi;
10546 type = v2si_ftype_v2si_v2si;
10549 if (TARGET_PAIRED_FLOAT)
10550 type = v2sf_ftype_v2sf_v2sf;
10552 type = v2sf_ftype_v2sf_v2sf_spe;
10555 type = int_ftype_int_int;
10558 gcc_unreachable ();
10562 /* A few other combos we really don't want to do manually. */
10564 /* vint, vfloat, vfloat. */
10565 else if (mode0 == V4SImode && mode1 == V4SFmode && mode2 == V4SFmode)
10566 type = v4si_ftype_v4sf_v4sf;
10568 /* vshort, vchar, vchar. */
10569 else if (mode0 == V8HImode && mode1 == V16QImode && mode2 == V16QImode)
10570 type = v8hi_ftype_v16qi_v16qi;
10572 /* vint, vshort, vshort. */
10573 else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode)
10574 type = v4si_ftype_v8hi_v8hi;
10576 /* vshort, vint, vint. */
10577 else if (mode0 == V8HImode && mode1 == V4SImode && mode2 == V4SImode)
10578 type = v8hi_ftype_v4si_v4si;
10580 /* vchar, vshort, vshort. */
10581 else if (mode0 == V16QImode && mode1 == V8HImode && mode2 == V8HImode)
10582 type = v16qi_ftype_v8hi_v8hi;
10584 /* vint, vchar, vint. */
10585 else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V4SImode)
10586 type = v4si_ftype_v16qi_v4si;
10588 /* vint, vchar, vchar. */
10589 else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode)
10590 type = v4si_ftype_v16qi_v16qi;
10592 /* vint, vshort, vint. */
10593 else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V4SImode)
10594 type = v4si_ftype_v8hi_v4si;
10596 /* vint, vint, 5-bit literal. */
10597 else if (mode0 == V4SImode && mode1 == V4SImode && mode2 == QImode)
10598 type = v4si_ftype_v4si_int;
10600 /* vshort, vshort, 5-bit literal. */
10601 else if (mode0 == V8HImode && mode1 == V8HImode && mode2 == QImode)
10602 type = v8hi_ftype_v8hi_int;
10604 /* vchar, vchar, 5-bit literal. */
10605 else if (mode0 == V16QImode && mode1 == V16QImode && mode2 == QImode)
10606 type = v16qi_ftype_v16qi_int;
10608 /* vfloat, vint, 5-bit literal. */
10609 else if (mode0 == V4SFmode && mode1 == V4SImode && mode2 == QImode)
10610 type = v4sf_ftype_v4si_int;
10612 /* vint, vfloat, 5-bit literal. */
10613 else if (mode0 == V4SImode && mode1 == V4SFmode && mode2 == QImode)
10614 type = v4si_ftype_v4sf_int;
10616 else if (mode0 == V2SImode && mode1 == SImode && mode2 == SImode)
10617 type = v2si_ftype_int_int;
10619 else if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
10620 type = v2si_ftype_v2si_char;
10622 else if (mode0 == V2SImode && mode1 == SImode && mode2 == QImode)
10623 type = v2si_ftype_int_char;
10628 gcc_assert (mode0 == SImode);
10632 type = int_ftype_v4si_v4si;
10635 type = int_ftype_v4sf_v4sf;
10638 type = int_ftype_v16qi_v16qi;
10641 type = int_ftype_v8hi_v8hi;
10644 gcc_unreachable ();
10648 def_builtin (d->mask, d->name, type, d->code);
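/* The binary table is classified the same way. For illustration: an insn
   whose operand modes are (V4SI, V4SF, V4SF), such as a vector float
   comparison producing a mask, is given v4si_ftype_v4sf_v4sf, while the
   (V4SI, V4SI, QImode) combination is treated as vector plus 5-bit
   literal and gets v4si_ftype_v4si_int. */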
10651 /* Add the simple unary operators. */
10652 d = (struct builtin_description *) bdesc_1arg;
10653 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
10655 enum machine_mode mode0, mode1;
10657 bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
10658 && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
10667 if (d->name == 0 || d->icode == CODE_FOR_nothing)
10670 mode0 = insn_data[d->icode].operand[0].mode;
10671 mode1 = insn_data[d->icode].operand[1].mode;
10674 if (mode0 == V4SImode && mode1 == QImode)
10675 type = v4si_ftype_int;
10676 else if (mode0 == V8HImode && mode1 == QImode)
10677 type = v8hi_ftype_int;
10678 else if (mode0 == V16QImode && mode1 == QImode)
10679 type = v16qi_ftype_int;
10680 else if (mode0 == VOIDmode && mode1 == VOIDmode)
10681 type = opaque_ftype_opaque;
10682 else if (mode0 == V4SFmode && mode1 == V4SFmode)
10683 type = v4sf_ftype_v4sf;
10684 else if (mode0 == V8HImode && mode1 == V16QImode)
10685 type = v8hi_ftype_v16qi;
10686 else if (mode0 == V4SImode && mode1 == V8HImode)
10687 type = v4si_ftype_v8hi;
10688 else if (mode0 == V2SImode && mode1 == V2SImode)
10689 type = v2si_ftype_v2si;
10690 else if (mode0 == V2SFmode && mode1 == V2SFmode)
10692 if (TARGET_PAIRED_FLOAT)
10693 type = v2sf_ftype_v2sf;
10695 type = v2sf_ftype_v2sf_spe;
10697 else if (mode0 == V2SFmode && mode1 == V2SImode)
10698 type = v2sf_ftype_v2si;
10699 else if (mode0 == V2SImode && mode1 == V2SFmode)
10700 type = v2si_ftype_v2sf;
10701 else if (mode0 == V2SImode && mode1 == QImode)
10702 type = v2si_ftype_char;
10704 gcc_unreachable ();
10706 def_builtin (d->mask, d->name, type, d->code);
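/* For illustration: the vector splat-immediate builtins have a single
   QImode operand standing for a 5-bit signed literal, so they match the
   first three cases above; the V4SI variant, for example, ends up with
   the prototype v4si_ftype_int. */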
10711 rs6000_init_libfuncs (void)
10713 if (DEFAULT_ABI != ABI_V4 && TARGET_XCOFF
10714 && !TARGET_POWER2 && !TARGET_POWERPC)
10716 /* AIX library routines for float->int conversion. */
10717 set_conv_libfunc (sfix_optab, SImode, DFmode, "__itrunc");
10718 set_conv_libfunc (ufix_optab, SImode, DFmode, "__uitrunc");
10719 set_conv_libfunc (sfix_optab, SImode, TFmode, "_qitrunc");
10720 set_conv_libfunc (ufix_optab, SImode, TFmode, "_quitrunc");
10723 if (!TARGET_IEEEQUAD)
10724 /* AIX/Darwin/64-bit Linux quad floating point routines. */
10725 if (!TARGET_XL_COMPAT)
10727 set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
10728 set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
10729 set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
10730 set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
10732 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
10734 set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
10735 set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
10736 set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
10737 set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
10738 set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
10739 set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
10740 set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
10742 set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
10743 set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
10744 set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
10745 set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
10746 set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
10747 set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
10748 set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
10749 set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
10752 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
10753 set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
10757 set_optab_libfunc (add_optab, TFmode, "_xlqadd");
10758 set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
10759 set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
10760 set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
10764 /* 32-bit SVR4 quad floating point routines. */
10766 set_optab_libfunc (add_optab, TFmode, "_q_add");
10767 set_optab_libfunc (sub_optab, TFmode, "_q_sub");
10768 set_optab_libfunc (neg_optab, TFmode, "_q_neg");
10769 set_optab_libfunc (smul_optab, TFmode, "_q_mul");
10770 set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
10771 if (TARGET_PPC_GPOPT || TARGET_POWER2)
10772 set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
10774 set_optab_libfunc (eq_optab, TFmode, "_q_feq");
10775 set_optab_libfunc (ne_optab, TFmode, "_q_fne");
10776 set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
10777 set_optab_libfunc (ge_optab, TFmode, "_q_fge");
10778 set_optab_libfunc (lt_optab, TFmode, "_q_flt");
10779 set_optab_libfunc (le_optab, TFmode, "_q_fle");
10781 set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
10782 set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
10783 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
10784 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
10785 set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
10786 set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
10787 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
10788 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
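/* Rough example of the effect of these mappings (not tied to a particular
   target configuration): when the 128-bit IBM long double format is in
   use and no inline expansion exists, a source expression such as

     long double c = a + b;

   becomes a call to __gcc_qadd with the AIX-style routines above, or to
   _q_add under the 32-bit SVR4 set. */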
10793 /* Expand a block clear operation, and return 1 if successful. Return 0
10794 if we should let the compiler generate normal code.
10796 operands[0] is the destination
10797 operands[1] is the length
10798 operands[3] is the alignment */
10801 expand_block_clear (rtx operands[])
10803 rtx orig_dest = operands[0];
10804 rtx bytes_rtx = operands[1];
10805 rtx align_rtx = operands[3];
10806 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
10807 HOST_WIDE_INT align;
10808 HOST_WIDE_INT bytes;
10813 /* If this is not a fixed size clear, just call memset */
10817 /* This must be a fixed size alignment */
10818 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
10819 align = INTVAL (align_rtx) * BITS_PER_UNIT;
10821 /* Anything to clear? */
10822 bytes = INTVAL (bytes_rtx);
10826 /* Use the builtin memset after a point, to avoid huge code bloat.
10827 When optimize_size, avoid any significant code bloat; calling
10828 memset is about 4 instructions, so allow for one instruction to
10829 load zero and three to do clearing. */
10830 if (TARGET_ALTIVEC && align >= 128)
10832 else if (TARGET_POWERPC64 && align >= 32)
10834 else if (TARGET_SPE && align >= 64)
10839 if (optimize_size && bytes > 3 * clear_step)
10841 if (! optimize_size && bytes > 8 * clear_step)
10844 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
10846 enum machine_mode mode = BLKmode;
10849 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
10854 else if (bytes >= 8 && TARGET_SPE && align >= 64)
10859 else if (bytes >= 8 && TARGET_POWERPC64
10860 /* 64-bit loads and stores require word-aligned
10862 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
10867 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
10868 { /* move 4 bytes */
10872 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
10873 { /* move 2 bytes */
10877 else /* move 1 byte at a time */
10883 dest = adjust_address (orig_dest, mode, offset);
10885 emit_move_insn (dest, CONST0_RTX (mode));
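/* Worked example (assuming a 64-bit PowerPC target without AltiVec):
   clearing 16 bytes known to be only 32-bit aligned takes the DImode
   branch above when STRICT_ALIGNMENT is not in force and emits two
   doubleword stores of zero; with strict alignment it falls back to four
   SImode stores instead. */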
10892 /* Expand a block move operation, and return 1 if successful. Return 0
10893 if we should let the compiler generate normal code.
10895 operands[0] is the destination
10896 operands[1] is the source
10897 operands[2] is the length
10898 operands[3] is the alignment */
10900 #define MAX_MOVE_REG 4
10903 expand_block_move (rtx operands[])
10905 rtx orig_dest = operands[0];
10906 rtx orig_src = operands[1];
10907 rtx bytes_rtx = operands[2];
10908 rtx align_rtx = operands[3];
10909 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
10914 rtx stores[MAX_MOVE_REG];
10917 /* If this is not a fixed size move, just call memcpy */
10921 /* This must be a fixed size alignment */
10922 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
10923 align = INTVAL (align_rtx) * BITS_PER_UNIT;
10925 /* Anything to move? */
10926 bytes = INTVAL (bytes_rtx);
10930 /* store_one_arg depends on expand_block_move to handle at least the size of
10931 reg_parm_stack_space. */
10932 if (bytes > (TARGET_POWERPC64 ? 64 : 32))
10935 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
10938 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
10939 rtx (*mov) (rtx, rtx);
10941 enum machine_mode mode = BLKmode;
10944 /* Altivec first, since it will be faster than a string move
10945 when it applies, and usually not significantly larger. */
10946 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
10950 gen_func.mov = gen_movv4si;
10952 else if (TARGET_SPE && bytes >= 8 && align >= 64)
10956 gen_func.mov = gen_movv2si;
10958 else if (TARGET_STRING
10959 && bytes > 24 /* move up to 32 bytes at a time */
10965 && ! fixed_regs[10]
10966 && ! fixed_regs[11]
10967 && ! fixed_regs[12])
10969 move_bytes = (bytes > 32) ? 32 : bytes;
10970 gen_func.movmemsi = gen_movmemsi_8reg;
10972 else if (TARGET_STRING
10973 && bytes > 16 /* move up to 24 bytes at a time */
10979 && ! fixed_regs[10])
10981 move_bytes = (bytes > 24) ? 24 : bytes;
10982 gen_func.movmemsi = gen_movmemsi_6reg;
10984 else if (TARGET_STRING
10985 && bytes > 8 /* move up to 16 bytes at a time */
10989 && ! fixed_regs[8])
10991 move_bytes = (bytes > 16) ? 16 : bytes;
10992 gen_func.movmemsi = gen_movmemsi_4reg;
10994 else if (bytes >= 8 && TARGET_POWERPC64
10995 /* 64-bit loads and stores require word-aligned
10997 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
11001 gen_func.mov = gen_movdi;
11003 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
11004 { /* move up to 8 bytes at a time */
11005 move_bytes = (bytes > 8) ? 8 : bytes;
11006 gen_func.movmemsi = gen_movmemsi_2reg;
11008 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
11009 { /* move 4 bytes */
11012 gen_func.mov = gen_movsi;
11014 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
11015 { /* move 2 bytes */
11018 gen_func.mov = gen_movhi;
11020 else if (TARGET_STRING && bytes > 1)
11021 { /* move up to 4 bytes at a time */
11022 move_bytes = (bytes > 4) ? 4 : bytes;
11023 gen_func.movmemsi = gen_movmemsi_1reg;
11025 else /* move 1 byte at a time */
11029 gen_func.mov = gen_movqi;
11032 src = adjust_address (orig_src, mode, offset);
11033 dest = adjust_address (orig_dest, mode, offset);
11035 if (mode != BLKmode)
11037 rtx tmp_reg = gen_reg_rtx (mode);
11039 emit_insn ((*gen_func.mov) (tmp_reg, src));
11040 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
11043 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
11046 for (i = 0; i < num_reg; i++)
11047 emit_insn (stores[i]);
11051 if (mode == BLKmode)
11053 /* Move the address into scratch registers. The movmemsi
11054 patterns require zero offset. */
11055 if (!REG_P (XEXP (src, 0)))
11057 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
11058 src = replace_equiv_address (src, src_reg);
11060 set_mem_size (src, GEN_INT (move_bytes));
11062 if (!REG_P (XEXP (dest, 0)))
11064 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
11065 dest = replace_equiv_address (dest, dest_reg);
11067 set_mem_size (dest, GEN_INT (move_bytes));
11069 emit_insn ((*gen_func.movmemsi) (dest, src,
11070 GEN_INT (move_bytes & 31),
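/* Worked example (assuming a 32-bit target): a 24-byte copy with word
   alignment and TARGET_STRING enabled is handled by the movmemsi_6reg
   branch above as a single lswi/stswi pair; without the string
   instructions the same copy becomes six SImode load/store pairs, with
   the queued stores flushed at most MAX_MOVE_REG at a time. */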
11079 /* Return a string to perform a load_multiple operation.
11080 operands[0] is the vector.
11081 operands[1] is the source address.
11082 operands[2] is the first destination register. */
11085 rs6000_output_load_multiple (rtx operands[3])
11087 /* We have to handle the case where the pseudo used to contain the address
11088 is assigned to one of the output registers. */
11090 int words = XVECLEN (operands[0], 0);
11093 if (XVECLEN (operands[0], 0) == 1)
11094 return "{l|lwz} %2,0(%1)";
11096 for (i = 0; i < words; i++)
11097 if (refers_to_regno_p (REGNO (operands[2]) + i,
11098 REGNO (operands[2]) + i + 1, operands[1], 0))
11102 xop[0] = GEN_INT (4 * (words-1));
11103 xop[1] = operands[1];
11104 xop[2] = operands[2];
11105 output_asm_insn ("{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,%0(%1)", xop);
11110 xop[0] = GEN_INT (4 * (words-1));
11111 xop[1] = operands[1];
11112 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
11113 output_asm_insn ("{cal %1,4(%1)|addi %1,%1,4}\n\t{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,-4(%1)", xop);
11118 for (j = 0; j < words; j++)
11121 xop[0] = GEN_INT (j * 4);
11122 xop[1] = operands[1];
11123 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
11124 output_asm_insn ("{l|lwz} %2,%0(%1)", xop);
11126 xop[0] = GEN_INT (i * 4);
11127 xop[1] = operands[1];
11128 output_asm_insn ("{l|lwz} %1,%0(%1)", xop);
11133 return "{lsi|lswi} %2,%1,%N0";
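/* As an illustration (register numbers chosen arbitrarily): loading four
   words into r9..r12 from an address held in r12 hits the first special
   case above and emits, with PowerPC mnemonics,

     lswi 9,12,12
     lwz 12,12(12)

   so the word that overwrites the address register is loaded last. */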
11137 /* A validation routine: say whether CODE, a condition code, and MODE
11138 match. The other alternatives either don't make sense or should
11139 never be generated. */
11142 validate_condition_mode (enum rtx_code code, enum machine_mode mode)
11144 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
11145 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
11146 && GET_MODE_CLASS (mode) == MODE_CC);
11148 /* These don't make sense. */
11149 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
11150 || mode != CCUNSmode);
11152 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
11153 || mode == CCUNSmode);
11155 gcc_assert (mode == CCFPmode
11156 || (code != ORDERED && code != UNORDERED
11157 && code != UNEQ && code != LTGT
11158 && code != UNGT && code != UNLT
11159 && code != UNGE && code != UNLE));
11161 /* These should never be generated except for
11162 flag_finite_math_only. */
11163 gcc_assert (mode != CCFPmode
11164 || flag_finite_math_only
11165 || (code != LE && code != GE
11166 && code != UNEQ && code != LTGT
11167 && code != UNGT && code != UNLT));
11169 /* These are invalid; the information is not there. */
11170 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
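/* For example, a GTU test of a CCUNSmode register is acceptable, while a
   signed GT on CCUNSmode, or anything other than EQ/NE on CCEQmode,
   trips one of the asserts above. */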
11174 /* Return 1 if ANDOP is a mask with no bits set outside the mask
11175 required to convert the result of a rotate insn into a shift
11176 left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
11179 includes_lshift_p (rtx shiftop, rtx andop)
11181 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
11183 shift_mask <<= INTVAL (shiftop);
11185 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
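/* Worked example: with SHIFTOP == 3 the shift mask is 0xfffffff8, so
   ANDOP == 0xffffff00 is accepted (rotate left by 3 followed by that AND
   behaves exactly like a shift left by 3 under the mask), whereas
   ANDOP == 0xffffff04 is rejected because bit 2 could capture a
   rotated-in bit. */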
11188 /* Similar, but for right shift. */
11191 includes_rshift_p (rtx shiftop, rtx andop)
11193 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
11195 shift_mask >>= INTVAL (shiftop);
11197 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
11200 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
11201 to perform a left shift. It must have exactly SHIFTOP least
11202 significant 0's, then one or more 1's, then zero or more 0's. */
11205 includes_rldic_lshift_p (rtx shiftop, rtx andop)
11207 if (GET_CODE (andop) == CONST_INT)
11209 HOST_WIDE_INT c, lsb, shift_mask;
11211 c = INTVAL (andop);
11212 if (c == 0 || c == ~0)
11216 shift_mask <<= INTVAL (shiftop);
11218 /* Find the least significant one bit. */
11221 /* It must coincide with the LSB of the shift mask. */
11222 if (-lsb != shift_mask)
11225 /* Invert to look for the next transition (if any). */
11228 /* Remove the low group of ones (originally low group of zeros). */
11231 /* Again find the lsb, and check we have all 1's above. */
11235 else if (GET_CODE (andop) == CONST_DOUBLE
11236 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
11238 HOST_WIDE_INT low, high, lsb;
11239 HOST_WIDE_INT shift_mask_low, shift_mask_high;
11241 low = CONST_DOUBLE_LOW (andop);
11242 if (HOST_BITS_PER_WIDE_INT < 64)
11243 high = CONST_DOUBLE_HIGH (andop);
11245 if ((low == 0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == 0))
11246 || (low == ~0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0)))
11249 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
11251 shift_mask_high = ~0;
11252 if (INTVAL (shiftop) > 32)
11253 shift_mask_high <<= INTVAL (shiftop) - 32;
11255 lsb = high & -high;
11257 if (-lsb != shift_mask_high || INTVAL (shiftop) < 32)
11263 lsb = high & -high;
11264 return high == -lsb;
11267 shift_mask_low = ~0;
11268 shift_mask_low <<= INTVAL (shiftop);
11272 if (-lsb != shift_mask_low)
11275 if (HOST_BITS_PER_WIDE_INT < 64)
11280 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
11282 lsb = high & -high;
11283 return high == -lsb;
11287 return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
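/* Worked example for the CONST_INT case: with SHIFTOP == 8 the constant
   0xff00 qualifies -- exactly eight low zeros, one contiguous run of
   ones, zeros above -- while 0xff0f00 is rejected because its ones do not
   form a single contiguous group. */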
11293 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
11294 to perform a left shift. It must have SHIFTOP or more least
11295 significant 0's, with the remainder of the word 1's. */
11298 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
11300 if (GET_CODE (andop) == CONST_INT)
11302 HOST_WIDE_INT c, lsb, shift_mask;
11305 shift_mask <<= INTVAL (shiftop);
11306 c = INTVAL (andop);
11308 /* Find the least significant one bit. */
11311 /* It must be covered by the shift mask.
11312 This test also rejects c == 0. */
11313 if ((lsb & shift_mask) == 0)
11316 /* Check we have all 1's above the transition, and reject all 1's. */
11317 return c == -lsb && lsb != 1;
11319 else if (GET_CODE (andop) == CONST_DOUBLE
11320 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
11322 HOST_WIDE_INT low, lsb, shift_mask_low;
11324 low = CONST_DOUBLE_LOW (andop);
11326 if (HOST_BITS_PER_WIDE_INT < 64)
11328 HOST_WIDE_INT high, shift_mask_high;
11330 high = CONST_DOUBLE_HIGH (andop);
11334 shift_mask_high = ~0;
11335 if (INTVAL (shiftop) > 32)
11336 shift_mask_high <<= INTVAL (shiftop) - 32;
11338 lsb = high & -high;
11340 if ((lsb & shift_mask_high) == 0)
11343 return high == -lsb;
11349 shift_mask_low = ~0;
11350 shift_mask_low <<= INTVAL (shiftop);
11354 if ((lsb & shift_mask_low) == 0)
11357 return low == -lsb && lsb != 1;
11363 /* Return 1 if the operands will generate valid arguments for an rlwimi
11364 instruction doing an insert with right shift in 64-bit mode. The mask may
11365 not start on the first bit or stop on the last bit because the wrap-around
11366 effects of the instruction do not correspond to the semantics of the RTL insn. */
11369 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
11371 if (INTVAL (startop) > 32
11372 && INTVAL (startop) < 64
11373 && INTVAL (sizeop) > 1
11374 && INTVAL (sizeop) + INTVAL (startop) < 64
11375 && INTVAL (shiftop) > 0
11376 && INTVAL (sizeop) + INTVAL (shiftop) < 32
11377 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
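/* As an illustration: SIZEOP == 8, STARTOP == 48, SHIFTOP == 16 passes
   every test above -- the inserted field lies strictly inside the low
   word and touches neither of its ends -- so the insert can be done with
   a single rlwimi. */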
11383 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
11384 for lfq and stfq insns iff the registers are hard registers. */
11387 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
11389 /* We might have been passed a SUBREG. */
11390 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
11393 /* We might have been passed non floating point registers. */
11394 if (!FP_REGNO_P (REGNO (reg1))
11395 || !FP_REGNO_P (REGNO (reg2)))
11398 return (REGNO (reg1) == REGNO (reg2) - 1);
11401 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
11402 addr1 and addr2 must be in consecutive memory locations
11403 (addr2 == addr1 + 8). */
11406 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
11409 unsigned int reg1, reg2;
11410 int offset1, offset2;
11412 /* The mems cannot be volatile. */
11413 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
11416 addr1 = XEXP (mem1, 0);
11417 addr2 = XEXP (mem2, 0);
11419 /* Extract an offset (if used) from the first addr. */
11420 if (GET_CODE (addr1) == PLUS)
11422 /* If not a REG, return zero. */
11423 if (GET_CODE (XEXP (addr1, 0)) != REG)
11427 reg1 = REGNO (XEXP (addr1, 0));
11428 /* The offset must be constant! */
11429 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
11431 offset1 = INTVAL (XEXP (addr1, 1));
11434 else if (GET_CODE (addr1) != REG)
11438 reg1 = REGNO (addr1);
11439 /* This was a simple (mem (reg)) expression. Offset is 0. */
11443 /* And now for the second addr. */
11444 if (GET_CODE (addr2) == PLUS)
11446 /* If not a REG, return zero. */
11447 if (GET_CODE (XEXP (addr2, 0)) != REG)
11451 reg2 = REGNO (XEXP (addr2, 0));
11452 /* The offset must be constant. */
11453 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
11455 offset2 = INTVAL (XEXP (addr2, 1));
11458 else if (GET_CODE (addr2) != REG)
11462 reg2 = REGNO (addr2);
11463 /* This was a simple (mem (reg)) expression. Offset is 0. */
11467 /* Both of these must have the same base register. */
11471 /* The offset for the second addr must be 8 more than the first addr. */
11472 if (offset2 != offset1 + 8)
11475 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
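/* As an illustration (register numbers are only an example): the POWER2
   peephole this feeds can combine

     lfd 6,0(3)
     lfd 7,8(3)

   into a single lfq 6,0(3), since the two addresses share a base register
   and differ by exactly 8. */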
11482 rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
11484 static bool eliminated = false;
11485 if (mode != SDmode)
11486 return assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
11489 rtx mem = cfun->machine->sdmode_stack_slot;
11490 gcc_assert (mem != NULL_RTX);
11494 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
11495 cfun->machine->sdmode_stack_slot = mem;
11503 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
11505 /* Don't walk into types. */
11506 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
11508 *walk_subtrees = 0;
11512 switch (TREE_CODE (*tp))
11520 case ALIGN_INDIRECT_REF:
11521 case MISALIGNED_INDIRECT_REF:
11522 case VIEW_CONVERT_EXPR:
11523 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
11534 /* Allocate a 64-bit stack slot to be used for copying SDmode
11535 values through if this function has any SDmode references. */
11538 rs6000_alloc_sdmode_stack_slot (void)
11542 gimple_stmt_iterator gsi;
11544 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
11547 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
11549 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
11552 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
11553 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
11559 /* Check for any SDmode parameters of the function. */
11560 for (t = DECL_ARGUMENTS (cfun->decl); t; t = TREE_CHAIN (t))
11562 if (TREE_TYPE (t) == error_mark_node)
11565 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
11566 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
11568 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
11569 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
11577 rs6000_instantiate_decls (void)
11579 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
11580 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
11583 /* Return the register class of a scratch register needed to copy IN into
11584 or out of a register in RCLASS in MODE. If it can be done directly,
11585 NO_REGS is returned. */
11588 rs6000_secondary_reload_class (enum reg_class rclass,
11589 enum machine_mode mode ATTRIBUTE_UNUSED,
11594 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
11596 && MACHOPIC_INDIRECT
11600 /* We cannot copy a symbolic operand directly into anything
11601 other than BASE_REGS for TARGET_ELF. So indicate that a
11602 register from BASE_REGS is needed as an intermediate
11605 On Darwin, pic addresses require a load from memory, which
11606 needs a base register. */
11607 if (rclass != BASE_REGS
11608 && (GET_CODE (in) == SYMBOL_REF
11609 || GET_CODE (in) == HIGH
11610 || GET_CODE (in) == LABEL_REF
11611 || GET_CODE (in) == CONST))
11615 if (GET_CODE (in) == REG)
11617 regno = REGNO (in);
11618 if (regno >= FIRST_PSEUDO_REGISTER)
11620 regno = true_regnum (in);
11621 if (regno >= FIRST_PSEUDO_REGISTER)
11625 else if (GET_CODE (in) == SUBREG)
11627 regno = true_regnum (in);
11628 if (regno >= FIRST_PSEUDO_REGISTER)
11634 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
11636 if (rclass == GENERAL_REGS || rclass == BASE_REGS
11637 || (regno >= 0 && INT_REGNO_P (regno)))
11640 /* Constants, memory, and FP registers can go into FP registers. */
11641 if ((regno == -1 || FP_REGNO_P (regno))
11642 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
11643 return (mode != SDmode) ? NO_REGS : GENERAL_REGS;
11645 /* Memory, and AltiVec registers can go into AltiVec registers. */
11646 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
11647 && rclass == ALTIVEC_REGS)
11650 /* We can copy among the CR registers. */
11651 if ((rclass == CR_REGS || rclass == CR0_REGS)
11652 && regno >= 0 && CR_REGNO_P (regno))
11655 /* Otherwise, we need GENERAL_REGS. */
11656 return GENERAL_REGS;
11659 /* Given a comparison operation, return the bit number in CCR to test. We
11660 know this is a valid comparison.
11662 SCC_P is 1 if this is for an scc. That means that %D will have been
11663 used instead of %C, so the bits will be in different places.
11665 Return -1 if OP isn't a valid comparison for some reason. */
11668 ccr_bit (rtx op, int scc_p)
11670 enum rtx_code code = GET_CODE (op);
11671 enum machine_mode cc_mode;
11676 if (!COMPARISON_P (op))
11679 reg = XEXP (op, 0);
11681 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
11683 cc_mode = GET_MODE (reg);
11684 cc_regnum = REGNO (reg);
11685 base_bit = 4 * (cc_regnum - CR0_REGNO);
11687 validate_condition_mode (code, cc_mode);
11689 /* When generating a sCOND operation, only positive conditions are
11692 || code == EQ || code == GT || code == LT || code == UNORDERED
11693 || code == GTU || code == LTU);
11698 return scc_p ? base_bit + 3 : base_bit + 2;
11700 return base_bit + 2;
11701 case GT: case GTU: case UNLE:
11702 return base_bit + 1;
11703 case LT: case LTU: case UNGE:
11705 case ORDERED: case UNORDERED:
11706 return base_bit + 3;
11709 /* If scc, we will have done a cror to put the bit in the
11710 unordered position. So test that bit. For integer, this is ! LT
11711 unless this is an scc insn. */
11712 return scc_p ? base_bit + 3 : base_bit;
11715 return scc_p ? base_bit + 3 : base_bit + 1;
11718 gcc_unreachable ();
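/* As a worked example: for a condition held in cr2, base_bit is 4 * 2 = 8,
   so an EQ test returns bit 10 (the EQ bit of that field), while an NE
   test for an scc returns bit 11, because the scc expansion has already
   cror'ed the wanted result into the field's fourth bit. */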
11722 /* Return the GOT register. */
11725 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
11727 /* The second flow pass currently (June 1999) can't update
11728 regs_ever_live without disturbing other parts of the compiler, so
11729 update it here to make the prolog/epilogue code happy. */
11730 if (!can_create_pseudo_p ()
11731 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
11732 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
11734 crtl->uses_pic_offset_table = 1;
11736 return pic_offset_table_rtx;
11739 /* Function to init struct machine_function.
11740 This will be called, via a pointer variable,
11741 from push_function_context. */
11743 static struct machine_function *
11744 rs6000_init_machine_status (void)
11746 return GGC_CNEW (machine_function);
11749 /* These macros test for integers and extract the low-order bits. */
11751 ((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE) \
11752 && GET_MODE (X) == VOIDmode)
11754 #define INT_LOWPART(X) \
11755 (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))
11758 extract_MB (rtx op)
11761 unsigned long val = INT_LOWPART (op);
11763 /* If the high bit is zero, the value is the first 1 bit we find
11765 if ((val & 0x80000000) == 0)
11767 gcc_assert (val & 0xffffffff);
11770 while (((val <<= 1) & 0x80000000) == 0)
11775 /* If the high bit is set and the low bit is not, or the mask is all
11776 1's, the value is zero. */
11777 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
11780 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
11783 while (((val >>= 1) & 1) != 0)
11790 extract_ME (rtx op)
11793 unsigned long val = INT_LOWPART (op);
11795 /* If the low bit is zero, the value is the first 1 bit we find from
11797 if ((val & 1) == 0)
11799 gcc_assert (val & 0xffffffff);
11802 while (((val >>= 1) & 1) == 0)
11808 /* If the low bit is set and the high bit is not, or the mask is all
11809 1's, the value is 31. */
11810 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
11813 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
11816 while (((val <<= 1) & 0x80000000) != 0)
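/* Worked example: for the mask 0x0000fff0 the two routines above yield
   MB = 16 and ME = 27 (IBM bit numbering, bit 0 being the most
   significant), so the mask corresponds to the rlwinm operand pair
   "16,27". */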
11822 /* Locate some local-dynamic symbol still in use by this function
11823 so that we can print its name in some tls_ld pattern. */
11825 static const char *
11826 rs6000_get_some_local_dynamic_name (void)
11830 if (cfun->machine->some_ld_name)
11831 return cfun->machine->some_ld_name;
11833 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11835 && for_each_rtx (&PATTERN (insn),
11836 rs6000_get_some_local_dynamic_name_1, 0))
11837 return cfun->machine->some_ld_name;
11839 gcc_unreachable ();
11842 /* Helper function for rs6000_get_some_local_dynamic_name. */
11845 rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11849 if (GET_CODE (x) == SYMBOL_REF)
11851 const char *str = XSTR (x, 0);
11852 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11854 cfun->machine->some_ld_name = str;
11862 /* Write out a function code label. */
11865 rs6000_output_function_entry (FILE *file, const char *fname)
11867 if (fname[0] != '.')
11869 switch (DEFAULT_ABI)
11872 gcc_unreachable ();
11878 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
11887 RS6000_OUTPUT_BASENAME (file, fname);
11889 assemble_name (file, fname);
11892 /* Print an operand. Recognize special options, documented below. */
11895 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
11896 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
11898 #define SMALL_DATA_RELOC "sda21"
11899 #define SMALL_DATA_REG 0
11903 print_operand (FILE *file, rtx x, int code)
11907 unsigned HOST_WIDE_INT uval;
11912 /* Write out an instruction after the call which may be replaced
11913 with glue code by the loader. This depends on the AIX version. */
11914 asm_fprintf (file, RS6000_CALL_GLUE);
11917 /* %a is output_address. */
11920 /* If X is a constant integer whose low-order 5 bits are zero,
11921 write 'l'. Otherwise, write 'r'. This is a kludge to fix a bug
11922 in the AIX assembler where "sri" with a zero shift count
11923 writes a trash instruction. */
11924 if (GET_CODE (x) == CONST_INT && (INTVAL (x) & 31) == 0)
11931 /* If constant, low-order 16 bits of constant, unsigned.
11932 Otherwise, write normally. */
11934 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 0xffff);
11936 print_operand (file, x, 0);
11940 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
11941 for 64-bit mask direction. */
11942 putc (((INT_LOWPART (x) & 1) == 0 ? 'r' : 'l'), file);
11945 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
11949 /* X is a CR register. Print the number of the GT bit of the CR. */
11950 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
11951 output_operand_lossage ("invalid %%c value");
11953 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 1);
11957 /* Like 'J' but get to the GT bit only. */
11958 gcc_assert (GET_CODE (x) == REG);
11960 /* Bit 1 is GT bit. */
11961 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
11963 /* Add one for shift count in rlinm for scc. */
11964 fprintf (file, "%d", i + 1);
11968 /* X is a CR register. Print the number of the EQ bit of the CR */
11969 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
11970 output_operand_lossage ("invalid %%E value");
11972 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
11976 /* X is a CR register. Print the shift count needed to move it
11977 to the high-order four bits. */
11978 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
11979 output_operand_lossage ("invalid %%f value");
11981 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
11985 /* Similar, but print the count for the rotate in the opposite
11987 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
11988 output_operand_lossage ("invalid %%F value");
11990 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
11994 /* X is a constant integer. If it is negative, print "m",
11995 otherwise print "z". This is to make an aze or ame insn. */
11996 if (GET_CODE (x) != CONST_INT)
11997 output_operand_lossage ("invalid %%G value");
11998 else if (INTVAL (x) >= 0)
12005 /* If constant, output low-order five bits. Otherwise, write
12008 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 31);
12010 print_operand (file, x, 0);
12014 /* If constant, output low-order six bits. Otherwise, write
12017 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 63);
12019 print_operand (file, x, 0);
12023 /* Print `i' if this is a constant, else nothing. */
12029 /* Write the bit number in CCR for jump. */
12030 i = ccr_bit (x, 0);
12032 output_operand_lossage ("invalid %%j code");
12034 fprintf (file, "%d", i);
12038 /* Similar, but add one for shift count in rlinm for scc and pass
12039 scc flag to `ccr_bit'. */
12040 i = ccr_bit (x, 1);
12042 output_operand_lossage ("invalid %%J code");
12044 /* If we want bit 31, write a shift count of zero, not 32. */
12045 fprintf (file, "%d", i == 31 ? 0 : i + 1);
12049 /* X must be a constant. Write the 1's complement of the
12052 output_operand_lossage ("invalid %%k value");
12054 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INT_LOWPART (x));
12058 /* X must be a symbolic constant on ELF. Write an
12059 expression suitable for an 'addi' that adds in the low 16
12060 bits of the MEM. */
12061 if (GET_CODE (x) != CONST)
12063 print_operand_address (file, x);
12064 fputs ("@l", file);
12068 if (GET_CODE (XEXP (x, 0)) != PLUS
12069 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
12070 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
12071 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
12072 output_operand_lossage ("invalid %%K value");
12073 print_operand_address (file, XEXP (XEXP (x, 0), 0));
12074 fputs ("@l", file);
12075 /* For GNU as, there must be a non-alphanumeric character
12076 between 'l' and the number. The '-' is added by
12077 print_operand() already. */
12078 if (INTVAL (XEXP (XEXP (x, 0), 1)) >= 0)
12080 print_operand (file, XEXP (XEXP (x, 0), 1), 0);
12084 /* %l is output_asm_label. */
12087 /* Write second word of DImode or DFmode reference. Works on register
12088 or non-indexed memory only. */
12089 if (GET_CODE (x) == REG)
12090 fputs (reg_names[REGNO (x) + 1], file);
12091 else if (GET_CODE (x) == MEM)
12093 /* Handle possible auto-increment. Since it is pre-increment and
12094 we have already done it, we can just use an offset of one word. */
12095 if (GET_CODE (XEXP (x, 0)) == PRE_INC
12096 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
12097 output_address (plus_constant (XEXP (XEXP (x, 0), 0),
12099 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
12100 output_address (plus_constant (XEXP (XEXP (x, 0), 0),
12103 output_address (XEXP (adjust_address_nv (x, SImode,
12107 if (small_data_operand (x, GET_MODE (x)))
12108 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
12109 reg_names[SMALL_DATA_REG]);
12114 /* MB value for a mask operand. */
12115 if (! mask_operand (x, SImode))
12116 output_operand_lossage ("invalid %%m value");
12118 fprintf (file, "%d", extract_MB (x));
12122 /* ME value for a mask operand. */
12123 if (! mask_operand (x, SImode))
12124 output_operand_lossage ("invalid %%M value");
12126 fprintf (file, "%d", extract_ME (x));
12129 /* %n outputs the negative of its operand. */
12132 /* Write the number of elements in the vector times 4. */
12133 if (GET_CODE (x) != PARALLEL)
12134 output_operand_lossage ("invalid %%N value");
12136 fprintf (file, "%d", XVECLEN (x, 0) * 4);
12140 /* Similar, but subtract 1 first. */
12141 if (GET_CODE (x) != PARALLEL)
12142 output_operand_lossage ("invalid %%O value");
12144 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
12148 /* X is a CONST_INT that is a power of two. Output the logarithm. */
12150 || INT_LOWPART (x) < 0
12151 || (i = exact_log2 (INT_LOWPART (x))) < 0)
12152 output_operand_lossage ("invalid %%p value");
12154 fprintf (file, "%d", i);
12158 /* The operand must be an indirect memory reference. The result
12159 is the register name. */
12160 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
12161 || REGNO (XEXP (x, 0)) >= 32)
12162 output_operand_lossage ("invalid %%P value");
12164 fputs (reg_names[REGNO (XEXP (x, 0))], file);
12168 /* This outputs the logical code corresponding to a boolean
12169 expression. The expression may have one or both operands
12170 negated (if one, only the first one). For condition register
12171 logical operations, it will also treat the negated
12172 CR codes as NOTs, but not handle NOTs of them. */
12174 const char *const *t = 0;
12176 enum rtx_code code = GET_CODE (x);
12177 static const char * const tbl[3][3] = {
12178 { "and", "andc", "nor" },
12179 { "or", "orc", "nand" },
12180 { "xor", "eqv", "xor" } };
12184 else if (code == IOR)
12186 else if (code == XOR)
12189 output_operand_lossage ("invalid %%q value");
12191 if (GET_CODE (XEXP (x, 0)) != NOT)
12195 if (GET_CODE (XEXP (x, 1)) == NOT)
12213 /* X is a CR register. Print the mask for `mtcrf'. */
12214 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
12215 output_operand_lossage ("invalid %%R value");
12217 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
12221 /* Low 5 bits of 32 - value */
12223 output_operand_lossage ("invalid %%s value");
12225 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INT_LOWPART (x)) & 31);
12229 /* PowerPC64 mask position. All 0's is excluded.
12230 CONST_INT 32-bit mask is considered sign-extended so any
12231 transition must occur within the CONST_INT, not on the boundary. */
12232 if (! mask64_operand (x, DImode))
12233 output_operand_lossage ("invalid %%S value");
12235 uval = INT_LOWPART (x);
12237 if (uval & 1) /* Clear Left */
12239 #if HOST_BITS_PER_WIDE_INT > 64
12240 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
12244 else /* Clear Right */
12247 #if HOST_BITS_PER_WIDE_INT > 64
12248 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
12254 gcc_assert (i >= 0);
12255 fprintf (file, "%d", i);
12259 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
12260 gcc_assert (GET_CODE (x) == REG && GET_MODE (x) == CCmode);
12262 /* Bit 3 is OV bit. */
12263 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
12265 /* If we want bit 31, write a shift count of zero, not 32. */
12266 fprintf (file, "%d", i == 31 ? 0 : i + 1);
12270 /* Print the symbolic name of a branch target register. */
12271 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
12272 && REGNO (x) != CTR_REGNO))
12273 output_operand_lossage ("invalid %%T value");
12274 else if (REGNO (x) == LR_REGNO)
12275 fputs (TARGET_NEW_MNEMONICS ? "lr" : "r", file);
12277 fputs ("ctr", file);
12281 /* High-order 16 bits of constant for use in unsigned operand. */
12283 output_operand_lossage ("invalid %%u value");
12285 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
12286 (INT_LOWPART (x) >> 16) & 0xffff);
12290 /* High-order 16 bits of constant for use in signed operand. */
12292 output_operand_lossage ("invalid %%v value");
12294 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
12295 (INT_LOWPART (x) >> 16) & 0xffff);
12299 /* Print `u' if this has an auto-increment or auto-decrement. */
12300 if (GET_CODE (x) == MEM
12301 && (GET_CODE (XEXP (x, 0)) == PRE_INC
12302 || GET_CODE (XEXP (x, 0)) == PRE_DEC
12303 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
12308 /* Print the trap code for this operand. */
12309 switch (GET_CODE (x))
12312 fputs ("eq", file); /* 4 */
12315 fputs ("ne", file); /* 24 */
12318 fputs ("lt", file); /* 16 */
12321 fputs ("le", file); /* 20 */
12324 fputs ("gt", file); /* 8 */
12327 fputs ("ge", file); /* 12 */
12330 fputs ("llt", file); /* 2 */
12333 fputs ("lle", file); /* 6 */
12336 fputs ("lgt", file); /* 1 */
12339 fputs ("lge", file); /* 5 */
12342 gcc_unreachable ();
12347 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
12350 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
12351 ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
12353 print_operand (file, x, 0);
12357 /* MB value for a PowerPC64 rldic operand. */
12358 val = (GET_CODE (x) == CONST_INT
12359 ? INTVAL (x) : CONST_DOUBLE_HIGH (x));
12364 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i++)
12365 if ((val <<= 1) < 0)
12368 #if HOST_BITS_PER_WIDE_INT == 32
12369 if (GET_CODE (x) == CONST_INT && i >= 0)
12370 i += 32; /* zero-extend high-part was all 0's */
12371 else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
12373 val = CONST_DOUBLE_LOW (x);
12379 for ( ; i < 64; i++)
12380 if ((val <<= 1) < 0)
12385 fprintf (file, "%d", i + 1);
12389 if (GET_CODE (x) == MEM
12390 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
12391 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
12392 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
12397 /* Like 'L', for third word of TImode */
12398 if (GET_CODE (x) == REG)
12399 fputs (reg_names[REGNO (x) + 2], file);
12400 else if (GET_CODE (x) == MEM)
12402 if (GET_CODE (XEXP (x, 0)) == PRE_INC
12403 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
12404 output_address (plus_constant (XEXP (XEXP (x, 0), 0), 8));
12405 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
12406 output_address (plus_constant (XEXP (XEXP (x, 0), 0), 8));
12408 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
12409 if (small_data_operand (x, GET_MODE (x)))
12410 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
12411 reg_names[SMALL_DATA_REG]);
12416 /* X is a SYMBOL_REF. Write out the name preceded by a
12417 period and without any trailing data in brackets. Used for function
12418 names. If we are configured for System V (or the embedded ABI) on
12419 the PowerPC, do not emit the period, since those systems do not use
12420 TOCs and the like. */
12421 gcc_assert (GET_CODE (x) == SYMBOL_REF);
12423 /* Mark the decl as referenced so that cgraph will output the
12425 if (SYMBOL_REF_DECL (x))
12426 mark_decl_referenced (SYMBOL_REF_DECL (x));
12428 /* For macho, check to see if we need a stub. */
12431 const char *name = XSTR (x, 0);
12433 if (MACHOPIC_INDIRECT
12434 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
12435 name = machopic_indirection_name (x, /*stub_p=*/true);
12437 assemble_name (file, name);
12439 else if (!DOT_SYMBOLS)
12440 assemble_name (file, XSTR (x, 0));
12442 rs6000_output_function_entry (file, XSTR (x, 0));
12446 /* Like 'L', for last word of TImode. */
12447 if (GET_CODE (x) == REG)
12448 fputs (reg_names[REGNO (x) + 3], file);
12449 else if (GET_CODE (x) == MEM)
12451 if (GET_CODE (XEXP (x, 0)) == PRE_INC
12452 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
12453 output_address (plus_constant (XEXP (XEXP (x, 0), 0), 12));
12454 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
12455 output_address (plus_constant (XEXP (XEXP (x, 0), 0), 12));
12457 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
12458 if (small_data_operand (x, GET_MODE (x)))
12459 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
12460 reg_names[SMALL_DATA_REG]);
12464 /* Print AltiVec or SPE memory operand. */
12469 gcc_assert (GET_CODE (x) == MEM);
12473 /* Ugly hack because %y is overloaded. */
12474 if ((TARGET_SPE || TARGET_E500_DOUBLE)
12475 && (GET_MODE_SIZE (GET_MODE (x)) == 8
12476 || GET_MODE (x) == TFmode
12477 || GET_MODE (x) == TImode))
12479 /* Handle [reg]. */
12480 if (GET_CODE (tmp) == REG)
12482 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
12485 /* Handle [reg+UIMM]. */
12486 else if (GET_CODE (tmp) == PLUS &&
12487 GET_CODE (XEXP (tmp, 1)) == CONST_INT)
12491 gcc_assert (GET_CODE (XEXP (tmp, 0)) == REG);
12493 x = INTVAL (XEXP (tmp, 1));
12494 fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
12498 /* Fall through. Must be [reg+reg]. */
12501 && GET_CODE (tmp) == AND
12502 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
12503 && INTVAL (XEXP (tmp, 1)) == -16)
12504 tmp = XEXP (tmp, 0);
12505 if (GET_CODE (tmp) == REG)
12506 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
12509 if (GET_CODE (tmp) != PLUS
12510 || !REG_P (XEXP (tmp, 0))
12511 || !REG_P (XEXP (tmp, 1)))
12513 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
12517 if (REGNO (XEXP (tmp, 0)) == 0)
12518 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
12519 reg_names[ REGNO (XEXP (tmp, 0)) ]);
12521 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
12522 reg_names[ REGNO (XEXP (tmp, 1)) ]);
12528 if (GET_CODE (x) == REG)
12529 fprintf (file, "%s", reg_names[REGNO (x)]);
12530 else if (GET_CODE (x) == MEM)
12532 /* We need to handle PRE_INC and PRE_DEC here, since we need to
12533 know the width from the mode. */
12534 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
12535 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
12536 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
12537 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
12538 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
12539 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
12540 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
12541 output_address (XEXP (XEXP (x, 0), 1));
12543 output_address (XEXP (x, 0));
12546 output_addr_const (file, x);
12550 assemble_name (file, rs6000_get_some_local_dynamic_name ());
12554 output_operand_lossage ("invalid %%xn code");
12558 /* Print the address of an operand. */
12561 print_operand_address (FILE *file, rtx x)
12563 if (GET_CODE (x) == REG)
12564 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
12565 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
12566 || GET_CODE (x) == LABEL_REF)
12568 output_addr_const (file, x);
12569 if (small_data_operand (x, GET_MODE (x)))
12570 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
12571 reg_names[SMALL_DATA_REG]);
12573 gcc_assert (!TARGET_TOC);
12575 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == REG)
12577 gcc_assert (REG_P (XEXP (x, 0)));
12578 if (REGNO (XEXP (x, 0)) == 0)
12579 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
12580 reg_names[ REGNO (XEXP (x, 0)) ]);
12582 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
12583 reg_names[ REGNO (XEXP (x, 1)) ]);
12585 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
12586 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
12587 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
12589 else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
12590 && CONSTANT_P (XEXP (x, 1)))
12592 output_addr_const (file, XEXP (x, 1));
12593 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
12597 else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
12598 && CONSTANT_P (XEXP (x, 1)))
12600 fprintf (file, "lo16(");
12601 output_addr_const (file, XEXP (x, 1));
12602 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
12605 else if (legitimate_constant_pool_address_p (x))
12607 output_addr_const (file, XEXP (x, 1));
12608 fprintf (file, "(%s)", reg_names[REGNO (XEXP (x, 0))]);
12611 gcc_unreachable ();
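/* As an illustration (register-name spelling depends on the target
   headers): an address of the form (plus (reg 9) (const_int 16)) prints
   as "16(9)", a reg+reg address prints as "9,10" with register 0 forced
   into the second position, and a small-data symbol is printed with the
   @sda21 or @sdarel suffix and the small-data base register. */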
12614 /* Implement OUTPUT_ADDR_CONST_EXTRA for address X. */
12617 rs6000_output_addr_const_extra (FILE *file, rtx x)
12619 if (GET_CODE (x) == UNSPEC)
12620 switch (XINT (x, 1))
12622 case UNSPEC_TOCREL:
12623 x = XVECEXP (x, 0, 0);
12624 gcc_assert (GET_CODE (x) == SYMBOL_REF);
12625 output_addr_const (file, x);
12626 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
12629 assemble_name (file, toc_label_name);
12631 else if (TARGET_ELF)
12632 fputs ("@toc", file);
12636 case UNSPEC_MACHOPIC_OFFSET:
12637 output_addr_const (file, XVECEXP (x, 0, 0));
12639 machopic_output_function_base_name (file);
12646 /* Target hook for assembling integer objects. The PowerPC version has
12647 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
12648 is defined. It also needs to handle DI-mode objects on 64-bit
12652 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
12654 #ifdef RELOCATABLE_NEEDS_FIXUP
12655 /* Special handling for SI values. */
12656 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
12658 static int recurse = 0;
12660 /* For -mrelocatable, we mark all addresses that need to be fixed up
12661 in the .fixup section. */
12662 if (TARGET_RELOCATABLE
12663 && in_section != toc_section
12664 && in_section != text_section
12665 && !unlikely_text_section_p (in_section)
12667 && GET_CODE (x) != CONST_INT
12668 && GET_CODE (x) != CONST_DOUBLE
12674 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
12676 ASM_OUTPUT_LABEL (asm_out_file, buf);
12677 fprintf (asm_out_file, "\t.long\t(");
12678 output_addr_const (asm_out_file, x);
12679 fprintf (asm_out_file, ")@fixup\n");
12680 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
12681 ASM_OUTPUT_ALIGN (asm_out_file, 2);
12682 fprintf (asm_out_file, "\t.long\t");
12683 assemble_name (asm_out_file, buf);
12684 fprintf (asm_out_file, "\n\t.previous\n");
12688 /* Remove initial .'s to turn a -mcall-aixdesc function
12689 address into the address of the descriptor, not the function
12691 else if (GET_CODE (x) == SYMBOL_REF
12692 && XSTR (x, 0)[0] == '.'
12693 && DEFAULT_ABI == ABI_AIX)
12695 const char *name = XSTR (x, 0);
12696 while (*name == '.')
12699 fprintf (asm_out_file, "\t.long\t%s\n", name);
12703 #endif /* RELOCATABLE_NEEDS_FIXUP */
12704 return default_assemble_integer (x, size, aligned_p);
12707 #ifdef HAVE_GAS_HIDDEN
12708 /* Emit an assembler directive to set symbol visibility for DECL to
12709 VISIBILITY_TYPE. */
12712 rs6000_assemble_visibility (tree decl, int vis)
12714 /* Functions need to have their entry point symbol visibility set as
12715 well as their descriptor symbol visibility. */
12716 if (DEFAULT_ABI == ABI_AIX
12718 && TREE_CODE (decl) == FUNCTION_DECL)
12720 static const char * const visibility_types[] = {
12721 NULL, "internal", "hidden", "protected"
12724 const char *name, *type;
12726 name = ((* targetm.strip_name_encoding)
12727 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
12728 type = visibility_types[vis];
12730 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
12731 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
12734 default_assemble_visibility (decl, vis);
12739 rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
12741 /* Reversal of FP compares needs care -- an ordered compare
12742 becomes an unordered compare and vice versa. */
12743 if (mode == CCFPmode
12744 && (!flag_finite_math_only
12745 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
12746 || code == UNEQ || code == LTGT))
12747 return reverse_condition_maybe_unordered (code);
12749 return reverse_condition (code);
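/* For example, reversing a CCFPmode GE when NaNs are possible must yield
   UNLT rather than LT, since "not (a >= b)" is also true for unordered
   operands; only under flag_finite_math_only is the plain
   reverse_condition mapping safe. */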
12752 /* Generate a compare for CODE. Return a brand-new rtx that
12753 represents the result of the compare. */
12756 rs6000_generate_compare (enum rtx_code code)
12758 enum machine_mode comp_mode;
12759 rtx compare_result;
12761 if (rs6000_compare_fp_p)
12762 comp_mode = CCFPmode;
12763 else if (code == GTU || code == LTU
12764 || code == GEU || code == LEU)
12765 comp_mode = CCUNSmode;
12766 else if ((code == EQ || code == NE)
12767 && GET_CODE (rs6000_compare_op0) == SUBREG
12768 && GET_CODE (rs6000_compare_op1) == SUBREG
12769 && SUBREG_PROMOTED_UNSIGNED_P (rs6000_compare_op0)
12770 && SUBREG_PROMOTED_UNSIGNED_P (rs6000_compare_op1))
12771 /* These are unsigned values, perhaps there will be a later
12772 ordering compare that can be shared with this one.
12773 Unfortunately we cannot detect the signedness of the operands
12774 for non-subregs. */
12775 comp_mode = CCUNSmode;
12777 comp_mode = CCmode;
12779 /* First, the compare. */
12780 compare_result = gen_reg_rtx (comp_mode);
12782 /* E500 FP compare instructions on the GPRs. Yuck! */
12783 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
12784 && rs6000_compare_fp_p)
12786 rtx cmp, or_result, compare_result2;
12787 enum machine_mode op_mode = GET_MODE (rs6000_compare_op0);
12789 if (op_mode == VOIDmode)
12790 op_mode = GET_MODE (rs6000_compare_op1);
12792 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
12793 This explains the following mess. */
12797 case EQ: case UNEQ: case NE: case LTGT:
12801 cmp = flag_unsafe_math_optimizations
12802 ? gen_tstsfeq_gpr (compare_result, rs6000_compare_op0,
12803 rs6000_compare_op1)
12804 : gen_cmpsfeq_gpr (compare_result, rs6000_compare_op0,
12805 rs6000_compare_op1);
12809 cmp = flag_unsafe_math_optimizations
12810 ? gen_tstdfeq_gpr (compare_result, rs6000_compare_op0,
12811 rs6000_compare_op1)
12812 : gen_cmpdfeq_gpr (compare_result, rs6000_compare_op0,
12813 rs6000_compare_op1);
12817 cmp = flag_unsafe_math_optimizations
12818 ? gen_tsttfeq_gpr (compare_result, rs6000_compare_op0,
12819 rs6000_compare_op1)
12820 : gen_cmptfeq_gpr (compare_result, rs6000_compare_op0,
12821 rs6000_compare_op1);
12825 gcc_unreachable ();
12829 case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
12833 cmp = flag_unsafe_math_optimizations
12834 ? gen_tstsfgt_gpr (compare_result, rs6000_compare_op0,
12835 rs6000_compare_op1)
12836 : gen_cmpsfgt_gpr (compare_result, rs6000_compare_op0,
12837 rs6000_compare_op1);
12841 cmp = flag_unsafe_math_optimizations
12842 ? gen_tstdfgt_gpr (compare_result, rs6000_compare_op0,
12843 rs6000_compare_op1)
12844 : gen_cmpdfgt_gpr (compare_result, rs6000_compare_op0,
12845 rs6000_compare_op1);
12849 cmp = flag_unsafe_math_optimizations
12850 ? gen_tsttfgt_gpr (compare_result, rs6000_compare_op0,
12851 rs6000_compare_op1)
12852 : gen_cmptfgt_gpr (compare_result, rs6000_compare_op0,
12853 rs6000_compare_op1);
12857 gcc_unreachable ();
12861 case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
12865 cmp = flag_unsafe_math_optimizations
12866 ? gen_tstsflt_gpr (compare_result, rs6000_compare_op0,
12867 rs6000_compare_op1)
12868 : gen_cmpsflt_gpr (compare_result, rs6000_compare_op0,
12869 rs6000_compare_op1);
12873 cmp = flag_unsafe_math_optimizations
12874 ? gen_tstdflt_gpr (compare_result, rs6000_compare_op0,
12875 rs6000_compare_op1)
12876 : gen_cmpdflt_gpr (compare_result, rs6000_compare_op0,
12877 rs6000_compare_op1);
12881 cmp = flag_unsafe_math_optimizations
12882 ? gen_tsttflt_gpr (compare_result, rs6000_compare_op0,
12883 rs6000_compare_op1)
12884 : gen_cmptflt_gpr (compare_result, rs6000_compare_op0,
12885 rs6000_compare_op1);
12889 gcc_unreachable ();
12893 gcc_unreachable ();
12896 /* Synthesize LE and GE from LT/GT || EQ. */
12897 if (code == LE || code == GE || code == LEU || code == GEU)
12903 case LE: code = LT; break;
12904 case GE: code = GT; break;
12905 case LEU: code = LT; break;
12906 case GEU: code = GT; break;
12907 default: gcc_unreachable ();
12910 compare_result2 = gen_reg_rtx (CCFPmode);
12916 cmp = flag_unsafe_math_optimizations
12917 ? gen_tstsfeq_gpr (compare_result2, rs6000_compare_op0,
12918 rs6000_compare_op1)
12919 : gen_cmpsfeq_gpr (compare_result2, rs6000_compare_op0,
12920 rs6000_compare_op1);
12924 cmp = flag_unsafe_math_optimizations
12925 ? gen_tstdfeq_gpr (compare_result2, rs6000_compare_op0,
12926 rs6000_compare_op1)
12927 : gen_cmpdfeq_gpr (compare_result2, rs6000_compare_op0,
12928 rs6000_compare_op1);
12932 cmp = flag_unsafe_math_optimizations
12933 ? gen_tsttfeq_gpr (compare_result2, rs6000_compare_op0,
12934 rs6000_compare_op1)
12935 : gen_cmptfeq_gpr (compare_result2, rs6000_compare_op0,
12936 rs6000_compare_op1);
12940 gcc_unreachable ();
12944 /* OR them together. */
12945 or_result = gen_reg_rtx (CCFPmode);
12946 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
12948 compare_result = or_result;
12953 if (code == NE || code == LTGT)
12963 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
12964 CLOBBERs to match cmptf_internal2 pattern. */
12965 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
12966 && GET_MODE (rs6000_compare_op0) == TFmode
12967 && !TARGET_IEEEQUAD
12968 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
12969 emit_insn (gen_rtx_PARALLEL (VOIDmode,
12971 gen_rtx_SET (VOIDmode,
12973 gen_rtx_COMPARE (comp_mode,
12974 rs6000_compare_op0,
12975 rs6000_compare_op1)),
12976 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
12977 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
12978 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
12979 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
12980 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
12981 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
12982 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
12983 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)))));
12984 else if (GET_CODE (rs6000_compare_op1) == UNSPEC
12985 && XINT (rs6000_compare_op1, 1) == UNSPEC_SP_TEST)
12987 rtx op1 = XVECEXP (rs6000_compare_op1, 0, 0);
12988 comp_mode = CCEQmode;
12989 compare_result = gen_reg_rtx (CCEQmode);
12991 emit_insn (gen_stack_protect_testdi (compare_result,
12992 rs6000_compare_op0, op1));
12994 emit_insn (gen_stack_protect_testsi (compare_result,
12995 rs6000_compare_op0, op1));
12998 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
12999 gen_rtx_COMPARE (comp_mode,
13000 rs6000_compare_op0,
13001 rs6000_compare_op1)));
13004 /* Some kinds of FP comparisons need an OR operation;
13005 under flag_finite_math_only we don't bother. */
13006 if (rs6000_compare_fp_p
13007 && !flag_finite_math_only
13008 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
13009 && (code == LE || code == GE
13010 || code == UNEQ || code == LTGT
13011 || code == UNGT || code == UNLT))
13013 enum rtx_code or1, or2;
13014 rtx or1_rtx, or2_rtx, compare2_rtx;
13015 rtx or_result = gen_reg_rtx (CCEQmode);
13019 case LE: or1 = LT; or2 = EQ; break;
13020 case GE: or1 = GT; or2 = EQ; break;
13021 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
13022 case LTGT: or1 = LT; or2 = GT; break;
13023 case UNGT: or1 = UNORDERED; or2 = GT; break;
13024 case UNLT: or1 = UNORDERED; or2 = LT; break;
13025 default: gcc_unreachable ();
13027 validate_condition_mode (or1, comp_mode);
13028 validate_condition_mode (or2, comp_mode);
13029 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
13030 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
13031 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
13032 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
13034 emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
13036 compare_result = or_result;
13040 validate_condition_mode (code, GET_MODE (compare_result));
13042 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
13046 /* Emit the RTL for an sCOND pattern. */
13049 rs6000_emit_sCOND (enum rtx_code code, rtx result)
13052 enum machine_mode op_mode;
13053 enum rtx_code cond_code;
13055 condition_rtx = rs6000_generate_compare (code);
13056 cond_code = GET_CODE (condition_rtx);
13058 if (rs6000_compare_fp_p
13059 && !TARGET_FPRS && TARGET_HARD_FLOAT)
13063 PUT_MODE (condition_rtx, SImode);
13064 t = XEXP (condition_rtx, 0);
13066 gcc_assert (cond_code == NE || cond_code == EQ);
13068 if (cond_code == NE)
13069 emit_insn (gen_e500_flip_gt_bit (t, t));
13071 emit_insn (gen_move_from_CR_gt_bit (result, t));
13075 if (cond_code == NE
13076 || cond_code == GE || cond_code == LE
13077 || cond_code == GEU || cond_code == LEU
13078 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
13080 rtx not_result = gen_reg_rtx (CCEQmode);
13081 rtx not_op, rev_cond_rtx;
13082 enum machine_mode cc_mode;
13084 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
13086 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
13087 SImode, XEXP (condition_rtx, 0), const0_rtx);
13088 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
13089 emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
13090 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
13093 op_mode = GET_MODE (rs6000_compare_op0);
13094 if (op_mode == VOIDmode)
13095 op_mode = GET_MODE (rs6000_compare_op1);
13097 if (TARGET_POWERPC64 && (op_mode == DImode || rs6000_compare_fp_p))
13099 PUT_MODE (condition_rtx, DImode);
13100 convert_move (result, condition_rtx, 0);
13104 PUT_MODE (condition_rtx, SImode);
13105 emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
13109 /* Emit a branch of kind CODE to location LOC. */
13112 rs6000_emit_cbranch (enum rtx_code code, rtx loc)
13114 rtx condition_rtx, loc_ref;
13116 condition_rtx = rs6000_generate_compare (code);
13117 loc_ref = gen_rtx_LABEL_REF (VOIDmode, loc);
13118 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
13119 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
13120 loc_ref, pc_rtx)));
13123 /* Return the string to output a conditional branch to LABEL, which is
13124 the operand number of the label, or -1 if the branch is really a
13125 conditional return.
13127 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
13128 condition code register and its mode specifies what kind of
13129 comparison we made.
13131 REVERSED is nonzero if we should reverse the sense of the comparison.
13133 INSN is the insn. */
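/* For instance, an EQ test on cr0 with no hint is emitted as a plain
   "beq" on that field; when the label is too far away for a conditional
   branch, the sense is inverted and an unconditional branch is appended,
   giving the ",$+8\n\tb <label>" form built below.  */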
13136 output_cbranch (rtx op, const char *label, int reversed, rtx insn)
13138 static char string[64];
13139 enum rtx_code code = GET_CODE (op);
13140 rtx cc_reg = XEXP (op, 0);
13141 enum machine_mode mode = GET_MODE (cc_reg);
13142 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
13143 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
13144 int really_reversed = reversed ^ need_longbranch;
13150 validate_condition_mode (code, mode);
13152 /* Work out which way this really branches. We could use
13153 reverse_condition_maybe_unordered here always but this
13154 makes the resulting assembler clearer. */
13155 if (really_reversed)
13157 /* Reversal of FP compares takes care -- an ordered compare
13158 becomes an unordered compare and vice versa. */
13159 if (mode == CCFPmode)
13160 code = reverse_condition_maybe_unordered (code);
13162 code = reverse_condition (code);
13165 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
13167 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely to the GT bit. */
13172 /* Opposite of GT. */
13181 gcc_unreachable ();
13187 /* Not all of these are actually distinct opcodes, but
13188 we distinguish them for clarity of the resulting assembler. */
13189 case NE: case LTGT:
13190 ccode = "ne"; break;
13191 case EQ: case UNEQ:
13192 ccode = "eq"; break;
13194 ccode = "ge"; break;
13195 case GT: case GTU: case UNGT:
13196 ccode = "gt"; break;
13198 ccode = "le"; break;
13199 case LT: case LTU: case UNLT:
13200 ccode = "lt"; break;
13201 case UNORDERED: ccode = "un"; break;
13202 case ORDERED: ccode = "nu"; break;
13203 case UNGE: ccode = "nl"; break;
13204 case UNLE: ccode = "ng"; break;
13206 gcc_unreachable ();
13209 /* Maybe we have a guess as to how likely the branch is.
13210 The old mnemonics don't have a way to specify this information. */
13212 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
13213 if (note != NULL_RTX)
13215 /* PROB is the difference from 50%. */
13216 int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;
13218 /* Only hint for highly probable/improbable branches on newer
13219 cpus as static prediction overrides processor dynamic
13220 prediction. For older cpus we may as well always hint, but
13221 assume not taken for branches that are very close to 50% as a
13222 mispredicted taken branch is more expensive than a
13223 mispredicted not-taken branch. */
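/* When a hint is emitted it takes the form of the usual PowerPC '+'
   (predict taken) or '-' (predict not taken) suffix on the branch
   mnemonic; PRED below carries that suffix.  */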
13224 if (rs6000_always_hint
13225 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
13226 && br_prob_note_reliable_p (note)))
13228 if (abs (prob) > REG_BR_PROB_BASE / 20
13229 && ((prob > 0) ^ need_longbranch))
13237 s += sprintf (s, "{b%sr|b%slr%s} ", ccode, ccode, pred);
13239 s += sprintf (s, "{b%s|b%s%s} ", ccode, ccode, pred);
13241 /* We need to escape any '%' characters in the reg_names string.
13242 Assume they'd only be the first character.... */
13243 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
13245 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
13249 /* If the branch distance was too far, we may have to use an
13250 unconditional branch to go the distance. */
13251 if (need_longbranch)
13252 s += sprintf (s, ",$+8\n\tb %s", label);
13254 s += sprintf (s, ",%s", label);
13260 /* Return the string to flip the GT bit on a CR. */
13262 output_e500_flip_gt_bit (rtx dst, rtx src)
13264 static char string[64];
13267 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
13268 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
13271 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
13272 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
13274 sprintf (string, "crnot %d,%d", a, b);
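/* E.g. for dst == src == cr3 this yields "crnot 13,13": the GT bit of
   field cr3 is CR bit 4 * 3 + 1 = 13.  */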
13278 /* Return the insn index of the vector compare instruction for the given CODE,
13279 DEST_MODE and OP_MODE. Return INSN_NOT_AVAILABLE if no valid insn is available. */
13283 get_vec_cmp_insn (enum rtx_code code,
13284 enum machine_mode dest_mode,
13285 enum machine_mode op_mode)
13287 if (!TARGET_ALTIVEC)
13288 return INSN_NOT_AVAILABLE;
13293 if (dest_mode == V16QImode && op_mode == V16QImode)
13294 return UNSPEC_VCMPEQUB;
13295 if (dest_mode == V8HImode && op_mode == V8HImode)
13296 return UNSPEC_VCMPEQUH;
13297 if (dest_mode == V4SImode && op_mode == V4SImode)
13298 return UNSPEC_VCMPEQUW;
13299 if (dest_mode == V4SImode && op_mode == V4SFmode)
13300 return UNSPEC_VCMPEQFP;
13303 if (dest_mode == V4SImode && op_mode == V4SFmode)
13304 return UNSPEC_VCMPGEFP;
13306 if (dest_mode == V16QImode && op_mode == V16QImode)
13307 return UNSPEC_VCMPGTSB;
13308 if (dest_mode == V8HImode && op_mode == V8HImode)
13309 return UNSPEC_VCMPGTSH;
13310 if (dest_mode == V4SImode && op_mode == V4SImode)
13311 return UNSPEC_VCMPGTSW;
13312 if (dest_mode == V4SImode && op_mode == V4SFmode)
13313 return UNSPEC_VCMPGTFP;
13316 if (dest_mode == V16QImode && op_mode == V16QImode)
13317 return UNSPEC_VCMPGTUB;
13318 if (dest_mode == V8HImode && op_mode == V8HImode)
13319 return UNSPEC_VCMPGTUH;
13320 if (dest_mode == V4SImode && op_mode == V4SImode)
13321 return UNSPEC_VCMPGTUW;
13326 return INSN_NOT_AVAILABLE;
13329 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
13330 DMODE is expected destination mode. This is a recursive function. */
13333 rs6000_emit_vector_compare (enum rtx_code rcode,
13335 enum machine_mode dmode)
13339 enum machine_mode dest_mode;
13340 enum machine_mode op_mode = GET_MODE (op1);
13342 gcc_assert (TARGET_ALTIVEC);
13343 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
13345 /* Floating point vector compare instructions use destination V4SImode.
13346 Move destination to appropriate mode later. */
13347 if (dmode == V4SFmode)
13348 dest_mode = V4SImode;
13352 mask = gen_reg_rtx (dest_mode);
13353 vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);
13355 if (vec_cmp_insn == INSN_NOT_AVAILABLE)
13357 bool swap_operands = false;
13358 bool try_again = false;
13363 swap_operands = true;
13368 swap_operands = true;
13376 /* Invert condition and try again.
13377 e.g., A != B becomes ~(A==B). */
13379 enum rtx_code rev_code;
13380 enum insn_code nor_code;
13383 rev_code = reverse_condition_maybe_unordered (rcode);
13384 eq_rtx = rs6000_emit_vector_compare (rev_code, op0, op1,
13387 nor_code = optab_handler (one_cmpl_optab, (int)dest_mode)->insn_code;
13388 gcc_assert (nor_code != CODE_FOR_nothing);
13389 emit_insn (GEN_FCN (nor_code) (mask, eq_rtx));
13391 if (dmode != dest_mode)
13393 rtx temp = gen_reg_rtx (dest_mode);
13394 convert_move (temp, mask, 0);
13404 /* Try GT/GTU/LT/LTU OR EQ */
13407 enum insn_code ior_code;
13408 enum rtx_code new_code;
13429 gcc_unreachable ();
13432 c_rtx = rs6000_emit_vector_compare (new_code,
13433 op0, op1, dest_mode);
13434 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1,
13437 ior_code = optab_handler (ior_optab, (int)dest_mode)->insn_code;
13438 gcc_assert (ior_code != CODE_FOR_nothing);
13439 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
13440 if (dmode != dest_mode)
13442 rtx temp = gen_reg_rtx (dest_mode);
13443 convert_move (temp, mask, 0);
13450 gcc_unreachable ();
13455 vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);
13456 /* You only get two chances. */
13457 gcc_assert (vec_cmp_insn != INSN_NOT_AVAILABLE);
13469 emit_insn (gen_rtx_SET (VOIDmode, mask,
13470 gen_rtx_UNSPEC (dest_mode,
13471 gen_rtvec (2, op0, op1),
13473 if (dmode != dest_mode)
13475 rtx temp = gen_reg_rtx (dest_mode);
13476 convert_move (temp, mask, 0);
13482 /* Return the vector select instruction for MODE. Return INSN_NOT_AVAILABLE if
13483 no valid insn exists for the given mode. */
13486 get_vsel_insn (enum machine_mode mode)
13491 return UNSPEC_VSEL4SI;
13494 return UNSPEC_VSEL4SF;
13497 return UNSPEC_VSEL8HI;
13500 return UNSPEC_VSEL16QI;
13503 return INSN_NOT_AVAILABLE;
13506 return INSN_NOT_AVAILABLE;
13509 /* Emit vector select insn where DEST is destination using
13510 operands OP1, OP2 and MASK. */
13513 rs6000_emit_vector_select (rtx dest, rtx op1, rtx op2, rtx mask)
13516 enum machine_mode dest_mode = GET_MODE (dest);
13517 int vsel_insn_index = get_vsel_insn (GET_MODE (dest));
13519 temp = gen_reg_rtx (dest_mode);
13521 /* For each vector element, select op1 when mask is 1, otherwise select op2. */
13523 t = gen_rtx_SET (VOIDmode, temp,
13524 gen_rtx_UNSPEC (dest_mode,
13525 gen_rtvec (3, op2, op1, mask),
13528 emit_move_insn (dest, temp);
13532 /* Emit vector conditional expression.
13533 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
13534 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
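/* For example, a V4SF (a > b ? x : y) is expanded as a vcmpgtfp that
   builds a mask from CC_OP0/CC_OP1, followed by a vsel that merges OP1
   and OP2 under that mask.  */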
13537 rs6000_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
13538 rtx cond, rtx cc_op0, rtx cc_op1)
13540 enum machine_mode dest_mode = GET_MODE (dest);
13541 enum rtx_code rcode = GET_CODE (cond);
13544 if (!TARGET_ALTIVEC)
13547 /* Get the vector mask for the given relational operations. */
13548 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, dest_mode);
13550 rs6000_emit_vector_select (dest, op1, op2, mask);
13555 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
13556 operands of the last comparison is nonzero/true, FALSE_COND if it
13557 is zero/false. Return 0 if the hardware has no such operation. */
13560 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
13562 enum rtx_code code = GET_CODE (op);
13563 rtx op0 = rs6000_compare_op0;
13564 rtx op1 = rs6000_compare_op1;
13565 REAL_VALUE_TYPE c1;
13566 enum machine_mode compare_mode = GET_MODE (op0);
13567 enum machine_mode result_mode = GET_MODE (dest);
13569 bool is_against_zero;
13571 /* These modes should always match. */
13572 if (GET_MODE (op1) != compare_mode
13573 /* In the isel case however, we can use a compare immediate, so
13574 op1 may be a small constant. */
13575 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
13577 if (GET_MODE (true_cond) != result_mode)
13579 if (GET_MODE (false_cond) != result_mode)
13582 /* First, work out if the hardware can do this at all, or
13583 if it's too slow.... */
13584 if (! rs6000_compare_fp_p)
13587 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
13590 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
13591 && SCALAR_FLOAT_MODE_P (compare_mode))
13594 is_against_zero = op1 == CONST0_RTX (compare_mode);
13596 /* A floating-point subtract might overflow, underflow, or produce
13597 an inexact result, thus changing the floating-point flags, so it
13598 can't be generated if we care about that. It's safe if one side
13599 of the construct is zero, since then no subtract will be generated. */
13601 if (SCALAR_FLOAT_MODE_P (compare_mode)
13602 && flag_trapping_math && ! is_against_zero)
13605 /* Eliminate half of the comparisons by switching operands, this
13606 makes the remaining code simpler. */
13607 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
13608 || code == LTGT || code == LT || code == UNLE)
13610 code = reverse_condition_maybe_unordered (code);
13612 true_cond = false_cond;
13616 /* UNEQ and LTGT take four instructions for a comparison with zero,
13617 it'll probably be faster to use a branch here too. */
13618 if (code == UNEQ && HONOR_NANS (compare_mode))
13621 if (GET_CODE (op1) == CONST_DOUBLE)
13622 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
13624 /* We're going to try to implement comparisons by performing
13625 a subtract, then comparing against zero. Unfortunately,
13626 Inf - Inf is NaN which is not zero, and so if we don't
13627 know that the operand is finite and the comparison
13628 would treat EQ different to UNORDERED, we can't do it. */
13629 if (HONOR_INFINITIES (compare_mode)
13630 && code != GT && code != UNGE
13631 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
13632 /* Constructs of the form (a OP b ? a : b) are safe. */
13633 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
13634 || (! rtx_equal_p (op0, true_cond)
13635 && ! rtx_equal_p (op1, true_cond))))
13638 /* At this point we know we can use fsel. */
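/* fsel FRT,FRA,FRC,FRB computes FRT = (FRA >= 0.0) ? FRC : FRB, which is
   why everything below is reduced to a GE comparison against zero.  */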
13640 /* Reduce the comparison to a comparison against zero. */
13641 if (! is_against_zero)
13643 temp = gen_reg_rtx (compare_mode);
13644 emit_insn (gen_rtx_SET (VOIDmode, temp,
13645 gen_rtx_MINUS (compare_mode, op0, op1)));
13647 op1 = CONST0_RTX (compare_mode);
13650 /* If we don't care about NaNs we can reduce some of the comparisons
13651 down to faster ones. */
13652 if (! HONOR_NANS (compare_mode))
13658 true_cond = false_cond;
13671 /* Now, reduce everything down to a GE. */
13678 temp = gen_reg_rtx (compare_mode);
13679 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
13684 temp = gen_reg_rtx (compare_mode);
13685 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
13690 temp = gen_reg_rtx (compare_mode);
13691 emit_insn (gen_rtx_SET (VOIDmode, temp,
13692 gen_rtx_NEG (compare_mode,
13693 gen_rtx_ABS (compare_mode, op0))));
13698 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
13699 temp = gen_reg_rtx (result_mode);
13700 emit_insn (gen_rtx_SET (VOIDmode, temp,
13701 gen_rtx_IF_THEN_ELSE (result_mode,
13702 gen_rtx_GE (VOIDmode,
13704 true_cond, false_cond)));
13705 false_cond = true_cond;
13708 temp = gen_reg_rtx (compare_mode);
13709 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
13714 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
13715 temp = gen_reg_rtx (result_mode);
13716 emit_insn (gen_rtx_SET (VOIDmode, temp,
13717 gen_rtx_IF_THEN_ELSE (result_mode,
13718 gen_rtx_GE (VOIDmode,
13720 true_cond, false_cond)));
13721 true_cond = false_cond;
13724 temp = gen_reg_rtx (compare_mode);
13725 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
13730 gcc_unreachable ();
13733 emit_insn (gen_rtx_SET (VOIDmode, dest,
13734 gen_rtx_IF_THEN_ELSE (result_mode,
13735 gen_rtx_GE (VOIDmode,
13737 true_cond, false_cond)));
13741 /* Same as above, but for ints (isel). */
13744 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
13746 rtx condition_rtx, cr;
13748 /* All isel implementations thus far are 32-bits. */
13749 if (GET_MODE (rs6000_compare_op0) != SImode)
13752 /* We still have to do the compare, because isel doesn't do a
13753 compare, it just looks at the CRx bits set by a previous compare instruction. */
13755 condition_rtx = rs6000_generate_compare (GET_CODE (op));
13756 cr = XEXP (condition_rtx, 0);
13758 if (GET_MODE (cr) == CCmode)
13759 emit_insn (gen_isel_signed (dest, condition_rtx,
13760 true_cond, false_cond, cr));
13762 emit_insn (gen_isel_unsigned (dest, condition_rtx,
13763 true_cond, false_cond, cr));
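/* Output the isel instruction selected by the condition in operands[1].
   isel RT,RA,RB,BC copies RA to RT when CR bit BC is set and RB otherwise;
   codes such as GE, LE or NE have no CR bit of their own, so the condition
   is reversed to one that does and the two value operands are swapped.  */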
13769 output_isel (rtx *operands)
13771 enum rtx_code code;
13773 code = GET_CODE (operands[1]);
13774 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
13776 PUT_CODE (operands[1], reverse_condition (code));
13777 return "isel %0,%3,%2,%j1";
13780 return "isel %0,%2,%3,%j1";
13784 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
13786 enum machine_mode mode = GET_MODE (op0);
13790 if (code == SMAX || code == SMIN)
13795 if (code == SMAX || code == UMAX)
13796 target = emit_conditional_move (dest, c, op0, op1, mode,
13797 op0, op1, mode, 0);
13799 target = emit_conditional_move (dest, c, op0, op1, mode,
13800 op1, op0, mode, 0);
13801 gcc_assert (target);
13802 if (target != dest)
13803 emit_move_insn (dest, target);
13806 /* Emit instructions to perform a load-reserved/store-conditional operation.
13807 The operation performed is an atomic
13808 (set M (CODE:MODE M OP))
13809 If not NULL, BEFORE is atomically set to M before the operation, and
13810 AFTER is set to M after the operation (that is, (CODE:MODE M OP)).
13811 If SYNC_P then a memory barrier is emitted before the operation.
13812 Either OP or M may be wrapped in a NOT operation. */
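/* Note that for QImode and HImode operands no subword reservation is used:
   the code below operates on the aligned SImode word containing the
   subword, adjusting OP with the shift and mask computed below, and shifts
   the loaded word back when producing BEFORE/AFTER.  */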
13815 rs6000_emit_sync (enum rtx_code code, enum machine_mode mode,
13816 rtx m, rtx op, rtx before_param, rtx after_param,
13819 enum machine_mode used_mode;
13820 rtx the_op, set_before, set_after, set_atomic, cc_scratch, before, after;
13823 HOST_WIDE_INT imask = GET_MODE_MASK (mode);
13824 rtx shift = NULL_RTX;
13827 emit_insn (gen_lwsync ());
13829 if (GET_CODE (m) == NOT)
13830 used_m = XEXP (m, 0);
13834 /* If this is smaller than SImode, we'll have to use SImode with shift and mask adjustments. */
13836 if (mode == QImode || mode == HImode)
13840 if (MEM_ALIGN (used_m) >= 32)
13843 if (BYTES_BIG_ENDIAN)
13844 ishift = GET_MODE_BITSIZE (SImode) - GET_MODE_BITSIZE (mode);
13846 shift = GEN_INT (ishift);
13847 used_m = change_address (used_m, SImode, 0);
13851 rtx addrSI, aligned_addr;
13852 int shift_mask = mode == QImode ? 0x18 : 0x10;
13854 addrSI = gen_lowpart_common (SImode,
13855 force_reg (Pmode, XEXP (used_m, 0)));
13856 addrSI = force_reg (SImode, addrSI);
13857 shift = gen_reg_rtx (SImode);
13859 emit_insn (gen_rlwinm (shift, addrSI, GEN_INT (3),
13860 GEN_INT (shift_mask)));
13861 emit_insn (gen_xorsi3 (shift, shift, GEN_INT (shift_mask)));
13863 aligned_addr = expand_binop (Pmode, and_optab,
13865 GEN_INT (-4), NULL_RTX,
13866 1, OPTAB_LIB_WIDEN);
13867 used_m = change_address (used_m, SImode, aligned_addr);
13868 set_mem_align (used_m, 32);
13870 /* It's safe to keep the old alias set of USED_M, because
13871 the operation is atomic and only affects the original USED_M. */
13873 if (GET_CODE (m) == NOT)
13874 m = gen_rtx_NOT (SImode, used_m);
13878 if (GET_CODE (op) == NOT)
13880 oldop = lowpart_subreg (SImode, XEXP (op, 0), mode);
13881 oldop = gen_rtx_NOT (SImode, oldop);
13884 oldop = lowpart_subreg (SImode, op, mode);
13890 newop = expand_binop (SImode, and_optab,
13891 oldop, GEN_INT (imask), NULL_RTX,
13892 1, OPTAB_LIB_WIDEN);
13893 emit_insn (gen_ashlsi3 (newop, newop, shift));
13897 newop = expand_binop (SImode, ior_optab,
13898 oldop, GEN_INT (~imask), NULL_RTX,
13899 1, OPTAB_LIB_WIDEN);
13900 emit_insn (gen_rotlsi3 (newop, newop, shift));
13908 newop = expand_binop (SImode, and_optab,
13909 oldop, GEN_INT (imask), NULL_RTX,
13910 1, OPTAB_LIB_WIDEN);
13911 emit_insn (gen_ashlsi3 (newop, newop, shift));
13913 mask = gen_reg_rtx (SImode);
13914 emit_move_insn (mask, GEN_INT (imask));
13915 emit_insn (gen_ashlsi3 (mask, mask, shift));
13918 newop = gen_rtx_PLUS (SImode, m, newop);
13920 newop = gen_rtx_MINUS (SImode, m, newop);
13921 newop = gen_rtx_AND (SImode, newop, mask);
13922 newop = gen_rtx_IOR (SImode, newop,
13923 gen_rtx_AND (SImode,
13924 gen_rtx_NOT (SImode, mask),
13930 gcc_unreachable ();
13933 if (GET_CODE (m) == NOT)
13937 mask = gen_reg_rtx (SImode);
13938 emit_move_insn (mask, GEN_INT (imask));
13939 emit_insn (gen_ashlsi3 (mask, mask, shift));
13941 xorm = gen_rtx_XOR (SImode, used_m, mask);
13942 /* Depending on the value of 'op', the XOR or the operation might
13943 be able to be simplified away. */
13944 newop = simplify_gen_binary (code, SImode, xorm, newop);
13947 used_mode = SImode;
13948 before = gen_reg_rtx (used_mode);
13949 after = gen_reg_rtx (used_mode);
13954 before = before_param;
13955 after = after_param;
13957 if (before == NULL_RTX)
13958 before = gen_reg_rtx (used_mode);
13959 if (after == NULL_RTX)
13960 after = gen_reg_rtx (used_mode);
13963 if ((code == PLUS || code == MINUS || GET_CODE (m) == NOT)
13964 && used_mode != mode)
13965 the_op = op; /* Computed above. */
13966 else if (GET_CODE (op) == NOT && GET_CODE (m) != NOT)
13967 the_op = gen_rtx_fmt_ee (code, used_mode, op, m);
13969 the_op = gen_rtx_fmt_ee (code, used_mode, m, op);
13971 set_after = gen_rtx_SET (VOIDmode, after, the_op);
13972 set_before = gen_rtx_SET (VOIDmode, before, used_m);
13973 set_atomic = gen_rtx_SET (VOIDmode, used_m,
13974 gen_rtx_UNSPEC (used_mode,
13975 gen_rtvec (1, the_op),
13977 cc_scratch = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
13979 if ((code == PLUS || code == MINUS) && used_mode != mode)
13980 vec = gen_rtvec (5, set_after, set_before, set_atomic, cc_scratch,
13981 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)));
13983 vec = gen_rtvec (4, set_after, set_before, set_atomic, cc_scratch);
13984 emit_insn (gen_rtx_PARALLEL (VOIDmode, vec));
13986 /* Shift and mask the return values properly. */
13987 if (used_mode != mode && before_param)
13989 emit_insn (gen_lshrsi3 (before, before, shift));
13990 convert_move (before_param, before, 1);
13993 if (used_mode != mode && after_param)
13995 emit_insn (gen_lshrsi3 (after, after, shift));
13996 convert_move (after_param, after, 1);
13999 /* The previous sequence will end with a branch that's dependent on
14000 the conditional store, so placing an isync will ensure that no
14001 other instructions (especially, no load or store instructions)
14002 can start before the atomic operation completes. */
14004 emit_insn (gen_isync ());
14007 /* A subroutine of the atomic operation splitters. Jump to LABEL if
14008 COND is true. Mark the jump as unlikely to be taken. */
14011 emit_unlikely_jump (rtx cond, rtx label)
14013 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
14016 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
14017 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
14018 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
14021 /* A subroutine of the atomic operation splitters. Emit a load-locked
14022 instruction in MODE. */
14025 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
14027 rtx (*fn) (rtx, rtx) = NULL;
14028 if (mode == SImode)
14029 fn = gen_load_locked_si;
14030 else if (mode == DImode)
14031 fn = gen_load_locked_di;
14032 emit_insn (fn (reg, mem));
14035 /* A subroutine of the atomic operation splitters. Emit a store-conditional
14036 instruction in MODE. */
14039 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
14041 rtx (*fn) (rtx, rtx, rtx) = NULL;
14042 if (mode == SImode)
14043 fn = gen_store_conditional_si;
14044 else if (mode == DImode)
14045 fn = gen_store_conditional_di;
14047 /* Emit sync before stwcx. to address PPC405 Erratum. */
14048 if (PPC405_ERRATUM77)
14049 emit_insn (gen_memory_barrier ());
14051 emit_insn (fn (res, mem, val));
14054 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
14055 to perform. MEM is the memory on which to operate. VAL is the second
14056 operand of the binary operator. BEFORE and AFTER are optional locations to
14057 return the value of MEM either before or after the operation. SCRATCH is
14058 a scratch register. */
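/* Roughly, the emitted sequence is
        lwsync
     1: l{w,d}arx   scratch,0,mem
        <code>      scratch,scratch,val
        st{w,d}cx.  scratch,0,mem
        bne-        1b
        isync
   with BEFORE/AFTER captured from the loaded and computed values.  */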
14061 rs6000_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
14062 rtx before, rtx after, rtx scratch)
14064 enum machine_mode mode = GET_MODE (mem);
14065 rtx label, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
14067 emit_insn (gen_lwsync ());
14069 label = gen_label_rtx ();
14070 emit_label (label);
14071 label = gen_rtx_LABEL_REF (VOIDmode, label);
14073 if (before == NULL_RTX)
14075 emit_load_locked (mode, before, mem);
14078 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
14079 else if (code == AND)
14080 x = gen_rtx_UNSPEC (mode, gen_rtvec (2, before, val), UNSPEC_AND);
14082 x = gen_rtx_fmt_ee (code, mode, before, val);
14084 if (after != NULL_RTX)
14085 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
14086 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14088 emit_store_conditional (mode, cond, mem, scratch);
14090 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
14091 emit_unlikely_jump (x, label);
14093 emit_insn (gen_isync ());
14096 /* Expand an atomic compare and swap operation. MEM is the memory on which
14097 to operate. OLDVAL is the old value to be compared. NEWVAL is the new
14098 value to be stored. SCRATCH is a scratch GPR. */
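/* Roughly:
        lwsync
     1: l{w,d}arx   retval,0,mem
        cmp         retval,oldval
        bne-        2f
        st{w,d}cx.  newval,0,mem
        bne-        1b
        isync
     2:
   i.e. the store is skipped entirely when the comparison fails.  */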
14101 rs6000_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
14104 enum machine_mode mode = GET_MODE (mem);
14105 rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
14107 emit_insn (gen_lwsync ());
14109 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
14110 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
14111 emit_label (XEXP (label1, 0));
14113 emit_load_locked (mode, retval, mem);
14115 x = gen_rtx_COMPARE (CCmode, retval, oldval);
14116 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
14118 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
14119 emit_unlikely_jump (x, label2);
14121 emit_move_insn (scratch, newval);
14122 emit_store_conditional (mode, cond, mem, scratch);
14124 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
14125 emit_unlikely_jump (x, label1);
14127 emit_insn (gen_isync ());
14128 emit_label (XEXP (label2, 0));
14131 /* Expand an atomic test and set operation. MEM is the memory on which
14132 to operate. VAL is the value set. SCRATCH is a scratch GPR. */
14135 rs6000_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
14137 enum machine_mode mode = GET_MODE (mem);
14138 rtx label, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
14140 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
14141 emit_label (XEXP (label, 0));
14143 emit_load_locked (mode, retval, mem);
14144 emit_move_insn (scratch, val);
14145 emit_store_conditional (mode, cond, mem, scratch);
14147 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
14148 emit_unlikely_jump (x, label);
14150 emit_insn (gen_isync ());
14154 rs6000_expand_compare_and_swapqhi (rtx dst, rtx mem, rtx oldval, rtx newval)
14156 enum machine_mode mode = GET_MODE (mem);
14157 rtx addrSI, align, wdst, shift, mask;
14158 HOST_WIDE_INT shift_mask = mode == QImode ? 0x18 : 0x10;
14159 HOST_WIDE_INT imask = GET_MODE_MASK (mode);
14161 /* Shift amount for subword relative to aligned word. */
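/* On the big-endian word layout assumed here, the subword at byte offset B
   within its aligned word sits 8 * (3 - B) bits above the least significant
   bit (QImode; for HImode it is 16 * (1 - H)).  E.g. a byte at offset 1
   gives shift = 0x18 ^ (1 << 3) = 16, i.e. bits 16..23 of the word; the
   rlwinm/xor pair below computes exactly that.  */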
14162 addrSI = force_reg (GET_MODE (XEXP (mem, 0)), XEXP (mem, 0));
14163 addrSI = force_reg (SImode, gen_lowpart_common (SImode, addrSI));
14164 shift = gen_reg_rtx (SImode);
14165 emit_insn (gen_rlwinm (shift, addrSI, GEN_INT (3),
14166 GEN_INT (shift_mask)));
14167 emit_insn (gen_xorsi3 (shift, shift, GEN_INT (shift_mask)));
14169 /* Shift and mask old value into position within word. */
14170 oldval = convert_modes (SImode, mode, oldval, 1);
14171 oldval = expand_binop (SImode, and_optab,
14172 oldval, GEN_INT (imask), NULL_RTX,
14173 1, OPTAB_LIB_WIDEN);
14174 emit_insn (gen_ashlsi3 (oldval, oldval, shift));
14176 /* Shift and mask new value into position within word. */
14177 newval = convert_modes (SImode, mode, newval, 1);
14178 newval = expand_binop (SImode, and_optab,
14179 newval, GEN_INT (imask), NULL_RTX,
14180 1, OPTAB_LIB_WIDEN);
14181 emit_insn (gen_ashlsi3 (newval, newval, shift));
14183 /* Mask for insertion. */
14184 mask = gen_reg_rtx (SImode);
14185 emit_move_insn (mask, GEN_INT (imask));
14186 emit_insn (gen_ashlsi3 (mask, mask, shift));
14188 /* Address of aligned word containing subword. */
14189 align = expand_binop (Pmode, and_optab, XEXP (mem, 0), GEN_INT (-4),
14190 NULL_RTX, 1, OPTAB_LIB_WIDEN);
14191 mem = change_address (mem, SImode, align);
14192 set_mem_align (mem, 32);
14193 MEM_VOLATILE_P (mem) = 1;
14195 wdst = gen_reg_rtx (SImode);
14196 emit_insn (gen_sync_compare_and_swapqhi_internal (wdst, mask,
14197 oldval, newval, mem));
14199 /* Shift the result back. */
14200 emit_insn (gen_lshrsi3 (wdst, wdst, shift));
14202 emit_move_insn (dst, gen_lowpart (mode, wdst));
14206 rs6000_split_compare_and_swapqhi (rtx dest, rtx mask,
14207 rtx oldval, rtx newval, rtx mem,
14210 rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
14212 emit_insn (gen_lwsync ());
14213 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
14214 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
14215 emit_label (XEXP (label1, 0));
14217 emit_load_locked (SImode, scratch, mem);
14219 /* Mask subword within loaded value for comparison with oldval.
14220 Use UNSPEC_AND to avoid clobber. */
14221 emit_insn (gen_rtx_SET (SImode, dest,
14222 gen_rtx_UNSPEC (SImode,
14223 gen_rtvec (2, scratch, mask),
14226 x = gen_rtx_COMPARE (CCmode, dest, oldval);
14227 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
14229 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
14230 emit_unlikely_jump (x, label2);
14232 /* Clear subword within loaded value for insertion of new value. */
14233 emit_insn (gen_rtx_SET (SImode, scratch,
14234 gen_rtx_AND (SImode,
14235 gen_rtx_NOT (SImode, mask), scratch)));
14236 emit_insn (gen_iorsi3 (scratch, scratch, newval));
14237 emit_store_conditional (SImode, cond, mem, scratch);
14239 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
14240 emit_unlikely_jump (x, label1);
14242 emit_insn (gen_isync ());
14243 emit_label (XEXP (label2, 0));
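/* Illustrative sketch, not part of the original code (all names here are
   hypothetical): the same word-wide trick expressed in plain C, building a
   byte-sized compare-and-swap out of a word-sized one with shifts and
   masks, much as the expander above does with rlwinm/xor and UNSPEC_AND.
   It assumes <stdint.h>, a 32-bit big-endian word, and uses the
   __sync_val_compare_and_swap builtin only as a stand-in for the
   larx/stcx. loop; it is kept under #if 0 so it is never compiled.  */
#if 0
static unsigned char
sketch_compare_and_swap_u8 (unsigned char *p, unsigned char oldv,
                            unsigned char newv)
{
  unsigned int *wp = (unsigned int *) ((uintptr_t) p & ~(uintptr_t) 3);
  unsigned int shift = 8 * (3 - ((uintptr_t) p & 3));   /* big-endian */
  unsigned int mask = 0xffu << shift;

  for (;;)
    {
      unsigned int cur = *wp;
      unsigned int expected = (cur & ~mask) | ((unsigned int) oldv << shift);
      unsigned int desired = (cur & ~mask) | ((unsigned int) newv << shift);
      unsigned int prev = __sync_val_compare_and_swap (wp, expected, desired);

      if (prev == expected)
        return oldv;                            /* swap succeeded */
      if (((prev & mask) >> shift) != oldv)
        return (prev & mask) >> shift;          /* genuine mismatch */
      /* Only neighbouring bytes changed; retry with the updated word.  */
    }
}
#endif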
14247 /* Emit instructions to move SRC to DST. Called by splitters for
14248 multi-register moves. It will emit at most one instruction for
14249 each register that is accessed; that is, it won't emit li/lis pairs
14250 (or equivalent for 64-bit code). One of SRC or DST must be a hard register. */
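/* For example, copying a four-register value from r3..r6 into r5..r8 must
   start with r6 -> r8: copying forwards would clobber r5 and r6 before they
   were read, which is why the loop below walks backwards whenever
   REGNO (src) < REGNO (dst).  */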
14254 rs6000_split_multireg_move (rtx dst, rtx src)
14256 /* The register number of the first register being moved. */
14258 /* The mode that is to be moved. */
14259 enum machine_mode mode;
14260 /* The mode that the move is being done in, and its size. */
14261 enum machine_mode reg_mode;
14263 /* The number of registers that will be moved. */
14266 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
14267 mode = GET_MODE (dst);
14268 nregs = hard_regno_nregs[reg][mode];
14269 if (FP_REGNO_P (reg))
14270 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
14271 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
14272 else if (ALTIVEC_REGNO_P (reg))
14273 reg_mode = V16QImode;
14274 else if (TARGET_E500_DOUBLE && mode == TFmode)
14277 reg_mode = word_mode;
14278 reg_mode_size = GET_MODE_SIZE (reg_mode);
14280 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
14282 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
14284 /* Move register range backwards, if we might have destructive overlap. */
14287 for (i = nregs - 1; i >= 0; i--)
14288 emit_insn (gen_rtx_SET (VOIDmode,
14289 simplify_gen_subreg (reg_mode, dst, mode,
14290 i * reg_mode_size),
14291 simplify_gen_subreg (reg_mode, src, mode,
14292 i * reg_mode_size)));
14298 bool used_update = false;
14300 if (MEM_P (src) && INT_REGNO_P (reg))
14304 if (GET_CODE (XEXP (src, 0)) == PRE_INC
14305 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
14308 breg = XEXP (XEXP (src, 0), 0);
14309 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
14310 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
14311 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
14312 emit_insn (TARGET_32BIT
14313 ? gen_addsi3 (breg, breg, delta_rtx)
14314 : gen_adddi3 (breg, breg, delta_rtx));
14315 src = replace_equiv_address (src, breg);
14317 else if (! rs6000_offsettable_memref_p (src))
14320 basereg = gen_rtx_REG (Pmode, reg);
14321 emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
14322 src = replace_equiv_address (src, basereg);
14325 breg = XEXP (src, 0);
14326 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
14327 breg = XEXP (breg, 0);
14329 /* If the base register we are using to address memory is
14330 also a destination reg, then change that register last. */
14332 && REGNO (breg) >= REGNO (dst)
14333 && REGNO (breg) < REGNO (dst) + nregs)
14334 j = REGNO (breg) - REGNO (dst);
14337 if (GET_CODE (dst) == MEM && INT_REGNO_P (reg))
14341 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
14342 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
14345 breg = XEXP (XEXP (dst, 0), 0);
14346 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
14347 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
14348 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
14350 /* We have to update the breg before doing the store.
14351 Use store with update, if available. */
14355 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
14356 emit_insn (TARGET_32BIT
14357 ? (TARGET_POWERPC64
14358 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
14359 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
14360 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
14361 used_update = true;
14364 emit_insn (TARGET_32BIT
14365 ? gen_addsi3 (breg, breg, delta_rtx)
14366 : gen_adddi3 (breg, breg, delta_rtx));
14367 dst = replace_equiv_address (dst, breg);
14370 gcc_assert (rs6000_offsettable_memref_p (dst));
14373 for (i = 0; i < nregs; i++)
14375 /* Calculate index to next subword. */
14380 /* If compiler already emitted move of first word by
14381 store with update, no need to do anything. */
14382 if (j == 0 && used_update)
14385 emit_insn (gen_rtx_SET (VOIDmode,
14386 simplify_gen_subreg (reg_mode, dst, mode,
14387 j * reg_mode_size),
14388 simplify_gen_subreg (reg_mode, src, mode,
14389 j * reg_mode_size)));
14395 /* This page contains routines that are used to determine what the
14396 function prologue and epilogue code will do and write them out. */
14398 /* Return the first fixed-point register that is required to be
14399 saved. 32 if none. */
14402 first_reg_to_save (void)
14406 /* Find lowest numbered live register. */
14407 for (first_reg = 13; first_reg <= 31; first_reg++)
14408 if (df_regs_ever_live_p (first_reg)
14409 && (! call_used_regs[first_reg]
14410 || (first_reg == RS6000_PIC_OFFSET_TABLE_REGNUM
14411 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
14412 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
14413 || (TARGET_TOC && TARGET_MINIMAL_TOC)))))
14418 && crtl->uses_pic_offset_table
14419 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
14420 return RS6000_PIC_OFFSET_TABLE_REGNUM;
14426 /* Similar, for FP regs. */
14429 first_fp_reg_to_save (void)
14433 /* Find lowest numbered live register. */
14434 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
14435 if (df_regs_ever_live_p (first_reg))
14441 /* Similar, for AltiVec regs. */
14444 first_altivec_reg_to_save (void)
14448 /* Stack frame remains as is unless we are in AltiVec ABI. */
14449 if (! TARGET_ALTIVEC_ABI)
14450 return LAST_ALTIVEC_REGNO + 1;
14452 /* On Darwin, the unwind routines are compiled without
14453 TARGET_ALTIVEC, and use save_world to save/restore the
14454 altivec registers when necessary. */
14455 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
14456 && ! TARGET_ALTIVEC)
14457 return FIRST_ALTIVEC_REGNO + 20;
14459 /* Find lowest numbered live register. */
14460 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
14461 if (df_regs_ever_live_p (i))
14467 /* Return a 32-bit mask of the AltiVec registers we need to set in
14468 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
14469 the 32-bit word is 0. */
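/* With that numbering, a live Vn sets bit (1 << (31 - n)) of the mask; e.g.
   if only V20 is live the result is 0x800.  */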
14471 static unsigned int
14472 compute_vrsave_mask (void)
14474 unsigned int i, mask = 0;
14476 /* On Darwin, the unwind routines are compiled without
14477 TARGET_ALTIVEC, and use save_world to save/restore the
14478 call-saved altivec registers when necessary. */
14479 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
14480 && ! TARGET_ALTIVEC)
14483 /* First, find out if we use _any_ altivec registers. */
14484 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
14485 if (df_regs_ever_live_p (i))
14486 mask |= ALTIVEC_REG_BIT (i);
14491 /* Next, remove the argument registers from the set. These must
14492 be in the VRSAVE mask set by the caller, so we don't need to add
14493 them in again. More importantly, the mask we compute here is
14494 used to generate CLOBBERs in the set_vrsave insn, and we do not
14495 wish the argument registers to die. */
14496 for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
14497 mask &= ~ALTIVEC_REG_BIT (i);
14499 /* Similarly, remove the return value from the set. */
14502 diddle_return_value (is_altivec_return_reg, &yes);
14504 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
14510 /* For a very restricted set of circumstances, we can cut down the
14511 size of prologues/epilogues by calling our own save/restore-the-world routines. */
14515 compute_save_world_info (rs6000_stack_t *info_ptr)
14517 info_ptr->world_save_p = 1;
14518 info_ptr->world_save_p
14519 = (WORLD_SAVE_P (info_ptr)
14520 && DEFAULT_ABI == ABI_DARWIN
14521 && ! (cfun->calls_setjmp && flag_exceptions)
14522 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
14523 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
14524 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
14525 && info_ptr->cr_save_p);
14527 /* This will not work in conjunction with sibcalls. Make sure there
14528 are none. (This check is expensive, but seldom executed.) */
14529 if (WORLD_SAVE_P (info_ptr))
14532 for ( insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
14533 if ( GET_CODE (insn) == CALL_INSN
14534 && SIBLING_CALL_P (insn))
14536 info_ptr->world_save_p = 0;
14541 if (WORLD_SAVE_P (info_ptr))
14543 /* Even if we're not touching VRsave, make sure there's room on the
14544 stack for it, if it looks like we're calling SAVE_WORLD, which
14545 will attempt to save it. */
14546 info_ptr->vrsave_size = 4;
14548 /* If we are going to save the world, we need to save the link register too. */
14549 info_ptr->lr_save_p = 1;
14551 /* "Save" the VRsave register too if we're saving the world. */
14552 if (info_ptr->vrsave_mask == 0)
14553 info_ptr->vrsave_mask = compute_vrsave_mask ();
14555 /* Because the Darwin register save/restore routines only handle
14556 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency check. */
14558 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
14559 && (info_ptr->first_altivec_reg_save
14560 >= FIRST_SAVED_ALTIVEC_REGNO));
14567 is_altivec_return_reg (rtx reg, void *xyes)
14569 bool *yes = (bool *) xyes;
14570 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
14575 /* Calculate the stack information for the current function. This is
14576 complicated by having two separate calling sequences, the AIX calling
14577 sequence and the V.4 calling sequence.
14579 AIX (and Darwin/Mac OS X) stack frames look like:
14581 SP----> +---------------------------------------+
14582 | back chain to caller | 0 0
14583 +---------------------------------------+
14584 | saved CR | 4 8 (8-11)
14585 +---------------------------------------+
14586 | saved LR | 8 16
14587 +---------------------------------------+
14588 | reserved for compilers | 12 24
14589 +---------------------------------------+
14590 | reserved for binders | 16 32
14591 +---------------------------------------+
14592 | saved TOC pointer | 20 40
14593 +---------------------------------------+
14594 | Parameter save area (P) | 24 48
14595 +---------------------------------------+
14596 | Alloca space (A) | 24+P etc.
14597 +---------------------------------------+
14598 | Local variable space (L) | 24+P+A
14599 +---------------------------------------+
14600 | Float/int conversion temporary (X) | 24+P+A+L
14601 +---------------------------------------+
14602 | Save area for AltiVec registers (W) | 24+P+A+L+X
14603 +---------------------------------------+
14604 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
14605 +---------------------------------------+
14606 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
14607 +---------------------------------------+
14608 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
14609 +---------------------------------------+
14610 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
14611 +---------------------------------------+
14612 old SP->| back chain to caller's caller |
14613 +---------------------------------------+
14615 The required alignment for AIX configurations is two words (i.e., 8 or 16 bytes).
14619 V.4 stack frames look like:
14621 SP----> +---------------------------------------+
14622 | back chain to caller | 0
14623 +---------------------------------------+
14624 | caller's saved LR | 4
14625 +---------------------------------------+
14626 | Parameter save area (P) | 8
14627 +---------------------------------------+
14628 | Alloca space (A) | 8+P
14629 +---------------------------------------+
14630 | Varargs save area (V) | 8+P+A
14631 +---------------------------------------+
14632 | Local variable space (L) | 8+P+A+V
14633 +---------------------------------------+
14634 | Float/int conversion temporary (X) | 8+P+A+V+L
14635 +---------------------------------------+
14636 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
14637 +---------------------------------------+
14638 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
14639 +---------------------------------------+
14640 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
14641 +---------------------------------------+
14642 | SPE: area for 64-bit GP registers |
14643 +---------------------------------------+
14644 | SPE alignment padding |
14645 +---------------------------------------+
14646 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
14647 +---------------------------------------+
14648 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
14649 +---------------------------------------+
14650 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
14651 +---------------------------------------+
14652 old SP->| back chain to caller's caller |
14653 +---------------------------------------+
14655 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
14656 given. (But note below and in sysv4.h that we require only 8 and
14657 may round up the size of our stack frame anyways. The historical
14658 reason is early versions of powerpc-linux which didn't properly
14659 align the stack at program startup. A happy side-effect is that
14660 -mno-eabi libraries can be used with -meabi programs.)
14662 The EABI configuration defaults to the V.4 layout. However,
14663 the stack alignment requirements may differ. If -mno-eabi is not
14664 given, the required stack alignment is 8 bytes; if -mno-eabi is
14665 given, the required alignment is 16 bytes. (But see V.4 comment above.) */
14668 #ifndef ABI_STACK_BOUNDARY
14669 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
14672 static rs6000_stack_t *
14673 rs6000_stack_info (void)
14675 static rs6000_stack_t info;
14676 rs6000_stack_t *info_ptr = &info;
14677 int reg_size = TARGET_32BIT ? 4 : 8;
14681 HOST_WIDE_INT non_fixed_size;
14683 memset (&info, 0, sizeof (info));
14687 /* Cache value so we don't rescan instruction chain over and over. */
14688 if (cfun->machine->insn_chain_scanned_p == 0)
14689 cfun->machine->insn_chain_scanned_p
14690 = spe_func_has_64bit_regs_p () + 1;
14691 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
14694 /* Select which calling sequence. */
14695 info_ptr->abi = DEFAULT_ABI;
14697 /* Calculate which registers need to be saved & save area size. */
14698 info_ptr->first_gp_reg_save = first_reg_to_save ();
14699 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
14700 even if it currently looks like we won't. Reload may need it to
14701 get at a constant; if so, it will have already created a constant
14702 pool entry for it. */
14703 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
14704 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
14705 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
14706 && crtl->uses_const_pool
14707 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
14708 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
14710 first_gp = info_ptr->first_gp_reg_save;
14712 info_ptr->gp_size = reg_size * (32 - first_gp);
14714 /* For the SPE, we have an additional upper 32-bits on each GPR.
14715 Ideally we should save the entire 64-bits only when the upper
14716 half is used in SIMD instructions. Since we only record
14717 registers live (not the size they are used in), this proves
14718 difficult because we'd have to traverse the instruction chain at
14719 the right time, taking reload into account. This is a real pain,
14720 so we opt to save the GPRs in 64-bits whenever even one register
14721 gets used in 64-bits. Otherwise, all the registers in the frame
14722 get saved in 32-bits.
14724 So, when we save all GPRs (except the SP) in 64-bits, the
14725 traditional GP save area will be empty.
14726 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
14727 info_ptr->gp_size = 0;
14729 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
14730 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
14732 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
14733 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
14734 - info_ptr->first_altivec_reg_save);
14736 /* Does this function call anything? */
14737 info_ptr->calls_p = (! current_function_is_leaf
14738 || cfun->machine->ra_needs_full_frame);
14740 /* Determine if we need to save the link register. */
14741 if ((DEFAULT_ABI == ABI_AIX
14743 && !TARGET_PROFILE_KERNEL)
14744 #ifdef TARGET_RELOCATABLE
14745 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
14747 || (info_ptr->first_fp_reg_save != 64
14748 && !FP_SAVE_INLINE (info_ptr->first_fp_reg_save))
14749 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
14750 || info_ptr->calls_p
14751 || rs6000_ra_ever_killed ())
14753 info_ptr->lr_save_p = 1;
14754 df_set_regs_ever_live (LR_REGNO, true);
14757 /* Determine if we need to save the condition code registers. */
14758 if (df_regs_ever_live_p (CR2_REGNO)
14759 || df_regs_ever_live_p (CR3_REGNO)
14760 || df_regs_ever_live_p (CR4_REGNO))
14762 info_ptr->cr_save_p = 1;
14763 if (DEFAULT_ABI == ABI_V4)
14764 info_ptr->cr_size = reg_size;
14767 /* If the current function calls __builtin_eh_return, then we need
14768 to allocate stack space for registers that will hold data for
14769 the exception handler. */
14770 if (crtl->calls_eh_return)
14773 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
14776 /* SPE saves EH registers in 64-bits. */
14777 ehrd_size = i * (TARGET_SPE_ABI
14778 && info_ptr->spe_64bit_regs_used != 0
14779 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
14784 /* Determine various sizes. */
14785 info_ptr->reg_size = reg_size;
14786 info_ptr->fixed_size = RS6000_SAVE_AREA;
14787 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
14788 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
14789 TARGET_ALTIVEC ? 16 : 8);
14790 if (FRAME_GROWS_DOWNWARD)
14791 info_ptr->vars_size
14792 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
14793 + info_ptr->parm_size,
14794 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
14795 - (info_ptr->fixed_size + info_ptr->vars_size
14796 + info_ptr->parm_size);
14798 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
14799 info_ptr->spe_gp_size = 8 * (32 - first_gp);
14801 info_ptr->spe_gp_size = 0;
14803 if (TARGET_ALTIVEC_ABI)
14804 info_ptr->vrsave_mask = compute_vrsave_mask ();
14806 info_ptr->vrsave_mask = 0;
14808 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
14809 info_ptr->vrsave_size = 4;
14811 info_ptr->vrsave_size = 0;
14813 compute_save_world_info (info_ptr);
14815 /* Calculate the offsets. */
14816 switch (DEFAULT_ABI)
14820 gcc_unreachable ();
14824 info_ptr->fp_save_offset = - info_ptr->fp_size;
14825 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
14827 if (TARGET_ALTIVEC_ABI)
14829 info_ptr->vrsave_save_offset
14830 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
14832 /* Align stack so vector save area is on a quadword boundary.
14833 The padding goes above the vectors. */
14834 if (info_ptr->altivec_size != 0)
14835 info_ptr->altivec_padding_size
14836 = info_ptr->vrsave_save_offset & 0xF;
14838 info_ptr->altivec_padding_size = 0;
14840 info_ptr->altivec_save_offset
14841 = info_ptr->vrsave_save_offset
14842 - info_ptr->altivec_padding_size
14843 - info_ptr->altivec_size;
14844 gcc_assert (info_ptr->altivec_size == 0
14845 || info_ptr->altivec_save_offset % 16 == 0);
14847 /* Adjust for AltiVec case. */
14848 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
14851 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
14852 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
14853 info_ptr->lr_save_offset = 2*reg_size;
14857 info_ptr->fp_save_offset = - info_ptr->fp_size;
14858 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
14859 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
14861 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
14863 /* Align stack so SPE GPR save area is aligned on a
14864 double-word boundary. */
14865 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
14866 info_ptr->spe_padding_size
14867 = 8 - (-info_ptr->cr_save_offset % 8);
14869 info_ptr->spe_padding_size = 0;
14871 info_ptr->spe_gp_save_offset
14872 = info_ptr->cr_save_offset
14873 - info_ptr->spe_padding_size
14874 - info_ptr->spe_gp_size;
14876 /* Adjust for SPE case. */
14877 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
14879 else if (TARGET_ALTIVEC_ABI)
14881 info_ptr->vrsave_save_offset
14882 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
14884 /* Align stack so vector save area is on a quadword boundary. */
14885 if (info_ptr->altivec_size != 0)
14886 info_ptr->altivec_padding_size
14887 = 16 - (-info_ptr->vrsave_save_offset % 16);
14889 info_ptr->altivec_padding_size = 0;
14891 info_ptr->altivec_save_offset
14892 = info_ptr->vrsave_save_offset
14893 - info_ptr->altivec_padding_size
14894 - info_ptr->altivec_size;
14896 /* Adjust for AltiVec case. */
14897 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
14900 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
14901 info_ptr->ehrd_offset -= ehrd_size;
14902 info_ptr->lr_save_offset = reg_size;
14906 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
14907 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
14908 + info_ptr->gp_size
14909 + info_ptr->altivec_size
14910 + info_ptr->altivec_padding_size
14911 + info_ptr->spe_gp_size
14912 + info_ptr->spe_padding_size
14914 + info_ptr->cr_size
14915 + info_ptr->vrsave_size,
14918 non_fixed_size = (info_ptr->vars_size
14919 + info_ptr->parm_size
14920 + info_ptr->save_size);
14922 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
14923 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
14925 /* Determine if we need to allocate any stack frame:
14927 For AIX we need to push the stack if a frame pointer is needed
14928 (because the stack might be dynamically adjusted), if we are
14929 debugging, if we make calls, or if the sum of fp_save, gp_save,
14930 and local variables are more than the space needed to save all
14931 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
14932 + 18*8 = 288 (GPR13 reserved).
14934 For V.4 we don't have the stack cushion that AIX uses, but assume
14935 that the debugger can handle stackless frames. */
14937 if (info_ptr->calls_p)
14938 info_ptr->push_p = 1;
14940 else if (DEFAULT_ABI == ABI_V4)
14941 info_ptr->push_p = non_fixed_size != 0;
14943 else if (frame_pointer_needed)
14944 info_ptr->push_p = 1;
14946 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
14947 info_ptr->push_p = 1;
14950 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
14952 /* Zero offsets if we're not saving those registers. */
14953 if (info_ptr->fp_size == 0)
14954 info_ptr->fp_save_offset = 0;
14956 if (info_ptr->gp_size == 0)
14957 info_ptr->gp_save_offset = 0;
14959 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
14960 info_ptr->altivec_save_offset = 0;
14962 if (! TARGET_ALTIVEC_ABI || info_ptr->vrsave_mask == 0)
14963 info_ptr->vrsave_save_offset = 0;
14965 if (! TARGET_SPE_ABI
14966 || info_ptr->spe_64bit_regs_used == 0
14967 || info_ptr->spe_gp_size == 0)
14968 info_ptr->spe_gp_save_offset = 0;
14970 if (! info_ptr->lr_save_p)
14971 info_ptr->lr_save_offset = 0;
14973 if (! info_ptr->cr_save_p)
14974 info_ptr->cr_save_offset = 0;
14979 /* Return true if the current function uses any GPRs in 64-bit SIMD fashion. */
14983 spe_func_has_64bit_regs_p (void)
14987 /* Functions that save and restore all the call-saved registers will
14988 need to save/restore the registers in 64-bits. */
14989 if (crtl->calls_eh_return
14990 || cfun->calls_setjmp
14991 || crtl->has_nonlocal_goto)
14994 insns = get_insns ();
14996 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
15002 /* FIXME: This should be implemented with attributes...
15004 (set_attr "spe64" "true")....then,
15005 if (get_spe64(insn)) return true;
15007 It's the only reliable way to do the stuff below. */
15009 i = PATTERN (insn);
15010 if (GET_CODE (i) == SET)
15012 enum machine_mode mode = GET_MODE (SET_SRC (i));
15014 if (SPE_VECTOR_MODE (mode))
15016 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
15026 debug_stack_info (rs6000_stack_t *info)
15028 const char *abi_string;
15031 info = rs6000_stack_info ();
15033 fprintf (stderr, "\nStack information for function %s:\n",
15034 ((current_function_decl && DECL_NAME (current_function_decl))
15035 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
15040 default: abi_string = "Unknown"; break;
15041 case ABI_NONE: abi_string = "NONE"; break;
15042 case ABI_AIX: abi_string = "AIX"; break;
15043 case ABI_DARWIN: abi_string = "Darwin"; break;
15044 case ABI_V4: abi_string = "V.4"; break;
15047 fprintf (stderr, "\tABI = %5s\n", abi_string);
15049 if (TARGET_ALTIVEC_ABI)
15050 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
15052 if (TARGET_SPE_ABI)
15053 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
15055 if (info->first_gp_reg_save != 32)
15056 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
15058 if (info->first_fp_reg_save != 64)
15059 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
15061 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
15062 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
15063 info->first_altivec_reg_save);
15065 if (info->lr_save_p)
15066 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
15068 if (info->cr_save_p)
15069 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
15071 if (info->vrsave_mask)
15072 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
15075 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
15078 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
15080 if (info->gp_save_offset)
15081 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
15083 if (info->fp_save_offset)
15084 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
15086 if (info->altivec_save_offset)
15087 fprintf (stderr, "\taltivec_save_offset = %5d\n",
15088 info->altivec_save_offset);
15090 if (info->spe_gp_save_offset)
15091 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
15092 info->spe_gp_save_offset);
15094 if (info->vrsave_save_offset)
15095 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
15096 info->vrsave_save_offset);
15098 if (info->lr_save_offset)
15099 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
15101 if (info->cr_save_offset)
15102 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
15104 if (info->varargs_save_offset)
15105 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
15107 if (info->total_size)
15108 fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
15111 if (info->vars_size)
15112 fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
15115 if (info->parm_size)
15116 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
15118 if (info->fixed_size)
15119 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
15122 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
15124 if (info->spe_gp_size)
15125 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
15128 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
15130 if (info->altivec_size)
15131 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
15133 if (info->vrsave_size)
15134 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
15136 if (info->altivec_padding_size)
15137 fprintf (stderr, "\taltivec_padding_size= %5d\n",
15138 info->altivec_padding_size);
15140 if (info->spe_padding_size)
15141 fprintf (stderr, "\tspe_padding_size = %5d\n",
15142 info->spe_padding_size);
15145 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
15147 if (info->save_size)
15148 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
15150 if (info->reg_size != 4)
15151 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
15153 fprintf (stderr, "\n");
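/* For illustration only, a dump produced by the fprintf calls above
   (triggered when TARGET_DEBUG_STACK is set) looks roughly like the
   following for a small AIX function; the values are invented and only
   non-zero/non-default fields are printed:

	Stack information for function foo:
		ABI                 =   AIX
		lr_save_p           =     1
		push_p              =     1
		calls_p             =     1
		lr_save_offset      =     8
		total_size          =   128
		save_size           =    16
*/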
15157 rs6000_return_addr (int count, rtx frame)
15159 /* Currently we don't optimize very well between prolog and body
15160 code and for PIC code the code can be actually quite bad, so
15161 don't try to be too clever here. */
15162 if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
15164 cfun->machine->ra_needs_full_frame = 1;
15171 plus_constant (copy_to_reg
15172 (gen_rtx_MEM (Pmode,
15173 memory_address (Pmode, frame))),
15174 RETURN_ADDRESS_OFFSET)));
15177 cfun->machine->ra_need_lr = 1;
15178 return get_hard_reg_initial_val (Pmode, LR_REGNO);
15181 /* Say whether a function is a candidate for sibcall handling or not.
15182 We do not allow indirect calls to be optimized into sibling calls.
15183 Also, we can't do it if there are any vector parameters; there's
15184 nowhere to put the VRsave code so it works; note that functions with
15185 vector parameters are required to have a prototype, so the argument
15186 type info must be available here. (The tail recursion case can work
15187 with vector parameters, but there's no way to distinguish here.) */
15189 rs6000_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
15194 if (TARGET_ALTIVEC_VRSAVE)
15196 for (type = TYPE_ARG_TYPES (TREE_TYPE (decl));
15197 type; type = TREE_CHAIN (type))
15199 if (TREE_CODE (TREE_VALUE (type)) == VECTOR_TYPE)
15203 if (DEFAULT_ABI == ABI_DARWIN
15204 || ((*targetm.binds_local_p) (decl)
15205 && (DEFAULT_ABI != ABI_AIX || !DECL_EXTERNAL (decl))))
15207 tree attr_list = TYPE_ATTRIBUTES (TREE_TYPE (decl));
15209 if (!lookup_attribute ("longcall", attr_list)
15210 || lookup_attribute ("shortcall", attr_list))
15217 /* NULL if INSN is valid within a low-overhead loop.
15218 Otherwise return why doloop cannot be applied.
15219 PowerPC uses the COUNT register for branch on table instructions. */
15221 static const char *
15222 rs6000_invalid_within_doloop (const_rtx insn)
15225 return "Function call in the loop.";
15228 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
15229 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
15230 return "Computed branch in the loop.";
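/* Illustrative (assumed) example: a counted loop such as

       for (i = 0; i < n; i++)
	 sum += f (i);

   is rejected by the function-call check above because of the call to f,
   while the same loop with a straight-line body can be converted into a
   CTR-decrementing (bdnz) low-overhead loop.  Dispatch-table branches are
   rejected because the branch-on-table sequence itself needs the COUNT
   register.  */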
15236 rs6000_ra_ever_killed (void)
15242 if (crtl->is_thunk)
15245 /* regs_ever_live has LR marked as used if any sibcalls are present,
15246 but this should not force saving and restoring in the
15247 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
15248 clobbers LR, so that is inappropriate. */
15250 /* Also, the prologue can generate a store into LR that
15251 doesn't really count, like this:
15254 bcl to set PIC register
15258 When we're called from the epilogue, we need to avoid counting
15259 this as a store. */
15261 push_topmost_sequence ();
15262 top = get_insns ();
15263 pop_topmost_sequence ();
15264 reg = gen_rtx_REG (Pmode, LR_REGNO);
15266 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
15272 if (!SIBLING_CALL_P (insn))
15275 else if (find_regno_note (insn, REG_INC, LR_REGNO))
15277 else if (set_of (reg, insn) != NULL_RTX
15278 && !prologue_epilogue_contains (insn))
15285 /* Emit instructions needed to load the TOC register.
15286 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
15287 a constant pool; or for SVR4 -fpic. */
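/* For orientation, a hedged sketch of what the 32-bit SVR4 secure-PLT
   PIC case below assembles to (register numbers, label names and exact
   mnemonics are illustrative; the real code comes from the
   load_toc_v4_PIC_* patterns):

	bcl 20,31,.LCF0
   .LCF0:
	mflr 30
	addis 30,30,(.LCTOC1-.LCF0)@ha
	addi  30,30,(.LCTOC1-.LCF0)@l

   i.e. a PC-relative branch-and-link to obtain the current address,
   followed by materializing the GOT/TOC base into the PIC register.  */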
15290 rs6000_emit_load_toc_table (int fromprolog)
15293 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
15295 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
15298 rtx lab, tmp1, tmp2, got;
15300 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
15301 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
15303 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
15305 got = rs6000_got_sym ();
15306 tmp1 = tmp2 = dest;
15309 tmp1 = gen_reg_rtx (Pmode);
15310 tmp2 = gen_reg_rtx (Pmode);
15312 emit_insn (gen_load_toc_v4_PIC_1 (lab));
15313 emit_move_insn (tmp1,
15314 gen_rtx_REG (Pmode, LR_REGNO));
15315 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
15316 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
15318 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
15320 emit_insn (gen_load_toc_v4_pic_si ());
15321 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
15323 else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
15326 rtx temp0 = (fromprolog
15327 ? gen_rtx_REG (Pmode, 0)
15328 : gen_reg_rtx (Pmode));
15334 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
15335 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
15337 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
15338 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
15340 emit_insn (gen_load_toc_v4_PIC_1 (symF));
15341 emit_move_insn (dest,
15342 gen_rtx_REG (Pmode, LR_REGNO));
15343 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
15349 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
15350 emit_insn (gen_load_toc_v4_PIC_1b (tocsym));
15351 emit_move_insn (dest,
15352 gen_rtx_REG (Pmode, LR_REGNO));
15353 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
15355 emit_insn (gen_addsi3 (dest, temp0, dest));
15357 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
15359 /* This is for AIX code running in non-PIC ELF32. */
15362 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
15363 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
15365 emit_insn (gen_elf_high (dest, realsym));
15366 emit_insn (gen_elf_low (dest, dest, realsym));
15370 gcc_assert (DEFAULT_ABI == ABI_AIX);
15373 emit_insn (gen_load_toc_aix_si (dest));
15375 emit_insn (gen_load_toc_aix_di (dest));
15379 /* Emit instructions to restore the link register after determining where
15380 its value has been stored. */
15383 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
15385 rs6000_stack_t *info = rs6000_stack_info ();
15388 operands[0] = source;
15389 operands[1] = scratch;
15391 if (info->lr_save_p)
15393 rtx frame_rtx = stack_pointer_rtx;
15394 HOST_WIDE_INT sp_offset = 0;
15397 if (frame_pointer_needed
15398 || cfun->calls_alloca
15399 || info->total_size > 32767)
15401 tmp = gen_frame_mem (Pmode, frame_rtx);
15402 emit_move_insn (operands[1], tmp);
15403 frame_rtx = operands[1];
15405 else if (info->push_p)
15406 sp_offset = info->total_size;
15408 tmp = plus_constant (frame_rtx, info->lr_save_offset + sp_offset);
15409 tmp = gen_frame_mem (Pmode, tmp);
15410 emit_move_insn (tmp, operands[0]);
15413 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
15416 static GTY(()) alias_set_type set = -1;
15419 get_TOC_alias_set (void)
15422 set = new_alias_set ();
15426 /* This returns nonzero if the current function uses the TOC. This is
15427 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
15428 is generated by the ABI_V4 load_toc_* patterns. */
15435 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
15438 rtx pat = PATTERN (insn);
15441 if (GET_CODE (pat) == PARALLEL)
15442 for (i = 0; i < XVECLEN (pat, 0); i++)
15444 rtx sub = XVECEXP (pat, 0, i);
15445 if (GET_CODE (sub) == USE)
15447 sub = XEXP (sub, 0);
15448 if (GET_CODE (sub) == UNSPEC
15449 && XINT (sub, 1) == UNSPEC_TOC)
15459 create_TOC_reference (rtx symbol)
15461 if (!can_create_pseudo_p ())
15462 df_set_regs_ever_live (TOC_REGISTER, true);
15463 return gen_rtx_PLUS (Pmode,
15464 gen_rtx_REG (Pmode, TOC_REGISTER),
15465 gen_rtx_CONST (Pmode,
15466 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_TOCREL)));
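/* For example, on a 32-bit target the RTL built above has the shape

     (plus:SI (reg:SI 2)
	      (const:SI (unspec:SI [(symbol_ref:SI ("x"))] UNSPEC_TOCREL)))

   i.e. the address of the (illustrative) symbol "x" expressed relative
   to the TOC pointer in r2.  */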
15469 /* If _Unwind_* has been called from within the same module,
15470 the TOC register is not guaranteed to be saved to 40(1) on function
15471 entry. Save it there in that case. */
15474 rs6000_aix_emit_builtin_unwind_init (void)
15477 rtx stack_top = gen_reg_rtx (Pmode);
15478 rtx opcode_addr = gen_reg_rtx (Pmode);
15479 rtx opcode = gen_reg_rtx (SImode);
15480 rtx tocompare = gen_reg_rtx (SImode);
15481 rtx no_toc_save_needed = gen_label_rtx ();
15483 mem = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
15484 emit_move_insn (stack_top, mem);
15486 mem = gen_frame_mem (Pmode,
15487 gen_rtx_PLUS (Pmode, stack_top,
15488 GEN_INT (2 * GET_MODE_SIZE (Pmode))));
15489 emit_move_insn (opcode_addr, mem);
15490 emit_move_insn (opcode, gen_rtx_MEM (SImode, opcode_addr));
15491 emit_move_insn (tocompare, gen_int_mode (TARGET_32BIT ? 0x80410014
15492 : 0xE8410028, SImode));
15494 do_compare_rtx_and_jump (opcode, tocompare, EQ, 1,
15495 SImode, NULL_RTX, NULL_RTX,
15496 no_toc_save_needed);
15498 mem = gen_frame_mem (Pmode,
15499 gen_rtx_PLUS (Pmode, stack_top,
15500 GEN_INT (5 * GET_MODE_SIZE (Pmode))));
15501 emit_move_insn (mem, gen_rtx_REG (Pmode, 2));
15502 emit_label (no_toc_save_needed);
15505 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
15506 and the change to the stack pointer. */
15509 rs6000_emit_stack_tie (void)
15511 rtx mem = gen_frame_mem (BLKmode,
15512 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
15514 emit_insn (gen_stack_tie (mem));
15517 /* Emit the correct code for allocating stack space, as insns.
15518 If COPY_R12, make sure a copy of the old frame is left in r12.
15519 If COPY_R11, make sure a copy of the old frame is left in r11,
15520 in preference to r12 if COPY_R12.
15521 The generated code may use hard register 0 as a temporary. */
15524 rs6000_emit_allocate_stack (HOST_WIDE_INT size, int copy_r12, int copy_r11)
15527 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
15528 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
15529 rtx todec = gen_int_mode (-size, Pmode);
15531 if (INTVAL (todec) != -size)
15533 warning (0, "stack frame too large");
15534 emit_insn (gen_trap ());
15538 if (crtl->limit_stack)
15540 if (REG_P (stack_limit_rtx)
15541 && REGNO (stack_limit_rtx) > 1
15542 && REGNO (stack_limit_rtx) <= 31)
15544 emit_insn (TARGET_32BIT
15545 ? gen_addsi3 (tmp_reg,
15548 : gen_adddi3 (tmp_reg,
15552 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
15555 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
15557 && DEFAULT_ABI == ABI_V4)
15559 rtx toload = gen_rtx_CONST (VOIDmode,
15560 gen_rtx_PLUS (Pmode,
15564 emit_insn (gen_elf_high (tmp_reg, toload));
15565 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
15566 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
15570 warning (0, "stack limit expression is not supported");
15573 if (copy_r12 || copy_r11 || ! TARGET_UPDATE)
15574 emit_move_insn (copy_r11
15575 ? gen_rtx_REG (Pmode, 11)
15576 : gen_rtx_REG (Pmode, 12),
15585 /* Need a note here so that try_split doesn't get confused. */
15586 if (get_last_insn () == NULL_RTX)
15587 emit_note (NOTE_INSN_DELETED);
15588 insn = emit_move_insn (tmp_reg, todec);
15589 try_split (PATTERN (insn), insn, 0);
15593 insn = emit_insn (TARGET_32BIT
15594 ? gen_movsi_update (stack_reg, stack_reg,
15596 : gen_movdi_di_update (stack_reg, stack_reg,
15597 todec, stack_reg));
15598 /* Since we didn't use gen_frame_mem to generate the MEM, grab
15599 it now and set the alias set/attributes. The above gen_*_update
15600 calls will generate a PARALLEL with the MEM set being the first operation.  */
15602 par = PATTERN (insn);
15603 gcc_assert (GET_CODE (par) == PARALLEL);
15604 set = XVECEXP (par, 0, 0);
15605 gcc_assert (GET_CODE (set) == SET);
15606 mem = SET_DEST (set);
15607 gcc_assert (MEM_P (mem));
15608 MEM_NOTRAP_P (mem) = 1;
15609 set_mem_alias_set (mem, get_frame_alias_set ());
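      /* Illustration (assumed frame size): for a 64-byte 32-bit frame the
	 PARALLEL built above typically corresponds to the single
	 instruction

	     stwu 1,-64(1)

	 which stores the back chain and decrements the stack pointer in
	 one step (stdu in the 64-bit case).  */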
15613 insn = emit_insn (TARGET_32BIT
15614 ? gen_addsi3 (stack_reg, stack_reg, todec)
15615 : gen_adddi3 (stack_reg, stack_reg, todec));
15616 emit_move_insn (gen_frame_mem (Pmode, stack_reg),
15618 ? gen_rtx_REG (Pmode, 11)
15619 : gen_rtx_REG (Pmode, 12));
15622 RTX_FRAME_RELATED_P (insn) = 1;
15624 gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
15625 gen_rtx_SET (VOIDmode, stack_reg,
15626 gen_rtx_PLUS (Pmode, stack_reg,
15631 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
15632 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
15633 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
15634 deduce these equivalences by itself so it wasn't necessary to hold
15635 its hand so much. */
15638 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
15639 rtx reg2, rtx rreg)
15643 /* copy_rtx will not make unique copies of registers, so we need to
15644 ensure we don't have unwanted sharing here. */
15646 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
15649 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
15651 real = copy_rtx (PATTERN (insn));
15653 if (reg2 != NULL_RTX)
15654 real = replace_rtx (real, reg2, rreg);
15656 real = replace_rtx (real, reg,
15657 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
15658 STACK_POINTER_REGNUM),
15661 /* We expect that 'real' is either a SET or a PARALLEL containing
15662 SETs (and possibly other stuff). In a PARALLEL, all the SETs
15663 are important so they all have to be marked RTX_FRAME_RELATED_P. */
15665 if (GET_CODE (real) == SET)
15669 temp = simplify_rtx (SET_SRC (set));
15671 SET_SRC (set) = temp;
15672 temp = simplify_rtx (SET_DEST (set));
15674 SET_DEST (set) = temp;
15675 if (GET_CODE (SET_DEST (set)) == MEM)
15677 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
15679 XEXP (SET_DEST (set), 0) = temp;
15686 gcc_assert (GET_CODE (real) == PARALLEL);
15687 for (i = 0; i < XVECLEN (real, 0); i++)
15688 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
15690 rtx set = XVECEXP (real, 0, i);
15692 temp = simplify_rtx (SET_SRC (set));
15694 SET_SRC (set) = temp;
15695 temp = simplify_rtx (SET_DEST (set));
15697 SET_DEST (set) = temp;
15698 if (GET_CODE (SET_DEST (set)) == MEM)
15700 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
15702 XEXP (SET_DEST (set), 0) = temp;
15704 RTX_FRAME_RELATED_P (set) = 1;
15708 RTX_FRAME_RELATED_P (insn) = 1;
15709 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
15714 /* Returns an insn that has a vrsave set operation with the
15715 appropriate CLOBBERs. */
15718 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
15721 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
15722 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
15725 = gen_rtx_SET (VOIDmode,
15727 gen_rtx_UNSPEC_VOLATILE (SImode,
15728 gen_rtvec (2, reg, vrsave),
15729 UNSPECV_SET_VRSAVE));
15733 /* We need to clobber the registers in the mask so the scheduler
15734 does not move sets to VRSAVE before sets of AltiVec registers.
15736 However, if the function receives nonlocal gotos, reload will set
15737 all call saved registers live. We will end up with:
15739 (set (reg 999) (mem))
15740 (parallel [ (set (reg vrsave) (unspec blah))
15741 (clobber (reg 999))])
15743 The clobber will cause the store into reg 999 to be dead, and
15744 flow will attempt to delete an epilogue insn. In this case, we
15745 need an unspec use/set of the register. */
15747 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
15748 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
15750 if (!epiloguep || call_used_regs [i])
15751 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
15752 gen_rtx_REG (V4SImode, i));
15755 rtx reg = gen_rtx_REG (V4SImode, i);
15758 = gen_rtx_SET (VOIDmode,
15760 gen_rtx_UNSPEC (V4SImode,
15761 gen_rtvec (1, reg), 27));
15765 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
15767 for (i = 0; i < nclobs; ++i)
15768 XVECEXP (insn, 0, i) = clobs[i];
15773 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
15774 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
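/* A typical (illustrative) use, modeled on the FPR saves in
   rs6000_emit_prologue below:

     emit_frame_save (frame_reg_rtx, frame_ptr_rtx, DFmode,
		      info->first_fp_reg_save + i,
		      info->fp_save_offset + sp_offset + 8 * i,
		      info->total_size);

   which stores the register at FRAME_REG + OFFSET and attaches the
   frame-related notes via rs6000_frame_related.  */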
15777 emit_frame_save (rtx frame_reg, rtx frame_ptr, enum machine_mode mode,
15778 unsigned int regno, int offset, HOST_WIDE_INT total_size)
15780 rtx reg, offset_rtx, insn, mem, addr, int_rtx;
15781 rtx replacea, replaceb;
15783 int_rtx = GEN_INT (offset);
15785 /* Some cases that need register indexed addressing. */
15786 if ((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
15787 || (TARGET_E500_DOUBLE && mode == DFmode)
15789 && SPE_VECTOR_MODE (mode)
15790 && !SPE_CONST_OFFSET_OK (offset)))
15792 /* Whoever calls us must make sure r11 is available in the
15793 flow path of instructions in the prologue. */
15794 offset_rtx = gen_rtx_REG (Pmode, 11);
15795 emit_move_insn (offset_rtx, int_rtx);
15797 replacea = offset_rtx;
15798 replaceb = int_rtx;
15802 offset_rtx = int_rtx;
15803 replacea = NULL_RTX;
15804 replaceb = NULL_RTX;
15807 reg = gen_rtx_REG (mode, regno);
15808 addr = gen_rtx_PLUS (Pmode, frame_reg, offset_rtx);
15809 mem = gen_frame_mem (mode, addr);
15811 insn = emit_move_insn (mem, reg);
15813 rs6000_frame_related (insn, frame_ptr, total_size, replacea, replaceb);
15816 /* Emit an offset memory reference suitable for a frame store, while
15817 converting to a valid addressing mode. */
15820 gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
15822 rtx int_rtx, offset_rtx;
15824 int_rtx = GEN_INT (offset);
15826 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode))
15827 || (TARGET_E500_DOUBLE && mode == DFmode))
15829 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
15830 emit_move_insn (offset_rtx, int_rtx);
15833 offset_rtx = int_rtx;
15835 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
15838 /* Look for user-defined global regs. We should not save and restore these,
15839 and cannot use stmw/lmw if there are any in the range they would cover. */
15842 no_global_regs_above (int first, bool gpr)
15845 int last = gpr ? 32 : 64;
15846 for (i = first; i < last; i++)
15847 if (global_regs[i])
15852 #ifndef TARGET_FIX_AND_CONTINUE
15853 #define TARGET_FIX_AND_CONTINUE 0
15856 /* It's really GPR 13 and FPR 14, but we need the smaller of the two. */
15857 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
15858 #define LAST_SAVRES_REGISTER 31
15859 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
15861 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][8];
15863 /* Return the symbol for an out-of-line register save/restore routine.
15864 We are saving/restoring GPRs if GPR is true. */
15867 rs6000_savres_routine_sym (rs6000_stack_t *info, bool savep, bool gpr, bool exitp)
15869 int regno = gpr ? info->first_gp_reg_save : (info->first_fp_reg_save - 32);
15871 int select = ((savep ? 1 : 0) << 2
15873 /* On the SPE, we never have any FPRs, but we do have
15874 32/64-bit versions of the routines. */
15875 ? (TARGET_SPE_ABI && info->spe_64bit_regs_used ? 1 : 0)
15879 /* Don't generate bogus routine names. */
15880 gcc_assert (FIRST_SAVRES_REGISTER <= regno && regno <= LAST_SAVRES_REGISTER);
15882 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
15887 const char *action;
15888 const char *regkind;
15889 const char *exit_suffix;
15891 action = savep ? "save" : "rest";
15893 /* SPE has slightly different names for its routines depending on
15894 whether we are saving 32-bit or 64-bit registers. */
15895 if (TARGET_SPE_ABI)
15897 /* No floating point saves on the SPE. */
15900 regkind = info->spe_64bit_regs_used ? "64gpr" : "32gpr";
15903 regkind = gpr ? "gpr" : "fpr";
15905 exit_suffix = exitp ? "_x" : "";
15907 sprintf (name, "_%s%s_%d%s", action, regkind, regno, exit_suffix);
15909 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
15910 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
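      /* Examples of names produced by the sprintf above (the starting
	 registers are illustrative):
	     "_savegpr_29"      save GPRs from r29 up
	     "_restgpr_29_x"    restore the same GPRs and return
	     "_savefpr_14"      save FPRs from f14 up
	     "_rest64gpr_20_x"  SPE 64-bit GPR restore-and-exit variant  */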
15916 /* Emit a sequence of insns, including a stack tie if needed, for
15917 resetting the stack pointer. If SAVRES is true, then don't reset the
15918 stack pointer, but move the base of the frame into r11 for use by
15919 out-of-line register restore routines. */
15922 rs6000_emit_stack_reset (rs6000_stack_t *info,
15923 rtx sp_reg_rtx, rtx frame_reg_rtx,
15924 int sp_offset, bool savres)
15926 /* This blockage is needed so that sched doesn't decide to move
15927 the sp change before the register restores. */
15928 if (frame_reg_rtx != sp_reg_rtx
15930 && info->spe_64bit_regs_used != 0
15931 && info->first_gp_reg_save != 32))
15932 rs6000_emit_stack_tie ();
15934 if (frame_reg_rtx != sp_reg_rtx)
15936 rs6000_emit_stack_tie ();
15937 if (sp_offset != 0)
15938 emit_insn (gen_addsi3 (sp_reg_rtx, frame_reg_rtx,
15939 GEN_INT (sp_offset)));
15941 emit_move_insn (sp_reg_rtx, frame_reg_rtx);
15943 else if (sp_offset != 0)
15945 /* If we are restoring registers out-of-line, we will be using the
15946 "exit" variants of the restore routines, which will reset the
15947 stack for us. But we do need to point r11 into the right place
15948 for those routines. */
15949 rtx dest_reg = (savres
15950 ? gen_rtx_REG (Pmode, 11)
15953 emit_insn (TARGET_32BIT
15954 ? gen_addsi3 (dest_reg, sp_reg_rtx,
15955 GEN_INT (sp_offset))
15956 : gen_adddi3 (dest_reg, sp_reg_rtx,
15957 GEN_INT (sp_offset)));
15961 /* Construct a parallel rtx describing the effect of a call to an
15962 out-of-line register save/restore routine. */
15965 rs6000_make_savres_rtx (rs6000_stack_t *info,
15966 rtx frame_reg_rtx, int save_area_offset,
15967 enum machine_mode reg_mode,
15968 bool savep, bool gpr, bool exitp)
15971 int offset, start_reg, end_reg, n_regs;
15972 int reg_size = GET_MODE_SIZE (reg_mode);
15978 ? info->first_gp_reg_save
15979 : info->first_fp_reg_save);
15980 end_reg = gpr ? 32 : 64;
15981 n_regs = end_reg - start_reg;
15982 p = rtvec_alloc ((exitp ? 4 : 3) + n_regs);
15984 /* If we're saving registers, then we should never say we're exiting. */
15985 gcc_assert ((savep && !exitp) || !savep);
15988 RTVEC_ELT (p, offset++) = gen_rtx_RETURN (VOIDmode);
15990 RTVEC_ELT (p, offset++)
15991 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 65));
15993 sym = rs6000_savres_routine_sym (info, savep, gpr, exitp);
15994 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
15995 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 11));
15997 for (i = 0; i < end_reg - start_reg; i++)
15999 rtx addr, reg, mem;
16000 reg = gen_rtx_REG (reg_mode, start_reg + i);
16001 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16002 GEN_INT (save_area_offset + reg_size*i));
16003 mem = gen_frame_mem (reg_mode, addr);
16005 RTVEC_ELT (p, i + offset) = gen_rtx_SET (VOIDmode,
16007 savep ? reg : mem);
16010 return gen_rtx_PARALLEL (VOIDmode, p);
16013 /* Determine whether the gp REG is really used. */
16016 rs6000_reg_live_or_pic_offset_p (int reg)
16018 return ((df_regs_ever_live_p (reg)
16019 && (!call_used_regs[reg]
16020 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
16021 && TARGET_TOC && TARGET_MINIMAL_TOC)))
16022 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
16023 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
16024 || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
16028 SAVRES_MULTIPLE = 0x1,
16029 SAVRES_INLINE_FPRS = 0x2,
16030 SAVRES_INLINE_GPRS = 0x4
16033 /* Determine the strategy for saving/restoring registers. */
16036 rs6000_savres_strategy (rs6000_stack_t *info, bool savep,
16037 int using_static_chain_p, int sibcall)
16039 bool using_multiple_p;
16041 bool savres_fprs_inline;
16042 bool savres_gprs_inline;
16043 bool noclobber_global_gprs
16044 = no_global_regs_above (info->first_gp_reg_save, /*gpr=*/true);
16046 using_multiple_p = (TARGET_MULTIPLE && ! TARGET_POWERPC64
16047 && (!TARGET_SPE_ABI
16048 || info->spe_64bit_regs_used == 0)
16049 && info->first_gp_reg_save < 31
16050 && noclobber_global_gprs);
16051 /* Don't bother to try to save things out-of-line if r11 is occupied
16052 by the static chain. It would require too much fiddling and the
16053 static chain is rarely used anyway. */
16054 common = (using_static_chain_p
16056 || crtl->calls_eh_return
16057 || !info->lr_save_p
16058 || cfun->machine->ra_need_lr
16059 || info->total_size > 32767);
16060 savres_fprs_inline = (common
16061 || info->first_fp_reg_save == 64
16062 || !no_global_regs_above (info->first_fp_reg_save,
16064 || FP_SAVE_INLINE (info->first_fp_reg_save));
16065 savres_gprs_inline = (common
16066 /* Saving CR interferes with the exit routines
16067 used on the SPE, so just punt here. */
16070 && info->spe_64bit_regs_used != 0
16071 && info->cr_save_p != 0)
16072 || info->first_gp_reg_save == 32
16073 || !noclobber_global_gprs
16074 || GP_SAVE_INLINE (info->first_gp_reg_save));
16077 /* If we are going to use store multiple, then don't even bother
16078 with the out-of-line routines, since the store-multiple instruction
16079 will always be smaller. */
16080 savres_gprs_inline = savres_gprs_inline || using_multiple_p;
16083 /* The situation is more complicated with load multiple. We'd
16084 prefer to use the out-of-line routines for restores, since the
16085 "exit" out-of-line routines can handle the restore of LR and
16086 the frame teardown. But we can only use the out-of-line
16087 routines if we know that we've used store multiple or
16088 out-of-line routines in the prologue, i.e. if we've saved all
16089 the registers from first_gp_reg_save. Otherwise, we risk
16090 loading garbage from the stack. Furthermore, we can only use
16091 the "exit" out-of-line gpr restore if we haven't saved any FPRs.  */
16093 bool saved_all = !savres_gprs_inline || using_multiple_p;
16095 if (saved_all && info->first_fp_reg_save != 64)
16096 /* We can't use the exit routine; use load multiple if it's available.  */
16098 savres_gprs_inline = savres_gprs_inline || using_multiple_p;
16101 return (using_multiple_p
16102 | (savres_fprs_inline << 1)
16103 | (savres_gprs_inline << 2));
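/* For example, a return value of
     SAVRES_MULTIPLE | SAVRES_INLINE_FPRS | SAVRES_INLINE_GPRS
   tells the caller to use the store/load-multiple instructions and to
   handle both FPRs and GPRs inline, i.e. no out-of-line save/restore
   routine is called at all.  */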
16106 /* Emit function prologue as insns. */
16109 rs6000_emit_prologue (void)
16111 rs6000_stack_t *info = rs6000_stack_info ();
16112 enum machine_mode reg_mode = Pmode;
16113 int reg_size = TARGET_32BIT ? 4 : 8;
16114 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
16115 rtx frame_ptr_rtx = gen_rtx_REG (Pmode, 12);
16116 rtx frame_reg_rtx = sp_reg_rtx;
16117 rtx cr_save_rtx = NULL_RTX;
16120 int saving_FPRs_inline;
16121 int saving_GPRs_inline;
16122 int using_store_multiple;
16123 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
16124 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
16125 && call_used_regs[STATIC_CHAIN_REGNUM]);
16126 HOST_WIDE_INT sp_offset = 0;
16128 if (TARGET_FIX_AND_CONTINUE)
16130 /* gdb on darwin arranges to forward a function from the old
16131 address by modifying the first 5 instructions of the function
16132 to branch to the overriding function. This is necessary to
16133 permit function pointers that point to the old function to
16134 actually forward to the new function. */
16135 emit_insn (gen_nop ());
16136 emit_insn (gen_nop ());
16137 emit_insn (gen_nop ());
16138 emit_insn (gen_nop ());
16139 emit_insn (gen_nop ());
16142 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
16144 reg_mode = V2SImode;
16148 strategy = rs6000_savres_strategy (info, /*savep=*/true,
16149 /*static_chain_p=*/using_static_chain_p,
16151 using_store_multiple = strategy & SAVRES_MULTIPLE;
16152 saving_FPRs_inline = strategy & SAVRES_INLINE_FPRS;
16153 saving_GPRs_inline = strategy & SAVRES_INLINE_GPRS;
16155 /* For V.4, update stack before we do any saving and set back pointer. */
16156 if (! WORLD_SAVE_P (info)
16158 && (DEFAULT_ABI == ABI_V4
16159 || crtl->calls_eh_return))
16161 bool need_r11 = (TARGET_SPE
16162 ? (!saving_GPRs_inline
16163 && info->spe_64bit_regs_used == 0)
16164 : (!saving_FPRs_inline || !saving_GPRs_inline));
16165 if (info->total_size < 32767)
16166 sp_offset = info->total_size;
16168 frame_reg_rtx = (need_r11
16169 ? gen_rtx_REG (Pmode, 11)
16171 rs6000_emit_allocate_stack (info->total_size,
16172 (frame_reg_rtx != sp_reg_rtx
16173 && (info->cr_save_p
16175 || info->first_fp_reg_save < 64
16176 || info->first_gp_reg_save < 32
16179 if (frame_reg_rtx != sp_reg_rtx)
16180 rs6000_emit_stack_tie ();
16183 /* Handle world saves specially here. */
16184 if (WORLD_SAVE_P (info))
16191 /* save_world expects lr in r0. */
16192 reg0 = gen_rtx_REG (Pmode, 0);
16193 if (info->lr_save_p)
16195 insn = emit_move_insn (reg0,
16196 gen_rtx_REG (Pmode, LR_REGNO));
16197 RTX_FRAME_RELATED_P (insn) = 1;
16200 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
16201 assumptions about the offsets of various bits of the stack frame.  */
16203 gcc_assert (info->gp_save_offset == -220
16204 && info->fp_save_offset == -144
16205 && info->lr_save_offset == 8
16206 && info->cr_save_offset == 4
16209 && (!crtl->calls_eh_return
16210 || info->ehrd_offset == -432)
16211 && info->vrsave_save_offset == -224
16212 && info->altivec_save_offset == -416);
16214 treg = gen_rtx_REG (SImode, 11);
16215 emit_move_insn (treg, GEN_INT (-info->total_size));
16217 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
16218 in R11. It also clobbers R12, so beware! */
16220 /* Preserve CR2 for save_world prologues */
16222 sz += 32 - info->first_gp_reg_save;
16223 sz += 64 - info->first_fp_reg_save;
16224 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
16225 p = rtvec_alloc (sz);
16227 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
16228 gen_rtx_REG (SImode,
16230 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
16231 gen_rtx_SYMBOL_REF (Pmode,
16233 /* We do floats first so that the instruction pattern matches properly.  */
16235 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
16237 rtx reg = gen_rtx_REG (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
16238 ? DFmode : SFmode),
16239 info->first_fp_reg_save + i);
16240 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16241 GEN_INT (info->fp_save_offset
16242 + sp_offset + 8 * i));
16243 rtx mem = gen_frame_mem (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
16244 ? DFmode : SFmode), addr);
16246 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
16248 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
16250 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
16251 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16252 GEN_INT (info->altivec_save_offset
16253 + sp_offset + 16 * i));
16254 rtx mem = gen_frame_mem (V4SImode, addr);
16256 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
16258 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
16260 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
16261 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16262 GEN_INT (info->gp_save_offset
16263 + sp_offset + reg_size * i));
16264 rtx mem = gen_frame_mem (reg_mode, addr);
16266 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
16270 /* CR register traditionally saved as CR2. */
16271 rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
16272 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16273 GEN_INT (info->cr_save_offset
16275 rtx mem = gen_frame_mem (reg_mode, addr);
16277 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
16279 /* Explain about use of R0. */
16280 if (info->lr_save_p)
16282 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16283 GEN_INT (info->lr_save_offset
16285 rtx mem = gen_frame_mem (reg_mode, addr);
16287 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg0);
16289 /* Explain what happens to the stack pointer. */
16291 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
16292 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
16295 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
16296 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16297 treg, GEN_INT (-info->total_size));
16298 sp_offset = info->total_size;
16301 /* If we use the link register, get it into r0. */
16302 if (!WORLD_SAVE_P (info) && info->lr_save_p)
16304 rtx addr, reg, mem;
16306 insn = emit_move_insn (gen_rtx_REG (Pmode, 0),
16307 gen_rtx_REG (Pmode, LR_REGNO));
16308 RTX_FRAME_RELATED_P (insn) = 1;
16310 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16311 GEN_INT (info->lr_save_offset + sp_offset));
16312 reg = gen_rtx_REG (Pmode, 0);
16313 mem = gen_rtx_MEM (Pmode, addr);
16314 /* This should not be of rs6000_sr_alias_set, because of
16315 __builtin_return_address. */
16317 insn = emit_move_insn (mem, reg);
16318 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16319 NULL_RTX, NULL_RTX);
16322 /* If we need to save CR, put it into r12. */
16323 if (!WORLD_SAVE_P (info) && info->cr_save_p && frame_reg_rtx != frame_ptr_rtx)
16327 cr_save_rtx = gen_rtx_REG (SImode, 12);
16328 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
16329 RTX_FRAME_RELATED_P (insn) = 1;
16330 /* Now, there's no way that dwarf2out_frame_debug_expr is going
16331 to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
16332 But that's OK. All we have to do is specify that _one_ condition
16333 code register is saved in this stack slot. The thrower's epilogue
16334 will then restore all the call-saved registers.
16335 We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
16336 set = gen_rtx_SET (VOIDmode, cr_save_rtx,
16337 gen_rtx_REG (SImode, CR2_REGNO));
16338 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
16343 /* Do any required saving of FPRs.  If only one or two to save, do
16344 it ourselves.  Otherwise, call a function. */
16345 if (!WORLD_SAVE_P (info) && saving_FPRs_inline)
16348 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
16349 if ((df_regs_ever_live_p (info->first_fp_reg_save+i)
16350 && ! call_used_regs[info->first_fp_reg_save+i]))
16351 emit_frame_save (frame_reg_rtx, frame_ptr_rtx,
16352 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
16354 info->first_fp_reg_save + i,
16355 info->fp_save_offset + sp_offset + 8 * i,
16358 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
16362 par = rs6000_make_savres_rtx (info, frame_reg_rtx,
16363 info->fp_save_offset + sp_offset,
16365 /*savep=*/true, /*gpr=*/false,
16367 insn = emit_insn (par);
16368 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16369 NULL_RTX, NULL_RTX);
16372 /* Save GPRs. This is done as a PARALLEL if we are using
16373 the store-multiple instructions. */
16374 if (!WORLD_SAVE_P (info)
16376 && info->spe_64bit_regs_used != 0
16377 && info->first_gp_reg_save != 32)
16380 rtx spe_save_area_ptr;
16382 /* Determine whether we can address all of the registers that need
16383 to be saved with an offset from the stack pointer that fits in
16384 the small const field for SPE memory instructions. */
16385 int spe_regs_addressable_via_sp
16386 = (SPE_CONST_OFFSET_OK(info->spe_gp_save_offset + sp_offset
16387 + (32 - info->first_gp_reg_save - 1) * reg_size)
16388 && saving_GPRs_inline);
16391 if (spe_regs_addressable_via_sp)
16393 spe_save_area_ptr = frame_reg_rtx;
16394 spe_offset = info->spe_gp_save_offset + sp_offset;
16398 /* Make r11 point to the start of the SPE save area. We need
16399 to be careful here if r11 is holding the static chain. If
16400 it is, then temporarily save it in r0. We would use r0 as
16401 our base register here, but using r0 as a base register in
16402 loads and stores means something different from what we intended.  */
16404 int ool_adjust = (saving_GPRs_inline
16406 : (info->first_gp_reg_save
16407 - (FIRST_SAVRES_REGISTER+1))*8);
16408 HOST_WIDE_INT offset = (info->spe_gp_save_offset
16409 + sp_offset - ool_adjust);
16411 if (using_static_chain_p)
16413 rtx r0 = gen_rtx_REG (Pmode, 0);
16414 gcc_assert (info->first_gp_reg_save > 11);
16416 emit_move_insn (r0, gen_rtx_REG (Pmode, 11));
16419 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
16420 insn = emit_insn (gen_addsi3 (spe_save_area_ptr,
16422 GEN_INT (offset)));
16423 /* We need to make sure the move to r11 gets noted for
16424 properly outputting unwind information. */
16425 if (!saving_GPRs_inline)
16426 rs6000_frame_related (insn, frame_reg_rtx, offset,
16427 NULL_RTX, NULL_RTX);
16431 if (saving_GPRs_inline)
16433 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
16434 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
16436 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
16437 rtx offset, addr, mem;
16439 /* We're doing all this to ensure that the offset fits into
16440 the immediate offset of 'evstdd'. */
16441 gcc_assert (SPE_CONST_OFFSET_OK (reg_size * i + spe_offset));
16443 offset = GEN_INT (reg_size * i + spe_offset);
16444 addr = gen_rtx_PLUS (Pmode, spe_save_area_ptr, offset);
16445 mem = gen_rtx_MEM (V2SImode, addr);
16447 insn = emit_move_insn (mem, reg);
16449 rs6000_frame_related (insn, spe_save_area_ptr,
16450 info->spe_gp_save_offset
16451 + sp_offset + reg_size * i,
16452 offset, const0_rtx);
16459 par = rs6000_make_savres_rtx (info, gen_rtx_REG (Pmode, 11),
16461 /*savep=*/true, /*gpr=*/true,
16463 insn = emit_insn (par);
16464 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16465 NULL_RTX, NULL_RTX);
16469 /* Move the static chain pointer back. */
16470 if (using_static_chain_p && !spe_regs_addressable_via_sp)
16471 emit_move_insn (gen_rtx_REG (Pmode, 11), gen_rtx_REG (Pmode, 0));
16473 else if (!WORLD_SAVE_P (info) && !saving_GPRs_inline)
16477 /* Need to adjust r11 if we saved any FPRs. */
16478 if (info->first_fp_reg_save != 64)
16480 rtx r11 = gen_rtx_REG (reg_mode, 11);
16481 rtx offset = GEN_INT (info->total_size
16482 + (-8 * (64-info->first_fp_reg_save)));
16483 rtx ptr_reg = (sp_reg_rtx == frame_reg_rtx
16484 ? sp_reg_rtx : r11);
16486 emit_insn (TARGET_32BIT
16487 ? gen_addsi3 (r11, ptr_reg, offset)
16488 : gen_adddi3 (r11, ptr_reg, offset));
16491 par = rs6000_make_savres_rtx (info, frame_reg_rtx,
16492 info->gp_save_offset + sp_offset,
16494 /*savep=*/true, /*gpr=*/true,
16496 insn = emit_insn (par);
16497 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16498 NULL_RTX, NULL_RTX);
16500 else if (!WORLD_SAVE_P (info) && using_store_multiple)
16504 p = rtvec_alloc (32 - info->first_gp_reg_save);
16505 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
16507 rtx addr, reg, mem;
16508 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
16509 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16510 GEN_INT (info->gp_save_offset
16513 mem = gen_frame_mem (reg_mode, addr);
16515 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, reg);
16517 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
16518 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16519 NULL_RTX, NULL_RTX);
16521 else if (!WORLD_SAVE_P (info))
16524 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
16525 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
16527 rtx addr, reg, mem;
16528 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
16530 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16531 GEN_INT (info->gp_save_offset
16534 mem = gen_frame_mem (reg_mode, addr);
16536 insn = emit_move_insn (mem, reg);
16537 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16538 NULL_RTX, NULL_RTX);
16542 /* ??? There's no need to emit actual instructions here, but it's the
16543 easiest way to get the frame unwind information emitted. */
16544 if (crtl->calls_eh_return)
16546 unsigned int i, regno;
16548 /* In AIX ABI we need to pretend we save r2 here. */
16551 rtx addr, reg, mem;
16553 reg = gen_rtx_REG (reg_mode, 2);
16554 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16555 GEN_INT (sp_offset + 5 * reg_size));
16556 mem = gen_frame_mem (reg_mode, addr);
16558 insn = emit_move_insn (mem, reg);
16559 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16560 NULL_RTX, NULL_RTX);
16561 PATTERN (insn) = gen_blockage ();
16566 regno = EH_RETURN_DATA_REGNO (i);
16567 if (regno == INVALID_REGNUM)
16570 emit_frame_save (frame_reg_rtx, frame_ptr_rtx, reg_mode, regno,
16571 info->ehrd_offset + sp_offset
16572 + reg_size * (int) i,
16577 /* Save CR if we use any that must be preserved. */
16578 if (!WORLD_SAVE_P (info) && info->cr_save_p)
16580 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16581 GEN_INT (info->cr_save_offset + sp_offset));
16582 rtx mem = gen_frame_mem (SImode, addr);
16583 /* See the large comment above about why CR2_REGNO is used. */
16584 rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);
16586 /* If r12 was used to hold the original sp, copy cr into r0 now that it's free.  */
16588 if (REGNO (frame_reg_rtx) == 12)
16592 cr_save_rtx = gen_rtx_REG (SImode, 0);
16593 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
16594 RTX_FRAME_RELATED_P (insn) = 1;
16595 set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
16596 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
16601 insn = emit_move_insn (mem, cr_save_rtx);
16603 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16604 NULL_RTX, NULL_RTX);
16607 /* Update stack and set back pointer unless this is V.4,
16608 for which it was done previously. */
16609 if (!WORLD_SAVE_P (info) && info->push_p
16610 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
16612 if (info->total_size < 32767)
16613 sp_offset = info->total_size;
16615 frame_reg_rtx = frame_ptr_rtx;
16616 rs6000_emit_allocate_stack (info->total_size,
16617 (frame_reg_rtx != sp_reg_rtx
16618 && ((info->altivec_size != 0)
16619 || (info->vrsave_mask != 0)
16622 if (frame_reg_rtx != sp_reg_rtx)
16623 rs6000_emit_stack_tie ();
16626 /* Set frame pointer, if needed. */
16627 if (frame_pointer_needed)
16629 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
16631 RTX_FRAME_RELATED_P (insn) = 1;
16634 /* Save AltiVec registers if needed. Save here because the red zone does
16635 not include AltiVec registers. */
16636 if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI && info->altivec_size != 0)
16640 /* There should be a non-inline version of this, for when we
16641 are saving lots of vector registers. */
16642 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
16643 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
16645 rtx areg, savereg, mem;
16648 offset = info->altivec_save_offset + sp_offset
16649 + 16 * (i - info->first_altivec_reg_save);
16651 savereg = gen_rtx_REG (V4SImode, i);
16653 areg = gen_rtx_REG (Pmode, 0);
16654 emit_move_insn (areg, GEN_INT (offset));
16656 /* AltiVec addressing mode is [reg+reg]. */
16657 mem = gen_frame_mem (V4SImode,
16658 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
16660 insn = emit_move_insn (mem, savereg);
16662 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16663 areg, GEN_INT (offset));
16667 /* VRSAVE is a bit vector representing which AltiVec registers
16668 are used. The OS uses this to determine which vector
16669 registers to save on a context switch. We need to save
16670 VRSAVE on the stack frame, add whatever AltiVec registers we
16671 used in this function, and do the corresponding magic in the epilogue.  */
16674 if (TARGET_ALTIVEC && TARGET_ALTIVEC_VRSAVE
16675 && info->vrsave_mask != 0)
16677 rtx reg, mem, vrsave;
16680 /* Get VRSAVE onto a GPR. Note that ABI_V4 might be using r12
16681 as frame_reg_rtx and r11 as the static chain pointer for
16682 nested functions. */
16683 reg = gen_rtx_REG (SImode, 0);
16684 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
16686 emit_insn (gen_get_vrsave_internal (reg));
16688 emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
16690 if (!WORLD_SAVE_P (info))
16693 offset = info->vrsave_save_offset + sp_offset;
16694 mem = gen_frame_mem (SImode,
16695 gen_rtx_PLUS (Pmode, frame_reg_rtx,
16696 GEN_INT (offset)));
16697 insn = emit_move_insn (mem, reg);
16700 /* Include the registers in the mask. */
16701 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
16703 insn = emit_insn (generate_set_vrsave (reg, info, 0));
16706 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
16707 if ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
16708 || (DEFAULT_ABI == ABI_V4
16709 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
16710 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM)))
16712 /* If emit_load_toc_table will use the link register, we need to save
16713 it. We use R12 for this purpose because emit_load_toc_table
16714 can use register 0. This allows us to use a plain 'blr' to return
16715 from the procedure more often. */
16716 int save_LR_around_toc_setup = (TARGET_ELF
16717 && DEFAULT_ABI != ABI_AIX
16719 && ! info->lr_save_p
16720 && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
16721 if (save_LR_around_toc_setup)
16723 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
16725 insn = emit_move_insn (frame_ptr_rtx, lr);
16726 RTX_FRAME_RELATED_P (insn) = 1;
16728 rs6000_emit_load_toc_table (TRUE);
16730 insn = emit_move_insn (lr, frame_ptr_rtx);
16731 RTX_FRAME_RELATED_P (insn) = 1;
16734 rs6000_emit_load_toc_table (TRUE);
16738 if (DEFAULT_ABI == ABI_DARWIN
16739 && flag_pic && crtl->uses_pic_offset_table)
16741 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
16742 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
16744 /* Save and restore LR locally around this call (in R0). */
16745 if (!info->lr_save_p)
16746 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
16748 emit_insn (gen_load_macho_picbase (src));
16750 emit_move_insn (gen_rtx_REG (Pmode,
16751 RS6000_PIC_OFFSET_TABLE_REGNUM),
16754 if (!info->lr_save_p)
16755 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
16760 /* Write function prologue. */
16763 rs6000_output_function_prologue (FILE *file,
16764 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
16766 rs6000_stack_t *info = rs6000_stack_info ();
16768 if (TARGET_DEBUG_STACK)
16769 debug_stack_info (info);
16771 /* Write .extern for any function we will call to save and restore fp values.  */
16773 if (info->first_fp_reg_save < 64
16774 && !FP_SAVE_INLINE (info->first_fp_reg_save))
16775 fprintf (file, "\t.extern %s%d%s\n\t.extern %s%d%s\n",
16776 SAVE_FP_PREFIX, info->first_fp_reg_save - 32, SAVE_FP_SUFFIX,
16777 RESTORE_FP_PREFIX, info->first_fp_reg_save - 32, RESTORE_FP_SUFFIX);
16779 /* Write .extern for AIX common mode routines, if needed. */
16780 if (! TARGET_POWER && ! TARGET_POWERPC && ! common_mode_defined)
16782 fputs ("\t.extern __mulh\n", file);
16783 fputs ("\t.extern __mull\n", file);
16784 fputs ("\t.extern __divss\n", file);
16785 fputs ("\t.extern __divus\n", file);
16786 fputs ("\t.extern __quoss\n", file);
16787 fputs ("\t.extern __quous\n", file);
16788 common_mode_defined = 1;
16791 if (! HAVE_prologue)
16795 /* A NOTE_INSN_DELETED is supposed to be at the start and end of
16796 the "toplevel" insn chain. */
16797 emit_note (NOTE_INSN_DELETED);
16798 rs6000_emit_prologue ();
16799 emit_note (NOTE_INSN_DELETED);
16801 /* Expand INSN_ADDRESSES so final() doesn't crash. */
16805 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
16807 INSN_ADDRESSES_NEW (insn, addr);
16812 if (TARGET_DEBUG_STACK)
16813 debug_rtx_list (get_insns (), 100);
16814 final (get_insns (), file, FALSE);
16818 rs6000_pic_labelno++;
16821 /* Non-zero if vmx regs are restored before the frame pop, zero if
16822 we restore after the pop when possible. */
16823 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
16825 /* Reload CR from REG. */
16828 rs6000_restore_saved_cr (rtx reg, int using_mfcr_multiple)
16833 if (using_mfcr_multiple)
16835 for (i = 0; i < 8; i++)
16836 if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
16838 gcc_assert (count);
16841 if (using_mfcr_multiple && count > 1)
16846 p = rtvec_alloc (count);
16849 for (i = 0; i < 8; i++)
16850 if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
16852 rtvec r = rtvec_alloc (2);
16853 RTVEC_ELT (r, 0) = reg;
16854 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
16855 RTVEC_ELT (p, ndx) =
16856 gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO+i),
16857 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
16860 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
16861 gcc_assert (ndx == count);
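      /* Illustration of the per-field mask built above: CR field 2 uses
	 GEN_INT (1 << (7 - 2)) == 0x20, so restoring, say, CR2 and CR3
	 corresponds roughly to "mtcrf 0x30, <reg>"; the exact insn comes
	 from the matching pattern, and the field numbers here are just an
	 example.  */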
16864 for (i = 0; i < 8; i++)
16865 if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
16867 emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode,
16873 /* Emit function epilogue as insns.
16875 At present, dwarf2out_frame_debug_expr doesn't understand
16876 register restores, so we don't bother setting RTX_FRAME_RELATED_P
16877 anywhere in the epilogue. Most of the insns below would in any case
16878 need special notes to explain where r11 is in relation to the stack. */
16881 rs6000_emit_epilogue (int sibcall)
16883 rs6000_stack_t *info;
16884 int restoring_GPRs_inline;
16885 int restoring_FPRs_inline;
16886 int using_load_multiple;
16887 int using_mtcr_multiple;
16888 int use_backchain_to_restore_sp;
16892 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
16893 rtx frame_reg_rtx = sp_reg_rtx;
16894 enum machine_mode reg_mode = Pmode;
16895 int reg_size = TARGET_32BIT ? 4 : 8;
16898 info = rs6000_stack_info ();
16900 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
16902 reg_mode = V2SImode;
16906 strategy = rs6000_savres_strategy (info, /*savep=*/false,
16907 /*static_chain_p=*/0, sibcall);
16908 using_load_multiple = strategy & SAVRES_MULTIPLE;
16909 restoring_FPRs_inline = strategy & SAVRES_INLINE_FPRS;
16910 restoring_GPRs_inline = strategy & SAVRES_INLINE_GPRS;
16911 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
16912 || rs6000_cpu == PROCESSOR_PPC603
16913 || rs6000_cpu == PROCESSOR_PPC750
16915 /* Restore via the backchain when we have a large frame, since this
16916 is more efficient than an addis, addi pair. The second condition
16917 here will not trigger at the moment; we don't actually need a
16918 frame pointer for alloca, but the generic parts of the compiler
16919 give us one anyway. */
16920 use_backchain_to_restore_sp = (info->total_size > 32767
16921 || info->total_size
16922 + (info->lr_save_p ? info->lr_save_offset : 0)
16924 || (cfun->calls_alloca
16925 && !frame_pointer_needed));
16926 restore_lr = (info->lr_save_p
16927 && restoring_GPRs_inline
16928 && restoring_FPRs_inline);
16930 if (WORLD_SAVE_P (info))
16934 const char *alloc_rname;
16937 /* eh_rest_world_r10 will return to the location saved in the LR
16938 stack slot (which is not likely to be our caller.)
16939 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
16940 rest_world is similar, except any R10 parameter is ignored.
16941 The exception-handling stuff that was here in 2.95 is no
16942 longer necessary. */
16946 + 32 - info->first_gp_reg_save
16947 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
16948 + 63 + 1 - info->first_fp_reg_save);
16950 strcpy (rname, ((crtl->calls_eh_return) ?
16951 "*eh_rest_world_r10" : "*rest_world"));
16952 alloc_rname = ggc_strdup (rname);
16955 RTVEC_ELT (p, j++) = gen_rtx_RETURN (VOIDmode);
16956 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
16957 gen_rtx_REG (Pmode,
16960 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
16961 /* The instruction pattern requires a clobber here;
16962 it is shared with the restVEC helper. */
16964 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
16967 /* CR register traditionally saved as CR2. */
16968 rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
16969 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16970 GEN_INT (info->cr_save_offset));
16971 rtx mem = gen_frame_mem (reg_mode, addr);
16973 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
16976 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
16978 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
16979 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16980 GEN_INT (info->gp_save_offset
16982 rtx mem = gen_frame_mem (reg_mode, addr);
16984 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
16986 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
16988 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
16989 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16990 GEN_INT (info->altivec_save_offset
16992 rtx mem = gen_frame_mem (V4SImode, addr);
16994 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
16996 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
16998 rtx reg = gen_rtx_REG (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
16999 ? DFmode : SFmode),
17000 info->first_fp_reg_save + i);
17001 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
17002 GEN_INT (info->fp_save_offset
17004 rtx mem = gen_frame_mem (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
17005 ? DFmode : SFmode), addr);
17007 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
17010 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
17012 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
17014 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
17016 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
17018 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
17019 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
17024 /* frame_reg_rtx + sp_offset points to the top of this stack frame. */
17026 sp_offset = info->total_size;
17028 /* Restore AltiVec registers if we must do so before adjusting the stack.  */
17030 if (TARGET_ALTIVEC_ABI
17031 && info->altivec_size != 0
17032 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
17033 || (DEFAULT_ABI != ABI_V4
17034 && info->altivec_save_offset < (TARGET_32BIT ? -220 : -288))))
17038 if (use_backchain_to_restore_sp)
17040 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
17041 emit_move_insn (frame_reg_rtx,
17042 gen_rtx_MEM (Pmode, sp_reg_rtx));
17045 else if (frame_pointer_needed)
17046 frame_reg_rtx = hard_frame_pointer_rtx;
17048 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
17049 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
17051 rtx addr, areg, mem;
17053 areg = gen_rtx_REG (Pmode, 0);
17055 (areg, GEN_INT (info->altivec_save_offset
17057 + 16 * (i - info->first_altivec_reg_save)));
17059 /* AltiVec addressing mode is [reg+reg]. */
17060 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
17061 mem = gen_frame_mem (V4SImode, addr);
17063 emit_move_insn (gen_rtx_REG (V4SImode, i), mem);
17067 /* Restore VRSAVE if we must do so before adjusting the stack. */
17069 && TARGET_ALTIVEC_VRSAVE
17070 && info->vrsave_mask != 0
17071 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
17072 || (DEFAULT_ABI != ABI_V4
17073 && info->vrsave_save_offset < (TARGET_32BIT ? -220 : -288))))
17075 rtx addr, mem, reg;
17077 if (frame_reg_rtx == sp_reg_rtx)
17079 if (use_backchain_to_restore_sp)
17081 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
17082 emit_move_insn (frame_reg_rtx,
17083 gen_rtx_MEM (Pmode, sp_reg_rtx));
17086 else if (frame_pointer_needed)
17087 frame_reg_rtx = hard_frame_pointer_rtx;
17090 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
17091 GEN_INT (info->vrsave_save_offset + sp_offset));
17092 mem = gen_frame_mem (SImode, addr);
17093 reg = gen_rtx_REG (SImode, 12);
17094 emit_move_insn (reg, mem);
17096 emit_insn (generate_set_vrsave (reg, info, 1));
17099 /* If we have a large stack frame, restore the old stack pointer
17100 using the backchain. */
17101 if (use_backchain_to_restore_sp)
17103 if (frame_reg_rtx == sp_reg_rtx)
17105 /* Under V.4, don't reset the stack pointer until after we're done
17106 loading the saved registers. */
17107 if (DEFAULT_ABI == ABI_V4)
17108 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
17110 emit_move_insn (frame_reg_rtx,
17111 gen_rtx_MEM (Pmode, sp_reg_rtx));
17114 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
17115 && DEFAULT_ABI == ABI_V4)
17116 /* frame_reg_rtx has been set up by the altivec restore. */
17120 emit_move_insn (sp_reg_rtx, frame_reg_rtx);
17121 frame_reg_rtx = sp_reg_rtx;
17124 /* If we have a frame pointer, we can restore the old stack pointer from it.  */
17126 else if (frame_pointer_needed)
17128 frame_reg_rtx = sp_reg_rtx;
17129 if (DEFAULT_ABI == ABI_V4)
17130 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
17132 emit_insn (TARGET_32BIT
17133 ? gen_addsi3 (frame_reg_rtx, hard_frame_pointer_rtx,
17134 GEN_INT (info->total_size))
17135 : gen_adddi3 (frame_reg_rtx, hard_frame_pointer_rtx,
17136 GEN_INT (info->total_size)));
17139 else if (info->push_p
17140 && DEFAULT_ABI != ABI_V4
17141 && !crtl->calls_eh_return)
17143 emit_insn (TARGET_32BIT
17144 ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx,
17145 GEN_INT (info->total_size))
17146 : gen_adddi3 (sp_reg_rtx, sp_reg_rtx,
17147 GEN_INT (info->total_size)));
17151 /* Restore AltiVec registers if we have not done so already. */
17152 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
17153 && TARGET_ALTIVEC_ABI
17154 && info->altivec_size != 0
17155 && (DEFAULT_ABI == ABI_V4
17156 || info->altivec_save_offset >= (TARGET_32BIT ? -220 : -288)))
17160 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
17161 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
17163 rtx addr, areg, mem;
17165 areg = gen_rtx_REG (Pmode, 0);
17167 (areg, GEN_INT (info->altivec_save_offset
17169 + 16 * (i - info->first_altivec_reg_save)));
17171 /* AltiVec addressing mode is [reg+reg]. */
17172 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
17173 mem = gen_frame_mem (V4SImode, addr);
17175 emit_move_insn (gen_rtx_REG (V4SImode, i), mem);
17179 /* Restore VRSAVE if we have not done so already. */
17180 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
17182 && TARGET_ALTIVEC_VRSAVE
17183 && info->vrsave_mask != 0
17184 && (DEFAULT_ABI == ABI_V4
17185 || info->vrsave_save_offset >= (TARGET_32BIT ? -220 : -288)))
17187 rtx addr, mem, reg;
17189 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
17190 GEN_INT (info->vrsave_save_offset + sp_offset));
17191 mem = gen_frame_mem (SImode, addr);
17192 reg = gen_rtx_REG (SImode, 12);
17193 emit_move_insn (reg, mem);
17195 emit_insn (generate_set_vrsave (reg, info, 1));
17198 /* Get the old lr if we saved it. If we are restoring registers
17199 out-of-line, then the out-of-line routines can do this for us. */
17202 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx,
17203 info->lr_save_offset + sp_offset);
17205 emit_move_insn (gen_rtx_REG (Pmode, 0), mem);
17208 /* Get the old cr if we saved it. */
17209 if (info->cr_save_p)
17211 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
17212 GEN_INT (info->cr_save_offset + sp_offset));
17213 rtx mem = gen_frame_mem (SImode, addr);
17215 emit_move_insn (gen_rtx_REG (SImode, 12), mem);
17218 /* Set LR here to try to overlap restores below. */
17220 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO),
17221 gen_rtx_REG (Pmode, 0));
17223 /* Load exception handler data registers, if needed. */
17224 if (crtl->calls_eh_return)
17226 unsigned int i, regno;
17230 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
17231 GEN_INT (sp_offset + 5 * reg_size));
17232 rtx mem = gen_frame_mem (reg_mode, addr);
17234 emit_move_insn (gen_rtx_REG (reg_mode, 2), mem);
17241 regno = EH_RETURN_DATA_REGNO (i);
17242 if (regno == INVALID_REGNUM)
17245 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
17246 info->ehrd_offset + sp_offset
17247 + reg_size * (int) i);
17249 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
17253 /* Restore GPRs. This is done as a PARALLEL if we are using
17254 the load-multiple instructions. */
17256 && info->spe_64bit_regs_used != 0
17257 && info->first_gp_reg_save != 32)
17259 /* Determine whether we can address all of the registers that need
17260 to be saved with an offset from the stack pointer that fits in
17261 the small const field for SPE memory instructions. */
17262 int spe_regs_addressable_via_sp
17263 = (SPE_CONST_OFFSET_OK(info->spe_gp_save_offset + sp_offset
17264 + (32 - info->first_gp_reg_save - 1) * reg_size)
17265 && restoring_GPRs_inline);
17268 if (spe_regs_addressable_via_sp)
17269 spe_offset = info->spe_gp_save_offset + sp_offset;
17272 rtx old_frame_reg_rtx = frame_reg_rtx;
17273 /* Make r11 point to the start of the SPE save area. We worried about
17274 not clobbering it when we were saving registers in the prologue.
17275 There's no need to worry here because the static chain is passed
17276 anew to every function. */
17277 int ool_adjust = (restoring_GPRs_inline
17279 : (info->first_gp_reg_save
17280 - (FIRST_SAVRES_REGISTER+1))*8);
17282 if (frame_reg_rtx == sp_reg_rtx)
17283 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
17284 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
17285 GEN_INT (info->spe_gp_save_offset
17288 /* Keep the invariant that frame_reg_rtx + sp_offset points
17289 at the top of the stack frame. */
17290 sp_offset = -info->spe_gp_save_offset;
17295 if (restoring_GPRs_inline)
17297 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
17298 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
17300 rtx offset, addr, mem;
17302 /* We're doing all this to ensure that the immediate offset
17303 fits into the immediate field of 'evldd'. */
17304 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
17306 offset = GEN_INT (spe_offset + reg_size * i);
17307 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
17308 mem = gen_rtx_MEM (V2SImode, addr);
17310 emit_move_insn (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
17318 par = rs6000_make_savres_rtx (info, gen_rtx_REG (Pmode, 11),
17320 /*savep=*/false, /*gpr=*/true,
17322 emit_jump_insn (par);
17324 /* We don't want anybody else emitting things after we jumped back.  */
17329 else if (!restoring_GPRs_inline)
17331 /* We are jumping to an out-of-line function. */
17332 bool can_use_exit = info->first_fp_reg_save == 64;
17335 /* Emit stack reset code if we need it. */
17337 rs6000_emit_stack_reset (info, sp_reg_rtx, frame_reg_rtx,
17338 sp_offset, can_use_exit);
17340 emit_insn (gen_addsi3 (gen_rtx_REG (Pmode, 11),
17342 GEN_INT (sp_offset - info->fp_size)));
17344 par = rs6000_make_savres_rtx (info, frame_reg_rtx,
17345 info->gp_save_offset, reg_mode,
17346 /*savep=*/false, /*gpr=*/true,
17347 /*exitp=*/can_use_exit);
17351 if (info->cr_save_p)
17352 rs6000_restore_saved_cr (gen_rtx_REG (SImode, 12),
17353 using_mtcr_multiple);
17355 emit_jump_insn (par);
17357 /* We don't want anybody else emitting things after we jumped back.  */
17364 else if (using_load_multiple)
17367 p = rtvec_alloc (32 - info->first_gp_reg_save);
17368 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
17370 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
17371 GEN_INT (info->gp_save_offset
17374 rtx mem = gen_frame_mem (reg_mode, addr);
17377 gen_rtx_SET (VOIDmode,
17378 gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
17381 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
17385 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
17386 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
17388 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
17389 GEN_INT (info->gp_save_offset
17392 rtx mem = gen_frame_mem (reg_mode, addr);
17394 emit_move_insn (gen_rtx_REG (reg_mode,
17395 info->first_gp_reg_save + i), mem);
17399 /* Restore fpr's if we need to do it without calling a function. */
17400 if (restoring_FPRs_inline)
17401 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
17402 if ((df_regs_ever_live_p (info->first_fp_reg_save+i)
17403 && ! call_used_regs[info->first_fp_reg_save+i]))
17406 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
17407 GEN_INT (info->fp_save_offset
17410 mem = gen_frame_mem (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
17411 ? DFmode : SFmode), addr);
17413 emit_move_insn (gen_rtx_REG (((TARGET_HARD_FLOAT
17414 && TARGET_DOUBLE_FLOAT)
17415 ? DFmode : SFmode),
17416 info->first_fp_reg_save + i),
17420 /* If we saved cr, restore it here. Just those that were used. */
17421 if (info->cr_save_p)
17422 rs6000_restore_saved_cr (gen_rtx_REG (SImode, 12), using_mtcr_multiple);
17424 /* If this is V.4, unwind the stack pointer after all of the loads have been done.  */
17426 rs6000_emit_stack_reset (info, sp_reg_rtx, frame_reg_rtx,
17427 sp_offset, !restoring_FPRs_inline);
17429 if (crtl->calls_eh_return)
17431 rtx sa = EH_RETURN_STACKADJ_RTX;
17432 emit_insn (TARGET_32BIT
17433 ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx, sa)
17434 : gen_adddi3 (sp_reg_rtx, sp_reg_rtx, sa));
17440 if (! restoring_FPRs_inline)
17441 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
17443 p = rtvec_alloc (2);
17445 RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
17446 RTVEC_ELT (p, 1) = (restoring_FPRs_inline
17447 ? gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 65))
17448 : gen_rtx_CLOBBER (VOIDmode,
17449 gen_rtx_REG (Pmode, 65)));
17451 /* If we have to restore more than two FP registers, branch to the
17452 restore function. It will return to our caller. */
17453 if (! restoring_FPRs_inline)
17458 sym = rs6000_savres_routine_sym (info,
17462 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
17463 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
17464 gen_rtx_REG (Pmode, 11));
17465 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
17468 addr = gen_rtx_PLUS (Pmode, sp_reg_rtx,
17469 GEN_INT (info->fp_save_offset + 8*i));
17470 mem = gen_frame_mem (DFmode, addr);
17472 RTVEC_ELT (p, i+4) =
17473 gen_rtx_SET (VOIDmode,
17474 gen_rtx_REG (DFmode, info->first_fp_reg_save + i),
17479 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
17483 /* Write function epilogue. */
17486 rs6000_output_function_epilogue (FILE *file,
17487 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
17489 if (! HAVE_epilogue)
17491 rtx insn = get_last_insn ();
17492 /* If the last insn was a BARRIER, we don't have to write anything except
17493 the trace table. */
17494 if (GET_CODE (insn) == NOTE)
17495 insn = prev_nonnote_insn (insn);
17496 if (insn == 0 || GET_CODE (insn) != BARRIER)
17498 /* This is slightly ugly, but at least we don't have two
17499 copies of the epilogue-emitting code. */
17502 /* A NOTE_INSN_DELETED is supposed to be at the start
17503 and end of the "toplevel" insn chain. */
17504 emit_note (NOTE_INSN_DELETED);
17505 rs6000_emit_epilogue (FALSE);
17506 emit_note (NOTE_INSN_DELETED);
17508 /* Expand INSN_ADDRESSES so final() doesn't crash. */
17512 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
17514 INSN_ADDRESSES_NEW (insn, addr);
17519 if (TARGET_DEBUG_STACK)
17520 debug_rtx_list (get_insns (), 100);
17521 final (get_insns (), file, FALSE);
17527 macho_branch_islands ();
17528 /* Mach-O doesn't support labels at the end of objects, so if
17529 it looks like we might want one, insert a NOP. */
17531 rtx insn = get_last_insn ();
17534 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
17535 insn = PREV_INSN (insn);
17539 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
17540 fputs ("\tnop\n", file);
17544 /* Output a traceback table here. See /usr/include/sys/debug.h for info on its fields.
17547 We don't output a traceback table if -finhibit-size-directive was
17548 used. The documentation for -finhibit-size-directive reads
17549 ``don't output a @code{.size} assembler directive, or anything
17550 else that would cause trouble if the function is split in the
17551 middle, and the two halves are placed at locations far apart in
17552 memory.'' The traceback table has this property, since it
17553 includes the offset from the start of the function to the
17554 traceback table itself.
17556 System V.4 Powerpc's (and the embedded ABI derived from it) use a
17557 different traceback table. */
17558 if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
17559 && rs6000_traceback != traceback_none && !crtl->is_thunk)
17561 const char *fname = NULL;
17562 const char *language_string = lang_hooks.name;
17563 int fixed_parms = 0, float_parms = 0, parm_info = 0;
17565 int optional_tbtab;
17566 rs6000_stack_t *info = rs6000_stack_info ();
17568 if (rs6000_traceback == traceback_full)
17569 optional_tbtab = 1;
17570 else if (rs6000_traceback == traceback_part)
17571 optional_tbtab = 0;
17573 optional_tbtab = !optimize_size && !TARGET_ELF;
17575 if (optional_tbtab)
17577 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
17578 while (*fname == '.') /* V.4 encodes . in the name */
17581 /* Need label immediately before tbtab, so we can compute
17582 its offset from the function start. */
17583 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
17584 ASM_OUTPUT_LABEL (file, fname);
17587 /* The .tbtab pseudo-op can only be used for the first eight
17588 expressions, since it can't handle the possibly variable
17589 length fields that follow. However, if you omit the optional
17590 fields, the assembler outputs zeros for all optional fields
17591 anyway, giving each variable length field its minimum length
17592 (as defined in sys/debug.h). Thus we cannot use the .tbtab
17593 pseudo-op at all. */
17595 /* An all-zero word flags the start of the tbtab, for debuggers
17596 that have to find it by searching forward from the entry
17597 point or from the current pc. */
17598 fputs ("\t.long 0\n", file);
17600 /* Tbtab format type. Use format type 0. */
17601 fputs ("\t.byte 0,", file);
17603 /* Language type. Unfortunately, there does not seem to be any
17604 official way to discover the language being compiled, so we
17605 use language_string.
17606 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
17607 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
17608 a number, so for now use 9. */
17609 if (! strcmp (language_string, "GNU C"))
17611 else if (! strcmp (language_string, "GNU F77")
17612 || ! strcmp (language_string, "GNU Fortran"))
17614 else if (! strcmp (language_string, "GNU Pascal"))
17616 else if (! strcmp (language_string, "GNU Ada"))
17618 else if (! strcmp (language_string, "GNU C++")
17619 || ! strcmp (language_string, "GNU Objective-C++"))
17621 else if (! strcmp (language_string, "GNU Java"))
17623 else if (! strcmp (language_string, "GNU Objective-C"))
17626 gcc_unreachable ();
17627 fprintf (file, "%d,", i);
17629 /* 8 single bit fields: global linkage (not set for C extern linkage,
17630 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
17631 from start of procedure stored in tbtab, internal function, function
17632 has controlled storage, function has no toc, function uses fp,
17633 function logs/aborts fp operations. */
17634 /* Assume that fp operations are used if any fp reg must be saved. */
17635 fprintf (file, "%d,",
17636 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
17638 /* 6 bitfields: function is interrupt handler, name present in
17639 proc table, function calls alloca, on condition directives
17640 (controls stack walks, 3 bits), saves condition reg, saves link reg.  */
17642 /* The `function calls alloca' bit seems to be set whenever reg 31 is
17643 set up as a frame pointer, even when there is no alloca call. */
17644 fprintf (file, "%d,",
17645 ((optional_tbtab << 6)
17646 | ((optional_tbtab & frame_pointer_needed) << 5)
17647 | (info->cr_save_p << 1)
17648 | (info->lr_save_p)));
17650 /* 3 bitfields: saves backchain, fixup code, number of fpr saved (6 bits).  */
17652 fprintf (file, "%d,",
17653 (info->push_p << 7) | (64 - info->first_fp_reg_save));
17655 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
17656 fprintf (file, "%d,", (32 - first_reg_to_save ()));
17658 if (optional_tbtab)
17660 /* Compute the parameter info from the function decl argument list.  */
17663 int next_parm_info_bit = 31;
17665 for (decl = DECL_ARGUMENTS (current_function_decl);
17666 decl; decl = TREE_CHAIN (decl))
17668 rtx parameter = DECL_INCOMING_RTL (decl);
17669 enum machine_mode mode = GET_MODE (parameter);
17671 if (GET_CODE (parameter) == REG)
17673 if (SCALAR_FLOAT_MODE_P (mode))
17694 gcc_unreachable ();
17697 /* If only one bit will fit, don't or in this entry. */
17698 if (next_parm_info_bit > 0)
17699 parm_info |= (bits << (next_parm_info_bit - 1));
17700 next_parm_info_bit -= 2;
17704 fixed_parms += ((GET_MODE_SIZE (mode)
17705 + (UNITS_PER_WORD - 1))
17707 next_parm_info_bit -= 1;
17713 /* Number of fixed point parameters. */
17714 /* This is actually the number of words of fixed point parameters; thus
17715 an 8 byte struct counts as 2; and thus the maximum value is 8. */
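	 /* Worked example (illustrative only): on a 32-bit target, where
	    UNITS_PER_WORD is 4, an 8-byte argument contributes
	    (8 + 4 - 1) / 4 = 2 words to fixed_parms.  */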
17716 fprintf (file, "%d,", fixed_parms);
17718 /* 2 bitfields: number of floating point parameters (7 bits), parameters on stack.  */
17720 /* This is actually the number of fp registers that hold parameters;
17721 and thus the maximum value is 13. */
17722 /* Set parameters on stack bit if parameters are not in their original
17723 registers, regardless of whether they are on the stack? Xlc
17724 seems to set the bit when not optimizing. */
17725 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
17727 if (! optional_tbtab)
17730 /* Optional fields follow. Some are variable length. */
17732 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
17733 11 double float. */
17734 /* There is an entry for each parameter in a register, in the order that
17735 they occur in the parameter list. Any intervening arguments on the
17736 stack are ignored. If the list overflows a long (max possible length
17737 34 bits) then completely leave off all elements that don't fit. */
17738 /* Only emit this long if there was at least one parameter. */
17739 if (fixed_parms || float_parms)
17740 fprintf (file, "\t.long %d\n", parm_info);
17742 /* Offset from start of code to tb table. */
17743 fputs ("\t.long ", file);
17744 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
17746 RS6000_OUTPUT_BASENAME (file, fname);
17748 assemble_name (file, fname);
17750 rs6000_output_function_entry (file, fname);
17753 /* Interrupt handler mask. */
17754 /* Omit this long, since we never set the interrupt handler bit above.  */
17757 /* Number of CTL (controlled storage) anchors. */
17758 /* Omit this long, since the has_ctl bit is never set above. */
17760 /* Displacement into stack of each CTL anchor. */
17761 /* Omit this list of longs, because there are no CTL anchors. */
17763 /* Length of function name. */
17766 fprintf (file, "\t.short %d\n", (int) strlen (fname));
17768 /* Function name. */
17769 assemble_string (fname, strlen (fname));
17771 /* Register for alloca automatic storage; this is always reg 31.
17772 Only emit this if the alloca bit was set above. */
17773 if (frame_pointer_needed)
17774 fputs ("\t.byte 31\n", file);
17776 fputs ("\t.align 2\n", file);
17780 /* A C compound statement that outputs the assembler code for a thunk
17781 function, used to implement C++ virtual function calls with
17782 multiple inheritance. The thunk acts as a wrapper around a virtual
17783 function, adjusting the implicit object parameter before handing
17784 control off to the real function.
17786 First, emit code to add the integer DELTA to the location that
17787 contains the incoming first argument. Assume that this argument
17788 contains a pointer, and is the one used to pass the `this' pointer
17789 in C++. This is the incoming argument *before* the function
17790 prologue, e.g. `%o0' on a sparc. The addition must preserve the
17791 values of all other incoming arguments.
17793 After the addition, emit code to jump to FUNCTION, which is a
17794 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
17795 not touch the return address. Hence returning from FUNCTION will
17796 return to whoever called the current `thunk'.
17798 The effect must be as if FUNCTION had been called directly with the
17799 adjusted first argument. This macro is responsible for emitting
17800 all of the code for a thunk function; output_function_prologue()
17801 and output_function_epilogue() are not invoked.
17803 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
17804 been extracted from it.) It might possibly be useful on some
17805 targets, but probably not.
17807 If you do not define this macro, the target-independent code in the
17808 C++ frontend will generate a less efficient heavyweight thunk that
17809 calls FUNCTION instead of jumping to it. The generic approach does
17810 not support varargs. */
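/* Illustrative sketch only (assumed semantics, not code this function emits
   verbatim): ignoring ABI details, the thunk generated below behaves roughly
   like the following, where THIS is the incoming pointer argument:

	char *p = (char *) this + delta;
	if (vcall_offset != 0)
	  p += *(ptrdiff_t *) (*(char **) p + vcall_offset);
	return function (p, ...);	-- direct tail call, no new stack frame
*/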
17813 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
17814 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
17817 rtx this_rtx, insn, funexp;
17819 reload_completed = 1;
17820 epilogue_completed = 1;
17822 /* Mark the end of the (empty) prologue. */
17823 emit_note (NOTE_INSN_PROLOGUE_END);
17825 /* Find the "this" pointer. If the function returns a structure,
17826 the structure return pointer is in r3. */
17827 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
17828 this_rtx = gen_rtx_REG (Pmode, 4);
17830 this_rtx = gen_rtx_REG (Pmode, 3);
17832 /* Apply the constant offset, if required. */
17835 rtx delta_rtx = GEN_INT (delta);
17836 emit_insn (TARGET_32BIT
17837 ? gen_addsi3 (this_rtx, this_rtx, delta_rtx)
17838 : gen_adddi3 (this_rtx, this_rtx, delta_rtx));
17841 /* Apply the offset from the vtable, if required. */
17844 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
17845 rtx tmp = gen_rtx_REG (Pmode, 12);
17847 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
17848 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
17850 emit_insn (TARGET_32BIT
17851 ? gen_addsi3 (tmp, tmp, vcall_offset_rtx)
17852 : gen_adddi3 (tmp, tmp, vcall_offset_rtx));
17853 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
17857 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
17859 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
17861 emit_insn (TARGET_32BIT
17862 ? gen_addsi3 (this_rtx, this_rtx, tmp)
17863 : gen_adddi3 (this_rtx, this_rtx, tmp));
17866 /* Generate a tail call to the target function. */
17867 if (!TREE_USED (function))
17869 assemble_external (function);
17870 TREE_USED (function) = 1;
17872 funexp = XEXP (DECL_RTL (function), 0);
17873 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
17876 if (MACHOPIC_INDIRECT)
17877 funexp = machopic_indirect_call_target (funexp);
17880 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
17881 generate sibcall RTL explicitly. */
17882 insn = emit_call_insn (
17883 gen_rtx_PARALLEL (VOIDmode,
17885 gen_rtx_CALL (VOIDmode,
17886 funexp, const0_rtx),
17887 gen_rtx_USE (VOIDmode, const0_rtx),
17888 gen_rtx_USE (VOIDmode,
17889 gen_rtx_REG (SImode,
17891 gen_rtx_RETURN (VOIDmode))));
17892 SIBLING_CALL_P (insn) = 1;
17895 /* Run just enough of rest_of_compilation to get the insns emitted.
17896 There's not really enough bulk here to make other passes such as
17897 instruction scheduling worth while. Note that use_thunk calls
17898 assemble_start_function and assemble_end_function. */
17899 insn = get_insns ();
17900 insn_locators_alloc ();
17901 shorten_branches (insn);
17902 final_start_function (insn, file, 1);
17903 final (insn, file, 1);
17904 final_end_function ();
17905 free_after_compilation (cfun);
17907 reload_completed = 0;
17908 epilogue_completed = 0;
17911 /* A quick summary of the various types of 'constant-pool tables' under PowerPC:
17914 Target Flags Name One table per
17915 AIX (none) AIX TOC object file
17916 AIX -mfull-toc AIX TOC object file
17917 AIX -mminimal-toc AIX minimal TOC translation unit
17918 SVR4/EABI (none) SVR4 SDATA object file
17919 SVR4/EABI -fpic SVR4 pic object file
17920 SVR4/EABI -fPIC SVR4 PIC translation unit
17921 SVR4/EABI -mrelocatable EABI TOC function
17922 SVR4/EABI -maix AIX TOC object file
17923 SVR4/EABI -maix -mminimal-toc
17924 AIX minimal TOC translation unit
17926 Name Reg. Set by entries made by contains:
17927 addrs? fp? sum?
17929 AIX TOC 2 crt0 as Y option option
17930 AIX minimal TOC 30 prolog gcc Y Y option
17931 SVR4 SDATA 13 crt0 gcc N Y N
17932 SVR4 pic 30 prolog ld Y not yet N
17933 SVR4 PIC 30 prolog gcc Y option option
17934 EABI TOC 30 prolog gcc Y option option  */
17938 /* Hash functions for the hash table. */
17941 rs6000_hash_constant (rtx k)
17943 enum rtx_code code = GET_CODE (k);
17944 enum machine_mode mode = GET_MODE (k);
17945 unsigned result = (code << 3) ^ mode;
17946 const char *format;
17949 format = GET_RTX_FORMAT (code);
17950 flen = strlen (format);
17956 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
17959 if (mode != VOIDmode)
17960 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
17972 for (; fidx < flen; fidx++)
17973 switch (format[fidx])
17978 const char *str = XSTR (k, fidx);
17979 len = strlen (str);
17980 result = result * 613 + len;
17981 for (i = 0; i < len; i++)
17982 result = result * 613 + (unsigned) str[i];
17987 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
17991 result = result * 613 + (unsigned) XINT (k, fidx);
17994 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
17995 result = result * 613 + (unsigned) XWINT (k, fidx);
17999 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
18000 result = result * 613 + (unsigned) (XWINT (k, fidx)
18007 gcc_unreachable ();
18014 toc_hash_function (const void *hash_entry)
18016 const struct toc_hash_struct *thc =
18017 (const struct toc_hash_struct *) hash_entry;
18018 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
18021 /* Compare H1 and H2 for equivalence. */
18024 toc_hash_eq (const void *h1, const void *h2)
18026 rtx r1 = ((const struct toc_hash_struct *) h1)->key;
18027 rtx r2 = ((const struct toc_hash_struct *) h2)->key;
18029 if (((const struct toc_hash_struct *) h1)->key_mode
18030 != ((const struct toc_hash_struct *) h2)->key_mode)
18033 return rtx_equal_p (r1, r2);
18036 /* These are the names given by the C++ front-end to vtables, and
18037 vtable-like objects. Ideally, this logic should not be here;
18038 instead, there should be some programmatic way of inquiring as
18039 to whether or not an object is a vtable. */
18041 #define VTABLE_NAME_P(NAME) \
18042 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
18043 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
18044 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
18045 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
18046 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
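/* For instance (illustrative): "_ZTV4Base", the Itanium C++ ABI mangling of
   "vtable for Base", satisfies VTABLE_NAME_P, while an ordinary function
   symbol does not.  */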
18048 #ifdef NO_DOLLAR_IN_LABEL
18049 /* Return a GGC-allocated character string translating dollar signs in
18050 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
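/* For example (illustrative): a label such as "foo$bar$baz" is returned as
   "foo_bar_baz"; names with no '$', or whose first character is '$', are
   returned unchanged.  */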
18053 rs6000_xcoff_strip_dollar (const char *name)
18058 p = strchr (name, '$');
18060 if (p == 0 || p == name)
18063 len = strlen (name);
18064 strip = (char *) alloca (len + 1);
18065 strcpy (strip, name);
18066 p = strchr (strip, '$');
18070 p = strchr (p + 1, '$');
18073 return ggc_alloc_string (strip, len);
18078 rs6000_output_symbol_ref (FILE *file, rtx x)
18080 /* Currently C++ toc references to vtables can be emitted before it
18081 is decided whether the vtable is public or private. If this is
18082 the case, then the linker will eventually complain that there is
18083 a reference to an unknown section. Thus, for vtables only,
18084 we emit the TOC reference to reference the symbol and not the section.  */
18086 const char *name = XSTR (x, 0);
18088 if (VTABLE_NAME_P (name))
18090 RS6000_OUTPUT_BASENAME (file, name);
18093 assemble_name (file, name);
18096 /* Output a TOC entry. We derive the entry name from what is being written.  */
18100 output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
18103 const char *name = buf;
18105 HOST_WIDE_INT offset = 0;
18107 gcc_assert (!TARGET_NO_TOC);
18109 /* When the linker won't eliminate them, don't output duplicate
18110 TOC entries (this happens on AIX if there is any kind of TOC,
18111 and on SVR4 under -fPIC or -mrelocatable). Don't do this for LABEL_REFs.  */
18113 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
18115 struct toc_hash_struct *h;
18118 /* Create toc_hash_table. This can't be done at OVERRIDE_OPTIONS
18119 time because GGC is not initialized at that point. */
18120 if (toc_hash_table == NULL)
18121 toc_hash_table = htab_create_ggc (1021, toc_hash_function,
18122 toc_hash_eq, NULL);
18124 h = GGC_NEW (struct toc_hash_struct);
18126 h->key_mode = mode;
18127 h->labelno = labelno;
18129 found = htab_find_slot (toc_hash_table, h, 1);
18130 if (*found == NULL)
18132 else /* This is indeed a duplicate.
18133 Set this label equal to that label. */
18135 fputs ("\t.set ", file);
18136 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
18137 fprintf (file, "%d,", labelno);
18138 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
18139 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
18145 /* If we're going to put a double constant in the TOC, make sure it's
18146 aligned properly when strict alignment is on. */
18147 if (GET_CODE (x) == CONST_DOUBLE
18148 && STRICT_ALIGNMENT
18149 && GET_MODE_BITSIZE (mode) >= 64
18150 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
18151 ASM_OUTPUT_ALIGN (file, 3);
18154 (*targetm.asm_out.internal_label) (file, "LC", labelno);
18156 /* Handle FP constants specially. Note that if we have a minimal
18157 TOC, things we put here aren't actually in the TOC, so we can allow FP constants.  */
18159 if (GET_CODE (x) == CONST_DOUBLE &&
18160 (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
18162 REAL_VALUE_TYPE rv;
18165 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
18166 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
18167 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
18169 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
18173 if (TARGET_MINIMAL_TOC)
18174 fputs (DOUBLE_INT_ASM_OP, file);
18176 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
18177 k[0] & 0xffffffff, k[1] & 0xffffffff,
18178 k[2] & 0xffffffff, k[3] & 0xffffffff);
18179 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
18180 k[0] & 0xffffffff, k[1] & 0xffffffff,
18181 k[2] & 0xffffffff, k[3] & 0xffffffff);
18186 if (TARGET_MINIMAL_TOC)
18187 fputs ("\t.long ", file);
18189 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
18190 k[0] & 0xffffffff, k[1] & 0xffffffff,
18191 k[2] & 0xffffffff, k[3] & 0xffffffff);
18192 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
18193 k[0] & 0xffffffff, k[1] & 0xffffffff,
18194 k[2] & 0xffffffff, k[3] & 0xffffffff);
18198 else if (GET_CODE (x) == CONST_DOUBLE &&
18199 (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
18201 REAL_VALUE_TYPE rv;
18204 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
18206 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
18207 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
18209 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
18213 if (TARGET_MINIMAL_TOC)
18214 fputs (DOUBLE_INT_ASM_OP, file);
18216 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
18217 k[0] & 0xffffffff, k[1] & 0xffffffff);
18218 fprintf (file, "0x%lx%08lx\n",
18219 k[0] & 0xffffffff, k[1] & 0xffffffff);
18224 if (TARGET_MINIMAL_TOC)
18225 fputs ("\t.long ", file);
18227 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
18228 k[0] & 0xffffffff, k[1] & 0xffffffff);
18229 fprintf (file, "0x%lx,0x%lx\n",
18230 k[0] & 0xffffffff, k[1] & 0xffffffff);
18234 else if (GET_CODE (x) == CONST_DOUBLE &&
18235 (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
18237 REAL_VALUE_TYPE rv;
18240 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
18241 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
18242 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
18244 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
18248 if (TARGET_MINIMAL_TOC)
18249 fputs (DOUBLE_INT_ASM_OP, file);
18251 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
18252 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
18257 if (TARGET_MINIMAL_TOC)
18258 fputs ("\t.long ", file);
18260 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
18261 fprintf (file, "0x%lx\n", l & 0xffffffff);
18265 else if (GET_MODE (x) == VOIDmode
18266 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
18268 unsigned HOST_WIDE_INT low;
18269 HOST_WIDE_INT high;
18271 if (GET_CODE (x) == CONST_DOUBLE)
18273 low = CONST_DOUBLE_LOW (x);
18274 high = CONST_DOUBLE_HIGH (x);
18277 #if HOST_BITS_PER_WIDE_INT == 32
18280 high = (low & 0x80000000) ? ~0 : 0;
18284 low = INTVAL (x) & 0xffffffff;
18285 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
18289 /* TOC entries are always Pmode-sized, but since this
18290 is a big-endian machine, then if we're putting smaller
18291 integer constants in the TOC we have to pad them.
18292 (This is still a win over putting the constants in
18293 a separate constant pool, because then we'd have
18294 to have both a TOC entry _and_ the actual constant.)
18296 For a 32-bit target, CONST_INT values are loaded and shifted
18297 entirely within `low' and can be stored in one TOC entry. */
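      /* For instance (illustrative): an SImode constant placed in a 64-bit
	 TOC is shifted left by POINTER_SIZE - 32 = 32 bits below, so the
	 value ends up in the most significant half of the Pmode-sized slot.  */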
18299 /* It would be easy to make this work, but it doesn't now. */
18300 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
18302 if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
18304 #if HOST_BITS_PER_WIDE_INT == 32
18305 lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
18306 POINTER_SIZE, &low, &high, 0);
18309 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
18310 high = (HOST_WIDE_INT) low >> 32;
18317 if (TARGET_MINIMAL_TOC)
18318 fputs (DOUBLE_INT_ASM_OP, file);
18320 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
18321 (long) high & 0xffffffff, (long) low & 0xffffffff);
18322 fprintf (file, "0x%lx%08lx\n",
18323 (long) high & 0xffffffff, (long) low & 0xffffffff);
18328 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
18330 if (TARGET_MINIMAL_TOC)
18331 fputs ("\t.long ", file);
18333 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
18334 (long) high & 0xffffffff, (long) low & 0xffffffff);
18335 fprintf (file, "0x%lx,0x%lx\n",
18336 (long) high & 0xffffffff, (long) low & 0xffffffff);
18340 if (TARGET_MINIMAL_TOC)
18341 fputs ("\t.long ", file);
18343 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
18344 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
18350 if (GET_CODE (x) == CONST)
18352 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS);
18354 base = XEXP (XEXP (x, 0), 0);
18355 offset = INTVAL (XEXP (XEXP (x, 0), 1));
18358 switch (GET_CODE (base))
18361 name = XSTR (base, 0);
18365 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
18366 CODE_LABEL_NUMBER (XEXP (base, 0)));
18370 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
18374 gcc_unreachable ();
18377 if (TARGET_MINIMAL_TOC)
18378 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
18381 fputs ("\t.tc ", file);
18382 RS6000_OUTPUT_BASENAME (file, name);
18385 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
18387 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
18389 fputs ("[TC],", file);
18392 /* Currently C++ toc references to vtables can be emitted before it
18393 is decided whether the vtable is public or private. If this is
18394 the case, then the linker will eventually complain that there is
18395 a TOC reference to an unknown section. Thus, for vtables only,
18396 we emit the TOC reference to reference the symbol and not the section.  */
18398 if (VTABLE_NAME_P (name))
18400 RS6000_OUTPUT_BASENAME (file, name);
18402 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
18403 else if (offset > 0)
18404 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
18407 output_addr_const (file, x);
18411 /* Output an assembler pseudo-op to write an ASCII string of N characters
18412 starting at P to FILE.
18414 On the RS/6000, we have to do this using the .byte operation and
18415 write out special characters outside the quoted string.
18416 Also, the assembler is broken; very long strings are truncated,
18417 so we must artificially break them up early. */
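/* For example (illustrative, assumed output): output_ascii (file, "Hi\n", 3)
   would emit something like

	.byte "Hi"
	.byte 10
*/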
18420 output_ascii (FILE *file, const char *p, int n)
18423 int i, count_string;
18424 const char *for_string = "\t.byte \"";
18425 const char *for_decimal = "\t.byte ";
18426 const char *to_close = NULL;
18429 for (i = 0; i < n; i++)
18432 if (c >= ' ' && c < 0177)
18435 fputs (for_string, file);
18438 /* Write two quotes to get one. */
18446 for_decimal = "\"\n\t.byte ";
18450 if (count_string >= 512)
18452 fputs (to_close, file);
18454 for_string = "\t.byte \"";
18455 for_decimal = "\t.byte ";
18463 fputs (for_decimal, file);
18464 fprintf (file, "%d", c);
18466 for_string = "\n\t.byte \"";
18467 for_decimal = ", ";
18473 /* Now close the string if we have written one. Then end the line. */
18475 fputs (to_close, file);
18478 /* Generate a unique section name for FILENAME for a section type
18479 represented by SECTION_DESC. Output goes into BUF.
18481 SECTION_DESC can be any string, as long as it is different for each
18482 possible section type.
18484 We name the section in the same manner as xlc. The name begins with an
18485 underscore followed by the filename (after stripping any leading directory
18486 names) with the last period replaced by the string SECTION_DESC. If
18487 FILENAME does not contain a period, SECTION_DESC is appended to the end of the name.  */
18491 rs6000_gen_section_name (char **buf, const char *filename,
18492 const char *section_desc)
18494 const char *q, *after_last_slash, *last_period = 0;
18498 after_last_slash = filename;
18499 for (q = filename; *q; q++)
18502 after_last_slash = q + 1;
18503 else if (*q == '.')
18507 len = strlen (after_last_slash) + strlen (section_desc) + 2;
18508 *buf = (char *) xmalloc (len);
18513 for (q = after_last_slash; *q; q++)
18515 if (q == last_period)
18517 strcpy (p, section_desc);
18518 p += strlen (section_desc);
18522 else if (ISALNUM (*q))
18526 if (last_period == 0)
18527 strcpy (p, section_desc);
18532 /* Emit profile function. */
18535 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
18537 /* Non-standard profiling for kernels, which just saves LR then calls
18538 _mcount without worrying about arg saves. The idea is to change
18539 the function prologue as little as possible as it isn't easy to
18540 account for arg save/restore code added just for _mcount. */
18541 if (TARGET_PROFILE_KERNEL)
18544 if (DEFAULT_ABI == ABI_AIX)
18546 #ifndef NO_PROFILE_COUNTERS
18547 # define NO_PROFILE_COUNTERS 0
18549 if (NO_PROFILE_COUNTERS)
18550 emit_library_call (init_one_libfunc (RS6000_MCOUNT), 0, VOIDmode, 0);
18554 const char *label_name;
18557 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
18558 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
18559 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
18561 emit_library_call (init_one_libfunc (RS6000_MCOUNT), 0, VOIDmode, 1,
18565 else if (DEFAULT_ABI == ABI_DARWIN)
18567 const char *mcount_name = RS6000_MCOUNT;
18568 int caller_addr_regno = LR_REGNO;
18570 /* Be conservative and always set this, at least for now. */
18571 crtl->uses_pic_offset_table = 1;
18574 /* For PIC code, set up a stub and collect the caller's address
18575 from r0, which is where the prologue puts it. */
18576 if (MACHOPIC_INDIRECT
18577 && crtl->uses_pic_offset_table)
18578 caller_addr_regno = 0;
18580 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
18582 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
18586 /* Write function profiler code. */
18589 output_function_profiler (FILE *file, int labelno)
18593 switch (DEFAULT_ABI)
18596 gcc_unreachable ();
18601 warning (0, "no profiling of 64-bit code for this ABI");
18604 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
18605 fprintf (file, "\tmflr %s\n", reg_names[0]);
18606 if (NO_PROFILE_COUNTERS)
18608 asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
18609 reg_names[0], reg_names[1]);
18611 else if (TARGET_SECURE_PLT && flag_pic)
18613 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n\t{st|stw} %s,4(%s)\n",
18614 reg_names[0], reg_names[1]);
18615 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
18616 asm_fprintf (file, "\t{cau|addis} %s,%s,",
18617 reg_names[12], reg_names[12]);
18618 assemble_name (file, buf);
18619 asm_fprintf (file, "-1b@ha\n\t{cal|la} %s,", reg_names[0]);
18620 assemble_name (file, buf);
18621 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
18623 else if (flag_pic == 1)
18625 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
18626 asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
18627 reg_names[0], reg_names[1]);
18628 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
18629 asm_fprintf (file, "\t{l|lwz} %s,", reg_names[0]);
18630 assemble_name (file, buf);
18631 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
18633 else if (flag_pic > 1)
18635 asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
18636 reg_names[0], reg_names[1]);
18637 /* Now, we need to get the address of the label. */
18638 fputs ("\tbcl 20,31,1f\n\t.long ", file);
18639 assemble_name (file, buf);
18640 fputs ("-.\n1:", file);
18641 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
18642 asm_fprintf (file, "\t{l|lwz} %s,0(%s)\n",
18643 reg_names[0], reg_names[11]);
18644 asm_fprintf (file, "\t{cax|add} %s,%s,%s\n",
18645 reg_names[0], reg_names[0], reg_names[11]);
18649 asm_fprintf (file, "\t{liu|lis} %s,", reg_names[12]);
18650 assemble_name (file, buf);
18651 fputs ("@ha\n", file);
18652 asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
18653 reg_names[0], reg_names[1]);
18654 asm_fprintf (file, "\t{cal|la} %s,", reg_names[0]);
18655 assemble_name (file, buf);
18656 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
18659 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
18660 fprintf (file, "\tbl %s%s\n",
18661 RS6000_MCOUNT, flag_pic ? "@plt" : "");
18666 if (!TARGET_PROFILE_KERNEL)
18668 /* Don't do anything, done in output_profile_hook (). */
18672 gcc_assert (!TARGET_32BIT);
18674 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
18675 asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
18677 if (cfun->static_chain_decl != NULL)
18679 asm_fprintf (file, "\tstd %s,24(%s)\n",
18680 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
18681 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
18682 asm_fprintf (file, "\tld %s,24(%s)\n",
18683 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
18686 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
18694 /* The following variable value is the last issued insn. */
18696 static rtx last_scheduled_insn;
18698 /* The following variable helps to balance issuing of load and
18699 store instructions */
18701 static int load_store_pendulum;
18703 /* Power4 load update and store update instructions are cracked into a
18704 load or store and an integer insn which are executed in the same cycle.
18705 Branches have their own dispatch slot which does not count against the
18706 GCC issue rate, but it changes the program flow so there are no other
18707 instructions to issue in this cycle. */
18710 rs6000_variable_issue (FILE *stream ATTRIBUTE_UNUSED,
18711 int verbose ATTRIBUTE_UNUSED,
18712 rtx insn, int more)
18714 last_scheduled_insn = insn;
18715 if (GET_CODE (PATTERN (insn)) == USE
18716 || GET_CODE (PATTERN (insn)) == CLOBBER)
18718 cached_can_issue_more = more;
18719 return cached_can_issue_more;
18722 if (insn_terminates_group_p (insn, current_group))
18724 cached_can_issue_more = 0;
18725 return cached_can_issue_more;
18728 /* If there is no reservation, but we reach here anyway.  */
18729 if (recog_memoized (insn) < 0)
18732 if (rs6000_sched_groups)
18734 if (is_microcoded_insn (insn))
18735 cached_can_issue_more = 0;
18736 else if (is_cracked_insn (insn))
18737 cached_can_issue_more = more > 2 ? more - 2 : 0;
18739 cached_can_issue_more = more - 1;
18741 return cached_can_issue_more;
18744 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
18747 cached_can_issue_more = more - 1;
18748 return cached_can_issue_more;
18751 /* Adjust the cost of a scheduling dependency. Return the new cost of
18752 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
18755 rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
18757 enum attr_type attr_type;
18759 if (! recog_memoized (insn))
18762 switch (REG_NOTE_KIND (link))
18766 /* Data dependency; DEP_INSN writes a register that INSN reads
18767 some cycles later. */
18769 /* Separate a load from a narrower, dependent store. */
18770 if (rs6000_sched_groups
18771 && GET_CODE (PATTERN (insn)) == SET
18772 && GET_CODE (PATTERN (dep_insn)) == SET
18773 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
18774 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
18775 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
18776 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
18779 attr_type = get_attr_type (insn);
18784 /* Tell the first scheduling pass about the latency between
18785 a mtctr and bctr (and mtlr and br/blr). The first
18786 scheduling pass will not know about this latency since
18787 the mtctr instruction, which has the latency associated
18788 with it, will be generated by reload. */
18789 return TARGET_POWER ? 5 : 4;
18791 /* Leave some extra cycles between a compare and its
18792 dependent branch, to inhibit expensive mispredicts. */
18793 if ((rs6000_cpu_attr == CPU_PPC603
18794 || rs6000_cpu_attr == CPU_PPC604
18795 || rs6000_cpu_attr == CPU_PPC604E
18796 || rs6000_cpu_attr == CPU_PPC620
18797 || rs6000_cpu_attr == CPU_PPC630
18798 || rs6000_cpu_attr == CPU_PPC750
18799 || rs6000_cpu_attr == CPU_PPC7400
18800 || rs6000_cpu_attr == CPU_PPC7450
18801 || rs6000_cpu_attr == CPU_POWER4
18802 || rs6000_cpu_attr == CPU_POWER5
18803 || rs6000_cpu_attr == CPU_CELL)
18804 && recog_memoized (dep_insn)
18805 && (INSN_CODE (dep_insn) >= 0))
18807 switch (get_attr_type (dep_insn))
18811 case TYPE_DELAYED_COMPARE:
18812 case TYPE_IMUL_COMPARE:
18813 case TYPE_LMUL_COMPARE:
18814 case TYPE_FPCOMPARE:
18815 case TYPE_CR_LOGICAL:
18816 case TYPE_DELAYED_CR:
18825 case TYPE_STORE_UX:
18827 case TYPE_FPSTORE_U:
18828 case TYPE_FPSTORE_UX:
18829 if ((rs6000_cpu == PROCESSOR_POWER6)
18830 && recog_memoized (dep_insn)
18831 && (INSN_CODE (dep_insn) >= 0))
18834 if (GET_CODE (PATTERN (insn)) != SET)
18835 /* If this happens, we have to extend this to schedule
18836 optimally. Return default for now. */
18839 /* Adjust the cost for the case where the value written
18840 by a fixed point operation is used as the address
18841 gen value on a store. */
18842 switch (get_attr_type (dep_insn))
18849 if (! store_data_bypass_p (dep_insn, insn))
18853 case TYPE_LOAD_EXT:
18854 case TYPE_LOAD_EXT_U:
18855 case TYPE_LOAD_EXT_UX:
18856 case TYPE_VAR_SHIFT_ROTATE:
18857 case TYPE_VAR_DELAYED_COMPARE:
18859 if (! store_data_bypass_p (dep_insn, insn))
18865 case TYPE_FAST_COMPARE:
18868 case TYPE_INSERT_WORD:
18869 case TYPE_INSERT_DWORD:
18870 case TYPE_FPLOAD_U:
18871 case TYPE_FPLOAD_UX:
18873 case TYPE_STORE_UX:
18874 case TYPE_FPSTORE_U:
18875 case TYPE_FPSTORE_UX:
18877 if (! store_data_bypass_p (dep_insn, insn))
18885 case TYPE_IMUL_COMPARE:
18886 case TYPE_LMUL_COMPARE:
18888 if (! store_data_bypass_p (dep_insn, insn))
18894 if (! store_data_bypass_p (dep_insn, insn))
18900 if (! store_data_bypass_p (dep_insn, insn))
18913 case TYPE_LOAD_EXT:
18914 case TYPE_LOAD_EXT_U:
18915 case TYPE_LOAD_EXT_UX:
18916 if ((rs6000_cpu == PROCESSOR_POWER6)
18917 && recog_memoized (dep_insn)
18918 && (INSN_CODE (dep_insn) >= 0))
18921 /* Adjust the cost for the case where the value written
18922 by a fixed point instruction is used within the address
18923 gen portion of a subsequent load(u)(x) */
18924 switch (get_attr_type (dep_insn))
18931 if (set_to_load_agen (dep_insn, insn))
18935 case TYPE_LOAD_EXT:
18936 case TYPE_LOAD_EXT_U:
18937 case TYPE_LOAD_EXT_UX:
18938 case TYPE_VAR_SHIFT_ROTATE:
18939 case TYPE_VAR_DELAYED_COMPARE:
18941 if (set_to_load_agen (dep_insn, insn))
18947 case TYPE_FAST_COMPARE:
18950 case TYPE_INSERT_WORD:
18951 case TYPE_INSERT_DWORD:
18952 case TYPE_FPLOAD_U:
18953 case TYPE_FPLOAD_UX:
18955 case TYPE_STORE_UX:
18956 case TYPE_FPSTORE_U:
18957 case TYPE_FPSTORE_UX:
18959 if (set_to_load_agen (dep_insn, insn))
18967 case TYPE_IMUL_COMPARE:
18968 case TYPE_LMUL_COMPARE:
18970 if (set_to_load_agen (dep_insn, insn))
18976 if (set_to_load_agen (dep_insn, insn))
18982 if (set_to_load_agen (dep_insn, insn))
18993 if ((rs6000_cpu == PROCESSOR_POWER6)
18994 && recog_memoized (dep_insn)
18995 && (INSN_CODE (dep_insn) >= 0)
18996 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
19003 /* Fall out to return default cost. */
19007 case REG_DEP_OUTPUT:
19008 /* Output dependency; DEP_INSN writes a register that INSN writes some cycles later.  */
19010 if ((rs6000_cpu == PROCESSOR_POWER6)
19011 && recog_memoized (dep_insn)
19012 && (INSN_CODE (dep_insn) >= 0))
19014 attr_type = get_attr_type (insn);
19019 if (get_attr_type (dep_insn) == TYPE_FP)
19023 if (get_attr_type (dep_insn) == TYPE_MFFGPR)
19031 /* Anti dependency; DEP_INSN reads a register that INSN writes some cycles later.  */
19036 gcc_unreachable ();
19042 /* The function returns true if INSN is microcoded.
19043 Return false otherwise. */
19046 is_microcoded_insn (rtx insn)
19048 if (!insn || !INSN_P (insn)
19049 || GET_CODE (PATTERN (insn)) == USE
19050 || GET_CODE (PATTERN (insn)) == CLOBBER)
19053 if (rs6000_cpu_attr == CPU_CELL)
19054 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
19056 if (rs6000_sched_groups)
19058 enum attr_type type = get_attr_type (insn);
19059 if (type == TYPE_LOAD_EXT_U
19060 || type == TYPE_LOAD_EXT_UX
19061 || type == TYPE_LOAD_UX
19062 || type == TYPE_STORE_UX
19063 || type == TYPE_MFCR)
19070 /* The function returns true if INSN is cracked into 2 instructions
19071 by the processor (and therefore occupies 2 issue slots). */
19074 is_cracked_insn (rtx insn)
19076 if (!insn || !INSN_P (insn)
19077 || GET_CODE (PATTERN (insn)) == USE
19078 || GET_CODE (PATTERN (insn)) == CLOBBER)
19081 if (rs6000_sched_groups)
19083 enum attr_type type = get_attr_type (insn);
19084 if (type == TYPE_LOAD_U || type == TYPE_STORE_U
19085 || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
19086 || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
19087 || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
19088 || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
19089 || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
19090 || type == TYPE_IDIV || type == TYPE_LDIV
19091 || type == TYPE_INSERT_WORD)
19098 /* The function returns true if INSN can be issued only from
19099 the branch slot. */
19102 is_branch_slot_insn (rtx insn)
19104 if (!insn || !INSN_P (insn)
19105 || GET_CODE (PATTERN (insn)) == USE
19106 || GET_CODE (PATTERN (insn)) == CLOBBER)
19109 if (rs6000_sched_groups)
19111 enum attr_type type = get_attr_type (insn);
19112 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
19120 /* The function returns true if out_insn sets a value that is
19121 used in the address generation computation of in_insn */
19123 set_to_load_agen (rtx out_insn, rtx in_insn)
19125 rtx out_set, in_set;
19127 /* For performance reasons, only handle the simple case where
19128 both insns are a single_set. */
19129 out_set = single_set (out_insn);
19132 in_set = single_set (in_insn);
19134 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
19140 /* The function returns true if the target storage location of
19141 out_insn is adjacent to the target storage location of in_insn */
19142 /* Return 1 if memory locations are adjacent. */
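/* For example (illustrative): a 4-byte store to 16(r9) immediately followed
   by a store to 20(r9) uses the same base register and differs by exactly
   the first store's size, so the two locations are treated as adjacent.  */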
19145 adjacent_mem_locations (rtx insn1, rtx insn2)
19148 rtx a = get_store_dest (PATTERN (insn1));
19149 rtx b = get_store_dest (PATTERN (insn2));
19151 if ((GET_CODE (XEXP (a, 0)) == REG
19152 || (GET_CODE (XEXP (a, 0)) == PLUS
19153 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
19154 && (GET_CODE (XEXP (b, 0)) == REG
19155 || (GET_CODE (XEXP (b, 0)) == PLUS
19156 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
19158 HOST_WIDE_INT val0 = 0, val1 = 0, val_diff;
19161 if (GET_CODE (XEXP (a, 0)) == PLUS)
19163 reg0 = XEXP (XEXP (a, 0), 0);
19164 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
19167 reg0 = XEXP (a, 0);
19169 if (GET_CODE (XEXP (b, 0)) == PLUS)
19171 reg1 = XEXP (XEXP (b, 0), 0);
19172 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
19175 reg1 = XEXP (b, 0);
19177 val_diff = val1 - val0;
19179 return ((REGNO (reg0) == REGNO (reg1))
19180 && ((MEM_SIZE (a) && val_diff == INTVAL (MEM_SIZE (a)))
19181 || (MEM_SIZE (b) && val_diff == -INTVAL (MEM_SIZE (b)))));
19187 /* A C statement (sans semicolon) to update the integer scheduling
19188 priority INSN_PRIORITY (INSN). Increase the priority to execute the
19189 INSN earlier, reduce the priority to execute INSN later. Do not
19190 define this macro if you do not need to adjust the scheduling
19191 priorities of insns. */
19194 rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
19196 /* On machines (like the 750) which have asymmetric integer units,
19197 where one integer unit can do multiply and divides and the other
19198 can't, reduce the priority of multiply/divide so it is scheduled
19199 before other integer operations. */
19202 if (! INSN_P (insn))
19205 if (GET_CODE (PATTERN (insn)) == USE)
19208 switch (rs6000_cpu_attr) {
19210 switch (get_attr_type (insn))
19217 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
19218 priority, priority);
19219 if (priority >= 0 && priority < 0x01000000)
19226 if (insn_must_be_first_in_group (insn)
19227 && reload_completed
19228 && current_sched_info->sched_max_insns_priority
19229 && rs6000_sched_restricted_insns_priority)
19232 /* Prioritize insns that can be dispatched only in the first dispatch slot.  */
19234 if (rs6000_sched_restricted_insns_priority == 1)
19235 /* Attach highest priority to insn. This means that in
19236 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
19237 precede 'priority' (critical path) considerations. */
19238 return current_sched_info->sched_max_insns_priority;
19239 else if (rs6000_sched_restricted_insns_priority == 2)
19240 /* Increase priority of insn by a minimal amount. This means that in
19241 haifa-sched.c:ready_sort(), only 'priority' (critical path)
19242 considerations precede dispatch-slot restriction considerations. */
19243 return (priority + 1);
19246 if (rs6000_cpu == PROCESSOR_POWER6
19247 && ((load_store_pendulum == -2 && is_load_insn (insn))
19248 || (load_store_pendulum == 2 && is_store_insn (insn))))
19249 /* Attach highest priority to insn if the scheduler has just issued two
19250 stores and this instruction is a load, or two loads and this instruction
19251 is a store. Power6 wants loads and stores scheduled alternately when possible.  */
19253 return current_sched_info->sched_max_insns_priority;
19258 /* Return true if the instruction is nonpipelined on the Cell. */
19260 is_nonpipeline_insn (rtx insn)
19262 enum attr_type type;
19263 if (!insn || !INSN_P (insn)
19264 || GET_CODE (PATTERN (insn)) == USE
19265 || GET_CODE (PATTERN (insn)) == CLOBBER)
19268 type = get_attr_type (insn);
19269 if (type == TYPE_IMUL
19270 || type == TYPE_IMUL2
19271 || type == TYPE_IMUL3
19272 || type == TYPE_LMUL
19273 || type == TYPE_IDIV
19274 || type == TYPE_LDIV
19275 || type == TYPE_SDIV
19276 || type == TYPE_DDIV
19277 || type == TYPE_SSQRT
19278 || type == TYPE_DSQRT
19279 || type == TYPE_MFCR
19280 || type == TYPE_MFCRF
19281 || type == TYPE_MFJMPR)
19289 /* Return how many instructions the machine can issue per cycle. */
19292 rs6000_issue_rate (void)
19294 /* Use issue rate of 1 for first scheduling pass to decrease degradation. */
19295 if (!reload_completed)
19298 switch (rs6000_cpu_attr) {
19299 case CPU_RIOS1: /* ? */
19301 case CPU_PPC601: /* ? */
19310 case CPU_PPCE300C2:
19311 case CPU_PPCE300C3:
19312 case CPU_PPCE500MC:
19329 /* Return how many instructions to look ahead for better insn
19333 rs6000_use_sched_lookahead (void)
19335 if (rs6000_cpu_attr == CPU_PPC8540)
19337 if (rs6000_cpu_attr == CPU_CELL)
19338 return (reload_completed ? 8 : 0);
19342 /* We are choosing insn from the ready queue. Return nonzero if INSN can be chosen. */
19344 rs6000_use_sched_lookahead_guard (rtx insn)
19346 if (rs6000_cpu_attr != CPU_CELL)
19349 if (insn == NULL_RTX || !INSN_P (insn))
19352 if (!reload_completed
19353 || is_nonpipeline_insn (insn)
19354 || is_microcoded_insn (insn))
19360 /* Determine if PAT refers to memory. */
19363 is_mem_ref (rtx pat)
19369 /* stack_tie does not produce any real memory traffic. */
19370 if (GET_CODE (pat) == UNSPEC
19371 && XINT (pat, 1) == UNSPEC_TIE)
19374 if (GET_CODE (pat) == MEM)
19377 /* Recursively process the pattern. */
19378 fmt = GET_RTX_FORMAT (GET_CODE (pat));
19380 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0 && !ret; i--)
19383 ret |= is_mem_ref (XEXP (pat, i));
19384 else if (fmt[i] == 'E')
19385 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
19386 ret |= is_mem_ref (XVECEXP (pat, i, j));
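/* Illustrative sketch (not part of GCC): a standalone model of the
   recursive scan above, using a toy expression tree instead of RTL.
   A node refers to memory if it is itself a MEM-like node or if any of
   its operands does.  All types and names here are hypothetical.  */
#if 0
#include <stdbool.h>

enum model_code { MODEL_MEM, MODEL_REG, MODEL_PLUS };

struct model_node
{
  enum model_code code;
  int n_ops;
  struct model_node *ops[2];
};

static bool
model_is_mem_ref (const struct model_node *n)
{
  int i;
  if (n->code == MODEL_MEM)
    return true;
  for (i = 0; i < n->n_ops; i++)
    if (model_is_mem_ref (n->ops[i]))
      return true;
  return false;
}
#endif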
19392 /* Determine if PAT is a PATTERN of a load insn. */
19395 is_load_insn1 (rtx pat)
19397 if (!pat || pat == NULL_RTX)
19400 if (GET_CODE (pat) == SET)
19401 return is_mem_ref (SET_SRC (pat));
19403 if (GET_CODE (pat) == PARALLEL)
19407 for (i = 0; i < XVECLEN (pat, 0); i++)
19408 if (is_load_insn1 (XVECEXP (pat, 0, i)))
19415 /* Determine if INSN loads from memory. */
19418 is_load_insn (rtx insn)
19420 if (!insn || !INSN_P (insn))
19423 if (GET_CODE (insn) == CALL_INSN)
19426 return is_load_insn1 (PATTERN (insn));
19429 /* Determine if PAT is a PATTERN of a store insn. */
19432 is_store_insn1 (rtx pat)
19434 if (!pat || pat == NULL_RTX)
19437 if (GET_CODE (pat) == SET)
19438 return is_mem_ref (SET_DEST (pat));
19440 if (GET_CODE (pat) == PARALLEL)
19444 for (i = 0; i < XVECLEN (pat, 0); i++)
19445 if (is_store_insn1 (XVECEXP (pat, 0, i)))
19452 /* Determine if INSN stores to memory. */
19455 is_store_insn (rtx insn)
19457 if (!insn || !INSN_P (insn))
19460 return is_store_insn1 (PATTERN (insn));
19463 /* Return the dest of a store insn. */
19466 get_store_dest (rtx pat)
19468 gcc_assert (is_store_insn1 (pat));
19470 if (GET_CODE (pat) == SET)
19471 return SET_DEST (pat);
19472 else if (GET_CODE (pat) == PARALLEL)
19476 for (i = 0; i < XVECLEN (pat, 0); i++)
19478 rtx inner_pat = XVECEXP (pat, 0, i);
19479 if (GET_CODE (inner_pat) == SET
19480 && is_mem_ref (SET_DEST (inner_pat)))
19484 /* We shouldn't get here, because we should have either a simple
19485 store insn or a store with update which are covered above. */
19489 /* Returns whether the dependence between INSN and NEXT is considered
19490 costly by the given target. */
19493 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
19498 /* If the flag is not enabled - no dependence is considered costly;
19499 allow all dependent insns in the same group.
19500 This is the most aggressive option. */
19501 if (rs6000_sched_costly_dep == no_dep_costly)
19504 /* If the flag is set to 1 - a dependence is always considered costly;
19505 do not allow dependent instructions in the same group.
19506 This is the most conservative option. */
19507 if (rs6000_sched_costly_dep == all_deps_costly)
19510 insn = DEP_PRO (dep);
19511 next = DEP_CON (dep);
19513 if (rs6000_sched_costly_dep == store_to_load_dep_costly
19514 && is_load_insn (next)
19515 && is_store_insn (insn))
19516 /* Prevent load after store in the same group. */
19519 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
19520 && is_load_insn (next)
19521 && is_store_insn (insn)
19522 && DEP_TYPE (dep) == REG_DEP_TRUE)
19523 /* Prevent load after store in the same group if it is a true
19527 /* The flag is set to X; dependences with latency >= X are considered costly,
19528 and will not be scheduled in the same group. */
19529 if (rs6000_sched_costly_dep <= max_dep_latency
19530 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
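/* Illustrative sketch (not part of GCC): a standalone model of the
   latency-threshold case above.  A dependence is treated as costly when
   its remaining latency (the dependence cost minus the distance already
   separating the two insns) is at least the rs6000_sched_costly_dep
   threshold X.  The function name is hypothetical.  */
#if 0
#include <stdbool.h>

static bool
model_costly_by_latency (int cost, int distance, int threshold_x)
{
  return (cost - distance) >= threshold_x;
}
/* e.g. cost 4, distance 1, threshold 3 -> costly;
	cost 4, distance 2, threshold 3 -> not costly.  */
#endif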
19536 /* Return the next insn after INSN that is found before TAIL is reached,
19537 skipping any "non-active" insns - insns that will not actually occupy
19538 an issue slot. Return NULL_RTX if such an insn is not found. */
19541 get_next_active_insn (rtx insn, rtx tail)
19543 if (insn == NULL_RTX || insn == tail)
19548 insn = NEXT_INSN (insn);
19549 if (insn == NULL_RTX || insn == tail)
19554 || (NONJUMP_INSN_P (insn)
19555 && GET_CODE (PATTERN (insn)) != USE
19556 && GET_CODE (PATTERN (insn)) != CLOBBER
19557 && INSN_CODE (insn) != CODE_FOR_stack_tie))
19563 /* We are about to begin issuing insns for this clock cycle. */
19566 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
19567 rtx *ready ATTRIBUTE_UNUSED,
19568 int *pn_ready ATTRIBUTE_UNUSED,
19569 int clock_var ATTRIBUTE_UNUSED)
19571 int n_ready = *pn_ready;
19574 fprintf (dump, "// rs6000_sched_reorder :\n");
19576 /* Reorder the ready list, if the second to last ready insn
19577 is a nonpipeline insn. */
19578 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
19580 if (is_nonpipeline_insn (ready[n_ready - 1])
19581 && (recog_memoized (ready[n_ready - 2]) > 0))
19582 /* Simply swap first two insns. */
19584 rtx tmp = ready[n_ready - 1];
19585 ready[n_ready - 1] = ready[n_ready - 2];
19586 ready[n_ready - 2] = tmp;
19590 if (rs6000_cpu == PROCESSOR_POWER6)
19591 load_store_pendulum = 0;
19593 return rs6000_issue_rate ();
19596 /* Like rs6000_sched_reorder, but called after issuing each insn. */
19599 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
19600 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
19603 fprintf (dump, "// rs6000_sched_reorder2 :\n");
19605 /* For Power6, we need to handle some special cases to try and keep the
19606 store queue from overflowing and triggering expensive flushes.
19608 This code monitors how load and store instructions are being issued
19609 and skews the ready list one way or the other to increase the likelihood
19610 that a desired instruction is issued at the proper time.
19612 A couple of things are done. First, we maintain a "load_store_pendulum"
19613 to track the current state of load/store issue.
19615 - If the pendulum is at zero, then no loads or stores have been
19616 issued in the current cycle so we do nothing.
19618 - If the pendulum is 1, then a single load has been issued in this
19619 cycle and we attempt to locate another load in the ready list to
19622 - If the pendulum is -2, then two stores have already been
19623 issued in this cycle, so we increase the priority of the first load
19624 in the ready list to increase its likelihood of being chosen first
19627 - If the pendulum is -1, then a single store has been issued in this
19628 cycle and we attempt to locate another store in the ready list to
19629 issue with it, preferring a store to an adjacent memory location to
19630 facilitate store pairing in the store queue.
19632 - If the pendulum is 2, then two loads have already been
19633 issued in this cycle, so we increase the priority of the first store
19634 in the ready list to increase its likelihood of being chosen first
19637 - If the pendulum < -2 or > 2, then do nothing.
19639 Note: This code covers the most common scenarios. There exist
19640 non-load/store instructions which make use of the LSU and which
19641 would need to be accounted for to strictly model the behavior
19642 of the machine. Those instructions are currently unaccounted
19643 for to help minimize compile time overhead of this code.
19645 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
19651 if (is_store_insn (last_scheduled_insn))
19652 /* Issuing a store, swing the load_store_pendulum to the left */
19653 load_store_pendulum--;
19654 else if (is_load_insn (last_scheduled_insn))
19655 /* Issuing a load, swing the load_store_pendulum to the right */
19656 load_store_pendulum++;
19658 return cached_can_issue_more;
19660 /* If the pendulum is balanced, or there is only one instruction on
19661 the ready list, then all is well, so return. */
19662 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
19663 return cached_can_issue_more;
19665 if (load_store_pendulum == 1)
19667 /* A load has been issued in this cycle. Scan the ready list
19668 for another load to issue with it */
19673 if (is_load_insn (ready[pos]))
19675 /* Found a load. Move it to the head of the ready list,
19676 and adjust its priority so that it is more likely to
19679 for (i=pos; i<*pn_ready-1; i++)
19680 ready[i] = ready[i + 1];
19681 ready[*pn_ready-1] = tmp;
19683 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
19684 INSN_PRIORITY (tmp)++;
19690 else if (load_store_pendulum == -2)
19692 /* Two stores have been issued in this cycle. Increase the
19693 priority of the first load in the ready list to favor it for
19694 issuing in the next cycle. */
19699 if (is_load_insn (ready[pos])
19701 && INSN_PRIORITY_KNOWN (ready[pos]))
19703 INSN_PRIORITY (ready[pos])++;
19705 /* Adjust the pendulum to account for the fact that a load
19706 was found and increased in priority. This is to prevent
19707 increasing the priority of multiple loads */
19708 load_store_pendulum--;
19715 else if (load_store_pendulum == -1)
19717 /* A store has been issued in this cycle. Scan the ready list for
19718 another store to issue with it, preferring a store to an adjacent
19720 int first_store_pos = -1;
19726 if (is_store_insn (ready[pos]))
19728 /* Maintain the index of the first store found on the
19730 if (first_store_pos == -1)
19731 first_store_pos = pos;
19733 if (is_store_insn (last_scheduled_insn)
19734 && adjacent_mem_locations (last_scheduled_insn,ready[pos]))
19736 /* Found an adjacent store. Move it to the head of the
19737 ready list, and adjust its priority so that it is
19738 more likely to stay there */
19740 for (i=pos; i<*pn_ready-1; i++)
19741 ready[i] = ready[i + 1];
19742 ready[*pn_ready-1] = tmp;
19744 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
19745 INSN_PRIORITY (tmp)++;
19747 first_store_pos = -1;
19755 if (first_store_pos >= 0)
19757 /* An adjacent store wasn't found, but a non-adjacent store was,
19758 so move the non-adjacent store to the front of the ready
19759 list, and adjust its priority so that it is more likely to
19761 tmp = ready[first_store_pos];
19762 for (i=first_store_pos; i<*pn_ready-1; i++)
19763 ready[i] = ready[i + 1];
19764 ready[*pn_ready-1] = tmp;
19765 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
19766 INSN_PRIORITY (tmp)++;
19769 else if (load_store_pendulum == 2)
19771 /* Two loads have been issued in this cycle. Increase the priority
19772 of the first store in the ready list to favor it for issuing in
19778 if (is_store_insn (ready[pos])
19780 && INSN_PRIORITY_KNOWN (ready[pos]))
19782 INSN_PRIORITY (ready[pos])++;
19784 /* Adjust the pendulum to account for the fact that a store
19785 was found and increased in priority. This is to prevent
19786 increasing the priority of multiple stores */
19787 load_store_pendulum++;
19796 return cached_can_issue_more;
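/* Illustrative sketch (not part of GCC): a tiny standalone simulation of
   the Power6 load/store pendulum described above.  The pendulum is zeroed
   at the start of each cycle; each issued load swings it right (+1) and
   each store swings it left (-1), and the resulting value decides whether
   the ready list is biased toward a load or a store.  All names below are
   hypothetical.  */
#if 0
#include <stdio.h>

enum model_op { MODEL_LOAD, MODEL_STORE, MODEL_OTHER };

static const char *
model_bias (int pendulum)
{
  if (pendulum == 1)  return "look for a second load this cycle";
  if (pendulum == -1) return "look for a second (adjacent) store";
  if (pendulum == 2)  return "boost the first store on the ready list";
  if (pendulum == -2) return "boost the first load on the ready list";
  return "no bias";
}

int
main (void)
{
  enum model_op issued[] = { MODEL_STORE, MODEL_STORE, MODEL_LOAD };
  int pendulum = 0;		/* reset at the start of the cycle */
  unsigned i;

  for (i = 0; i < sizeof issued / sizeof issued[0]; i++)
    {
      if (issued[i] == MODEL_LOAD)
	pendulum++;
      else if (issued[i] == MODEL_STORE)
	pendulum--;
      printf ("after insn %u: pendulum = %d, %s\n",
	      i, pendulum, model_bias (pendulum));
    }
  return 0;
}
#endif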
19799 /* Return whether the presence of INSN causes a dispatch group termination
19800 of group WHICH_GROUP.
19802 If WHICH_GROUP == current_group, this function will return true if INSN
19803 causes the termination of the current group (i.e., the dispatch group to
19804 which INSN belongs). This means that INSN will be the last insn in the
19805 group it belongs to.
19807 If WHICH_GROUP == previous_group, this function will return true if INSN
19808 causes the termination of the previous group (i.e., the dispatch group that
19809 precedes the group to which INSN belongs). This means that INSN will be
19810 the first insn in the group it belongs to. */
19813 insn_terminates_group_p (rtx insn, enum group_termination which_group)
19820 first = insn_must_be_first_in_group (insn);
19821 last = insn_must_be_last_in_group (insn);
19826 if (which_group == current_group)
19828 else if (which_group == previous_group)
19836 insn_must_be_first_in_group (rtx insn)
19838 enum attr_type type;
19841 || insn == NULL_RTX
19842 || GET_CODE (insn) == NOTE
19843 || GET_CODE (PATTERN (insn)) == USE
19844 || GET_CODE (PATTERN (insn)) == CLOBBER)
19847 switch (rs6000_cpu)
19849 case PROCESSOR_POWER5:
19850 if (is_cracked_insn (insn))
19852 case PROCESSOR_POWER4:
19853 if (is_microcoded_insn (insn))
19856 if (!rs6000_sched_groups)
19859 type = get_attr_type (insn);
19866 case TYPE_DELAYED_CR:
19867 case TYPE_CR_LOGICAL:
19881 case PROCESSOR_POWER6:
19882 type = get_attr_type (insn);
19886 case TYPE_INSERT_DWORD:
19890 case TYPE_VAR_SHIFT_ROTATE:
19897 case TYPE_INSERT_WORD:
19898 case TYPE_DELAYED_COMPARE:
19899 case TYPE_IMUL_COMPARE:
19900 case TYPE_LMUL_COMPARE:
19901 case TYPE_FPCOMPARE:
19912 case TYPE_LOAD_EXT_UX:
19914 case TYPE_STORE_UX:
19915 case TYPE_FPLOAD_U:
19916 case TYPE_FPLOAD_UX:
19917 case TYPE_FPSTORE_U:
19918 case TYPE_FPSTORE_UX:
19932 insn_must_be_last_in_group (rtx insn)
19934 enum attr_type type;
19937 || insn == NULL_RTX
19938 || GET_CODE (insn) == NOTE
19939 || GET_CODE (PATTERN (insn)) == USE
19940 || GET_CODE (PATTERN (insn)) == CLOBBER)
19943 switch (rs6000_cpu) {
19944 case PROCESSOR_POWER4:
19945 case PROCESSOR_POWER5:
19946 if (is_microcoded_insn (insn))
19949 if (is_branch_slot_insn (insn))
19953 case PROCESSOR_POWER6:
19954 type = get_attr_type (insn);
19961 case TYPE_VAR_SHIFT_ROTATE:
19968 case TYPE_DELAYED_COMPARE:
19969 case TYPE_IMUL_COMPARE:
19970 case TYPE_LMUL_COMPARE:
19971 case TYPE_FPCOMPARE:
19992 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
19993 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
19996 is_costly_group (rtx *group_insns, rtx next_insn)
19999 int issue_rate = rs6000_issue_rate ();
20001 for (i = 0; i < issue_rate; i++)
20003 sd_iterator_def sd_it;
20005 rtx insn = group_insns[i];
20010 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
20012 rtx next = DEP_CON (dep);
20014 if (next == next_insn
20015 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
20023 /* Utility of the function redefine_groups.
20024 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
20025 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
20026 to keep it "far" (in a separate group) from GROUP_INSNS, according
20027 to one of the following schemes, depending on the value of the flag
20028 -minsert-sched-nops = X:
20029 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
20030 in order to force NEXT_INSN into a separate group.
20031 (2) X < sched_finish_regroup_exact: insert exactly X nops.
20032 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
20033 insertion (has a group just ended, how many vacant issue slots remain in the
20034 last group, and how many dispatch groups were encountered so far). */
20037 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
20038 rtx next_insn, bool *group_end, int can_issue_more,
20043 int issue_rate = rs6000_issue_rate ();
20044 bool end = *group_end;
20047 if (next_insn == NULL_RTX)
20048 return can_issue_more;
20050 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
20051 return can_issue_more;
20053 force = is_costly_group (group_insns, next_insn);
20055 return can_issue_more;
20057 if (sched_verbose > 6)
20058 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
20059 *group_count, can_issue_more);
20061 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
20064 can_issue_more = 0;
20066 /* Since only a branch can be issued in the last issue_slot, it is
20067 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
20068 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
20069 in this case the last nop will start a new group and the branch
20070 will be forced to the new group. */
20071 if (can_issue_more && !is_branch_slot_insn (next_insn))
20074 while (can_issue_more > 0)
20077 emit_insn_before (nop, next_insn);
20085 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
20087 int n_nops = rs6000_sched_insert_nops;
20089 /* Nops can't be issued from the branch slot, so the effective
20090 issue_rate for nops is 'issue_rate - 1'. */
20091 if (can_issue_more == 0)
20092 can_issue_more = issue_rate;
20094 if (can_issue_more == 0)
20096 can_issue_more = issue_rate - 1;
20099 for (i = 0; i < issue_rate; i++)
20101 group_insns[i] = 0;
20108 emit_insn_before (nop, next_insn);
20109 if (can_issue_more == issue_rate - 1) /* new group begins */
20112 if (can_issue_more == 0)
20114 can_issue_more = issue_rate - 1;
20117 for (i = 0; i < issue_rate; i++)
20119 group_insns[i] = 0;
20125 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
20128 /* Is next_insn going to start a new group? */
20131 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
20132 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
20133 || (can_issue_more < issue_rate &&
20134 insn_terminates_group_p (next_insn, previous_group)));
20135 if (*group_end && end)
20138 if (sched_verbose > 6)
20139 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
20140 *group_count, can_issue_more);
20141 return can_issue_more;
20144 return can_issue_more;
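/* Illustrative sketch (not part of GCC): a standalone model of how many
   nops the exact-regrouping scheme above (sched_finish_regroup_exact)
   emits.  Only a branch can use the last issue slot, so when the next
   insn is not a branch one fewer nop is enough to push it into a new
   group.  The function name is hypothetical.  */
#if 0
static int
model_exact_regroup_nops (int can_issue_more, int next_is_branch)
{
  if (can_issue_more > 0 && !next_is_branch)
    can_issue_more--;
  return can_issue_more;	/* number of nops emitted before NEXT_INSN */
}
/* e.g. 3 vacant slots, next insn not a branch -> 2 nops;
	3 vacant slots, next insn is a branch  -> 3 nops.  */
#endif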
20147 /* This function tries to synch the dispatch groups that the compiler "sees"
20148 with the dispatch groups that the processor dispatcher is expected to
20149 form in practice. It tries to achieve this synchronization by forcing the
20150 estimated processor grouping on the compiler (as opposed to the function
20151 'pad_groups' which tries to force the scheduler's grouping on the processor).
20153 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
20154 examines the (estimated) dispatch groups that will be formed by the processor
20155 dispatcher. It marks these group boundaries to reflect the estimated
20156 processor grouping, overriding the grouping that the scheduler had marked.
20157 Depending on the value of the flag '-minsert-sched-nops' this function can
20158 force certain insns into separate groups or force a certain distance between
20159 them by inserting nops, for example, if there exists a "costly dependence"
20162 The function estimates the group boundaries that the processor will form as
20163 follows: It keeps track of how many vacant issue slots are available after
20164 each insn. A subsequent insn will start a new group if one of the following
20166 - no more vacant issue slots remain in the current dispatch group.
20167 - only the last issue slot, which is the branch slot, is vacant, but the next
20168 insn is not a branch.
20169 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
20170 which means that a cracked insn (which occupies two issue slots) can't be
20171 issued in this group.
20172 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
20173 start a new group. */
20176 redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
20178 rtx insn, next_insn;
20180 int can_issue_more;
20183 int group_count = 0;
20187 issue_rate = rs6000_issue_rate ();
20188 group_insns = XALLOCAVEC (rtx, issue_rate);
20189 for (i = 0; i < issue_rate; i++)
20191 group_insns[i] = 0;
20193 can_issue_more = issue_rate;
20195 insn = get_next_active_insn (prev_head_insn, tail);
20198 while (insn != NULL_RTX)
20200 slot = (issue_rate - can_issue_more);
20201 group_insns[slot] = insn;
20203 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
20204 if (insn_terminates_group_p (insn, current_group))
20205 can_issue_more = 0;
20207 next_insn = get_next_active_insn (insn, tail);
20208 if (next_insn == NULL_RTX)
20209 return group_count + 1;
20211 /* Is next_insn going to start a new group? */
20213 = (can_issue_more == 0
20214 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
20215 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
20216 || (can_issue_more < issue_rate &&
20217 insn_terminates_group_p (next_insn, previous_group)));
20219 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
20220 next_insn, &group_end, can_issue_more,
20226 can_issue_more = 0;
20227 for (i = 0; i < issue_rate; i++)
20229 group_insns[i] = 0;
20233 if (GET_MODE (next_insn) == TImode && can_issue_more)
20234 PUT_MODE (next_insn, VOIDmode);
20235 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
20236 PUT_MODE (next_insn, TImode);
20239 if (can_issue_more == 0)
20240 can_issue_more = issue_rate;
20243 return group_count;
20246 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
20247 dispatch group boundaries that the scheduler had marked. Pad with nops
20248 any dispatch groups which have vacant issue slots, in order to force the
20249 scheduler's grouping on the processor dispatcher. The function
20250 returns the number of dispatch groups found. */
20253 pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
20255 rtx insn, next_insn;
20258 int can_issue_more;
20260 int group_count = 0;
20262 /* Initialize issue_rate. */
20263 issue_rate = rs6000_issue_rate ();
20264 can_issue_more = issue_rate;
20266 insn = get_next_active_insn (prev_head_insn, tail);
20267 next_insn = get_next_active_insn (insn, tail);
20269 while (insn != NULL_RTX)
20272 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
20274 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
20276 if (next_insn == NULL_RTX)
20281 /* If the scheduler had marked group termination at this location
20282 (between insn and next_insn), and neither insn nor next_insn will
20283 force group termination, pad the group with nops to force group
20286 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
20287 && !insn_terminates_group_p (insn, current_group)
20288 && !insn_terminates_group_p (next_insn, previous_group))
20290 if (!is_branch_slot_insn (next_insn))
20293 while (can_issue_more)
20296 emit_insn_before (nop, next_insn);
20301 can_issue_more = issue_rate;
20306 next_insn = get_next_active_insn (insn, tail);
20309 return group_count;
20312 /* We're beginning a new block. Initialize data structures as necessary. */
20315 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
20316 int sched_verbose ATTRIBUTE_UNUSED,
20317 int max_ready ATTRIBUTE_UNUSED)
20319 last_scheduled_insn = NULL_RTX;
20320 load_store_pendulum = 0;
20323 /* The following function is called at the end of scheduling BB.
20324 After reload, it inserts nops to enforce insn group bundling. */
20327 rs6000_sched_finish (FILE *dump, int sched_verbose)
20332 fprintf (dump, "=== Finishing schedule.\n");
20334 if (reload_completed && rs6000_sched_groups)
20336 /* Do not run sched_finish hook when selective scheduling enabled. */
20337 if (sel_sched_p ())
20340 if (rs6000_sched_insert_nops == sched_finish_none)
20343 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
20344 n_groups = pad_groups (dump, sched_verbose,
20345 current_sched_info->prev_head,
20346 current_sched_info->next_tail);
20348 n_groups = redefine_groups (dump, sched_verbose,
20349 current_sched_info->prev_head,
20350 current_sched_info->next_tail);
20352 if (sched_verbose >= 6)
20354 fprintf (dump, "ngroups = %d\n", n_groups);
20355 print_rtl (dump, current_sched_info->prev_head);
20356 fprintf (dump, "Done finish_sched\n");
20361 struct _rs6000_sched_context
20363 short cached_can_issue_more;
20364 rtx last_scheduled_insn;
20365 int load_store_pendulum;
20368 typedef struct _rs6000_sched_context rs6000_sched_context_def;
20369 typedef rs6000_sched_context_def *rs6000_sched_context_t;
20371 /* Allocate storage for a new scheduling context. */
20373 rs6000_alloc_sched_context (void)
20375 return xmalloc (sizeof (rs6000_sched_context_def));
20378 /* If CLEAN_P is true, initialize _SC with clean data;
20379 otherwise, initialize it from the global context. */
20381 rs6000_init_sched_context (void *_sc, bool clean_p)
20383 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
20387 sc->cached_can_issue_more = 0;
20388 sc->last_scheduled_insn = NULL_RTX;
20389 sc->load_store_pendulum = 0;
20393 sc->cached_can_issue_more = cached_can_issue_more;
20394 sc->last_scheduled_insn = last_scheduled_insn;
20395 sc->load_store_pendulum = load_store_pendulum;
20399 /* Sets the global scheduling context to the one pointed to by _SC. */
20401 rs6000_set_sched_context (void *_sc)
20403 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
20405 gcc_assert (sc != NULL);
20407 cached_can_issue_more = sc->cached_can_issue_more;
20408 last_scheduled_insn = sc->last_scheduled_insn;
20409 load_store_pendulum = sc->load_store_pendulum;
20414 rs6000_free_sched_context (void *_sc)
20416 gcc_assert (_sc != NULL);
20422 /* Length in units of the trampoline for entering a nested function. */
20425 rs6000_trampoline_size (void)
20429 switch (DEFAULT_ABI)
20432 gcc_unreachable ();
20435 ret = (TARGET_32BIT) ? 12 : 24;
20440 ret = (TARGET_32BIT) ? 40 : 48;
20447 /* Emit RTL insns to initialize the variable parts of a trampoline.
20448 FNADDR is an RTX for the address of the function's pure code.
20449 CXT is an RTX for the static chain value for the function. */
20452 rs6000_initialize_trampoline (rtx addr, rtx fnaddr, rtx cxt)
20454 int regsize = (TARGET_32BIT) ? 4 : 8;
20455 rtx ctx_reg = force_reg (Pmode, cxt);
20457 switch (DEFAULT_ABI)
20460 gcc_unreachable ();
20462 /* Macros to shorten the code expansions below. */
20463 #define MEM_DEREF(addr) gen_rtx_MEM (Pmode, memory_address (Pmode, addr))
20464 #define MEM_PLUS(addr,offset) \
20465 gen_rtx_MEM (Pmode, memory_address (Pmode, plus_constant (addr, offset)))
20467 /* Under AIX, just build the 3-word function descriptor. */
20470 rtx fn_reg = gen_reg_rtx (Pmode);
20471 rtx toc_reg = gen_reg_rtx (Pmode);
20472 emit_move_insn (fn_reg, MEM_DEREF (fnaddr));
20473 emit_move_insn (toc_reg, MEM_PLUS (fnaddr, regsize));
20474 emit_move_insn (MEM_DEREF (addr), fn_reg);
20475 emit_move_insn (MEM_PLUS (addr, regsize), toc_reg);
20476 emit_move_insn (MEM_PLUS (addr, 2*regsize), ctx_reg);
20480 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
20483 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
20484 FALSE, VOIDmode, 4,
20486 GEN_INT (rs6000_trampoline_size ()), SImode,
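/* Illustrative sketch (not part of GCC): the AIX trampoline built above
   is a plain three-word function descriptor.  A standalone model of the
   layout, assuming the usual AIX convention of { code address, TOC
   pointer, static chain }; the struct name is hypothetical.  */
#if 0
struct model_aix_descriptor
{
  void *code_addr;	/* copied from word 0 of FNADDR's descriptor */
  void *toc;		/* copied from word 1 of FNADDR's descriptor */
  void *static_chain;	/* CXT, the static chain for the nested function */
};
/* Word size is 4 bytes for -m32 and 8 for -m64, so the descriptor is
   12 or 24 bytes -- matching rs6000_trampoline_size above.  */
#endif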
20496 /* Table of valid machine attributes. */
20498 const struct attribute_spec rs6000_attribute_table[] =
20500 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
20501 { "altivec", 1, 1, false, true, false, rs6000_handle_altivec_attribute },
20502 { "longcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
20503 { "shortcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
20504 { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
20505 { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
20506 #ifdef SUBTARGET_ATTRIBUTE_TABLE
20507 SUBTARGET_ATTRIBUTE_TABLE,
20509 { NULL, 0, 0, false, false, false, NULL }
20512 /* Handle the "altivec" attribute. The attribute may have
20513 arguments as follows:
20515 __attribute__((altivec(vector__)))
20516 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
20517 __attribute__((altivec(bool__))) (always followed by 'unsigned')
20519 and may appear more than once (e.g., 'vector bool char') in a
20520 given declaration. */
20523 rs6000_handle_altivec_attribute (tree *node,
20524 tree name ATTRIBUTE_UNUSED,
20526 int flags ATTRIBUTE_UNUSED,
20527 bool *no_add_attrs)
20529 tree type = *node, result = NULL_TREE;
20530 enum machine_mode mode;
20533 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
20534 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
20535 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
20538 while (POINTER_TYPE_P (type)
20539 || TREE_CODE (type) == FUNCTION_TYPE
20540 || TREE_CODE (type) == METHOD_TYPE
20541 || TREE_CODE (type) == ARRAY_TYPE)
20542 type = TREE_TYPE (type);
20544 mode = TYPE_MODE (type);
20546 /* Check for invalid AltiVec type qualifiers. */
20547 if (type == long_unsigned_type_node || type == long_integer_type_node)
20550 error ("use of %<long%> in AltiVec types is invalid for 64-bit code");
20551 else if (rs6000_warn_altivec_long)
20552 warning (0, "use of %<long%> in AltiVec types is deprecated; use %<int%>");
20554 else if (type == long_long_unsigned_type_node
20555 || type == long_long_integer_type_node)
20556 error ("use of %<long long%> in AltiVec types is invalid");
20557 else if (type == double_type_node)
20558 error ("use of %<double%> in AltiVec types is invalid");
20559 else if (type == long_double_type_node)
20560 error ("use of %<long double%> in AltiVec types is invalid");
20561 else if (type == boolean_type_node)
20562 error ("use of boolean types in AltiVec types is invalid");
20563 else if (TREE_CODE (type) == COMPLEX_TYPE)
20564 error ("use of %<complex%> in AltiVec types is invalid");
20565 else if (DECIMAL_FLOAT_MODE_P (mode))
20566 error ("use of decimal floating point types in AltiVec types is invalid");
20568 switch (altivec_type)
20571 unsigned_p = TYPE_UNSIGNED (type);
20575 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
20578 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
20581 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
20583 case SFmode: result = V4SF_type_node; break;
20584 /* If the user says 'vector int bool', we may be handed the 'bool'
20585 attribute _before_ the 'vector' attribute, and so select the
20586 proper type in the 'b' case below. */
20587 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
20595 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
20596 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
20597 case QImode: case V16QImode: result = bool_V16QI_type_node;
20604 case V8HImode: result = pixel_V8HI_type_node;
20610 /* Propagate qualifiers attached to the element type
20611 onto the vector type. */
20612 if (result && result != type && TYPE_QUALS (type))
20613 result = build_qualified_type (result, TYPE_QUALS (type));
20615 *no_add_attrs = true; /* No need to hang on to the attribute. */
20618 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
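/* Illustrative sketch (not part of GCC): declarations assumed to exercise
   the attribute forms listed in the comment before the handler above.
   The typedef names are hypothetical, and the exact spellings are taken
   only from that comment.  */
#if 0
typedef __attribute__ ((altivec (vector__))) int model_v4si;
typedef __attribute__ ((altivec (bool__))) unsigned int model_bool_v4si;
typedef __attribute__ ((altivec (pixel__))) unsigned short model_pixel_v8hi;
#endif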
20623 /* AltiVec defines four built-in scalar types that serve as vector
20624 elements; we must teach the compiler how to mangle them. */
20626 static const char *
20627 rs6000_mangle_type (const_tree type)
20629 type = TYPE_MAIN_VARIANT (type);
20631 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
20632 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
20635 if (type == bool_char_type_node) return "U6__boolc";
20636 if (type == bool_short_type_node) return "U6__bools";
20637 if (type == pixel_type_node) return "u7__pixel";
20638 if (type == bool_int_type_node) return "U6__booli";
20640 /* Mangle IBM extended float long double as `g' (__float128) on
20641 powerpc*-linux where long-double-64 previously was the default. */
20642 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
20644 && TARGET_LONG_DOUBLE_128
20645 && !TARGET_IEEEQUAD)
20648 /* For all other types, use normal C++ mangling. */
20652 /* Handle a "longcall" or "shortcall" attribute; arguments as in
20653 struct attribute_spec.handler. */
20656 rs6000_handle_longcall_attribute (tree *node, tree name,
20657 tree args ATTRIBUTE_UNUSED,
20658 int flags ATTRIBUTE_UNUSED,
20659 bool *no_add_attrs)
20661 if (TREE_CODE (*node) != FUNCTION_TYPE
20662 && TREE_CODE (*node) != FIELD_DECL
20663 && TREE_CODE (*node) != TYPE_DECL)
20665 warning (OPT_Wattributes, "%qs attribute only applies to functions",
20666 IDENTIFIER_POINTER (name));
20667 *no_add_attrs = true;
20673 /* Set longcall attributes on all functions declared when
20674 rs6000_default_long_calls is true. */
20676 rs6000_set_default_type_attributes (tree type)
20678 if (rs6000_default_long_calls
20679 && (TREE_CODE (type) == FUNCTION_TYPE
20680 || TREE_CODE (type) == METHOD_TYPE))
20681 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
20683 TYPE_ATTRIBUTES (type));
20686 darwin_set_default_type_attributes (type);
20690 /* Return a reference suitable for calling a function with the
20691 longcall attribute. */
20694 rs6000_longcall_ref (rtx call_ref)
20696 const char *call_name;
20699 if (GET_CODE (call_ref) != SYMBOL_REF)
20702 /* System V adds '.' to the internal name, so skip them. */
20703 call_name = XSTR (call_ref, 0);
20704 if (*call_name == '.')
20706 while (*call_name == '.')
20709 node = get_identifier (call_name);
20710 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
20713 return force_reg (Pmode, call_ref);
20716 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
20717 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
20720 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
20721 struct attribute_spec.handler. */
20723 rs6000_handle_struct_attribute (tree *node, tree name,
20724 tree args ATTRIBUTE_UNUSED,
20725 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
20728 if (DECL_P (*node))
20730 if (TREE_CODE (*node) == TYPE_DECL)
20731 type = &TREE_TYPE (*node);
20736 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
20737 || TREE_CODE (*type) == UNION_TYPE)))
20739 warning (OPT_Wattributes, "%qs attribute ignored", IDENTIFIER_POINTER (name));
20740 *no_add_attrs = true;
20743 else if ((is_attribute_p ("ms_struct", name)
20744 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
20745 || ((is_attribute_p ("gcc_struct", name)
20746 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
20748 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
20749 IDENTIFIER_POINTER (name));
20750 *no_add_attrs = true;
20757 rs6000_ms_bitfield_layout_p (const_tree record_type)
20759 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
20760 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
20761 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
20764 #ifdef USING_ELFOS_H
20766 /* A get_unnamed_section callback, used for switching to toc_section. */
20769 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
20771 if (DEFAULT_ABI == ABI_AIX
20772 && TARGET_MINIMAL_TOC
20773 && !TARGET_RELOCATABLE)
20775 if (!toc_initialized)
20777 toc_initialized = 1;
20778 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
20779 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
20780 fprintf (asm_out_file, "\t.tc ");
20781 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
20782 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
20783 fprintf (asm_out_file, "\n");
20785 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
20786 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
20787 fprintf (asm_out_file, " = .+32768\n");
20790 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
20792 else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
20793 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
20796 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
20797 if (!toc_initialized)
20799 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
20800 fprintf (asm_out_file, " = .+32768\n");
20801 toc_initialized = 1;
20806 /* Implement TARGET_ASM_INIT_SECTIONS. */
20809 rs6000_elf_asm_init_sections (void)
20812 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
20815 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
20816 SDATA2_SECTION_ASM_OP);
20819 /* Implement TARGET_SELECT_RTX_SECTION. */
20822 rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
20823 unsigned HOST_WIDE_INT align)
20825 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
20826 return toc_section;
20828 return default_elf_select_rtx_section (mode, x, align);
20831 /* For a SYMBOL_REF, set generic flags and then perform some
20832 target-specific processing.
20834 When the AIX ABI is requested on a non-AIX system, replace the
20835 function name with the real name (with a leading .) rather than the
20836 function descriptor name. This saves a lot of overriding code to
20837 read the prefixes. */
20840 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
20842 default_encode_section_info (decl, rtl, first);
20845 && TREE_CODE (decl) == FUNCTION_DECL
20847 && DEFAULT_ABI == ABI_AIX)
20849 rtx sym_ref = XEXP (rtl, 0);
20850 size_t len = strlen (XSTR (sym_ref, 0));
20851 char *str = XALLOCAVEC (char, len + 2);
20853 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
20854 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
20859 compare_section_name (const char *section, const char *templ)
20863 len = strlen (templ);
20864 return (strncmp (section, templ, len) == 0
20865 && (section[len] == 0 || section[len] == '.'));
20869 rs6000_elf_in_small_data_p (const_tree decl)
20871 if (rs6000_sdata == SDATA_NONE)
20874 /* We want to merge strings, so we never consider them small data. */
20875 if (TREE_CODE (decl) == STRING_CST)
20878 /* Functions are never in the small data area. */
20879 if (TREE_CODE (decl) == FUNCTION_DECL)
20882 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
20884 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
20885 if (compare_section_name (section, ".sdata")
20886 || compare_section_name (section, ".sdata2")
20887 || compare_section_name (section, ".gnu.linkonce.s")
20888 || compare_section_name (section, ".sbss")
20889 || compare_section_name (section, ".sbss2")
20890 || compare_section_name (section, ".gnu.linkonce.sb")
20891 || strcmp (section, ".PPC.EMB.sdata0") == 0
20892 || strcmp (section, ".PPC.EMB.sbss0") == 0)
20897 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
20900 && (unsigned HOST_WIDE_INT) size <= g_switch_value
20901 /* If it's not public, and we're not going to reference it there,
20902 there's no need to put it in the small data section. */
20903 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
20910 #endif /* USING_ELFOS_H */
20912 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
20915 rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
20917 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
20920 /* Return a REG that occurs in ADDR with coefficient 1.
20921 ADDR can be effectively incremented by incrementing REG.
20923 r0 is special and we must not select it as an address
20924 register by this routine since our caller will try to
20925 increment the returned register via an "la" instruction. */
20928 find_addr_reg (rtx addr)
20930 while (GET_CODE (addr) == PLUS)
20932 if (GET_CODE (XEXP (addr, 0)) == REG
20933 && REGNO (XEXP (addr, 0)) != 0)
20934 addr = XEXP (addr, 0);
20935 else if (GET_CODE (XEXP (addr, 1)) == REG
20936 && REGNO (XEXP (addr, 1)) != 0)
20937 addr = XEXP (addr, 1);
20938 else if (CONSTANT_P (XEXP (addr, 0)))
20939 addr = XEXP (addr, 1);
20940 else if (CONSTANT_P (XEXP (addr, 1)))
20941 addr = XEXP (addr, 0);
20943 gcc_unreachable ();
20945 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
20950 rs6000_fatal_bad_address (rtx op)
20952 fatal_insn ("bad address", op);
20957 static tree branch_island_list = 0;
20959 /* Remember to generate a branch island for far calls to the given
20963 add_compiler_branch_island (tree label_name, tree function_name,
20966 tree branch_island = build_tree_list (function_name, label_name);
20967 TREE_TYPE (branch_island) = build_int_cst (NULL_TREE, line_number);
20968 TREE_CHAIN (branch_island) = branch_island_list;
20969 branch_island_list = branch_island;
20972 #define BRANCH_ISLAND_LABEL_NAME(BRANCH_ISLAND) TREE_VALUE (BRANCH_ISLAND)
20973 #define BRANCH_ISLAND_FUNCTION_NAME(BRANCH_ISLAND) TREE_PURPOSE (BRANCH_ISLAND)
20974 #define BRANCH_ISLAND_LINE_NUMBER(BRANCH_ISLAND) \
20975 TREE_INT_CST_LOW (TREE_TYPE (BRANCH_ISLAND))
20977 /* Generate far-jump branch islands for everything on the
20978 branch_island_list. Invoked immediately after the last instruction
20979 of the epilogue has been emitted; the branch-islands must be
20980 appended to, and contiguous with, the function body. Mach-O stubs
20981 are generated in machopic_output_stub(). */
20984 macho_branch_islands (void)
20987 tree branch_island;
20989 for (branch_island = branch_island_list;
20991 branch_island = TREE_CHAIN (branch_island))
20993 const char *label =
20994 IDENTIFIER_POINTER (BRANCH_ISLAND_LABEL_NAME (branch_island));
20996 IDENTIFIER_POINTER (BRANCH_ISLAND_FUNCTION_NAME (branch_island));
20997 char name_buf[512];
20998 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
20999 if (name[0] == '*' || name[0] == '&')
21000 strcpy (name_buf, name+1);
21004 strcpy (name_buf+1, name);
21006 strcpy (tmp_buf, "\n");
21007 strcat (tmp_buf, label);
21008 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
21009 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
21010 dbxout_stabd (N_SLINE, BRANCH_ISLAND_LINE_NUMBER (branch_island));
21011 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
21014 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
21015 strcat (tmp_buf, label);
21016 strcat (tmp_buf, "_pic\n");
21017 strcat (tmp_buf, label);
21018 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
21020 strcat (tmp_buf, "\taddis r11,r11,ha16(");
21021 strcat (tmp_buf, name_buf);
21022 strcat (tmp_buf, " - ");
21023 strcat (tmp_buf, label);
21024 strcat (tmp_buf, "_pic)\n");
21026 strcat (tmp_buf, "\tmtlr r0\n");
21028 strcat (tmp_buf, "\taddi r12,r11,lo16(");
21029 strcat (tmp_buf, name_buf);
21030 strcat (tmp_buf, " - ");
21031 strcat (tmp_buf, label);
21032 strcat (tmp_buf, "_pic)\n");
21034 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
21038 strcat (tmp_buf, ":\nlis r12,hi16(");
21039 strcat (tmp_buf, name_buf);
21040 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
21041 strcat (tmp_buf, name_buf);
21042 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
21044 output_asm_insn (tmp_buf, 0);
21045 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
21046 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
21047 dbxout_stabd (N_SLINE, BRANCH_ISLAND_LINE_NUMBER (branch_island));
21048 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
21051 branch_island_list = 0;
21054 /* NO_PREVIOUS_DEF checks in the linked list whether the function name is
21055 already there or not. */
21058 no_previous_def (tree function_name)
21060 tree branch_island;
21061 for (branch_island = branch_island_list;
21063 branch_island = TREE_CHAIN (branch_island))
21064 if (function_name == BRANCH_ISLAND_FUNCTION_NAME (branch_island))
21069 /* GET_PREV_LABEL gets the label name from the previous definition of
21073 get_prev_label (tree function_name)
21075 tree branch_island;
21076 for (branch_island = branch_island_list;
21078 branch_island = TREE_CHAIN (branch_island))
21079 if (function_name == BRANCH_ISLAND_FUNCTION_NAME (branch_island))
21080 return BRANCH_ISLAND_LABEL_NAME (branch_island);
21084 #ifndef DARWIN_LINKER_GENERATES_ISLANDS
21085 #define DARWIN_LINKER_GENERATES_ISLANDS 0
21088 /* KEXTs still need branch islands. */
21089 #define DARWIN_GENERATE_ISLANDS (!DARWIN_LINKER_GENERATES_ISLANDS \
21090 || flag_mkernel || flag_apple_kext)
21092 /* INSN is either a function call or a millicode call. It may have an
21093 unconditional jump in its delay slot.
21095 CALL_DEST is the routine we are calling. */
21098 output_call (rtx insn, rtx *operands, int dest_operand_number,
21099 int cookie_operand_number)
21101 static char buf[256];
21102 if (DARWIN_GENERATE_ISLANDS
21103 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
21104 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
21107 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
21109 if (no_previous_def (funname))
21111 rtx label_rtx = gen_label_rtx ();
21112 char *label_buf, temp_buf[256];
21113 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
21114 CODE_LABEL_NUMBER (label_rtx));
21115 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
21116 labelname = get_identifier (label_buf);
21117 add_compiler_branch_island (labelname, funname, insn_line (insn));
21120 labelname = get_prev_label (funname);
21122 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
21123 instruction will reach 'foo', otherwise link as 'bl L42'".
21124 "L42" should be a 'branch island', that will do a far jump to
21125 'foo'. Branch islands are generated in
21126 macho_branch_islands(). */
21127 sprintf (buf, "jbsr %%z%d,%.246s",
21128 dest_operand_number, IDENTIFIER_POINTER (labelname));
21131 sprintf (buf, "bl %%z%d", dest_operand_number);
21135 /* Generate PIC and indirect symbol stubs. */
21138 machopic_output_stub (FILE *file, const char *symb, const char *stub)
21140 unsigned int length;
21141 char *symbol_name, *lazy_ptr_name;
21142 char *local_label_0;
21143 static int label = 0;
21145 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
21146 symb = (*targetm.strip_name_encoding) (symb);
21149 length = strlen (symb);
21150 symbol_name = XALLOCAVEC (char, length + 32);
21151 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
21153 lazy_ptr_name = XALLOCAVEC (char, length + 32);
21154 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
21157 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
21159 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
21163 fprintf (file, "\t.align 5\n");
21165 fprintf (file, "%s:\n", stub);
21166 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
21169 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
21170 sprintf (local_label_0, "\"L%011d$spb\"", label);
21172 fprintf (file, "\tmflr r0\n");
21173 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
21174 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
21175 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
21176 lazy_ptr_name, local_label_0);
21177 fprintf (file, "\tmtlr r0\n");
21178 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
21179 (TARGET_64BIT ? "ldu" : "lwzu"),
21180 lazy_ptr_name, local_label_0);
21181 fprintf (file, "\tmtctr r12\n");
21182 fprintf (file, "\tbctr\n");
21186 fprintf (file, "\t.align 4\n");
21188 fprintf (file, "%s:\n", stub);
21189 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
21191 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
21192 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
21193 (TARGET_64BIT ? "ldu" : "lwzu"),
21195 fprintf (file, "\tmtctr r12\n");
21196 fprintf (file, "\tbctr\n");
21199 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
21200 fprintf (file, "%s:\n", lazy_ptr_name);
21201 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
21202 fprintf (file, "%sdyld_stub_binding_helper\n",
21203 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
21206 /* Legitimize PIC addresses. If the address is already
21207 position-independent, we return ORIG. Newly generated
21208 position-independent addresses go into a reg. This is REG if
21209 nonzero; otherwise we allocate register(s) as necessary. */
21211 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
21214 rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
21219 if (reg == NULL && ! reload_in_progress && ! reload_completed)
21220 reg = gen_reg_rtx (Pmode);
21222 if (GET_CODE (orig) == CONST)
21226 if (GET_CODE (XEXP (orig, 0)) == PLUS
21227 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
21230 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
21232 /* Use a different reg for the intermediate value, as
21233 it will be marked UNCHANGING. */
21234 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
21235 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
21238 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
21241 if (GET_CODE (offset) == CONST_INT)
21243 if (SMALL_INT (offset))
21244 return plus_constant (base, INTVAL (offset));
21245 else if (! reload_in_progress && ! reload_completed)
21246 offset = force_reg (Pmode, offset);
21249 rtx mem = force_const_mem (Pmode, orig);
21250 return machopic_legitimize_pic_address (mem, Pmode, reg);
21253 return gen_rtx_PLUS (Pmode, base, offset);
21256 /* Fall back on generic machopic code. */
21257 return machopic_legitimize_pic_address (orig, mode, reg);
21260 /* Output a .machine directive for the Darwin assembler, and call
21261 the generic start_file routine. */
21264 rs6000_darwin_file_start (void)
21266 static const struct
21272 { "ppc64", "ppc64", MASK_64BIT },
21273 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
21274 { "power4", "ppc970", 0 },
21275 { "G5", "ppc970", 0 },
21276 { "7450", "ppc7450", 0 },
21277 { "7400", "ppc7400", MASK_ALTIVEC },
21278 { "G4", "ppc7400", 0 },
21279 { "750", "ppc750", 0 },
21280 { "740", "ppc750", 0 },
21281 { "G3", "ppc750", 0 },
21282 { "604e", "ppc604e", 0 },
21283 { "604", "ppc604", 0 },
21284 { "603e", "ppc603", 0 },
21285 { "603", "ppc603", 0 },
21286 { "601", "ppc601", 0 },
21287 { NULL, "ppc", 0 } };
21288 const char *cpu_id = "";
21291 rs6000_file_start ();
21292 darwin_file_start ();
21294 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
21295 for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
21296 if (rs6000_select[i].set_arch_p && rs6000_select[i].string
21297 && rs6000_select[i].string[0] != '\0')
21298 cpu_id = rs6000_select[i].string;
21300 /* Look through the mapping array. Pick the first name that either
21301 matches the argument, has a bit set in IF_SET that is also set
21302 in the target flags, or has a NULL name. */
21305 while (mapping[i].arg != NULL
21306 && strcmp (mapping[i].arg, cpu_id) != 0
21307 && (mapping[i].if_set & target_flags) == 0)
21310 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
21313 #endif /* TARGET_MACHO */
21317 rs6000_elf_reloc_rw_mask (void)
21321 else if (DEFAULT_ABI == ABI_AIX)
21327 /* Record an element in the table of global constructors. SYMBOL is
21328 a SYMBOL_REF of the function to be called; PRIORITY is a number
21329 between 0 and MAX_INIT_PRIORITY.
21331 This differs from default_named_section_asm_out_constructor in
21332 that we have special handling for -mrelocatable. */
21335 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
21337 const char *section = ".ctors";
21340 if (priority != DEFAULT_INIT_PRIORITY)
21342 sprintf (buf, ".ctors.%.5u",
21343 /* Invert the numbering so the linker puts us in the proper
21344 order; constructors are run from right to left, and the
21345 linker sorts in increasing order. */
21346 MAX_INIT_PRIORITY - priority);
21350 switch_to_section (get_section (section, SECTION_WRITE, NULL));
21351 assemble_align (POINTER_SIZE);
21353 if (TARGET_RELOCATABLE)
21355 fputs ("\t.long (", asm_out_file);
21356 output_addr_const (asm_out_file, symbol);
21357 fputs (")@fixup\n", asm_out_file);
21360 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
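/* Illustrative sketch (not part of GCC): a standalone demonstration of
   the priority inversion above.  MAX_INIT_PRIORITY is assumed to be
   65535 here; a higher user priority then yields a lexicographically
   smaller section name, which the linker sorts first.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const unsigned max_init_priority = 65535;	/* assumed value */
  unsigned priorities[] = { 101, 65535 };
  char buf[32];
  unsigned i;

  for (i = 0; i < 2; i++)
    {
      sprintf (buf, ".ctors.%.5u", max_init_priority - priorities[i]);
      printf ("priority %u -> section %s\n", priorities[i], buf);
    }
  /* priority 101   -> section .ctors.65434
     priority 65535 -> section .ctors.00000  */
  return 0;
}
#endif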
21364 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
21366 const char *section = ".dtors";
21369 if (priority != DEFAULT_INIT_PRIORITY)
21371 sprintf (buf, ".dtors.%.5u",
21372 /* Invert the numbering so the linker puts us in the proper
21373 order; constructors are run from right to left, and the
21374 linker sorts in increasing order. */
21375 MAX_INIT_PRIORITY - priority);
21379 switch_to_section (get_section (section, SECTION_WRITE, NULL));
21380 assemble_align (POINTER_SIZE);
21382 if (TARGET_RELOCATABLE)
21384 fputs ("\t.long (", asm_out_file);
21385 output_addr_const (asm_out_file, symbol);
21386 fputs (")@fixup\n", asm_out_file);
21389 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
21393 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
21397 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
21398 ASM_OUTPUT_LABEL (file, name);
21399 fputs (DOUBLE_INT_ASM_OP, file);
21400 rs6000_output_function_entry (file, name);
21401 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
21404 fputs ("\t.size\t", file);
21405 assemble_name (file, name);
21406 fputs (",24\n\t.type\t.", file);
21407 assemble_name (file, name);
21408 fputs (",@function\n", file);
21409 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
21411 fputs ("\t.globl\t.", file);
21412 assemble_name (file, name);
21417 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
21418 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
21419 rs6000_output_function_entry (file, name);
21420 fputs (":\n", file);
21424 if (TARGET_RELOCATABLE
21425 && !TARGET_SECURE_PLT
21426 && (get_pool_size () != 0 || crtl->profile)
21431 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
21433 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
21434 fprintf (file, "\t.long ");
21435 assemble_name (file, buf);
21437 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
21438 assemble_name (file, buf);
21442 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
21443 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
21445 if (DEFAULT_ABI == ABI_AIX)
21447 const char *desc_name, *orig_name;
21449 orig_name = (*targetm.strip_name_encoding) (name);
21450 desc_name = orig_name;
21451 while (*desc_name == '.')
21454 if (TREE_PUBLIC (decl))
21455 fprintf (file, "\t.globl %s\n", desc_name);
21457 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
21458 fprintf (file, "%s:\n", desc_name);
21459 fprintf (file, "\t.long %s\n", orig_name);
21460 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
21461 if (DEFAULT_ABI == ABI_AIX)
21462 fputs ("\t.long 0\n", file);
21463 fprintf (file, "\t.previous\n");
21465 ASM_OUTPUT_LABEL (file, name);
21469 rs6000_elf_end_indicate_exec_stack (void)
21472 file_end_indicate_exec_stack ();
21478 rs6000_xcoff_asm_output_anchor (rtx symbol)
21482 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
21483 SYMBOL_REF_BLOCK_OFFSET (symbol));
21484 ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
21488 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
21490 fputs (GLOBAL_ASM_OP, stream);
21491 RS6000_OUTPUT_BASENAME (stream, name);
21492 putc ('\n', stream);
21495 /* A get_unnamed_decl callback, used for read-only sections. PTR
21496 points to the section string variable. */
21499 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
21501 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
21502 *(const char *const *) directive,
21503 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
21506 /* Likewise for read-write sections. */
21509 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
21511 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
21512 *(const char *const *) directive,
21513 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
21516 /* A get_unnamed_section callback, used for switching to toc_section. */
21519 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
21521 if (TARGET_MINIMAL_TOC)
21523 /* toc_section is always selected at least once from
21524 rs6000_xcoff_file_start, so this is guaranteed to
21525 always be defined once and only once in each file. */
21526 if (!toc_initialized)
21528 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
21529 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
21530 toc_initialized = 1;
21532 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
21533 (TARGET_32BIT ? "" : ",3"));
21536 fputs ("\t.toc\n", asm_out_file);
21539 /* Implement TARGET_ASM_INIT_SECTIONS. */
21542 rs6000_xcoff_asm_init_sections (void)
21544 read_only_data_section
21545 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
21546 &xcoff_read_only_section_name);
21548 private_data_section
21549 = get_unnamed_section (SECTION_WRITE,
21550 rs6000_xcoff_output_readwrite_section_asm_op,
21551 &xcoff_private_data_section_name);
21553 read_only_private_data_section
21554 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
21555 &xcoff_private_data_section_name);
21558 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
21560 readonly_data_section = read_only_data_section;
21561 exception_section = data_section;
21565 rs6000_xcoff_reloc_rw_mask (void)
21571 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
21572 tree decl ATTRIBUTE_UNUSED)
21575 static const char * const suffix[3] = { "PR", "RO", "RW" };
21577 if (flags & SECTION_CODE)
21579 else if (flags & SECTION_WRITE)
21584 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
21585 (flags & SECTION_CODE) ? "." : "",
21586 name, suffix[smclass], flags & SECTION_ENTSIZE);
21590 rs6000_xcoff_select_section (tree decl, int reloc,
21591 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
21593 if (decl_readonly_section (decl, reloc))
21595 if (TREE_PUBLIC (decl))
21596 return read_only_data_section;
21598 return read_only_private_data_section;
21602 if (TREE_PUBLIC (decl))
21603 return data_section;
21605 return private_data_section;
21610 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
21614 /* Use select_section for private and uninitialized data. */
21615 if (!TREE_PUBLIC (decl)
21616 || DECL_COMMON (decl)
21617 || DECL_INITIAL (decl) == NULL_TREE
21618 || DECL_INITIAL (decl) == error_mark_node
21619 || (flag_zero_initialized_in_bss
21620 && initializer_zerop (DECL_INITIAL (decl))))
21623 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
21624 name = (*targetm.strip_name_encoding) (name);
21625 DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
21628 /* Select the section for a constant in the constant pool.
21630 On RS/6000, all constants are in the private read-only data area.
21631 However, if this is being placed in the TOC it must be output as a toc entry.  */
21635 rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
21636 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
21638 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
21639 return toc_section;
21641 return read_only_private_data_section;
21644 /* Remove any trailing [DS] or the like from the symbol name. */
21646 static const char *
21647 rs6000_xcoff_strip_name_encoding (const char *name)
21652 len = strlen (name);
21653 if (name[len - 1] == ']')
21654 return ggc_alloc_string (name, len - 4);
21659 /* Section attributes. AIX is always PIC. */
21661 static unsigned int
21662 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
21664 unsigned int align;
21665 unsigned int flags = default_section_type_flags (decl, name, reloc);
21667 /* Align to at least UNIT size. */
21668 if (flags & SECTION_CODE)
21669 align = MIN_UNITS_PER_WORD;
21671 /* Increase alignment of large objects if not already stricter. */
21672 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
21673 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
21674 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
21676 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
21679 /* Output at beginning of assembler file.
21681 Initialize the section names for the RS/6000 at this point.
21683 Specify filename, including full path, to assembler.
21685 We want to go into the TOC section so at least one .toc will be emitted.
21686 Also, in order to output proper .bs/.es pairs, we need at least one static
21687 [RW] section emitted.
21689 Finally, declare mcount when profiling to make the assembler happy. */
21692 rs6000_xcoff_file_start (void)
21694 rs6000_gen_section_name (&xcoff_bss_section_name,
21695 main_input_filename, ".bss_");
21696 rs6000_gen_section_name (&xcoff_private_data_section_name,
21697 main_input_filename, ".rw_");
21698 rs6000_gen_section_name (&xcoff_read_only_section_name,
21699 main_input_filename, ".ro_");
21701 fputs ("\t.file\t", asm_out_file);
21702 output_quoted_string (asm_out_file, main_input_filename);
21703 fputc ('\n', asm_out_file);
21704 if (write_symbols != NO_DEBUG)
21705 switch_to_section (private_data_section);
21706 switch_to_section (text_section);
21708 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
21709 rs6000_file_start ();
21712 /* Output at end of assembler file.
21713 On the RS/6000, referencing data should automatically pull in text. */
21716 rs6000_xcoff_file_end (void)
21718 switch_to_section (text_section);
21719 fputs ("_section_.text:\n", asm_out_file);
21720 switch_to_section (data_section);
21721 fputs (TARGET_32BIT
21722 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
21725 #endif /* TARGET_XCOFF */
21727 /* Compute a (partial) cost for rtx X. Return true if the complete
21728 cost has been computed, and false if subexpressions should be
21729 scanned. In either case, *TOTAL contains the cost result. */
21732 rs6000_rtx_costs (rtx x, int code, int outer_code, int *total,
21735 enum machine_mode mode = GET_MODE (x);
21739 /* On the RS/6000, if it is valid in the insn, it is free. */
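/* For example, a signed 16-bit immediate satisfying the 'I' constraint
   folds directly into addi/addic, and an unsigned 16-bit mask satisfying
   'K' folds into andi./ori/xori, so such constants add no cost of their
   own.  */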
21741 if (((outer_code == SET
21742 || outer_code == PLUS
21743 || outer_code == MINUS)
21744 && (satisfies_constraint_I (x)
21745 || satisfies_constraint_L (x)))
21746 || (outer_code == AND
21747 && (satisfies_constraint_K (x)
21749 ? satisfies_constraint_L (x)
21750 : satisfies_constraint_J (x))
21751 || mask_operand (x, mode)
21753 && mask64_operand (x, DImode))))
21754 || ((outer_code == IOR || outer_code == XOR)
21755 && (satisfies_constraint_K (x)
21757 ? satisfies_constraint_L (x)
21758 : satisfies_constraint_J (x))))
21759 || outer_code == ASHIFT
21760 || outer_code == ASHIFTRT
21761 || outer_code == LSHIFTRT
21762 || outer_code == ROTATE
21763 || outer_code == ROTATERT
21764 || outer_code == ZERO_EXTRACT
21765 || (outer_code == MULT
21766 && satisfies_constraint_I (x))
21767 || ((outer_code == DIV || outer_code == UDIV
21768 || outer_code == MOD || outer_code == UMOD)
21769 && exact_log2 (INTVAL (x)) >= 0)
21770 || (outer_code == COMPARE
21771 && (satisfies_constraint_I (x)
21772 || satisfies_constraint_K (x)))
21773 || (outer_code == EQ
21774 && (satisfies_constraint_I (x)
21775 || satisfies_constraint_K (x)
21777 ? satisfies_constraint_L (x)
21778 : satisfies_constraint_J (x))))
21779 || (outer_code == GTU
21780 && satisfies_constraint_I (x))
21781 || (outer_code == LTU
21782 && satisfies_constraint_P (x)))
21787 else if ((outer_code == PLUS
21788 && reg_or_add_cint_operand (x, VOIDmode))
21789 || (outer_code == MINUS
21790 && reg_or_sub_cint_operand (x, VOIDmode))
21791 || ((outer_code == SET
21792 || outer_code == IOR
21793 || outer_code == XOR)
21795 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
21797 *total = COSTS_N_INSNS (1);
21803 if (mode == DImode && code == CONST_DOUBLE)
21805 if ((outer_code == IOR || outer_code == XOR)
21806 && CONST_DOUBLE_HIGH (x) == 0
21807 && (CONST_DOUBLE_LOW (x)
21808 & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)
21813 else if ((outer_code == AND && and64_2_operand (x, DImode))
21814 || ((outer_code == SET
21815 || outer_code == IOR
21816 || outer_code == XOR)
21817 && CONST_DOUBLE_HIGH (x) == 0))
21819 *total = COSTS_N_INSNS (1);
21829 /* When optimizing for size, MEM should be slightly more expensive
21830 than generating the address, e.g., (plus (reg) (const)).
21831 L1 cache latency is about two instructions. */
21832 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
21840 if (mode == DFmode)
21842 if (GET_CODE (XEXP (x, 0)) == MULT)
21844 /* FNMA accounted in outer NEG. */
21845 if (outer_code == NEG)
21846 *total = rs6000_cost->dmul - rs6000_cost->fp;
21848 *total = rs6000_cost->dmul;
21851 *total = rs6000_cost->fp;
21853 else if (mode == SFmode)
21855 /* FNMA accounted in outer NEG. */
21856 if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
21859 *total = rs6000_cost->fp;
21862 *total = COSTS_N_INSNS (1);
21866 if (mode == DFmode)
21868 if (GET_CODE (XEXP (x, 0)) == MULT
21869 || GET_CODE (XEXP (x, 1)) == MULT)
21871 /* FNMA accounted in outer NEG. */
21872 if (outer_code == NEG)
21873 *total = rs6000_cost->dmul - rs6000_cost->fp;
21875 *total = rs6000_cost->dmul;
21878 *total = rs6000_cost->fp;
21880 else if (mode == SFmode)
21882 /* FNMA accounted in outer NEG. */
21883 if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
21886 *total = rs6000_cost->fp;
21889 *total = COSTS_N_INSNS (1);
21893 if (GET_CODE (XEXP (x, 1)) == CONST_INT
21894 && satisfies_constraint_I (XEXP (x, 1)))
21896 if (INTVAL (XEXP (x, 1)) >= -256
21897 && INTVAL (XEXP (x, 1)) <= 255)
21898 *total = rs6000_cost->mulsi_const9;
21900 *total = rs6000_cost->mulsi_const;
21902 /* FMA accounted in outer PLUS/MINUS. */
21903 else if ((mode == DFmode || mode == SFmode)
21904 && (outer_code == PLUS || outer_code == MINUS))
21906 else if (mode == DFmode)
21907 *total = rs6000_cost->dmul;
21908 else if (mode == SFmode)
21909 *total = rs6000_cost->fp;
21910 else if (mode == DImode)
21911 *total = rs6000_cost->muldi;
21913 *total = rs6000_cost->mulsi;
21918 if (FLOAT_MODE_P (mode))
21920 *total = mode == DFmode ? rs6000_cost->ddiv
21921 : rs6000_cost->sdiv;
21928 if (GET_CODE (XEXP (x, 1)) == CONST_INT
21929 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
21931 if (code == DIV || code == MOD)
21933 *total = COSTS_N_INSNS (2);
21936 *total = COSTS_N_INSNS (1);
21940 if (GET_MODE (XEXP (x, 1)) == DImode)
21941 *total = rs6000_cost->divdi;
21943 *total = rs6000_cost->divsi;
21945 /* Add in shift and subtract for MOD. */
21946 if (code == MOD || code == UMOD)
21947 *total += COSTS_N_INSNS (2);
21952 *total = COSTS_N_INSNS (4);
21956 *total = COSTS_N_INSNS (6);
21960 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
21972 *total = COSTS_N_INSNS (1);
21980 /* Handle mul_highpart. */
21981 if (outer_code == TRUNCATE
21982 && GET_CODE (XEXP (x, 0)) == MULT)
21984 if (mode == DImode)
21985 *total = rs6000_cost->muldi;
21987 *total = rs6000_cost->mulsi;
21990 else if (outer_code == AND)
21993 *total = COSTS_N_INSNS (1);
21998 if (GET_CODE (XEXP (x, 0)) == MEM)
22001 *total = COSTS_N_INSNS (1);
22007 if (!FLOAT_MODE_P (mode))
22009 *total = COSTS_N_INSNS (1);
22015 case UNSIGNED_FLOAT:
22018 case FLOAT_TRUNCATE:
22019 *total = rs6000_cost->fp;
22023 if (mode == DFmode)
22026 *total = rs6000_cost->fp;
22030 switch (XINT (x, 1))
22033 *total = rs6000_cost->fp;
22045 *total = COSTS_N_INSNS (1);
22048 else if (FLOAT_MODE_P (mode)
22049 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
22051 *total = rs6000_cost->fp;
22059 /* Carry bit requires mode == Pmode.
22060 NEG or PLUS already counted so only add one. */
22062 && (outer_code == NEG || outer_code == PLUS))
22064 *total = COSTS_N_INSNS (1);
22067 if (outer_code == SET)
22069 if (XEXP (x, 1) == const0_rtx)
22071 *total = COSTS_N_INSNS (2);
22074 else if (mode == Pmode)
22076 *total = COSTS_N_INSNS (3);
22085 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
22087 *total = COSTS_N_INSNS (2);
22091 if (outer_code == COMPARE)
22105 /* A C expression returning the cost of moving data of mode MODE from a
22106 register of class FROM to one of class TO.  */
22109 rs6000_register_move_cost (enum machine_mode mode,
22110 enum reg_class from, enum reg_class to)
22112 /* Moves from/to GENERAL_REGS. */
22113 if (reg_classes_intersect_p (to, GENERAL_REGS)
22114 || reg_classes_intersect_p (from, GENERAL_REGS))
22116 if (! reg_classes_intersect_p (to, GENERAL_REGS))
22119 if (from == FLOAT_REGS || from == ALTIVEC_REGS)
22120 return (rs6000_memory_move_cost (mode, from, 0)
22121 + rs6000_memory_move_cost (mode, GENERAL_REGS, 0));
22123 /* It's more expensive to move CR_REGS than CR0_REGS because of the
22125 else if (from == CR_REGS)
22128 /* Power6 has slower LR/CTR moves so make them more expensive than
22129 memory in order to bias spills to memory.  */
22130 else if (rs6000_cpu == PROCESSOR_POWER6
22131 && reg_classes_intersect_p (from, LINK_OR_CTR_REGS))
22132 return 6 * hard_regno_nregs[0][mode];
22135 /* A move will cost one instruction per GPR moved. */
22136 return 2 * hard_regno_nregs[0][mode];
22139 /* Moving between two similar registers is just one instruction. */
22140 else if (reg_classes_intersect_p (to, from))
22141 return (mode == TFmode || mode == TDmode) ? 4 : 2;
22143 /* Everything else has to go through GENERAL_REGS. */
22145 return (rs6000_register_move_cost (mode, GENERAL_REGS, to)
22146 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
22149 /* A C expression returning the cost of moving data of MODE from a register to or from memory.  */
22153 rs6000_memory_move_cost (enum machine_mode mode, enum reg_class rclass,
22154 int in ATTRIBUTE_UNUSED)
22156 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
22157 return 4 * hard_regno_nregs[0][mode];
22158 else if (reg_classes_intersect_p (rclass, FLOAT_REGS))
22159 return 4 * hard_regno_nregs[32][mode];
22160 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
22161 return 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
22163 return 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
22166 /* Return the target-specific builtin that implements the
22167 reciprocal of the given function, or NULL_TREE if not available.  */
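/* For example, 1.0f/sqrtf(x) can then be rewritten to use the reciprocal
   square root estimate builtin (RS6000_BUILTIN_RSQRTF) refined by
   Newton-Raphson, avoiding both the divide and the sqrt call; this is
   only done under the flag combination tested here.  */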
22170 rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
22171 bool sqrt ATTRIBUTE_UNUSED)
22173 if (! (TARGET_RECIP && TARGET_PPC_GFXOPT && !optimize_size
22174 && flag_finite_math_only && !flag_trapping_math
22175 && flag_unsafe_math_optimizations))
22183 case BUILT_IN_SQRTF:
22184 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];
22191 /* Newton-Raphson approximation of single-precision floating point divide n/d.
22192 Assumes no trapping math and finite arguments. */
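/* With x0 ~= 1/d from the hardware estimate and e0 = 1 - d*x0 its
   relative error, 1/d = x0/(1 - e0) = x0*(1 + e0 + e0^2 + ...), so
   e1 = e0 + e0*e0 and y1 = x0 + e1*x0 give a third-order refinement
   of x0.  The final u0 + (n - d*u0)*y1 step then corrects the
   quotient n*y1 itself, improving the last-bit rounding.  */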
22195 rs6000_emit_swdivsf (rtx dst, rtx n, rtx d)
22197 rtx x0, e0, e1, y1, u0, v0, one;
22199 x0 = gen_reg_rtx (SFmode);
22200 e0 = gen_reg_rtx (SFmode);
22201 e1 = gen_reg_rtx (SFmode);
22202 y1 = gen_reg_rtx (SFmode);
22203 u0 = gen_reg_rtx (SFmode);
22204 v0 = gen_reg_rtx (SFmode);
22205 one = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, SFmode));
22207 /* x0 = 1./d estimate */
22208 emit_insn (gen_rtx_SET (VOIDmode, x0,
22209 gen_rtx_UNSPEC (SFmode, gen_rtvec (1, d),
22211 /* e0 = 1. - d * x0 */
22212 emit_insn (gen_rtx_SET (VOIDmode, e0,
22213 gen_rtx_MINUS (SFmode, one,
22214 gen_rtx_MULT (SFmode, d, x0))));
22215 /* e1 = e0 + e0 * e0 */
22216 emit_insn (gen_rtx_SET (VOIDmode, e1,
22217 gen_rtx_PLUS (SFmode,
22218 gen_rtx_MULT (SFmode, e0, e0), e0)));
22219 /* y1 = x0 + e1 * x0 */
22220 emit_insn (gen_rtx_SET (VOIDmode, y1,
22221 gen_rtx_PLUS (SFmode,
22222 gen_rtx_MULT (SFmode, e1, x0), x0)));
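22223 /* u0 = n * y1 */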
22224 emit_insn (gen_rtx_SET (VOIDmode, u0,
22225 gen_rtx_MULT (SFmode, n, y1)));
22226 /* v0 = n - d * u0 */
22227 emit_insn (gen_rtx_SET (VOIDmode, v0,
22228 gen_rtx_MINUS (SFmode, n,
22229 gen_rtx_MULT (SFmode, d, u0))));
22230 /* dst = u0 + v0 * y1 */
22231 emit_insn (gen_rtx_SET (VOIDmode, dst,
22232 gen_rtx_PLUS (SFmode,
22233 gen_rtx_MULT (SFmode, v0, y1), u0)));
22236 /* Newton-Raphson approximation of double-precision floating point divide n/d.
22237 Assumes no trapping math and finite arguments. */
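/* Same scheme as the SFmode version above, but with a chain of three
   refinements (y1, y2, y3); each y + e*y step roughly squares the
   remaining relative error, i.e. doubles the number of correct bits,
   which the wider DFmode significand requires before the final
   n - d*u0 correction.  */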
22240 rs6000_emit_swdivdf (rtx dst, rtx n, rtx d)
22242 rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;
22244 x0 = gen_reg_rtx (DFmode);
22245 e0 = gen_reg_rtx (DFmode);
22246 e1 = gen_reg_rtx (DFmode);
22247 e2 = gen_reg_rtx (DFmode);
22248 y1 = gen_reg_rtx (DFmode);
22249 y2 = gen_reg_rtx (DFmode);
22250 y3 = gen_reg_rtx (DFmode);
22251 u0 = gen_reg_rtx (DFmode);
22252 v0 = gen_reg_rtx (DFmode);
22253 one = force_reg (DFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, DFmode));
22255 /* x0 = 1./d estimate */
22256 emit_insn (gen_rtx_SET (VOIDmode, x0,
22257 gen_rtx_UNSPEC (DFmode, gen_rtvec (1, d),
22259 /* e0 = 1. - d * x0 */
22260 emit_insn (gen_rtx_SET (VOIDmode, e0,
22261 gen_rtx_MINUS (DFmode, one,
22262 gen_rtx_MULT (DFmode, d, x0))));
22263 /* y1 = x0 + e0 * x0 */
22264 emit_insn (gen_rtx_SET (VOIDmode, y1,
22265 gen_rtx_PLUS (DFmode,
22266 gen_rtx_MULT (DFmode, e0, x0), x0)));
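22267 /* e1 = e0 * e0 */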
22268 emit_insn (gen_rtx_SET (VOIDmode, e1,
22269 gen_rtx_MULT (DFmode, e0, e0)));
22270 /* y2 = y1 + e1 * y1 */
22271 emit_insn (gen_rtx_SET (VOIDmode, y2,
22272 gen_rtx_PLUS (DFmode,
22273 gen_rtx_MULT (DFmode, e1, y1), y1)));
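22274 /* e2 = e1 * e1 */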
22275 emit_insn (gen_rtx_SET (VOIDmode, e2,
22276 gen_rtx_MULT (DFmode, e1, e1)));
22277 /* y3 = y2 + e2 * y2 */
22278 emit_insn (gen_rtx_SET (VOIDmode, y3,
22279 gen_rtx_PLUS (DFmode,
22280 gen_rtx_MULT (DFmode, e2, y2), y2)));
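22281 /* u0 = n * y3 */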
22282 emit_insn (gen_rtx_SET (VOIDmode, u0,
22283 gen_rtx_MULT (DFmode, n, y3)));
22284 /* v0 = n - d * u0 */
22285 emit_insn (gen_rtx_SET (VOIDmode, v0,
22286 gen_rtx_MINUS (DFmode, n,
22287 gen_rtx_MULT (DFmode, d, u0))));
22288 /* dst = u0 + v0 * y3 */
22289 emit_insn (gen_rtx_SET (VOIDmode, dst,
22290 gen_rtx_PLUS (DFmode,
22291 gen_rtx_MULT (DFmode, v0, y3), u0)));
22295 /* Newton-Raphson approximation of single-precision floating point rsqrt.
22296 Assumes no trapping math and finite arguments. */
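/* The refinement below is the classic Newton-Raphson step for rsqrt,
   x(i+1) = x(i) * (1.5 - 0.5*src * x(i)*x(i)), with y1 = 0.5*src
   computed as 1.5*src - src to reuse the 1.5 constant.  Three steps
   starting from the hardware estimate suffice for single precision.
   The early src*src == src test branches around the refinement for
   inputs such as 0.0, 1.0 and Inf, where the estimate itself is
   already the answer (or the iteration would not improve it).  */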
22299 rs6000_emit_swrsqrtsf (rtx dst, rtx src)
22301 rtx x0, x1, x2, y1, u0, u1, u2, v0, v1, v2, t0,
22302 half, one, halfthree, c1, cond, label;
22304 x0 = gen_reg_rtx (SFmode);
22305 x1 = gen_reg_rtx (SFmode);
22306 x2 = gen_reg_rtx (SFmode);
22307 y1 = gen_reg_rtx (SFmode);
22308 u0 = gen_reg_rtx (SFmode);
22309 u1 = gen_reg_rtx (SFmode);
22310 u2 = gen_reg_rtx (SFmode);
22311 v0 = gen_reg_rtx (SFmode);
22312 v1 = gen_reg_rtx (SFmode);
22313 v2 = gen_reg_rtx (SFmode);
22314 t0 = gen_reg_rtx (SFmode);
22315 halfthree = gen_reg_rtx (SFmode);
22316 cond = gen_rtx_REG (CCFPmode, CR1_REGNO);
22317 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
22319 /* check 0.0, 1.0, NaN, Inf by testing src * src = src */
22320 emit_insn (gen_rtx_SET (VOIDmode, t0,
22321 gen_rtx_MULT (SFmode, src, src)));
22323 emit_insn (gen_rtx_SET (VOIDmode, cond,
22324 gen_rtx_COMPARE (CCFPmode, t0, src)));
22325 c1 = gen_rtx_EQ (VOIDmode, cond, const0_rtx);
22326 emit_unlikely_jump (c1, label);
22328 half = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconsthalf, SFmode));
22329 one = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, SFmode));
22331 /* halfthree = 1.5 = 1.0 + 0.5 */
22332 emit_insn (gen_rtx_SET (VOIDmode, halfthree,
22333 gen_rtx_PLUS (SFmode, one, half)));
22335 /* x0 = rsqrt estimate */
22336 emit_insn (gen_rtx_SET (VOIDmode, x0,
22337 gen_rtx_UNSPEC (SFmode, gen_rtvec (1, src),
22340 /* y1 = 0.5 * src = 1.5 * src - src -> fewer constants */
22341 emit_insn (gen_rtx_SET (VOIDmode, y1,
22342 gen_rtx_MINUS (SFmode,
22343 gen_rtx_MULT (SFmode, src, halfthree),
22346 /* x1 = x0 * (1.5 - y1 * (x0 * x0)) */
22347 emit_insn (gen_rtx_SET (VOIDmode, u0,
22348 gen_rtx_MULT (SFmode, x0, x0)));
22349 emit_insn (gen_rtx_SET (VOIDmode, v0,
22350 gen_rtx_MINUS (SFmode,
22352 gen_rtx_MULT (SFmode, y1, u0))));
22353 emit_insn (gen_rtx_SET (VOIDmode, x1,
22354 gen_rtx_MULT (SFmode, x0, v0)));
22356 /* x2 = x1 * (1.5 - y1 * (x1 * x1)) */
22357 emit_insn (gen_rtx_SET (VOIDmode, u1,
22358 gen_rtx_MULT (SFmode, x1, x1)));
22359 emit_insn (gen_rtx_SET (VOIDmode, v1,
22360 gen_rtx_MINUS (SFmode,
22362 gen_rtx_MULT (SFmode, y1, u1))));
22363 emit_insn (gen_rtx_SET (VOIDmode, x2,
22364 gen_rtx_MULT (SFmode, x1, v1)));
22366 /* dst = x2 * (1.5 - y1 * (x2 * x2)) */
22367 emit_insn (gen_rtx_SET (VOIDmode, u2,
22368 gen_rtx_MULT (SFmode, x2, x2)));
22369 emit_insn (gen_rtx_SET (VOIDmode, v2,
22370 gen_rtx_MINUS (SFmode,
22372 gen_rtx_MULT (SFmode, y1, u2))));
22373 emit_insn (gen_rtx_SET (VOIDmode, dst,
22374 gen_rtx_MULT (SFmode, x2, v2)));
22376 emit_label (XEXP (label, 0));
22379 /* Emit popcount intrinsic on TARGET_POPCNTB targets. DST is the
22380 target, and SRC is the argument operand. */
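/* popcntb places the population count of each byte of SRC in the
   corresponding result byte.  Multiplying by 0x0101...01 then sums all
   the byte counts into the most significant byte (no carries are
   possible since each count is at most 8), and the final logical shift
   right by 24 (or 56 for DImode) extracts that total.  */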
22383 rs6000_emit_popcount (rtx dst, rtx src)
22385 enum machine_mode mode = GET_MODE (dst);
22388 tmp1 = gen_reg_rtx (mode);
22390 if (mode == SImode)
22392 emit_insn (gen_popcntbsi2 (tmp1, src));
22393 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
22395 tmp2 = force_reg (SImode, tmp2);
22396 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
22400 emit_insn (gen_popcntbdi2 (tmp1, src));
22401 tmp2 = expand_mult (DImode, tmp1,
22402 GEN_INT ((HOST_WIDE_INT)
22403 0x01010101 << 32 | 0x01010101),
22405 tmp2 = force_reg (DImode, tmp2);
22406 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
22411 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
22412 target, and SRC is the argument operand. */
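/* The parity of a word is the low bit of its population count.  If a
   multiply is cheap we simply reuse rs6000_emit_popcount; otherwise we
   take the per-byte counts from popcntb and fold them together with
   shift/xor steps, since xoring two count fields preserves the xor of
   their low bits.  Either way the final AND with 1 extracts the parity
   bit.  */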
22415 rs6000_emit_parity (rtx dst, rtx src)
22417 enum machine_mode mode = GET_MODE (dst);
22420 tmp = gen_reg_rtx (mode);
22421 if (mode == SImode)
22423 /* Is mult+shift >= shift+xor+shift+xor? */
22424 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
22426 rtx tmp1, tmp2, tmp3, tmp4;
22428 tmp1 = gen_reg_rtx (SImode);
22429 emit_insn (gen_popcntbsi2 (tmp1, src));
22431 tmp2 = gen_reg_rtx (SImode);
22432 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
22433 tmp3 = gen_reg_rtx (SImode);
22434 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
22436 tmp4 = gen_reg_rtx (SImode);
22437 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
22438 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
22441 rs6000_emit_popcount (tmp, src);
22442 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
22446 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
22447 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
22449 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
22451 tmp1 = gen_reg_rtx (DImode);
22452 emit_insn (gen_popcntbdi2 (tmp1, src));
22454 tmp2 = gen_reg_rtx (DImode);
22455 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
22456 tmp3 = gen_reg_rtx (DImode);
22457 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
22459 tmp4 = gen_reg_rtx (DImode);
22460 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
22461 tmp5 = gen_reg_rtx (DImode);
22462 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
22464 tmp6 = gen_reg_rtx (DImode);
22465 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
22466 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
22469 rs6000_emit_popcount (tmp, src);
22470 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
22474 /* Return an RTX representing where to find the function value of a
22475 function returning MODE. */
22477 rs6000_complex_function_value (enum machine_mode mode)
22479 unsigned int regno;
22481 enum machine_mode inner = GET_MODE_INNER (mode);
22482 unsigned int inner_bytes = GET_MODE_SIZE (inner);
22484 if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
22485 regno = FP_ARG_RETURN;
22488 regno = GP_ARG_RETURN;
22490 /* 32-bit is OK since it'll go in r3/r4. */
22491 if (TARGET_32BIT && inner_bytes >= 4)
22492 return gen_rtx_REG (mode, regno);
22495 if (inner_bytes >= 8)
22496 return gen_rtx_REG (mode, regno);
22498 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
22500 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
22501 GEN_INT (inner_bytes));
22502 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
22505 /* Define how to find the value returned by a function.
22506 VALTYPE is the data type of the value (as a tree).
22507 If the precise function being called is known, FUNC is its FUNCTION_DECL;
22508 otherwise, FUNC is 0.
22510 On the SPE, both FPs and vectors are returned in r3.
22512 On RS/6000 an integer value is in r3 and a floating-point value is in
22513 fp1, unless -msoft-float. */
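/* In terms of the macros used below, GP_ARG_RETURN is r3,
   FP_ARG_RETURN is f1 and ALTIVEC_ARG_RETURN is v2, so e.g. an int is
   returned in r3, a double in f1 (given hard float) and an AltiVec
   vector in v2 under the AltiVec ABI.  */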
22516 rs6000_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
22518 enum machine_mode mode;
22519 unsigned int regno;
22521 /* Special handling for structs in darwin64. */
22522 if (rs6000_darwin64_abi
22523 && TYPE_MODE (valtype) == BLKmode
22524 && TREE_CODE (valtype) == RECORD_TYPE
22525 && int_size_in_bytes (valtype) > 0)
22527 CUMULATIVE_ARGS valcum;
22531 valcum.fregno = FP_ARG_MIN_REG;
22532 valcum.vregno = ALTIVEC_ARG_MIN_REG;
22533 /* Do a trial code generation as if this were going to be passed as
22534 an argument; if any part goes in memory, we return NULL. */
22535 valret = rs6000_darwin64_record_arg (&valcum, valtype, 1, true);
22538 /* Otherwise fall through to standard ABI rules. */
22541 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
22543 /* A long long return value needs to be split under the 32-bit -mpowerpc64 ABI. */
22544 return gen_rtx_PARALLEL (DImode,
22546 gen_rtx_EXPR_LIST (VOIDmode,
22547 gen_rtx_REG (SImode, GP_ARG_RETURN),
22549 gen_rtx_EXPR_LIST (VOIDmode,
22550 gen_rtx_REG (SImode,
22551 GP_ARG_RETURN + 1),
22554 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
22556 return gen_rtx_PARALLEL (DCmode,
22558 gen_rtx_EXPR_LIST (VOIDmode,
22559 gen_rtx_REG (SImode, GP_ARG_RETURN),
22561 gen_rtx_EXPR_LIST (VOIDmode,
22562 gen_rtx_REG (SImode,
22563 GP_ARG_RETURN + 1),
22565 gen_rtx_EXPR_LIST (VOIDmode,
22566 gen_rtx_REG (SImode,
22567 GP_ARG_RETURN + 2),
22569 gen_rtx_EXPR_LIST (VOIDmode,
22570 gen_rtx_REG (SImode,
22571 GP_ARG_RETURN + 3),
22575 mode = TYPE_MODE (valtype);
22576 if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
22577 || POINTER_TYPE_P (valtype))
22578 mode = TARGET_32BIT ? SImode : DImode;
22580 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
22581 /* _Decimal128 must use an even/odd register pair. */
22582 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
22583 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS)
22584 regno = FP_ARG_RETURN;
22585 else if (TREE_CODE (valtype) == COMPLEX_TYPE
22586 && targetm.calls.split_complex_arg)
22587 return rs6000_complex_function_value (mode);
22588 else if (TREE_CODE (valtype) == VECTOR_TYPE
22589 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
22590 && ALTIVEC_VECTOR_MODE (mode))
22591 regno = ALTIVEC_ARG_RETURN;
22592 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
22593 && (mode == DFmode || mode == DCmode
22594 || mode == TFmode || mode == TCmode))
22595 return spe_build_register_parallel (mode, GP_ARG_RETURN);
22597 regno = GP_ARG_RETURN;
22599 return gen_rtx_REG (mode, regno);
22602 /* Define how to find the value returned by a library function
22603 assuming the value has mode MODE. */
22605 rs6000_libcall_value (enum machine_mode mode)
22607 unsigned int regno;
22609 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
22611 /* A long long return value needs to be split under the 32-bit -mpowerpc64 ABI. */
22612 return gen_rtx_PARALLEL (DImode,
22614 gen_rtx_EXPR_LIST (VOIDmode,
22615 gen_rtx_REG (SImode, GP_ARG_RETURN),
22617 gen_rtx_EXPR_LIST (VOIDmode,
22618 gen_rtx_REG (SImode,
22619 GP_ARG_RETURN + 1),
22623 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
22624 /* _Decimal128 must use an even/odd register pair. */
22625 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
22626 else if (SCALAR_FLOAT_MODE_P (mode)
22627 && TARGET_HARD_FLOAT && TARGET_FPRS)
22628 regno = FP_ARG_RETURN;
22629 else if (ALTIVEC_VECTOR_MODE (mode)
22630 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
22631 regno = ALTIVEC_ARG_RETURN;
22632 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
22633 return rs6000_complex_function_value (mode);
22634 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
22635 && (mode == DFmode || mode == DCmode
22636 || mode == TFmode || mode == TCmode))
22637 return spe_build_register_parallel (mode, GP_ARG_RETURN);
22639 regno = GP_ARG_RETURN;
22641 return gen_rtx_REG (mode, regno);
22644 /* Define the offset between two registers, FROM to be eliminated and its
22645 replacement TO, at the start of a routine. */
22647 rs6000_initial_elimination_offset (int from, int to)
22649 rs6000_stack_t *info = rs6000_stack_info ();
22650 HOST_WIDE_INT offset;
22652 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
22653 offset = info->push_p ? 0 : -info->total_size;
22654 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
22656 offset = info->push_p ? 0 : -info->total_size;
22657 if (FRAME_GROWS_DOWNWARD)
22658 offset += info->fixed_size + info->vars_size + info->parm_size;
22660 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
22661 offset = FRAME_GROWS_DOWNWARD
22662 ? info->fixed_size + info->vars_size + info->parm_size
22664 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
22665 offset = info->total_size;
22666 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
22667 offset = info->push_p ? info->total_size : 0;
22668 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
22671 gcc_unreachable ();
22676 /* Return true if TYPE is a SPE or AltiVec opaque type. */
22679 rs6000_is_opaque_type (const_tree type)
22681 return (type == opaque_V2SI_type_node
22682 || type == opaque_V2SF_type_node
22683 || type == opaque_V4SI_type_node);
22687 rs6000_dwarf_register_span (rtx reg)
22692 && (SPE_VECTOR_MODE (GET_MODE (reg))
22693 || (TARGET_E500_DOUBLE
22694 && (GET_MODE (reg) == DFmode || GET_MODE (reg) == DDmode))))
22699 regno = REGNO (reg);
22701 /* The duality of the SPE register size wreaks all kinds of havoc.
22702 This is a way of distinguishing r0 in 32-bits from r0 in
22705 gen_rtx_PARALLEL (VOIDmode,
22708 gen_rtx_REG (SImode, regno + 1200),
22709 gen_rtx_REG (SImode, regno))
22711 gen_rtx_REG (SImode, regno),
22712 gen_rtx_REG (SImode, regno + 1200)));
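/* The regno + 1200 values are pseudo register numbers standing for the
   upper 32-bit halves of the SPE GPRs; rs6000_dbx_register_number and
   rs6000_init_dwarf_reg_sizes_extra below expect this 1200..1231 range
   when emitting DWARF unwind information.  */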
22715 /* Fill in sizes for SPE register high parts in table used by unwinder. */
22718 rs6000_init_dwarf_reg_sizes_extra (tree address)
22723 enum machine_mode mode = TYPE_MODE (char_type_node);
22724 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, 0);
22725 rtx mem = gen_rtx_MEM (BLKmode, addr);
22726 rtx value = gen_int_mode (4, mode);
22728 for (i = 1201; i < 1232; i++)
22730 int column = DWARF_REG_TO_UNWIND_COLUMN (i);
22731 HOST_WIDE_INT offset
22732 = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);
22734 emit_move_insn (adjust_address (mem, mode, offset), value);
22739 /* Map internal gcc register numbers to DWARF2 register numbers. */
22742 rs6000_dbx_register_number (unsigned int regno)
22744 if (regno <= 63 || write_symbols != DWARF2_DEBUG)
22746 if (regno == MQ_REGNO)
22748 if (regno == LR_REGNO)
22750 if (regno == CTR_REGNO)
22752 if (CR_REGNO_P (regno))
22753 return regno - CR0_REGNO + 86;
22754 if (regno == XER_REGNO)
22756 if (ALTIVEC_REGNO_P (regno))
22757 return regno - FIRST_ALTIVEC_REGNO + 1124;
22758 if (regno == VRSAVE_REGNO)
22760 if (regno == VSCR_REGNO)
22762 if (regno == SPE_ACC_REGNO)
22764 if (regno == SPEFSCR_REGNO)
22766 /* SPE high reg number. We get these values of regno from
22767 rs6000_dwarf_register_span. */
22768 gcc_assert (regno >= 1200 && regno < 1232);
22772 /* Target hook for eh_return_filter_mode.  */
22773 static enum machine_mode
22774 rs6000_eh_return_filter_mode (void)
22776 return TARGET_32BIT ? SImode : word_mode;
22779 /* Target hook for scalar_mode_supported_p. */
22781 rs6000_scalar_mode_supported_p (enum machine_mode mode)
22783 if (DECIMAL_FLOAT_MODE_P (mode))
22786 return default_scalar_mode_supported_p (mode);
22789 /* Target hook for vector_mode_supported_p. */
22791 rs6000_vector_mode_supported_p (enum machine_mode mode)
22794 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
22797 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
22800 else if (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (mode))
22807 /* Target hook for invalid_arg_for_unprototyped_fn. */
22808 static const char *
22809 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
22811 return (!rs6000_darwin64_abi
22813 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
22814 && (funcdecl == NULL_TREE
22815 || (TREE_CODE (funcdecl) == FUNCTION_DECL
22816 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
22817 ? N_("AltiVec argument passed to unprototyped function")
22821 /* For TARGET_SECURE_PLT 32-bit PIC code we can avoid the PIC register
22822 setup by using the hidden function __stack_chk_fail_local instead of
22823 calling __stack_chk_fail directly. Otherwise it is better to call
22824 __stack_chk_fail directly. */
22827 rs6000_stack_protect_fail (void)
22829 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
22830 ? default_hidden_stack_protect_fail ()
22831 : default_external_stack_protect_fail ();
22835 rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
22836 int num_operands ATTRIBUTE_UNUSED)
22838 if (rs6000_warn_cell_microcode)
22841 int insn_code_number = recog_memoized (insn);
22842 location_t location = locator_location (INSN_LOCATOR (insn));
22844 /* Punt on insns we cannot recognize. */
22845 if (insn_code_number < 0)
22848 temp = get_insn_template (insn_code_number, insn);
22850 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
22851 warning_at (location, OPT_mwarn_cell_microcode,
22852 "emitting microcode insn %s\t[%s] #%d",
22853 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
22854 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
22855 warning_at (location, OPT_mwarn_cell_microcode,
22856 "emitting conditional microcode insn %s\t[%s] #%d",
22857 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
22861 #include "gt-rs6000.h"