/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "basic-block.h"
#include "integrate.h"
#include "target-def.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "sched-int.h"
#include "tree-flow.h"
#include "tm-constrs.h"
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#include "gstab.h"  /* for N_SLINE */
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif
#define min(A,B) ((A) < (B) ? (A) : (B))
#define max(A,B) ((A) > (B) ? (A) : (B))
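/* Note: as classic function-like macros these evaluate their arguments
   more than once, so they must not be given side-effecting operands
   such as min (x++, y).  */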
/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int first_gp_reg_save;      /* first callee saved GP register used */
  int first_fp_reg_save;      /* first callee saved FP register used */
  int first_altivec_reg_save; /* first callee saved AltiVec register used */
  int lr_save_p;              /* true if the link reg needs to be saved */
  int cr_save_p;              /* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;   /* mask of vec registers to save */
  int push_p;                 /* true if we need to allocate stack space */
  int calls_p;                /* true if the function makes any calls */
  int world_save_p;           /* true if we're saving *everything*:
                                 r13-r31, cr, f14-f31, vrsave, v20-v31 */
  enum rs6000_abi abi;        /* which ABI to use */
  int gp_save_offset;         /* offset to save GP regs from initial SP */
  int fp_save_offset;         /* offset to save FP regs from initial SP */
  int altivec_save_offset;    /* offset to save AltiVec regs from initial SP */
  int lr_save_offset;         /* offset to save LR from initial SP */
  int cr_save_offset;         /* offset to save CR from initial SP */
  int vrsave_save_offset;     /* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;     /* offset to save spe 64-bit gprs */
  int varargs_save_offset;    /* offset to save the varargs registers */
  int ehrd_offset;            /* offset to EH return data */
  int reg_size;               /* register size (4 or 8) */
  HOST_WIDE_INT vars_size;    /* variable save area size */
  int parm_size;              /* outgoing parameter size */
  int save_size;              /* save area size */
  int fixed_size;             /* fixed size of stack frame */
  int gp_size;                /* size of saved GP registers */
  int fp_size;                /* size of saved FP registers */
  int altivec_size;           /* size of saved AltiVec registers */
  int cr_size;                /* size to hold CR if not in save_size */
  int vrsave_size;            /* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;   /* size of altivec alignment padding if
                                 not in save_size */
  int spe_gp_size;            /* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;   /* total bytes allocated for stack */
  int spe_64bit_regs_used;
} rs6000_stack_t;
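/* Illustrative relation among the fields above, as computed in
   rs6000_stack_info below: GPRs from first_gp_reg_save through r31 are
   saved, so gp_size == reg_size * (32 - first_gp_reg_save), and
   likewise fp_size == 8 * (64 - first_fp_reg_save).  */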
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
} machine_function;
/* Target cpu type */
enum processor_type rs6000_cpu;
struct rs6000_cpu_select rs6000_select[3] =
{
  /* switch            name,           tune  arch */
  { (const char *)0,   "--with-cpu=",  1,    1 },
  { (const char *)0,   "-mcpu=",       1,    1 },
  { (const char *)0,   "-mtune=",      1,    0 },
};
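/* For example, -mcpu=970 fills rs6000_select[1].string with "970" and,
   with both flags set to 1, selects 970 as both the tuning model and
   the architecture mask; -mtune=970 (flags 1, 0) changes only the
   tuning model.  */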
/* Always emit branch hint bits.  */
static GTY(()) bool rs6000_always_hint;

/* Schedule instructions for group formation.  */
static GTY(()) bool rs6000_sched_groups;

/* Align branch targets.  */
static GTY(()) bool rs6000_align_branch_targets;

/* Support for -msched-costly-dep option.  */
const char *rs6000_sched_costly_dep_str;
enum rs6000_dependence_cost rs6000_sched_costly_dep;

/* Support for -minsert-sched-nops option.  */
const char *rs6000_sched_insert_nops_str;
enum rs6000_nop_insertion rs6000_sched_insert_nops;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Size of long double.  */
int rs6000_long_double_type_size;

/* IEEE quad extended precision long double.  */
int rs6000_ieeequad;

/* Nonzero to use AltiVec ABI.  */
int rs6000_altivec_abi;

/* Nonzero if we want SPE SIMD instructions.  */
int rs6000_spe;

/* Nonzero if we want SPE ABI extensions.  */
int rs6000_spe_abi;

/* Nonzero to use isel instructions.  */
int rs6000_isel;

/* Nonzero if floating point operations are done in the GPRs.  */
int rs6000_float_gprs = 0;

/* Nonzero if we want Darwin's struct-by-value-in-regs ABI.  */
int rs6000_darwin64_abi;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, which we call
   to get the address of the GOT section.  */
int rs6000_pic_labelno;

/* Which ABI to adhere to */
const char *rs6000_abi_name;

/* Semantics of the small data area */
enum rs6000_sdata_type rs6000_sdata = SDATA_DATA;

/* Which small data model to use */
const char *rs6000_sdata_name = (char *)0;

/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;

/* Bit size of immediate TLS offsets and string from which it is decoded.  */
int rs6000_tls_size = 32;
const char *rs6000_tls_size_string;
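/* Parsed by rs6000_parse_tls_size_option below; e.g. -mtls-size=16
   restricts TLS offsets to a signed 16-bit immediate (16, 32 and 64
   are the accepted values).  */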
/* ABI enumeration available for subtarget to use.  */
enum rs6000_abi rs6000_current_abi;

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

const char *rs6000_debug_name;
int rs6000_debug_stack;  /* debug stack applications */
int rs6000_debug_arg;    /* debug argument handling */

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

const char *rs6000_traceback_name;
static enum {
  traceback_default = 0,
  traceback_none,
  traceback_part,
  traceback_full
} rs6000_traceback;

/* Flag to say the TOC is initialized.  */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

/* Control alignment for fields within structures.  */
/* String from -malign-XXXXX.  */
int rs6000_alignment_flags;
/* True for any options that were explicitly set.  */
static struct {
  bool aix_struct_ret;  /* True if -maix-struct-ret was used.  */
  bool alignment;       /* True if -malign- was used.  */
  bool spe_abi;         /* True if -mabi=spe/no-spe was used.  */
  bool altivec_abi;     /* True if -mabi=altivec/no-altivec used.  */
  bool spe;             /* True if -mspe= was used.  */
  bool float_gprs;      /* True if -mfloat-gprs= was used.  */
  bool isel;            /* True if -misel was used.  */
  bool long_double;     /* True if -mlong-double- was used.  */
  bool ieee;            /* True if -mabi=ieee/ibmlongdouble used.  */
  bool vrsave;          /* True if -mvrsave was used.  */
} rs6000_explicit_options;

struct builtin_description
{
  /* mask is not const because we're going to alter it below.  This
     nonsense will go away when we rewrite the -march infrastructure
     to give us more target flag bits.  */
  unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;        /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;        /* cost of DImode multiplication.  */
  const int divsi;        /* cost of SImode division.  */
  const int divdi;        /* cost of DImode division.  */
  const int fp;           /* cost of simple SFmode and DFmode insns.  */
  const int dmul;         /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;         /* cost of SFmode division (fdivs).  */
  const int ddiv;         /* cost of DFmode division (fdiv).  */
  const int cache_line_size;  /* cache line size in bytes.  */
  const int l1_cache_size;    /* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;    /* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
                                        operations.  */
};

const struct processor_costs *rs6000_cost;
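/* The tables below feed rs6000_rtx_costs; e.g. an SImode multiply by a
   short constant is charged rs6000_cost->mulsi_const9.  All entries
   are scaled into add-equivalents via COSTS_N_INSNS.  */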
/* Processor costs (relative to an add) */

/* Instruction size costs on 32-bit processors.  */
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */

/* Instruction size costs on 64-bit processors.  */
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
/* Instruction costs on RIOS1 processors.  */
struct processor_costs rios1_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (2),    /* fp */
  COSTS_N_INSNS (2),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (19),   /* ddiv */
  128,                  /* cache line size */

/* Instruction costs on RIOS2 processors.  */
struct processor_costs rios2_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (13),   /* divsi */
  COSTS_N_INSNS (13),   /* divdi */
  COSTS_N_INSNS (2),    /* fp */
  COSTS_N_INSNS (2),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  256,                  /* cache line size */

/* Instruction costs on RS64A processors.  */
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,                  /* cache line size */

/* Instruction costs on MPCCORE processors.  */
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,                   /* cache line size */

/* Instruction costs on PPC403 processors.  */
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */

/* Instruction costs on PPC405 processors.  */
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */

/* Instruction costs on PPC440 processors.  */
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */

/* Instruction costs on PPC601 processors.  */
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */

/* Instruction costs on PPC603 processors.  */
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */

/* Instruction costs on PPC604 processors.  */
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */

/* Instruction costs on PPC604e processors.  */
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */

/* Instruction costs on PPC620 processors.  */
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,                  /* cache line size */

/* Instruction costs on PPC630 processors.  */
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,                  /* cache line size */

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
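/* Since COSTS_N_INSNS (N) expands to N * 4, integer division inside the
   macro expresses half-add granularity: COSTS_N_INSNS (9/2) + 2 below
   evaluates to 4 * 4 + 2 == 18, i.e. 4.5 add-equivalents for mulsi.  */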
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),      /* mulsi_const */
  COSTS_N_INSNS (6/2),      /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),     /* divsi */
  COSTS_N_INSNS (70/2),     /* divdi */
  COSTS_N_INSNS (10/2),     /* fp */
  COSTS_N_INSNS (10/2),     /* dmul */
  COSTS_N_INSNS (74/2),     /* sdiv */
  COSTS_N_INSNS (74/2),     /* ddiv */
  128,                      /* cache line size */

/* Instruction costs on PPC750 and PPC7400 processors.  */
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */

/* Instruction costs on PPC7450 processors.  */
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,                   /* cache line size */

/* Instruction costs on PPC8540 processors.  */
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,                   /* cache line size */
  1,                    /* prefetch streams */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  1,                    /* prefetch streams */
};

/* Instruction costs on PPCE500MC processors.  */
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  1,                    /* prefetch streams */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,                  /* cache line size */
  8,                    /* prefetch streams */
};

/* Instruction costs on POWER6 processors.  */
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  16,                   /* prefetch streams */
};
static bool rs6000_function_ok_for_sibcall (tree, tree);
static const char *rs6000_invalid_within_doloop (const_rtx);
static bool rs6000_legitimate_address_p (enum machine_mode, rtx, bool);
static rtx rs6000_generate_compare (rtx, enum machine_mode);
static void rs6000_emit_stack_tie (void);
static void rs6000_frame_related (rtx, rtx, HOST_WIDE_INT, rtx, rtx);
static bool spe_func_has_64bit_regs_p (void);
static void emit_frame_save (rtx, rtx, enum machine_mode, unsigned int,
static rtx gen_frame_mem_offset (enum machine_mode, rtx, int);
static void rs6000_emit_allocate_stack (HOST_WIDE_INT, int, int);
static unsigned rs6000_hash_constant (rtx);
static unsigned toc_hash_function (const void *);
static int toc_hash_eq (const void *, const void *);
static bool constant_pool_expr_p (rtx);
static bool legitimate_small_data_p (enum machine_mode, rtx);
static bool legitimate_lo_sum_address_p (enum machine_mode, rtx, int);
static struct machine_function * rs6000_init_machine_status (void);
static bool rs6000_assemble_integer (rtx, unsigned int, int);
static bool no_global_regs_above (int, bool);
#ifdef HAVE_GAS_HIDDEN
static void rs6000_assemble_visibility (tree, int);
#endif
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static bool rs6000_ms_bitfield_layout_p (const_tree);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static void rs6000_eliminate_indexed_memrefs (rtx operands[2]);
static const char *rs6000_mangle_type (const_tree);
EXPORTED_CONST struct attribute_spec rs6000_attribute_table[];
static void rs6000_set_default_type_attributes (tree);
static rtx rs6000_savres_routine_sym (rs6000_stack_t *, bool, bool, bool);
static rtx rs6000_emit_stack_reset (rs6000_stack_t *, rtx, rtx, int, bool);
static rtx rs6000_make_savres_rtx (rs6000_stack_t *, rtx, int,
                                   enum machine_mode, bool, bool, bool);
static bool rs6000_reg_live_or_pic_offset_p (int);
static int rs6000_savres_strategy (rs6000_stack_t *, bool, int, int);
static void rs6000_restore_saved_cr (rtx, int);
static void rs6000_output_function_prologue (FILE *, HOST_WIDE_INT);
static void rs6000_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void rs6000_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static bool rs6000_return_in_memory (const_tree, const_tree);
static void rs6000_file_start (void);
static int rs6000_elf_reloc_rw_mask (void);
static void rs6000_elf_asm_out_constructor (rtx, int);
static void rs6000_elf_asm_out_destructor (rtx, int);
static void rs6000_elf_end_indicate_exec_stack (void) ATTRIBUTE_UNUSED;
static void rs6000_elf_asm_init_sections (void);
static section *rs6000_elf_select_rtx_section (enum machine_mode, rtx,
                                               unsigned HOST_WIDE_INT);
static void rs6000_elf_encode_section_info (tree, rtx, int)
static bool rs6000_use_blocks_for_constant_p (enum machine_mode, const_rtx);
static void rs6000_alloc_sdmode_stack_slot (void);
static void rs6000_instantiate_decls (void);
static void rs6000_xcoff_asm_output_anchor (rtx);
static void rs6000_xcoff_asm_globalize_label (FILE *, const char *);
static void rs6000_xcoff_asm_init_sections (void);
static int rs6000_xcoff_reloc_rw_mask (void);
static void rs6000_xcoff_asm_named_section (const char *, unsigned int, tree);
static section *rs6000_xcoff_select_section (tree, int,
                                             unsigned HOST_WIDE_INT);
static void rs6000_xcoff_unique_section (tree, int);
static section *rs6000_xcoff_select_rtx_section
  (enum machine_mode, rtx, unsigned HOST_WIDE_INT);
static const char * rs6000_xcoff_strip_name_encoding (const char *);
static unsigned int rs6000_xcoff_section_type_flags (tree, const char *, int);
static void rs6000_xcoff_file_start (void);
static void rs6000_xcoff_file_end (void);
static int rs6000_variable_issue (FILE *, int, rtx, int);
static bool rs6000_rtx_costs (rtx, int, int, int *, bool);
static int rs6000_adjust_cost (rtx, rtx, rtx, int);
static void rs6000_sched_init (FILE *, int, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_branch_slot_insn (rtx);
static bool is_load_insn (rtx);
static rtx get_store_dest (rtx pat);
static bool is_store_insn (rtx);
static bool set_to_load_agen (rtx, rtx);
static bool adjacent_mem_locations (rtx, rtx);
static int rs6000_adjust_priority (rtx, int);
static int rs6000_issue_rate (void);
static bool rs6000_is_costly_dependence (dep_t, int, int);
static rtx get_next_active_insn (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static bool is_costly_group (rtx *, rtx);
static int force_new_group (int, FILE *, rtx *, rtx, bool *, int, int *);
static int redefine_groups (FILE *, int, rtx, rtx);
static int pad_groups (FILE *, int, rtx, rtx);
static void rs6000_sched_finish (FILE *, int);
static int rs6000_sched_reorder (FILE *, int, rtx *, int *, int);
static int rs6000_sched_reorder2 (FILE *, int, rtx *, int *, int);
static int rs6000_use_sched_lookahead (void);
static int rs6000_use_sched_lookahead_guard (rtx);
static void * rs6000_alloc_sched_context (void);
static void rs6000_init_sched_context (void *, bool);
static void rs6000_set_sched_context (void *);
static void rs6000_free_sched_context (void *);
static tree rs6000_builtin_reciprocal (unsigned int, bool, bool);
static tree rs6000_builtin_mask_for_load (void);
static tree rs6000_builtin_mul_widen_even (tree);
static tree rs6000_builtin_mul_widen_odd (tree);
static tree rs6000_builtin_conversion (unsigned int, tree);
static tree rs6000_builtin_vec_perm (tree, tree *);
static void def_builtin (int, const char *, tree, int);
static bool rs6000_vector_alignment_reachable (const_tree, bool);
static void rs6000_init_builtins (void);
static rtx rs6000_expand_unop_builtin (enum insn_code, tree, rtx);
static rtx rs6000_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx rs6000_expand_ternop_builtin (enum insn_code, tree, rtx);
static rtx rs6000_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void altivec_init_builtins (void);
static void rs6000_common_init_builtins (void);
static void rs6000_init_libfuncs (void);

static void paired_init_builtins (void);
static rtx paired_expand_builtin (tree, rtx, bool *);
static rtx paired_expand_lv_builtin (enum insn_code, tree, rtx);
static rtx paired_expand_stv_builtin (enum insn_code, tree);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);

static void enable_mask_for_builtins (struct builtin_description *, int,
                                      enum rs6000_builtins,
                                      enum rs6000_builtins);
static void spe_init_builtins (void);
static rtx spe_expand_builtin (tree, rtx, bool *);
static rtx spe_expand_stv_builtin (enum insn_code, tree);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void debug_stack_info (rs6000_stack_t *);

static rtx altivec_expand_builtin (tree, rtx, bool *);
static rtx altivec_expand_ld_builtin (tree, rtx, bool *);
static rtx altivec_expand_st_builtin (tree, rtx, bool *);
static rtx altivec_expand_dst_builtin (tree, rtx, bool *);
static rtx altivec_expand_abs_builtin (enum insn_code, tree, rtx);
static rtx altivec_expand_predicate_builtin (enum insn_code,
                                             const char *, tree, rtx);
static rtx altivec_expand_stv_builtin (enum insn_code, tree);
static rtx altivec_expand_vec_init_builtin (tree, tree, rtx);
static rtx altivec_expand_vec_set_builtin (tree);
static rtx altivec_expand_vec_ext_builtin (tree, rtx);
static int get_element_number (tree, tree);
static bool rs6000_handle_option (size_t, const char *, int);
static void rs6000_parse_tls_size_option (void);
static void rs6000_parse_yes_no_option (const char *, const char *, int *);
static int first_altivec_reg_to_save (void);
static unsigned int compute_vrsave_mask (void);
static void compute_save_world_info (rs6000_stack_t *info_ptr);
static void is_altivec_return_reg (rtx, void *);
static rtx generate_set_vrsave (rtx, rs6000_stack_t *, int);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_dwarf_register_span (rtx);
static void rs6000_init_dwarf_reg_sizes_extra (tree);
static rtx rs6000_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static rtx rs6000_tls_get_addr (void);
static rtx rs6000_got_sym (void);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static const char *rs6000_get_some_local_dynamic_name (void);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_complex_function_value (enum machine_mode);
static rtx rs6000_spe_function_arg (CUMULATIVE_ARGS *,
                                    enum machine_mode, tree);
static void rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *,
static void rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *,
                                                        tree, HOST_WIDE_INT);
static void rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *,
static void rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *,
                                                const_tree, HOST_WIDE_INT,
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree, int, bool);
static rtx rs6000_mixed_function_arg (enum machine_mode, tree, int);
static void rs6000_move_block_from_reg (int regno, rtx x, int nregs);
static void setup_incoming_varargs (CUMULATIVE_ARGS *,
                                    enum machine_mode, tree,
static bool rs6000_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
static int rs6000_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
static const char *invalid_arg_for_unprototyped_fn (const_tree, const_tree, const_tree);

static void macho_branch_islands (void);
static int no_previous_def (tree function_name);
static tree get_prev_label (tree function_name);
static void rs6000_darwin_file_start (void);

static tree rs6000_build_builtin_va_list (void);
static void rs6000_va_start (tree, rtx);
static tree rs6000_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool rs6000_must_pass_in_stack (enum machine_mode, const_tree);
static bool rs6000_scalar_mode_supported_p (enum machine_mode);
static bool rs6000_vector_mode_supported_p (enum machine_mode);
static int get_vec_cmp_insn (enum rtx_code, enum machine_mode,
static rtx rs6000_emit_vector_compare (enum rtx_code, rtx, rtx,
static int get_vsel_insn (enum machine_mode);
static void rs6000_emit_vector_select (rtx, rtx, rtx, rtx);
static tree rs6000_stack_protect_fail (void);
const int INSN_NOT_AVAILABLE = -1;
static enum machine_mode rs6000_eh_return_filter_mode (void);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "xer",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "xer",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};
#endif
#ifndef MASK_STRICT_ALIGN
#define MASK_STRICT_ALIGN 0
#endif
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif
/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
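/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) == 0x80000000
   (%v0 in the most significant bit), while
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) == 1 (%v31 in the least
   significant bit).  */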
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit machines.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#ifdef HAVE_GAS_HIDDEN
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_tls_referenced_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN
#define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN rs6000_builtin_mul_widen_even
#undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD
#define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD rs6000_builtin_mul_widen_odd
#undef TARGET_VECTORIZE_BUILTIN_CONVERSION
#define TARGET_VECTORIZE_BUILTIN_CONVERSION rs6000_builtin_conversion
#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM rs6000_builtin_vec_perm

#undef TARGET_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION rs6000_handle_option

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT)

#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif
/* Use a 32-bit anchor range.  This leads to sequences like:

       addis tmp,anchor,high
       lwz dest,low(tmp)

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;
/* Value is 1 if hard register REGNO can hold a value of machine-mode
   MODE.  */
static int
rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
  /* The GPRs can hold any mode, but values bigger than one register
     cannot go past R31.  */
  if (INT_REGNO_P (regno))
    return INT_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1);
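  /* For example, in 32-bit mode a DImode value needs two GPRs, so
     regno 31 is rejected: 31 + 2 - 1 == 32, which is no longer a GPR.  */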
  /* The float registers can only hold floating modes and DImode.
     This excludes the 32-bit decimal float mode for now.  */
  if (FP_REGNO_P (regno))
    return
      ((SCALAR_FLOAT_MODE_P (mode)
        && (mode != TDmode || (regno % 2) == 0)
        && FP_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1))
       || (GET_MODE_CLASS (mode) == MODE_INT
           && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
       || (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
           && PAIRED_VECTOR_MODE (mode)));
  /* The CR register can only hold CC modes.  */
  if (CR_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;

  if (XER_REGNO_P (regno))
    return mode == PSImode;

  /* AltiVec only in AltiVec registers.  */
  if (ALTIVEC_REGNO_P (regno))
    return ALTIVEC_VECTOR_MODE (mode);

  /* ...but GPRs can hold SIMD data on the SPE in one register.  */
  if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    return 1;

  /* We cannot put TImode anywhere except general register and it must be
     able to fit within the register set.  */

  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
}
/* Initialize rs6000_hard_regno_mode_ok_p table.  */
static void
rs6000_init_hard_regno_mode_ok (void)
{
  int r, m;

  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      if (rs6000_hard_regno_mode_ok (r, (enum machine_mode) m))
        rs6000_hard_regno_mode_ok_p[m][r] = true;
}
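/* With the table precomputed, the HARD_REGNO_MODE_OK target macro can
   reduce to a single lookup, rs6000_hard_regno_mode_ok_p[mode][regno],
   matching the mode-major layout declared above.  */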
/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS.  */
static void
darwin_rs6000_override_options (void)
{
  /* The Darwin ABI always includes AltiVec, can't be (validly) turned
     off.  */
  rs6000_altivec_abi = 1;
  TARGET_ALTIVEC_VRSAVE = 1;
  if (DEFAULT_ABI == ABI_DARWIN)
    {
      if (MACHO_DYNAMIC_NO_PIC_P)
        {
          if (flag_pic)
            warning (0, "-mdynamic-no-pic overrides -fpic or -fPIC");
          flag_pic = 0;
        }
      else if (flag_pic == 1)
        flag_pic = 2;
    }

  if (TARGET_64BIT && ! TARGET_POWERPC64)
    {
      target_flags |= MASK_POWERPC64;
      warning (0, "-m64 requires PowerPC64 architecture, enabling");
    }

  if (flag_mkernel)
    {
      rs6000_default_long_calls = 1;
      target_flags |= MASK_SOFT_FLOAT;
    }
  /* Make -m64 imply -maltivec.  Darwin's 64-bit ABI includes
     AltiVec.  */
  if (!flag_mkernel && !flag_apple_kext
      && TARGET_64BIT
      && ! (target_flags_explicit & MASK_ALTIVEC))
    target_flags |= MASK_ALTIVEC;

  /* Unless the user (not the configurer) has explicitly overridden
     it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
     G4 unless targeting the kernel.  */
  if (!flag_mkernel && !flag_apple_kext
      && strverscmp (darwin_macosx_version_min, "10.5") >= 0
      && ! (target_flags_explicit & MASK_ALTIVEC)
      && ! rs6000_select[1].string)
    target_flags |= MASK_ALTIVEC;
}
/* If not otherwise specified by a target, make 'long double' equivalent to
   'double'.  */

#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
#endif
/* Override command line options.  Mostly we process the processor
   type and sometimes adjust other TARGET_ options.  */
void
rs6000_override_options (const char *default_cpu)
{
  size_t i, j;
  struct rs6000_cpu_select *ptr;
  int set_masks;

  /* Simplifications for entries below.  */
  enum {
    POWERPC_BASE_MASK = MASK_POWERPC | MASK_NEW_MNEMONICS,
    POWERPC_7400_MASK = POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_ALTIVEC
  };
  /* This table occasionally claims that a processor does not support
     a particular feature even though it does, but the feature is slower
     than the alternative.  Thus, it shouldn't be relied on as a
     complete description of the processor's support.

     Please keep this list in order, and don't forget to update the
     documentation in invoke.texi when adding a new processor or
     feature.  */
  static struct ptt
    {
      const char *const name;               /* Canonical processor name.  */
      const enum processor_type processor;  /* Processor type enum value.  */
      const int target_enable;              /* Target flags to enable.  */
    } const processor_target_table[]
    = {{"401", PROCESSOR_PPC403, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
       {"403", PROCESSOR_PPC403,
        POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_STRICT_ALIGN},
       {"405", PROCESSOR_PPC405,
        POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
       {"405fp", PROCESSOR_PPC405,
        POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
       {"440", PROCESSOR_PPC440,
        POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
       {"440fp", PROCESSOR_PPC440,
        POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
       {"464", PROCESSOR_PPC440,
        POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
       {"464fp", PROCESSOR_PPC440,
        POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
       {"505", PROCESSOR_MPCCORE, POWERPC_BASE_MASK},
       {"601", PROCESSOR_PPC601,
        MASK_POWER | POWERPC_BASE_MASK | MASK_MULTIPLE | MASK_STRING},
       {"602", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
       {"603", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
       {"603e", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
       {"604", PROCESSOR_PPC604, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
       {"604e", PROCESSOR_PPC604e, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
       {"620", PROCESSOR_PPC620,
        POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
       {"630", PROCESSOR_PPC630,
        POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
       {"740", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
       {"7400", PROCESSOR_PPC7400, POWERPC_7400_MASK},
       {"7450", PROCESSOR_PPC7450, POWERPC_7400_MASK},
       {"750", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
       {"801", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
       {"821", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
       {"823", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
       {"8540", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN},
       /* 8548 has a dummy entry for now.  */
       {"8548", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN},
       {"e300c2", PROCESSOR_PPCE300C2, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
       {"e300c3", PROCESSOR_PPCE300C3, POWERPC_BASE_MASK},
       {"e500mc", PROCESSOR_PPCE500MC, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
       {"860", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
       {"970", PROCESSOR_POWER4,
        POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
       {"cell", PROCESSOR_CELL,
        POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
       {"common", PROCESSOR_COMMON, MASK_NEW_MNEMONICS},
       {"ec603e", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
       {"G3", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
       {"G4", PROCESSOR_PPC7450, POWERPC_7400_MASK},
       {"G5", PROCESSOR_POWER4,
        POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
       {"power", PROCESSOR_POWER, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
       {"power2", PROCESSOR_POWER,
        MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING},
       {"power3", PROCESSOR_PPC630,
        POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
       {"power4", PROCESSOR_POWER4,
        POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
        | MASK_MFCRF},
       {"power5", PROCESSOR_POWER5,
        POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
        | MASK_MFCRF | MASK_POPCNTB},
       {"power5+", PROCESSOR_POWER5,
        POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
        | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND},
       {"power6", PROCESSOR_POWER6,
        POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
        | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP},
       {"power6x", PROCESSOR_POWER6,
        POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
        | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP
        | MASK_MFPGPR},
       {"power7", PROCESSOR_POWER5,
        POWERPC_7400_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_MFCRF
        | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP},
       {"powerpc", PROCESSOR_POWERPC, POWERPC_BASE_MASK},
       {"powerpc64", PROCESSOR_POWERPC64,
        POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
       {"rios", PROCESSOR_RIOS1, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
       {"rios1", PROCESSOR_RIOS1, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
       {"rios2", PROCESSOR_RIOS2,
        MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING},
       {"rsc", PROCESSOR_PPC601, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
       {"rsc1", PROCESSOR_PPC601, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
       {"rs64", PROCESSOR_RS64A,
        POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64}
    };
  const size_t ptt_size = ARRAY_SIZE (processor_target_table);

  /* Some OSs don't support saving the high part of 64-bit registers on
     context switch.  Other OSs don't support saving Altivec registers.
     On those OSs, we don't touch the MASK_POWERPC64 or MASK_ALTIVEC
     settings; if the user wants either, the user must explicitly specify
     them and we won't interfere with the user's specification.  */

  enum {
    POWER_MASKS = MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING,
    POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT | MASK_STRICT_ALIGN
                     | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
                     | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
                     | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP)
  };

  set_masks = POWER_MASKS | POWERPC_MASKS | MASK_SOFT_FLOAT;
#ifdef OS_MISSING_POWERPC64
  if (OS_MISSING_POWERPC64)
    set_masks &= ~MASK_POWERPC64;
#endif
#ifdef OS_MISSING_ALTIVEC
  if (OS_MISSING_ALTIVEC)
    set_masks &= ~MASK_ALTIVEC;
#endif
  /* Don't override by the processor default if given explicitly.  */
  set_masks &= ~target_flags_explicit;
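  /* E.g. an explicit -mno-altivec puts MASK_ALTIVEC into
     target_flags_explicit, so it is cleared from set_masks here and the
     -mcpu entry's MASK_ALTIVEC bit below cannot override the user.  */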
  /* Identify the processor type.  */
  rs6000_select[0].string = default_cpu;
  rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;

  for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
    {
      ptr = &rs6000_select[i];
      if (ptr->string != (char *)0 && ptr->string[0] != '\0')
        {
          for (j = 0; j < ptt_size; j++)
            if (! strcmp (ptr->string, processor_target_table[j].name))
              {
                if (ptr->set_tune_p)
                  rs6000_cpu = processor_target_table[j].processor;

                if (ptr->set_arch_p)
                  {
                    target_flags &= ~set_masks;
                    target_flags |= (processor_target_table[j].target_enable
                                     & set_masks);
                  }
                break;
              }

          if (j == ptt_size)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }
  if ((TARGET_E500 || rs6000_cpu == PROCESSOR_PPCE500MC)
      && !rs6000_explicit_options.isel)
    rs6000_isel = 1;

  if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
      || rs6000_cpu == PROCESSOR_PPCE500MC)
    {
      if (TARGET_ALTIVEC)
        error ("AltiVec not supported in this target");
      if (TARGET_SPE)
        error ("SPE not supported in this target");
    }
  /* Disable Cell microcode if we are optimizing for the Cell
     and not optimizing for size.  */
  if (rs6000_gen_cell_microcode == -1)
    rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
                                  && !optimize_size);

  /* If we are optimizing big endian systems for space, use the load/store
     multiple and string instructions unless we are not generating
     Cell microcode.  */
  if (BYTES_BIG_ENDIAN && optimize_size && !rs6000_gen_cell_microcode)
    target_flags |= ~target_flags_explicit & (MASK_MULTIPLE | MASK_STRING);
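  /* The ~target_flags_explicit filter turns on only the bits the user
     left unspecified, so an explicit -mno-multiple or -mno-string is
     still respected here.  */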
1620 /* Don't allow -mmultiple or -mstring on little endian systems
1621 unless the cpu is a 750, because the hardware doesn't support the
1622 instructions used in little endian mode, and they cause an alignment
1623 trap. The 750 does not cause an alignment trap (except when the
1624 target is unaligned). */
1626 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
1628 if (TARGET_MULTIPLE)
1630 target_flags &= ~MASK_MULTIPLE;
1631 if ((target_flags_explicit & MASK_MULTIPLE) != 0)
1632 warning (0, "-mmultiple is not supported on little endian systems");
1637 target_flags &= ~MASK_STRING;
1638 if ((target_flags_explicit & MASK_STRING) != 0)
1639 warning (0, "-mstring is not supported on little endian systems");
1643 /* Set debug flags */
1644 if (rs6000_debug_name)
1646 if (! strcmp (rs6000_debug_name, "all"))
1647 rs6000_debug_stack = rs6000_debug_arg = 1;
1648 else if (! strcmp (rs6000_debug_name, "stack"))
1649 rs6000_debug_stack = 1;
1650 else if (! strcmp (rs6000_debug_name, "arg"))
1651 rs6000_debug_arg = 1;
1653 error ("unknown -mdebug-%s switch", rs6000_debug_name);
1656 if (rs6000_traceback_name)
1658 if (! strncmp (rs6000_traceback_name, "full", 4))
1659 rs6000_traceback = traceback_full;
1660 else if (! strncmp (rs6000_traceback_name, "part", 4))
1661 rs6000_traceback = traceback_part;
1662 else if (! strncmp (rs6000_traceback_name, "no", 2))
1663 rs6000_traceback = traceback_none;
1665 error ("unknown -mtraceback arg %qs; expecting %<full%>, %<partial%> or %<none%>",
1666 rs6000_traceback_name);
1669 if (!rs6000_explicit_options.long_double)
1670 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
1672 #ifndef POWERPC_LINUX
1673 if (!rs6000_explicit_options.ieee)
1674 rs6000_ieeequad = 1;
1677 /* Enable Altivec ABI for AIX -maltivec. */
1678 if (TARGET_XCOFF && TARGET_ALTIVEC)
1679 rs6000_altivec_abi = 1;
1681 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
1682 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
1683 be explicitly overridden in either case. */
1686 if (!rs6000_explicit_options.altivec_abi
1687 && (TARGET_64BIT || TARGET_ALTIVEC))
1688 rs6000_altivec_abi = 1;
1690 /* Enable VRSAVE for AltiVec ABI, unless explicitly overridden. */
1691 if (!rs6000_explicit_options.vrsave)
1692 TARGET_ALTIVEC_VRSAVE = rs6000_altivec_abi;
1695 /* Set the Darwin64 ABI as default for 64-bit Darwin. */
1696 if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
1698 rs6000_darwin64_abi = 1;
1700 darwin_one_byte_bool = 1;
1702 /* Default to natural alignment, for better performance. */
1703 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
1706 /* Place FP constants in the constant pool instead of TOC
1707 if section anchors are enabled. */
1708 if (flag_section_anchors)
1709 TARGET_NO_FP_IN_TOC = 1;
1711 /* Handle -mtls-size option. */
1712 rs6000_parse_tls_size_option ();
1714 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1715 SUBTARGET_OVERRIDE_OPTIONS;
1717 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
1718 SUBSUBTARGET_OVERRIDE_OPTIONS;
1720 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
1721 SUB3TARGET_OVERRIDE_OPTIONS;
1724 if (TARGET_E500 || rs6000_cpu == PROCESSOR_PPCE500MC)
1726 /* The e500 and e500mc do not have string instructions, and we set
1727 MASK_STRING above when optimizing for size. */
1728 if ((target_flags & MASK_STRING) != 0)
1729 target_flags = target_flags & ~MASK_STRING;
1731 else if (rs6000_select[1].string != NULL)
1733 /* For the powerpc-eabispe configuration, we set all these by
1734 default, so let's unset them if we manually set another
1735 CPU that is not the E500. */
1736 if (!rs6000_explicit_options.spe_abi)
1737 rs6000_spe_abi = 0;
1738 if (!rs6000_explicit_options.spe)
1739 rs6000_spe = 0;
1740 if (!rs6000_explicit_options.float_gprs)
1741 rs6000_float_gprs = 0;
1742 if (!rs6000_explicit_options.isel)
1743 rs6000_isel = 0;
1746 /* Detect invalid option combinations with E500. */
1749 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
1750 && rs6000_cpu != PROCESSOR_POWER5
1751 && rs6000_cpu != PROCESSOR_POWER6
1752 && rs6000_cpu != PROCESSOR_CELL);
1753 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
1754 || rs6000_cpu == PROCESSOR_POWER5);
1755 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
1756 || rs6000_cpu == PROCESSOR_POWER5
1757 || rs6000_cpu == PROCESSOR_POWER6);
1759 rs6000_sched_restricted_insns_priority
1760 = (rs6000_sched_groups ? 1 : 0);
1762 /* Handle -msched-costly-dep option. */
1763 rs6000_sched_costly_dep
1764 = (rs6000_sched_groups ? store_to_load_dep_costly : no_dep_costly);
1766 if (rs6000_sched_costly_dep_str)
1768 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
1769 rs6000_sched_costly_dep = no_dep_costly;
1770 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
1771 rs6000_sched_costly_dep = all_deps_costly;
1772 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
1773 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
1774 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
1775 rs6000_sched_costly_dep = store_to_load_dep_costly;
1777 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
1778 atoi (rs6000_sched_costly_dep_str));
1781 /* Handle -minsert-sched-nops option. */
1782 rs6000_sched_insert_nops
1783 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
1785 if (rs6000_sched_insert_nops_str)
1787 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
1788 rs6000_sched_insert_nops = sched_finish_none;
1789 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
1790 rs6000_sched_insert_nops = sched_finish_pad_groups;
1791 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
1792 rs6000_sched_insert_nops = sched_finish_regroup_exact;
1794 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
1795 atoi (rs6000_sched_insert_nops_str));
1798 #ifdef TARGET_REGNAMES
1799 /* If the user desires alternate register names, copy in the
1800 alternate names now. */
1801 if (TARGET_REGNAMES)
1802 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
1805 /* Set aix_struct_return last, after the ABI is determined.
1806 If -maix-struct-return or -msvr4-struct-return was explicitly
1807 used, don't override with the ABI default. */
1808 if (!rs6000_explicit_options.aix_struct_ret)
1809 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
1811 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
1812 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
1815 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
1817 /* We can only guarantee the availability of DI pseudo-ops when
1818 assembling for 64-bit targets. */
1821 targetm.asm_out.aligned_op.di = NULL;
1822 targetm.asm_out.unaligned_op.di = NULL;
1825 /* Set branch target alignment, if not optimizing for size. */
1828 /* Cell wants to be 8-byte aligned for dual issue. */
1829 if (rs6000_cpu == PROCESSOR_CELL)
1831 if (align_functions <= 0)
1832 align_functions = 8;
1833 if (align_jumps <= 0)
1834 align_jumps = 8;
1835 if (align_loops <= 0)
1836 align_loops = 8;
1838 if (rs6000_align_branch_targets)
1840 if (align_functions <= 0)
1841 align_functions = 16;
1842 if (align_jumps <= 0)
1843 align_jumps = 16;
1844 if (align_loops <= 0)
1845 align_loops = 16;
1847 if (align_jumps_max_skip <= 0)
1848 align_jumps_max_skip = 15;
1849 if (align_loops_max_skip <= 0)
1850 align_loops_max_skip = 15;
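/* Illustrative effect (assuming a GAS-style assembler on an ELF target):
   16-byte alignment with max_skip 15 is emitted as roughly

     .p2align 4,,15

   i.e. align to 16 bytes, but never insert more than 15 padding bytes.  */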
1853 /* Arrange to save and restore machine status around nested functions. */
1854 init_machine_status = rs6000_init_machine_status;
1856 /* We should always be splitting complex arguments, but we can't break
1857 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
1858 if (DEFAULT_ABI != ABI_AIX)
1859 targetm.calls.split_complex_arg = NULL;
1861 /* Initialize rs6000_cost with the appropriate target costs. */
1863 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
1867 case PROCESSOR_RIOS1:
1868 rs6000_cost = &rios1_cost;
1871 case PROCESSOR_RIOS2:
1872 rs6000_cost = &rios2_cost;
1875 case PROCESSOR_RS64A:
1876 rs6000_cost = &rs64a_cost;
1879 case PROCESSOR_MPCCORE:
1880 rs6000_cost = &mpccore_cost;
1883 case PROCESSOR_PPC403:
1884 rs6000_cost = &ppc403_cost;
1887 case PROCESSOR_PPC405:
1888 rs6000_cost = &ppc405_cost;
1891 case PROCESSOR_PPC440:
1892 rs6000_cost = &ppc440_cost;
1895 case PROCESSOR_PPC601:
1896 rs6000_cost = &ppc601_cost;
1899 case PROCESSOR_PPC603:
1900 rs6000_cost = &ppc603_cost;
1903 case PROCESSOR_PPC604:
1904 rs6000_cost = &ppc604_cost;
1907 case PROCESSOR_PPC604e:
1908 rs6000_cost = &ppc604e_cost;
1911 case PROCESSOR_PPC620:
1912 rs6000_cost = &ppc620_cost;
1915 case PROCESSOR_PPC630:
1916 rs6000_cost = &ppc630_cost;
1919 case PROCESSOR_CELL:
1920 rs6000_cost = &ppccell_cost;
1923 case PROCESSOR_PPC750:
1924 case PROCESSOR_PPC7400:
1925 rs6000_cost = &ppc750_cost;
1928 case PROCESSOR_PPC7450:
1929 rs6000_cost = &ppc7450_cost;
1932 case PROCESSOR_PPC8540:
1933 rs6000_cost = &ppc8540_cost;
1936 case PROCESSOR_PPCE300C2:
1937 case PROCESSOR_PPCE300C3:
1938 rs6000_cost = &ppce300c2c3_cost;
1941 case PROCESSOR_PPCE500MC:
1942 rs6000_cost = &ppce500mc_cost;
1945 case PROCESSOR_POWER4:
1946 case PROCESSOR_POWER5:
1947 rs6000_cost = &power4_cost;
1950 case PROCESSOR_POWER6:
1951 rs6000_cost = &power6_cost;
1958 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
1959 set_param_value ("simultaneous-prefetches",
1960 rs6000_cost->simultaneous_prefetches);
1961 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
1962 set_param_value ("l1-cache-size", rs6000_cost->l1_cache_size);
1963 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
1964 set_param_value ("l1-cache-line-size", rs6000_cost->cache_line_size);
1965 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
1966 set_param_value ("l2-cache-size", rs6000_cost->l2_cache_size);
1968 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
1969 can be optimized to ap = __builtin_next_arg (0). */
1970 if (DEFAULT_ABI != ABI_V4)
1971 targetm.expand_builtin_va_start = NULL;
1973 /* Set up single/double float flags.
1974 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
1975 then set both flags. */
1976 if (TARGET_HARD_FLOAT && TARGET_FPRS
1977 && rs6000_single_float == 0 && rs6000_double_float == 0)
1978 rs6000_single_float = rs6000_double_float = 1;
1980 /* Reset single and double FP flags if target is E500. */
1983 rs6000_single_float = rs6000_double_float = 0;
1984 if (TARGET_E500_SINGLE)
1985 rs6000_single_float = 1;
1986 if (TARGET_E500_DOUBLE)
1987 rs6000_single_float = rs6000_double_float = 1;
1990 /* If not explicitly specified via option, decide whether to generate indexed
1991 load/store instructions. */
1992 if (TARGET_AVOID_XFORM == -1)
1993 /* Avoid indexed addressing when targeting Power6 in order to avoid
1994 the DERAT mispredict penalty. */
1995 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB);
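/* For illustration (terminology not spelled out in the original source):
   "X-form" is the indexed reg+reg addressing mode, e.g.

     lwzx r3,r4,r5

   as opposed to the D-form reg+displacement mode "lwz r3,8(r4)"; on
   Power6 the indexed form can incur the DERAT mispredict penalty
   mentioned above.  */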
1997 rs6000_init_hard_regno_mode_ok ();
2000 /* Implement targetm.vectorize.builtin_mask_for_load. */
2002 rs6000_builtin_mask_for_load (void)
2005 return altivec_builtin_mask_for_load;
2010 /* Implement targetm.vectorize.builtin_conversion.
2011 Returns a decl of a function that implements conversion of an integer vector
2012 into a floating-point vector, or vice versa.  TYPE is the type of the integer
2013 side of the conversion.
2014 Return NULL_TREE if it is not available. */
2016 rs6000_builtin_conversion (unsigned int tcode, tree type)
2018 enum tree_code code = (enum tree_code) tcode;
2020 if (!TARGET_ALTIVEC)
2025 case FIX_TRUNC_EXPR:
2026 switch (TYPE_MODE (type))
2029 return TYPE_UNSIGNED (type)
2030 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VCTUXS]
2031 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VCTSXS];
2037 switch (TYPE_MODE (type))
2040 return TYPE_UNSIGNED (type)
2041 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VCFUX]
2042 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VCFSX];
2052 /* Implement targetm.vectorize.builtin_mul_widen_even. */
2054 rs6000_builtin_mul_widen_even (tree type)
2056 if (!TARGET_ALTIVEC)
2059 switch (TYPE_MODE (type))
2062 return TYPE_UNSIGNED (type)
2063 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULEUH]
2064 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULESH];
2067 return TYPE_UNSIGNED (type)
2068 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULEUB]
2069 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULESB];
2075 /* Implement targetm.vectorize.builtin_mul_widen_odd. */
2077 rs6000_builtin_mul_widen_odd (tree type)
2079 if (!TARGET_ALTIVEC)
2082 switch (TYPE_MODE (type))
2085 return TYPE_UNSIGNED (type)
2086 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOUH]
2087 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOSH];
2090 return TYPE_UNSIGNED (type)
2091 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOUB]
2092 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOSB];
2099 /* Return true iff a data reference of TYPE can reach vector alignment (16)
2100 after applying N iterations.  This routine does not determine how many
2101 iterations are required to reach the desired alignment. */
2104 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
2111 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
2114 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
2124 /* Assume that all other types are naturally aligned. CHECKME! */
2129 /* Implement targetm.vectorize.builtin_vec_perm. */
2131 rs6000_builtin_vec_perm (tree type, tree *mask_element_type)
2135 *mask_element_type = unsigned_char_type_node;
2137 switch (TYPE_MODE (type))
2140 d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_16QI];
2144 d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_8HI];
2148 d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_4SI];
2152 d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_4SF];
2163 /* Handle generic options of the form -mfoo=yes/no.
2164 NAME is the option name.
2165 VALUE is the option value.
2166 FLAG is a pointer to the flag in which to store 1 or 0, depending on
2167 whether the option value is 'yes' or 'no' respectively. */
2169 rs6000_parse_yes_no_option (const char *name, const char *value, int *flag)
2173 else if (!strcmp (value, "yes"))
2175 else if (!strcmp (value, "no"))
2178 error ("unknown -m%s= option specified: '%s'", name, value);
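/* Usage sketch (illustrative): the -mvrsave= handler below calls

     rs6000_parse_yes_no_option ("vrsave", arg, &(TARGET_ALTIVEC_VRSAVE));

   so "-mvrsave=yes" stores 1 and "-mvrsave=no" stores 0 in the flag, and
   anything else reports the error above.  */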
2181 /* Validate and record the size specified with the -mtls-size option. */
2184 rs6000_parse_tls_size_option (void)
2186 if (rs6000_tls_size_string == 0)
2188 else if (strcmp (rs6000_tls_size_string, "16") == 0)
2189 rs6000_tls_size = 16;
2190 else if (strcmp (rs6000_tls_size_string, "32") == 0)
2191 rs6000_tls_size = 32;
2192 else if (strcmp (rs6000_tls_size_string, "64") == 0)
2193 rs6000_tls_size = 64;
2195 error ("bad value %qs for -mtls-size switch", rs6000_tls_size_string);
2199 optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
2201 if (DEFAULT_ABI == ABI_DARWIN)
2202 /* The Darwin libraries never set errno, so we might as well
2203 avoid calling them when that's the only reason we would. */
2204 flag_errno_math = 0;
2206 /* Double growth factor to counter reduced min jump length. */
2207 set_param_value ("max-grow-copy-bb-insns", 16);
2209 /* Enable section anchors by default.
2210 Skip section anchors for Objective C and Objective C++
2211 until the front ends are fixed. */
2212 if (!TARGET_MACHO && lang_hooks.name[4] != 'O')
2213 flag_section_anchors = 2;
2216 static enum fpu_type_t
2217 rs6000_parse_fpu_option (const char *option)
2219 if (!strcmp("none", option)) return FPU_NONE;
2220 if (!strcmp("sp_lite", option)) return FPU_SF_LITE;
2221 if (!strcmp("dp_lite", option)) return FPU_DF_LITE;
2222 if (!strcmp("sp_full", option)) return FPU_SF_FULL;
2223 if (!strcmp("dp_full", option)) return FPU_DF_FULL;
2224 error("unknown value %s for -mfpu", option);
2228 /* Implement TARGET_HANDLE_OPTION. */
2231 rs6000_handle_option (size_t code, const char *arg, int value)
2233 enum fpu_type_t fpu_type = FPU_NONE;
2238 target_flags &= ~(MASK_POWER | MASK_POWER2
2239 | MASK_MULTIPLE | MASK_STRING);
2240 target_flags_explicit |= (MASK_POWER | MASK_POWER2
2241 | MASK_MULTIPLE | MASK_STRING);
2243 case OPT_mno_powerpc:
2244 target_flags &= ~(MASK_POWERPC | MASK_PPC_GPOPT
2245 | MASK_PPC_GFXOPT | MASK_POWERPC64);
2246 target_flags_explicit |= (MASK_POWERPC | MASK_PPC_GPOPT
2247 | MASK_PPC_GFXOPT | MASK_POWERPC64);
2250 target_flags &= ~MASK_MINIMAL_TOC;
2251 TARGET_NO_FP_IN_TOC = 0;
2252 TARGET_NO_SUM_IN_TOC = 0;
2253 target_flags_explicit |= MASK_MINIMAL_TOC;
2254 #ifdef TARGET_USES_SYSV4_OPT
2255 /* Note, V.4 no longer uses a normal TOC, so make -mfull-toc be
2256 just the same as -mminimal-toc. */
2257 target_flags |= MASK_MINIMAL_TOC;
2258 target_flags_explicit |= MASK_MINIMAL_TOC;
2262 #ifdef TARGET_USES_SYSV4_OPT
2264 /* Make -mtoc behave like -mminimal-toc. */
2265 target_flags |= MASK_MINIMAL_TOC;
2266 target_flags_explicit |= MASK_MINIMAL_TOC;
2270 #ifdef TARGET_USES_AIX64_OPT
2275 target_flags |= MASK_POWERPC64 | MASK_POWERPC;
2276 target_flags |= ~target_flags_explicit & MASK_PPC_GFXOPT;
2277 target_flags_explicit |= MASK_POWERPC64 | MASK_POWERPC;
2280 #ifdef TARGET_USES_AIX64_OPT
2285 target_flags &= ~MASK_POWERPC64;
2286 target_flags_explicit |= MASK_POWERPC64;
2289 case OPT_minsert_sched_nops_:
2290 rs6000_sched_insert_nops_str = arg;
2293 case OPT_mminimal_toc:
2296 TARGET_NO_FP_IN_TOC = 0;
2297 TARGET_NO_SUM_IN_TOC = 0;
2304 target_flags |= (MASK_MULTIPLE | MASK_STRING);
2305 target_flags_explicit |= (MASK_MULTIPLE | MASK_STRING);
2312 target_flags |= (MASK_POWER | MASK_MULTIPLE | MASK_STRING);
2313 target_flags_explicit |= (MASK_POWER | MASK_MULTIPLE | MASK_STRING);
2317 case OPT_mpowerpc_gpopt:
2318 case OPT_mpowerpc_gfxopt:
2321 target_flags |= MASK_POWERPC;
2322 target_flags_explicit |= MASK_POWERPC;
2326 case OPT_maix_struct_return:
2327 case OPT_msvr4_struct_return:
2328 rs6000_explicit_options.aix_struct_ret = true;
2332 rs6000_explicit_options.vrsave = true;
2333 TARGET_ALTIVEC_VRSAVE = value;
2337 rs6000_explicit_options.vrsave = true;
2338 rs6000_parse_yes_no_option ("vrsave", arg, &(TARGET_ALTIVEC_VRSAVE));
2342 rs6000_explicit_options.isel = true;
2343 rs6000_isel = value;
2347 rs6000_explicit_options.isel = true;
2348 rs6000_parse_yes_no_option ("isel", arg, &(rs6000_isel));
2352 rs6000_explicit_options.spe = true;
2357 rs6000_explicit_options.spe = true;
2358 rs6000_parse_yes_no_option ("spe", arg, &(rs6000_spe));
2362 rs6000_debug_name = arg;
2365 #ifdef TARGET_USES_SYSV4_OPT
2367 rs6000_abi_name = arg;
2371 rs6000_sdata_name = arg;
2374 case OPT_mtls_size_:
2375 rs6000_tls_size_string = arg;
2378 case OPT_mrelocatable:
2381 target_flags |= MASK_MINIMAL_TOC;
2382 target_flags_explicit |= MASK_MINIMAL_TOC;
2383 TARGET_NO_FP_IN_TOC = 1;
2387 case OPT_mrelocatable_lib:
2390 target_flags |= MASK_RELOCATABLE | MASK_MINIMAL_TOC;
2391 target_flags_explicit |= MASK_RELOCATABLE | MASK_MINIMAL_TOC;
2392 TARGET_NO_FP_IN_TOC = 1;
2396 target_flags &= ~MASK_RELOCATABLE;
2397 target_flags_explicit |= MASK_RELOCATABLE;
2403 if (!strcmp (arg, "altivec"))
2405 rs6000_explicit_options.altivec_abi = true;
2406 rs6000_altivec_abi = 1;
2408 /* Enabling the AltiVec ABI turns off the SPE ABI. */
2411 else if (! strcmp (arg, "no-altivec"))
2413 rs6000_explicit_options.altivec_abi = true;
2414 rs6000_altivec_abi = 0;
2416 else if (! strcmp (arg, "spe"))
2418 rs6000_explicit_options.spe_abi = true;
2420 rs6000_altivec_abi = 0;
2421 if (!TARGET_SPE_ABI)
2422 error ("not configured for ABI: '%s'", arg);
2424 else if (! strcmp (arg, "no-spe"))
2426 rs6000_explicit_options.spe_abi = true;
2430 /* These are here for testing during development only; please do not
2431 document them in the manual. */
2432 else if (! strcmp (arg, "d64"))
2434 rs6000_darwin64_abi = 1;
2435 warning (0, "using darwin64 ABI");
2437 else if (! strcmp (arg, "d32"))
2439 rs6000_darwin64_abi = 0;
2440 warning (0, "using old darwin ABI");
2443 else if (! strcmp (arg, "ibmlongdouble"))
2445 rs6000_explicit_options.ieee = true;
2446 rs6000_ieeequad = 0;
2447 warning (0, "using IBM extended precision long double");
2449 else if (! strcmp (arg, "ieeelongdouble"))
2451 rs6000_explicit_options.ieee = true;
2452 rs6000_ieeequad = 1;
2453 warning (0, "using IEEE extended precision long double");
2458 error ("unknown ABI specified: '%s'", arg);
2464 rs6000_select[1].string = arg;
2468 rs6000_select[2].string = arg;
2471 case OPT_mtraceback_:
2472 rs6000_traceback_name = arg;
2475 case OPT_mfloat_gprs_:
2476 rs6000_explicit_options.float_gprs = true;
2477 if (! strcmp (arg, "yes") || ! strcmp (arg, "single"))
2478 rs6000_float_gprs = 1;
2479 else if (! strcmp (arg, "double"))
2480 rs6000_float_gprs = 2;
2481 else if (! strcmp (arg, "no"))
2482 rs6000_float_gprs = 0;
2485 error ("invalid option for -mfloat-gprs: '%s'", arg);
2490 case OPT_mlong_double_:
2491 rs6000_explicit_options.long_double = true;
2492 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
2493 if (value != 64 && value != 128)
2495 error ("Unknown switch -mlong-double-%s", arg);
2496 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
2500 rs6000_long_double_type_size = value;
2503 case OPT_msched_costly_dep_:
2504 rs6000_sched_costly_dep_str = arg;
2508 rs6000_explicit_options.alignment = true;
2509 if (! strcmp (arg, "power"))
2511 /* On 64-bit Darwin, power alignment is ABI-incompatible with
2512 some C library functions, so warn about it. The flag may be
2513 useful for performance studies from time to time though, so
2514 don't disable it entirely. */
2515 if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
2516 warning (0, "-malign-power is not supported for 64-bit Darwin;"
2517 " it is incompatible with the installed C and C++ libraries");
2518 rs6000_alignment_flags = MASK_ALIGN_POWER;
2520 else if (! strcmp (arg, "natural"))
2521 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
2524 error ("unknown -malign-XXXXX option specified: '%s'", arg);
2529 case OPT_msingle_float:
2530 if (!TARGET_SINGLE_FPU)
2531 warning (0, "-msingle-float option equivalent to -mhard-float");
2532 /* -msingle-float implies -mno-double-float and TARGET_HARD_FLOAT. */
2533 rs6000_double_float = 0;
2534 target_flags &= ~MASK_SOFT_FLOAT;
2535 target_flags_explicit |= MASK_SOFT_FLOAT;
2538 case OPT_mdouble_float:
2539 /* -mdouble-float implies -msingle-float and TARGET_HARD_FLOAT. */
2540 rs6000_single_float = 1;
2541 target_flags &= ~MASK_SOFT_FLOAT;
2542 target_flags_explicit |= MASK_SOFT_FLOAT;
2545 case OPT_msimple_fpu:
2546 if (!TARGET_SINGLE_FPU)
2547 warning (0, "-msimple-fpu option ignored");
2550 case OPT_mhard_float:
2551 /* -mhard-float implies -msingle-float and -mdouble-float. */
2552 rs6000_single_float = rs6000_double_float = 1;
2555 case OPT_msoft_float:
2556 /* -msoft-float implies -mno-single-float and -mno-double-float. */
2557 rs6000_single_float = rs6000_double_float = 0;
2561 fpu_type = rs6000_parse_fpu_option (arg);
2562 if (fpu_type != FPU_NONE)
2563 /* If -mfpu is not none, then turn off SOFT_FLOAT, turn on HARD_FLOAT. */
2565 target_flags &= ~MASK_SOFT_FLOAT;
2566 target_flags_explicit |= MASK_SOFT_FLOAT;
2567 rs6000_xilinx_fpu = 1;
2568 if (fpu_type == FPU_SF_LITE || fpu_type == FPU_SF_FULL)
2569 rs6000_single_float = 1;
2570 if (fpu_type == FPU_DF_LITE || fpu_type == FPU_DF_FULL)
2571 rs6000_single_float = rs6000_double_float = 1;
2572 if (fpu_type == FPU_SF_LITE || fpu_type == FPU_DF_LITE)
2573 rs6000_simple_fpu = 1;
2577 /* -mfpu=none is equivalent to -msoft-float */
2578 target_flags |= MASK_SOFT_FLOAT;
2579 target_flags_explicit |= MASK_SOFT_FLOAT;
2580 rs6000_single_float = rs6000_double_float = 0;
2587 /* Do anything needed at the start of the asm file. */
2590 rs6000_file_start (void)
2594 const char *start = buffer;
2595 struct rs6000_cpu_select *ptr;
2596 const char *default_cpu = TARGET_CPU_DEFAULT;
2597 FILE *file = asm_out_file;
2599 default_file_start ();
2601 #ifdef TARGET_BI_ARCH
2602 if ((TARGET_DEFAULT ^ target_flags) & MASK_64BIT)
2606 if (flag_verbose_asm)
2608 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
2609 rs6000_select[0].string = default_cpu;
2611 for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
2613 ptr = &rs6000_select[i];
2614 if (ptr->string != (char *)0 && ptr->string[0] != '\0')
2616 fprintf (file, "%s %s%s", start, ptr->name, ptr->string);
2621 if (PPC405_ERRATUM77)
2623 fprintf (file, "%s PPC405CR_ERRATUM77", start);
2627 #ifdef USING_ELFOS_H
2628 switch (rs6000_sdata)
2630 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
2631 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
2632 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
2633 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
2636 if (rs6000_sdata && g_switch_value)
2638 fprintf (file, "%s -G " HOST_WIDE_INT_PRINT_UNSIGNED, start,
2648 #ifdef HAVE_AS_GNU_ATTRIBUTE
2649 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
2651 fprintf (file, "\t.gnu_attribute 4, %d\n",
2652 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
2653 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
2655 fprintf (file, "\t.gnu_attribute 8, %d\n",
2656 (TARGET_ALTIVEC_ABI ? 2
2657 : TARGET_SPE_ABI ? 3
2659 fprintf (file, "\t.gnu_attribute 12, %d\n",
2660 aix_struct_return ? 2 : 1);
2665 if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
2667 switch_to_section (toc_section);
2668 switch_to_section (text_section);
2673 /* Return nonzero if this function is known to have a null epilogue. */
2676 direct_return (void)
2678 if (reload_completed)
2680 rs6000_stack_t *info = rs6000_stack_info ();
2682 if (info->first_gp_reg_save == 32
2683 && info->first_fp_reg_save == 64
2684 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
2685 && ! info->lr_save_p
2686 && ! info->cr_save_p
2687 && info->vrsave_mask == 0
2695 /* Return the number of instructions it takes to form a constant in an
2696 integer register. */
2699 num_insns_constant_wide (HOST_WIDE_INT value)
2701 /* signed constant loadable with {cal|addi} */
2702 if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
2705 /* constant loadable with {cau|addis} */
2706 else if ((value & 0xffff) == 0
2707 && (value >> 31 == -1 || value >> 31 == 0))
2710 #if HOST_BITS_PER_WIDE_INT == 64
2711 else if (TARGET_POWERPC64)
2713 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
2714 HOST_WIDE_INT high = value >> 31;
2716 if (high == 0 || high == -1)
2722 return num_insns_constant_wide (high) + 1;
2724 return (num_insns_constant_wide (high)
2725 + num_insns_constant_wide (low) + 1);
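/* Worked examples (illustrative): 0x7fff passes the addi test above
   (1 insn); 0x12340000 has a zero low half-word and passes the addis
   test (1 insn); 0x12345678 needs a lis/ori pair (2 insns); and a full
   64-bit constant such as 0x123456789abcdef0 is built from its two
   32-bit halves plus one shift/merge insn, as computed by the recursive
   calls above.  */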
2734 num_insns_constant (rtx op, enum machine_mode mode)
2736 HOST_WIDE_INT low, high;
2738 switch (GET_CODE (op))
2741 #if HOST_BITS_PER_WIDE_INT == 64
2742 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
2743 && mask64_operand (op, mode))
2747 return num_insns_constant_wide (INTVAL (op));
2750 if (mode == SFmode || mode == SDmode)
2755 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
2756 if (DECIMAL_FLOAT_MODE_P (mode))
2757 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
2759 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
2760 return num_insns_constant_wide ((HOST_WIDE_INT) l);
2763 if (mode == VOIDmode || mode == DImode)
2765 high = CONST_DOUBLE_HIGH (op);
2766 low = CONST_DOUBLE_LOW (op);
2773 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
2774 if (DECIMAL_FLOAT_MODE_P (mode))
2775 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
2777 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
2778 high = l[WORDS_BIG_ENDIAN == 0];
2779 low = l[WORDS_BIG_ENDIAN != 0];
2783 return (num_insns_constant_wide (low)
2784 + num_insns_constant_wide (high));
2787 if ((high == 0 && low >= 0)
2788 || (high == -1 && low < 0))
2789 return num_insns_constant_wide (low);
2791 else if (mask64_operand (op, mode))
2795 return num_insns_constant_wide (high) + 1;
2798 return (num_insns_constant_wide (high)
2799 + num_insns_constant_wide (low) + 1);
2807 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
2808 If the mode of OP is MODE_VECTOR_INT, this simply returns the
2809 corresponding element of the vector, but for V4SFmode and V2SFmode,
2810 the corresponding "float" is interpreted as an SImode integer. */
2813 const_vector_elt_as_int (rtx op, unsigned int elt)
2815 rtx tmp = CONST_VECTOR_ELT (op, elt);
2816 if (GET_MODE (op) == V4SFmode
2817 || GET_MODE (op) == V2SFmode)
2818 tmp = gen_lowpart (SImode, tmp);
2819 return INTVAL (tmp);
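/* Example (illustrative): for a V4SFmode CONST_VECTOR whose elements are
   all 1.0f, this returns the IEEE single-precision bit pattern
   0x3f800000, which the vspltis tests below can then inspect as an
   ordinary integer.  */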
2822 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
2823 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
2824 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
2825 all items are set to the same value and contain COPIES replicas of the
2826 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
2827 operand and the others are set to the value of the operand's msb. */
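/* Illustrative examples (not from the original source): a V16QImode
   constant with every byte 5 matches with STEP = 1, COPIES = 1 (a plain
   vspltisb 5); the V4SImode constant { 0x10001, 0x10001, 0x10001,
   0x10001 } matches with COPIES = 2, each 32-bit element holding two
   replicas of the half-word 1 (vspltish 1); and the V8HImode constant
   { 0, 5, 0, 5, 0, 5, 0, 5 } matches with STEP = 2, every second
   element being 5 and the rest holding 5's msb (0), i.e. a vspltisw 5
   viewed as half-words.  */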
2830 vspltis_constant (rtx op, unsigned step, unsigned copies)
2832 enum machine_mode mode = GET_MODE (op);
2833 enum machine_mode inner = GET_MODE_INNER (mode);
2836 unsigned nunits = GET_MODE_NUNITS (mode);
2837 unsigned bitsize = GET_MODE_BITSIZE (inner);
2838 unsigned mask = GET_MODE_MASK (inner);
2840 HOST_WIDE_INT val = const_vector_elt_as_int (op, nunits - 1);
2841 HOST_WIDE_INT splat_val = val;
2842 HOST_WIDE_INT msb_val = val > 0 ? 0 : -1;
2844 /* Construct the value to be splatted, if possible. If not, return 0. */
2845 for (i = 2; i <= copies; i *= 2)
2847 HOST_WIDE_INT small_val;
2849 small_val = splat_val >> bitsize;
2851 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
2853 splat_val = small_val;
2856 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
2857 if (EASY_VECTOR_15 (splat_val))
2860 /* Also check if we can splat, and then add the result to itself. Do so if
2861 the value is positive, or if the splat instruction is using OP's mode;
2862 for splat_val < 0, the splat and the add should use the same mode. */
2863 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
2864 && (splat_val >= 0 || (step == 1 && copies == 1)))
2870 /* Check if VAL is present in every STEP-th element, and the
2871 other elements are filled with its most significant bit. */
2872 for (i = 0; i < nunits - 1; ++i)
2874 HOST_WIDE_INT desired_val;
2875 if (((i + 1) & (step - 1)) == 0)
2878 desired_val = msb_val;
2880 if (desired_val != const_vector_elt_as_int (op, i))
2888 /* Return true if OP is of the given MODE and can be synthesized
2889 with a vspltisb, vspltish or vspltisw. */
2892 easy_altivec_constant (rtx op, enum machine_mode mode)
2894 unsigned step, copies;
2896 if (mode == VOIDmode)
2897 mode = GET_MODE (op);
2898 else if (mode != GET_MODE (op))
2901 /* Start with a vspltisw. */
2902 step = GET_MODE_NUNITS (mode) / 4;
2905 if (vspltis_constant (op, step, copies))
2908 /* Then try with a vspltish. */
2914 if (vspltis_constant (op, step, copies))
2917 /* And finally a vspltisb. */
2923 if (vspltis_constant (op, step, copies))
2929 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
2930 result is OP. Abort if it is not possible. */
2933 gen_easy_altivec_constant (rtx op)
2935 enum machine_mode mode = GET_MODE (op);
2936 int nunits = GET_MODE_NUNITS (mode);
2937 rtx last = CONST_VECTOR_ELT (op, nunits - 1);
2938 unsigned step = nunits / 4;
2939 unsigned copies = 1;
2941 /* Start with a vspltisw. */
2942 if (vspltis_constant (op, step, copies))
2943 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));
2945 /* Then try with a vspltish. */
2951 if (vspltis_constant (op, step, copies))
2952 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));
2954 /* And finally a vspltisb. */
2960 if (vspltis_constant (op, step, copies))
2961 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));
2967 output_vec_const_move (rtx *operands)
2970 enum machine_mode mode;
2975 mode = GET_MODE (dest);
2980 if (zero_constant (vec, mode))
2981 return "vxor %0,%0,%0";
2983 splat_vec = gen_easy_altivec_constant (vec);
2984 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
2985 operands[1] = XEXP (splat_vec, 0);
2986 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
2989 switch (GET_MODE (splat_vec))
2992 return "vspltisw %0,%1";
2995 return "vspltish %0,%1";
2998 return "vspltisb %0,%1";
3005 gcc_assert (TARGET_SPE);
3007 /* Vector constant 0 is handled as a splitter of V2SI, and in the
3008 pattern of V1DI, V4HI, and V2SF.
3010 FIXME: We should probably return # and add post reload
3011 splitters for these, but this way is so easy ;-). */
3012 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
3013 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
3014 operands[1] = CONST_VECTOR_ELT (vec, 0);
3015 operands[2] = CONST_VECTOR_ELT (vec, 1);
3017 return "li %0,%1\n\tevmergelo %0,%0,%0";
3019 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
3022 /* Initialize TARGET of vector PAIRED to VALS. */
3025 paired_expand_vector_init (rtx target, rtx vals)
3027 enum machine_mode mode = GET_MODE (target);
3028 int n_elts = GET_MODE_NUNITS (mode);
3030 rtx x, new_rtx, tmp, constant_op, op1, op2;
3033 for (i = 0; i < n_elts; ++i)
3035 x = XVECEXP (vals, 0, i);
3036 if (!CONSTANT_P (x))
3041 /* Load from constant pool. */
3042 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
3048 /* The vector is initialized only with non-constants. */
3049 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
3050 XVECEXP (vals, 0, 1));
3052 emit_move_insn (target, new_rtx);
3056 /* One field is non-constant and the other one is a constant. Load the
3057 constant from the constant pool and use the ps_merge instruction to
3058 construct the whole vector. */
3059 op1 = XVECEXP (vals, 0, 0);
3060 op2 = XVECEXP (vals, 0, 1);
3062 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
3064 tmp = gen_reg_rtx (GET_MODE (constant_op));
3065 emit_move_insn (tmp, constant_op);
3067 if (CONSTANT_P (op1))
3068 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
3070 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
3072 emit_move_insn (target, new_rtx);
3076 paired_expand_vector_move (rtx operands[])
3078 rtx op0 = operands[0], op1 = operands[1];
3080 emit_move_insn (op0, op1);
3083 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
3084 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
3085 operands for the relation operation COND.  This is a recursive
3086 function.  */
3089 paired_emit_vector_compare (enum rtx_code rcode,
3090 rtx dest, rtx op0, rtx op1,
3091 rtx cc_op0, rtx cc_op1)
3093 rtx tmp = gen_reg_rtx (V2SFmode);
3094 rtx tmp1, max, min, equal_zero;
3096 gcc_assert (TARGET_PAIRED_FLOAT);
3097 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
3103 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
3107 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
3108 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
3112 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
3115 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
3118 tmp1 = gen_reg_rtx (V2SFmode);
3119 max = gen_reg_rtx (V2SFmode);
3120 min = gen_reg_rtx (V2SFmode);
3121 equal_zero = gen_reg_rtx (V2SFmode);
3123 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
3124 emit_insn (gen_selv2sf4
3125 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
3126 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
3127 emit_insn (gen_selv2sf4
3128 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
3129 emit_insn (gen_subv2sf3 (tmp1, min, max));
3130 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
3133 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
3136 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
3139 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
3142 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
3145 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
3154 /* Emit vector conditional expression.
3155 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
3156 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
3159 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
3160 rtx cond, rtx cc_op0, rtx cc_op1)
3162 enum rtx_code rcode = GET_CODE (cond);
3164 if (!TARGET_PAIRED_FLOAT)
3167 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
3172 /* Initialize vector TARGET to VALS. */
3175 rs6000_expand_vector_init (rtx target, rtx vals)
3177 enum machine_mode mode = GET_MODE (target);
3178 enum machine_mode inner_mode = GET_MODE_INNER (mode);
3179 int n_elts = GET_MODE_NUNITS (mode);
3180 int n_var = 0, one_var = -1;
3181 bool all_same = true, all_const_zero = true;
3185 for (i = 0; i < n_elts; ++i)
3187 x = XVECEXP (vals, 0, i);
3188 if (!CONSTANT_P (x))
3189 ++n_var, one_var = i;
3190 else if (x != CONST0_RTX (inner_mode))
3191 all_const_zero = false;
3193 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
3199 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
3200 if (mode != V4SFmode && all_const_zero)
3202 /* Zero register. */
3203 emit_insn (gen_rtx_SET (VOIDmode, target,
3204 gen_rtx_XOR (mode, target, target)));
3207 else if (mode != V4SFmode && easy_vector_constant (const_vec, mode))
3209 /* Splat immediate. */
3210 emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
3214 ; /* Splat vector element. */
3217 /* Load from constant pool. */
3218 emit_move_insn (target, const_vec);
3223 /* Store value to stack temp. Load vector element. Splat. */
3226 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
3227 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
3228 XVECEXP (vals, 0, 0));
3229 x = gen_rtx_UNSPEC (VOIDmode,
3230 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
3231 emit_insn (gen_rtx_PARALLEL (VOIDmode,
3233 gen_rtx_SET (VOIDmode,
3236 x = gen_rtx_VEC_SELECT (inner_mode, target,
3237 gen_rtx_PARALLEL (VOIDmode,
3238 gen_rtvec (1, const0_rtx)));
3239 emit_insn (gen_rtx_SET (VOIDmode, target,
3240 gen_rtx_VEC_DUPLICATE (mode, x)));
3244 /* One field is non-constant. Load constant then overwrite
3245 the varying field. */
3248 rtx copy = copy_rtx (vals);
3250 /* Load constant part of vector, substitute neighboring value for
3251 the varying element. */
3252 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
3253 rs6000_expand_vector_init (target, copy);
3255 /* Insert variable. */
3256 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
3260 /* Construct the vector in memory one field at a time
3261 and load the whole vector. */
3262 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
3263 for (i = 0; i < n_elts; i++)
3264 emit_move_insn (adjust_address_nv (mem, inner_mode,
3265 i * GET_MODE_SIZE (inner_mode)),
3266 XVECEXP (vals, 0, i));
3267 emit_move_insn (target, mem);
3270 /* Set field ELT of TARGET to VAL. */
3273 rs6000_expand_vector_set (rtx target, rtx val, int elt)
3275 enum machine_mode mode = GET_MODE (target);
3276 enum machine_mode inner_mode = GET_MODE_INNER (mode);
3277 rtx reg = gen_reg_rtx (mode);
3279 int width = GET_MODE_SIZE (inner_mode);
3282 /* Load single variable value. */
3283 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
3284 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
3285 x = gen_rtx_UNSPEC (VOIDmode,
3286 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
3287 emit_insn (gen_rtx_PARALLEL (VOIDmode,
3289 gen_rtx_SET (VOIDmode,
3293 /* Linear sequence. */
3294 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
3295 for (i = 0; i < 16; ++i)
3296 XVECEXP (mask, 0, i) = GEN_INT (i);
3298 /* Set permute mask to insert element into target. */
3299 for (i = 0; i < width; ++i)
3300 XVECEXP (mask, 0, elt*width + i)
3301 = GEN_INT (i + 0x10);
3302 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
3303 x = gen_rtx_UNSPEC (mode,
3304 gen_rtvec (3, target, reg,
3305 force_reg (V16QImode, x)),
3307 emit_insn (gen_rtx_SET (VOIDmode, target, x));
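/* Worked example (illustrative): for V4SImode and ELT == 1, width is 4,
   so mask bytes 4..7 become 0x10..0x13 while the others keep their
   identity values 0..15.  Since vperm selectors 0x10-0x1f pick from the
   second input, the permute copies bytes 0-3 of the splatted REG into
   element 1 of TARGET and passes the remaining TARGET bytes through
   unchanged.  */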
3310 /* Extract field ELT from VEC into TARGET. */
3313 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
3315 enum machine_mode mode = GET_MODE (vec);
3316 enum machine_mode inner_mode = GET_MODE_INNER (mode);
3319 /* Allocate mode-sized buffer. */
3320 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
3322 /* Add offset to field within buffer matching vector element. */
3323 mem = adjust_address_nv (mem, mode, elt * GET_MODE_SIZE (inner_mode));
3325 /* Store single field into mode-sized buffer. */
3326 x = gen_rtx_UNSPEC (VOIDmode,
3327 gen_rtvec (1, const0_rtx), UNSPEC_STVE);
3328 emit_insn (gen_rtx_PARALLEL (VOIDmode,
3330 gen_rtx_SET (VOIDmode,
3333 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
3336 /* Generates shifts and masks for a pair of rldicl or rldicr insns to
3337 implement ANDing by the mask IN. */
3339 build_mask64_2_operands (rtx in, rtx *out)
3341 #if HOST_BITS_PER_WIDE_INT >= 64
3342 unsigned HOST_WIDE_INT c, lsb, m1, m2;
3345 gcc_assert (GET_CODE (in) == CONST_INT);
3350 /* Assume c initially something like 0x00fff000000fffff. The idea
3351 is to rotate the word so that the middle ^^^^^^ group of zeros
3352 is at the MS end and can be cleared with an rldicl mask. We then
3353 rotate back and clear off the MS ^^ group of zeros with a
3354 second rldicl. */
3355 c = ~c; /* c == 0xff000ffffff00000 */
3356 lsb = c & -c; /* lsb == 0x0000000000100000 */
3357 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
3358 c = ~c; /* c == 0x00fff000000fffff */
3359 c &= -lsb; /* c == 0x00fff00000000000 */
3360 lsb = c & -c; /* lsb == 0x0000100000000000 */
3361 c = ~c; /* c == 0xff000fffffffffff */
3362 c &= -lsb; /* c == 0xff00000000000000 */
3364 while ((lsb >>= 1) != 0)
3365 shift++; /* shift == 44 on exit from loop */
3366 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
3367 m1 = ~m1; /* m1 == 0x000000ffffffffff */
3368 m2 = ~c; /* m2 == 0x00ffffffffffffff */
3372 /* Assume c initially something like 0xff000f0000000000. The idea
3373 is to rotate the word so that the ^^^ middle group of zeros
3374 is at the LS end and can be cleared with an rldicr mask. We then
3375 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
3376 a second rldicr. */
3377 lsb = c & -c; /* lsb == 0x0000010000000000 */
3378 m2 = -lsb; /* m2 == 0xffffff0000000000 */
3379 c = ~c; /* c == 0x00fff0ffffffffff */
3380 c &= -lsb; /* c == 0x00fff00000000000 */
3381 lsb = c & -c; /* lsb == 0x0000100000000000 */
3382 c = ~c; /* c == 0xff000fffffffffff */
3383 c &= -lsb; /* c == 0xff00000000000000 */
3385 while ((lsb >>= 1) != 0)
3386 shift++; /* shift == 44 on exit from loop */
3387 m1 = ~c; /* m1 == 0x00ffffffffffffff */
3388 m1 >>= shift; /* m1 == 0x0000000000000fff */
3389 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
3392 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
3393 masks will be all 1's. We are guaranteed more than one transition. */
3394 out[0] = GEN_INT (64 - shift);
3395 out[1] = GEN_INT (m1);
3396 out[2] = GEN_INT (shift);
3397 out[3] = GEN_INT (m2);
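/* Spelling out the first example above (illustrative): shift ends up as
   44, so out[] = { 64 - 44 = 20, m1 = 0x000000ffffffffff, 44,
   m2 = 0x00ffffffffffffff }, the rotate counts and masks for the two
   insns that implement the AND.  */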
3405 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
3408 invalid_e500_subreg (rtx op, enum machine_mode mode)
3410 if (TARGET_E500_DOUBLE)
3412 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
3413 subreg:TI and reg:TF. Decimal float modes are like integer
3414 modes (only low part of each register used) for this
3415 purpose. */
3416 if (GET_CODE (op) == SUBREG
3417 && (mode == SImode || mode == DImode || mode == TImode
3418 || mode == DDmode || mode == TDmode)
3419 && REG_P (SUBREG_REG (op))
3420 && (GET_MODE (SUBREG_REG (op)) == DFmode
3421 || GET_MODE (SUBREG_REG (op)) == TFmode))
3424 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
3425 reg:TI. */
3426 if (GET_CODE (op) == SUBREG
3427 && (mode == DFmode || mode == TFmode)
3428 && REG_P (SUBREG_REG (op))
3429 && (GET_MODE (SUBREG_REG (op)) == DImode
3430 || GET_MODE (SUBREG_REG (op)) == TImode
3431 || GET_MODE (SUBREG_REG (op)) == DDmode
3432 || GET_MODE (SUBREG_REG (op)) == TDmode))
3437 && GET_CODE (op) == SUBREG
3439 && REG_P (SUBREG_REG (op))
3440 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
3446 /* AIX increases natural record alignment to doubleword if the first
3447 field is an FP double while the FP fields remain word aligned. */
3450 rs6000_special_round_type_align (tree type, unsigned int computed,
3451 unsigned int specified)
3453 unsigned int align = MAX (computed, specified);
3454 tree field = TYPE_FIELDS (type);
3456 /* Skip all non-field decls. */
3457 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
3458 field = TREE_CHAIN (field);
3460 if (field != NULL && field != type)
3462 type = TREE_TYPE (field);
3463 while (TREE_CODE (type) == ARRAY_TYPE)
3464 type = TREE_TYPE (type);
3466 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
3467 align = MAX (align, 64);
3473 /* Darwin increases record alignment to the natural alignment of
3474 the first field. */
3477 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
3478 unsigned int specified)
3480 unsigned int align = MAX (computed, specified);
3482 if (TYPE_PACKED (type))
3485 /* Find the first field, looking down into aggregates. */
3487 tree field = TYPE_FIELDS (type);
3488 /* Skip all non-field decls. */
3489 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
3490 field = TREE_CHAIN (field);
3493 type = TREE_TYPE (field);
3494 while (TREE_CODE (type) == ARRAY_TYPE)
3495 type = TREE_TYPE (type);
3496 } while (AGGREGATE_TYPE_P (type));
3498 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
3499 align = MAX (align, TYPE_ALIGN (type));
3504 /* Return 1 for an operand in small memory on V.4/eabi. */
3507 small_data_operand (rtx op ATTRIBUTE_UNUSED,
3508 enum machine_mode mode ATTRIBUTE_UNUSED)
3513 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
3516 if (DEFAULT_ABI != ABI_V4)
3519 /* Vector and float memory instructions have a limited offset on the
3520 SPE, so using a vector or float variable directly as an operand is
3521 not useful. */
3523 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
3526 if (GET_CODE (op) == SYMBOL_REF)
3529 else if (GET_CODE (op) != CONST
3530 || GET_CODE (XEXP (op, 0)) != PLUS
3531 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
3532 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
3537 rtx sum = XEXP (op, 0);
3538 HOST_WIDE_INT summand;
3540 /* We have to be careful here, because it is the referenced address
3541 that must be 32k from _SDA_BASE_, not just the symbol. */
3542 summand = INTVAL (XEXP (sum, 1));
3543 if (summand < 0 || (unsigned HOST_WIDE_INT) summand > g_switch_value)
3546 sym_ref = XEXP (sum, 0);
3549 return SYMBOL_REF_SMALL_P (sym_ref);
3555 /* Return true if either operand is a general purpose register. */
3558 gpr_or_gpr_p (rtx op0, rtx op1)
3560 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
3561 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
3565 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
3568 constant_pool_expr_p (rtx op)
3572 split_const (op, &base, &offset);
3573 return (GET_CODE (base) == SYMBOL_REF
3574 && CONSTANT_POOL_ADDRESS_P (base)
3575 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
3579 toc_relative_expr_p (rtx op)
3583 if (GET_CODE (op) != CONST)
3586 split_const (op, &base, &offset);
3587 return (GET_CODE (base) == UNSPEC
3588 && XINT (base, 1) == UNSPEC_TOCREL);
3592 legitimate_constant_pool_address_p (rtx x)
3595 && GET_CODE (x) == PLUS
3596 && GET_CODE (XEXP (x, 0)) == REG
3597 && (TARGET_MINIMAL_TOC || REGNO (XEXP (x, 0)) == TOC_REGISTER)
3598 && toc_relative_expr_p (XEXP (x, 1)));
3602 legitimate_small_data_p (enum machine_mode mode, rtx x)
3604 return (DEFAULT_ABI == ABI_V4
3605 && !flag_pic && !TARGET_TOC
3606 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
3607 && small_data_operand (x, mode));
3610 /* SPE offset addressing is limited to 5 bits' worth of double words. */
3611 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
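/* Illustrative check: (x & ~0xf8) == 0 accepts exactly the offsets
   0, 8, 16, ..., 248 -- 32 doubleword slots; 0xf9 (misaligned) and
   0x100 (out of range) are both rejected.  */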
3614 rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x, int strict)
3616 unsigned HOST_WIDE_INT offset, extra;
3618 if (GET_CODE (x) != PLUS)
3620 if (GET_CODE (XEXP (x, 0)) != REG)
3622 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
3624 if (legitimate_constant_pool_address_p (x))
3626 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
3629 offset = INTVAL (XEXP (x, 1));
3637 /* AltiVec vector modes. Only reg+reg addressing is valid and
3638 constant offset zero should not occur due to canonicalization. */
3645 /* Paired vector modes. Only reg+reg addressing is valid and
3646 constant offset zero should not occur due to canonicalization. */
3647 if (TARGET_PAIRED_FLOAT)
3649 /* SPE vector modes. */
3650 return SPE_CONST_OFFSET_OK (offset);
3653 if (TARGET_E500_DOUBLE)
3654 return SPE_CONST_OFFSET_OK (offset);
3658 /* On e500v2, we may have:
3660 (subreg:DF (mem:DI (plus (reg) (const_int))) 0),
3662 which gets addressed with evldd instructions. */
3663 if (TARGET_E500_DOUBLE)
3664 return SPE_CONST_OFFSET_OK (offset);
3666 if (mode == DFmode || mode == DDmode || !TARGET_POWERPC64)
3668 else if (offset & 3)
3673 if (TARGET_E500_DOUBLE)
3674 return (SPE_CONST_OFFSET_OK (offset)
3675 && SPE_CONST_OFFSET_OK (offset + 8));
3679 if (mode == TFmode || mode == TDmode || !TARGET_POWERPC64)
3681 else if (offset & 3)
3692 return (offset < 0x10000) && (offset + extra < 0x10000);
3696 legitimate_indexed_address_p (rtx x, int strict)
3700 if (GET_CODE (x) != PLUS)
3706 /* Recognize the rtl generated by reload which we know will later be
3707 replaced with proper base and index regs. */
3709 && reload_in_progress
3710 && (REG_P (op0) || GET_CODE (op0) == PLUS)
3714 return (REG_P (op0) && REG_P (op1)
3715 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
3716 && INT_REG_OK_FOR_INDEX_P (op1, strict))
3717 || (INT_REG_OK_FOR_BASE_P (op1, strict)
3718 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
3722 avoiding_indexed_address_p (enum machine_mode mode)
3724 /* Avoid indexed addressing for modes that have non-indexed
3725 load/store instruction forms. */
3726 return TARGET_AVOID_XFORM && !ALTIVEC_VECTOR_MODE (mode);
3730 legitimate_indirect_address_p (rtx x, int strict)
3732 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
3736 macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
3738 if (!TARGET_MACHO || !flag_pic
3739 || mode != SImode || GET_CODE (x) != MEM)
3743 if (GET_CODE (x) != LO_SUM)
3745 if (GET_CODE (XEXP (x, 0)) != REG)
3747 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
3751 return CONSTANT_P (x);
3755 legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
3757 if (GET_CODE (x) != LO_SUM)
3759 if (GET_CODE (XEXP (x, 0)) != REG)
3761 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
3763 /* Restrict addressing for DI because of our SUBREG hackery. */
3764 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
3765 || mode == DDmode || mode == TDmode
3770 if (TARGET_ELF || TARGET_MACHO)
3772 if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
3776 if (GET_MODE_NUNITS (mode) != 1)
3778 if (GET_MODE_BITSIZE (mode) > 64
3779 || (GET_MODE_BITSIZE (mode) > 32 && !TARGET_POWERPC64
3780 && !(TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
3781 && (mode == DFmode || mode == DDmode))))
3784 return CONSTANT_P (x);
3791 /* Try machine-dependent ways of modifying an illegitimate address
3792 to be legitimate. If we find one, return the new, valid address.
3793 This is used from only one place: `memory_address' in explow.c.
3795 OLDX is the address as it was before break_out_memory_refs was
3796 called. In some cases it is useful to look at this to decide what
3799 It is always safe for this function to do nothing. It exists to
3800 recognize opportunities to optimize the output.
3802 On RS/6000, first check for the sum of a register with a constant
3803 integer that is out of range. If so, generate code to add the
3804 constant with the low-order 16 bits masked to the register and force
3805 this result into another register (this can be done with `cau').
3806 Then generate an address of REG+(CONST&0xffff), allowing for the
3807 possibility of bit 16 being a one.
3809 Then check for the sum of a register and something not constant, try to
3810 load the other things into a register and return the sum. */
3813 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3814 enum machine_mode mode)
3816 if (GET_CODE (x) == SYMBOL_REF)
3818 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
3820 return rs6000_legitimize_tls_address (x, model);
3823 if (GET_CODE (x) == PLUS
3824 && GET_CODE (XEXP (x, 0)) == REG
3825 && GET_CODE (XEXP (x, 1)) == CONST_INT
3826 && (unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000) >= 0x10000
3827 && !((TARGET_POWERPC64
3828 && (mode == DImode || mode == TImode)
3829 && (INTVAL (XEXP (x, 1)) & 3) != 0)
3830 || SPE_VECTOR_MODE (mode)
3831 || ALTIVEC_VECTOR_MODE (mode)
3832 || (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
3833 || mode == DImode || mode == DDmode
3834 || mode == TDmode))))
3836 HOST_WIDE_INT high_int, low_int;
3838 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
3839 high_int = INTVAL (XEXP (x, 1)) - low_int;
3840 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
3841 GEN_INT (high_int)), 0);
3842 return gen_rtx_PLUS (Pmode, sum, GEN_INT (low_int));
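/* Worked example (illustrative): for (plus (reg) (const_int 0x9000)),
   low_int becomes ((0x9000 ^ 0x8000) - 0x8000) = -0x7000 and high_int
   becomes 0x10000, so we add 0x10000 into a new register and address
   the memory as -0x7000(reg) -- the "bit 16 being a one" case from the
   comment above.  */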
3844 else if (GET_CODE (x) == PLUS
3845 && GET_CODE (XEXP (x, 0)) == REG
3846 && GET_CODE (XEXP (x, 1)) != CONST_INT
3847 && GET_MODE_NUNITS (mode) == 1
3848 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
3850 || ((mode != DImode && mode != DFmode && mode != DDmode)
3851 || (TARGET_E500_DOUBLE && mode != DDmode)))
3852 && (TARGET_POWERPC64 || mode != DImode)
3853 && !avoiding_indexed_address_p (mode)
3858 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
3859 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
3861 else if (ALTIVEC_VECTOR_MODE (mode))
3865 /* Make sure both operands are registers. */
3866 if (GET_CODE (x) == PLUS)
3867 return gen_rtx_PLUS (Pmode, force_reg (Pmode, XEXP (x, 0)),
3868 force_reg (Pmode, XEXP (x, 1)));
3870 reg = force_reg (Pmode, x);
3873 else if (SPE_VECTOR_MODE (mode)
3874 || (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
3875 || mode == DDmode || mode == TDmode
3876 || mode == DImode)))
3880 /* We accept [reg + reg] and [reg + OFFSET]. */
3882 if (GET_CODE (x) == PLUS)
3884 rtx op1 = XEXP (x, 0);
3885 rtx op2 = XEXP (x, 1);
3888 op1 = force_reg (Pmode, op1);
3890 if (GET_CODE (op2) != REG
3891 && (GET_CODE (op2) != CONST_INT
3892 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
3893 || (GET_MODE_SIZE (mode) > 8
3894 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
3895 op2 = force_reg (Pmode, op2);
3897 /* We can't always do [reg + reg] for these, because [reg +
3898 reg + offset] is not a legitimate addressing mode. */
3899 y = gen_rtx_PLUS (Pmode, op1, op2);
3901 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
3902 return force_reg (Pmode, y);
3907 return force_reg (Pmode, x);
3913 && GET_CODE (x) != CONST_INT
3914 && GET_CODE (x) != CONST_DOUBLE
3916 && GET_MODE_NUNITS (mode) == 1
3917 && (GET_MODE_BITSIZE (mode) <= 32
3918 || ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
3919 && (mode == DFmode || mode == DDmode))))
3921 rtx reg = gen_reg_rtx (Pmode);
3922 emit_insn (gen_elf_high (reg, x));
3923 return gen_rtx_LO_SUM (Pmode, reg, x);
3925 else if (TARGET_MACHO && TARGET_32BIT && TARGET_NO_TOC
3928 && ! MACHO_DYNAMIC_NO_PIC_P
3930 && GET_CODE (x) != CONST_INT
3931 && GET_CODE (x) != CONST_DOUBLE
3933 && GET_MODE_NUNITS (mode) == 1
3934 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
3935 || (mode != DFmode && mode != DDmode))
3939 rtx reg = gen_reg_rtx (Pmode);
3940 emit_insn (gen_macho_high (reg, x));
3941 return gen_rtx_LO_SUM (Pmode, reg, x);
3944 && GET_CODE (x) == SYMBOL_REF
3945 && constant_pool_expr_p (x)
3946 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
3948 return create_TOC_reference (x);
3954 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
3955 We need to emit DTP-relative relocations. */
3958 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
3963 fputs ("\t.long\t", file);
3966 fputs (DOUBLE_INT_ASM_OP, file);
3971 output_addr_const (file, x);
3972 fputs ("@dtprel+0x8000", file);
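/* Example output (illustrative): for a 4-byte entry referring to a
   thread-local variable "foo", the code above emits

     .long   foo@dtprel+0x8000

   with DOUBLE_INT_ASM_OP (typically .quad) used instead for 8-byte
   entries on 64-bit targets.  */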
3975 /* Construct the SYMBOL_REF for the tls_get_addr function. */
3977 static GTY(()) rtx rs6000_tls_symbol;
3979 rs6000_tls_get_addr (void)
3981 if (!rs6000_tls_symbol)
3982 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
3984 return rs6000_tls_symbol;
3987 /* Construct the SYMBOL_REF for TLS GOT references. */
3989 static GTY(()) rtx rs6000_got_symbol;
3991 rs6000_got_sym (void)
3993 if (!rs6000_got_symbol)
3995 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3996 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
3997 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
4000 return rs6000_got_symbol;
4003 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4004 this (thread-local) address. */
4007 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
4011 dest = gen_reg_rtx (Pmode);
4012 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
4018 tlsreg = gen_rtx_REG (Pmode, 13);
4019 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
4023 tlsreg = gen_rtx_REG (Pmode, 2);
4024 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
4028 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
4032 tmp = gen_reg_rtx (Pmode);
4035 tlsreg = gen_rtx_REG (Pmode, 13);
4036 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
4040 tlsreg = gen_rtx_REG (Pmode, 2);
4041 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
4045 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
4047 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
4052 rtx r3, got, tga, tmp1, tmp2, eqv;
4054 /* We currently use relocations like @got@tlsgd for tls, which
4055 means the linker will handle allocation of tls entries, placing
4056 them in the .got section. So use a pointer to the .got section,
4057 not one to secondary TOC sections used by 64-bit -mminimal-toc,
4058 or to secondary GOT sections used by 32-bit -fPIC. */
4060 got = gen_rtx_REG (Pmode, 2);
4064 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
4067 rtx gsym = rs6000_got_sym ();
4068 got = gen_reg_rtx (Pmode);
4070 rs6000_emit_move (got, gsym, Pmode);
4076 tmp1 = gen_reg_rtx (Pmode);
4077 tmp2 = gen_reg_rtx (Pmode);
4078 tmp3 = gen_reg_rtx (Pmode);
4079 mem = gen_const_mem (Pmode, tmp1);
4081 first = emit_insn (gen_load_toc_v4_PIC_1b (gsym));
4082 emit_move_insn (tmp1,
4083 gen_rtx_REG (Pmode, LR_REGNO));
4084 emit_move_insn (tmp2, mem);
4085 emit_insn (gen_addsi3 (tmp3, tmp1, tmp2));
4086 last = emit_move_insn (got, tmp3);
4087 set_unique_reg_note (last, REG_EQUAL, gsym);
4092 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
4094 r3 = gen_rtx_REG (Pmode, 3);
4095 tga = rs6000_tls_get_addr ();
4097 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
4098 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
4099 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
4100 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
4101 else if (DEFAULT_ABI == ABI_V4)
4102 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
4107 insn = emit_call_insn (insn);
4108 RTL_CONST_CALL_P (insn) = 1;
4109 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r3);
4110 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
4111 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
4112 insn = get_insns ();
4114 emit_libcall_block (insn, dest, r3, addr);
4116 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
4118 r3 = gen_rtx_REG (Pmode, 3);
4119 tga = rs6000_tls_get_addr ();
4121 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
4122 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
4123 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
4124 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
4125 else if (DEFAULT_ABI == ABI_V4)
4126 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
4131 insn = emit_call_insn (insn);
4132 RTL_CONST_CALL_P (insn) = 1;
4133 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r3);
4134 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
4135 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
4136 insn = get_insns ();
4138 tmp1 = gen_reg_rtx (Pmode);
4139 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
4141 emit_libcall_block (insn, tmp1, r3, eqv);
4142 if (rs6000_tls_size == 16)
4145 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
4147 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
4149 else if (rs6000_tls_size == 32)
4151 tmp2 = gen_reg_rtx (Pmode);
4153 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
4155 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
4158 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
4160 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
4164 tmp2 = gen_reg_rtx (Pmode);
4166 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
4168 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
4170 insn = gen_rtx_SET (Pmode, dest,
4171 gen_rtx_PLUS (Pmode, tmp2, tmp1));
4177 /* IE, or 64-bit offset LE. */
4178 tmp2 = gen_reg_rtx (Pmode);
4180 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
4182 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
4185 insn = gen_tls_tls_64 (dest, tmp2, addr);
4187 insn = gen_tls_tls_32 (dest, tmp2, addr);
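/* Illustrative sketch (added commentary, not from the original source):
   for the local-exec model with rs6000_tls_size == 16 the code above
   amounts to a single add against the thread pointer (r13 in 64-bit
   mode, r2 in 32-bit mode), conceptually

       addi  rDEST, r2, symbol@tprel

   while the 32-bit-offset variant splits this into an addis of
   symbol@tprel@ha followed by an addi of symbol@tprel@l.  The exact
   assembler syntax comes from the tls_tprel_* patterns in rs6000.md and
   should be treated as an approximation here.  */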
4195 /* Return 1 if X contains a thread-local symbol. */
4198 rs6000_tls_referenced_p (rtx x)
4200 if (! TARGET_HAVE_TLS)
4203 return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
4206 /* Return 1 if *X is a thread-local symbol. This is the same as
4207 rs6000_tls_symbol_ref except for the type of the unused argument. */
4210 rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
4212 return RS6000_SYMBOL_REF_TLS_P (*x);
4215 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
4216 replace the input X, or the original X if no replacement is called for.
4217 The output parameter *WIN is 1 if the calling macro should goto WIN, 0 if it should not.
4220 For RS/6000, we wish to handle large displacements off a base
4221 register by splitting the addend across an addi/addis and the mem insn.
4222 This cuts the number of extra insns needed from 3 to 1.
4224 On Darwin, we use this to generate code for floating point constants.
4225 A movsf_low is generated so we wind up with 2 instructions rather than 3.
4226 The Darwin code is inside #if TARGET_MACHO because only then are the
4227 machopic_* functions defined. */
4229 rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
4230 int opnum, int type,
4231 int ind_levels ATTRIBUTE_UNUSED, int *win)
4233 /* We must recognize output that we have already generated ourselves. */
4234 if (GET_CODE (x) == PLUS
4235 && GET_CODE (XEXP (x, 0)) == PLUS
4236 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
4237 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4238 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4240 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4241 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4242 opnum, (enum reload_type)type);
4248 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
4249 && GET_CODE (x) == LO_SUM
4250 && GET_CODE (XEXP (x, 0)) == PLUS
4251 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
4252 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
4253 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
4254 && machopic_operand_p (XEXP (x, 1)))
4256 /* Result of previous invocation of this function on Darwin
4257 floating point constant. */
4258 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4259 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4260 opnum, (enum reload_type)type);
4266 /* Force ld/std non-word aligned offset into base register by wrapping in offset 0.  */
4268 if (GET_CODE (x) == PLUS
4269 && GET_CODE (XEXP (x, 0)) == REG
4270 && REGNO (XEXP (x, 0)) < 32
4271 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
4272 && GET_CODE (XEXP (x, 1)) == CONST_INT
4273 && (INTVAL (XEXP (x, 1)) & 3) != 0
4274 && !ALTIVEC_VECTOR_MODE (mode)
4275 && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
4276 && TARGET_POWERPC64)
4278 x = gen_rtx_PLUS (GET_MODE (x), x, GEN_INT (0));
4279 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4280 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4281 opnum, (enum reload_type) type);
4286 if (GET_CODE (x) == PLUS
4287 && GET_CODE (XEXP (x, 0)) == REG
4288 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
4289 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
4290 && GET_CODE (XEXP (x, 1)) == CONST_INT
4291 && !SPE_VECTOR_MODE (mode)
4292 && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
4293 || mode == DDmode || mode == TDmode
4295 && !ALTIVEC_VECTOR_MODE (mode))
4297 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
4298 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
4300 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
4302 /* Check for 32-bit overflow. */
4303 if (high + low != val)
4309 /* Reload the high part into a base reg; leave the low part
4310 in the mem directly. */
4312 x = gen_rtx_PLUS (GET_MODE (x),
4313 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
4317 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4318 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4319 opnum, (enum reload_type)type);
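      /* Worked example of the high/low split above (added commentary, not
	 from the original source): for val = 0x1234abcd,

	     low  = ((0xabcd ^ 0x8000) - 0x8000) = -0x5433
	     high = 0x1234abcd - (-0x5433)       =  0x12350000

	 so high + low == val, the high part can be reloaded into a base
	 register, and the low part still fits in the signed 16-bit
	 displacement of the memory insn.  */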
4324 if (GET_CODE (x) == SYMBOL_REF
4325 && !ALTIVEC_VECTOR_MODE (mode)
4326 && !SPE_VECTOR_MODE (mode)
4328 && DEFAULT_ABI == ABI_DARWIN
4329 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
4331 && DEFAULT_ABI == ABI_V4
4334 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
4335 The same goes for DImode without 64-bit gprs and DFmode and DDmode without fprs.  */
4339 && (mode != DImode || TARGET_POWERPC64)
4340 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
4341 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
4346 rtx offset = machopic_gen_offset (x);
4347 x = gen_rtx_LO_SUM (GET_MODE (x),
4348 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
4349 gen_rtx_HIGH (Pmode, offset)), offset);
4353 x = gen_rtx_LO_SUM (GET_MODE (x),
4354 gen_rtx_HIGH (Pmode, x), x);
4356 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4357 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4358 opnum, (enum reload_type)type);
4363 /* Reload an offset address wrapped by an AND that represents the
4364 masking of the lower bits. Strip the outer AND and let reload
4365 convert the offset address into an indirect address. */
4367 && ALTIVEC_VECTOR_MODE (mode)
4368 && GET_CODE (x) == AND
4369 && GET_CODE (XEXP (x, 0)) == PLUS
4370 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
4371 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4372 && GET_CODE (XEXP (x, 1)) == CONST_INT
4373 && INTVAL (XEXP (x, 1)) == -16)
4381 && GET_CODE (x) == SYMBOL_REF
4382 && constant_pool_expr_p (x)
4383 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), mode))
4385 x = create_TOC_reference (x);
4393 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
4394 that is a valid memory address for an instruction.
4395 The MODE argument is the machine mode for the MEM expression
4396 that wants to use this address.
4398 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
4399 refers to a constant pool entry of an address (or the sum of it
4400 plus a constant), a short (16-bit signed) constant plus a register,
4401 the sum of two registers, or a register indirect, possibly with an
4402 auto-increment.  For DFmode, DDmode and DImode with a constant plus
4403 register, we must ensure that both words are addressable, or on
4404 PowerPC64 that the offset is word aligned.
4406 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
4407 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
4408 because adjacent memory cells are accessed by adding word-sized offsets
4409 during assembly output. */
4411 rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
4413 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
4415 && ALTIVEC_VECTOR_MODE (mode)
4416 && GET_CODE (x) == AND
4417 && GET_CODE (XEXP (x, 1)) == CONST_INT
4418 && INTVAL (XEXP (x, 1)) == -16)
4421 if (RS6000_SYMBOL_REF_TLS_P (x))
4423 if (legitimate_indirect_address_p (x, reg_ok_strict))
4425 if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
4426 && !ALTIVEC_VECTOR_MODE (mode)
4427 && !SPE_VECTOR_MODE (mode)
4430 /* Restrict addressing for DI because of our SUBREG hackery. */
4431 && !(TARGET_E500_DOUBLE
4432 && (mode == DFmode || mode == DDmode || mode == DImode))
4434 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
4436 if (legitimate_small_data_p (mode, x))
4438 if (legitimate_constant_pool_address_p (x))
4440 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
4442 && GET_CODE (x) == PLUS
4443 && GET_CODE (XEXP (x, 0)) == REG
4444 && (XEXP (x, 0) == virtual_stack_vars_rtx
4445 || XEXP (x, 0) == arg_pointer_rtx)
4446 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4448 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict))
4453 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
4455 || (mode != DFmode && mode != DDmode)
4456 || (TARGET_E500_DOUBLE && mode != DDmode))
4457 && (TARGET_POWERPC64 || mode != DImode)
4458 && !avoiding_indexed_address_p (mode)
4459 && legitimate_indexed_address_p (x, reg_ok_strict))
4461 if (GET_CODE (x) == PRE_MODIFY
4465 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
4467 || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
4468 && (TARGET_POWERPC64 || mode != DImode)
4469 && !ALTIVEC_VECTOR_MODE (mode)
4470 && !SPE_VECTOR_MODE (mode)
4471 /* Restrict addressing for DI because of our SUBREG hackery. */
4472 && !(TARGET_E500_DOUBLE
4473 && (mode == DFmode || mode == DDmode || mode == DImode))
4475 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
4476 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1), reg_ok_strict)
4477 || (!avoiding_indexed_address_p (mode)
4478 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
4479 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
4481 if (legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
4486 /* Go to LABEL if ADDR (a legitimate address expression)
4487 has an effect that depends on the machine mode it is used for.
4489 On the RS/6000 this is true of all integral offsets (since AltiVec
4490 modes don't allow them) and of any pre-increment or decrement address.
4492 ??? Except that due to conceptual problems in offsettable_address_p
4493 we can't really report the problems of integral offsets. So leave
4494 this assuming that the adjustable offset must be valid for the
4495 sub-words of a TFmode operand, which is what we had before. */
4498 rs6000_mode_dependent_address (rtx addr)
4500 switch (GET_CODE (addr))
4503 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
4505 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
4506 return val + 12 + 0x8000 >= 0x10000;
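	/* Worked example (added commentary, not from the original source):
	   the "+ 12" allows for the largest sub-word displacement of a
	   16-byte (TFmode) operand.  An offset of 0x7ff0 is fine
	   (0x7ff0 + 12 + 0x8000 == 0xfffc < 0x10000), while 0x7ff8 is
	   reported as mode dependent because its last word at 0x7ff8 + 12
	   no longer fits a signed 16-bit offset.  */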
4513 /* Auto-increment cases are now treated generically in recog.c. */
4515 return TARGET_UPDATE;
4524 /* Implement FIND_BASE_TERM. */
4527 rs6000_find_base_term (rtx op)
4531 split_const (op, &base, &offset);
4532 if (GET_CODE (base) == UNSPEC)
4533 switch (XINT (base, 1))
4536 case UNSPEC_MACHOPIC_OFFSET:
4537 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
4538 for aliasing purposes. */
4539 return XVECEXP (base, 0, 0);
4545 /* More elaborate version of recog's offsettable_memref_p predicate
4546 that works around the ??? note of rs6000_mode_dependent_address.
4547 In particular it accepts
4549 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
4551 in 32-bit mode, which the recog predicate rejects.  */
4554 rs6000_offsettable_memref_p (rtx op)
4559 /* First mimic offsettable_memref_p. */
4560 if (offsettable_address_p (1, GET_MODE (op), XEXP (op, 0)))
4563 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
4564 the latter predicate knows nothing about the mode of the memory
4565 reference and, therefore, assumes that it is the largest supported
4566 mode (TFmode). As a consequence, legitimate offsettable memory
4567 references are rejected. rs6000_legitimate_offset_address_p contains
4568 the correct logic for the PLUS case of rs6000_mode_dependent_address. */
4569 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0), 1);
4572 /* Return number of consecutive hard regs needed starting at reg REGNO
4573 to hold something of mode MODE.
4574 This is ordinarily the length in words of a value of mode MODE
4575 but can be less for certain modes in special long registers.
4577 For the SPE, GPRs are 64 bits but only 32 bits are visible in
4578 scalar instructions.  The upper 32 bits are only available to the vector unit.
4581 POWER and PowerPC GPRs hold 32 bits worth;
4582 PowerPC64 GPRs and FPRs hold 64 bits worth.  */
4585 rs6000_hard_regno_nregs (int regno, enum machine_mode mode)
4587 if (FP_REGNO_P (regno))
4588 return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
4590 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
4591 return (GET_MODE_SIZE (mode) + UNITS_PER_SPE_WORD - 1) / UNITS_PER_SPE_WORD;
4593 if (ALTIVEC_REGNO_P (regno))
4595 (GET_MODE_SIZE (mode) + UNITS_PER_ALTIVEC_WORD - 1) / UNITS_PER_ALTIVEC_WORD;
4597 /* The value returned for SCmode in the E500 double case is 2 for
4598 ABI compatibility; storing an SCmode value in a single register
4599 would require function_arg and rs6000_spe_function_arg to handle
4600 SCmode so as to pass the value correctly in a pair of registers.  */
4602 if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
4603 && !DECIMAL_FLOAT_MODE_P (mode))
4604 return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
4606 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
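/* Worked examples (added commentary, not from the original source),
   assuming UNITS_PER_WORD == 4 in 32-bit mode, UNITS_PER_FP_WORD == 8,
   UNITS_PER_SPE_WORD == 8 and UNITS_PER_ALTIVEC_WORD == 16: a DFmode
   value (8 bytes) needs 2 GPRs but only 1 FPR; V4SImode (16 bytes) needs
   1 AltiVec register; an 8-byte SPE vector fits a single 64-bit GPR.  */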
4609 /* Change register usage conditional on target flags. */
4611 rs6000_conditional_register_usage (void)
4615 /* Set MQ register fixed (already call_used) if not POWER
4616 architecture (RIOS1, RIOS2, RSC, and PPC601) so that it will not be allocated.  */
4621 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
4623 fixed_regs[13] = call_used_regs[13]
4624 = call_really_used_regs[13] = 1;
4626 /* Conditionally disable FPRs. */
4627 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
4628 for (i = 32; i < 64; i++)
4629 fixed_regs[i] = call_used_regs[i]
4630 = call_really_used_regs[i] = 1;
4632 /* The TOC register is not killed across calls in a way that is
4633 visible to the compiler. */
4634 if (DEFAULT_ABI == ABI_AIX)
4635 call_really_used_regs[2] = 0;
4637 if (DEFAULT_ABI == ABI_V4
4638 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
4640 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
4642 if (DEFAULT_ABI == ABI_V4
4643 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
4645 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
4646 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
4647 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
4649 if (DEFAULT_ABI == ABI_DARWIN
4650 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
4651 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
4652 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
4653 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
4655 if (TARGET_TOC && TARGET_MINIMAL_TOC)
4656 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
4657 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
4661 global_regs[SPEFSCR_REGNO] = 1;
4662 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
4663 registers in prologues and epilogues. We no longer use r14
4664 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
4665 pool for link-compatibility with older versions of GCC. Once
4666 "old" code has died out, we can return r14 to the allocation
4669 = call_used_regs[14]
4670 = call_really_used_regs[14] = 1;
4673 if (!TARGET_ALTIVEC)
4675 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
4676 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
4677 call_really_used_regs[VRSAVE_REGNO] = 1;
4681 global_regs[VSCR_REGNO] = 1;
4683 if (TARGET_ALTIVEC_ABI)
4685 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
4686 call_used_regs[i] = call_really_used_regs[i] = 1;
4688 /* AIX reserves VR20:31 in non-extended ABI mode. */
4690 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
4691 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
4695 /* Try to output insns to set TARGET equal to the constant C if it can
4696 be done in less than N insns. Do all computations in MODE.
4697 Returns the place where the output has been placed if it can be
4698 done and the insns have been emitted. If it would take more than N
4699 insns, zero is returned and no insns are emitted.  */
4702 rs6000_emit_set_const (rtx dest, enum machine_mode mode,
4703 rtx source, int n ATTRIBUTE_UNUSED)
4705 rtx result, insn, set;
4706 HOST_WIDE_INT c0, c1;
4713 dest = gen_reg_rtx (mode);
4714 emit_insn (gen_rtx_SET (VOIDmode, dest, source));
4718 result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
4720 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
4721 GEN_INT (INTVAL (source)
4722 & (~ (HOST_WIDE_INT) 0xffff))));
4723 emit_insn (gen_rtx_SET (VOIDmode, dest,
4724 gen_rtx_IOR (SImode, copy_rtx (result),
4725 GEN_INT (INTVAL (source) & 0xffff))));
4730 switch (GET_CODE (source))
4733 c0 = INTVAL (source);
4738 #if HOST_BITS_PER_WIDE_INT >= 64
4739 c0 = CONST_DOUBLE_LOW (source);
4742 c0 = CONST_DOUBLE_LOW (source);
4743 c1 = CONST_DOUBLE_HIGH (source);
4751 result = rs6000_emit_set_long_const (dest, c0, c1);
4758 insn = get_last_insn ();
4759 set = single_set (insn);
4760 if (! CONSTANT_P (SET_SRC (set)))
4761 set_unique_reg_note (insn, REG_EQUAL, source);
4766 /* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
4767 fall back to a straightforward decomposition.  We do this to avoid
4768 exponential run times encountered when looking for longer sequences
4769 with rs6000_emit_set_const. */
4771 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
4773 if (!TARGET_POWERPC64)
4775 rtx operand1, operand2;
4777 operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
4779 operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
4781 emit_move_insn (operand1, GEN_INT (c1));
4782 emit_move_insn (operand2, GEN_INT (c2));
4786 HOST_WIDE_INT ud1, ud2, ud3, ud4;
4789 ud2 = (c1 & 0xffff0000) >> 16;
4790 #if HOST_BITS_PER_WIDE_INT >= 64
4794 ud4 = (c2 & 0xffff0000) >> 16;
4796 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
4797 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
4800 emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
4802 emit_move_insn (dest, GEN_INT (ud1));
4805 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
4806 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
4809 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
4812 emit_move_insn (dest, GEN_INT (ud2 << 16));
4814 emit_move_insn (copy_rtx (dest),
4815 gen_rtx_IOR (DImode, copy_rtx (dest),
4818 else if ((ud4 == 0xffff && (ud3 & 0x8000))
4819 || (ud4 == 0 && ! (ud3 & 0x8000)))
4822 emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
4825 emit_move_insn (dest, GEN_INT (ud3 << 16));
4828 emit_move_insn (copy_rtx (dest),
4829 gen_rtx_IOR (DImode, copy_rtx (dest),
4831 emit_move_insn (copy_rtx (dest),
4832 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
4835 emit_move_insn (copy_rtx (dest),
4836 gen_rtx_IOR (DImode, copy_rtx (dest),
4842 emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
4845 emit_move_insn (dest, GEN_INT (ud4 << 16));
4848 emit_move_insn (copy_rtx (dest),
4849 gen_rtx_IOR (DImode, copy_rtx (dest),
4852 emit_move_insn (copy_rtx (dest),
4853 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
4856 emit_move_insn (copy_rtx (dest),
4857 gen_rtx_IOR (DImode, copy_rtx (dest),
4858 GEN_INT (ud2 << 16)));
4860 emit_move_insn (copy_rtx (dest),
4861 gen_rtx_IOR (DImode, copy_rtx (dest), GEN_INT (ud1)));
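      /* Worked example (added commentary, not from the original source):
	 for c = 0x123456789abcdef0 the halfwords are ud4 = 0x1234,
	 ud3 = 0x5678, ud2 = 0x9abc and ud1 = 0xdef0, and the general case
	 above produces roughly the usual five-instruction PowerPC64
	 sequence

	     lis   rD, 0x1234
	     ori   rD, rD, 0x5678
	     sldi  rD, rD, 32
	     oris  rD, rD, 0x9abc
	     ori   rD, rD, 0xdef0

	 (the exact insns depend on how the ior/ashift RTL is matched).  */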
4867 /* Helper for the following. Get rid of [r+r] memory refs
4868 in cases where it won't work (TImode, TFmode, TDmode). */
4871 rs6000_eliminate_indexed_memrefs (rtx operands[2])
4873 if (GET_CODE (operands[0]) == MEM
4874 && GET_CODE (XEXP (operands[0], 0)) != REG
4875 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0))
4876 && ! reload_in_progress)
4878 = replace_equiv_address (operands[0],
4879 copy_addr_to_reg (XEXP (operands[0], 0)));
4881 if (GET_CODE (operands[1]) == MEM
4882 && GET_CODE (XEXP (operands[1], 0)) != REG
4883 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0))
4884 && ! reload_in_progress)
4886 = replace_equiv_address (operands[1],
4887 copy_addr_to_reg (XEXP (operands[1], 0)));
4890 /* Emit a move from SOURCE to DEST in mode MODE. */
4892 rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
4896 operands[1] = source;
4898 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
4899 if (GET_CODE (operands[1]) == CONST_DOUBLE
4900 && ! FLOAT_MODE_P (mode)
4901 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
4903 /* FIXME. This should never happen. */
4904 /* Since it seems that it does, do the safe thing and convert to a CONST_INT.  */
4906 operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
4908 gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
4909 || FLOAT_MODE_P (mode)
4910 || ((CONST_DOUBLE_HIGH (operands[1]) != 0
4911 || CONST_DOUBLE_LOW (operands[1]) < 0)
4912 && (CONST_DOUBLE_HIGH (operands[1]) != -1
4913 || CONST_DOUBLE_LOW (operands[1]) >= 0)));
4915 /* Check if GCC is setting up a block move that will end up using FP
4916 registers as temporaries. We must make sure this is acceptable. */
4917 if (GET_CODE (operands[0]) == MEM
4918 && GET_CODE (operands[1]) == MEM
4920 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
4921 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
4922 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
4923 ? 32 : MEM_ALIGN (operands[0])))
4924 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
4926 : MEM_ALIGN (operands[1]))))
4927 && ! MEM_VOLATILE_P (operands [0])
4928 && ! MEM_VOLATILE_P (operands [1]))
4930 emit_move_insn (adjust_address (operands[0], SImode, 0),
4931 adjust_address (operands[1], SImode, 0));
4932 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
4933 adjust_address (copy_rtx (operands[1]), SImode, 4));
4937 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
4938 && !gpc_reg_operand (operands[1], mode))
4939 operands[1] = force_reg (mode, operands[1]);
4941 if (mode == SFmode && ! TARGET_POWERPC
4942 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
4943 && GET_CODE (operands[0]) == MEM)
4947 if (reload_in_progress || reload_completed)
4948 regnum = true_regnum (operands[1]);
4949 else if (GET_CODE (operands[1]) == REG)
4950 regnum = REGNO (operands[1]);
4954 /* If operands[1] is a register, on POWER it may have
4955 double-precision data in it, so truncate it to single precision.  */
4957 if (FP_REGNO_P (regnum) || regnum >= FIRST_PSEUDO_REGISTER)
4960 newreg = (!can_create_pseudo_p () ? copy_rtx (operands[1])
4961 : gen_reg_rtx (mode));
4962 emit_insn (gen_aux_truncdfsf2 (newreg, operands[1]));
4963 operands[1] = newreg;
4967 /* Recognize the case where operand[1] is a reference to thread-local
4968 data and load its address to a register. */
4969 if (rs6000_tls_referenced_p (operands[1]))
4971 enum tls_model model;
4972 rtx tmp = operands[1];
4975 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
4977 addend = XEXP (XEXP (tmp, 0), 1);
4978 tmp = XEXP (XEXP (tmp, 0), 0);
4981 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
4982 model = SYMBOL_REF_TLS_MODEL (tmp);
4983 gcc_assert (model != 0);
4985 tmp = rs6000_legitimize_tls_address (tmp, model);
4988 tmp = gen_rtx_PLUS (mode, tmp, addend);
4989 tmp = force_operand (tmp, operands[0]);
4994 /* Handle the case where reload calls us with an invalid address. */
4995 if (reload_in_progress && mode == Pmode
4996 && (! general_operand (operands[1], mode)
4997 || ! nonimmediate_operand (operands[0], mode)))
5000 /* 128-bit constant floating-point values on Darwin should really be
5001 loaded as two parts. */
5002 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
5003 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
5005 /* DImode is used, not DFmode, because simplify_gen_subreg doesn't
5006 know how to get a DFmode SUBREG of a TFmode. */
5007 enum machine_mode imode = (TARGET_E500_DOUBLE ? DFmode : DImode);
5008 rs6000_emit_move (simplify_gen_subreg (imode, operands[0], mode, 0),
5009 simplify_gen_subreg (imode, operands[1], mode, 0),
5011 rs6000_emit_move (simplify_gen_subreg (imode, operands[0], mode,
5012 GET_MODE_SIZE (imode)),
5013 simplify_gen_subreg (imode, operands[1], mode,
5014 GET_MODE_SIZE (imode)),
5019 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
5020 cfun->machine->sdmode_stack_slot =
5021 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
5023 if (reload_in_progress
5025 && MEM_P (operands[0])
5026 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
5027 && REG_P (operands[1]))
5029 if (FP_REGNO_P (REGNO (operands[1])))
5031 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
5032 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
5033 emit_insn (gen_movsd_store (mem, operands[1]));
5035 else if (INT_REGNO_P (REGNO (operands[1])))
5037 rtx mem = adjust_address_nv (operands[0], mode, 4);
5038 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
5039 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
5045 if (reload_in_progress
5047 && REG_P (operands[0])
5048 && MEM_P (operands[1])
5049 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
5051 if (FP_REGNO_P (REGNO (operands[0])))
5053 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
5054 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
5055 emit_insn (gen_movsd_load (operands[0], mem));
5057 else if (INT_REGNO_P (REGNO (operands[0])))
5059 rtx mem = adjust_address_nv (operands[1], mode, 4);
5060 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
5061 emit_insn (gen_movsd_hardfloat (operands[0], mem));
5068 /* FIXME: In the long term, this switch statement should go away
5069 and be replaced by a sequence of tests based on things like GET_MODE_CLASS.  */
5075 if (CONSTANT_P (operands[1])
5076 && GET_CODE (operands[1]) != CONST_INT)
5077 operands[1] = force_const_mem (mode, operands[1]);
5082 rs6000_eliminate_indexed_memrefs (operands);
5089 if (CONSTANT_P (operands[1])
5090 && ! easy_fp_constant (operands[1], mode))
5091 operands[1] = force_const_mem (mode, operands[1]);
5102 if (CONSTANT_P (operands[1])
5103 && !easy_vector_constant (operands[1], mode))
5104 operands[1] = force_const_mem (mode, operands[1]);
5109 /* Use default pattern for address of ELF small data */
5112 && DEFAULT_ABI == ABI_V4
5113 && (GET_CODE (operands[1]) == SYMBOL_REF
5114 || GET_CODE (operands[1]) == CONST)
5115 && small_data_operand (operands[1], mode))
5117 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
5121 if (DEFAULT_ABI == ABI_V4
5122 && mode == Pmode && mode == SImode
5123 && flag_pic == 1 && got_operand (operands[1], mode))
5125 emit_insn (gen_movsi_got (operands[0], operands[1]));
5129 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
5133 && CONSTANT_P (operands[1])
5134 && GET_CODE (operands[1]) != HIGH
5135 && GET_CODE (operands[1]) != CONST_INT)
5137 rtx target = (!can_create_pseudo_p ()
5139 : gen_reg_rtx (mode));
5141 /* If this is a function address on -mcall-aixdesc,
5142 convert it to the address of the descriptor. */
5143 if (DEFAULT_ABI == ABI_AIX
5144 && GET_CODE (operands[1]) == SYMBOL_REF
5145 && XSTR (operands[1], 0)[0] == '.')
5147 const char *name = XSTR (operands[1], 0);
5149 while (*name == '.')
5151 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
5152 CONSTANT_POOL_ADDRESS_P (new_ref)
5153 = CONSTANT_POOL_ADDRESS_P (operands[1]);
5154 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
5155 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
5156 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
5157 operands[1] = new_ref;
5160 if (DEFAULT_ABI == ABI_DARWIN)
5163 if (MACHO_DYNAMIC_NO_PIC_P)
5165 /* Take care of any required data indirection. */
5166 operands[1] = rs6000_machopic_legitimize_pic_address (
5167 operands[1], mode, operands[0]);
5168 if (operands[0] != operands[1])
5169 emit_insn (gen_rtx_SET (VOIDmode,
5170 operands[0], operands[1]));
5174 emit_insn (gen_macho_high (target, operands[1]));
5175 emit_insn (gen_macho_low (operands[0], target, operands[1]));
5179 emit_insn (gen_elf_high (target, operands[1]));
5180 emit_insn (gen_elf_low (operands[0], target, operands[1]));
5184 /* If this is a SYMBOL_REF that refers to a constant pool entry,
5185 and we have put it in the TOC, we just need to make a TOC-relative reference to it.  */
5188 && GET_CODE (operands[1]) == SYMBOL_REF
5189 && constant_pool_expr_p (operands[1])
5190 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (operands[1]),
5191 get_pool_mode (operands[1])))
5193 operands[1] = create_TOC_reference (operands[1]);
5195 else if (mode == Pmode
5196 && CONSTANT_P (operands[1])
5197 && ((GET_CODE (operands[1]) != CONST_INT
5198 && ! easy_fp_constant (operands[1], mode))
5199 || (GET_CODE (operands[1]) == CONST_INT
5200 && num_insns_constant (operands[1], mode) > 2)
5201 || (GET_CODE (operands[0]) == REG
5202 && FP_REGNO_P (REGNO (operands[0]))))
5203 && GET_CODE (operands[1]) != HIGH
5204 && ! legitimate_constant_pool_address_p (operands[1])
5205 && ! toc_relative_expr_p (operands[1]))
5207 /* Emit a USE operation so that the constant isn't deleted if
5208 expensive optimizations are turned on because nobody
5209 references it. This should only be done for operands that
5210 contain SYMBOL_REFs with CONSTANT_POOL_ADDRESS_P set.
5211 This should not be done for operands that contain LABEL_REFs.
5212 For now, we just handle the obvious case. */
5213 if (GET_CODE (operands[1]) != LABEL_REF)
5214 emit_use (operands[1]);
5217 /* Darwin uses a special PIC legitimizer. */
5218 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
5221 rs6000_machopic_legitimize_pic_address (operands[1], mode,
5223 if (operands[0] != operands[1])
5224 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
5229 /* If we are to limit the number of things we put in the TOC and
5230 this is a symbol plus a constant we can add in one insn,
5231 just put the symbol in the TOC and add the constant. Don't do
5232 this if reload is in progress. */
5233 if (GET_CODE (operands[1]) == CONST
5234 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
5235 && GET_CODE (XEXP (operands[1], 0)) == PLUS
5236 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
5237 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
5238 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
5239 && ! side_effects_p (operands[0]))
5242 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
5243 rtx other = XEXP (XEXP (operands[1], 0), 1);
5245 sym = force_reg (mode, sym);
5247 emit_insn (gen_addsi3 (operands[0], sym, other));
5249 emit_insn (gen_adddi3 (operands[0], sym, other));
5253 operands[1] = force_const_mem (mode, operands[1]);
5256 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
5257 && constant_pool_expr_p (XEXP (operands[1], 0))
5258 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
5259 get_pool_constant (XEXP (operands[1], 0)),
5260 get_pool_mode (XEXP (operands[1], 0))))
5263 = gen_const_mem (mode,
5264 create_TOC_reference (XEXP (operands[1], 0)));
5265 set_mem_alias_set (operands[1], get_TOC_alias_set ());
5271 rs6000_eliminate_indexed_memrefs (operands);
5275 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5277 gen_rtx_SET (VOIDmode,
5278 operands[0], operands[1]),
5279 gen_rtx_CLOBBER (VOIDmode,
5280 gen_rtx_SCRATCH (SImode)))));
5289 /* Above, we may have called force_const_mem which may have returned
5290 an invalid address. If we can, fix this up; otherwise, reload will
5291 have to deal with it. */
5292 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
5293 operands[1] = validize_mem (operands[1]);
5296 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
5299 /* Nonzero if we can use a floating-point register to pass this arg. */
5300 #define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
5301 (SCALAR_FLOAT_MODE_P (MODE) \
5302 && (CUM)->fregno <= FP_ARG_MAX_REG \
5303 && TARGET_HARD_FLOAT && TARGET_FPRS)
5305 /* Nonzero if we can use an AltiVec register to pass this arg. */
5306 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED) \
5307 (ALTIVEC_VECTOR_MODE (MODE) \
5308 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
5309 && TARGET_ALTIVEC_ABI \
5312 /* Return a nonzero value to say to return the function value in
5313 memory, just as large structures are always returned. TYPE will be
5314 the data type of the value, and FNTYPE will be the type of the
5315 function doing the returning, or @code{NULL} for libcalls.
5317 The AIX ABI for the RS/6000 specifies that all structures are
5318 returned in memory. The Darwin ABI does the same. The SVR4 ABI
5319 specifies that structures <= 8 bytes are returned in r3/r4, but a
5320 draft put them in memory, and GCC used to implement the draft
5321 instead of the final standard. Therefore, aix_struct_return
5322 controls this instead of DEFAULT_ABI; V.4 targets needing backward
5323 compatibility can change DRAFT_V4_STRUCT_RET to override the
5324 default, and -m switches get the final word. See
5325 rs6000_override_options for more details.
5327 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
5328 long double support is enabled. These values are returned in memory.
5330 int_size_in_bytes returns -1 for variable size objects, which go in
5331 memory always. The cast to unsigned makes -1 > 8. */
5334 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5336 /* In the darwin64 abi, try to use registers for larger structs if possible.  */
5338 if (rs6000_darwin64_abi
5339 && TREE_CODE (type) == RECORD_TYPE
5340 && int_size_in_bytes (type) > 0)
5342 CUMULATIVE_ARGS valcum;
5346 valcum.fregno = FP_ARG_MIN_REG;
5347 valcum.vregno = ALTIVEC_ARG_MIN_REG;
5348 /* Do a trial code generation as if this were going to be passed
5349 as an argument; if any part goes in memory, we return NULL. */
5350 valret = rs6000_darwin64_record_arg (&valcum, type, 1, true);
5353 /* Otherwise fall through to more conventional ABI rules. */
5356 if (AGGREGATE_TYPE_P (type)
5357 && (aix_struct_return
5358 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
5361 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
5362 modes only exist for GCC vector types if -maltivec. */
5363 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
5364 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
5367 /* Return synthetic vectors in memory. */
5368 if (TREE_CODE (type) == VECTOR_TYPE
5369 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
5371 static bool warned_for_return_big_vectors = false;
5372 if (!warned_for_return_big_vectors)
5374 warning (0, "GCC vector returned by reference: "
5375 "non-standard ABI extension with no compatibility guarantee");
5376 warned_for_return_big_vectors = true;
5381 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
5387 /* Initialize a variable CUM of type CUMULATIVE_ARGS
5388 for a call to a function whose data type is FNTYPE.
5389 For a library call, FNTYPE is 0.
5391 For incoming args we set the number of arguments in the prototype large
5392 so we never return a PARALLEL. */
5395 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
5396 rtx libname ATTRIBUTE_UNUSED, int incoming,
5397 int libcall, int n_named_args)
5399 static CUMULATIVE_ARGS zero_cumulative;
5401 *cum = zero_cumulative;
5403 cum->fregno = FP_ARG_MIN_REG;
5404 cum->vregno = ALTIVEC_ARG_MIN_REG;
5405 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
5406 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
5407 ? CALL_LIBCALL : CALL_NORMAL);
5408 cum->sysv_gregno = GP_ARG_MIN_REG;
5409 cum->stdarg = fntype
5410 && (TYPE_ARG_TYPES (fntype) != 0
5411 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5412 != void_type_node));
5414 cum->nargs_prototype = 0;
5415 if (incoming || cum->prototype)
5416 cum->nargs_prototype = n_named_args;
5418 /* Check for a longcall attribute. */
5419 if ((!fntype && rs6000_default_long_calls)
5421 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
5422 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
5423 cum->call_cookie |= CALL_LONG;
5425 if (TARGET_DEBUG_ARG)
5427 fprintf (stderr, "\ninit_cumulative_args:");
5430 tree ret_type = TREE_TYPE (fntype);
5431 fprintf (stderr, " ret code = %s,",
5432 tree_code_name[ (int)TREE_CODE (ret_type) ]);
5435 if (cum->call_cookie & CALL_LONG)
5436 fprintf (stderr, " longcall,");
5438 fprintf (stderr, " proto = %d, nargs = %d\n",
5439 cum->prototype, cum->nargs_prototype);
5444 && TARGET_ALTIVEC_ABI
5445 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
5447 error ("cannot return value in vector register because"
5448 " altivec instructions are disabled, use -maltivec"
5453 /* Return true if TYPE must be passed on the stack and not in registers. */
5456 rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
5458 if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
5459 return must_pass_in_stack_var_size (mode, type);
5461 return must_pass_in_stack_var_size_or_pad (mode, type);
5464 /* If defined, a C expression which determines whether, and in which
5465 direction, to pad out an argument with extra space. The value
5466 should be of type `enum direction': either `upward' to pad above
5467 the argument, `downward' to pad below, or `none' to inhibit padding.
5470 For the AIX ABI structs are always stored left shifted in their argument slot.  */
5474 function_arg_padding (enum machine_mode mode, const_tree type)
5476 #ifndef AGGREGATE_PADDING_FIXED
5477 #define AGGREGATE_PADDING_FIXED 0
5479 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
5480 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
5483 if (!AGGREGATE_PADDING_FIXED)
5485 /* GCC used to pass structures of the same size as integer types as
5486 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
5487 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
5488 passed padded downward, except that -mstrict-align further
5489 muddied the water in that multi-component structures of 2 and 4
5490 bytes in size were passed padded upward.
5492 The following arranges for best compatibility with previous
5493 versions of gcc, but removes the -mstrict-align dependency. */
5494 if (BYTES_BIG_ENDIAN)
5496 HOST_WIDE_INT size = 0;
5498 if (mode == BLKmode)
5500 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
5501 size = int_size_in_bytes (type);
5504 size = GET_MODE_SIZE (mode);
5506 if (size == 1 || size == 2 || size == 4)
5512 if (AGGREGATES_PAD_UPWARD_ALWAYS)
5514 if (type != 0 && AGGREGATE_TYPE_P (type))
5518 /* Fall back to the default. */
5519 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5522 /* If defined, a C expression that gives the alignment boundary, in bits,
5523 of an argument with the specified mode and type. If it is not defined,
5524 PARM_BOUNDARY is used for all arguments.
5526 V.4 wants long longs and doubles to be double word aligned. Just
5527 testing the mode size is a boneheaded way to do this as it means
5528 that other types such as complex int are also double word aligned.
5529 However, we're stuck with this because changing the ABI might break
5530 existing library interfaces.
5532 Doubleword align SPE vectors.
5533 Quadword align Altivec vectors.
5534 Quadword align large synthetic vector types. */
5537 function_arg_boundary (enum machine_mode mode, tree type)
5539 if (DEFAULT_ABI == ABI_V4
5540 && (GET_MODE_SIZE (mode) == 8
5541 || (TARGET_HARD_FLOAT
5543 && (mode == TFmode || mode == TDmode))))
5545 else if (SPE_VECTOR_MODE (mode)
5546 || (type && TREE_CODE (type) == VECTOR_TYPE
5547 && int_size_in_bytes (type) >= 8
5548 && int_size_in_bytes (type) < 16))
5550 else if (ALTIVEC_VECTOR_MODE (mode)
5551 || (type && TREE_CODE (type) == VECTOR_TYPE
5552 && int_size_in_bytes (type) >= 16))
5554 else if (rs6000_darwin64_abi && mode == BLKmode
5555 && type && TYPE_ALIGN (type) > 64)
5558 return PARM_BOUNDARY;
5561 /* For a function parm of MODE and TYPE, return the starting word in
5562 the parameter area. NWORDS of the parameter area are already used. */
5565 rs6000_parm_start (enum machine_mode mode, tree type, unsigned int nwords)
5568 unsigned int parm_offset;
5570 align = function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
5571 parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
5572 return nwords + (-(parm_offset + nwords) & align);
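/* Worked example (added commentary, not from the original source),
   assuming a 32-bit V4 target where PARM_BOUNDARY == 32 and
   parm_offset == 2: a doubleword-aligned argument (boundary 64, so
   align == 1) arriving when nwords == 3 starts at word
   3 + (-(2 + 3) & 1) == 4; together with the 2-word offset of the
   parameter save area that is an even word, i.e. doubleword aligned.  */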
5575 /* Compute the size (in words) of a function argument. */
5577 static unsigned long
5578 rs6000_arg_size (enum machine_mode mode, tree type)
5582 if (mode != BLKmode)
5583 size = GET_MODE_SIZE (mode);
5585 size = int_size_in_bytes (type);
5588 return (size + 3) >> 2;
5590 return (size + 7) >> 3;
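/* Worked example (added commentary, not from the original source): a
   10-byte BLKmode struct occupies (10 + 3) >> 2 == 3 parameter words on
   a 32-bit target and (10 + 7) >> 3 == 2 words on a 64-bit target.  */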
5593 /* Use this to flush pending int fields. */
5596 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
5597 HOST_WIDE_INT bitpos)
5599 unsigned int startbit, endbit;
5600 int intregs, intoffset;
5601 enum machine_mode mode;
5603 if (cum->intoffset == -1)
5606 intoffset = cum->intoffset;
5607 cum->intoffset = -1;
5609 if (intoffset % BITS_PER_WORD != 0)
5611 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5613 if (mode == BLKmode)
5615 /* We couldn't find an appropriate mode, which happens,
5616 e.g., in packed structs when there are 3 bytes to load.
5617 Back intoffset back to the beginning of the word in this case.  */
5619 intoffset = intoffset & -BITS_PER_WORD;
5623 startbit = intoffset & -BITS_PER_WORD;
5624 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5625 intregs = (endbit - startbit) / BITS_PER_WORD;
5626 cum->words += intregs;
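/* Worked example (added commentary, not from the original source),
   assuming BITS_PER_WORD == 64: pending integer fields starting at bit
   72 and flushed at bitpos 200 give startbit == 64 and endbit == 256, so
   (256 - 64) / 64 == 3 GPR-sized chunks are charged to cum->words.  */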
5629 /* The darwin64 ABI calls for us to recurse down through structs,
5630 looking for elements passed in registers. Unfortunately, we have
5631 to track int register count here also because of misalignments
5632 in powerpc alignment mode. */
5635 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
5637 HOST_WIDE_INT startbitpos)
5641 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
5642 if (TREE_CODE (f) == FIELD_DECL)
5644 HOST_WIDE_INT bitpos = startbitpos;
5645 tree ftype = TREE_TYPE (f);
5646 enum machine_mode mode;
5647 if (ftype == error_mark_node)
5649 mode = TYPE_MODE (ftype);
5651 if (DECL_SIZE (f) != 0
5652 && host_integerp (bit_position (f), 1))
5653 bitpos += int_bit_position (f);
5655 /* ??? FIXME: else assume zero offset. */
5657 if (TREE_CODE (ftype) == RECORD_TYPE)
5658 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
5659 else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
5661 rs6000_darwin64_record_arg_advance_flush (cum, bitpos);
5662 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
5663 cum->words += (GET_MODE_SIZE (mode) + 7) >> 3;
5665 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
5667 rs6000_darwin64_record_arg_advance_flush (cum, bitpos);
5671 else if (cum->intoffset == -1)
5672 cum->intoffset = bitpos;
5676 /* Update the data in CUM to advance over an argument
5677 of mode MODE and data type TYPE.
5678 (TYPE is null for libcalls where that information may not be available.)
5680 Note that for args passed by reference, function_arg will be called
5681 with MODE and TYPE set to that of the pointer to the arg, not the arg itself.  */
5685 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5686 tree type, int named, int depth)
5690 /* Only tick off an argument if we're not recursing. */
5692 cum->nargs_prototype--;
5694 if (TARGET_ALTIVEC_ABI
5695 && (ALTIVEC_VECTOR_MODE (mode)
5696 || (type && TREE_CODE (type) == VECTOR_TYPE
5697 && int_size_in_bytes (type) == 16)))
5701 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
5704 if (!TARGET_ALTIVEC)
5705 error ("cannot pass argument in vector register because"
5706 " altivec instructions are disabled, use -maltivec"
5709 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
5710 even if it is going to be passed in a vector register.
5711 Darwin does the same for variable-argument functions. */
5712 if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
5713 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
5723 /* Vector parameters must be 16-byte aligned. This places
5724 them at 2 mod 4 in terms of words in 32-bit mode, since
5725 the parameter save area starts at offset 24 from the
5726 stack. In 64-bit mode, they just have to start on an
5727 even word, since the parameter save area is 16-byte
5728 aligned. Space for GPRs is reserved even if the argument
5729 will be passed in memory. */
5731 align = (2 - cum->words) & 3;
5733 align = cum->words & 1;
5734 cum->words += align + rs6000_arg_size (mode, type);
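	  /* Worked example (added commentary, not from the original
	     source): in 32-bit mode with cum->words == 5, align ==
	     (2 - 5) & 3 == 1, so the vector argument starts at word 6,
	     which is 2 mod 4 and therefore 16-byte aligned once the
	     24-byte parameter-area offset is added.  */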
5736 if (TARGET_DEBUG_ARG)
5738 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
5740 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
5741 cum->nargs_prototype, cum->prototype,
5742 GET_MODE_NAME (mode));
5746 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
5748 && cum->sysv_gregno <= GP_ARG_MAX_REG)
5751 else if (rs6000_darwin64_abi
5753 && TREE_CODE (type) == RECORD_TYPE
5754 && (size = int_size_in_bytes (type)) > 0)
5756 /* Variable sized types have size == -1 and are
5757 treated as if consisting entirely of ints.
5758 Pad to 16 byte boundary if needed. */
5759 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
5760 && (cum->words % 2) != 0)
5762 /* For varargs, we can just go up by the size of the struct. */
5764 cum->words += (size + 7) / 8;
5767 /* It is tempting to say int register count just goes up by
5768 sizeof(type)/8, but this is wrong in a case such as
5769 { int; double; int; } [powerpc alignment]. We have to
5770 grovel through the fields for these too. */
5772 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
5773 rs6000_darwin64_record_arg_advance_flush (cum,
5774 size * BITS_PER_UNIT);
5777 else if (DEFAULT_ABI == ABI_V4)
5779 if (TARGET_HARD_FLOAT && TARGET_FPRS
5780 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
5781 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
5782 || (mode == TFmode && !TARGET_IEEEQUAD)
5783 || mode == SDmode || mode == DDmode || mode == TDmode))
5785 /* _Decimal128 must use an even/odd register pair. This assumes
5786 that the register number is odd when fregno is odd. */
5787 if (mode == TDmode && (cum->fregno % 2) == 1)
5790 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
5791 <= FP_ARG_V4_MAX_REG)
5792 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
5795 cum->fregno = FP_ARG_V4_MAX_REG + 1;
5796 if (mode == DFmode || mode == TFmode
5797 || mode == DDmode || mode == TDmode)
5798 cum->words += cum->words & 1;
5799 cum->words += rs6000_arg_size (mode, type);
5804 int n_words = rs6000_arg_size (mode, type);
5805 int gregno = cum->sysv_gregno;
5807 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
5808 (r7,r8) or (r9,r10).  As is any other 2 word item such
5809 as complex int due to a historical mistake. */
5811 gregno += (1 - gregno) & 1;
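	  /* Worked example (added commentary, not from the original
	     source): the adjustment above forces the pair to start in an
	     odd-numbered GPR, e.g. gregno == 4 becomes 5 (the (r5,r6)
	     pair) while gregno == 5 is left alone, matching the
	     (r3,r4)/(r5,r6)/(r7,r8)/(r9,r10) layout described above.  */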
5813 /* Multi-reg args are not split between registers and stack. */
5814 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
5816 /* Long long and SPE vectors are aligned on the stack.
5817 So are other 2 word items such as complex int due to
5818 a historical mistake. */
5820 cum->words += cum->words & 1;
5821 cum->words += n_words;
5824 /* Note: we continue to accumulate gregno even after we have started
5825 spilling to the stack; this tells expand_builtin_saveregs that
5826 spilling has started.  */
5827 cum->sysv_gregno = gregno + n_words;
5830 if (TARGET_DEBUG_ARG)
5832 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
5833 cum->words, cum->fregno);
5834 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
5835 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
5836 fprintf (stderr, "mode = %4s, named = %d\n",
5837 GET_MODE_NAME (mode), named);
5842 int n_words = rs6000_arg_size (mode, type);
5843 int start_words = cum->words;
5844 int align_words = rs6000_parm_start (mode, type, start_words);
5846 cum->words = align_words + n_words;
5848 if (SCALAR_FLOAT_MODE_P (mode)
5849 && TARGET_HARD_FLOAT && TARGET_FPRS)
5851 /* _Decimal128 must be passed in an even/odd float register pair.
5852 This assumes that the register number is odd when fregno is
5854 if (mode == TDmode && (cum->fregno % 2) == 1)
5856 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
5859 if (TARGET_DEBUG_ARG)
5861 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
5862 cum->words, cum->fregno);
5863 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
5864 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
5865 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
5866 named, align_words - start_words, depth);
5872 spe_build_register_parallel (enum machine_mode mode, int gregno)
5879 r1 = gen_rtx_REG (DImode, gregno);
5880 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
5881 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
5885 r1 = gen_rtx_REG (DImode, gregno);
5886 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
5887 r3 = gen_rtx_REG (DImode, gregno + 2);
5888 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
5889 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
5892 r1 = gen_rtx_REG (DImode, gregno);
5893 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
5894 r3 = gen_rtx_REG (DImode, gregno + 2);
5895 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
5896 r5 = gen_rtx_REG (DImode, gregno + 4);
5897 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
5898 r7 = gen_rtx_REG (DImode, gregno + 6);
5899 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
5900 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
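/* Illustrative shape (added commentary, not from the original source):
   for the two-register case starting at gregno == 5 the result is
   roughly

       (parallel [(expr_list (reg:DI 5) (const_int 0))
		  (expr_list (reg:DI 7) (const_int 8))])

   i.e. 8-byte chunks at byte offsets 0 and 8, with the register number
   advancing by two per chunk as in the code above.  */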
5907 /* Determine where to put a SIMD argument on the SPE. */
5909 rs6000_spe_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5912 int gregno = cum->sysv_gregno;
5914 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
5915 doubles are passed and returned in a pair of GPRs for ABI compatibility.  */
5916 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
5917 || mode == DCmode || mode == TCmode))
5919 int n_words = rs6000_arg_size (mode, type);
5921 /* Doubles go in an odd/even register pair (r5/r6, etc). */
5923 gregno += (1 - gregno) & 1;
5925 /* Multi-reg args are not split between registers and stack. */
5926 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
5929 return spe_build_register_parallel (mode, gregno);
5933 int n_words = rs6000_arg_size (mode, type);
5935 /* SPE vectors are put in odd registers. */
5936 if (n_words == 2 && (gregno & 1) == 0)
5939 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
5942 enum machine_mode m = SImode;
5944 r1 = gen_rtx_REG (m, gregno);
5945 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
5946 r2 = gen_rtx_REG (m, gregno + 1);
5947 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
5948 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
5955 if (gregno <= GP_ARG_MAX_REG)
5956 return gen_rtx_REG (mode, gregno);
5962 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
5963 structure between cum->intoffset and bitpos to integer registers. */
5966 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
5967 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
5969 enum machine_mode mode;
5971 unsigned int startbit, endbit;
5972 int this_regno, intregs, intoffset;
5975 if (cum->intoffset == -1)
5978 intoffset = cum->intoffset;
5979 cum->intoffset = -1;
5981 /* If this is the trailing part of a word, try to only load that
5982 much into the register. Otherwise load the whole register. Note
5983 that in the latter case we may pick up unwanted bits. It's not a
5984 problem at the moment but we may wish to revisit.  */
5986 if (intoffset % BITS_PER_WORD != 0)
5988 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5990 if (mode == BLKmode)
5992 /* We couldn't find an appropriate mode, which happens,
5993 e.g., in packed structs when there are 3 bytes to load.
5994 Back intoffset back to the beginning of the word in this case.  */
5996 intoffset = intoffset & -BITS_PER_WORD;
6003 startbit = intoffset & -BITS_PER_WORD;
6004 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
6005 intregs = (endbit - startbit) / BITS_PER_WORD;
6006 this_regno = cum->words + intoffset / BITS_PER_WORD;
6008 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
6011 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
6015 intoffset /= BITS_PER_UNIT;
6018 regno = GP_ARG_MIN_REG + this_regno;
6019 reg = gen_rtx_REG (mode, regno);
6021 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
6024 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
6028 while (intregs > 0);
6031 /* Recursive workhorse for the following. */
6034 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
6035 HOST_WIDE_INT startbitpos, rtx rvec[],
6040 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
6041 if (TREE_CODE (f) == FIELD_DECL)
6043 HOST_WIDE_INT bitpos = startbitpos;
6044 tree ftype = TREE_TYPE (f);
6045 enum machine_mode mode;
6046 if (ftype == error_mark_node)
6048 mode = TYPE_MODE (ftype);
6050 if (DECL_SIZE (f) != 0
6051 && host_integerp (bit_position (f), 1))
6052 bitpos += int_bit_position (f);
6054 /* ??? FIXME: else assume zero offset. */
6056 if (TREE_CODE (ftype) == RECORD_TYPE)
6057 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
6058 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
6063 case SCmode: mode = SFmode; break;
6064 case DCmode: mode = DFmode; break;
6065 case TCmode: mode = TFmode; break;
6069 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
6071 = gen_rtx_EXPR_LIST (VOIDmode,
6072 gen_rtx_REG (mode, cum->fregno++),
6073 GEN_INT (bitpos / BITS_PER_UNIT));
6074 if (mode == TFmode || mode == TDmode)
6077 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
6079 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
6081 = gen_rtx_EXPR_LIST (VOIDmode,
6082 gen_rtx_REG (mode, cum->vregno++),
6083 GEN_INT (bitpos / BITS_PER_UNIT));
6085 else if (cum->intoffset == -1)
6086 cum->intoffset = bitpos;
6090 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
6091 the register(s) to be used for each field and subfield of a struct
6092 being passed by value, along with the offset of where the
6093 register's value may be found in the block. FP fields go in FP
6094 registers, vector fields go in vector registers, and everything
6095 else goes in int registers, packed as in memory.
6097 This code is also used for function return values. RETVAL indicates
6098 whether this is the case.
6100 Much of this is taken from the SPARC V9 port, which has a similar
6101 calling convention. */
6104 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
6105 int named, bool retval)
6107 rtx rvec[FIRST_PSEUDO_REGISTER];
6108 int k = 1, kbase = 1;
6109 HOST_WIDE_INT typesize = int_size_in_bytes (type);
6110 /* This is a copy; modifications are not visible to our caller. */
6111 CUMULATIVE_ARGS copy_cum = *orig_cum;
6112 CUMULATIVE_ARGS *cum = &copy_cum;
6114 /* Pad to 16 byte boundary if needed. */
6115 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
6116 && (cum->words % 2) != 0)
6123 /* Put entries into rvec[] for individual FP and vector fields, and
6124 for the chunks of memory that go in int regs. Note we start at
6125 element 1; 0 is reserved for an indication of using memory, and
6126 may or may not be filled in below. */
6127 rs6000_darwin64_record_arg_recurse (cum, type, 0, rvec, &k);
6128 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
6130 /* If any part of the struct went on the stack, put all of it there.
6131 This hack is because the generic code for
6132 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
6133 parts of the struct are not at the beginning. */
6137 return NULL_RTX; /* doesn't go in registers at all */
6139 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6141 if (k > 1 || cum->use_stack)
6142 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
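/* Illustrative sketch (editor's addition, not in the original source):
   assuming a hypothetical first argument of type
   struct { double d; int i; } under the darwin64 ABI, the routine
   above would build roughly

       (parallel:BLK [(expr_list (reg:DF f1) (const_int 0))
                      (expr_list (reg:DI r4) (const_int 8))])

   i.e. the FP field is assigned to the next FP register at byte
   offset 0, and the trailing integer chunk is flushed to the GPR that
   shadows byte offset 8.  Exact registers and modes depend on how many
   arguments precede this one.  */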
6147 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
6150 rs6000_mixed_function_arg (enum machine_mode mode, tree type, int align_words)
6154 rtx rvec[GP_ARG_NUM_REG + 1];
6156 if (align_words >= GP_ARG_NUM_REG)
6159 n_units = rs6000_arg_size (mode, type);
6161 /* Optimize the simple case where the arg fits in one gpr, except in
6162 the case of BLKmode due to assign_parms assuming that registers are
6163 BITS_PER_WORD wide. */
6165 || (n_units == 1 && mode != BLKmode))
6166 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
6169 if (align_words + n_units > GP_ARG_NUM_REG)
6170 /* Not all of the arg fits in gprs. Say that it goes in memory too,
6171 using a magic NULL_RTX component.
6172 This is not strictly correct. Only some of the arg belongs in
6173 memory, not all of it. However, the normal scheme using
6174 function_arg_partial_nregs can result in unusual subregs, e.g.
6175 (subreg:SI (reg:DF) 4), which are not handled well. The code to
6176 store the whole arg to memory is often more efficient than code
6177 to store pieces, and we know that space is available in the right
6178 place for the whole arg. */
6179 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6184 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
6185 rtx off = GEN_INT (i++ * 4);
6186 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
6188 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
6190 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
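/* Illustrative sketch (editor's addition): with -m32 on a 64-bit
   PowerPC, a two-word DFmode argument whose first word lands in the
   last argument GPR would come back from the routine above as roughly

       (parallel [(expr_list (nil) (const_int 0))
                  (expr_list (reg:SI r10) (const_int 0))])

   i.e. a leading NULL_RTX element marking the memory part, plus the
   single SImode piece that still fits in a register.  When the whole
   argument fits, the NULL_RTX element is omitted and the pieces appear
   at byte offsets 0, 4, ...  */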
6193 /* Determine where to put an argument to a function.
6194 Value is zero to push the argument on the stack,
6195 or a hard register in which to store the argument.
6197 MODE is the argument's machine mode.
6198 TYPE is the data type of the argument (as a tree).
6199 This is null for libcalls where that information may not be available.
6201 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6202 the preceding args and about the function being called. It is
6203 not modified in this routine.
6204 NAMED is nonzero if this argument is a named parameter
6205 (otherwise it is an extra parameter matching an ellipsis).
6207 On RS/6000 the first eight words of non-FP are normally in registers
6208 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
6209 Under V.4, the first 8 FP args are in registers.
6211 If this is floating-point and no prototype is specified, we use
6212 both an FP and integer register (or possibly FP reg and stack). Library
6213 functions (when CALL_LIBCALL is set) always have the proper types for args,
6214 so we can pass the FP value just in one register. emit_library_function
6215 doesn't support PARALLEL anyway.
6217 Note that for args passed by reference, function_arg will be called
6218 with MODE and TYPE set to that of the pointer to the arg, not the arg itself. */
6222 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6223 tree type, int named)
6225 enum rs6000_abi abi = DEFAULT_ABI;
6227 /* Return a marker to indicate whether CR1 needs to set or clear the
6228 bit that V.4 uses to say fp args were passed in registers.
6229 Assume that we don't need the marker for software floating point,
6230 or compiler generated library calls. */
6231 if (mode == VOIDmode)
6234 && (cum->call_cookie & CALL_LIBCALL) == 0
6236 || (cum->nargs_prototype < 0
6237 && (cum->prototype || TARGET_NO_PROTOTYPE))))
6239 /* For the SPE, we need to crxor CR6 always. */
6241 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
6242 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
6243 return GEN_INT (cum->call_cookie
6244 | ((cum->fregno == FP_ARG_MIN_REG)
6245 ? CALL_V4_SET_FP_ARGS
6246 : CALL_V4_CLEAR_FP_ARGS));
6249 return GEN_INT (cum->call_cookie);
6252 if (rs6000_darwin64_abi && mode == BLKmode
6253 && TREE_CODE (type) == RECORD_TYPE)
6255 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, false);
6256 if (rslt != NULL_RTX)
6258 /* Else fall through to usual handling. */
6261 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
6262 if (TARGET_64BIT && ! cum->prototype)
6264 /* Vector parameters get passed in vector registers
6265 and also in GPRs or memory, in the absence of a prototype. */
6268 align_words = (cum->words + 1) & ~1;
6270 if (align_words >= GP_ARG_NUM_REG)
6276 slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
6278 return gen_rtx_PARALLEL (mode,
6280 gen_rtx_EXPR_LIST (VOIDmode,
6282 gen_rtx_EXPR_LIST (VOIDmode,
6283 gen_rtx_REG (mode, cum->vregno),
6287 return gen_rtx_REG (mode, cum->vregno);
6288 else if (TARGET_ALTIVEC_ABI
6289 && (ALTIVEC_VECTOR_MODE (mode)
6290 || (type && TREE_CODE (type) == VECTOR_TYPE
6291 && int_size_in_bytes (type) == 16)))
6293 if (named || abi == ABI_V4)
6297 /* Vector parameters to varargs functions under AIX or Darwin
6298 get passed in memory and possibly also in GPRs. */
6299 int align, align_words, n_words;
6300 enum machine_mode part_mode;
6302 /* Vector parameters must be 16-byte aligned. This places them at
6303 2 mod 4 in terms of words in 32-bit mode, since the parameter
6304 save area starts at offset 24 from the stack. In 64-bit mode,
6305 they just have to start on an even word, since the parameter
6306 save area is 16-byte aligned. */
6308 align = (2 - cum->words) & 3;
6310 align = cum->words & 1;
6311 align_words = cum->words + align;
6313 /* Out of registers? Memory, then. */
6314 if (align_words >= GP_ARG_NUM_REG)
6317 if (TARGET_32BIT && TARGET_POWERPC64)
6318 return rs6000_mixed_function_arg (mode, type, align_words);
6320 /* The vector value goes in GPRs. Only the part of the
6321 value in GPRs is reported here. */
6323 n_words = rs6000_arg_size (mode, type);
6324 if (align_words + n_words > GP_ARG_NUM_REG)
6325 /* Fortunately, there are only two possibilities, the value
6326 is either wholly in GPRs or half in GPRs and half not. */
6329 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
6332 else if (TARGET_SPE_ABI && TARGET_SPE
6333 && (SPE_VECTOR_MODE (mode)
6334 || (TARGET_E500_DOUBLE && (mode == DFmode
6337 || mode == TCmode))))
6338 return rs6000_spe_function_arg (cum, mode, type);
6340 else if (abi == ABI_V4)
6342 if (TARGET_HARD_FLOAT && TARGET_FPRS
6343 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
6344 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
6345 || (mode == TFmode && !TARGET_IEEEQUAD)
6346 || mode == SDmode || mode == DDmode || mode == TDmode))
6348 /* _Decimal128 must use an even/odd register pair. This assumes
6349 that the register number is odd when fregno is odd. */
6350 if (mode == TDmode && (cum->fregno % 2) == 1)
6353 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
6354 <= FP_ARG_V4_MAX_REG)
6355 return gen_rtx_REG (mode, cum->fregno);
6361 int n_words = rs6000_arg_size (mode, type);
6362 int gregno = cum->sysv_gregno;
6364 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
6365 (r7,r8) or (r9,r10). As is any other 2-word item such
6366 as complex int, due to a historical mistake. */
6368 gregno += (1 - gregno) & 1;
6370 /* Multi-reg args are not split between registers and stack. */
6371 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
6374 if (TARGET_32BIT && TARGET_POWERPC64)
6375 return rs6000_mixed_function_arg (mode, type,
6376 gregno - GP_ARG_MIN_REG);
6377 return gen_rtx_REG (mode, gregno);
6382 int align_words = rs6000_parm_start (mode, type, cum->words);
6384 /* _Decimal128 must be passed in an even/odd float register pair.
6385 This assumes that the register number is odd when fregno is odd. */
6386 if (mode == TDmode && (cum->fregno % 2) == 1)
6389 if (USE_FP_FOR_ARG_P (cum, mode, type))
6391 rtx rvec[GP_ARG_NUM_REG + 1];
6395 enum machine_mode fmode = mode;
6396 unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
6398 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
6400 /* Currently, we only ever need one reg here because complex
6401 doubles are split. */
6402 gcc_assert (cum->fregno == FP_ARG_MAX_REG
6403 && (fmode == TFmode || fmode == TDmode));
6405 /* Long double or _Decimal128 split over regs and memory. */
6406 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
6409 /* Do we also need to pass this arg in the parameter save area? */
6412 && (cum->nargs_prototype <= 0
6413 || (DEFAULT_ABI == ABI_AIX
6415 && align_words >= GP_ARG_NUM_REG)));
6417 if (!needs_psave && mode == fmode)
6418 return gen_rtx_REG (fmode, cum->fregno);
6423 /* Describe the part that goes in gprs or the stack.
6424 This piece must come first, before the fprs. */
6425 if (align_words < GP_ARG_NUM_REG)
6427 unsigned long n_words = rs6000_arg_size (mode, type);
6429 if (align_words + n_words > GP_ARG_NUM_REG
6430 || (TARGET_32BIT && TARGET_POWERPC64))
6432 /* If this is partially on the stack, then we only
6433 include the portion actually in registers here. */
6434 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
6437 if (align_words + n_words > GP_ARG_NUM_REG)
6438 /* Not all of the arg fits in gprs. Say that it
6439 goes in memory too, using a magic NULL_RTX
6440 component. Also see comment in
6441 rs6000_mixed_function_arg for why the normal
6442 function_arg_partial_nregs scheme doesn't work in this case. */
6444 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
6448 r = gen_rtx_REG (rmode,
6449 GP_ARG_MIN_REG + align_words);
6450 off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
6451 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
6453 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
6457 /* The whole arg fits in gprs. */
6458 r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
6459 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
6463 /* It's entirely in memory. */
6464 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6467 /* Describe where this piece goes in the fprs. */
6468 r = gen_rtx_REG (fmode, cum->fregno);
6469 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
6471 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
6473 else if (align_words < GP_ARG_NUM_REG)
6475 if (TARGET_32BIT && TARGET_POWERPC64)
6476 return rs6000_mixed_function_arg (mode, type, align_words);
6478 if (mode == BLKmode)
6481 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
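/* Illustrative note (editor's addition): under the AIX ABI an
   unprototyped call that passes a double takes the needs_psave path
   above, so the returned PARALLEL pairs the FP register piece with the
   GPR (or NULL_RTX memory marker) covering the same bytes, and the
   caller stores the value in both places, as described in the comment
   before this function.  */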
6488 /* For an arg passed partly in registers and partly in memory, this is
6489 the number of bytes passed in registers. For args passed entirely in
6490 registers or entirely in memory, zero. When an arg is described by a
6491 PARALLEL, perhaps using more than one register type, this function
6492 returns the number of bytes used by the first element of the PARALLEL. */
6495 rs6000_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6496 tree type, bool named)
6501 if (DEFAULT_ABI == ABI_V4)
6504 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
6505 && cum->nargs_prototype >= 0)
6508 /* In this complicated case we just disable the partial_nregs code. */
6509 if (rs6000_darwin64_abi && mode == BLKmode
6510 && TREE_CODE (type) == RECORD_TYPE
6511 && int_size_in_bytes (type) > 0)
6514 align_words = rs6000_parm_start (mode, type, cum->words);
6516 if (USE_FP_FOR_ARG_P (cum, mode, type))
6518 /* If we are passing this arg in the fixed parameter save area
6519 (gprs or memory) as well as fprs, then this function should
6520 return the number of partial bytes passed in the parameter
6521 save area rather than partial bytes passed in fprs. */
6523 && (cum->nargs_prototype <= 0
6524 || (DEFAULT_ABI == ABI_AIX
6526 && align_words >= GP_ARG_NUM_REG)))
6528 else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
6529 > FP_ARG_MAX_REG + 1)
6530 ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
6531 else if (cum->nargs_prototype >= 0)
6535 if (align_words < GP_ARG_NUM_REG
6536 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
6537 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
6539 if (ret != 0 && TARGET_DEBUG_ARG)
6540 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
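/* Worked example (editor's addition): on a 32-bit target with eight
   argument GPRs, a three-word argument whose first word lands in GPR
   slot 6 has only two words left in registers, so the code above
   reports (8 - 6) * 4 = 8 partial bytes; the remaining word goes to
   the parameter save area.  */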
6545 /* A C expression that indicates when an argument must be passed by
6546 reference. If nonzero for an argument, a copy of that argument is
6547 made in memory and a pointer to the argument is passed instead of
6548 the argument itself. The pointer is passed in whatever way is
6549 appropriate for passing a pointer to that type.
6551 Under V.4, aggregates and long double are passed by reference.
6553 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
6554 reference unless the AltiVec vector extension ABI is in force.
6556 As an extension to all ABIs, variable sized types are passed by reference. */
6560 rs6000_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6561 enum machine_mode mode, const_tree type,
6562 bool named ATTRIBUTE_UNUSED)
6564 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
6566 if (TARGET_DEBUG_ARG)
6567 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
6574 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
6576 if (TARGET_DEBUG_ARG)
6577 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
6581 if (int_size_in_bytes (type) < 0)
6583 if (TARGET_DEBUG_ARG)
6584 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
6588 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
6589 modes only exist for GCC vector types if -maltivec. */
6590 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
6592 if (TARGET_DEBUG_ARG)
6593 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
6597 /* Pass synthetic vectors in memory. */
6598 if (TREE_CODE (type) == VECTOR_TYPE
6599 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
6601 static bool warned_for_pass_big_vectors = false;
6602 if (TARGET_DEBUG_ARG)
6603 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
6604 if (!warned_for_pass_big_vectors)
6606 warning (0, "GCC vector passed by reference: "
6607 "non-standard ABI extension with no compatibility guarantee");
6608 warned_for_pass_big_vectors = true;
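/* Quick illustration (editor's addition): under -mabi=v4 the tests
   above make aggregates such as struct { int a, b; } and variable
   sized types arrive as pointers, while a plain double is still passed
   by value; a 32-byte GCC vector type triggers the one-time warning
   above and is likewise passed by reference.  */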
6617 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
6620 enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
6625 for (i = 0; i < nregs; i++)
6627 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
6628 if (reload_completed)
6630 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
6633 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
6634 i * GET_MODE_SIZE (reg_mode));
6637 tem = replace_equiv_address (tem, XEXP (tem, 0));
6641 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
6645 /* Perform any actions needed for a function that is receiving a
6646 variable number of arguments.
6650 MODE and TYPE are the mode and type of the current parameter.
6652 PRETEND_SIZE is a variable that should be set to the amount of stack
6653 that must be pushed by the prolog to pretend that our caller pushed it.
6656 Normally, this macro will push all remaining incoming registers on the
6657 stack and set PRETEND_SIZE to the length of the registers pushed. */
6660 setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6661 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6664 CUMULATIVE_ARGS next_cum;
6665 int reg_size = TARGET_32BIT ? 4 : 8;
6666 rtx save_area = NULL_RTX, mem;
6667 int first_reg_offset;
6670 /* Skip the last named argument. */
6672 function_arg_advance (&next_cum, mode, type, 1, 0);
6674 if (DEFAULT_ABI == ABI_V4)
6676 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
6680 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
6681 HOST_WIDE_INT offset = 0;
6683 /* Try to optimize the size of the varargs save area.
6684 The ABI requires that ap.reg_save_area is doubleword
6685 aligned, but we don't need to allocate space for all
6686 the bytes, only those to which we will actually save anything. */
6688 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
6689 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
6690 if (TARGET_HARD_FLOAT && TARGET_FPRS
6691 && next_cum.fregno <= FP_ARG_V4_MAX_REG
6692 && cfun->va_list_fpr_size)
6695 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
6696 * UNITS_PER_FP_WORD;
6697 if (cfun->va_list_fpr_size
6698 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
6699 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
6701 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
6702 * UNITS_PER_FP_WORD;
6706 offset = -((first_reg_offset * reg_size) & ~7);
6707 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
6709 gpr_reg_num = cfun->va_list_gpr_size;
6710 if (reg_size == 4 && (first_reg_offset & 1))
6713 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
6716 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
6718 - (int) (GP_ARG_NUM_REG * reg_size);
6720 if (gpr_size + fpr_size)
6723 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
6724 gcc_assert (GET_CODE (reg_save_area) == MEM);
6725 reg_save_area = XEXP (reg_save_area, 0);
6726 if (GET_CODE (reg_save_area) == PLUS)
6728 gcc_assert (XEXP (reg_save_area, 0)
6729 == virtual_stack_vars_rtx);
6730 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
6731 offset += INTVAL (XEXP (reg_save_area, 1));
6734 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
6737 cfun->machine->varargs_save_offset = offset;
6738 save_area = plus_constant (virtual_stack_vars_rtx, offset);
6743 first_reg_offset = next_cum.words;
6744 save_area = virtual_incoming_args_rtx;
6746 if (targetm.calls.must_pass_in_stack (mode, type))
6747 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
6750 set = get_varargs_alias_set ();
6751 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
6752 && cfun->va_list_gpr_size)
6754 int nregs = GP_ARG_NUM_REG - first_reg_offset;
6756 if (va_list_gpr_counter_field)
6758 /* V4 va_list_gpr_size counts number of registers needed. */
6759 if (nregs > cfun->va_list_gpr_size)
6760 nregs = cfun->va_list_gpr_size;
6764 /* char * va_list instead counts number of bytes needed. */
6765 if (nregs > cfun->va_list_gpr_size / reg_size)
6766 nregs = cfun->va_list_gpr_size / reg_size;
6769 mem = gen_rtx_MEM (BLKmode,
6770 plus_constant (save_area,
6771 first_reg_offset * reg_size));
6772 MEM_NOTRAP_P (mem) = 1;
6773 set_mem_alias_set (mem, set);
6774 set_mem_align (mem, BITS_PER_WORD);
6776 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
6780 /* Save FP registers if needed. */
6781 if (DEFAULT_ABI == ABI_V4
6782 && TARGET_HARD_FLOAT && TARGET_FPRS
6784 && next_cum.fregno <= FP_ARG_V4_MAX_REG
6785 && cfun->va_list_fpr_size)
6787 int fregno = next_cum.fregno, nregs;
6788 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
6789 rtx lab = gen_label_rtx ();
6790 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
6791 * UNITS_PER_FP_WORD);
6794 (gen_rtx_SET (VOIDmode,
6796 gen_rtx_IF_THEN_ELSE (VOIDmode,
6797 gen_rtx_NE (VOIDmode, cr1,
6799 gen_rtx_LABEL_REF (VOIDmode, lab),
6803 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
6804 fregno++, off += UNITS_PER_FP_WORD, nregs++)
6806 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
6808 plus_constant (save_area, off));
6809 MEM_NOTRAP_P (mem) = 1;
6810 set_mem_alias_set (mem, set);
6811 set_mem_align (mem, GET_MODE_ALIGNMENT (
6812 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
6813 ? DFmode : SFmode));
6814 emit_move_insn (mem, gen_rtx_REG (
6815 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
6816 ? DFmode : SFmode, fregno));
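/* Layout note (editor's addition): the register save area filled in
   above is addressed as GP_ARG_NUM_REG slots of reg_size bytes for the
   GPRs followed by one UNITS_PER_FP_WORD slot per FPR, which is why
   the FP offset starts at GP_ARG_NUM_REG * reg_size and advances by
   UNITS_PER_FP_WORD per register.  */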
6823 /* Create the va_list data type. */
6826 rs6000_build_builtin_va_list (void)
6828 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
6830 /* For AIX, prefer 'char *' because that's what the system
6831 header files like. */
6832 if (DEFAULT_ABI != ABI_V4)
6833 return build_pointer_type (char_type_node);
6835 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6836 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
6837 get_identifier ("__va_list_tag"), record);
6839 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
6840 unsigned_char_type_node);
6841 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
6842 unsigned_char_type_node);
6843 /* Give the two bytes of padding a name, so that -Wpadded won't warn on every user file. */
6845 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
6846 get_identifier ("reserved"), short_unsigned_type_node);
6847 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
6848 get_identifier ("overflow_arg_area"),
6850 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
6851 get_identifier ("reg_save_area"),
6854 va_list_gpr_counter_field = f_gpr;
6855 va_list_fpr_counter_field = f_fpr;
6857 DECL_FIELD_CONTEXT (f_gpr) = record;
6858 DECL_FIELD_CONTEXT (f_fpr) = record;
6859 DECL_FIELD_CONTEXT (f_res) = record;
6860 DECL_FIELD_CONTEXT (f_ovf) = record;
6861 DECL_FIELD_CONTEXT (f_sav) = record;
6863 TREE_CHAIN (record) = type_decl;
6864 TYPE_NAME (record) = type_decl;
6865 TYPE_FIELDS (record) = f_gpr;
6866 TREE_CHAIN (f_gpr) = f_fpr;
6867 TREE_CHAIN (f_fpr) = f_res;
6868 TREE_CHAIN (f_res) = f_ovf;
6869 TREE_CHAIN (f_ovf) = f_sav;
6871 layout_type (record);
6873 /* The correct type is an array type of one element. */
6874 return build_array_type (record, build_index_type (size_zero_node));
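/* Editor's note (not in the original source): for the V.4 ABI the
   record built above corresponds roughly to the following declaration,
   where the types of the two pointer fields are assumed from their use
   in va_start/va_arg:

       typedef struct __va_list_tag {
         unsigned char gpr;         // next GP arg register, 0..8
         unsigned char fpr;         // next FP arg register, 0..8
         unsigned short reserved;   // named padding for -Wpadded
         void *overflow_arg_area;   // arguments passed on the stack
         void *reg_save_area;       // spilled GP/FP argument registers
       } __va_list_tag;

   and the actual va_list type is a one-element array of that record,
   as the return statement above indicates.  */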
6877 /* Implement va_start. */
6880 rs6000_va_start (tree valist, rtx nextarg)
6882 HOST_WIDE_INT words, n_gpr, n_fpr;
6883 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
6884 tree gpr, fpr, ovf, sav, t;
6886 /* Only SVR4 needs something special. */
6887 if (DEFAULT_ABI != ABI_V4)
6889 std_expand_builtin_va_start (valist, nextarg);
6893 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
6894 f_fpr = TREE_CHAIN (f_gpr);
6895 f_res = TREE_CHAIN (f_fpr);
6896 f_ovf = TREE_CHAIN (f_res);
6897 f_sav = TREE_CHAIN (f_ovf);
6899 valist = build_va_arg_indirect_ref (valist);
6900 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6901 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
6903 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
6905 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
6908 /* Count number of gp and fp argument registers used. */
6909 words = crtl->args.info.words;
6910 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
6912 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
6915 if (TARGET_DEBUG_ARG)
6916 fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
6917 HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
6918 words, n_gpr, n_fpr);
6920 if (cfun->va_list_gpr_size)
6922 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
6923 build_int_cst (NULL_TREE, n_gpr));
6924 TREE_SIDE_EFFECTS (t) = 1;
6925 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6928 if (cfun->va_list_fpr_size)
6930 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
6931 build_int_cst (NULL_TREE, n_fpr));
6932 TREE_SIDE_EFFECTS (t) = 1;
6933 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6936 /* Find the overflow area. */
6937 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
6939 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t,
6940 size_int (words * UNITS_PER_WORD));
6941 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
6942 TREE_SIDE_EFFECTS (t) = 1;
6943 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6945 /* If there were no va_arg invocations, don't set up the register save area. */
6947 if (!cfun->va_list_gpr_size
6948 && !cfun->va_list_fpr_size
6949 && n_gpr < GP_ARG_NUM_REG
6950 && n_fpr < FP_ARG_V4_MAX_REG)
6953 /* Find the register save area. */
6954 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
6955 if (cfun->machine->varargs_save_offset)
6956 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
6957 size_int (cfun->machine->varargs_save_offset));
6958 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
6959 TREE_SIDE_EFFECTS (t) = 1;
6960 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
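/* Worked example (editor's addition): for a V.4 function such as
   int f (int a, double b, ...), the named arguments use one GP and one
   FP register, so the code above sets gpr = 1 and fpr = 1, points
   overflow_arg_area just past any named words passed on the stack, and
   points reg_save_area at the block spilled by setup_incoming_varargs
   (adjusted by varargs_save_offset).  */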
6963 /* Implement va_arg. */
6966 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6969 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
6970 tree gpr, fpr, ovf, sav, reg, t, u;
6971 int size, rsize, n_reg, sav_ofs, sav_scale;
6972 tree lab_false, lab_over, addr;
6974 tree ptrtype = build_pointer_type (type);
6978 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
6980 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
6981 return build_va_arg_indirect_ref (t);
6984 if (DEFAULT_ABI != ABI_V4)
6986 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
6988 tree elem_type = TREE_TYPE (type);
6989 enum machine_mode elem_mode = TYPE_MODE (elem_type);
6990 int elem_size = GET_MODE_SIZE (elem_mode);
6992 if (elem_size < UNITS_PER_WORD)
6994 tree real_part, imag_part;
6995 gimple_seq post = NULL;
6997 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
6999 /* Copy the value into a temporary, lest the formal temporary
7000 be reused out from under us. */
7001 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
7002 gimple_seq_add_seq (pre_p, post);
7004 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
7007 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
7011 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7014 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
7015 f_fpr = TREE_CHAIN (f_gpr);
7016 f_res = TREE_CHAIN (f_fpr);
7017 f_ovf = TREE_CHAIN (f_res);
7018 f_sav = TREE_CHAIN (f_ovf);
7020 valist = build_va_arg_indirect_ref (valist);
7021 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
7022 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
7024 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
7026 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
7029 size = int_size_in_bytes (type);
7030 rsize = (size + 3) / 4;
7033 if (TARGET_HARD_FLOAT && TARGET_FPRS
7034 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
7035 || (TARGET_DOUBLE_FLOAT
7036 && (TYPE_MODE (type) == DFmode
7037 || TYPE_MODE (type) == TFmode
7038 || TYPE_MODE (type) == SDmode
7039 || TYPE_MODE (type) == DDmode
7040 || TYPE_MODE (type) == TDmode))))
7042 /* FP args go in FP registers, if present. */
7044 n_reg = (size + 7) / 8;
7045 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
7046 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
7047 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
7052 /* Otherwise into GP registers. */
7061 /* Pull the value out of the saved registers.... */
7064 addr = create_tmp_var (ptr_type_node, "addr");
7065 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
7067 /* AltiVec vectors never go in registers when -mabi=altivec. */
7068 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
7072 lab_false = create_artificial_label (input_location);
7073 lab_over = create_artificial_label (input_location);
7075 /* Long long and SPE vectors are aligned in the registers.
7076 As is any other 2-gpr item, such as complex int, due to a
7077 historical mistake. */
7079 if (n_reg == 2 && reg == gpr)
7082 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
7083 build_int_cst (TREE_TYPE (reg), n_reg - 1));
7084 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
7085 unshare_expr (reg), u);
7087 /* _Decimal128 is passed in even/odd fpr pairs; the stored
7088 reg number is 0 for f1, so we want to make it odd. */
7089 else if (reg == fpr && TYPE_MODE (type) == TDmode)
7091 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
7092 build_int_cst (TREE_TYPE (reg), 1));
7093 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
7096 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
7097 t = build2 (GE_EXPR, boolean_type_node, u, t);
7098 u = build1 (GOTO_EXPR, void_type_node, lab_false);
7099 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
7100 gimplify_and_add (t, pre_p);
7104 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, size_int (sav_ofs));
7106 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
7107 build_int_cst (TREE_TYPE (reg), n_reg));
7108 u = fold_convert (sizetype, u);
7109 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
7110 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, u);
7112 /* _Decimal32 varargs are located in the second word of the 64-bit
7113 FP register for 32-bit binaries. */
7114 if (!TARGET_POWERPC64
7115 && TARGET_HARD_FLOAT && TARGET_FPRS
7116 && TYPE_MODE (type) == SDmode)
7117 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (size));
7119 gimplify_assign (addr, t, pre_p);
7121 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7123 stmt = gimple_build_label (lab_false);
7124 gimple_seq_add_stmt (pre_p, stmt);
7126 if ((n_reg == 2 && !regalign) || n_reg > 2)
7128 /* Ensure that we don't find any more args in regs.
7129 Alignment has already taken care of the special cases. */
7130 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
7134 /* ... otherwise out of the overflow area. */
7136 /* Care for on-stack alignment if needed. */
7140 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (align - 1));
7141 t = fold_convert (sizetype, t);
7142 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7144 t = fold_convert (TREE_TYPE (ovf), t);
7146 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7148 gimplify_assign (unshare_expr (addr), t, pre_p);
7150 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (size));
7151 gimplify_assign (unshare_expr (ovf), t, pre_p);
7155 stmt = gimple_build_label (lab_over);
7156 gimple_seq_add_stmt (pre_p, stmt);
7159 if (STRICT_ALIGNMENT
7160 && (TYPE_ALIGN (type)
7161 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
7163 /* The value (of type complex double, for example) may not be
7164 aligned in memory in the saved registers, so copy via a
7165 temporary. (This is the same code as used for SPARC.) */
7166 tree tmp = create_tmp_var (type, "va_arg_tmp");
7167 tree dest_addr = build_fold_addr_expr (tmp);
7169 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
7170 3, dest_addr, addr, size_int (rsize * 4));
7172 gimplify_and_add (copy, pre_p);
7176 addr = fold_convert (ptrtype, addr);
7177 return build_va_arg_indirect_ref (addr);
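/* Worked example (editor's addition): fetching a double with va_arg
   under V.4 hard float takes the FP path above with n_reg = 1,
   sav_ofs = 8 * 4 = 32 and sav_scale = 8, so while fpr is still below
   8 the value is read from reg_save_area + 32 + fpr * 8 and fpr is
   post-incremented; otherwise it is read from the overflow area, which
   is then advanced past the (aligned) argument.  */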
7183 def_builtin (int mask, const char *name, tree type, int code)
7185 if ((mask & target_flags) || TARGET_PAIRED_FLOAT)
7187 if (rs6000_builtin_decls[code])
7190 rs6000_builtin_decls[code] =
7191 add_builtin_function (name, type, code, BUILT_IN_MD,
7196 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
7198 static const struct builtin_description bdesc_3arg[] =
7200 { MASK_ALTIVEC, CODE_FOR_altivec_vmaddfp, "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP },
7201 { MASK_ALTIVEC, CODE_FOR_altivec_vmhaddshs, "__builtin_altivec_vmhaddshs", ALTIVEC_BUILTIN_VMHADDSHS },
7202 { MASK_ALTIVEC, CODE_FOR_altivec_vmhraddshs, "__builtin_altivec_vmhraddshs", ALTIVEC_BUILTIN_VMHRADDSHS },
7203 { MASK_ALTIVEC, CODE_FOR_altivec_vmladduhm, "__builtin_altivec_vmladduhm", ALTIVEC_BUILTIN_VMLADDUHM},
7204 { MASK_ALTIVEC, CODE_FOR_altivec_vmsumubm, "__builtin_altivec_vmsumubm", ALTIVEC_BUILTIN_VMSUMUBM },
7205 { MASK_ALTIVEC, CODE_FOR_altivec_vmsummbm, "__builtin_altivec_vmsummbm", ALTIVEC_BUILTIN_VMSUMMBM },
7206 { MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhm, "__builtin_altivec_vmsumuhm", ALTIVEC_BUILTIN_VMSUMUHM },
7207 { MASK_ALTIVEC, CODE_FOR_altivec_vmsumshm, "__builtin_altivec_vmsumshm", ALTIVEC_BUILTIN_VMSUMSHM },
7208 { MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhs, "__builtin_altivec_vmsumuhs", ALTIVEC_BUILTIN_VMSUMUHS },
7209 { MASK_ALTIVEC, CODE_FOR_altivec_vmsumshs, "__builtin_altivec_vmsumshs", ALTIVEC_BUILTIN_VMSUMSHS },
7210 { MASK_ALTIVEC, CODE_FOR_altivec_vnmsubfp, "__builtin_altivec_vnmsubfp", ALTIVEC_BUILTIN_VNMSUBFP },
7211 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4sf, "__builtin_altivec_vperm_4sf", ALTIVEC_BUILTIN_VPERM_4SF },
7212 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4si, "__builtin_altivec_vperm_4si", ALTIVEC_BUILTIN_VPERM_4SI },
7213 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v8hi, "__builtin_altivec_vperm_8hi", ALTIVEC_BUILTIN_VPERM_8HI },
7214 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v16qi, "__builtin_altivec_vperm_16qi", ALTIVEC_BUILTIN_VPERM_16QI },
7215 { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v4sf, "__builtin_altivec_vsel_4sf", ALTIVEC_BUILTIN_VSEL_4SF },
7216 { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v4si, "__builtin_altivec_vsel_4si", ALTIVEC_BUILTIN_VSEL_4SI },
7217 { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v8hi, "__builtin_altivec_vsel_8hi", ALTIVEC_BUILTIN_VSEL_8HI },
7218 { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v16qi, "__builtin_altivec_vsel_16qi", ALTIVEC_BUILTIN_VSEL_16QI },
7219 { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v16qi, "__builtin_altivec_vsldoi_16qi", ALTIVEC_BUILTIN_VSLDOI_16QI },
7220 { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v8hi, "__builtin_altivec_vsldoi_8hi", ALTIVEC_BUILTIN_VSLDOI_8HI },
7221 { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v4si, "__builtin_altivec_vsldoi_4si", ALTIVEC_BUILTIN_VSLDOI_4SI },
7222 { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v4sf, "__builtin_altivec_vsldoi_4sf", ALTIVEC_BUILTIN_VSLDOI_4SF },
7224 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_madd", ALTIVEC_BUILTIN_VEC_MADD },
7225 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_madds", ALTIVEC_BUILTIN_VEC_MADDS },
7226 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mladd", ALTIVEC_BUILTIN_VEC_MLADD },
7227 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mradds", ALTIVEC_BUILTIN_VEC_MRADDS },
7228 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_msum", ALTIVEC_BUILTIN_VEC_MSUM },
7229 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumshm", ALTIVEC_BUILTIN_VEC_VMSUMSHM },
7230 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumuhm", ALTIVEC_BUILTIN_VEC_VMSUMUHM },
7231 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsummbm", ALTIVEC_BUILTIN_VEC_VMSUMMBM },
7232 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumubm", ALTIVEC_BUILTIN_VEC_VMSUMUBM },
7233 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_msums", ALTIVEC_BUILTIN_VEC_MSUMS },
7234 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumshs", ALTIVEC_BUILTIN_VEC_VMSUMSHS },
7235 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumuhs", ALTIVEC_BUILTIN_VEC_VMSUMUHS },
7236 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_nmsub", ALTIVEC_BUILTIN_VEC_NMSUB },
7237 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_perm", ALTIVEC_BUILTIN_VEC_PERM },
7238 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sel", ALTIVEC_BUILTIN_VEC_SEL },
7240 { 0, CODE_FOR_paired_msub, "__builtin_paired_msub", PAIRED_BUILTIN_MSUB },
7241 { 0, CODE_FOR_paired_madd, "__builtin_paired_madd", PAIRED_BUILTIN_MADD },
7242 { 0, CODE_FOR_paired_madds0, "__builtin_paired_madds0", PAIRED_BUILTIN_MADDS0 },
7243 { 0, CODE_FOR_paired_madds1, "__builtin_paired_madds1", PAIRED_BUILTIN_MADDS1 },
7244 { 0, CODE_FOR_paired_nmsub, "__builtin_paired_nmsub", PAIRED_BUILTIN_NMSUB },
7245 { 0, CODE_FOR_paired_nmadd, "__builtin_paired_nmadd", PAIRED_BUILTIN_NMADD },
7246 { 0, CODE_FOR_paired_sum0, "__builtin_paired_sum0", PAIRED_BUILTIN_SUM0 },
7247 { 0, CODE_FOR_paired_sum1, "__builtin_paired_sum1", PAIRED_BUILTIN_SUM1 },
7248 { 0, CODE_FOR_selv2sf4, "__builtin_paired_selv2sf4", PAIRED_BUILTIN_SELV2SF4 },
7251 /* DST operations: void foo (void *, const int, const char). */
7253 static const struct builtin_description bdesc_dst[] =
7255 { MASK_ALTIVEC, CODE_FOR_altivec_dst, "__builtin_altivec_dst", ALTIVEC_BUILTIN_DST },
7256 { MASK_ALTIVEC, CODE_FOR_altivec_dstt, "__builtin_altivec_dstt", ALTIVEC_BUILTIN_DSTT },
7257 { MASK_ALTIVEC, CODE_FOR_altivec_dstst, "__builtin_altivec_dstst", ALTIVEC_BUILTIN_DSTST },
7258 { MASK_ALTIVEC, CODE_FOR_altivec_dststt, "__builtin_altivec_dststt", ALTIVEC_BUILTIN_DSTSTT },
7260 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dst", ALTIVEC_BUILTIN_VEC_DST },
7261 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dstt", ALTIVEC_BUILTIN_VEC_DSTT },
7262 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dstst", ALTIVEC_BUILTIN_VEC_DSTST },
7263 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dststt", ALTIVEC_BUILTIN_VEC_DSTSTT }
7266 /* Simple binary operations: VECc = foo (VECa, VECb). */
7268 static struct builtin_description bdesc_2arg[] =
7270 { MASK_ALTIVEC, CODE_FOR_addv16qi3, "__builtin_altivec_vaddubm", ALTIVEC_BUILTIN_VADDUBM },
7271 { MASK_ALTIVEC, CODE_FOR_addv8hi3, "__builtin_altivec_vadduhm", ALTIVEC_BUILTIN_VADDUHM },
7272 { MASK_ALTIVEC, CODE_FOR_addv4si3, "__builtin_altivec_vadduwm", ALTIVEC_BUILTIN_VADDUWM },
7273 { MASK_ALTIVEC, CODE_FOR_addv4sf3, "__builtin_altivec_vaddfp", ALTIVEC_BUILTIN_VADDFP },
7274 { MASK_ALTIVEC, CODE_FOR_altivec_vaddcuw, "__builtin_altivec_vaddcuw", ALTIVEC_BUILTIN_VADDCUW },
7275 { MASK_ALTIVEC, CODE_FOR_altivec_vaddubs, "__builtin_altivec_vaddubs", ALTIVEC_BUILTIN_VADDUBS },
7276 { MASK_ALTIVEC, CODE_FOR_altivec_vaddsbs, "__builtin_altivec_vaddsbs", ALTIVEC_BUILTIN_VADDSBS },
7277 { MASK_ALTIVEC, CODE_FOR_altivec_vadduhs, "__builtin_altivec_vadduhs", ALTIVEC_BUILTIN_VADDUHS },
7278 { MASK_ALTIVEC, CODE_FOR_altivec_vaddshs, "__builtin_altivec_vaddshs", ALTIVEC_BUILTIN_VADDSHS },
7279 { MASK_ALTIVEC, CODE_FOR_altivec_vadduws, "__builtin_altivec_vadduws", ALTIVEC_BUILTIN_VADDUWS },
7280 { MASK_ALTIVEC, CODE_FOR_altivec_vaddsws, "__builtin_altivec_vaddsws", ALTIVEC_BUILTIN_VADDSWS },
7281 { MASK_ALTIVEC, CODE_FOR_andv4si3, "__builtin_altivec_vand", ALTIVEC_BUILTIN_VAND },
7282 { MASK_ALTIVEC, CODE_FOR_andcv4si3, "__builtin_altivec_vandc", ALTIVEC_BUILTIN_VANDC },
7283 { MASK_ALTIVEC, CODE_FOR_altivec_vavgub, "__builtin_altivec_vavgub", ALTIVEC_BUILTIN_VAVGUB },
7284 { MASK_ALTIVEC, CODE_FOR_altivec_vavgsb, "__builtin_altivec_vavgsb", ALTIVEC_BUILTIN_VAVGSB },
7285 { MASK_ALTIVEC, CODE_FOR_altivec_vavguh, "__builtin_altivec_vavguh", ALTIVEC_BUILTIN_VAVGUH },
7286 { MASK_ALTIVEC, CODE_FOR_altivec_vavgsh, "__builtin_altivec_vavgsh", ALTIVEC_BUILTIN_VAVGSH },
7287 { MASK_ALTIVEC, CODE_FOR_altivec_vavguw, "__builtin_altivec_vavguw", ALTIVEC_BUILTIN_VAVGUW },
7288 { MASK_ALTIVEC, CODE_FOR_altivec_vavgsw, "__builtin_altivec_vavgsw", ALTIVEC_BUILTIN_VAVGSW },
7289 { MASK_ALTIVEC, CODE_FOR_altivec_vcfux, "__builtin_altivec_vcfux", ALTIVEC_BUILTIN_VCFUX },
7290 { MASK_ALTIVEC, CODE_FOR_altivec_vcfsx, "__builtin_altivec_vcfsx", ALTIVEC_BUILTIN_VCFSX },
7291 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpbfp, "__builtin_altivec_vcmpbfp", ALTIVEC_BUILTIN_VCMPBFP },
7292 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequb, "__builtin_altivec_vcmpequb", ALTIVEC_BUILTIN_VCMPEQUB },
7293 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequh, "__builtin_altivec_vcmpequh", ALTIVEC_BUILTIN_VCMPEQUH },
7294 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequw, "__builtin_altivec_vcmpequw", ALTIVEC_BUILTIN_VCMPEQUW },
7295 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpeqfp, "__builtin_altivec_vcmpeqfp", ALTIVEC_BUILTIN_VCMPEQFP },
7296 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgefp, "__builtin_altivec_vcmpgefp", ALTIVEC_BUILTIN_VCMPGEFP },
7297 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtub, "__builtin_altivec_vcmpgtub", ALTIVEC_BUILTIN_VCMPGTUB },
7298 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsb, "__builtin_altivec_vcmpgtsb", ALTIVEC_BUILTIN_VCMPGTSB },
7299 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtuh, "__builtin_altivec_vcmpgtuh", ALTIVEC_BUILTIN_VCMPGTUH },
7300 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsh, "__builtin_altivec_vcmpgtsh", ALTIVEC_BUILTIN_VCMPGTSH },
7301 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtuw, "__builtin_altivec_vcmpgtuw", ALTIVEC_BUILTIN_VCMPGTUW },
7302 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsw, "__builtin_altivec_vcmpgtsw", ALTIVEC_BUILTIN_VCMPGTSW },
7303 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtfp, "__builtin_altivec_vcmpgtfp", ALTIVEC_BUILTIN_VCMPGTFP },
7304 { MASK_ALTIVEC, CODE_FOR_altivec_vctsxs, "__builtin_altivec_vctsxs", ALTIVEC_BUILTIN_VCTSXS },
7305 { MASK_ALTIVEC, CODE_FOR_altivec_vctuxs, "__builtin_altivec_vctuxs", ALTIVEC_BUILTIN_VCTUXS },
7306 { MASK_ALTIVEC, CODE_FOR_umaxv16qi3, "__builtin_altivec_vmaxub", ALTIVEC_BUILTIN_VMAXUB },
7307 { MASK_ALTIVEC, CODE_FOR_smaxv16qi3, "__builtin_altivec_vmaxsb", ALTIVEC_BUILTIN_VMAXSB },
7308 { MASK_ALTIVEC, CODE_FOR_umaxv8hi3, "__builtin_altivec_vmaxuh", ALTIVEC_BUILTIN_VMAXUH },
7309 { MASK_ALTIVEC, CODE_FOR_smaxv8hi3, "__builtin_altivec_vmaxsh", ALTIVEC_BUILTIN_VMAXSH },
7310 { MASK_ALTIVEC, CODE_FOR_umaxv4si3, "__builtin_altivec_vmaxuw", ALTIVEC_BUILTIN_VMAXUW },
7311 { MASK_ALTIVEC, CODE_FOR_smaxv4si3, "__builtin_altivec_vmaxsw", ALTIVEC_BUILTIN_VMAXSW },
7312 { MASK_ALTIVEC, CODE_FOR_smaxv4sf3, "__builtin_altivec_vmaxfp", ALTIVEC_BUILTIN_VMAXFP },
7313 { MASK_ALTIVEC, CODE_FOR_altivec_vmrghb, "__builtin_altivec_vmrghb", ALTIVEC_BUILTIN_VMRGHB },
7314 { MASK_ALTIVEC, CODE_FOR_altivec_vmrghh, "__builtin_altivec_vmrghh", ALTIVEC_BUILTIN_VMRGHH },
7315 { MASK_ALTIVEC, CODE_FOR_altivec_vmrghw, "__builtin_altivec_vmrghw", ALTIVEC_BUILTIN_VMRGHW },
7316 { MASK_ALTIVEC, CODE_FOR_altivec_vmrglb, "__builtin_altivec_vmrglb", ALTIVEC_BUILTIN_VMRGLB },
7317 { MASK_ALTIVEC, CODE_FOR_altivec_vmrglh, "__builtin_altivec_vmrglh", ALTIVEC_BUILTIN_VMRGLH },
7318 { MASK_ALTIVEC, CODE_FOR_altivec_vmrglw, "__builtin_altivec_vmrglw", ALTIVEC_BUILTIN_VMRGLW },
7319 { MASK_ALTIVEC, CODE_FOR_uminv16qi3, "__builtin_altivec_vminub", ALTIVEC_BUILTIN_VMINUB },
7320 { MASK_ALTIVEC, CODE_FOR_sminv16qi3, "__builtin_altivec_vminsb", ALTIVEC_BUILTIN_VMINSB },
7321 { MASK_ALTIVEC, CODE_FOR_uminv8hi3, "__builtin_altivec_vminuh", ALTIVEC_BUILTIN_VMINUH },
7322 { MASK_ALTIVEC, CODE_FOR_sminv8hi3, "__builtin_altivec_vminsh", ALTIVEC_BUILTIN_VMINSH },
7323 { MASK_ALTIVEC, CODE_FOR_uminv4si3, "__builtin_altivec_vminuw", ALTIVEC_BUILTIN_VMINUW },
7324 { MASK_ALTIVEC, CODE_FOR_sminv4si3, "__builtin_altivec_vminsw", ALTIVEC_BUILTIN_VMINSW },
7325 { MASK_ALTIVEC, CODE_FOR_sminv4sf3, "__builtin_altivec_vminfp", ALTIVEC_BUILTIN_VMINFP },
7326 { MASK_ALTIVEC, CODE_FOR_altivec_vmuleub, "__builtin_altivec_vmuleub", ALTIVEC_BUILTIN_VMULEUB },
7327 { MASK_ALTIVEC, CODE_FOR_altivec_vmulesb, "__builtin_altivec_vmulesb", ALTIVEC_BUILTIN_VMULESB },
7328 { MASK_ALTIVEC, CODE_FOR_altivec_vmuleuh, "__builtin_altivec_vmuleuh", ALTIVEC_BUILTIN_VMULEUH },
7329 { MASK_ALTIVEC, CODE_FOR_altivec_vmulesh, "__builtin_altivec_vmulesh", ALTIVEC_BUILTIN_VMULESH },
7330 { MASK_ALTIVEC, CODE_FOR_altivec_vmuloub, "__builtin_altivec_vmuloub", ALTIVEC_BUILTIN_VMULOUB },
7331 { MASK_ALTIVEC, CODE_FOR_altivec_vmulosb, "__builtin_altivec_vmulosb", ALTIVEC_BUILTIN_VMULOSB },
7332 { MASK_ALTIVEC, CODE_FOR_altivec_vmulouh, "__builtin_altivec_vmulouh", ALTIVEC_BUILTIN_VMULOUH },
7333 { MASK_ALTIVEC, CODE_FOR_altivec_vmulosh, "__builtin_altivec_vmulosh", ALTIVEC_BUILTIN_VMULOSH },
7334 { MASK_ALTIVEC, CODE_FOR_altivec_norv4si3, "__builtin_altivec_vnor", ALTIVEC_BUILTIN_VNOR },
7335 { MASK_ALTIVEC, CODE_FOR_iorv4si3, "__builtin_altivec_vor", ALTIVEC_BUILTIN_VOR },
7336 { MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum, "__builtin_altivec_vpkuhum", ALTIVEC_BUILTIN_VPKUHUM },
7337 { MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum, "__builtin_altivec_vpkuwum", ALTIVEC_BUILTIN_VPKUWUM },
7338 { MASK_ALTIVEC, CODE_FOR_altivec_vpkpx, "__builtin_altivec_vpkpx", ALTIVEC_BUILTIN_VPKPX },
7339 { MASK_ALTIVEC, CODE_FOR_altivec_vpkshss, "__builtin_altivec_vpkshss", ALTIVEC_BUILTIN_VPKSHSS },
7340 { MASK_ALTIVEC, CODE_FOR_altivec_vpkswss, "__builtin_altivec_vpkswss", ALTIVEC_BUILTIN_VPKSWSS },
7341 { MASK_ALTIVEC, CODE_FOR_altivec_vpkuhus, "__builtin_altivec_vpkuhus", ALTIVEC_BUILTIN_VPKUHUS },
7342 { MASK_ALTIVEC, CODE_FOR_altivec_vpkshus, "__builtin_altivec_vpkshus", ALTIVEC_BUILTIN_VPKSHUS },
7343 { MASK_ALTIVEC, CODE_FOR_altivec_vpkuwus, "__builtin_altivec_vpkuwus", ALTIVEC_BUILTIN_VPKUWUS },
7344 { MASK_ALTIVEC, CODE_FOR_altivec_vpkswus, "__builtin_altivec_vpkswus", ALTIVEC_BUILTIN_VPKSWUS },
7345 { MASK_ALTIVEC, CODE_FOR_altivec_vrlb, "__builtin_altivec_vrlb", ALTIVEC_BUILTIN_VRLB },
7346 { MASK_ALTIVEC, CODE_FOR_altivec_vrlh, "__builtin_altivec_vrlh", ALTIVEC_BUILTIN_VRLH },
7347 { MASK_ALTIVEC, CODE_FOR_altivec_vrlw, "__builtin_altivec_vrlw", ALTIVEC_BUILTIN_VRLW },
7348 { MASK_ALTIVEC, CODE_FOR_vashlv16qi3, "__builtin_altivec_vslb", ALTIVEC_BUILTIN_VSLB },
7349 { MASK_ALTIVEC, CODE_FOR_vashlv8hi3, "__builtin_altivec_vslh", ALTIVEC_BUILTIN_VSLH },
7350 { MASK_ALTIVEC, CODE_FOR_vashlv4si3, "__builtin_altivec_vslw", ALTIVEC_BUILTIN_VSLW },
7351 { MASK_ALTIVEC, CODE_FOR_altivec_vsl, "__builtin_altivec_vsl", ALTIVEC_BUILTIN_VSL },
7352 { MASK_ALTIVEC, CODE_FOR_altivec_vslo, "__builtin_altivec_vslo", ALTIVEC_BUILTIN_VSLO },
7353 { MASK_ALTIVEC, CODE_FOR_altivec_vspltb, "__builtin_altivec_vspltb", ALTIVEC_BUILTIN_VSPLTB },
7354 { MASK_ALTIVEC, CODE_FOR_altivec_vsplth, "__builtin_altivec_vsplth", ALTIVEC_BUILTIN_VSPLTH },
7355 { MASK_ALTIVEC, CODE_FOR_altivec_vspltw, "__builtin_altivec_vspltw", ALTIVEC_BUILTIN_VSPLTW },
7356 { MASK_ALTIVEC, CODE_FOR_vlshrv16qi3, "__builtin_altivec_vsrb", ALTIVEC_BUILTIN_VSRB },
7357 { MASK_ALTIVEC, CODE_FOR_vlshrv8hi3, "__builtin_altivec_vsrh", ALTIVEC_BUILTIN_VSRH },
7358 { MASK_ALTIVEC, CODE_FOR_vlshrv4si3, "__builtin_altivec_vsrw", ALTIVEC_BUILTIN_VSRW },
7359 { MASK_ALTIVEC, CODE_FOR_vashrv16qi3, "__builtin_altivec_vsrab", ALTIVEC_BUILTIN_VSRAB },
7360 { MASK_ALTIVEC, CODE_FOR_vashrv8hi3, "__builtin_altivec_vsrah", ALTIVEC_BUILTIN_VSRAH },
7361 { MASK_ALTIVEC, CODE_FOR_vashrv4si3, "__builtin_altivec_vsraw", ALTIVEC_BUILTIN_VSRAW },
7362 { MASK_ALTIVEC, CODE_FOR_altivec_vsr, "__builtin_altivec_vsr", ALTIVEC_BUILTIN_VSR },
7363 { MASK_ALTIVEC, CODE_FOR_altivec_vsro, "__builtin_altivec_vsro", ALTIVEC_BUILTIN_VSRO },
7364 { MASK_ALTIVEC, CODE_FOR_subv16qi3, "__builtin_altivec_vsububm", ALTIVEC_BUILTIN_VSUBUBM },
7365 { MASK_ALTIVEC, CODE_FOR_subv8hi3, "__builtin_altivec_vsubuhm", ALTIVEC_BUILTIN_VSUBUHM },
7366 { MASK_ALTIVEC, CODE_FOR_subv4si3, "__builtin_altivec_vsubuwm", ALTIVEC_BUILTIN_VSUBUWM },
7367 { MASK_ALTIVEC, CODE_FOR_subv4sf3, "__builtin_altivec_vsubfp", ALTIVEC_BUILTIN_VSUBFP },
7368 { MASK_ALTIVEC, CODE_FOR_altivec_vsubcuw, "__builtin_altivec_vsubcuw", ALTIVEC_BUILTIN_VSUBCUW },
7369 { MASK_ALTIVEC, CODE_FOR_altivec_vsububs, "__builtin_altivec_vsububs", ALTIVEC_BUILTIN_VSUBUBS },
7370 { MASK_ALTIVEC, CODE_FOR_altivec_vsubsbs, "__builtin_altivec_vsubsbs", ALTIVEC_BUILTIN_VSUBSBS },
7371 { MASK_ALTIVEC, CODE_FOR_altivec_vsubuhs, "__builtin_altivec_vsubuhs", ALTIVEC_BUILTIN_VSUBUHS },
7372 { MASK_ALTIVEC, CODE_FOR_altivec_vsubshs, "__builtin_altivec_vsubshs", ALTIVEC_BUILTIN_VSUBSHS },
7373 { MASK_ALTIVEC, CODE_FOR_altivec_vsubuws, "__builtin_altivec_vsubuws", ALTIVEC_BUILTIN_VSUBUWS },
7374 { MASK_ALTIVEC, CODE_FOR_altivec_vsubsws, "__builtin_altivec_vsubsws", ALTIVEC_BUILTIN_VSUBSWS },
7375 { MASK_ALTIVEC, CODE_FOR_altivec_vsum4ubs, "__builtin_altivec_vsum4ubs", ALTIVEC_BUILTIN_VSUM4UBS },
7376 { MASK_ALTIVEC, CODE_FOR_altivec_vsum4sbs, "__builtin_altivec_vsum4sbs", ALTIVEC_BUILTIN_VSUM4SBS },
7377 { MASK_ALTIVEC, CODE_FOR_altivec_vsum4shs, "__builtin_altivec_vsum4shs", ALTIVEC_BUILTIN_VSUM4SHS },
7378 { MASK_ALTIVEC, CODE_FOR_altivec_vsum2sws, "__builtin_altivec_vsum2sws", ALTIVEC_BUILTIN_VSUM2SWS },
7379 { MASK_ALTIVEC, CODE_FOR_altivec_vsumsws, "__builtin_altivec_vsumsws", ALTIVEC_BUILTIN_VSUMSWS },
7380 { MASK_ALTIVEC, CODE_FOR_xorv4si3, "__builtin_altivec_vxor", ALTIVEC_BUILTIN_VXOR },
7382 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_add", ALTIVEC_BUILTIN_VEC_ADD },
7383 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddfp", ALTIVEC_BUILTIN_VEC_VADDFP },
7384 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduwm", ALTIVEC_BUILTIN_VEC_VADDUWM },
7385 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduhm", ALTIVEC_BUILTIN_VEC_VADDUHM },
7386 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddubm", ALTIVEC_BUILTIN_VEC_VADDUBM },
7387 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_addc", ALTIVEC_BUILTIN_VEC_ADDC },
7388 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_adds", ALTIVEC_BUILTIN_VEC_ADDS },
7389 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddsws", ALTIVEC_BUILTIN_VEC_VADDSWS },
7390 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduws", ALTIVEC_BUILTIN_VEC_VADDUWS },
7391 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddshs", ALTIVEC_BUILTIN_VEC_VADDSHS },
7392 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduhs", ALTIVEC_BUILTIN_VEC_VADDUHS },
7393 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddsbs", ALTIVEC_BUILTIN_VEC_VADDSBS },
7394 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddubs", ALTIVEC_BUILTIN_VEC_VADDUBS },
7395 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_and", ALTIVEC_BUILTIN_VEC_AND },
7396 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_andc", ALTIVEC_BUILTIN_VEC_ANDC },
7397 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_avg", ALTIVEC_BUILTIN_VEC_AVG },
7398 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsw", ALTIVEC_BUILTIN_VEC_VAVGSW },
7399 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavguw", ALTIVEC_BUILTIN_VEC_VAVGUW },
7400 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsh", ALTIVEC_BUILTIN_VEC_VAVGSH },
7401 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavguh", ALTIVEC_BUILTIN_VEC_VAVGUH },
7402 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsb", ALTIVEC_BUILTIN_VEC_VAVGSB },
7403 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgub", ALTIVEC_BUILTIN_VEC_VAVGUB },
7404 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpb", ALTIVEC_BUILTIN_VEC_CMPB },
7405 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpeq", ALTIVEC_BUILTIN_VEC_CMPEQ },
7406 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpeqfp", ALTIVEC_BUILTIN_VEC_VCMPEQFP },
7407 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequw", ALTIVEC_BUILTIN_VEC_VCMPEQUW },
7408 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequh", ALTIVEC_BUILTIN_VEC_VCMPEQUH },
7409 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequb", ALTIVEC_BUILTIN_VEC_VCMPEQUB },
7410 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpge", ALTIVEC_BUILTIN_VEC_CMPGE },
7411 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpgt", ALTIVEC_BUILTIN_VEC_CMPGT },
7412 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtfp", ALTIVEC_BUILTIN_VEC_VCMPGTFP },
7413 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsw", ALTIVEC_BUILTIN_VEC_VCMPGTSW },
7414 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtuw", ALTIVEC_BUILTIN_VEC_VCMPGTUW },
7415 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsh", ALTIVEC_BUILTIN_VEC_VCMPGTSH },
7416 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtuh", ALTIVEC_BUILTIN_VEC_VCMPGTUH },
7417 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsb", ALTIVEC_BUILTIN_VEC_VCMPGTSB },
7418 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtub", ALTIVEC_BUILTIN_VEC_VCMPGTUB },
7419 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmple", ALTIVEC_BUILTIN_VEC_CMPLE },
7420 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmplt", ALTIVEC_BUILTIN_VEC_CMPLT },
7421 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_max", ALTIVEC_BUILTIN_VEC_MAX },
7422 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxfp", ALTIVEC_BUILTIN_VEC_VMAXFP },
7423 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsw", ALTIVEC_BUILTIN_VEC_VMAXSW },
7424 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxuw", ALTIVEC_BUILTIN_VEC_VMAXUW },
7425 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsh", ALTIVEC_BUILTIN_VEC_VMAXSH },
7426 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxuh", ALTIVEC_BUILTIN_VEC_VMAXUH },
7427 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsb", ALTIVEC_BUILTIN_VEC_VMAXSB },
7428 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxub", ALTIVEC_BUILTIN_VEC_VMAXUB },
7429 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mergeh", ALTIVEC_BUILTIN_VEC_MERGEH },
7430 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghw", ALTIVEC_BUILTIN_VEC_VMRGHW },
7431 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghh", ALTIVEC_BUILTIN_VEC_VMRGHH },
7432 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghb", ALTIVEC_BUILTIN_VEC_VMRGHB },
7433 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mergel", ALTIVEC_BUILTIN_VEC_MERGEL },
7434 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglw", ALTIVEC_BUILTIN_VEC_VMRGLW },
7435 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglh", ALTIVEC_BUILTIN_VEC_VMRGLH },
7436 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglb", ALTIVEC_BUILTIN_VEC_VMRGLB },
7437 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_min", ALTIVEC_BUILTIN_VEC_MIN },
7438 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminfp", ALTIVEC_BUILTIN_VEC_VMINFP },
7439 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsw", ALTIVEC_BUILTIN_VEC_VMINSW },
7440 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminuw", ALTIVEC_BUILTIN_VEC_VMINUW },
7441 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsh", ALTIVEC_BUILTIN_VEC_VMINSH },
7442 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminuh", ALTIVEC_BUILTIN_VEC_VMINUH },
7443 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsb", ALTIVEC_BUILTIN_VEC_VMINSB },
7444 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminub", ALTIVEC_BUILTIN_VEC_VMINUB },
7445 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mule", ALTIVEC_BUILTIN_VEC_MULE },
7446 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuleub", ALTIVEC_BUILTIN_VEC_VMULEUB },
7447 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulesb", ALTIVEC_BUILTIN_VEC_VMULESB },
7448 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuleuh", ALTIVEC_BUILTIN_VEC_VMULEUH },
7449 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulesh", ALTIVEC_BUILTIN_VEC_VMULESH },
7450 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mulo", ALTIVEC_BUILTIN_VEC_MULO },
7451 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulosh", ALTIVEC_BUILTIN_VEC_VMULOSH },
7452 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulouh", ALTIVEC_BUILTIN_VEC_VMULOUH },
7453 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulosb", ALTIVEC_BUILTIN_VEC_VMULOSB },
7454 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuloub", ALTIVEC_BUILTIN_VEC_VMULOUB },
7455 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_nor", ALTIVEC_BUILTIN_VEC_NOR },
7456 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_or", ALTIVEC_BUILTIN_VEC_OR },
7457 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_pack", ALTIVEC_BUILTIN_VEC_PACK },
7458 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuwum", ALTIVEC_BUILTIN_VEC_VPKUWUM },
7459 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuhum", ALTIVEC_BUILTIN_VEC_VPKUHUM },
7460 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packpx", ALTIVEC_BUILTIN_VEC_PACKPX },
7461 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packs", ALTIVEC_BUILTIN_VEC_PACKS },
7462 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkswss", ALTIVEC_BUILTIN_VEC_VPKSWSS },
7463 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuwus", ALTIVEC_BUILTIN_VEC_VPKUWUS },
7464 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkshss", ALTIVEC_BUILTIN_VEC_VPKSHSS },
7465 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuhus", ALTIVEC_BUILTIN_VEC_VPKUHUS },
7466 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packsu", ALTIVEC_BUILTIN_VEC_PACKSU },
7467 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkswus", ALTIVEC_BUILTIN_VEC_VPKSWUS },
7468 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkshus", ALTIVEC_BUILTIN_VEC_VPKSHUS },
7469 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_rl", ALTIVEC_BUILTIN_VEC_RL },
7470 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlw", ALTIVEC_BUILTIN_VEC_VRLW },
7471 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlh", ALTIVEC_BUILTIN_VEC_VRLH },
7472 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlb", ALTIVEC_BUILTIN_VEC_VRLB },
7473 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sl", ALTIVEC_BUILTIN_VEC_SL },
7474 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslw", ALTIVEC_BUILTIN_VEC_VSLW },
7475 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslh", ALTIVEC_BUILTIN_VEC_VSLH },
7476 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslb", ALTIVEC_BUILTIN_VEC_VSLB },
7477 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sll", ALTIVEC_BUILTIN_VEC_SLL },
7478 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_slo", ALTIVEC_BUILTIN_VEC_SLO },
7479 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sr", ALTIVEC_BUILTIN_VEC_SR },
7480 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrw", ALTIVEC_BUILTIN_VEC_VSRW },
7481 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrh", ALTIVEC_BUILTIN_VEC_VSRH },
7482 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrb", ALTIVEC_BUILTIN_VEC_VSRB },
7483 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sra", ALTIVEC_BUILTIN_VEC_SRA },
7484 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsraw", ALTIVEC_BUILTIN_VEC_VSRAW },
7485 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrah", ALTIVEC_BUILTIN_VEC_VSRAH },
7486 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrab", ALTIVEC_BUILTIN_VEC_VSRAB },
7487 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_srl", ALTIVEC_BUILTIN_VEC_SRL },
7488 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sro", ALTIVEC_BUILTIN_VEC_SRO },
7489 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sub", ALTIVEC_BUILTIN_VEC_SUB },
7490 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubfp", ALTIVEC_BUILTIN_VEC_VSUBFP },
7491 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuwm", ALTIVEC_BUILTIN_VEC_VSUBUWM },
7492 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuhm", ALTIVEC_BUILTIN_VEC_VSUBUHM },
7493 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsububm", ALTIVEC_BUILTIN_VEC_VSUBUBM },
7494 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_subc", ALTIVEC_BUILTIN_VEC_SUBC },
7495 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_subs", ALTIVEC_BUILTIN_VEC_SUBS },
7496 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubsws", ALTIVEC_BUILTIN_VEC_VSUBSWS },
7497 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuws", ALTIVEC_BUILTIN_VEC_VSUBUWS },
7498 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubshs", ALTIVEC_BUILTIN_VEC_VSUBSHS },
7499 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuhs", ALTIVEC_BUILTIN_VEC_VSUBUHS },
7500 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubsbs", ALTIVEC_BUILTIN_VEC_VSUBSBS },
7501 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsububs", ALTIVEC_BUILTIN_VEC_VSUBUBS },
7502 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sum4s", ALTIVEC_BUILTIN_VEC_SUM4S },
7503 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4shs", ALTIVEC_BUILTIN_VEC_VSUM4SHS },
7504 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4sbs", ALTIVEC_BUILTIN_VEC_VSUM4SBS },
7505 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4ubs", ALTIVEC_BUILTIN_VEC_VSUM4UBS },
7506 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sum2s", ALTIVEC_BUILTIN_VEC_SUM2S },
7507 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sums", ALTIVEC_BUILTIN_VEC_SUMS },
7508 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_xor", ALTIVEC_BUILTIN_VEC_XOR },
7510 { 0, CODE_FOR_divv2sf3, "__builtin_paired_divv2sf3", PAIRED_BUILTIN_DIVV2SF3 },
7511 { 0, CODE_FOR_addv2sf3, "__builtin_paired_addv2sf3", PAIRED_BUILTIN_ADDV2SF3 },
7512 { 0, CODE_FOR_subv2sf3, "__builtin_paired_subv2sf3", PAIRED_BUILTIN_SUBV2SF3 },
7513 { 0, CODE_FOR_mulv2sf3, "__builtin_paired_mulv2sf3", PAIRED_BUILTIN_MULV2SF3 },
7514 { 0, CODE_FOR_paired_muls0, "__builtin_paired_muls0", PAIRED_BUILTIN_MULS0 },
7515 { 0, CODE_FOR_paired_muls1, "__builtin_paired_muls1", PAIRED_BUILTIN_MULS1 },
7516 { 0, CODE_FOR_paired_merge00, "__builtin_paired_merge00", PAIRED_BUILTIN_MERGE00 },
7517 { 0, CODE_FOR_paired_merge01, "__builtin_paired_merge01", PAIRED_BUILTIN_MERGE01 },
7518 { 0, CODE_FOR_paired_merge10, "__builtin_paired_merge10", PAIRED_BUILTIN_MERGE10 },
7519 { 0, CODE_FOR_paired_merge11, "__builtin_paired_merge11", PAIRED_BUILTIN_MERGE11 },
7521 /* Place-holder. Leave as first SPE builtin. */
7522 { 0, CODE_FOR_spe_evaddw, "__builtin_spe_evaddw", SPE_BUILTIN_EVADDW },
7523 { 0, CODE_FOR_spe_evand, "__builtin_spe_evand", SPE_BUILTIN_EVAND },
7524 { 0, CODE_FOR_spe_evandc, "__builtin_spe_evandc", SPE_BUILTIN_EVANDC },
7525 { 0, CODE_FOR_spe_evdivws, "__builtin_spe_evdivws", SPE_BUILTIN_EVDIVWS },
7526 { 0, CODE_FOR_spe_evdivwu, "__builtin_spe_evdivwu", SPE_BUILTIN_EVDIVWU },
7527 { 0, CODE_FOR_spe_eveqv, "__builtin_spe_eveqv", SPE_BUILTIN_EVEQV },
7528 { 0, CODE_FOR_spe_evfsadd, "__builtin_spe_evfsadd", SPE_BUILTIN_EVFSADD },
7529 { 0, CODE_FOR_spe_evfsdiv, "__builtin_spe_evfsdiv", SPE_BUILTIN_EVFSDIV },
7530 { 0, CODE_FOR_spe_evfsmul, "__builtin_spe_evfsmul", SPE_BUILTIN_EVFSMUL },
7531 { 0, CODE_FOR_spe_evfssub, "__builtin_spe_evfssub", SPE_BUILTIN_EVFSSUB },
7532 { 0, CODE_FOR_spe_evmergehi, "__builtin_spe_evmergehi", SPE_BUILTIN_EVMERGEHI },
7533 { 0, CODE_FOR_spe_evmergehilo, "__builtin_spe_evmergehilo", SPE_BUILTIN_EVMERGEHILO },
7534 { 0, CODE_FOR_spe_evmergelo, "__builtin_spe_evmergelo", SPE_BUILTIN_EVMERGELO },
7535 { 0, CODE_FOR_spe_evmergelohi, "__builtin_spe_evmergelohi", SPE_BUILTIN_EVMERGELOHI },
7536 { 0, CODE_FOR_spe_evmhegsmfaa, "__builtin_spe_evmhegsmfaa", SPE_BUILTIN_EVMHEGSMFAA },
7537 { 0, CODE_FOR_spe_evmhegsmfan, "__builtin_spe_evmhegsmfan", SPE_BUILTIN_EVMHEGSMFAN },
7538 { 0, CODE_FOR_spe_evmhegsmiaa, "__builtin_spe_evmhegsmiaa", SPE_BUILTIN_EVMHEGSMIAA },
7539 { 0, CODE_FOR_spe_evmhegsmian, "__builtin_spe_evmhegsmian", SPE_BUILTIN_EVMHEGSMIAN },
7540 { 0, CODE_FOR_spe_evmhegumiaa, "__builtin_spe_evmhegumiaa", SPE_BUILTIN_EVMHEGUMIAA },
7541 { 0, CODE_FOR_spe_evmhegumian, "__builtin_spe_evmhegumian", SPE_BUILTIN_EVMHEGUMIAN },
7542 { 0, CODE_FOR_spe_evmhesmf, "__builtin_spe_evmhesmf", SPE_BUILTIN_EVMHESMF },
7543 { 0, CODE_FOR_spe_evmhesmfa, "__builtin_spe_evmhesmfa", SPE_BUILTIN_EVMHESMFA },
7544 { 0, CODE_FOR_spe_evmhesmfaaw, "__builtin_spe_evmhesmfaaw", SPE_BUILTIN_EVMHESMFAAW },
7545 { 0, CODE_FOR_spe_evmhesmfanw, "__builtin_spe_evmhesmfanw", SPE_BUILTIN_EVMHESMFANW },
7546 { 0, CODE_FOR_spe_evmhesmi, "__builtin_spe_evmhesmi", SPE_BUILTIN_EVMHESMI },
7547 { 0, CODE_FOR_spe_evmhesmia, "__builtin_spe_evmhesmia", SPE_BUILTIN_EVMHESMIA },
7548 { 0, CODE_FOR_spe_evmhesmiaaw, "__builtin_spe_evmhesmiaaw", SPE_BUILTIN_EVMHESMIAAW },
7549 { 0, CODE_FOR_spe_evmhesmianw, "__builtin_spe_evmhesmianw", SPE_BUILTIN_EVMHESMIANW },
7550 { 0, CODE_FOR_spe_evmhessf, "__builtin_spe_evmhessf", SPE_BUILTIN_EVMHESSF },
7551 { 0, CODE_FOR_spe_evmhessfa, "__builtin_spe_evmhessfa", SPE_BUILTIN_EVMHESSFA },
7552 { 0, CODE_FOR_spe_evmhessfaaw, "__builtin_spe_evmhessfaaw", SPE_BUILTIN_EVMHESSFAAW },
7553 { 0, CODE_FOR_spe_evmhessfanw, "__builtin_spe_evmhessfanw", SPE_BUILTIN_EVMHESSFANW },
7554 { 0, CODE_FOR_spe_evmhessiaaw, "__builtin_spe_evmhessiaaw", SPE_BUILTIN_EVMHESSIAAW },
7555 { 0, CODE_FOR_spe_evmhessianw, "__builtin_spe_evmhessianw", SPE_BUILTIN_EVMHESSIANW },
7556 { 0, CODE_FOR_spe_evmheumi, "__builtin_spe_evmheumi", SPE_BUILTIN_EVMHEUMI },
7557 { 0, CODE_FOR_spe_evmheumia, "__builtin_spe_evmheumia", SPE_BUILTIN_EVMHEUMIA },
7558 { 0, CODE_FOR_spe_evmheumiaaw, "__builtin_spe_evmheumiaaw", SPE_BUILTIN_EVMHEUMIAAW },
7559 { 0, CODE_FOR_spe_evmheumianw, "__builtin_spe_evmheumianw", SPE_BUILTIN_EVMHEUMIANW },
7560 { 0, CODE_FOR_spe_evmheusiaaw, "__builtin_spe_evmheusiaaw", SPE_BUILTIN_EVMHEUSIAAW },
7561 { 0, CODE_FOR_spe_evmheusianw, "__builtin_spe_evmheusianw", SPE_BUILTIN_EVMHEUSIANW },
7562 { 0, CODE_FOR_spe_evmhogsmfaa, "__builtin_spe_evmhogsmfaa", SPE_BUILTIN_EVMHOGSMFAA },
7563 { 0, CODE_FOR_spe_evmhogsmfan, "__builtin_spe_evmhogsmfan", SPE_BUILTIN_EVMHOGSMFAN },
7564 { 0, CODE_FOR_spe_evmhogsmiaa, "__builtin_spe_evmhogsmiaa", SPE_BUILTIN_EVMHOGSMIAA },
7565 { 0, CODE_FOR_spe_evmhogsmian, "__builtin_spe_evmhogsmian", SPE_BUILTIN_EVMHOGSMIAN },
7566 { 0, CODE_FOR_spe_evmhogumiaa, "__builtin_spe_evmhogumiaa", SPE_BUILTIN_EVMHOGUMIAA },
7567 { 0, CODE_FOR_spe_evmhogumian, "__builtin_spe_evmhogumian", SPE_BUILTIN_EVMHOGUMIAN },
7568 { 0, CODE_FOR_spe_evmhosmf, "__builtin_spe_evmhosmf", SPE_BUILTIN_EVMHOSMF },
7569 { 0, CODE_FOR_spe_evmhosmfa, "__builtin_spe_evmhosmfa", SPE_BUILTIN_EVMHOSMFA },
7570 { 0, CODE_FOR_spe_evmhosmfaaw, "__builtin_spe_evmhosmfaaw", SPE_BUILTIN_EVMHOSMFAAW },
7571 { 0, CODE_FOR_spe_evmhosmfanw, "__builtin_spe_evmhosmfanw", SPE_BUILTIN_EVMHOSMFANW },
7572 { 0, CODE_FOR_spe_evmhosmi, "__builtin_spe_evmhosmi", SPE_BUILTIN_EVMHOSMI },
7573 { 0, CODE_FOR_spe_evmhosmia, "__builtin_spe_evmhosmia", SPE_BUILTIN_EVMHOSMIA },
7574 { 0, CODE_FOR_spe_evmhosmiaaw, "__builtin_spe_evmhosmiaaw", SPE_BUILTIN_EVMHOSMIAAW },
7575 { 0, CODE_FOR_spe_evmhosmianw, "__builtin_spe_evmhosmianw", SPE_BUILTIN_EVMHOSMIANW },
7576 { 0, CODE_FOR_spe_evmhossf, "__builtin_spe_evmhossf", SPE_BUILTIN_EVMHOSSF },
7577 { 0, CODE_FOR_spe_evmhossfa, "__builtin_spe_evmhossfa", SPE_BUILTIN_EVMHOSSFA },
7578 { 0, CODE_FOR_spe_evmhossfaaw, "__builtin_spe_evmhossfaaw", SPE_BUILTIN_EVMHOSSFAAW },
7579 { 0, CODE_FOR_spe_evmhossfanw, "__builtin_spe_evmhossfanw", SPE_BUILTIN_EVMHOSSFANW },
7580 { 0, CODE_FOR_spe_evmhossiaaw, "__builtin_spe_evmhossiaaw", SPE_BUILTIN_EVMHOSSIAAW },
7581 { 0, CODE_FOR_spe_evmhossianw, "__builtin_spe_evmhossianw", SPE_BUILTIN_EVMHOSSIANW },
7582 { 0, CODE_FOR_spe_evmhoumi, "__builtin_spe_evmhoumi", SPE_BUILTIN_EVMHOUMI },
7583 { 0, CODE_FOR_spe_evmhoumia, "__builtin_spe_evmhoumia", SPE_BUILTIN_EVMHOUMIA },
7584 { 0, CODE_FOR_spe_evmhoumiaaw, "__builtin_spe_evmhoumiaaw", SPE_BUILTIN_EVMHOUMIAAW },
7585 { 0, CODE_FOR_spe_evmhoumianw, "__builtin_spe_evmhoumianw", SPE_BUILTIN_EVMHOUMIANW },
7586 { 0, CODE_FOR_spe_evmhousiaaw, "__builtin_spe_evmhousiaaw", SPE_BUILTIN_EVMHOUSIAAW },
7587 { 0, CODE_FOR_spe_evmhousianw, "__builtin_spe_evmhousianw", SPE_BUILTIN_EVMHOUSIANW },
7588 { 0, CODE_FOR_spe_evmwhsmf, "__builtin_spe_evmwhsmf", SPE_BUILTIN_EVMWHSMF },
7589 { 0, CODE_FOR_spe_evmwhsmfa, "__builtin_spe_evmwhsmfa", SPE_BUILTIN_EVMWHSMFA },
7590 { 0, CODE_FOR_spe_evmwhsmi, "__builtin_spe_evmwhsmi", SPE_BUILTIN_EVMWHSMI },
7591 { 0, CODE_FOR_spe_evmwhsmia, "__builtin_spe_evmwhsmia", SPE_BUILTIN_EVMWHSMIA },
7592 { 0, CODE_FOR_spe_evmwhssf, "__builtin_spe_evmwhssf", SPE_BUILTIN_EVMWHSSF },
7593 { 0, CODE_FOR_spe_evmwhssfa, "__builtin_spe_evmwhssfa", SPE_BUILTIN_EVMWHSSFA },
7594 { 0, CODE_FOR_spe_evmwhumi, "__builtin_spe_evmwhumi", SPE_BUILTIN_EVMWHUMI },
7595 { 0, CODE_FOR_spe_evmwhumia, "__builtin_spe_evmwhumia", SPE_BUILTIN_EVMWHUMIA },
7596 { 0, CODE_FOR_spe_evmwlsmiaaw, "__builtin_spe_evmwlsmiaaw", SPE_BUILTIN_EVMWLSMIAAW },
7597 { 0, CODE_FOR_spe_evmwlsmianw, "__builtin_spe_evmwlsmianw", SPE_BUILTIN_EVMWLSMIANW },
7598 { 0, CODE_FOR_spe_evmwlssiaaw, "__builtin_spe_evmwlssiaaw", SPE_BUILTIN_EVMWLSSIAAW },
7599 { 0, CODE_FOR_spe_evmwlssianw, "__builtin_spe_evmwlssianw", SPE_BUILTIN_EVMWLSSIANW },
7600 { 0, CODE_FOR_spe_evmwlumi, "__builtin_spe_evmwlumi", SPE_BUILTIN_EVMWLUMI },
7601 { 0, CODE_FOR_spe_evmwlumia, "__builtin_spe_evmwlumia", SPE_BUILTIN_EVMWLUMIA },
7602 { 0, CODE_FOR_spe_evmwlumiaaw, "__builtin_spe_evmwlumiaaw", SPE_BUILTIN_EVMWLUMIAAW },
7603 { 0, CODE_FOR_spe_evmwlumianw, "__builtin_spe_evmwlumianw", SPE_BUILTIN_EVMWLUMIANW },
7604 { 0, CODE_FOR_spe_evmwlusiaaw, "__builtin_spe_evmwlusiaaw", SPE_BUILTIN_EVMWLUSIAAW },
7605 { 0, CODE_FOR_spe_evmwlusianw, "__builtin_spe_evmwlusianw", SPE_BUILTIN_EVMWLUSIANW },
7606 { 0, CODE_FOR_spe_evmwsmf, "__builtin_spe_evmwsmf", SPE_BUILTIN_EVMWSMF },
7607 { 0, CODE_FOR_spe_evmwsmfa, "__builtin_spe_evmwsmfa", SPE_BUILTIN_EVMWSMFA },
7608 { 0, CODE_FOR_spe_evmwsmfaa, "__builtin_spe_evmwsmfaa", SPE_BUILTIN_EVMWSMFAA },
7609 { 0, CODE_FOR_spe_evmwsmfan, "__builtin_spe_evmwsmfan", SPE_BUILTIN_EVMWSMFAN },
7610 { 0, CODE_FOR_spe_evmwsmi, "__builtin_spe_evmwsmi", SPE_BUILTIN_EVMWSMI },
7611 { 0, CODE_FOR_spe_evmwsmia, "__builtin_spe_evmwsmia", SPE_BUILTIN_EVMWSMIA },
7612 { 0, CODE_FOR_spe_evmwsmiaa, "__builtin_spe_evmwsmiaa", SPE_BUILTIN_EVMWSMIAA },
7613 { 0, CODE_FOR_spe_evmwsmian, "__builtin_spe_evmwsmian", SPE_BUILTIN_EVMWSMIAN },
7614 { 0, CODE_FOR_spe_evmwssf, "__builtin_spe_evmwssf", SPE_BUILTIN_EVMWSSF },
7615 { 0, CODE_FOR_spe_evmwssfa, "__builtin_spe_evmwssfa", SPE_BUILTIN_EVMWSSFA },
7616 { 0, CODE_FOR_spe_evmwssfaa, "__builtin_spe_evmwssfaa", SPE_BUILTIN_EVMWSSFAA },
7617 { 0, CODE_FOR_spe_evmwssfan, "__builtin_spe_evmwssfan", SPE_BUILTIN_EVMWSSFAN },
7618 { 0, CODE_FOR_spe_evmwumi, "__builtin_spe_evmwumi", SPE_BUILTIN_EVMWUMI },
7619 { 0, CODE_FOR_spe_evmwumia, "__builtin_spe_evmwumia", SPE_BUILTIN_EVMWUMIA },
7620 { 0, CODE_FOR_spe_evmwumiaa, "__builtin_spe_evmwumiaa", SPE_BUILTIN_EVMWUMIAA },
7621 { 0, CODE_FOR_spe_evmwumian, "__builtin_spe_evmwumian", SPE_BUILTIN_EVMWUMIAN },
7622 { 0, CODE_FOR_spe_evnand, "__builtin_spe_evnand", SPE_BUILTIN_EVNAND },
7623 { 0, CODE_FOR_spe_evnor, "__builtin_spe_evnor", SPE_BUILTIN_EVNOR },
7624 { 0, CODE_FOR_spe_evor, "__builtin_spe_evor", SPE_BUILTIN_EVOR },
7625 { 0, CODE_FOR_spe_evorc, "__builtin_spe_evorc", SPE_BUILTIN_EVORC },
7626 { 0, CODE_FOR_spe_evrlw, "__builtin_spe_evrlw", SPE_BUILTIN_EVRLW },
7627 { 0, CODE_FOR_spe_evslw, "__builtin_spe_evslw", SPE_BUILTIN_EVSLW },
7628 { 0, CODE_FOR_spe_evsrws, "__builtin_spe_evsrws", SPE_BUILTIN_EVSRWS },
7629 { 0, CODE_FOR_spe_evsrwu, "__builtin_spe_evsrwu", SPE_BUILTIN_EVSRWU },
7630 { 0, CODE_FOR_spe_evsubfw, "__builtin_spe_evsubfw", SPE_BUILTIN_EVSUBFW },
7632 /* SPE binary operations expecting a 5-bit unsigned literal. */
7633 { 0, CODE_FOR_spe_evaddiw, "__builtin_spe_evaddiw", SPE_BUILTIN_EVADDIW },
7635 { 0, CODE_FOR_spe_evrlwi, "__builtin_spe_evrlwi", SPE_BUILTIN_EVRLWI },
7636 { 0, CODE_FOR_spe_evslwi, "__builtin_spe_evslwi", SPE_BUILTIN_EVSLWI },
7637 { 0, CODE_FOR_spe_evsrwis, "__builtin_spe_evsrwis", SPE_BUILTIN_EVSRWIS },
7638 { 0, CODE_FOR_spe_evsrwiu, "__builtin_spe_evsrwiu", SPE_BUILTIN_EVSRWIU },
7639 { 0, CODE_FOR_spe_evsubifw, "__builtin_spe_evsubifw", SPE_BUILTIN_EVSUBIFW },
7640 { 0, CODE_FOR_spe_evmwhssfaa, "__builtin_spe_evmwhssfaa", SPE_BUILTIN_EVMWHSSFAA },
7641 { 0, CODE_FOR_spe_evmwhssmaa, "__builtin_spe_evmwhssmaa", SPE_BUILTIN_EVMWHSSMAA },
7642 { 0, CODE_FOR_spe_evmwhsmfaa, "__builtin_spe_evmwhsmfaa", SPE_BUILTIN_EVMWHSMFAA },
7643 { 0, CODE_FOR_spe_evmwhsmiaa, "__builtin_spe_evmwhsmiaa", SPE_BUILTIN_EVMWHSMIAA },
7644 { 0, CODE_FOR_spe_evmwhusiaa, "__builtin_spe_evmwhusiaa", SPE_BUILTIN_EVMWHUSIAA },
7645 { 0, CODE_FOR_spe_evmwhumiaa, "__builtin_spe_evmwhumiaa", SPE_BUILTIN_EVMWHUMIAA },
7646 { 0, CODE_FOR_spe_evmwhssfan, "__builtin_spe_evmwhssfan", SPE_BUILTIN_EVMWHSSFAN },
7647 { 0, CODE_FOR_spe_evmwhssian, "__builtin_spe_evmwhssian", SPE_BUILTIN_EVMWHSSIAN },
7648 { 0, CODE_FOR_spe_evmwhsmfan, "__builtin_spe_evmwhsmfan", SPE_BUILTIN_EVMWHSMFAN },
7649 { 0, CODE_FOR_spe_evmwhsmian, "__builtin_spe_evmwhsmian", SPE_BUILTIN_EVMWHSMIAN },
7650 { 0, CODE_FOR_spe_evmwhusian, "__builtin_spe_evmwhusian", SPE_BUILTIN_EVMWHUSIAN },
7651 { 0, CODE_FOR_spe_evmwhumian, "__builtin_spe_evmwhumian", SPE_BUILTIN_EVMWHUMIAN },
7652 { 0, CODE_FOR_spe_evmwhgssfaa, "__builtin_spe_evmwhgssfaa", SPE_BUILTIN_EVMWHGSSFAA },
7653 { 0, CODE_FOR_spe_evmwhgsmfaa, "__builtin_spe_evmwhgsmfaa", SPE_BUILTIN_EVMWHGSMFAA },
7654 { 0, CODE_FOR_spe_evmwhgsmiaa, "__builtin_spe_evmwhgsmiaa", SPE_BUILTIN_EVMWHGSMIAA },
7655 { 0, CODE_FOR_spe_evmwhgumiaa, "__builtin_spe_evmwhgumiaa", SPE_BUILTIN_EVMWHGUMIAA },
7656 { 0, CODE_FOR_spe_evmwhgssfan, "__builtin_spe_evmwhgssfan", SPE_BUILTIN_EVMWHGSSFAN },
7657 { 0, CODE_FOR_spe_evmwhgsmfan, "__builtin_spe_evmwhgsmfan", SPE_BUILTIN_EVMWHGSMFAN },
7658 { 0, CODE_FOR_spe_evmwhgsmian, "__builtin_spe_evmwhgsmian", SPE_BUILTIN_EVMWHGSMIAN },
7659 { 0, CODE_FOR_spe_evmwhgumian, "__builtin_spe_evmwhgumian", SPE_BUILTIN_EVMWHGUMIAN },
7660 { 0, CODE_FOR_spe_brinc, "__builtin_spe_brinc", SPE_BUILTIN_BRINC },
7662 /* Place-holder. Leave as last binary SPE builtin. */
7663 { 0, CODE_FOR_xorv2si3, "__builtin_spe_evxor", SPE_BUILTIN_EVXOR }
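/* Note: the two place-holder comments above presumably exist so that code
   elsewhere can treat the range SPE_BUILTIN_EVADDW .. SPE_BUILTIN_EVXOR as
   "all binary SPE builtins"; new SPE binary entries therefore need to stay
   between those two markers.  */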
7666 /* AltiVec predicates. */
7668 struct builtin_description_predicates
7670 const unsigned int mask;
7671 const enum insn_code icode;
7672 const char *opcode;
7673 const char *const name;
7674 const enum rs6000_builtins code;
7677 static const struct builtin_description_predicates bdesc_altivec_preds[] =
7679 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpbfp.", "__builtin_altivec_vcmpbfp_p", ALTIVEC_BUILTIN_VCMPBFP_P },
7680 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpeqfp.", "__builtin_altivec_vcmpeqfp_p", ALTIVEC_BUILTIN_VCMPEQFP_P },
7681 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpgefp.", "__builtin_altivec_vcmpgefp_p", ALTIVEC_BUILTIN_VCMPGEFP_P },
7682 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpgtfp.", "__builtin_altivec_vcmpgtfp_p", ALTIVEC_BUILTIN_VCMPGTFP_P },
7683 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpequw.", "__builtin_altivec_vcmpequw_p", ALTIVEC_BUILTIN_VCMPEQUW_P },
7684 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpgtsw.", "__builtin_altivec_vcmpgtsw_p", ALTIVEC_BUILTIN_VCMPGTSW_P },
7685 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpgtuw.", "__builtin_altivec_vcmpgtuw_p", ALTIVEC_BUILTIN_VCMPGTUW_P },
7686 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpgtuh.", "__builtin_altivec_vcmpgtuh_p", ALTIVEC_BUILTIN_VCMPGTUH_P },
7687 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpgtsh.", "__builtin_altivec_vcmpgtsh_p", ALTIVEC_BUILTIN_VCMPGTSH_P },
7688 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpequh.", "__builtin_altivec_vcmpequh_p", ALTIVEC_BUILTIN_VCMPEQUH_P },
7689 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpequb.", "__builtin_altivec_vcmpequb_p", ALTIVEC_BUILTIN_VCMPEQUB_P },
7690 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpgtsb.", "__builtin_altivec_vcmpgtsb_p", ALTIVEC_BUILTIN_VCMPGTSB_P },
7691 { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpgtub.", "__builtin_altivec_vcmpgtub_p", ALTIVEC_BUILTIN_VCMPGTUB_P },
7693 { MASK_ALTIVEC, CODE_FOR_nothing, NULL, "__builtin_vec_vcmpeq_p", ALTIVEC_BUILTIN_VCMPEQ_P },
7694 { MASK_ALTIVEC, CODE_FOR_nothing, NULL, "__builtin_vec_vcmpgt_p", ALTIVEC_BUILTIN_VCMPGT_P },
7695 { MASK_ALTIVEC, CODE_FOR_nothing, NULL, "__builtin_vec_vcmpge_p", ALTIVEC_BUILTIN_VCMPGE_P }
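/* Note: the third field of each entry above (e.g. "*vcmpbfp.") is the
   opcode string that altivec_expand_predicate_builtin later wraps in a
   SYMBOL_REF and hands to the predicate insn pattern; the trailing entries
   with CODE_FOR_nothing and a NULL opcode are the overloaded
   __builtin_vec_* names, which presumably get resolved to one of the
   concrete patterns before reaching this expansion path.  */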
7698 /* SPE predicates. */
7699 static struct builtin_description bdesc_spe_predicates[] =
7701 /* Place-holder. Leave as first. */
7702 { 0, CODE_FOR_spe_evcmpeq, "__builtin_spe_evcmpeq", SPE_BUILTIN_EVCMPEQ },
7703 { 0, CODE_FOR_spe_evcmpgts, "__builtin_spe_evcmpgts", SPE_BUILTIN_EVCMPGTS },
7704 { 0, CODE_FOR_spe_evcmpgtu, "__builtin_spe_evcmpgtu", SPE_BUILTIN_EVCMPGTU },
7705 { 0, CODE_FOR_spe_evcmplts, "__builtin_spe_evcmplts", SPE_BUILTIN_EVCMPLTS },
7706 { 0, CODE_FOR_spe_evcmpltu, "__builtin_spe_evcmpltu", SPE_BUILTIN_EVCMPLTU },
7707 { 0, CODE_FOR_spe_evfscmpeq, "__builtin_spe_evfscmpeq", SPE_BUILTIN_EVFSCMPEQ },
7708 { 0, CODE_FOR_spe_evfscmpgt, "__builtin_spe_evfscmpgt", SPE_BUILTIN_EVFSCMPGT },
7709 { 0, CODE_FOR_spe_evfscmplt, "__builtin_spe_evfscmplt", SPE_BUILTIN_EVFSCMPLT },
7710 { 0, CODE_FOR_spe_evfststeq, "__builtin_spe_evfststeq", SPE_BUILTIN_EVFSTSTEQ },
7711 { 0, CODE_FOR_spe_evfststgt, "__builtin_spe_evfststgt", SPE_BUILTIN_EVFSTSTGT },
7712 /* Place-holder. Leave as last. */
7713 { 0, CODE_FOR_spe_evfststlt, "__builtin_spe_evfststlt", SPE_BUILTIN_EVFSTSTLT },
7716 /* SPE evsel predicates. */
7717 static struct builtin_description bdesc_spe_evsel[] =
7719 /* Place-holder. Leave as first. */
7720 { 0, CODE_FOR_spe_evcmpgts, "__builtin_spe_evsel_gts", SPE_BUILTIN_EVSEL_CMPGTS },
7721 { 0, CODE_FOR_spe_evcmpgtu, "__builtin_spe_evsel_gtu", SPE_BUILTIN_EVSEL_CMPGTU },
7722 { 0, CODE_FOR_spe_evcmplts, "__builtin_spe_evsel_lts", SPE_BUILTIN_EVSEL_CMPLTS },
7723 { 0, CODE_FOR_spe_evcmpltu, "__builtin_spe_evsel_ltu", SPE_BUILTIN_EVSEL_CMPLTU },
7724 { 0, CODE_FOR_spe_evcmpeq, "__builtin_spe_evsel_eq", SPE_BUILTIN_EVSEL_CMPEQ },
7725 { 0, CODE_FOR_spe_evfscmpgt, "__builtin_spe_evsel_fsgt", SPE_BUILTIN_EVSEL_FSCMPGT },
7726 { 0, CODE_FOR_spe_evfscmplt, "__builtin_spe_evsel_fslt", SPE_BUILTIN_EVSEL_FSCMPLT },
7727 { 0, CODE_FOR_spe_evfscmpeq, "__builtin_spe_evsel_fseq", SPE_BUILTIN_EVSEL_FSCMPEQ },
7728 { 0, CODE_FOR_spe_evfststgt, "__builtin_spe_evsel_fststgt", SPE_BUILTIN_EVSEL_FSTSTGT },
7729 { 0, CODE_FOR_spe_evfststlt, "__builtin_spe_evsel_fststlt", SPE_BUILTIN_EVSEL_FSTSTLT },
7730 /* Place-holder. Leave as last. */
7731 { 0, CODE_FOR_spe_evfststeq, "__builtin_spe_evsel_fststeq", SPE_BUILTIN_EVSEL_FSTSTEQ },
7734 /* PAIRED predicates. */
7735 static const struct builtin_description bdesc_paired_preds[] =
7737 /* Place-holder. Leave as first. */
7738 { 0, CODE_FOR_paired_cmpu0, "__builtin_paired_cmpu0", PAIRED_BUILTIN_CMPU0 },
7739 /* Place-holder. Leave as last. */
7740 { 0, CODE_FOR_paired_cmpu1, "__builtin_paired_cmpu1", PAIRED_BUILTIN_CMPU1 },
7743 /* ABS* operations. */
7745 static const struct builtin_description bdesc_abs[] =
7747 { MASK_ALTIVEC, CODE_FOR_absv4si2, "__builtin_altivec_abs_v4si", ALTIVEC_BUILTIN_ABS_V4SI },
7748 { MASK_ALTIVEC, CODE_FOR_absv8hi2, "__builtin_altivec_abs_v8hi", ALTIVEC_BUILTIN_ABS_V8HI },
7749 { MASK_ALTIVEC, CODE_FOR_absv4sf2, "__builtin_altivec_abs_v4sf", ALTIVEC_BUILTIN_ABS_V4SF },
7750 { MASK_ALTIVEC, CODE_FOR_absv16qi2, "__builtin_altivec_abs_v16qi", ALTIVEC_BUILTIN_ABS_V16QI },
7751 { MASK_ALTIVEC, CODE_FOR_altivec_abss_v4si, "__builtin_altivec_abss_v4si", ALTIVEC_BUILTIN_ABSS_V4SI },
7752 { MASK_ALTIVEC, CODE_FOR_altivec_abss_v8hi, "__builtin_altivec_abss_v8hi", ALTIVEC_BUILTIN_ABSS_V8HI },
7753 { MASK_ALTIVEC, CODE_FOR_altivec_abss_v16qi, "__builtin_altivec_abss_v16qi", ALTIVEC_BUILTIN_ABSS_V16QI }
7756 /* Simple unary operations: VECb = foo (unsigned literal) or VECb = foo (VECa). */
7759 static struct builtin_description bdesc_1arg[] =
7761 { MASK_ALTIVEC, CODE_FOR_altivec_vexptefp, "__builtin_altivec_vexptefp", ALTIVEC_BUILTIN_VEXPTEFP },
7762 { MASK_ALTIVEC, CODE_FOR_altivec_vlogefp, "__builtin_altivec_vlogefp", ALTIVEC_BUILTIN_VLOGEFP },
7763 { MASK_ALTIVEC, CODE_FOR_altivec_vrefp, "__builtin_altivec_vrefp", ALTIVEC_BUILTIN_VREFP },
7764 { MASK_ALTIVEC, CODE_FOR_altivec_vrfim, "__builtin_altivec_vrfim", ALTIVEC_BUILTIN_VRFIM },
7765 { MASK_ALTIVEC, CODE_FOR_altivec_vrfin, "__builtin_altivec_vrfin", ALTIVEC_BUILTIN_VRFIN },
7766 { MASK_ALTIVEC, CODE_FOR_altivec_vrfip, "__builtin_altivec_vrfip", ALTIVEC_BUILTIN_VRFIP },
7767 { MASK_ALTIVEC, CODE_FOR_ftruncv4sf2, "__builtin_altivec_vrfiz", ALTIVEC_BUILTIN_VRFIZ },
7768 { MASK_ALTIVEC, CODE_FOR_altivec_vrsqrtefp, "__builtin_altivec_vrsqrtefp", ALTIVEC_BUILTIN_VRSQRTEFP },
7769 { MASK_ALTIVEC, CODE_FOR_altivec_vspltisb, "__builtin_altivec_vspltisb", ALTIVEC_BUILTIN_VSPLTISB },
7770 { MASK_ALTIVEC, CODE_FOR_altivec_vspltish, "__builtin_altivec_vspltish", ALTIVEC_BUILTIN_VSPLTISH },
7771 { MASK_ALTIVEC, CODE_FOR_altivec_vspltisw, "__builtin_altivec_vspltisw", ALTIVEC_BUILTIN_VSPLTISW },
7772 { MASK_ALTIVEC, CODE_FOR_altivec_vupkhsb, "__builtin_altivec_vupkhsb", ALTIVEC_BUILTIN_VUPKHSB },
7773 { MASK_ALTIVEC, CODE_FOR_altivec_vupkhpx, "__builtin_altivec_vupkhpx", ALTIVEC_BUILTIN_VUPKHPX },
7774 { MASK_ALTIVEC, CODE_FOR_altivec_vupkhsh, "__builtin_altivec_vupkhsh", ALTIVEC_BUILTIN_VUPKHSH },
7775 { MASK_ALTIVEC, CODE_FOR_altivec_vupklsb, "__builtin_altivec_vupklsb", ALTIVEC_BUILTIN_VUPKLSB },
7776 { MASK_ALTIVEC, CODE_FOR_altivec_vupklpx, "__builtin_altivec_vupklpx", ALTIVEC_BUILTIN_VUPKLPX },
7777 { MASK_ALTIVEC, CODE_FOR_altivec_vupklsh, "__builtin_altivec_vupklsh", ALTIVEC_BUILTIN_VUPKLSH },
7779 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abs", ALTIVEC_BUILTIN_VEC_ABS },
7780 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abss", ALTIVEC_BUILTIN_VEC_ABSS },
7781 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_ceil", ALTIVEC_BUILTIN_VEC_CEIL },
7782 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_expte", ALTIVEC_BUILTIN_VEC_EXPTE },
7783 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_floor", ALTIVEC_BUILTIN_VEC_FLOOR },
7784 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_loge", ALTIVEC_BUILTIN_VEC_LOGE },
7785 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mtvscr", ALTIVEC_BUILTIN_VEC_MTVSCR },
7786 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_re", ALTIVEC_BUILTIN_VEC_RE },
7787 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_round", ALTIVEC_BUILTIN_VEC_ROUND },
7788 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_rsqrte", ALTIVEC_BUILTIN_VEC_RSQRTE },
7789 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_trunc", ALTIVEC_BUILTIN_VEC_TRUNC },
7790 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_unpackh", ALTIVEC_BUILTIN_VEC_UNPACKH },
7791 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhsh", ALTIVEC_BUILTIN_VEC_VUPKHSH },
7792 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhpx", ALTIVEC_BUILTIN_VEC_VUPKHPX },
7793 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhsb", ALTIVEC_BUILTIN_VEC_VUPKHSB },
7794 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_unpackl", ALTIVEC_BUILTIN_VEC_UNPACKL },
7795 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklpx", ALTIVEC_BUILTIN_VEC_VUPKLPX },
7796 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsh", ALTIVEC_BUILTIN_VEC_VUPKLSH },
7797 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsb", ALTIVEC_BUILTIN_VEC_VUPKLSB },
7799 /* The SPE unary builtins must start with SPE_BUILTIN_EVABS and
7800 end with SPE_BUILTIN_EVSUBFUSIAAW. */
7801 { 0, CODE_FOR_spe_evabs, "__builtin_spe_evabs", SPE_BUILTIN_EVABS },
7802 { 0, CODE_FOR_spe_evaddsmiaaw, "__builtin_spe_evaddsmiaaw", SPE_BUILTIN_EVADDSMIAAW },
7803 { 0, CODE_FOR_spe_evaddssiaaw, "__builtin_spe_evaddssiaaw", SPE_BUILTIN_EVADDSSIAAW },
7804 { 0, CODE_FOR_spe_evaddumiaaw, "__builtin_spe_evaddumiaaw", SPE_BUILTIN_EVADDUMIAAW },
7805 { 0, CODE_FOR_spe_evaddusiaaw, "__builtin_spe_evaddusiaaw", SPE_BUILTIN_EVADDUSIAAW },
7806 { 0, CODE_FOR_spe_evcntlsw, "__builtin_spe_evcntlsw", SPE_BUILTIN_EVCNTLSW },
7807 { 0, CODE_FOR_spe_evcntlzw, "__builtin_spe_evcntlzw", SPE_BUILTIN_EVCNTLZW },
7808 { 0, CODE_FOR_spe_evextsb, "__builtin_spe_evextsb", SPE_BUILTIN_EVEXTSB },
7809 { 0, CODE_FOR_spe_evextsh, "__builtin_spe_evextsh", SPE_BUILTIN_EVEXTSH },
7810 { 0, CODE_FOR_spe_evfsabs, "__builtin_spe_evfsabs", SPE_BUILTIN_EVFSABS },
7811 { 0, CODE_FOR_spe_evfscfsf, "__builtin_spe_evfscfsf", SPE_BUILTIN_EVFSCFSF },
7812 { 0, CODE_FOR_spe_evfscfsi, "__builtin_spe_evfscfsi", SPE_BUILTIN_EVFSCFSI },
7813 { 0, CODE_FOR_spe_evfscfuf, "__builtin_spe_evfscfuf", SPE_BUILTIN_EVFSCFUF },
7814 { 0, CODE_FOR_spe_evfscfui, "__builtin_spe_evfscfui", SPE_BUILTIN_EVFSCFUI },
7815 { 0, CODE_FOR_spe_evfsctsf, "__builtin_spe_evfsctsf", SPE_BUILTIN_EVFSCTSF },
7816 { 0, CODE_FOR_spe_evfsctsi, "__builtin_spe_evfsctsi", SPE_BUILTIN_EVFSCTSI },
7817 { 0, CODE_FOR_spe_evfsctsiz, "__builtin_spe_evfsctsiz", SPE_BUILTIN_EVFSCTSIZ },
7818 { 0, CODE_FOR_spe_evfsctuf, "__builtin_spe_evfsctuf", SPE_BUILTIN_EVFSCTUF },
7819 { 0, CODE_FOR_spe_evfsctui, "__builtin_spe_evfsctui", SPE_BUILTIN_EVFSCTUI },
7820 { 0, CODE_FOR_spe_evfsctuiz, "__builtin_spe_evfsctuiz", SPE_BUILTIN_EVFSCTUIZ },
7821 { 0, CODE_FOR_spe_evfsnabs, "__builtin_spe_evfsnabs", SPE_BUILTIN_EVFSNABS },
7822 { 0, CODE_FOR_spe_evfsneg, "__builtin_spe_evfsneg", SPE_BUILTIN_EVFSNEG },
7823 { 0, CODE_FOR_spe_evmra, "__builtin_spe_evmra", SPE_BUILTIN_EVMRA },
7824 { 0, CODE_FOR_negv2si2, "__builtin_spe_evneg", SPE_BUILTIN_EVNEG },
7825 { 0, CODE_FOR_spe_evrndw, "__builtin_spe_evrndw", SPE_BUILTIN_EVRNDW },
7826 { 0, CODE_FOR_spe_evsubfsmiaaw, "__builtin_spe_evsubfsmiaaw", SPE_BUILTIN_EVSUBFSMIAAW },
7827 { 0, CODE_FOR_spe_evsubfssiaaw, "__builtin_spe_evsubfssiaaw", SPE_BUILTIN_EVSUBFSSIAAW },
7828 { 0, CODE_FOR_spe_evsubfumiaaw, "__builtin_spe_evsubfumiaaw", SPE_BUILTIN_EVSUBFUMIAAW },
7830 /* Place-holder. Leave as last unary SPE builtin. */
7831 { 0, CODE_FOR_spe_evsubfusiaaw, "__builtin_spe_evsubfusiaaw", SPE_BUILTIN_EVSUBFUSIAAW },
7833 { 0, CODE_FOR_absv2sf2, "__builtin_paired_absv2sf2", PAIRED_BUILTIN_ABSV2SF2 },
7834 { 0, CODE_FOR_nabsv2sf2, "__builtin_paired_nabsv2sf2", PAIRED_BUILTIN_NABSV2SF2 },
7835 { 0, CODE_FOR_negv2sf2, "__builtin_paired_negv2sf2", PAIRED_BUILTIN_NEGV2SF2 },
7836 { 0, CODE_FOR_sqrtv2sf2, "__builtin_paired_sqrtv2sf2", PAIRED_BUILTIN_SQRTV2SF2 },
7837 { 0, CODE_FOR_resv2sf2, "__builtin_paired_resv2sf2", PAIRED_BUILTIN_RESV2SF2 }
7841 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
7844 tree arg0 = CALL_EXPR_ARG (exp, 0);
7845 rtx op0 = expand_normal (arg0);
7846 enum machine_mode tmode = insn_data[icode].operand[0].mode;
7847 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
7849 if (icode == CODE_FOR_nothing)
7850 /* Builtin not supported on this processor. */
7853 /* If we got invalid arguments bail out before generating bad rtl. */
7854 if (arg0 == error_mark_node)
7857 if (icode == CODE_FOR_altivec_vspltisb
7858 || icode == CODE_FOR_altivec_vspltish
7859 || icode == CODE_FOR_altivec_vspltisw
7860 || icode == CODE_FOR_spe_evsplatfi
7861 || icode == CODE_FOR_spe_evsplati)
7863 /* Only allow 5-bit *signed* literals. */
7864 if (GET_CODE (op0) != CONST_INT
7865 || INTVAL (op0) > 15
7866 || INTVAL (op0) < -16)
7868 error ("argument 1 must be a 5-bit signed literal");
7874 || GET_MODE (target) != tmode
7875 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
7876 target = gen_reg_rtx (tmode);
7878 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
7879 op0 = copy_to_mode_reg (mode0, op0);
7881 pat = GEN_FCN (icode) (target, op0);
7890 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
7892 rtx pat, scratch1, scratch2;
7893 tree arg0 = CALL_EXPR_ARG (exp, 0);
7894 rtx op0 = expand_normal (arg0);
7895 enum machine_mode tmode = insn_data[icode].operand[0].mode;
7896 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
7898 /* If we have invalid arguments, bail out before generating bad rtl. */
7899 if (arg0 == error_mark_node)
7903 || GET_MODE (target) != tmode
7904 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
7905 target = gen_reg_rtx (tmode);
7907 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
7908 op0 = copy_to_mode_reg (mode0, op0);
7910 scratch1 = gen_reg_rtx (mode0);
7911 scratch2 = gen_reg_rtx (mode0);
7913 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
7922 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
7925 tree arg0 = CALL_EXPR_ARG (exp, 0);
7926 tree arg1 = CALL_EXPR_ARG (exp, 1);
7927 rtx op0 = expand_normal (arg0);
7928 rtx op1 = expand_normal (arg1);
7929 enum machine_mode tmode = insn_data[icode].operand[0].mode;
7930 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
7931 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
7933 if (icode == CODE_FOR_nothing)
7934 /* Builtin not supported on this processor. */
7937 /* If we got invalid arguments bail out before generating bad rtl. */
7938 if (arg0 == error_mark_node || arg1 == error_mark_node)
7941 if (icode == CODE_FOR_altivec_vcfux
7942 || icode == CODE_FOR_altivec_vcfsx
7943 || icode == CODE_FOR_altivec_vctsxs
7944 || icode == CODE_FOR_altivec_vctuxs
7945 || icode == CODE_FOR_altivec_vspltb
7946 || icode == CODE_FOR_altivec_vsplth
7947 || icode == CODE_FOR_altivec_vspltw
7948 || icode == CODE_FOR_spe_evaddiw
7949 || icode == CODE_FOR_spe_evldd
7950 || icode == CODE_FOR_spe_evldh
7951 || icode == CODE_FOR_spe_evldw
7952 || icode == CODE_FOR_spe_evlhhesplat
7953 || icode == CODE_FOR_spe_evlhhossplat
7954 || icode == CODE_FOR_spe_evlhhousplat
7955 || icode == CODE_FOR_spe_evlwhe
7956 || icode == CODE_FOR_spe_evlwhos
7957 || icode == CODE_FOR_spe_evlwhou
7958 || icode == CODE_FOR_spe_evlwhsplat
7959 || icode == CODE_FOR_spe_evlwwsplat
7960 || icode == CODE_FOR_spe_evrlwi
7961 || icode == CODE_FOR_spe_evslwi
7962 || icode == CODE_FOR_spe_evsrwis
7963 || icode == CODE_FOR_spe_evsubifw
7964 || icode == CODE_FOR_spe_evsrwiu)
7966 /* Only allow 5-bit unsigned literals. */
7968 if (TREE_CODE (arg1) != INTEGER_CST
7969 || TREE_INT_CST_LOW (arg1) & ~0x1f)
7971 error ("argument 2 must be a 5-bit unsigned literal");
7977 || GET_MODE (target) != tmode
7978 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
7979 target = gen_reg_rtx (tmode);
7981 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
7982 op0 = copy_to_mode_reg (mode0, op0);
7983 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
7984 op1 = copy_to_mode_reg (mode1, op1);
7986 pat = GEN_FCN (icode) (target, op0, op1);
7995 altivec_expand_predicate_builtin (enum insn_code icode, const char *opcode,
7996 tree exp, rtx target)
7999 tree cr6_form = CALL_EXPR_ARG (exp, 0);
8000 tree arg0 = CALL_EXPR_ARG (exp, 1);
8001 tree arg1 = CALL_EXPR_ARG (exp, 2);
8002 rtx op0 = expand_normal (arg0);
8003 rtx op1 = expand_normal (arg1);
8004 enum machine_mode tmode = SImode;
8005 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
8006 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
8009 if (TREE_CODE (cr6_form) != INTEGER_CST)
8011 error ("argument 1 of __builtin_altivec_predicate must be a constant");
8015 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
8017 gcc_assert (mode0 == mode1);
8019 /* If we have invalid arguments, bail out before generating bad rtl. */
8020 if (arg0 == error_mark_node || arg1 == error_mark_node)
8024 || GET_MODE (target) != tmode
8025 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
8026 target = gen_reg_rtx (tmode);
8028 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
8029 op0 = copy_to_mode_reg (mode0, op0);
8030 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
8031 op1 = copy_to_mode_reg (mode1, op1);
8033 scratch = gen_reg_rtx (mode0);
8035 pat = GEN_FCN (icode) (scratch, op0, op1,
8036 gen_rtx_SYMBOL_REF (Pmode, opcode));
8041 /* The vec_any* and vec_all* predicates use the same opcodes for two
8042 different operations, but the bits in CR6 will be different
8043 depending on what information we want. So we have to play tricks
8044 with CR6 to get the right bits out.
8046 If you think this is disgusting, look at the specs for the
8047 AltiVec predicates. */
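/* For example (roughly speaking): vec_all_eq and vec_any_eq both expand
   through the same "*vcmpequw." pattern above; the "all" form then wants
   the CR6 bit saying every element compared true, while the "any" form
   wants the complement of the bit saying no element compared true, which
   is the distinction the switch below makes on cr6_form_int.  */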
8049 switch (cr6_form_int)
8052 emit_insn (gen_cr6_test_for_zero (target));
8055 emit_insn (gen_cr6_test_for_zero_reverse (target));
8058 emit_insn (gen_cr6_test_for_lt (target));
8061 emit_insn (gen_cr6_test_for_lt_reverse (target));
8064 error ("argument 1 of __builtin_altivec_predicate is out of range");
8072 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
8075 tree arg0 = CALL_EXPR_ARG (exp, 0);
8076 tree arg1 = CALL_EXPR_ARG (exp, 1);
8077 enum machine_mode tmode = insn_data[icode].operand[0].mode;
8078 enum machine_mode mode0 = Pmode;
8079 enum machine_mode mode1 = Pmode;
8080 rtx op0 = expand_normal (arg0);
8081 rtx op1 = expand_normal (arg1);
8083 if (icode == CODE_FOR_nothing)
8084 /* Builtin not supported on this processor. */
8087 /* If we got invalid arguments bail out before generating bad rtl. */
8088 if (arg0 == error_mark_node || arg1 == error_mark_node)
8092 || GET_MODE (target) != tmode
8093 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
8094 target = gen_reg_rtx (tmode);
8096 op1 = copy_to_mode_reg (mode1, op1);
8098 if (op0 == const0_rtx)
8100 addr = gen_rtx_MEM (tmode, op1);
8104 op0 = copy_to_mode_reg (mode0, op0);
8105 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
8108 pat = GEN_FCN (icode) (target, addr);
8118 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
8121 tree arg0 = CALL_EXPR_ARG (exp, 0);
8122 tree arg1 = CALL_EXPR_ARG (exp, 1);
8123 enum machine_mode tmode = insn_data[icode].operand[0].mode;
8124 enum machine_mode mode0 = Pmode;
8125 enum machine_mode mode1 = Pmode;
8126 rtx op0 = expand_normal (arg0);
8127 rtx op1 = expand_normal (arg1);
8129 if (icode == CODE_FOR_nothing)
8130 /* Builtin not supported on this processor. */
8133 /* If we got invalid arguments bail out before generating bad rtl. */
8134 if (arg0 == error_mark_node || arg1 == error_mark_node)
8138 || GET_MODE (target) != tmode
8139 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
8140 target = gen_reg_rtx (tmode);
8142 op1 = copy_to_mode_reg (mode1, op1);
8144 if (op0 == const0_rtx)
8146 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
8150 op0 = copy_to_mode_reg (mode0, op0);
8151 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
8154 pat = GEN_FCN (icode) (target, addr);
8164 spe_expand_stv_builtin (enum insn_code icode, tree exp)
8166 tree arg0 = CALL_EXPR_ARG (exp, 0);
8167 tree arg1 = CALL_EXPR_ARG (exp, 1);
8168 tree arg2 = CALL_EXPR_ARG (exp, 2);
8169 rtx op0 = expand_normal (arg0);
8170 rtx op1 = expand_normal (arg1);
8171 rtx op2 = expand_normal (arg2);
8173 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
8174 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
8175 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
8177 /* Invalid arguments. Bail before doing anything stoopid! */
8178 if (arg0 == error_mark_node
8179 || arg1 == error_mark_node
8180 || arg2 == error_mark_node)
8183 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
8184 op0 = copy_to_mode_reg (mode2, op0);
8185 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
8186 op1 = copy_to_mode_reg (mode0, op1);
8187 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
8188 op2 = copy_to_mode_reg (mode1, op2);
8190 pat = GEN_FCN (icode) (op1, op2, op0);
8197 paired_expand_stv_builtin (enum insn_code icode, tree exp)
8199 tree arg0 = CALL_EXPR_ARG (exp, 0);
8200 tree arg1 = CALL_EXPR_ARG (exp, 1);
8201 tree arg2 = CALL_EXPR_ARG (exp, 2);
8202 rtx op0 = expand_normal (arg0);
8203 rtx op1 = expand_normal (arg1);
8204 rtx op2 = expand_normal (arg2);
8206 enum machine_mode tmode = insn_data[icode].operand[0].mode;
8207 enum machine_mode mode1 = Pmode;
8208 enum machine_mode mode2 = Pmode;
8210 /* Invalid arguments. Bail before doing anything stoopid! */
8211 if (arg0 == error_mark_node
8212 || arg1 == error_mark_node
8213 || arg2 == error_mark_node)
8216 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
8217 op0 = copy_to_mode_reg (tmode, op0);
8219 op2 = copy_to_mode_reg (mode2, op2);
8221 if (op1 == const0_rtx)
8223 addr = gen_rtx_MEM (tmode, op2);
8227 op1 = copy_to_mode_reg (mode1, op1);
8228 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
8231 pat = GEN_FCN (icode) (addr, op0);
8238 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
8240 tree arg0 = CALL_EXPR_ARG (exp, 0);
8241 tree arg1 = CALL_EXPR_ARG (exp, 1);
8242 tree arg2 = CALL_EXPR_ARG (exp, 2);
8243 rtx op0 = expand_normal (arg0);
8244 rtx op1 = expand_normal (arg1);
8245 rtx op2 = expand_normal (arg2);
8247 enum machine_mode tmode = insn_data[icode].operand[0].mode;
8248 enum machine_mode mode1 = Pmode;
8249 enum machine_mode mode2 = Pmode;
8251 /* Invalid arguments. Bail before doing anything stoopid! */
8252 if (arg0 == error_mark_node
8253 || arg1 == error_mark_node
8254 || arg2 == error_mark_node)
8257 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
8258 op0 = copy_to_mode_reg (tmode, op0);
8260 op2 = copy_to_mode_reg (mode2, op2);
8262 if (op1 == const0_rtx)
8264 addr = gen_rtx_MEM (tmode, op2);
8268 op1 = copy_to_mode_reg (mode1, op1);
8269 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
8272 pat = GEN_FCN (icode) (addr, op0);
8279 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
8282 tree arg0 = CALL_EXPR_ARG (exp, 0);
8283 tree arg1 = CALL_EXPR_ARG (exp, 1);
8284 tree arg2 = CALL_EXPR_ARG (exp, 2);
8285 rtx op0 = expand_normal (arg0);
8286 rtx op1 = expand_normal (arg1);
8287 rtx op2 = expand_normal (arg2);
8288 enum machine_mode tmode = insn_data[icode].operand[0].mode;
8289 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
8290 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
8291 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
8293 if (icode == CODE_FOR_nothing)
8294 /* Builtin not supported on this processor. */
8297 /* If we got invalid arguments bail out before generating bad rtl. */
8298 if (arg0 == error_mark_node
8299 || arg1 == error_mark_node
8300 || arg2 == error_mark_node)
8303 if (icode == CODE_FOR_altivec_vsldoi_v4sf
8304 || icode == CODE_FOR_altivec_vsldoi_v4si
8305 || icode == CODE_FOR_altivec_vsldoi_v8hi
8306 || icode == CODE_FOR_altivec_vsldoi_v16qi)
8308 /* Only allow 4-bit unsigned literals. */
8310 if (TREE_CODE (arg2) != INTEGER_CST
8311 || TREE_INT_CST_LOW (arg2) & ~0xf)
8313 error ("argument 3 must be a 4-bit unsigned literal");
8319 || GET_MODE (target) != tmode
8320 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
8321 target = gen_reg_rtx (tmode);
8323 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
8324 op0 = copy_to_mode_reg (mode0, op0);
8325 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
8326 op1 = copy_to_mode_reg (mode1, op1);
8327 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
8328 op2 = copy_to_mode_reg (mode2, op2);
8330 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
8331 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
8333 pat = GEN_FCN (icode) (target, op0, op1, op2);
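/* Note on the CODE_FOR_selv2sf4 special case above: the paired-single
   select pattern appears to take one operand more than the other ternary
   builtins -- a value the selector is compared against -- so a literal 0.0
   (CONST0_RTX (SFmode)) is supplied for it here.  */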
8341 /* Expand the lvx builtins. */
8343 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
8345 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8346 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8348 enum machine_mode tmode, mode0;
8350 enum insn_code icode;
8354 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
8355 icode = CODE_FOR_altivec_lvx_v16qi;
8357 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
8358 icode = CODE_FOR_altivec_lvx_v8hi;
8360 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
8361 icode = CODE_FOR_altivec_lvx_v4si;
8363 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
8364 icode = CODE_FOR_altivec_lvx_v4sf;
8373 arg0 = CALL_EXPR_ARG (exp, 0);
8374 op0 = expand_normal (arg0);
8375 tmode = insn_data[icode].operand[0].mode;
8376 mode0 = insn_data[icode].operand[1].mode;
8379 || GET_MODE (target) != tmode
8380 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
8381 target = gen_reg_rtx (tmode);
8383 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
8384 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
8386 pat = GEN_FCN (icode) (target, op0);
8393 /* Expand the stvx builtins. */
8395 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
8398 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8399 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8401 enum machine_mode mode0, mode1;
8403 enum insn_code icode;
8407 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
8408 icode = CODE_FOR_altivec_stvx_v16qi;
8410 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
8411 icode = CODE_FOR_altivec_stvx_v8hi;
8413 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
8414 icode = CODE_FOR_altivec_stvx_v4si;
8416 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
8417 icode = CODE_FOR_altivec_stvx_v4sf;
8424 arg0 = CALL_EXPR_ARG (exp, 0);
8425 arg1 = CALL_EXPR_ARG (exp, 1);
8426 op0 = expand_normal (arg0);
8427 op1 = expand_normal (arg1);
8428 mode0 = insn_data[icode].operand[0].mode;
8429 mode1 = insn_data[icode].operand[1].mode;
8431 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
8432 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
8433 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
8434 op1 = copy_to_mode_reg (mode1, op1);
8436 pat = GEN_FCN (icode) (op0, op1);
8444 /* Expand the dst builtins. */
8446 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
8449 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8450 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8451 tree arg0, arg1, arg2;
8452 enum machine_mode mode0, mode1, mode2;
8453 rtx pat, op0, op1, op2;
8454 const struct builtin_description *d;
8459 /* Handle DST variants. */
8461 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
8462 if (d->code == fcode)
8464 arg0 = CALL_EXPR_ARG (exp, 0);
8465 arg1 = CALL_EXPR_ARG (exp, 1);
8466 arg2 = CALL_EXPR_ARG (exp, 2);
8467 op0 = expand_normal (arg0);
8468 op1 = expand_normal (arg1);
8469 op2 = expand_normal (arg2);
8470 mode0 = insn_data[d->icode].operand[0].mode;
8471 mode1 = insn_data[d->icode].operand[1].mode;
8472 mode2 = insn_data[d->icode].operand[2].mode;
8474 /* Invalid arguments, bail out before generating bad rtl. */
8475 if (arg0 == error_mark_node
8476 || arg1 == error_mark_node
8477 || arg2 == error_mark_node)
8482 if (TREE_CODE (arg2) != INTEGER_CST
8483 || TREE_INT_CST_LOW (arg2) & ~0x3)
8485 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
8489 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
8490 op0 = copy_to_mode_reg (Pmode, op0);
8491 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
8492 op1 = copy_to_mode_reg (mode1, op1);
8494 pat = GEN_FCN (d->icode) (op0, op1, op2);
8504 /* Expand vec_init builtin. */
8506 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
8508 enum machine_mode tmode = TYPE_MODE (type);
8509 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
8510 int i, n_elt = GET_MODE_NUNITS (tmode);
8511 rtvec v = rtvec_alloc (n_elt);
8513 gcc_assert (VECTOR_MODE_P (tmode));
8514 gcc_assert (n_elt == call_expr_nargs (exp));
8516 for (i = 0; i < n_elt; ++i)
8518 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
8519 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
8522 if (!target || !register_operand (target, tmode))
8523 target = gen_reg_rtx (tmode);
8525 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
8529 /* Return the integer constant in ARG. Constrain it to be in the range
8530 of the subparts of VEC_TYPE; issue an error if not. */
8533 get_element_number (tree vec_type, tree arg)
8535 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
8537 if (!host_integerp (arg, 1)
8538 || (elt = tree_low_cst (arg, 1), elt > max))
8540 error ("selector must be an integer constant in the range 0..%wi", max);
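/* For example: with a V4SI argument TYPE_VECTOR_SUBPARTS is 4, so MAX is 3
   and a constant selector of 4 or more reaches the error above instead of
   producing an out-of-range element access.  */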
8547 /* Expand vec_set builtin. */
8549 altivec_expand_vec_set_builtin (tree exp)
8551 enum machine_mode tmode, mode1;
8552 tree arg0, arg1, arg2;
8556 arg0 = CALL_EXPR_ARG (exp, 0);
8557 arg1 = CALL_EXPR_ARG (exp, 1);
8558 arg2 = CALL_EXPR_ARG (exp, 2);
8560 tmode = TYPE_MODE (TREE_TYPE (arg0));
8561 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
8562 gcc_assert (VECTOR_MODE_P (tmode));
8564 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
8565 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
8566 elt = get_element_number (TREE_TYPE (arg0), arg2);
8568 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
8569 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
8571 op0 = force_reg (tmode, op0);
8572 op1 = force_reg (mode1, op1);
8574 rs6000_expand_vector_set (op0, op1, elt);
8579 /* Expand vec_ext builtin. */
8581 altivec_expand_vec_ext_builtin (tree exp, rtx target)
8583 enum machine_mode tmode, mode0;
8588 arg0 = CALL_EXPR_ARG (exp, 0);
8589 arg1 = CALL_EXPR_ARG (exp, 1);
8591 op0 = expand_normal (arg0);
8592 elt = get_element_number (TREE_TYPE (arg0), arg1);
8594 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
8595 mode0 = TYPE_MODE (TREE_TYPE (arg0));
8596 gcc_assert (VECTOR_MODE_P (mode0));
8598 op0 = force_reg (mode0, op0);
8600 if (optimize || !target || !register_operand (target, tmode))
8601 target = gen_reg_rtx (tmode);
8603 rs6000_expand_vector_extract (target, op0, elt);
8608 /* Expand the builtin in EXP and store the result in TARGET. Store
8609 true in *EXPANDEDP if we found a builtin to expand. */
8611 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
8613 const struct builtin_description *d;
8614 const struct builtin_description_predicates *dp;
8616 enum insn_code icode;
8617 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8620 enum machine_mode tmode, mode0;
8621 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8623 if (fcode >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
8624 && fcode <= ALTIVEC_BUILTIN_OVERLOADED_LAST)
8627 error ("unresolved overload for Altivec builtin %qF", fndecl);
8631 target = altivec_expand_ld_builtin (exp, target, expandedp);
8635 target = altivec_expand_st_builtin (exp, target, expandedp);
8639 target = altivec_expand_dst_builtin (exp, target, expandedp);
8647 case ALTIVEC_BUILTIN_STVX:
8648 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx, exp);
8649 case ALTIVEC_BUILTIN_STVEBX:
8650 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
8651 case ALTIVEC_BUILTIN_STVEHX:
8652 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
8653 case ALTIVEC_BUILTIN_STVEWX:
8654 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
8655 case ALTIVEC_BUILTIN_STVXL:
8656 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);
8658 case ALTIVEC_BUILTIN_STVLX:
8659 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
8660 case ALTIVEC_BUILTIN_STVLXL:
8661 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
8662 case ALTIVEC_BUILTIN_STVRX:
8663 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
8664 case ALTIVEC_BUILTIN_STVRXL:
8665 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
8667 case ALTIVEC_BUILTIN_MFVSCR:
8668 icode = CODE_FOR_altivec_mfvscr;
8669 tmode = insn_data[icode].operand[0].mode;
8672 || GET_MODE (target) != tmode
8673 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
8674 target = gen_reg_rtx (tmode);
8676 pat = GEN_FCN (icode) (target);
8682 case ALTIVEC_BUILTIN_MTVSCR:
8683 icode = CODE_FOR_altivec_mtvscr;
8684 arg0 = CALL_EXPR_ARG (exp, 0);
8685 op0 = expand_normal (arg0);
8686 mode0 = insn_data[icode].operand[0].mode;
8688 /* If we got invalid arguments bail out before generating bad rtl. */
8689 if (arg0 == error_mark_node)
8692 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
8693 op0 = copy_to_mode_reg (mode0, op0);
8695 pat = GEN_FCN (icode) (op0);
8700 case ALTIVEC_BUILTIN_DSSALL:
8701 emit_insn (gen_altivec_dssall ());
8704 case ALTIVEC_BUILTIN_DSS:
8705 icode = CODE_FOR_altivec_dss;
8706 arg0 = CALL_EXPR_ARG (exp, 0);
8708 op0 = expand_normal (arg0);
8709 mode0 = insn_data[icode].operand[0].mode;
8711 /* If we got invalid arguments bail out before generating bad rtl. */
8712 if (arg0 == error_mark_node)
8715 if (TREE_CODE (arg0) != INTEGER_CST
8716 || TREE_INT_CST_LOW (arg0) & ~0x3)
8718 error ("argument to dss must be a 2-bit unsigned literal");
8722 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
8723 op0 = copy_to_mode_reg (mode0, op0);
8725 emit_insn (gen_altivec_dss (op0));
8728 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
8729 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
8730 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
8731 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
8732 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
8734 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
8735 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
8736 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
8737 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
8738 return altivec_expand_vec_set_builtin (exp);
8740 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
8741 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
8742 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
8743 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
8744 return altivec_expand_vec_ext_builtin (exp, target);
8751 /* Expand abs* operations. */
8753 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
8754 if (d->code == fcode)
8755 return altivec_expand_abs_builtin (d->icode, exp, target);
8757 /* Expand the AltiVec predicates. */
8758 dp = bdesc_altivec_preds;
8759 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, dp++)
8760 if (dp->code == fcode)
8761 return altivec_expand_predicate_builtin (dp->icode, dp->opcode,
8764 /* LV* are funky. We initialized them differently. */
8767 case ALTIVEC_BUILTIN_LVSL:
8768 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
8769 exp, target, false);
8770 case ALTIVEC_BUILTIN_LVSR:
8771 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
8772 exp, target, false);
8773 case ALTIVEC_BUILTIN_LVEBX:
8774 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
8775 exp, target, false);
8776 case ALTIVEC_BUILTIN_LVEHX:
8777 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
8778 exp, target, false);
8779 case ALTIVEC_BUILTIN_LVEWX:
8780 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
8781 exp, target, false);
8782 case ALTIVEC_BUILTIN_LVXL:
8783 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
8784 exp, target, false);
8785 case ALTIVEC_BUILTIN_LVX:
8786 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx,
8787 exp, target, false);
8788 case ALTIVEC_BUILTIN_LVLX:
8789 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
8791 case ALTIVEC_BUILTIN_LVLXL:
8792 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
8794 case ALTIVEC_BUILTIN_LVRX:
8795 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
8797 case ALTIVEC_BUILTIN_LVRXL:
8798 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
8809 /* Expand the builtin in EXP and store the result in TARGET. Store
8810 true in *EXPANDEDP if we found a builtin to expand. */
8812 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
8814 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8815 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8816 const struct builtin_description *d;
8823 case PAIRED_BUILTIN_STX:
8824 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
8825 case PAIRED_BUILTIN_LX:
8826 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
8832 /* Expand the paired predicates. */
8833 d = bdesc_paired_preds;
8834 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
8835 if (d->code == fcode)
8836 return paired_expand_predicate_builtin (d->icode, exp, target);
8842 /* Binops that need to be initialized manually, but can be expanded
8843 automagically by rs6000_expand_binop_builtin. */
8844 static struct builtin_description bdesc_2arg_spe[] =
8846 { 0, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
8847 { 0, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
8848 { 0, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
8849 { 0, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
8850 { 0, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
8851 { 0, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
8852 { 0, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
8853 { 0, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
8854 { 0, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
8855 { 0, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
8856 { 0, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
8857 { 0, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
8858 { 0, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
8859 { 0, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
8860 { 0, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
8861 { 0, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
8862 { 0, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
8863 { 0, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
8864 { 0, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
8865 { 0, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
8866 { 0, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
8867 { 0, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
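/* Note: these SPE load builtins sit in their own table because, as the
   comment above says, their declarations have to be set up by hand (their
   arguments are presumably pointer/offset style rather than plain vectors),
   but once declared they can still be expanded by the generic
   rs6000_expand_binop_builtin path.  */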
8870 /* Expand the builtin in EXP and store the result in TARGET. Store
8871 true in *EXPANDEDP if we found a builtin to expand.
8873 This expands the SPE builtins that are not simple unary and binary operations. */
8876 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
8878 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8880 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8881 enum insn_code icode;
8882 enum machine_mode tmode, mode0;
8884 struct builtin_description *d;
8889 /* Syntax check for a 5-bit unsigned immediate. */
8892 case SPE_BUILTIN_EVSTDD:
8893 case SPE_BUILTIN_EVSTDH:
8894 case SPE_BUILTIN_EVSTDW:
8895 case SPE_BUILTIN_EVSTWHE:
8896 case SPE_BUILTIN_EVSTWHO:
8897 case SPE_BUILTIN_EVSTWWE:
8898 case SPE_BUILTIN_EVSTWWO:
8899 arg1 = CALL_EXPR_ARG (exp, 2);
8900 if (TREE_CODE (arg1) != INTEGER_CST
8901 || TREE_INT_CST_LOW (arg1) & ~0x1f)
8903 error ("argument 2 must be a 5-bit unsigned literal");
8911 /* The evsplat*i instructions are not quite generic. */
8914 case SPE_BUILTIN_EVSPLATFI:
8915 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
8917 case SPE_BUILTIN_EVSPLATI:
8918 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
8924 d = (struct builtin_description *) bdesc_2arg_spe;
8925 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
8926 if (d->code == fcode)
8927 return rs6000_expand_binop_builtin (d->icode, exp, target);
8929 d = (struct builtin_description *) bdesc_spe_predicates;
8930 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
8931 if (d->code == fcode)
8932 return spe_expand_predicate_builtin (d->icode, exp, target);
8934 d = (struct builtin_description *) bdesc_spe_evsel;
8935 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
8936 if (d->code == fcode)
8937 return spe_expand_evsel_builtin (d->icode, exp, target);
8941 case SPE_BUILTIN_EVSTDDX:
8942 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
8943 case SPE_BUILTIN_EVSTDHX:
8944 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
8945 case SPE_BUILTIN_EVSTDWX:
8946 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
8947 case SPE_BUILTIN_EVSTWHEX:
8948 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
8949 case SPE_BUILTIN_EVSTWHOX:
8950 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
8951 case SPE_BUILTIN_EVSTWWEX:
8952 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
8953 case SPE_BUILTIN_EVSTWWOX:
8954 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
8955 case SPE_BUILTIN_EVSTDD:
8956 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
8957 case SPE_BUILTIN_EVSTDH:
8958 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
8959 case SPE_BUILTIN_EVSTDW:
8960 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
8961 case SPE_BUILTIN_EVSTWHE:
8962 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
8963 case SPE_BUILTIN_EVSTWHO:
8964 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
8965 case SPE_BUILTIN_EVSTWWE:
8966 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
8967 case SPE_BUILTIN_EVSTWWO:
8968 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
8969 case SPE_BUILTIN_MFSPEFSCR:
8970 icode = CODE_FOR_spe_mfspefscr;
8971 tmode = insn_data[icode].operand[0].mode;
8974 || GET_MODE (target) != tmode
8975 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
8976 target = gen_reg_rtx (tmode);
8978 pat = GEN_FCN (icode) (target);
8983 case SPE_BUILTIN_MTSPEFSCR:
8984 icode = CODE_FOR_spe_mtspefscr;
8985 arg0 = CALL_EXPR_ARG (exp, 0);
8986 op0 = expand_normal (arg0);
8987 mode0 = insn_data[icode].operand[0].mode;
8989 if (arg0 == error_mark_node)
8992 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
8993 op0 = copy_to_mode_reg (mode0, op0);
8995 pat = GEN_FCN (icode) (op0);
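/* As an illustration of the 5-bit immediate check above (hypothetical
   operands V and P, for illustration only):

     __builtin_spe_evstdd (v, p, 31);    accepted: 31 fits in 5 bits
     __builtin_spe_evstdd (v, p, 32);    rejected with the error above

   since the offset operand is encoded as a 5-bit unsigned literal in
   the instruction.  */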
static rtx
paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_paired_predicate must be a constant");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || !(*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);
  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCFPmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (!pat)
    return const0_rtx;

  emit_insn (pat);

  switch (form_int)
    {
      /* LT bit.  */
    case 0:
      code = LT;
      break;
      /* GT bit.  */
    case 1:
      code = GT;
      break;
      /* EQ bit.  */
    case 2:
      code = EQ;
      break;
      /* UN bit.  */
    case 3:
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
    default:
      error ("argument 1 of __builtin_paired_predicate is out of range");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);
  return target;
}
static rtx
spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_spe_predicate must be a constant");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  /* There are 4 variants for each predicate: _any_, _all_, _upper_,
     _lower_.  We use one compare, but look in different bits of the
     CR for each variant.

     There are 2 elements in each SPE simd type (upper/lower).  The CR
     bits are set as follows:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     U     |   L    | (U | L) | (U & L)

     So, for an "all" relationship, BIT 3 would be set.
     For an "any" relationship, BIT 2 would be set.  Etc.

     Following traditional nomenclature, these bits map to:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     LT    | GT     | EQ      | OV

     Later, we will generate rtl to look in the LT/EQ/EQ/OV bits.  */

  switch (form_int)
    {
      /* All variant.  OV bit.  */
    case 0:
      /* We need to get to the OV bit, which is the ORDERED bit.  We
	 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
	 that's ugly and will make validate_condition_mode die.
	 So let's just use another pattern.  */
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
      /* Any variant.  EQ bit.  */
    case 1:
      code = EQ;
      break;
      /* Upper variant.  LT bit.  */
    case 2:
      code = LT;
      break;
      /* Lower variant.  GT bit.  */
    case 3:
      code = GT;
      break;
    default:
      error ("argument 1 of __builtin_spe_predicate is out of range");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);

  return target;
}
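/* A worked example of the CR bit mapping above (illustrative values):
   comparing {1, 5} against {1, 7} for equality gives U = 1 and L = 0,
   so the CR field holds LT=1 GT=0 EQ=1 OV=0; the "any" variant (EQ
   bit) is true while the "all" variant (OV bit) is false.  */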
/* The evsel builtins look like this:

     e = __builtin_spe_evsel_OP (a, b, c, d);

   and work like this:

     e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
     e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
*/

static rtx
spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  tree arg3 = CALL_EXPR_ARG (exp, 3);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx op3 = expand_normal (arg3);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node
      || arg2 == error_mark_node || arg3 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != mode0
      || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
    target = gen_reg_rtx (mode0);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode0, op1);
  if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
    op2 = copy_to_mode_reg (mode0, op2);
  if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
    op3 = copy_to_mode_reg (mode0, op3);

  /* Generate the compare.  */
  scratch = gen_reg_rtx (CCmode);
  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  if (mode0 == V2SImode)
    emit_insn (gen_spe_evsel (target, op2, op3, scratch));
  else
    emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));

  return target;
}
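/* For instance (illustrative values), with OP being signed
   greater-than, a = {3, 1}, b = {2, 2}, c = {10, 20}, d = {30, 40}:
   the upper compare 3 > 2 holds and the lower 1 > 2 does not, so
   e = {10, 40}.  */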
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		       enum machine_mode mode ATTRIBUTE_UNUSED,
		       int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;
  rtx ret;
  bool success;

  if (fcode == RS6000_BUILTIN_RECIP)
    return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);

  if (fcode == RS6000_BUILTIN_RECIPF)
    return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);

  if (fcode == RS6000_BUILTIN_RSQRTF)
    return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);

  if (fcode == ALTIVEC_BUILTIN_MASK_FOR_LOAD
      || fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
    {
      int icode = (int) CODE_FOR_altivec_lvsr;
      enum machine_mode tmode = insn_data[icode].operand[0].mode;
      enum machine_mode mode = insn_data[icode].operand[1].mode;
      tree arg;
      rtx op, addr, pat;

      gcc_assert (TARGET_ALTIVEC);

      arg = CALL_EXPR_ARG (exp, 0);
      gcc_assert (TREE_CODE (TREE_TYPE (arg)) == POINTER_TYPE);
      op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
      addr = memory_address (mode, op);
      if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
	op = addr;
      else
	{
	  /* For the load case need to negate the address.  */
	  op = gen_reg_rtx (GET_MODE (addr));
	  emit_insn (gen_rtx_SET (VOIDmode, op,
				  gen_rtx_NEG (GET_MODE (addr), addr)));
	}
      op = gen_rtx_MEM (mode, op);

      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);

      /*pat = gen_altivec_lvsr (target, op);*/
      pat = GEN_FCN (icode) (target, op);
      if (!pat)
	return 0;
      emit_insn (pat);

      return target;
    }

  /* FIXME: There's got to be a nicer way to handle this case than
     constructing a new CALL_EXPR.  */
  if (fcode == ALTIVEC_BUILTIN_VCFUX
      || fcode == ALTIVEC_BUILTIN_VCFSX
      || fcode == ALTIVEC_BUILTIN_VCTUXS
      || fcode == ALTIVEC_BUILTIN_VCTSXS)
    {
      if (call_expr_nargs (exp) == 1)
	exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
			       2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
    }

  if (TARGET_ALTIVEC)
    {
      ret = altivec_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_SPE)
    {
      ret = spe_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_PAIRED_FLOAT)
    {
      ret = paired_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }

  gcc_assert (TARGET_ALTIVEC || TARGET_SPE || TARGET_PAIRED_FLOAT);

  /* Handle simple unary operations.  */
  d = (struct builtin_description *) bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_unop_builtin (d->icode, exp, target);

  /* Handle simple binary operations.  */
  d = (struct builtin_description *) bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  /* Handle simple ternary operations.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_ternop_builtin (d->icode, exp, target);

  gcc_unreachable ();
}
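/* A rough source-level sketch of how the MASK_FOR_LOAD expansion
   above is used by the vectorizer's realignment scheme (hypothetical
   names, for illustration only):

     mask = __builtin_altivec_mask_for_load (addr);
     lo = vec_ld (0, addr);
     hi = vec_ld (16, addr);
     v = vec_perm (lo, hi, mask);

   i.e. an unaligned vector is reassembled from two aligned loads and
   a permute whose control vector comes from lvsr on the negated
   address.  */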
static void
rs6000_init_builtins (void)
{
  tree tdecl;

  V2SI_type_node = build_vector_type (intSI_type_node, 2);
  V2SF_type_node = build_vector_type (float_type_node, 2);
  V4HI_type_node = build_vector_type (intHI_type_node, 4);
  V4SI_type_node = build_vector_type (intSI_type_node, 4);
  V4SF_type_node = build_vector_type (float_type_node, 4);
  V8HI_type_node = build_vector_type (intHI_type_node, 8);
  V16QI_type_node = build_vector_type (intQI_type_node, 16);

  unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
  unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
  unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);

  opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
  opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
  opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
  opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);

  /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
     types, especially in C++ land.  Similarly, 'vector pixel' is distinct from
     'vector unsigned short'.  */
  bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
  bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
  bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
  pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
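  /* Keeping these copies distinct matters mostly for C++, where e.g.
     (an illustrative overload pair, not part of this file)

       void f (vector bool int);
       void f (vector unsigned int);

     must mangle and resolve as two different functions.  */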
  long_integer_type_internal_node = long_integer_type_node;
  long_unsigned_type_internal_node = long_unsigned_type_node;
  intQI_type_internal_node = intQI_type_node;
  uintQI_type_internal_node = unsigned_intQI_type_node;
  intHI_type_internal_node = intHI_type_node;
  uintHI_type_internal_node = unsigned_intHI_type_node;
  intSI_type_internal_node = intSI_type_node;
  uintSI_type_internal_node = unsigned_intSI_type_node;
  float_type_internal_node = float_type_node;
  void_type_internal_node = void_type_node;

  tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
		      get_identifier ("__bool char"),
		      bool_char_type_node);
  TYPE_NAME (bool_char_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
		      get_identifier ("__bool short"),
		      bool_short_type_node);
  TYPE_NAME (bool_short_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
		      get_identifier ("__bool int"),
		      bool_int_type_node);
  TYPE_NAME (bool_int_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL, get_identifier ("__pixel"),
		      pixel_type_node);
  TYPE_NAME (pixel_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);

  bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
  bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
  bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
  pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);

  tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
		      get_identifier ("__vector unsigned char"),
		      unsigned_V16QI_type_node);
  TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION,
		      TYPE_DECL, get_identifier ("__vector signed char"),
		      V16QI_type_node);
  TYPE_NAME (V16QI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION,
		      TYPE_DECL, get_identifier ("__vector __bool char"),
		      bool_V16QI_type_node);
  TYPE_NAME (bool_V16QI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);

  tdecl = build_decl (BUILTINS_LOCATION,
		      TYPE_DECL, get_identifier ("__vector unsigned short"),
		      unsigned_V8HI_type_node);
  TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION,
		      TYPE_DECL, get_identifier ("__vector signed short"),
		      V8HI_type_node);
  TYPE_NAME (V8HI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
		      get_identifier ("__vector __bool short"),
		      bool_V8HI_type_node);
  TYPE_NAME (bool_V8HI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);

  tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
		      get_identifier ("__vector unsigned int"),
		      unsigned_V4SI_type_node);
  TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION,
		      TYPE_DECL, get_identifier ("__vector signed int"),
		      V4SI_type_node);
  TYPE_NAME (V4SI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION,
		      TYPE_DECL, get_identifier ("__vector __bool int"),
		      bool_V4SI_type_node);
  TYPE_NAME (bool_V4SI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);

  tdecl = build_decl (BUILTINS_LOCATION,
		      TYPE_DECL, get_identifier ("__vector float"),
		      V4SF_type_node);
  TYPE_NAME (V4SF_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION,
		      TYPE_DECL, get_identifier ("__vector __pixel"),
		      pixel_V8HI_type_node);
  TYPE_NAME (pixel_V8HI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  if (TARGET_PAIRED_FLOAT)
    paired_init_builtins ();
  if (TARGET_SPE)
    spe_init_builtins ();
  if (TARGET_ALTIVEC)
    altivec_init_builtins ();
  if (TARGET_ALTIVEC || TARGET_SPE || TARGET_PAIRED_FLOAT)
    rs6000_common_init_builtins ();
  if (TARGET_PPC_GFXOPT)
    {
      tree ftype = build_function_type_list (float_type_node,
					     float_type_node,
					     float_type_node,
					     NULL_TREE);
      def_builtin (MASK_PPC_GFXOPT, "__builtin_recipdivf", ftype,
		   RS6000_BUILTIN_RECIPF);

      ftype = build_function_type_list (float_type_node,
					float_type_node,
					NULL_TREE);
      def_builtin (MASK_PPC_GFXOPT, "__builtin_rsqrtf", ftype,
		   RS6000_BUILTIN_RSQRTF);
    }
  if (TARGET_POPCNTB)
    {
      tree ftype = build_function_type_list (double_type_node,
					     double_type_node,
					     double_type_node,
					     NULL_TREE);
      def_builtin (MASK_POPCNTB, "__builtin_recipdiv", ftype,
		   RS6000_BUILTIN_RECIP);
    }

#if TARGET_XCOFF
  /* AIX libm provides clog as __clog.  */
  if (built_in_decls [BUILT_IN_CLOG])
    set_user_assembler_name (built_in_decls [BUILT_IN_CLOG], "__clog");
#endif

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}
/* Search through a set of builtins and enable the mask bits.
   DESC is an array of builtins.
   SIZE is the total number of builtins.
   START is the builtin enum at which to start.
   END is the builtin enum at which to end.  */
static void
enable_mask_for_builtins (struct builtin_description *desc, int size,
			  enum rs6000_builtins start,
			  enum rs6000_builtins end)
{
  int i;

  for (i = 0; i < size; ++i)
    if (desc[i].code == start)
      break;

  if (i == size)
    return;

  for (; i < size; ++i)
    {
      /* Flip all the bits on.  */
      desc[i].mask = target_flags;
      if (desc[i].code == end)
	break;
    }
}
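/* For example, the first call in spe_init_builtins below walks
   bdesc_2arg from SPE_BUILTIN_EVADDW through SPE_BUILTIN_EVXOR and
   flips the mask bits on for every entry in that range, effectively
   enabling those builtins unconditionally.  */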
static void
spe_init_builtins (void)
{
  tree endlink = void_list_node;
  tree puint_type_node = build_pointer_type (unsigned_type_node);
  tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
  struct builtin_description *d;
  size_t i;

  tree v2si_ftype_4_v2si
    = build_function_type
    (opaque_V2SI_type_node,
     tree_cons (NULL_TREE, opaque_V2SI_type_node,
		tree_cons (NULL_TREE, opaque_V2SI_type_node,
			   tree_cons (NULL_TREE, opaque_V2SI_type_node,
				      tree_cons (NULL_TREE, opaque_V2SI_type_node,
						 endlink)))));

  tree v2sf_ftype_4_v2sf
    = build_function_type
    (opaque_V2SF_type_node,
     tree_cons (NULL_TREE, opaque_V2SF_type_node,
		tree_cons (NULL_TREE, opaque_V2SF_type_node,
			   tree_cons (NULL_TREE, opaque_V2SF_type_node,
				      tree_cons (NULL_TREE, opaque_V2SF_type_node,
						 endlink)))));

  tree int_ftype_int_v2si_v2si
    = build_function_type
    (integer_type_node,
     tree_cons (NULL_TREE, integer_type_node,
		tree_cons (NULL_TREE, opaque_V2SI_type_node,
			   tree_cons (NULL_TREE, opaque_V2SI_type_node,
				      endlink))));

  tree int_ftype_int_v2sf_v2sf
    = build_function_type
    (integer_type_node,
     tree_cons (NULL_TREE, integer_type_node,
		tree_cons (NULL_TREE, opaque_V2SF_type_node,
			   tree_cons (NULL_TREE, opaque_V2SF_type_node,
				      endlink))));

  tree void_ftype_v2si_puint_int
    = build_function_type (void_type_node,
			   tree_cons (NULL_TREE, opaque_V2SI_type_node,
				      tree_cons (NULL_TREE, puint_type_node,
						 tree_cons (NULL_TREE,
							    integer_type_node,
							    endlink))));

  tree void_ftype_v2si_puint_char
    = build_function_type (void_type_node,
			   tree_cons (NULL_TREE, opaque_V2SI_type_node,
				      tree_cons (NULL_TREE, puint_type_node,
						 tree_cons (NULL_TREE,
							    char_type_node,
							    endlink))));

  tree void_ftype_v2si_pv2si_int
    = build_function_type (void_type_node,
			   tree_cons (NULL_TREE, opaque_V2SI_type_node,
				      tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
						 tree_cons (NULL_TREE,
							    integer_type_node,
							    endlink))));

  tree void_ftype_v2si_pv2si_char
    = build_function_type (void_type_node,
			   tree_cons (NULL_TREE, opaque_V2SI_type_node,
				      tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
						 tree_cons (NULL_TREE,
							    char_type_node,
							    endlink))));

  tree void_ftype_int
    = build_function_type (void_type_node,
			   tree_cons (NULL_TREE, integer_type_node, endlink));

  tree int_ftype_void
    = build_function_type (integer_type_node, endlink);

  tree v2si_ftype_pv2si_int
    = build_function_type (opaque_V2SI_type_node,
			   tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
				      tree_cons (NULL_TREE, integer_type_node,
						 endlink)));

  tree v2si_ftype_puint_int
    = build_function_type (opaque_V2SI_type_node,
			   tree_cons (NULL_TREE, puint_type_node,
				      tree_cons (NULL_TREE, integer_type_node,
						 endlink)));

  tree v2si_ftype_pushort_int
    = build_function_type (opaque_V2SI_type_node,
			   tree_cons (NULL_TREE, pushort_type_node,
				      tree_cons (NULL_TREE, integer_type_node,
						 endlink)));

  tree v2si_ftype_signed_char
    = build_function_type (opaque_V2SI_type_node,
			   tree_cons (NULL_TREE, signed_char_type_node,
				      endlink));

  /* The initialization of the simple binary and unary builtins is
     done in rs6000_common_init_builtins, but we have to enable the
     mask bits here manually because we have run out of `target_flags'
     bits.  We really need to redesign this mask business.  */

  enable_mask_for_builtins ((struct builtin_description *) bdesc_2arg,
			    ARRAY_SIZE (bdesc_2arg),
			    SPE_BUILTIN_EVADDW,
			    SPE_BUILTIN_EVXOR);
  enable_mask_for_builtins ((struct builtin_description *) bdesc_1arg,
			    ARRAY_SIZE (bdesc_1arg),
			    SPE_BUILTIN_EVABS,
			    SPE_BUILTIN_EVSUBFUSIAAW);
  enable_mask_for_builtins ((struct builtin_description *) bdesc_spe_predicates,
			    ARRAY_SIZE (bdesc_spe_predicates),
			    SPE_BUILTIN_EVCMPEQ,
			    SPE_BUILTIN_EVFSTSTLT);
  enable_mask_for_builtins ((struct builtin_description *) bdesc_spe_evsel,
			    ARRAY_SIZE (bdesc_spe_evsel),
			    SPE_BUILTIN_EVSEL_CMPGTS,
			    SPE_BUILTIN_EVSEL_FSTSTEQ);

  (*lang_hooks.decls.pushdecl)
    (build_decl (BUILTINS_LOCATION, TYPE_DECL,
		 get_identifier ("__ev64_opaque__"),
		 opaque_V2SI_type_node));
  /* Initialize irregular SPE builtins.  */
  def_builtin (target_flags, "__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
  def_builtin (target_flags, "__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
  def_builtin (target_flags, "__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
  def_builtin (target_flags, "__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
  def_builtin (target_flags, "__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
  def_builtin (target_flags, "__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
  def_builtin (target_flags, "__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
  def_builtin (target_flags, "__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
  def_builtin (target_flags, "__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
  def_builtin (target_flags, "__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
  def_builtin (target_flags, "__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
  def_builtin (target_flags, "__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
  def_builtin (target_flags, "__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
  def_builtin (target_flags, "__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
  def_builtin (target_flags, "__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
  def_builtin (target_flags, "__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
  def_builtin (target_flags, "__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
  def_builtin (target_flags, "__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);

  /* Loads.  */
  def_builtin (target_flags, "__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
  def_builtin (target_flags, "__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
  def_builtin (target_flags, "__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
  def_builtin (target_flags, "__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
  def_builtin (target_flags, "__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
  def_builtin (target_flags, "__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
  def_builtin (target_flags, "__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
  def_builtin (target_flags, "__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
  def_builtin (target_flags, "__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
  def_builtin (target_flags, "__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
  def_builtin (target_flags, "__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
  def_builtin (target_flags, "__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
  def_builtin (target_flags, "__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
  def_builtin (target_flags, "__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
  def_builtin (target_flags, "__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
  def_builtin (target_flags, "__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
  def_builtin (target_flags, "__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
  def_builtin (target_flags, "__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
  def_builtin (target_flags, "__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
  def_builtin (target_flags, "__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
  def_builtin (target_flags, "__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
  def_builtin (target_flags, "__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
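  /* The evsplat*i builtins above take a signed char argument because
     the underlying instructions encode a 5-bit signed immediate; e.g.
     (illustrative) __builtin_spe_evsplati (-3) replicates -3 into
     both 32-bit elements.  */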
  /* Predicates.  */
  d = (struct builtin_description *) bdesc_spe_predicates;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SImode:
	  type = int_ftype_int_v2si_v2si;
	  break;
	case V2SFmode:
	  type = int_ftype_int_v2sf_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->mask, d->name, type, d->code);
    }

  /* Evsel predicates.  */
  d = (struct builtin_description *) bdesc_spe_evsel;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SImode:
	  type = v2si_ftype_4_v2si;
	  break;
	case V2SFmode:
	  type = v2sf_ftype_4_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->mask, d->name, type, d->code);
    }
}
static void
paired_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;
  tree endlink = void_list_node;

  tree int_ftype_int_v2sf_v2sf
    = build_function_type
    (integer_type_node,
     tree_cons (NULL_TREE, integer_type_node,
		tree_cons (NULL_TREE, V2SF_type_node,
			   tree_cons (NULL_TREE, V2SF_type_node,
				      endlink))));
  tree pcfloat_type_node =
    build_pointer_type (build_qualified_type
			(float_type_node, TYPE_QUAL_CONST));

  tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
							   long_integer_type_node,
							   pcfloat_type_node,
							   NULL_TREE);

  tree void_ftype_v2sf_long_pcfloat =
    build_function_type_list (void_type_node,
			      V2SF_type_node,
			      long_integer_type_node,
			      pcfloat_type_node,
			      NULL_TREE);

  def_builtin (0, "__builtin_paired_lx", v2sf_ftype_long_pcfloat,
	       PAIRED_BUILTIN_LX);

  def_builtin (0, "__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
	       PAIRED_BUILTIN_STX);

  /* Predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SFmode:
	  type = int_ftype_int_v2sf_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->mask, d->name, type, d->code);
    }
}
static void
altivec_init_builtins (void)
{
  const struct builtin_description *d;
  const struct builtin_description_predicates *dp;
  size_t i;
  tree ftype;

  tree pfloat_type_node = build_pointer_type (float_type_node);
  tree pint_type_node = build_pointer_type (integer_type_node);
  tree pshort_type_node = build_pointer_type (short_integer_type_node);
  tree pchar_type_node = build_pointer_type (char_type_node);

  tree pvoid_type_node = build_pointer_type (void_type_node);

  tree pcfloat_type_node = build_pointer_type (build_qualified_type (float_type_node, TYPE_QUAL_CONST));
  tree pcint_type_node = build_pointer_type (build_qualified_type (integer_type_node, TYPE_QUAL_CONST));
  tree pcshort_type_node = build_pointer_type (build_qualified_type (short_integer_type_node, TYPE_QUAL_CONST));
  tree pcchar_type_node = build_pointer_type (build_qualified_type (char_type_node, TYPE_QUAL_CONST));

  tree pcvoid_type_node = build_pointer_type (build_qualified_type (void_type_node, TYPE_QUAL_CONST));

  tree int_ftype_opaque
    = build_function_type_list (integer_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree opaque_ftype_opaque
    = build_function_type (integer_type_node,
			   NULL_TREE);
  tree opaque_ftype_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				integer_type_node, NULL_TREE);
  tree int_ftype_int_opaque_opaque
    = build_function_type_list (integer_type_node,
				integer_type_node, opaque_V4SI_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v4si_v4si
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SI_type_node,
				V4SI_type_node, NULL_TREE);
  tree v4sf_ftype_pcfloat
    = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
  tree void_ftype_pfloat_v4sf
    = build_function_type_list (void_type_node,
				pfloat_type_node, V4SF_type_node, NULL_TREE);
  tree v4si_ftype_pcint
    = build_function_type_list (V4SI_type_node, pcint_type_node, NULL_TREE);
  tree void_ftype_pint_v4si
    = build_function_type_list (void_type_node,
				pint_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_pcshort
    = build_function_type_list (V8HI_type_node, pcshort_type_node, NULL_TREE);
  tree void_ftype_pshort_v8hi
    = build_function_type_list (void_type_node,
				pshort_type_node, V8HI_type_node, NULL_TREE);
  tree v16qi_ftype_pcchar
    = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
  tree void_ftype_pchar_v16qi
    = build_function_type_list (void_type_node,
				pchar_type_node, V16QI_type_node, NULL_TREE);
  tree void_ftype_v4si
    = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_void
    = build_function_type (V8HI_type_node, void_list_node);
  tree void_ftype_void
    = build_function_type (void_type_node, void_list_node);
  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree opaque_ftype_long_pcvoid
    = build_function_type_list (opaque_V4SI_type_node,
				long_integer_type_node, pcvoid_type_node, NULL_TREE);
  tree v16qi_ftype_long_pcvoid
    = build_function_type_list (V16QI_type_node,
				long_integer_type_node, pcvoid_type_node, NULL_TREE);
  tree v8hi_ftype_long_pcvoid
    = build_function_type_list (V8HI_type_node,
				long_integer_type_node, pcvoid_type_node, NULL_TREE);
  tree v4si_ftype_long_pcvoid
    = build_function_type_list (V4SI_type_node,
				long_integer_type_node, pcvoid_type_node, NULL_TREE);

  tree void_ftype_opaque_long_pvoid
    = build_function_type_list (void_type_node,
				opaque_V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4si_long_pvoid
    = build_function_type_list (void_type_node,
				V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v16qi_long_pvoid
    = build_function_type_list (void_type_node,
				V16QI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v8hi_long_pvoid
    = build_function_type_list (void_type_node,
				V8HI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree int_ftype_int_v8hi_v8hi
    = build_function_type_list (integer_type_node,
				integer_type_node, V8HI_type_node,
				V8HI_type_node, NULL_TREE);
  tree int_ftype_int_v16qi_v16qi
    = build_function_type_list (integer_type_node,
				integer_type_node, V16QI_type_node,
				V16QI_type_node, NULL_TREE);
  tree int_ftype_int_v4sf_v4sf
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SF_type_node,
				V4SF_type_node, NULL_TREE);
  tree v4si_ftype_v4si
    = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi
    = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi
    = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf
    = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree void_ftype_pcvoid_int_int
    = build_function_type_list (void_type_node,
				pcvoid_type_node, integer_type_node,
				integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4sf", v4sf_ftype_pcfloat,
	       ALTIVEC_BUILTIN_LD_INTERNAL_4sf);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_4sf", void_ftype_pfloat_v4sf,
	       ALTIVEC_BUILTIN_ST_INTERNAL_4sf);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4si", v4si_ftype_pcint,
	       ALTIVEC_BUILTIN_LD_INTERNAL_4si);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_4si", void_ftype_pint_v4si,
	       ALTIVEC_BUILTIN_ST_INTERNAL_4si);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_8hi", v8hi_ftype_pcshort,
	       ALTIVEC_BUILTIN_LD_INTERNAL_8hi);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_8hi", void_ftype_pshort_v8hi,
	       ALTIVEC_BUILTIN_ST_INTERNAL_8hi);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_16qi", v16qi_ftype_pcchar,
	       ALTIVEC_BUILTIN_LD_INTERNAL_16qi);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_16qi", void_ftype_pchar_v16qi,
	       ALTIVEC_BUILTIN_ST_INTERNAL_16qi);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
  if (rs6000_cpu == PROCESSOR_CELL)
    {
      /* The lvlx, lvlxl, lvrx and lvrxl builtins are only available
	 on the Cell processor.  */
      def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
      def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
      def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
      def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);

      def_builtin (MASK_ALTIVEC, "__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
      def_builtin (MASK_ALTIVEC, "__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
      def_builtin (MASK_ALTIVEC, "__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
      def_builtin (MASK_ALTIVEC, "__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);

      def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
      def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
      def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
      def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);

      def_builtin (MASK_ALTIVEC, "__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
      def_builtin (MASK_ALTIVEC, "__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
      def_builtin (MASK_ALTIVEC, "__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
      def_builtin (MASK_ALTIVEC, "__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
    }
  def_builtin (MASK_ALTIVEC, "__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);

  def_builtin (MASK_ALTIVEC, "__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
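  /* The trailing int operand in the *_ftype_opaque_int signatures
     above is a literal: e.g. (illustrative) vec_ctf (v, 3) converts
     each element to float and divides by 2^3, while vec_splat (v, 2)
     replicates element 2 across the result.  */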
  /* Add the DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    def_builtin (d->mask, d->name, void_ftype_pcvoid_int_int, d->code);
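  /* Each dst variant takes (address, control, tag); e.g.
     (illustrative) __builtin_altivec_dst (p, ctl, 0) starts
     software-directed prefetch on data stream 0, which the dss and
     dssall builtins defined above can stop again.  */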
  /* Initialize the predicates.  */
  dp = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, dp++)
    {
      enum machine_mode mode1;
      tree type;
      bool is_overloaded = dp->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
			   && dp->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;

      if (is_overloaded)
	mode1 = VOIDmode;
      else
	mode1 = insn_data[dp->icode].operand[1].mode;

      switch (mode1)
	{
	case VOIDmode:
	  type = int_ftype_int_opaque_opaque;
	  break;
	case V4SImode:
	  type = int_ftype_int_v4si_v4si;
	  break;
	case V8HImode:
	  type = int_ftype_int_v8hi_v8hi;
	  break;
	case V16QImode:
	  type = int_ftype_int_v16qi_v16qi;
	  break;
	case V4SFmode:
	  type = int_ftype_int_v4sf_v4sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (dp->mask, dp->name, type, dp->code);
    }
  /* Initialize the abs* operators.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    {
      enum machine_mode mode0;
      tree type;

      mode0 = insn_data[d->icode].operand[0].mode;

      switch (mode0)
	{
	case V4SImode:
	  type = v4si_ftype_v4si;
	  break;
	case V8HImode:
	  type = v8hi_ftype_v8hi;
	  break;
	case V16QImode:
	  type = v16qi_ftype_v16qi;
	  break;
	case V4SFmode:
	  type = v4sf_ftype_v4sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->mask, d->name, type, d->code);
    }
  if (TARGET_ALTIVEC)
    {
      tree decl;

      /* Initialize target builtin that implements
	 targetm.vectorize.builtin_mask_for_load.  */

      decl = add_builtin_function ("__builtin_altivec_mask_for_load",
				   v16qi_ftype_long_pcvoid,
				   ALTIVEC_BUILTIN_MASK_FOR_LOAD,
				   BUILT_IN_MD, NULL, NULL_TREE);
      TREE_READONLY (decl) = 1;
      /* Record the decl. Will be used by rs6000_builtin_mask_for_load.  */
      altivec_builtin_mask_for_load = decl;
    }
  /* Access to the vec_init patterns.  */
  ftype = build_function_type_list (V4SI_type_node, integer_type_node,
				    integer_type_node, integer_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v4si", ftype,
	       ALTIVEC_BUILTIN_VEC_INIT_V4SI);

  ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v8hi", ftype,
	       ALTIVEC_BUILTIN_VEC_INIT_V8HI);

  ftype = build_function_type_list (V16QI_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v16qi", ftype,
	       ALTIVEC_BUILTIN_VEC_INIT_V16QI);

  ftype = build_function_type_list (V4SF_type_node, float_type_node,
				    float_type_node, float_type_node,
				    float_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v4sf", ftype,
	       ALTIVEC_BUILTIN_VEC_INIT_V4SF);
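  /* These expose the vec_init patterns to the front end: e.g.
     (illustrative) __builtin_vec_init_v4si (1, 2, 3, 4) constructs
     the V4SI vector {1, 2, 3, 4} element by element.  */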
  /* Access to the vec_set patterns.  */
  ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
				    intSI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v4si", ftype,
	       ALTIVEC_BUILTIN_VEC_SET_V4SI);

  ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
				    intHI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v8hi", ftype,
	       ALTIVEC_BUILTIN_VEC_SET_V8HI);

  ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
				    intQI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v16qi", ftype,
	       ALTIVEC_BUILTIN_VEC_SET_V16QI);

  ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
				    float_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v4sf", ftype,
	       ALTIVEC_BUILTIN_VEC_SET_V4SF);
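  /* e.g. (illustrative) __builtin_vec_set_v4si (v, 42, 2) yields a
     copy of V with element 2 replaced by 42; the final operand
     selects the lane.  */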
  /* Access to the vec_extract patterns.  */
  ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v4si", ftype,
	       ALTIVEC_BUILTIN_VEC_EXT_V4SI);

  ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v8hi", ftype,
	       ALTIVEC_BUILTIN_VEC_EXT_V8HI);

  ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v16qi", ftype,
	       ALTIVEC_BUILTIN_VEC_EXT_V16QI);

  ftype = build_function_type_list (float_type_node, V4SF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v4sf", ftype,
	       ALTIVEC_BUILTIN_VEC_EXT_V4SF);
}
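/* e.g. (illustrative) __builtin_vec_ext_v4sf (v, 0) extracts element
   0 of a float vector as a scalar float.  */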
static void
rs6000_common_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree v2sf_ftype_v2sf_v2sf_v2sf
    = build_function_type_list (V2SF_type_node,
				V2SF_type_node, V2SF_type_node,
				V2SF_type_node, NULL_TREE);

  tree v4sf_ftype_v4sf_v4sf_v16qi
    = build_function_type_list (V4SF_type_node,
				V4SF_type_node, V4SF_type_node,
				V16QI_type_node, NULL_TREE);
  tree v4si_ftype_v4si_v4si_v16qi
    = build_function_type_list (V4SI_type_node,
				V4SI_type_node, V4SI_type_node,
				V16QI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi_v8hi_v16qi
    = build_function_type_list (V8HI_type_node,
				V8HI_type_node, V8HI_type_node,
				V16QI_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi_v16qi_v16qi
    = build_function_type_list (V16QI_type_node,
				V16QI_type_node, V16QI_type_node,
				V16QI_type_node, NULL_TREE);
  tree v4si_ftype_int
    = build_function_type_list (V4SI_type_node, integer_type_node, NULL_TREE);
  tree v8hi_ftype_int
    = build_function_type_list (V8HI_type_node, integer_type_node, NULL_TREE);
  tree v16qi_ftype_int
    = build_function_type_list (V16QI_type_node, integer_type_node, NULL_TREE);
  tree v8hi_ftype_v16qi
    = build_function_type_list (V8HI_type_node, V16QI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf
    = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);

  tree v2si_ftype_v2si_v2si
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node, NULL_TREE);

  tree v2sf_ftype_v2sf_v2sf_spe
    = build_function_type_list (opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node, NULL_TREE);

  tree v2sf_ftype_v2sf_v2sf
    = build_function_type_list (V2SF_type_node,
				V2SF_type_node,
				V2SF_type_node, NULL_TREE);

  tree v2si_ftype_int_int
    = build_function_type_list (opaque_V2SI_type_node,
				integer_type_node, integer_type_node,
				NULL_TREE);

  tree opaque_ftype_opaque
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, NULL_TREE);

  tree v2si_ftype_v2si
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_V2SI_type_node, NULL_TREE);

  tree v2sf_ftype_v2sf_spe
    = build_function_type_list (opaque_V2SF_type_node,
				opaque_V2SF_type_node, NULL_TREE);

  tree v2sf_ftype_v2sf
    = build_function_type_list (V2SF_type_node,
				V2SF_type_node, NULL_TREE);

  tree v2sf_ftype_v2si
    = build_function_type_list (opaque_V2SF_type_node,
				opaque_V2SI_type_node, NULL_TREE);

  tree v2si_ftype_v2sf
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_V2SF_type_node, NULL_TREE);

  tree v2si_ftype_v2si_char
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				char_type_node, NULL_TREE);

  tree v2si_ftype_int_char
    = build_function_type_list (opaque_V2SI_type_node,
				integer_type_node, char_type_node, NULL_TREE);

  tree v2si_ftype_char
    = build_function_type_list (opaque_V2SI_type_node,
				char_type_node, NULL_TREE);

  tree int_ftype_int_int
    = build_function_type_list (integer_type_node,
				integer_type_node, integer_type_node,
				NULL_TREE);

  tree opaque_ftype_opaque_opaque
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node, NULL_TREE);
  tree v4si_ftype_v4si_v4si
    = build_function_type_list (V4SI_type_node,
				V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v4sf_ftype_v4si_int
    = build_function_type_list (V4SF_type_node,
				V4SI_type_node, integer_type_node, NULL_TREE);
  tree v4si_ftype_v4sf_int
    = build_function_type_list (V4SI_type_node,
				V4SF_type_node, integer_type_node, NULL_TREE);
  tree v4si_ftype_v4si_int
    = build_function_type_list (V4SI_type_node,
				V4SI_type_node, integer_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi_int
    = build_function_type_list (V8HI_type_node,
				V8HI_type_node, integer_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi_int
    = build_function_type_list (V16QI_type_node,
				V16QI_type_node, integer_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi_v16qi_int
    = build_function_type_list (V16QI_type_node,
				V16QI_type_node, V16QI_type_node,
				integer_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi_v8hi_int
    = build_function_type_list (V8HI_type_node,
				V8HI_type_node, V8HI_type_node,
				integer_type_node, NULL_TREE);
  tree v4si_ftype_v4si_v4si_int
    = build_function_type_list (V4SI_type_node,
				V4SI_type_node, V4SI_type_node,
				integer_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf_v4sf_int
    = build_function_type_list (V4SF_type_node,
				V4SF_type_node, V4SF_type_node,
				integer_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf_v4sf
    = build_function_type_list (V4SF_type_node,
				V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree opaque_ftype_opaque_opaque_opaque
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf_v4sf_v4si
    = build_function_type_list (V4SF_type_node,
				V4SF_type_node, V4SF_type_node,
				V4SI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf_v4sf_v4sf
    = build_function_type_list (V4SF_type_node,
				V4SF_type_node, V4SF_type_node,
				V4SF_type_node, NULL_TREE);
  tree v4si_ftype_v4si_v4si_v4si
    = build_function_type_list (V4SI_type_node,
				V4SI_type_node, V4SI_type_node,
				V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi_v8hi
    = build_function_type_list (V8HI_type_node,
				V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi_v8hi_v8hi
    = build_function_type_list (V8HI_type_node,
				V8HI_type_node, V8HI_type_node,
				V8HI_type_node, NULL_TREE);
  tree v4si_ftype_v8hi_v8hi_v4si
    = build_function_type_list (V4SI_type_node,
				V8HI_type_node, V8HI_type_node,
				V4SI_type_node, NULL_TREE);
  tree v4si_ftype_v16qi_v16qi_v4si
    = build_function_type_list (V4SI_type_node,
				V16QI_type_node, V16QI_type_node,
				V4SI_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi_v16qi
    = build_function_type_list (V16QI_type_node,
				V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4si_ftype_v4sf_v4sf
    = build_function_type_list (V4SI_type_node,
				V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v8hi_ftype_v16qi_v16qi
    = build_function_type_list (V8HI_type_node,
				V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4si_ftype_v8hi_v8hi
    = build_function_type_list (V4SI_type_node,
				V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v8hi_ftype_v4si_v4si
    = build_function_type_list (V8HI_type_node,
				V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v16qi_ftype_v8hi_v8hi
    = build_function_type_list (V16QI_type_node,
				V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v4si_ftype_v16qi_v4si
    = build_function_type_list (V4SI_type_node,
				V16QI_type_node, V4SI_type_node, NULL_TREE);
  tree v4si_ftype_v16qi_v16qi
    = build_function_type_list (V4SI_type_node,
				V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4si_ftype_v8hi_v4si
    = build_function_type_list (V4SI_type_node,
				V8HI_type_node, V4SI_type_node, NULL_TREE);
  tree v4si_ftype_v8hi
    = build_function_type_list (V4SI_type_node, V8HI_type_node, NULL_TREE);
  tree int_ftype_v4si_v4si
    = build_function_type_list (integer_type_node,
				V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree int_ftype_v4sf_v4sf
    = build_function_type_list (integer_type_node,
				V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree int_ftype_v16qi_v16qi
    = build_function_type_list (integer_type_node,
				V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree int_ftype_v8hi_v8hi
    = build_function_type_list (integer_type_node,
				V8HI_type_node, V8HI_type_node, NULL_TREE);
  /* Add the simple ternary operators.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    {
      enum machine_mode mode0, mode1, mode2, mode3;
      tree type;
      bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
			   && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;

      if (is_overloaded)
	{
	  mode0 = VOIDmode;
	  mode1 = VOIDmode;
	  mode2 = VOIDmode;
	  mode3 = VOIDmode;
	}
      else
	{
	  if (d->name == 0 || d->icode == CODE_FOR_nothing)
	    continue;

	  mode0 = insn_data[d->icode].operand[0].mode;
	  mode1 = insn_data[d->icode].operand[1].mode;
	  mode2 = insn_data[d->icode].operand[2].mode;
	  mode3 = insn_data[d->icode].operand[3].mode;
	}

      /* When all four are of the same mode.  */
      if (mode0 == mode1 && mode1 == mode2 && mode2 == mode3)
	{
	  switch (mode0)
	    {
	    case VOIDmode:
	      type = opaque_ftype_opaque_opaque_opaque;
	      break;
	    case V4SImode:
	      type = v4si_ftype_v4si_v4si_v4si;
	      break;
	    case V4SFmode:
	      type = v4sf_ftype_v4sf_v4sf_v4sf;
	      break;
	    case V8HImode:
	      type = v8hi_ftype_v8hi_v8hi_v8hi;
	      break;
	    case V16QImode:
	      type = v16qi_ftype_v16qi_v16qi_v16qi;
	      break;
	    case V2SFmode:
	      type = v2sf_ftype_v2sf_v2sf_v2sf;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}
      else if (mode0 == mode1 && mode1 == mode2 && mode3 == V16QImode)
	{
	  switch (mode0)
	    {
	    case V4SImode:
	      type = v4si_ftype_v4si_v4si_v16qi;
	      break;
	    case V4SFmode:
	      type = v4sf_ftype_v4sf_v4sf_v16qi;
	      break;
	    case V8HImode:
	      type = v8hi_ftype_v8hi_v8hi_v16qi;
	      break;
	    case V16QImode:
	      type = v16qi_ftype_v16qi_v16qi_v16qi;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}
      else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode
	       && mode3 == V4SImode)
	type = v4si_ftype_v16qi_v16qi_v4si;
      else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode
	       && mode3 == V4SImode)
	type = v4si_ftype_v8hi_v8hi_v4si;
      else if (mode0 == V4SFmode && mode1 == V4SFmode && mode2 == V4SFmode
	       && mode3 == V4SImode)
	type = v4sf_ftype_v4sf_v4sf_v4si;

      /* vchar, vchar, vchar, 4-bit literal.  */
      else if (mode0 == V16QImode && mode1 == mode0 && mode2 == mode0
	       && mode3 == QImode)
	type = v16qi_ftype_v16qi_v16qi_int;

      /* vshort, vshort, vshort, 4-bit literal.  */
      else if (mode0 == V8HImode && mode1 == mode0 && mode2 == mode0
	       && mode3 == QImode)
	type = v8hi_ftype_v8hi_v8hi_int;

      /* vint, vint, vint, 4-bit literal.  */
      else if (mode0 == V4SImode && mode1 == mode0 && mode2 == mode0
	       && mode3 == QImode)
	type = v4si_ftype_v4si_v4si_int;

      /* vfloat, vfloat, vfloat, 4-bit literal.  */
      else if (mode0 == V4SFmode && mode1 == mode0 && mode2 == mode0
	       && mode3 == QImode)
	type = v4sf_ftype_v4sf_v4sf_int;

      else
	gcc_unreachable ();

      def_builtin (d->mask, d->name, type, d->code);
    }
10544 /* Add the simple binary operators. */
10545 d = (struct builtin_description *) bdesc_2arg;
10546 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
10548 enum machine_mode mode0, mode1, mode2;
10550 bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
10551 && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
10561 if (d->name == 0 || d->icode == CODE_FOR_nothing)
10564 mode0 = insn_data[d->icode].operand[0].mode;
10565 mode1 = insn_data[d->icode].operand[1].mode;
10566 mode2 = insn_data[d->icode].operand[2].mode;
10569 /* When all three operands are of the same mode. */
10570 if (mode0 == mode1 && mode1 == mode2)
10575 type = opaque_ftype_opaque_opaque;
10578 type = v4sf_ftype_v4sf_v4sf;
10581 type = v4si_ftype_v4si_v4si;
10584 type = v16qi_ftype_v16qi_v16qi;
10587 type = v8hi_ftype_v8hi_v8hi;
10590 type = v2si_ftype_v2si_v2si;
10593 if (TARGET_PAIRED_FLOAT)
10594 type = v2sf_ftype_v2sf_v2sf;
10596 type = v2sf_ftype_v2sf_v2sf_spe;
10599 type = int_ftype_int_int;
10602 gcc_unreachable ();
10606 /* A few other combos we really don't want to do manually. */
10608 /* vint, vfloat, vfloat. */
10609 else if (mode0 == V4SImode && mode1 == V4SFmode && mode2 == V4SFmode)
10610 type = v4si_ftype_v4sf_v4sf;
10612 /* vshort, vchar, vchar. */
10613 else if (mode0 == V8HImode && mode1 == V16QImode && mode2 == V16QImode)
10614 type = v8hi_ftype_v16qi_v16qi;
10616 /* vint, vshort, vshort. */
10617 else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode)
10618 type = v4si_ftype_v8hi_v8hi;
10620 /* vshort, vint, vint. */
10621 else if (mode0 == V8HImode && mode1 == V4SImode && mode2 == V4SImode)
10622 type = v8hi_ftype_v4si_v4si;
10624 /* vchar, vshort, vshort. */
10625 else if (mode0 == V16QImode && mode1 == V8HImode && mode2 == V8HImode)
10626 type = v16qi_ftype_v8hi_v8hi;
10628 /* vint, vchar, vint. */
10629 else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V4SImode)
10630 type = v4si_ftype_v16qi_v4si;
10632 /* vint, vchar, vchar. */
10633 else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode)
10634 type = v4si_ftype_v16qi_v16qi;
10636 /* vint, vshort, vint. */
10637 else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V4SImode)
10638 type = v4si_ftype_v8hi_v4si;
10640 /* vint, vint, 5-bit literal. */
10641 else if (mode0 == V4SImode && mode1 == V4SImode && mode2 == QImode)
10642 type = v4si_ftype_v4si_int;
10644 /* vshort, vshort, 5-bit literal. */
10645 else if (mode0 == V8HImode && mode1 == V8HImode && mode2 == QImode)
10646 type = v8hi_ftype_v8hi_int;
10648 /* vchar, vchar, 5-bit literal. */
10649 else if (mode0 == V16QImode && mode1 == V16QImode && mode2 == QImode)
10650 type = v16qi_ftype_v16qi_int;
10652 /* vfloat, vint, 5-bit literal. */
10653 else if (mode0 == V4SFmode && mode1 == V4SImode && mode2 == QImode)
10654 type = v4sf_ftype_v4si_int;
10656 /* vint, vfloat, 5-bit literal. */
10657 else if (mode0 == V4SImode && mode1 == V4SFmode && mode2 == QImode)
10658 type = v4si_ftype_v4sf_int;
10660 else if (mode0 == V2SImode && mode1 == SImode && mode2 == SImode)
10661 type = v2si_ftype_int_int;
10663 else if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
10664 type = v2si_ftype_v2si_char;
10666 else if (mode0 == V2SImode && mode1 == SImode && mode2 == QImode)
10667 type = v2si_ftype_int_char;
10672 gcc_assert (mode0 == SImode);
10676 type = int_ftype_v4si_v4si;
10679 type = int_ftype_v4sf_v4sf;
10682 type = int_ftype_v16qi_v16qi;
10685 type = int_ftype_v8hi_v8hi;
10688 gcc_unreachable ();
10692 def_builtin (d->mask, d->name, type, d->code);
10695 /* Add the simple unary operators. */
10696 d = (struct builtin_description *) bdesc_1arg;
10697 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
10699 enum machine_mode mode0, mode1;
10701 bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
10702 && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
10711 if (d->name == 0 || d->icode == CODE_FOR_nothing)
10714 mode0 = insn_data[d->icode].operand[0].mode;
10715 mode1 = insn_data[d->icode].operand[1].mode;
10718 if (mode0 == V4SImode && mode1 == QImode)
10719 type = v4si_ftype_int;
10720 else if (mode0 == V8HImode && mode1 == QImode)
10721 type = v8hi_ftype_int;
10722 else if (mode0 == V16QImode && mode1 == QImode)
10723 type = v16qi_ftype_int;
10724 else if (mode0 == VOIDmode && mode1 == VOIDmode)
10725 type = opaque_ftype_opaque;
10726 else if (mode0 == V4SFmode && mode1 == V4SFmode)
10727 type = v4sf_ftype_v4sf;
10728 else if (mode0 == V8HImode && mode1 == V16QImode)
10729 type = v8hi_ftype_v16qi;
10730 else if (mode0 == V4SImode && mode1 == V8HImode)
10731 type = v4si_ftype_v8hi;
10732 else if (mode0 == V2SImode && mode1 == V2SImode)
10733 type = v2si_ftype_v2si;
10734 else if (mode0 == V2SFmode && mode1 == V2SFmode)
10736 if (TARGET_PAIRED_FLOAT)
10737 type = v2sf_ftype_v2sf;
10739 type = v2sf_ftype_v2sf_spe;
10741 else if (mode0 == V2SFmode && mode1 == V2SImode)
10742 type = v2sf_ftype_v2si;
10743 else if (mode0 == V2SImode && mode1 == V2SFmode)
10744 type = v2si_ftype_v2sf;
10745 else if (mode0 == V2SImode && mode1 == QImode)
10746 type = v2si_ftype_char;
10748 gcc_unreachable ();
10750 def_builtin (d->mask, d->name, type, d->code);
10755 rs6000_init_libfuncs (void)
10757 if (DEFAULT_ABI != ABI_V4 && TARGET_XCOFF
10758 && !TARGET_POWER2 && !TARGET_POWERPC)
10760 /* AIX library routines for float->int conversion. */
10761 set_conv_libfunc (sfix_optab, SImode, DFmode, "__itrunc");
10762 set_conv_libfunc (ufix_optab, SImode, DFmode, "__uitrunc");
10763 set_conv_libfunc (sfix_optab, SImode, TFmode, "_qitrunc");
10764 set_conv_libfunc (ufix_optab, SImode, TFmode, "_quitrunc");
10767 if (!TARGET_IEEEQUAD)
10768 /* AIX/Darwin/64-bit Linux quad floating point routines. */
10769 if (!TARGET_XL_COMPAT)
10771 set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
10772 set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
10773 set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
10774 set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
10776 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
10778 set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
10779 set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
10780 set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
10781 set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
10782 set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
10783 set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
10784 set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
10786 set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
10787 set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
10788 set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
10789 set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
10790 set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
10791 set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
10792 set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
10793 set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
10796 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
10797 set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
10801 set_optab_libfunc (add_optab, TFmode, "_xlqadd");
10802 set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
10803 set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
10804 set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
10808 /* 32-bit SVR4 quad floating point routines. */
10810 set_optab_libfunc (add_optab, TFmode, "_q_add");
10811 set_optab_libfunc (sub_optab, TFmode, "_q_sub");
10812 set_optab_libfunc (neg_optab, TFmode, "_q_neg");
10813 set_optab_libfunc (smul_optab, TFmode, "_q_mul");
10814 set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
10815 if (TARGET_PPC_GPOPT || TARGET_POWER2)
10816 set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
10818 set_optab_libfunc (eq_optab, TFmode, "_q_feq");
10819 set_optab_libfunc (ne_optab, TFmode, "_q_fne");
10820 set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
10821 set_optab_libfunc (ge_optab, TFmode, "_q_fge");
10822 set_optab_libfunc (lt_optab, TFmode, "_q_flt");
10823 set_optab_libfunc (le_optab, TFmode, "_q_fle");
10825 set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
10826 set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
10827 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
10828 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
10829 set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
10830 set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
10831 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
10832 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
10837 /* Expand a block clear operation, and return 1 if successful. Return 0
10838 if we should let the compiler generate normal code.
10840 operands[0] is the destination
10841 operands[1] is the length
10842 operands[3] is the alignment */
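/* operands[2] (the value being stored) is not listed above; presumably the
   corresponding setmem expander only hands this routine a zero fill value,
   so only the destination, length and alignment matter.  Illustrative
   sketch of what the loop below emits, assuming TARGET_ALTIVEC and a
   32-byte destination aligned to 16 bytes (register choice arbitrary):

     (set (mem:V4SI (reg R))                       (const_vector:V4SI [0 0 0 0]))
     (set (mem:V4SI (plus (reg R) (const_int 16))) (const_vector:V4SI [0 0 0 0]))

   i.e. two 16-byte stores of CONST0_RTX (V4SImode).  */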
10845 expand_block_clear (rtx operands[])
10847 rtx orig_dest = operands[0];
10848 rtx bytes_rtx = operands[1];
10849 rtx align_rtx = operands[3];
10850 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
10851 HOST_WIDE_INT align;
10852 HOST_WIDE_INT bytes;
/* If this is not a fixed size clear, return 0 and let the compiler call memset.  */
10861 /* This must be a fixed size alignment */
10862 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
10863 align = INTVAL (align_rtx) * BITS_PER_UNIT;
10865 /* Anything to clear? */
10866 bytes = INTVAL (bytes_rtx);
10870 /* Use the builtin memset after a point, to avoid huge code bloat.
10871 When optimize_size, avoid any significant code bloat; calling
10872 memset is about 4 instructions, so allow for one instruction to
10873 load zero and three to do clearing. */
10874 if (TARGET_ALTIVEC && align >= 128)
10876 else if (TARGET_POWERPC64 && align >= 32)
10878 else if (TARGET_SPE && align >= 64)
10883 if (optimize_size && bytes > 3 * clear_step)
10885 if (! optimize_size && bytes > 8 * clear_step)
10888 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
10890 enum machine_mode mode = BLKmode;
10893 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
10898 else if (bytes >= 8 && TARGET_SPE && align >= 64)
10903 else if (bytes >= 8 && TARGET_POWERPC64
/* 64-bit loads and stores require word-aligned
   displacements.  */
&& (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
10911 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
10912 { /* move 4 bytes */
10916 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
10917 { /* move 2 bytes */
10921 else /* move 1 byte at a time */
10927 dest = adjust_address (orig_dest, mode, offset);
10929 emit_move_insn (dest, CONST0_RTX (mode));
10936 /* Expand a block move operation, and return 1 if successful. Return 0
10937 if we should let the compiler generate normal code.
10939 operands[0] is the destination
10940 operands[1] is the source
10941 operands[2] is the length
10942 operands[3] is the alignment */
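/* Illustrative sketch of the strategy below: a constant 8-byte copy with
   doubleword alignment on a 64-bit target selects DImode, loading the
   source into a fresh pseudo and storing it to the destination (typically
   an ld/std pair after register allocation), while TARGET_STRING machines
   use the movmemsi_{8,6,4,2,1}reg patterns to move up to 32 bytes per
   iteration with load/store-string instructions.  */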
10944 #define MAX_MOVE_REG 4
10947 expand_block_move (rtx operands[])
10949 rtx orig_dest = operands[0];
10950 rtx orig_src = operands[1];
10951 rtx bytes_rtx = operands[2];
10952 rtx align_rtx = operands[3];
10953 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
10958 rtx stores[MAX_MOVE_REG];
10961 /* If this is not a fixed size move, just call memcpy */
10965 /* This must be a fixed size alignment */
10966 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
10967 align = INTVAL (align_rtx) * BITS_PER_UNIT;
10969 /* Anything to move? */
10970 bytes = INTVAL (bytes_rtx);
10974 /* store_one_arg depends on expand_block_move to handle at least the size of
10975 reg_parm_stack_space. */
10976 if (bytes > (TARGET_POWERPC64 ? 64 : 32))
10979 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
10982 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
10983 rtx (*mov) (rtx, rtx);
10985 enum machine_mode mode = BLKmode;
10988 /* Altivec first, since it will be faster than a string move
10989 when it applies, and usually not significantly larger. */
10990 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
10994 gen_func.mov = gen_movv4si;
10996 else if (TARGET_SPE && bytes >= 8 && align >= 64)
11000 gen_func.mov = gen_movv2si;
11002 else if (TARGET_STRING
11003 && bytes > 24 /* move up to 32 bytes at a time */
11009 && ! fixed_regs[10]
11010 && ! fixed_regs[11]
11011 && ! fixed_regs[12])
11013 move_bytes = (bytes > 32) ? 32 : bytes;
11014 gen_func.movmemsi = gen_movmemsi_8reg;
11016 else if (TARGET_STRING
11017 && bytes > 16 /* move up to 24 bytes at a time */
11023 && ! fixed_regs[10])
11025 move_bytes = (bytes > 24) ? 24 : bytes;
11026 gen_func.movmemsi = gen_movmemsi_6reg;
11028 else if (TARGET_STRING
11029 && bytes > 8 /* move up to 16 bytes at a time */
11033 && ! fixed_regs[8])
11035 move_bytes = (bytes > 16) ? 16 : bytes;
11036 gen_func.movmemsi = gen_movmemsi_4reg;
11038 else if (bytes >= 8 && TARGET_POWERPC64
/* 64-bit loads and stores require word-aligned
   displacements.  */
&& (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
11045 gen_func.mov = gen_movdi;
11047 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
11048 { /* move up to 8 bytes at a time */
11049 move_bytes = (bytes > 8) ? 8 : bytes;
11050 gen_func.movmemsi = gen_movmemsi_2reg;
11052 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
11053 { /* move 4 bytes */
11056 gen_func.mov = gen_movsi;
11058 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
11059 { /* move 2 bytes */
11062 gen_func.mov = gen_movhi;
11064 else if (TARGET_STRING && bytes > 1)
11065 { /* move up to 4 bytes at a time */
11066 move_bytes = (bytes > 4) ? 4 : bytes;
11067 gen_func.movmemsi = gen_movmemsi_1reg;
11069 else /* move 1 byte at a time */
11073 gen_func.mov = gen_movqi;
11076 src = adjust_address (orig_src, mode, offset);
11077 dest = adjust_address (orig_dest, mode, offset);
11079 if (mode != BLKmode)
11081 rtx tmp_reg = gen_reg_rtx (mode);
11083 emit_insn ((*gen_func.mov) (tmp_reg, src));
11084 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
11087 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
11090 for (i = 0; i < num_reg; i++)
11091 emit_insn (stores[i]);
11095 if (mode == BLKmode)
11097 /* Move the address into scratch registers. The movmemsi
11098 patterns require zero offset. */
11099 if (!REG_P (XEXP (src, 0)))
11101 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
11102 src = replace_equiv_address (src, src_reg);
11104 set_mem_size (src, GEN_INT (move_bytes));
11106 if (!REG_P (XEXP (dest, 0)))
11108 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
11109 dest = replace_equiv_address (dest, dest_reg);
11111 set_mem_size (dest, GEN_INT (move_bytes));
11113 emit_insn ((*gen_func.movmemsi) (dest, src,
11114 GEN_INT (move_bytes & 31),
11123 /* Return a string to perform a load_multiple operation.
11124 operands[0] is the vector.
11125 operands[1] is the source address.
11126 operands[2] is the first destination register. */
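/* Worked example (register numbers arbitrary): for a 3-word load whose
   address register is not among the destinations, the template
   "{lsi|lswi} %2,%1,%N0" returned at the end assembles, with the new
   mnemonics, to something like "lswi 5,9,12" -- %N0 prints the element
   count times 4, i.e. the byte count.  A single-word vector is
   special-cased as "{l|lwz} %2,0(%1)".  */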
11129 rs6000_output_load_multiple (rtx operands[3])
11131 /* We have to handle the case where the pseudo used to contain the address
11132 is assigned to one of the output registers. */
11134 int words = XVECLEN (operands[0], 0);
11137 if (XVECLEN (operands[0], 0) == 1)
11138 return "{l|lwz} %2,0(%1)";
11140 for (i = 0; i < words; i++)
11141 if (refers_to_regno_p (REGNO (operands[2]) + i,
11142 REGNO (operands[2]) + i + 1, operands[1], 0))
11146 xop[0] = GEN_INT (4 * (words-1));
11147 xop[1] = operands[1];
11148 xop[2] = operands[2];
11149 output_asm_insn ("{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,%0(%1)", xop);
11154 xop[0] = GEN_INT (4 * (words-1));
11155 xop[1] = operands[1];
11156 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
11157 output_asm_insn ("{cal %1,4(%1)|addi %1,%1,4}\n\t{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,-4(%1)", xop);
11162 for (j = 0; j < words; j++)
11165 xop[0] = GEN_INT (j * 4);
11166 xop[1] = operands[1];
11167 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
11168 output_asm_insn ("{l|lwz} %2,%0(%1)", xop);
11170 xop[0] = GEN_INT (i * 4);
11171 xop[1] = operands[1];
11172 output_asm_insn ("{l|lwz} %1,%0(%1)", xop);
11177 return "{lsi|lswi} %2,%1,%N0";
11181 /* A validation routine: say whether CODE, a condition code, and MODE
11182 match. The other alternatives either don't make sense or should
11183 never be generated. */
11186 validate_condition_mode (enum rtx_code code, enum machine_mode mode)
11188 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
11189 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
11190 && GET_MODE_CLASS (mode) == MODE_CC);
11192 /* These don't make sense. */
11193 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
11194 || mode != CCUNSmode);
11196 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
11197 || mode == CCUNSmode);
11199 gcc_assert (mode == CCFPmode
11200 || (code != ORDERED && code != UNORDERED
11201 && code != UNEQ && code != LTGT
11202 && code != UNGT && code != UNLT
11203 && code != UNGE && code != UNLE));
11205 /* These should never be generated except for
11206 flag_finite_math_only. */
11207 gcc_assert (mode != CCFPmode
11208 || flag_finite_math_only
11209 || (code != LE && code != GE
11210 && code != UNEQ && code != LTGT
11211 && code != UNGT && code != UNLT));
11213 /* These are invalid; the information is not there. */
11214 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
/* Return 1 if ANDOP is a mask with no bits set outside of the mask
   required to convert the result of a rotate insn into a shift
   left insn of SHIFTOP bits.  Both are known to be SImode CONST_INTs.  */
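/* Worked example: with SHIFTOP = 4, shift_mask has its four low bits
   clear (0xfffffff0 in the low word), so ANDOP = 0xffffff00 passes
   ((0xffffff00 & ~0xfffffff0) == 0 -> return 1) while ANDOP = 0x000000ff
   fails because bits 0-3 are set.  */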
11223 includes_lshift_p (rtx shiftop, rtx andop)
11225 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
11227 shift_mask <<= INTVAL (shiftop);
11229 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
11232 /* Similar, but for right shift. */
11235 includes_rshift_p (rtx shiftop, rtx andop)
11237 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
11239 shift_mask >>= INTVAL (shiftop);
11241 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
11244 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
11245 to perform a left shift. It must have exactly SHIFTOP least
11246 significant 0's, then one or more 1's, then zero or more 0's. */
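/* Worked example: with SHIFTOP = 4, ANDOP = 0x00000ff0 (four low 0's,
   eight 1's, 0's above) matches the pattern and the function returns 1;
   ANDOP = 0x00000fff fails (fewer than SHIFTOP low zeros) and
   ANDOP = 0x0000f0f0 fails (the 1 bits are not contiguous).  */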
11249 includes_rldic_lshift_p (rtx shiftop, rtx andop)
11251 if (GET_CODE (andop) == CONST_INT)
11253 HOST_WIDE_INT c, lsb, shift_mask;
11255 c = INTVAL (andop);
11256 if (c == 0 || c == ~0)
11260 shift_mask <<= INTVAL (shiftop);
11262 /* Find the least significant one bit. */
11265 /* It must coincide with the LSB of the shift mask. */
11266 if (-lsb != shift_mask)
11269 /* Invert to look for the next transition (if any). */
11272 /* Remove the low group of ones (originally low group of zeros). */
11275 /* Again find the lsb, and check we have all 1's above. */
11279 else if (GET_CODE (andop) == CONST_DOUBLE
11280 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
11282 HOST_WIDE_INT low, high, lsb;
11283 HOST_WIDE_INT shift_mask_low, shift_mask_high;
11285 low = CONST_DOUBLE_LOW (andop);
11286 if (HOST_BITS_PER_WIDE_INT < 64)
11287 high = CONST_DOUBLE_HIGH (andop);
11289 if ((low == 0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == 0))
11290 || (low == ~0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0)))
11293 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
11295 shift_mask_high = ~0;
11296 if (INTVAL (shiftop) > 32)
11297 shift_mask_high <<= INTVAL (shiftop) - 32;
11299 lsb = high & -high;
11301 if (-lsb != shift_mask_high || INTVAL (shiftop) < 32)
11307 lsb = high & -high;
11308 return high == -lsb;
11311 shift_mask_low = ~0;
11312 shift_mask_low <<= INTVAL (shiftop);
11316 if (-lsb != shift_mask_low)
11319 if (HOST_BITS_PER_WIDE_INT < 64)
11324 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
11326 lsb = high & -high;
11327 return high == -lsb;
11331 return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
11337 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
11338 to perform a left shift. It must have SHIFTOP or more least
11339 significant 0's, with the remainder of the word 1's. */
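/* Worked example, assuming a 64-bit HOST_WIDE_INT: with SHIFTOP = 8,
   ANDOP = 0xffffffffffffff00 (eight low 0's, all 1's above) returns 1;
   ANDOP = 0x0000ffffffffff00 returns 0 because the bits above the
   transition are not all 1's, and an all-ones ANDOP is rejected because
   its least significant bit falls outside the shift mask.  */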
11342 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
11344 if (GET_CODE (andop) == CONST_INT)
11346 HOST_WIDE_INT c, lsb, shift_mask;
11349 shift_mask <<= INTVAL (shiftop);
11350 c = INTVAL (andop);
11352 /* Find the least significant one bit. */
11355 /* It must be covered by the shift mask.
11356 This test also rejects c == 0. */
11357 if ((lsb & shift_mask) == 0)
11360 /* Check we have all 1's above the transition, and reject all 1's. */
11361 return c == -lsb && lsb != 1;
11363 else if (GET_CODE (andop) == CONST_DOUBLE
11364 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
11366 HOST_WIDE_INT low, lsb, shift_mask_low;
11368 low = CONST_DOUBLE_LOW (andop);
11370 if (HOST_BITS_PER_WIDE_INT < 64)
11372 HOST_WIDE_INT high, shift_mask_high;
11374 high = CONST_DOUBLE_HIGH (andop);
11378 shift_mask_high = ~0;
11379 if (INTVAL (shiftop) > 32)
11380 shift_mask_high <<= INTVAL (shiftop) - 32;
11382 lsb = high & -high;
11384 if ((lsb & shift_mask_high) == 0)
11387 return high == -lsb;
11393 shift_mask_low = ~0;
11394 shift_mask_low <<= INTVAL (shiftop);
11398 if ((lsb & shift_mask_low) == 0)
11401 return low == -lsb && lsb != 1;
/* Return 1 if the operands will generate valid arguments to the rlwimi
   instruction for an insert with right shift in 64-bit mode.  The mask may
   not start on the first bit or stop on the last bit because the wrap-around
   effects of the instruction do not correspond to the semantics of the RTL insn.  */
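/* Worked example: STARTOP = 48, SIZEOP = 8, SHIFTOP = 4 satisfies every
   test below (48 > 32, 48 < 64, 8 > 1, 48 + 8 < 64, 4 > 0, 8 + 4 < 32,
   64 - 4 >= 8) and so returns 1, consistent with the restriction above
   that the inserted field touch neither the first nor the last bit.  */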
11413 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
11415 if (INTVAL (startop) > 32
11416 && INTVAL (startop) < 64
11417 && INTVAL (sizeop) > 1
11418 && INTVAL (sizeop) + INTVAL (startop) < 64
11419 && INTVAL (shiftop) > 0
11420 && INTVAL (sizeop) + INTVAL (shiftop) < 32
11421 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
11427 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
11428 for lfq and stfq insns iff the registers are hard registers. */
11431 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
11433 /* We might have been passed a SUBREG. */
11434 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
/* We might have been passed non-floating-point registers.  */
11438 if (!FP_REGNO_P (REGNO (reg1))
11439 || !FP_REGNO_P (REGNO (reg2)))
11442 return (REGNO (reg1) == REGNO (reg2) - 1);
11445 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
11446 addr1 and addr2 must be in consecutive memory locations
11447 (addr2 == addr1 + 8). */
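/* Illustrative example (modes and register number arbitrary):
   mem1 = (mem:DF (plus (reg 9) (const_int 16))) and
   mem2 = (mem:DF (plus (reg 9) (const_int 24))) pass the checks below:
   neither is volatile, both use the same base register, and
   offset2 == offset1 + 8.  A different base, a non-constant offset, or
   any gap other than 8 bytes makes the function return 0.  */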
11450 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
11453 unsigned int reg1, reg2;
11454 int offset1, offset2;
11456 /* The mems cannot be volatile. */
11457 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
11460 addr1 = XEXP (mem1, 0);
11461 addr2 = XEXP (mem2, 0);
11463 /* Extract an offset (if used) from the first addr. */
11464 if (GET_CODE (addr1) == PLUS)
11466 /* If not a REG, return zero. */
11467 if (GET_CODE (XEXP (addr1, 0)) != REG)
11471 reg1 = REGNO (XEXP (addr1, 0));
11472 /* The offset must be constant! */
11473 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
11475 offset1 = INTVAL (XEXP (addr1, 1));
11478 else if (GET_CODE (addr1) != REG)
11482 reg1 = REGNO (addr1);
11483 /* This was a simple (mem (reg)) expression. Offset is 0. */
11487 /* And now for the second addr. */
11488 if (GET_CODE (addr2) == PLUS)
11490 /* If not a REG, return zero. */
11491 if (GET_CODE (XEXP (addr2, 0)) != REG)
11495 reg2 = REGNO (XEXP (addr2, 0));
11496 /* The offset must be constant. */
11497 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
11499 offset2 = INTVAL (XEXP (addr2, 1));
11502 else if (GET_CODE (addr2) != REG)
11506 reg2 = REGNO (addr2);
11507 /* This was a simple (mem (reg)) expression. Offset is 0. */
11511 /* Both of these must have the same base register. */
11515 /* The offset for the second addr must be 8 more than the first addr. */
11516 if (offset2 != offset1 + 8)
/* All the tests passed.  addr1 and addr2 are valid for lfq or stfq
   insns.  */
11526 rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
11528 static bool eliminated = false;
11529 if (mode != SDmode)
11530 return assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
11533 rtx mem = cfun->machine->sdmode_stack_slot;
11534 gcc_assert (mem != NULL_RTX);
11538 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
11539 cfun->machine->sdmode_stack_slot = mem;
11547 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
11549 /* Don't walk into types. */
11550 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
11552 *walk_subtrees = 0;
11556 switch (TREE_CODE (*tp))
11565 case ALIGN_INDIRECT_REF:
11566 case MISALIGNED_INDIRECT_REF:
11567 case VIEW_CONVERT_EXPR:
11568 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
11579 /* Allocate a 64-bit stack slot to be used for copying SDmode
11580 values through if this function has any SDmode references. */
11583 rs6000_alloc_sdmode_stack_slot (void)
11587 gimple_stmt_iterator gsi;
11589 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
11592 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
11594 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
11597 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
11598 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
11604 /* Check for any SDmode parameters of the function. */
11605 for (t = DECL_ARGUMENTS (cfun->decl); t; t = TREE_CHAIN (t))
11607 if (TREE_TYPE (t) == error_mark_node)
11610 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
11611 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
11613 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
11614 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
11622 rs6000_instantiate_decls (void)
11624 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
11625 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
11628 /* Return the register class of a scratch register needed to copy IN into
11629 or out of a register in RCLASS in MODE. If it can be done directly,
11630 NO_REGS is returned. */
11633 rs6000_secondary_reload_class (enum reg_class rclass,
11634 enum machine_mode mode ATTRIBUTE_UNUSED,
11639 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
11641 && MACHOPIC_INDIRECT
11645 /* We cannot copy a symbolic operand directly into anything
11646 other than BASE_REGS for TARGET_ELF. So indicate that a
register from BASE_REGS is needed as an intermediate register.
11650 On Darwin, pic addresses require a load from memory, which
11651 needs a base register. */
11652 if (rclass != BASE_REGS
11653 && (GET_CODE (in) == SYMBOL_REF
11654 || GET_CODE (in) == HIGH
11655 || GET_CODE (in) == LABEL_REF
11656 || GET_CODE (in) == CONST))
11660 if (GET_CODE (in) == REG)
11662 regno = REGNO (in);
11663 if (regno >= FIRST_PSEUDO_REGISTER)
11665 regno = true_regnum (in);
11666 if (regno >= FIRST_PSEUDO_REGISTER)
11670 else if (GET_CODE (in) == SUBREG)
11672 regno = true_regnum (in);
11673 if (regno >= FIRST_PSEUDO_REGISTER)
/* We can place anything into GENERAL_REGS and can put GENERAL_REGS
   into anything.  */
11681 if (rclass == GENERAL_REGS || rclass == BASE_REGS
11682 || (regno >= 0 && INT_REGNO_P (regno)))
11685 /* Constants, memory, and FP registers can go into FP registers. */
11686 if ((regno == -1 || FP_REGNO_P (regno))
11687 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
11688 return (mode != SDmode) ? NO_REGS : GENERAL_REGS;
11690 /* Memory, and AltiVec registers can go into AltiVec registers. */
11691 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
11692 && rclass == ALTIVEC_REGS)
11695 /* We can copy among the CR registers. */
11696 if ((rclass == CR_REGS || rclass == CR0_REGS)
11697 && regno >= 0 && CR_REGNO_P (regno))
11700 /* Otherwise, we need GENERAL_REGS. */
11701 return GENERAL_REGS;
11704 /* Given a comparison operation, return the bit number in CCR to test. We
11705 know this is a valid comparison.
11707 SCC_P is 1 if this is for an scc. That means that %D will have been
11708 used instead of %C, so the bits will be in different places.
11710 Return -1 if OP isn't a valid comparison for some reason. */
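/* Worked example: for a GT comparison against CR field 2, base_bit is
   4 * 2 = 8 and the result is 9; each CR field contributes four bits
   (LT, GT, EQ, SO) starting at 4 * (REGNO - CR0_REGNO).  ORDERED and
   UNORDERED always select base_bit + 3, the SO/unordered bit.  */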
11713 ccr_bit (rtx op, int scc_p)
11715 enum rtx_code code = GET_CODE (op);
11716 enum machine_mode cc_mode;
11721 if (!COMPARISON_P (op))
11724 reg = XEXP (op, 0);
11726 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
11728 cc_mode = GET_MODE (reg);
11729 cc_regnum = REGNO (reg);
11730 base_bit = 4 * (cc_regnum - CR0_REGNO);
11732 validate_condition_mode (code, cc_mode);
/* When generating a sCOND operation, only positive conditions are
   allowed.  */
gcc_assert (!scc_p
|| code == EQ || code == GT || code == LT || code == UNORDERED
|| code == GTU || code == LTU);
11743 return scc_p ? base_bit + 3 : base_bit + 2;
11745 return base_bit + 2;
11746 case GT: case GTU: case UNLE:
11747 return base_bit + 1;
11748 case LT: case LTU: case UNGE:
11750 case ORDERED: case UNORDERED:
11751 return base_bit + 3;
11754 /* If scc, we will have done a cror to put the bit in the
11755 unordered position. So test that bit. For integer, this is ! LT
11756 unless this is an scc insn. */
11757 return scc_p ? base_bit + 3 : base_bit;
11760 return scc_p ? base_bit + 3 : base_bit + 1;
11763 gcc_unreachable ();
11767 /* Return the GOT register. */
11770 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
11772 /* The second flow pass currently (June 1999) can't update
11773 regs_ever_live without disturbing other parts of the compiler, so
11774 update it here to make the prolog/epilogue code happy. */
11775 if (!can_create_pseudo_p ()
11776 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
11777 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
11779 crtl->uses_pic_offset_table = 1;
11781 return pic_offset_table_rtx;
11784 /* Function to init struct machine_function.
11785 This will be called, via a pointer variable,
11786 from push_function_context. */
11788 static struct machine_function *
11789 rs6000_init_machine_status (void)
11791 return GGC_CNEW (machine_function);
11794 /* These macros test for integers and extract the low-order bits. */
#define INT_P(X) \
((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE) \
11797 && GET_MODE (X) == VOIDmode)
11799 #define INT_LOWPART(X) \
11800 (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))
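/* Worked example for the two mask helpers below: the rlwinm-style mask
   0x0ffff000 has its first 1 bit at position 4 and its last at
   position 19 (IBM bit numbering, counting from the most significant
   bit), so extract_MB returns 4 and extract_ME returns 19 -- the MB and
   ME fields one would write as "rlwinm rD,rA,SH,4,19".  */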
11803 extract_MB (rtx op)
11806 unsigned long val = INT_LOWPART (op);
/* If the high bit is zero, the value is the first 1 bit we find
   from the left.  */
11810 if ((val & 0x80000000) == 0)
11812 gcc_assert (val & 0xffffffff);
11815 while (((val <<= 1) & 0x80000000) == 0)
11820 /* If the high bit is set and the low bit is not, or the mask is all
11821 1's, the value is zero. */
11822 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
11825 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
11828 while (((val >>= 1) & 1) != 0)
11835 extract_ME (rtx op)
11838 unsigned long val = INT_LOWPART (op);
/* If the low bit is zero, the value is the first 1 bit we find from
   the right.  */
11842 if ((val & 1) == 0)
11844 gcc_assert (val & 0xffffffff);
11847 while (((val >>= 1) & 1) == 0)
11853 /* If the low bit is set and the high bit is not, or the mask is all
11854 1's, the value is 31. */
11855 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
11858 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
11861 while (((val <<= 1) & 0x80000000) != 0)
11867 /* Locate some local-dynamic symbol still in use by this function
11868 so that we can print its name in some tls_ld pattern. */
11870 static const char *
11871 rs6000_get_some_local_dynamic_name (void)
11875 if (cfun->machine->some_ld_name)
11876 return cfun->machine->some_ld_name;
11878 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11880 && for_each_rtx (&PATTERN (insn),
11881 rs6000_get_some_local_dynamic_name_1, 0))
11882 return cfun->machine->some_ld_name;
11884 gcc_unreachable ();
11887 /* Helper function for rs6000_get_some_local_dynamic_name. */
11890 rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11894 if (GET_CODE (x) == SYMBOL_REF)
11896 const char *str = XSTR (x, 0);
11897 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11899 cfun->machine->some_ld_name = str;
11907 /* Write out a function code label. */
11910 rs6000_output_function_entry (FILE *file, const char *fname)
11912 if (fname[0] != '.')
11914 switch (DEFAULT_ABI)
11917 gcc_unreachable ();
11923 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
11932 RS6000_OUTPUT_BASENAME (file, fname);
11934 assemble_name (file, fname);
11937 /* Print an operand. Recognize special options, documented below. */
11940 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
11941 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
11943 #define SMALL_DATA_RELOC "sda21"
11944 #define SMALL_DATA_REG 0
11948 print_operand (FILE *file, rtx x, int code)
11952 unsigned HOST_WIDE_INT uval;
11957 /* Write out an instruction after the call which may be replaced
11958 with glue code by the loader. This depends on the AIX version. */
11959 asm_fprintf (file, RS6000_CALL_GLUE);
11962 /* %a is output_address. */
11965 /* If X is a constant integer whose low-order 5 bits are zero,
11966 write 'l'. Otherwise, write 'r'. This is a kludge to fix a bug
11967 in the AIX assembler where "sri" with a zero shift count
11968 writes a trash instruction. */
11969 if (GET_CODE (x) == CONST_INT && (INTVAL (x) & 31) == 0)
11976 /* If constant, low-order 16 bits of constant, unsigned.
11977 Otherwise, write normally. */
11979 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 0xffff);
11981 print_operand (file, x, 0);
11985 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
11986 for 64-bit mask direction. */
11987 putc (((INT_LOWPART (x) & 1) == 0 ? 'r' : 'l'), file);
11990 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
11994 /* X is a CR register. Print the number of the GT bit of the CR. */
11995 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
11996 output_operand_lossage ("invalid %%E value");
11998 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 1);
12002 /* Like 'J' but get to the GT bit only. */
12003 gcc_assert (GET_CODE (x) == REG);
12005 /* Bit 1 is GT bit. */
12006 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
12008 /* Add one for shift count in rlinm for scc. */
12009 fprintf (file, "%d", i + 1);
12013 /* X is a CR register. Print the number of the EQ bit of the CR */
12014 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
12015 output_operand_lossage ("invalid %%E value");
12017 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
12021 /* X is a CR register. Print the shift count needed to move it
12022 to the high-order four bits. */
12023 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
12024 output_operand_lossage ("invalid %%f value");
12026 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
/* Similar, but print the count for the rotate in the opposite
   direction.  */
12032 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
12033 output_operand_lossage ("invalid %%F value");
12035 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
12039 /* X is a constant integer. If it is negative, print "m",
12040 otherwise print "z". This is to make an aze or ame insn. */
12041 if (GET_CODE (x) != CONST_INT)
12042 output_operand_lossage ("invalid %%G value");
12043 else if (INTVAL (x) >= 0)
12050 /* If constant, output low-order five bits. Otherwise, write
12053 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 31);
12055 print_operand (file, x, 0);
12059 /* If constant, output low-order six bits. Otherwise, write
12062 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 63);
12064 print_operand (file, x, 0);
12068 /* Print `i' if this is a constant, else nothing. */
12074 /* Write the bit number in CCR for jump. */
12075 i = ccr_bit (x, 0);
12077 output_operand_lossage ("invalid %%j code");
12079 fprintf (file, "%d", i);
12083 /* Similar, but add one for shift count in rlinm for scc and pass
12084 scc flag to `ccr_bit'. */
12085 i = ccr_bit (x, 1);
12087 output_operand_lossage ("invalid %%J code");
12089 /* If we want bit 31, write a shift count of zero, not 32. */
12090 fprintf (file, "%d", i == 31 ? 0 : i + 1);
12094 /* X must be a constant. Write the 1's complement of the
12097 output_operand_lossage ("invalid %%k value");
12099 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INT_LOWPART (x));
12103 /* X must be a symbolic constant on ELF. Write an
12104 expression suitable for an 'addi' that adds in the low 16
12105 bits of the MEM. */
12106 if (GET_CODE (x) != CONST)
12108 print_operand_address (file, x);
12109 fputs ("@l", file);
12113 if (GET_CODE (XEXP (x, 0)) != PLUS
12114 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
12115 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
12116 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
12117 output_operand_lossage ("invalid %%K value");
12118 print_operand_address (file, XEXP (XEXP (x, 0), 0));
12119 fputs ("@l", file);
12120 /* For GNU as, there must be a non-alphanumeric character
12121 between 'l' and the number. The '-' is added by
12122 print_operand() already. */
12123 if (INTVAL (XEXP (XEXP (x, 0), 1)) >= 0)
12125 print_operand (file, XEXP (XEXP (x, 0), 1), 0);
12129 /* %l is output_asm_label. */
12132 /* Write second word of DImode or DFmode reference. Works on register
12133 or non-indexed memory only. */
12134 if (GET_CODE (x) == REG)
12135 fputs (reg_names[REGNO (x) + 1], file);
12136 else if (GET_CODE (x) == MEM)
12138 /* Handle possible auto-increment. Since it is pre-increment and
12139 we have already done it, we can just use an offset of word. */
12140 if (GET_CODE (XEXP (x, 0)) == PRE_INC
12141 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
12142 output_address (plus_constant (XEXP (XEXP (x, 0), 0),
12144 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
12145 output_address (plus_constant (XEXP (XEXP (x, 0), 0),
12148 output_address (XEXP (adjust_address_nv (x, SImode,
12152 if (small_data_operand (x, GET_MODE (x)))
12153 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
12154 reg_names[SMALL_DATA_REG]);
12159 /* MB value for a mask operand. */
12160 if (! mask_operand (x, SImode))
12161 output_operand_lossage ("invalid %%m value");
12163 fprintf (file, "%d", extract_MB (x));
12167 /* ME value for a mask operand. */
12168 if (! mask_operand (x, SImode))
12169 output_operand_lossage ("invalid %%M value");
12171 fprintf (file, "%d", extract_ME (x));
12174 /* %n outputs the negative of its operand. */
12177 /* Write the number of elements in the vector times 4. */
12178 if (GET_CODE (x) != PARALLEL)
12179 output_operand_lossage ("invalid %%N value");
12181 fprintf (file, "%d", XVECLEN (x, 0) * 4);
12185 /* Similar, but subtract 1 first. */
12186 if (GET_CODE (x) != PARALLEL)
12187 output_operand_lossage ("invalid %%O value");
12189 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
12193 /* X is a CONST_INT that is a power of two. Output the logarithm. */
12195 || INT_LOWPART (x) < 0
12196 || (i = exact_log2 (INT_LOWPART (x))) < 0)
12197 output_operand_lossage ("invalid %%p value");
12199 fprintf (file, "%d", i);
12203 /* The operand must be an indirect memory reference. The result
12204 is the register name. */
12205 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
12206 || REGNO (XEXP (x, 0)) >= 32)
12207 output_operand_lossage ("invalid %%P value");
12209 fputs (reg_names[REGNO (XEXP (x, 0))], file);
12213 /* This outputs the logical code corresponding to a boolean
12214 expression. The expression may have one or both operands
12215 negated (if one, only the first one). For condition register
12216 logical operations, it will also treat the negated
12217 CR codes as NOTs, but not handle NOTs of them. */
12219 const char *const *t = 0;
12221 enum rtx_code code = GET_CODE (x);
12222 static const char * const tbl[3][3] = {
12223 { "and", "andc", "nor" },
12224 { "or", "orc", "nand" },
12225 { "xor", "eqv", "xor" } };
12229 else if (code == IOR)
12231 else if (code == XOR)
12234 output_operand_lossage ("invalid %%q value");
12236 if (GET_CODE (XEXP (x, 0)) != NOT)
12240 if (GET_CODE (XEXP (x, 1)) == NOT)
12258 /* X is a CR register. Print the mask for `mtcrf'. */
12259 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
12260 output_operand_lossage ("invalid %%R value");
12262 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
12266 /* Low 5 bits of 32 - value */
12268 output_operand_lossage ("invalid %%s value");
12270 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INT_LOWPART (x)) & 31);
12274 /* PowerPC64 mask position. All 0's is excluded.
12275 CONST_INT 32-bit mask is considered sign-extended so any
12276 transition must occur within the CONST_INT, not on the boundary. */
12277 if (! mask64_operand (x, DImode))
12278 output_operand_lossage ("invalid %%S value");
12280 uval = INT_LOWPART (x);
12282 if (uval & 1) /* Clear Left */
12284 #if HOST_BITS_PER_WIDE_INT > 64
12285 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
12289 else /* Clear Right */
12292 #if HOST_BITS_PER_WIDE_INT > 64
12293 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
12299 gcc_assert (i >= 0);
12300 fprintf (file, "%d", i);
12304 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
12305 gcc_assert (GET_CODE (x) == REG && GET_MODE (x) == CCmode);
12307 /* Bit 3 is OV bit. */
12308 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
12310 /* If we want bit 31, write a shift count of zero, not 32. */
12311 fprintf (file, "%d", i == 31 ? 0 : i + 1);
12315 /* Print the symbolic name of a branch target register. */
12316 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
12317 && REGNO (x) != CTR_REGNO))
12318 output_operand_lossage ("invalid %%T value");
12319 else if (REGNO (x) == LR_REGNO)
12320 fputs (TARGET_NEW_MNEMONICS ? "lr" : "r", file);
12322 fputs ("ctr", file);
12326 /* High-order 16 bits of constant for use in unsigned operand. */
12328 output_operand_lossage ("invalid %%u value");
12330 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
12331 (INT_LOWPART (x) >> 16) & 0xffff);
12335 /* High-order 16 bits of constant for use in signed operand. */
12337 output_operand_lossage ("invalid %%v value");
12339 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
12340 (INT_LOWPART (x) >> 16) & 0xffff);
12344 /* Print `u' if this has an auto-increment or auto-decrement. */
12345 if (GET_CODE (x) == MEM
12346 && (GET_CODE (XEXP (x, 0)) == PRE_INC
12347 || GET_CODE (XEXP (x, 0)) == PRE_DEC
12348 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
12353 /* Print the trap code for this operand. */
12354 switch (GET_CODE (x))
12357 fputs ("eq", file); /* 4 */
12360 fputs ("ne", file); /* 24 */
12363 fputs ("lt", file); /* 16 */
12366 fputs ("le", file); /* 20 */
12369 fputs ("gt", file); /* 8 */
12372 fputs ("ge", file); /* 12 */
12375 fputs ("llt", file); /* 2 */
12378 fputs ("lle", file); /* 6 */
12381 fputs ("lgt", file); /* 1 */
12384 fputs ("lge", file); /* 5 */
12387 gcc_unreachable ();
12392 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
12395 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
12396 ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
12398 print_operand (file, x, 0);
12402 /* MB value for a PowerPC64 rldic operand. */
12403 val = (GET_CODE (x) == CONST_INT
12404 ? INTVAL (x) : CONST_DOUBLE_HIGH (x));
12409 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i++)
12410 if ((val <<= 1) < 0)
12413 #if HOST_BITS_PER_WIDE_INT == 32
12414 if (GET_CODE (x) == CONST_INT && i >= 0)
12415 i += 32; /* zero-extend high-part was all 0's */
12416 else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
12418 val = CONST_DOUBLE_LOW (x);
12424 for ( ; i < 64; i++)
12425 if ((val <<= 1) < 0)
12430 fprintf (file, "%d", i + 1);
12434 if (GET_CODE (x) == MEM
12435 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
12436 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
12437 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
12442 /* Like 'L', for third word of TImode */
12443 if (GET_CODE (x) == REG)
12444 fputs (reg_names[REGNO (x) + 2], file);
12445 else if (GET_CODE (x) == MEM)
12447 if (GET_CODE (XEXP (x, 0)) == PRE_INC
12448 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
12449 output_address (plus_constant (XEXP (XEXP (x, 0), 0), 8));
12450 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
12451 output_address (plus_constant (XEXP (XEXP (x, 0), 0), 8));
12453 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
12454 if (small_data_operand (x, GET_MODE (x)))
12455 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
12456 reg_names[SMALL_DATA_REG]);
12461 /* X is a SYMBOL_REF. Write out the name preceded by a
12462 period and without any trailing data in brackets. Used for function
12463 names. If we are configured for System V (or the embedded ABI) on
12464 the PowerPC, do not emit the period, since those systems do not use
12465 TOCs and the like. */
12466 gcc_assert (GET_CODE (x) == SYMBOL_REF);
/* Mark the decl as referenced so that cgraph will output the
   function.  */
12470 if (SYMBOL_REF_DECL (x))
12471 mark_decl_referenced (SYMBOL_REF_DECL (x));
12473 /* For macho, check to see if we need a stub. */
12476 const char *name = XSTR (x, 0);
12478 if (MACHOPIC_INDIRECT
12479 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
12480 name = machopic_indirection_name (x, /*stub_p=*/true);
12482 assemble_name (file, name);
12484 else if (!DOT_SYMBOLS)
12485 assemble_name (file, XSTR (x, 0));
12487 rs6000_output_function_entry (file, XSTR (x, 0));
12491 /* Like 'L', for last word of TImode. */
12492 if (GET_CODE (x) == REG)
12493 fputs (reg_names[REGNO (x) + 3], file);
12494 else if (GET_CODE (x) == MEM)
12496 if (GET_CODE (XEXP (x, 0)) == PRE_INC
12497 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
12498 output_address (plus_constant (XEXP (XEXP (x, 0), 0), 12));
12499 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
12500 output_address (plus_constant (XEXP (XEXP (x, 0), 0), 12));
12502 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
12503 if (small_data_operand (x, GET_MODE (x)))
12504 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
12505 reg_names[SMALL_DATA_REG]);
12509 /* Print AltiVec or SPE memory operand. */
12514 gcc_assert (GET_CODE (x) == MEM);
12518 /* Ugly hack because %y is overloaded. */
12519 if ((TARGET_SPE || TARGET_E500_DOUBLE)
12520 && (GET_MODE_SIZE (GET_MODE (x)) == 8
12521 || GET_MODE (x) == TFmode
12522 || GET_MODE (x) == TImode))
12524 /* Handle [reg]. */
12525 if (GET_CODE (tmp) == REG)
12527 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
12530 /* Handle [reg+UIMM]. */
12531 else if (GET_CODE (tmp) == PLUS &&
12532 GET_CODE (XEXP (tmp, 1)) == CONST_INT)
12536 gcc_assert (GET_CODE (XEXP (tmp, 0)) == REG);
12538 x = INTVAL (XEXP (tmp, 1));
12539 fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
12543 /* Fall through. Must be [reg+reg]. */
12546 && GET_CODE (tmp) == AND
12547 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
12548 && INTVAL (XEXP (tmp, 1)) == -16)
12549 tmp = XEXP (tmp, 0);
12550 if (GET_CODE (tmp) == REG)
12551 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
if (GET_CODE (tmp) != PLUS
12555 || !REG_P (XEXP (tmp, 0))
12556 || !REG_P (XEXP (tmp, 1)))
12558 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
12562 if (REGNO (XEXP (tmp, 0)) == 0)
12563 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
12564 reg_names[ REGNO (XEXP (tmp, 0)) ]);
12566 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
12567 reg_names[ REGNO (XEXP (tmp, 1)) ]);
12573 if (GET_CODE (x) == REG)
12574 fprintf (file, "%s", reg_names[REGNO (x)]);
12575 else if (GET_CODE (x) == MEM)
12577 /* We need to handle PRE_INC and PRE_DEC here, since we need to
12578 know the width from the mode. */
12579 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
12580 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
12581 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
12582 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
12583 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
12584 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
12585 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
12586 output_address (XEXP (XEXP (x, 0), 1));
12588 output_address (XEXP (x, 0));
12591 output_addr_const (file, x);
12595 assemble_name (file, rs6000_get_some_local_dynamic_name ());
12599 output_operand_lossage ("invalid %%xn code");
12603 /* Print the address of an operand. */
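/* Examples of the address forms handled below (register spellings
   illustrative; the actual text comes from reg_names[]):
   (reg N) prints as "0(rN)", (plus (reg N) (const_int 8)) as "8(rN)",
   an indexed (plus (reg A) (reg B)) as "rA,rB" with register 0 never
   printed in the first slot, and a LO_SUM prints "sym@l(rN)" on ELF or
   "lo16(sym)(rN)" on Darwin.  */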
12606 print_operand_address (FILE *file, rtx x)
12608 if (GET_CODE (x) == REG)
12609 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
12610 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
12611 || GET_CODE (x) == LABEL_REF)
12613 output_addr_const (file, x);
12614 if (small_data_operand (x, GET_MODE (x)))
12615 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
12616 reg_names[SMALL_DATA_REG]);
12618 gcc_assert (!TARGET_TOC);
12620 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == REG)
12622 gcc_assert (REG_P (XEXP (x, 0)));
12623 if (REGNO (XEXP (x, 0)) == 0)
12624 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
12625 reg_names[ REGNO (XEXP (x, 0)) ]);
12627 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
12628 reg_names[ REGNO (XEXP (x, 1)) ]);
12630 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
12631 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
12632 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
12634 else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
12635 && CONSTANT_P (XEXP (x, 1)))
12637 output_addr_const (file, XEXP (x, 1));
12638 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
12642 else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
12643 && CONSTANT_P (XEXP (x, 1)))
12645 fprintf (file, "lo16(");
12646 output_addr_const (file, XEXP (x, 1));
12647 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
12650 else if (legitimate_constant_pool_address_p (x))
12652 output_addr_const (file, XEXP (x, 1));
12653 fprintf (file, "(%s)", reg_names[REGNO (XEXP (x, 0))]);
12656 gcc_unreachable ();
12659 /* Implement OUTPUT_ADDR_CONST_EXTRA for address X. */
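/* The common case below is UNSPEC_TOCREL, a TOC-relative reference: the
   wrapped SYMBOL_REF is printed and then, depending on the target,
   either the TOC base label is subtracted (non-AIX, or ELF with
   -mminimal-toc) or an "@toc" suffix is appended (other ELF targets).
   The Darwin-only UNSPEC_MACHOPIC_OFFSET case similarly prints the
   symbol relative to the Mach-O PIC base.  */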
12662 rs6000_output_addr_const_extra (FILE *file, rtx x)
12664 if (GET_CODE (x) == UNSPEC)
12665 switch (XINT (x, 1))
12667 case UNSPEC_TOCREL:
12668 x = XVECEXP (x, 0, 0);
12669 gcc_assert (GET_CODE (x) == SYMBOL_REF);
12670 output_addr_const (file, x);
12671 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
12674 assemble_name (file, toc_label_name);
12676 else if (TARGET_ELF)
12677 fputs ("@toc", file);
12681 case UNSPEC_MACHOPIC_OFFSET:
12682 output_addr_const (file, XVECEXP (x, 0, 0));
12684 machopic_output_function_base_name (file);
12691 /* Target hook for assembling integer objects. The PowerPC version has
12692 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
is defined.  It also needs to handle DI-mode objects on 64-bit
   hosts with 32-bit HOST_WIDE_INT.  */
12697 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
12699 #ifdef RELOCATABLE_NEEDS_FIXUP
12700 /* Special handling for SI values. */
12701 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
12703 static int recurse = 0;
12705 /* For -mrelocatable, we mark all addresses that need to be fixed up
12706 in the .fixup section. */
12707 if (TARGET_RELOCATABLE
12708 && in_section != toc_section
12709 && in_section != text_section
12710 && !unlikely_text_section_p (in_section)
12712 && GET_CODE (x) != CONST_INT
12713 && GET_CODE (x) != CONST_DOUBLE
12719 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
12721 ASM_OUTPUT_LABEL (asm_out_file, buf);
12722 fprintf (asm_out_file, "\t.long\t(");
12723 output_addr_const (asm_out_file, x);
12724 fprintf (asm_out_file, ")@fixup\n");
12725 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
12726 ASM_OUTPUT_ALIGN (asm_out_file, 2);
12727 fprintf (asm_out_file, "\t.long\t");
12728 assemble_name (asm_out_file, buf);
12729 fprintf (asm_out_file, "\n\t.previous\n");
12733 /* Remove initial .'s to turn a -mcall-aixdesc function
address into the address of the descriptor, not the function
   itself.  */
12736 else if (GET_CODE (x) == SYMBOL_REF
12737 && XSTR (x, 0)[0] == '.'
12738 && DEFAULT_ABI == ABI_AIX)
12740 const char *name = XSTR (x, 0);
12741 while (*name == '.')
12744 fprintf (asm_out_file, "\t.long\t%s\n", name);
12748 #endif /* RELOCATABLE_NEEDS_FIXUP */
12749 return default_assemble_integer (x, size, aligned_p);
12752 #ifdef HAVE_GAS_HIDDEN
12753 /* Emit an assembler directive to set symbol visibility for DECL to
12754 VISIBILITY_TYPE. */
12757 rs6000_assemble_visibility (tree decl, int vis)
12759 /* Functions need to have their entry point symbol visibility set as
12760 well as their descriptor symbol visibility. */
12761 if (DEFAULT_ABI == ABI_AIX
12763 && TREE_CODE (decl) == FUNCTION_DECL)
12765 static const char * const visibility_types[] = {
12766 NULL, "internal", "hidden", "protected"
12769 const char *name, *type;
12771 name = ((* targetm.strip_name_encoding)
12772 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
12773 type = visibility_types[vis];
12775 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
12776 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
12779 default_assemble_visibility (decl, vis);
12784 rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
/* Reversal of FP compares needs care -- an ordered compare
   becomes an unordered compare and vice versa.  */
12788 if (mode == CCFPmode
12789 && (!flag_finite_math_only
12790 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
12791 || code == UNEQ || code == LTGT))
12792 return reverse_condition_maybe_unordered (code);
12794 return reverse_condition (code);
12797 /* Generate a compare for CODE. Return a brand-new rtx that
12798 represents the result of the compare. */
12801 rs6000_generate_compare (rtx cmp, enum machine_mode mode)
12803 enum machine_mode comp_mode;
12804 rtx compare_result;
12805 enum rtx_code code = GET_CODE (cmp);
12806 rtx op0 = XEXP (cmp, 0);
12807 rtx op1 = XEXP (cmp, 1);
12809 if (FLOAT_MODE_P (mode))
12810 comp_mode = CCFPmode;
12811 else if (code == GTU || code == LTU
12812 || code == GEU || code == LEU)
12813 comp_mode = CCUNSmode;
12814 else if ((code == EQ || code == NE)
12815 && GET_CODE (op0) == SUBREG
12816 && GET_CODE (op1) == SUBREG
12817 && SUBREG_PROMOTED_UNSIGNED_P (op0)
12818 && SUBREG_PROMOTED_UNSIGNED_P (op1))
12819 /* These are unsigned values, perhaps there will be a later
12820 ordering compare that can be shared with this one.
12821 Unfortunately we cannot detect the signedness of the operands
12822 for non-subregs. */
12823 comp_mode = CCUNSmode;
12825 comp_mode = CCmode;
12827 /* First, the compare. */
12828 compare_result = gen_reg_rtx (comp_mode);
12830 /* E500 FP compare instructions on the GPRs. Yuck! */
12831 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
12832 && FLOAT_MODE_P (mode))
12834 rtx cmp, or_result, compare_result2;
12835 enum machine_mode op_mode = GET_MODE (op0);
12837 if (op_mode == VOIDmode)
12838 op_mode = GET_MODE (op1);
12840 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
12841 This explains the following mess. */
12845 case EQ: case UNEQ: case NE: case LTGT:
12849 cmp = (flag_finite_math_only && !flag_trapping_math)
12850 ? gen_tstsfeq_gpr (compare_result, op0, op1)
12851 : gen_cmpsfeq_gpr (compare_result, op0, op1);
12855 cmp = (flag_finite_math_only && !flag_trapping_math)
12856 ? gen_tstdfeq_gpr (compare_result, op0, op1)
12857 : gen_cmpdfeq_gpr (compare_result, op0, op1);
12861 cmp = (flag_finite_math_only && !flag_trapping_math)
12862 ? gen_tsttfeq_gpr (compare_result, op0, op1)
12863 : gen_cmptfeq_gpr (compare_result, op0, op1);
12867 gcc_unreachable ();
12871 case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
12875 cmp = (flag_finite_math_only && !flag_trapping_math)
12876 ? gen_tstsfgt_gpr (compare_result, op0, op1)
12877 : gen_cmpsfgt_gpr (compare_result, op0, op1);
12881 cmp = (flag_finite_math_only && !flag_trapping_math)
12882 ? gen_tstdfgt_gpr (compare_result, op0, op1)
12883 : gen_cmpdfgt_gpr (compare_result, op0, op1);
12887 cmp = (flag_finite_math_only && !flag_trapping_math)
12888 ? gen_tsttfgt_gpr (compare_result, op0, op1)
12889 : gen_cmptfgt_gpr (compare_result, op0, op1);
12893 gcc_unreachable ();
12897 case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
12901 cmp = (flag_finite_math_only && !flag_trapping_math)
12902 ? gen_tstsflt_gpr (compare_result, op0, op1)
12903 : gen_cmpsflt_gpr (compare_result, op0, op1);
12907 cmp = (flag_finite_math_only && !flag_trapping_math)
12908 ? gen_tstdflt_gpr (compare_result, op0, op1)
12909 : gen_cmpdflt_gpr (compare_result, op0, op1);
12913 cmp = (flag_finite_math_only && !flag_trapping_math)
12914 ? gen_tsttflt_gpr (compare_result, op0, op1)
12915 : gen_cmptflt_gpr (compare_result, op0, op1);
12919 gcc_unreachable ();
12923 gcc_unreachable ();
12926 /* Synthesize LE and GE from LT/GT || EQ. */
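/* For example, a <= b is computed as (a < b) || (a == b): the EQ half is
   generated into a second CR field just below and the two results are
   combined with gen_e500_cr_ior_compare.  */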
12927 if (code == LE || code == GE || code == LEU || code == GEU)
12933 case LE: code = LT; break;
12934 case GE: code = GT; break;
12935 case LEU: code = LT; break;
12936 case GEU: code = GT; break;
12937 default: gcc_unreachable ();
12940 compare_result2 = gen_reg_rtx (CCFPmode);
12946 cmp = (flag_finite_math_only && !flag_trapping_math)
12947 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
12948 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
12952 cmp = (flag_finite_math_only && !flag_trapping_math)
12953 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
12954 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
12958 cmp = (flag_finite_math_only && !flag_trapping_math)
12959 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
12960 : gen_cmptfeq_gpr (compare_result2, op0, op1);
12964 gcc_unreachable ();
12968 /* OR them together. */
12969 or_result = gen_reg_rtx (CCFPmode);
12970 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
12972 compare_result = or_result;
12977 if (code == NE || code == LTGT)
12987 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
12988 CLOBBERs to match cmptf_internal2 pattern. */
12989 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
12990 && GET_MODE (op0) == TFmode
12991 && !TARGET_IEEEQUAD
12992 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
12993 emit_insn (gen_rtx_PARALLEL (VOIDmode,
12995 gen_rtx_SET (VOIDmode,
12997 gen_rtx_COMPARE (comp_mode, op0, op1)),
12998 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
12999 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
13000 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
13001 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
13002 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
13003 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
13004 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
13005 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)))));
13006 else if (GET_CODE (op1) == UNSPEC
13007 && XINT (op1, 1) == UNSPEC_SP_TEST)
13009 rtx op1b = XVECEXP (op1, 0, 0);
13010 comp_mode = CCEQmode;
13011 compare_result = gen_reg_rtx (CCEQmode);
13013 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
13015 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
13018 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
13019 gen_rtx_COMPARE (comp_mode, op0, op1)));
13022 /* Some kinds of FP comparisons need an OR operation;
13023 under flag_finite_math_only we don't bother. */
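/* e.g. on CCFPmode a GE test is really (GT || EQ) and LTGT is (LT || GT);
   the two condition bits are IORed into a CCEQmode result below, which is
   what the eventual branch then tests.  */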
13024 if (FLOAT_MODE_P (mode)
13025 && !flag_finite_math_only
13026 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
13027 && (code == LE || code == GE
13028 || code == UNEQ || code == LTGT
13029 || code == UNGT || code == UNLT))
13031 enum rtx_code or1, or2;
13032 rtx or1_rtx, or2_rtx, compare2_rtx;
13033 rtx or_result = gen_reg_rtx (CCEQmode);
13037 case LE: or1 = LT; or2 = EQ; break;
13038 case GE: or1 = GT; or2 = EQ; break;
13039 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
13040 case LTGT: or1 = LT; or2 = GT; break;
13041 case UNGT: or1 = UNORDERED; or2 = GT; break;
13042 case UNLT: or1 = UNORDERED; or2 = LT; break;
13043 default: gcc_unreachable ();
13045 validate_condition_mode (or1, comp_mode);
13046 validate_condition_mode (or2, comp_mode);
13047 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
13048 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
13049 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
13050 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
13052 emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
13054 compare_result = or_result;
13058 validate_condition_mode (code, GET_MODE (compare_result));
13060 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
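/* A minimal usage sketch: the rtx returned above is always a comparison of
   a CC register against const0_rtx, so a caller typically does

     rtx cond = rs6000_generate_compare (cmp, mode);
     emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
				  gen_rtx_IF_THEN_ELSE (VOIDmode, cond,
							loc_ref, pc_rtx)));

   (loc_ref being a LABEL_REF for the target), which is essentially what
   rs6000_emit_cbranch below does; rs6000_emit_sCOND instead materializes
   the condition into a GPR.  */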
13064 /* Emit the RTL for an sCOND pattern. */
13067 rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
13070 enum machine_mode op_mode;
13071 enum rtx_code cond_code;
13072 rtx result = operands[0];
13074 condition_rtx = rs6000_generate_compare (operands[1], mode);
13075 cond_code = GET_CODE (condition_rtx);
13077 if (FLOAT_MODE_P (mode)
13078 && !TARGET_FPRS && TARGET_HARD_FLOAT)
13082 PUT_MODE (condition_rtx, SImode);
13083 t = XEXP (condition_rtx, 0);
13085 gcc_assert (cond_code == NE || cond_code == EQ);
13087 if (cond_code == NE)
13088 emit_insn (gen_e500_flip_gt_bit (t, t));
13090 emit_insn (gen_move_from_CR_gt_bit (result, t));
13094 if (cond_code == NE
13095 || cond_code == GE || cond_code == LE
13096 || cond_code == GEU || cond_code == LEU
13097 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
13099 rtx not_result = gen_reg_rtx (CCEQmode);
13100 rtx not_op, rev_cond_rtx;
13101 enum machine_mode cc_mode;
13103 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
13105 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
13106 SImode, XEXP (condition_rtx, 0), const0_rtx);
13107 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
13108 emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
13109 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
13112 op_mode = GET_MODE (XEXP (operands[1], 0));
13113 if (op_mode == VOIDmode)
13114 op_mode = GET_MODE (XEXP (operands[1], 1));
13116 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
13118 PUT_MODE (condition_rtx, DImode);
13119 convert_move (result, condition_rtx, 0);
13123 PUT_MODE (condition_rtx, SImode);
13124 emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
13128 /* Emit a branch of kind CODE to location LOC. */
13131 rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
13133 rtx condition_rtx, loc_ref;
13135 condition_rtx = rs6000_generate_compare (operands[0], mode);
13136 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
13137 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
13138 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
13139 loc_ref, pc_rtx)));
13142 /* Return the string to output a conditional branch to LABEL, which is
13143 the assembler text for the branch target, or NULL if the branch is
13144 really a conditional return.
13146 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
13147 condition code register and its mode specifies what kind of
13148 comparison we made.
13150 REVERSED is nonzero if we should reverse the sense of the comparison.
13152 INSN is the insn. */
13155 output_cbranch (rtx op, const char *label, int reversed, rtx insn)
13157 static char string[64];
13158 enum rtx_code code = GET_CODE (op);
13159 rtx cc_reg = XEXP (op, 0);
13160 enum machine_mode mode = GET_MODE (cc_reg);
13161 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
13162 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
13163 int really_reversed = reversed ^ need_longbranch;
13169 validate_condition_mode (code, mode);
13171 /* Work out which way this really branches. We could always use
13172 reverse_condition_maybe_unordered here, but using the plain reversal
13173 when possible makes the resulting assembler clearer. */
13174 if (really_reversed)
13176 /* Reversing an FP compare needs care -- an ordered compare
13177 becomes an unordered compare and vice versa. */
13178 if (mode == CCFPmode)
13179 code = reverse_condition_maybe_unordered (code);
13181 code = reverse_condition (code);
13184 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
13186 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely to the GT bit. */
13191 /* Opposite of GT. */
13200 gcc_unreachable ();
13206 /* Not all of these are actually distinct opcodes, but
13207 we distinguish them for clarity of the resulting assembler. */
13208 case NE: case LTGT:
13209 ccode = "ne"; break;
13210 case EQ: case UNEQ:
13211 ccode = "eq"; break;
13213 ccode = "ge"; break;
13214 case GT: case GTU: case UNGT:
13215 ccode = "gt"; break;
13217 ccode = "le"; break;
13218 case LT: case LTU: case UNLT:
13219 ccode = "lt"; break;
13220 case UNORDERED: ccode = "un"; break;
13221 case ORDERED: ccode = "nu"; break;
13222 case UNGE: ccode = "nl"; break;
13223 case UNLE: ccode = "ng"; break;
13225 gcc_unreachable ();
13228 /* Maybe we have a guess as to how likely the branch is.
13229 The old mnemonics don't have a way to specify this information. */
13231 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
13232 if (note != NULL_RTX)
13234 /* PROB is the difference from 50%. */
13235 int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;
13237 /* Only hint for highly probable/improbable branches on newer
13238 cpus as static prediction overrides processor dynamic
13239 prediction. For older cpus we may as well always hint, but
13240 assume not taken for branches that are very close to 50% as a
13241 mispredicted taken branch is more expensive than a
13242 mispredicted not-taken branch. */
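/* The chosen hint ends up as a "+" (predict taken) or "-" (predict not
   taken) suffix on the mnemonic, so the final output looks roughly like
   "bne+ 0,.L5", or "beq- 0,$+8" followed by an unconditional "b .L5" when
   the target is out of conditional-branch range.  */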
13243 if (rs6000_always_hint
13244 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
13245 && br_prob_note_reliable_p (note)))
13247 if (abs (prob) > REG_BR_PROB_BASE / 20
13248 && ((prob > 0) ^ need_longbranch))
13256 s += sprintf (s, "{b%sr|b%slr%s} ", ccode, ccode, pred);
13258 s += sprintf (s, "{b%s|b%s%s} ", ccode, ccode, pred);
13260 /* We need to escape any '%' characters in the reg_names string.
13261 Assume they'd only be the first character.... */
13262 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
13264 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
13268 /* If the branch distance was too far, we may have to use an
13269 unconditional branch to go the distance. */
13270 if (need_longbranch)
13271 s += sprintf (s, ",$+8\n\tb %s", label);
13273 s += sprintf (s, ",%s", label);
13279 /* Return the string to flip the GT bit on a CR. */
13281 output_e500_flip_gt_bit (rtx dst, rtx src)
13283 static char string[64];
13286 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
13287 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
13290 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
13291 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
13293 sprintf (string, "crnot %d,%d", a, b);
13297 /* Return the insn index of the vector compare instruction for the given
13298 CODE, DEST_MODE and OP_MODE. Return INSN_NOT_AVAILABLE if no valid insn is available. */
13302 get_vec_cmp_insn (enum rtx_code code,
13303 enum machine_mode dest_mode,
13304 enum machine_mode op_mode)
13306 if (!TARGET_ALTIVEC)
13307 return INSN_NOT_AVAILABLE;
13312 if (dest_mode == V16QImode && op_mode == V16QImode)
13313 return UNSPEC_VCMPEQUB;
13314 if (dest_mode == V8HImode && op_mode == V8HImode)
13315 return UNSPEC_VCMPEQUH;
13316 if (dest_mode == V4SImode && op_mode == V4SImode)
13317 return UNSPEC_VCMPEQUW;
13318 if (dest_mode == V4SImode && op_mode == V4SFmode)
13319 return UNSPEC_VCMPEQFP;
13322 if (dest_mode == V4SImode && op_mode == V4SFmode)
13323 return UNSPEC_VCMPGEFP;
13325 if (dest_mode == V16QImode && op_mode == V16QImode)
13326 return UNSPEC_VCMPGTSB;
13327 if (dest_mode == V8HImode && op_mode == V8HImode)
13328 return UNSPEC_VCMPGTSH;
13329 if (dest_mode == V4SImode && op_mode == V4SImode)
13330 return UNSPEC_VCMPGTSW;
13331 if (dest_mode == V4SImode && op_mode == V4SFmode)
13332 return UNSPEC_VCMPGTFP;
13335 if (dest_mode == V16QImode && op_mode == V16QImode)
13336 return UNSPEC_VCMPGTUB;
13337 if (dest_mode == V8HImode && op_mode == V8HImode)
13338 return UNSPEC_VCMPGTUH;
13339 if (dest_mode == V4SImode && op_mode == V4SImode)
13340 return UNSPEC_VCMPGTUW;
13345 return INSN_NOT_AVAILABLE;
13348 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
13349 DMODE is expected destination mode. This is a recursive function. */
13352 rs6000_emit_vector_compare (enum rtx_code rcode,
13354 enum machine_mode dmode)
13358 enum machine_mode dest_mode;
13359 enum machine_mode op_mode = GET_MODE (op1);
13361 gcc_assert (TARGET_ALTIVEC);
13362 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
13364 /* Floating point vector compare instructions use a V4SImode destination.
13365 Move the result to the appropriate mode later. */
13366 if (dmode == V4SFmode)
13367 dest_mode = V4SImode;
13371 mask = gen_reg_rtx (dest_mode);
13372 vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);
13374 if (vec_cmp_insn == INSN_NOT_AVAILABLE)
13376 bool swap_operands = false;
13377 bool try_again = false;
13382 swap_operands = true;
13387 swap_operands = true;
13395 /* Invert condition and try again.
13396 e.g., A != B becomes ~(A==B). */
13398 enum rtx_code rev_code;
13399 enum insn_code nor_code;
13402 rev_code = reverse_condition_maybe_unordered (rcode);
13403 eq_rtx = rs6000_emit_vector_compare (rev_code, op0, op1,
13406 nor_code = optab_handler (one_cmpl_optab, (int)dest_mode)->insn_code;
13407 gcc_assert (nor_code != CODE_FOR_nothing);
13408 emit_insn (GEN_FCN (nor_code) (mask, eq_rtx));
13410 if (dmode != dest_mode)
13412 rtx temp = gen_reg_rtx (dest_mode);
13413 convert_move (temp, mask, 0);
13423 /* Try GT/GTU/LT/LTU OR EQ */
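/* e.g. a >= b (GE) is handled here as (a > b) OR (a == b): both halves are
   computed recursively and then merged with the vector IOR insn obtained
   from ior_optab below.  */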
13426 enum insn_code ior_code;
13427 enum rtx_code new_code;
13448 gcc_unreachable ();
13451 c_rtx = rs6000_emit_vector_compare (new_code,
13452 op0, op1, dest_mode);
13453 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1,
13456 ior_code = optab_handler (ior_optab, (int)dest_mode)->insn_code;
13457 gcc_assert (ior_code != CODE_FOR_nothing);
13458 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
13459 if (dmode != dest_mode)
13461 rtx temp = gen_reg_rtx (dest_mode);
13462 convert_move (temp, mask, 0);
13469 gcc_unreachable ();
13474 vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);
13475 /* You only get two chances. */
13476 gcc_assert (vec_cmp_insn != INSN_NOT_AVAILABLE);
13488 emit_insn (gen_rtx_SET (VOIDmode, mask,
13489 gen_rtx_UNSPEC (dest_mode,
13490 gen_rtvec (2, op0, op1),
13492 if (dmode != dest_mode)
13494 rtx temp = gen_reg_rtx (dest_mode);
13495 convert_move (temp, mask, 0);
13501 /* Return the vector select instruction for MODE. Return INSN_NOT_AVAILABLE
13502 if no valid insn exists for the given mode. */
13505 get_vsel_insn (enum machine_mode mode)
13510 return UNSPEC_VSEL4SI;
13513 return UNSPEC_VSEL4SF;
13516 return UNSPEC_VSEL8HI;
13519 return UNSPEC_VSEL16QI;
13522 return INSN_NOT_AVAILABLE;
13525 return INSN_NOT_AVAILABLE;
13528 /* Emit vector select insn where DEST is destination using
13529 operands OP1, OP2 and MASK. */
13532 rs6000_emit_vector_select (rtx dest, rtx op1, rtx op2, rtx mask)
13535 enum machine_mode dest_mode = GET_MODE (dest);
13536 int vsel_insn_index = get_vsel_insn (GET_MODE (dest));
13538 temp = gen_reg_rtx (dest_mode);
13540 /* For each vector element, select op1 when the mask bit is 1, otherwise op2. */
13542 t = gen_rtx_SET (VOIDmode, temp,
13543 gen_rtx_UNSPEC (dest_mode,
13544 gen_rtvec (3, op2, op1, mask),
13547 emit_move_insn (dest, temp);
13551 /* Emit vector conditional expression.
13552 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
13553 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
13556 rs6000_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
13557 rtx cond, rtx cc_op0, rtx cc_op1)
13559 enum machine_mode dest_mode = GET_MODE (dest);
13560 enum rtx_code rcode = GET_CODE (cond);
13563 if (!TARGET_ALTIVEC)
13566 /* Get the vector mask for the given relational operations. */
13567 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, dest_mode);
13569 rs6000_emit_vector_select (dest, op1, op2, mask);
13574 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
13575 operands of the last comparison is nonzero/true, FALSE_COND if it
13576 is zero/false. Return 0 if the hardware has no such operation. */
13579 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
13581 enum rtx_code code = GET_CODE (op);
13582 rtx op0 = XEXP (op, 0);
13583 rtx op1 = XEXP (op, 1);
13584 REAL_VALUE_TYPE c1;
13585 enum machine_mode compare_mode = GET_MODE (op0);
13586 enum machine_mode result_mode = GET_MODE (dest);
13588 bool is_against_zero;
13590 /* These modes should always match. */
13591 if (GET_MODE (op1) != compare_mode
13592 /* In the isel case however, we can use a compare immediate, so
13593 op1 may be a small constant. */
13594 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
13596 if (GET_MODE (true_cond) != result_mode)
13598 if (GET_MODE (false_cond) != result_mode)
13601 /* First, work out if the hardware can do this at all, or
13602 if it's too slow.... */
13603 if (!FLOAT_MODE_P (compare_mode))
13606 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
13609 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
13610 && SCALAR_FLOAT_MODE_P (compare_mode))
13613 is_against_zero = op1 == CONST0_RTX (compare_mode);
13615 /* A floating-point subtract might overflow, underflow, or produce
13616 an inexact result, thus changing the floating-point flags, so it
13617 can't be generated if we care about that. It's safe if one side
13618 of the construct is zero, since then no subtract will be generated. */
13620 if (SCALAR_FLOAT_MODE_P (compare_mode)
13621 && flag_trapping_math && ! is_against_zero)
13624 /* Eliminate half of the comparisons by switching operands, this
13625 makes the remaining code simpler. */
13626 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
13627 || code == LTGT || code == LT || code == UNLE)
13629 code = reverse_condition_maybe_unordered (code);
13631 true_cond = false_cond;
13635 /* UNEQ and LTGT take four instructions for a comparison with zero,
13636 it'll probably be faster to use a branch here too. */
13637 if (code == UNEQ && HONOR_NANS (compare_mode))
13640 if (GET_CODE (op1) == CONST_DOUBLE)
13641 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
13643 /* We're going to try to implement comparisons by performing
13644 a subtract, then comparing against zero. Unfortunately,
13645 Inf - Inf is NaN which is not zero, and so if we don't
13646 know that the operand is finite and the comparison
13647 would treat EQ differently from UNORDERED, we can't do it. */
13648 if (HONOR_INFINITIES (compare_mode)
13649 && code != GT && code != UNGE
13650 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
13651 /* Constructs of the form (a OP b ? a : b) are safe. */
13652 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
13653 || (! rtx_equal_p (op0, true_cond)
13654 && ! rtx_equal_p (op1, true_cond))))
13657 /* At this point we know we can use fsel. */
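/* fsel computes  dest = (src >= 0.0) ? true_cond : false_cond,  so the code
   below massages every remaining case into a GE-against-zero test of some
   temporary (negating or taking the absolute value of op0 as needed).  */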
13659 /* Reduce the comparison to a comparison against zero. */
13660 if (! is_against_zero)
13662 temp = gen_reg_rtx (compare_mode);
13663 emit_insn (gen_rtx_SET (VOIDmode, temp,
13664 gen_rtx_MINUS (compare_mode, op0, op1)));
13666 op1 = CONST0_RTX (compare_mode);
13669 /* If we don't care about NaNs we can reduce some of the comparisons
13670 down to faster ones. */
13671 if (! HONOR_NANS (compare_mode))
13677 true_cond = false_cond;
13690 /* Now, reduce everything down to a GE. */
13697 temp = gen_reg_rtx (compare_mode);
13698 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
13703 temp = gen_reg_rtx (compare_mode);
13704 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
13709 temp = gen_reg_rtx (compare_mode);
13710 emit_insn (gen_rtx_SET (VOIDmode, temp,
13711 gen_rtx_NEG (compare_mode,
13712 gen_rtx_ABS (compare_mode, op0))));
13717 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
13718 temp = gen_reg_rtx (result_mode);
13719 emit_insn (gen_rtx_SET (VOIDmode, temp,
13720 gen_rtx_IF_THEN_ELSE (result_mode,
13721 gen_rtx_GE (VOIDmode,
13723 true_cond, false_cond)));
13724 false_cond = true_cond;
13727 temp = gen_reg_rtx (compare_mode);
13728 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
13733 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
13734 temp = gen_reg_rtx (result_mode);
13735 emit_insn (gen_rtx_SET (VOIDmode, temp,
13736 gen_rtx_IF_THEN_ELSE (result_mode,
13737 gen_rtx_GE (VOIDmode,
13739 true_cond, false_cond)));
13740 true_cond = false_cond;
13743 temp = gen_reg_rtx (compare_mode);
13744 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
13749 gcc_unreachable ();
13752 emit_insn (gen_rtx_SET (VOIDmode, dest,
13753 gen_rtx_IF_THEN_ELSE (result_mode,
13754 gen_rtx_GE (VOIDmode,
13756 true_cond, false_cond)));
13760 /* Same as above, but for ints (isel). */
13763 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
13765 rtx condition_rtx, cr;
13767 /* All isel implementations thus far are 32-bit. */
13768 if (GET_MODE (XEXP (op, 0)) != SImode)
13771 /* We still have to do the compare, because isel doesn't do a
13772 compare, it just looks at the CRx bits set by a previous compare instruction. */
13774 condition_rtx = rs6000_generate_compare (op, SImode);
13775 cr = XEXP (condition_rtx, 0);
13777 if (GET_MODE (cr) == CCmode)
13778 emit_insn (gen_isel_signed (dest, condition_rtx,
13779 true_cond, false_cond, cr));
13781 emit_insn (gen_isel_unsigned (dest, condition_rtx,
13782 true_cond, false_cond, cr));
13788 output_isel (rtx *operands)
13790 enum rtx_code code;
13792 code = GET_CODE (operands[1]);
13793 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
13795 PUT_CODE (operands[1], reverse_condition (code));
13796 return "isel %0,%3,%2,%j1";
13799 return "isel %0,%2,%3,%j1";
13803 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
13805 enum machine_mode mode = GET_MODE (op0);
13809 if (code == SMAX || code == SMIN)
13814 if (code == SMAX || code == UMAX)
13815 target = emit_conditional_move (dest, c, op0, op1, mode,
13816 op0, op1, mode, 0);
13818 target = emit_conditional_move (dest, c, op0, op1, mode,
13819 op1, op0, mode, 0);
13820 gcc_assert (target);
13821 if (target != dest)
13822 emit_move_insn (dest, target);
13825 /* Emit instructions to perform a load-reserved/store-conditional operation.
13826 The operation performed is an atomic
13827 (set M (CODE:MODE M OP))
13828 If not NULL, BEFORE is atomically set to M before the operation, and
13829 AFTER is set to M after the operation (that is, (CODE:MODE M OP)).
13830 If SYNC_P then a memory barrier is emitted before the operation.
13831 Either OP or M may be wrapped in a NOT operation. */
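/* Roughly, for a word-sized M with SYNC_P set, the insns emitted here split
   into the usual PowerPC load-reserved/store-conditional idiom (register
   names are illustrative only):

	lwsync
     1:	lwarx   rBEFORE,0,rM
	<op>    rAFTER,rBEFORE,rOP
	stwcx.  rAFTER,0,rM
	bne-    1b
	isync
   */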
13834 rs6000_emit_sync (enum rtx_code code, enum machine_mode mode,
13835 rtx m, rtx op, rtx before_param, rtx after_param,
13838 enum machine_mode used_mode;
13839 rtx the_op, set_before, set_after, set_atomic, cc_scratch, before, after;
13842 HOST_WIDE_INT imask = GET_MODE_MASK (mode);
13843 rtx shift = NULL_RTX;
13846 emit_insn (gen_lwsync ());
13850 /* If this is smaller than SImode, we'll have to use SImode with shift-and-mask adjustments. */
13852 if (mode == QImode || mode == HImode)
13856 if (MEM_ALIGN (used_m) >= 32)
13859 if (BYTES_BIG_ENDIAN)
13860 ishift = GET_MODE_BITSIZE (SImode) - GET_MODE_BITSIZE (mode);
13862 shift = GEN_INT (ishift);
13863 used_m = change_address (used_m, SImode, 0);
13867 rtx addrSI, aligned_addr;
13868 int shift_mask = mode == QImode ? 0x18 : 0x10;
13870 addrSI = gen_lowpart_common (SImode,
13871 force_reg (Pmode, XEXP (used_m, 0)));
13872 addrSI = force_reg (SImode, addrSI);
13873 shift = gen_reg_rtx (SImode);
13875 emit_insn (gen_rlwinm (shift, addrSI, GEN_INT (3),
13876 GEN_INT (shift_mask)));
13877 emit_insn (gen_xorsi3 (shift, shift, GEN_INT (shift_mask)));
13879 aligned_addr = expand_binop (Pmode, and_optab,
13881 GEN_INT (-4), NULL_RTX,
13882 1, OPTAB_LIB_WIDEN);
13883 used_m = change_address (used_m, SImode, aligned_addr);
13884 set_mem_align (used_m, 32);
13886 /* It's safe to keep the old alias set of USED_M, because
13887 the operation is atomic and only affects the original USED_M. */
13891 if (GET_CODE (op) == NOT)
13893 oldop = lowpart_subreg (SImode, XEXP (op, 0), mode);
13894 oldop = gen_rtx_NOT (SImode, oldop);
13897 oldop = lowpart_subreg (SImode, op, mode);
13903 newop = expand_binop (SImode, and_optab,
13904 oldop, GEN_INT (imask), NULL_RTX,
13905 1, OPTAB_LIB_WIDEN);
13906 emit_insn (gen_ashlsi3 (newop, newop, shift));
13909 case NOT: /* NAND */
13910 newop = expand_binop (SImode, ior_optab,
13911 oldop, GEN_INT (~imask), NULL_RTX,
13912 1, OPTAB_LIB_WIDEN);
13913 emit_insn (gen_rotlsi3 (newop, newop, shift));
13917 newop = expand_binop (SImode, ior_optab,
13918 oldop, GEN_INT (~imask), NULL_RTX,
13919 1, OPTAB_LIB_WIDEN);
13920 emit_insn (gen_rotlsi3 (newop, newop, shift));
13928 newop = expand_binop (SImode, and_optab,
13929 oldop, GEN_INT (imask), NULL_RTX,
13930 1, OPTAB_LIB_WIDEN);
13931 emit_insn (gen_ashlsi3 (newop, newop, shift));
13933 mask = gen_reg_rtx (SImode);
13934 emit_move_insn (mask, GEN_INT (imask));
13935 emit_insn (gen_ashlsi3 (mask, mask, shift));
13938 newop = gen_rtx_PLUS (SImode, m, newop);
13940 newop = gen_rtx_MINUS (SImode, m, newop);
13941 newop = gen_rtx_AND (SImode, newop, mask);
13942 newop = gen_rtx_IOR (SImode, newop,
13943 gen_rtx_AND (SImode,
13944 gen_rtx_NOT (SImode, mask),
13950 gcc_unreachable ();
13954 used_mode = SImode;
13955 before = gen_reg_rtx (used_mode);
13956 after = gen_reg_rtx (used_mode);
13961 before = before_param;
13962 after = after_param;
13964 if (before == NULL_RTX)
13965 before = gen_reg_rtx (used_mode);
13966 if (after == NULL_RTX)
13967 after = gen_reg_rtx (used_mode);
13970 if ((code == PLUS || code == MINUS)
13971 && used_mode != mode)
13972 the_op = op; /* Computed above. */
13973 else if (GET_CODE (op) == NOT && GET_CODE (m) != NOT)
13974 the_op = gen_rtx_fmt_ee (code, used_mode, op, m);
13975 else if (code == NOT)
13976 the_op = gen_rtx_fmt_ee (IOR, used_mode,
13977 gen_rtx_NOT (used_mode, m),
13978 gen_rtx_NOT (used_mode, op));
13980 the_op = gen_rtx_fmt_ee (code, used_mode, m, op);
13982 set_after = gen_rtx_SET (VOIDmode, after, the_op);
13983 set_before = gen_rtx_SET (VOIDmode, before, used_m);
13984 set_atomic = gen_rtx_SET (VOIDmode, used_m,
13985 gen_rtx_UNSPEC (used_mode,
13986 gen_rtvec (1, the_op),
13988 cc_scratch = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
13990 if ((code == PLUS || code == MINUS) && used_mode != mode)
13991 vec = gen_rtvec (5, set_after, set_before, set_atomic, cc_scratch,
13992 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)));
13994 vec = gen_rtvec (4, set_after, set_before, set_atomic, cc_scratch);
13995 emit_insn (gen_rtx_PARALLEL (VOIDmode, vec));
13997 /* Shift and mask the return values properly. */
13998 if (used_mode != mode && before_param)
14000 emit_insn (gen_lshrsi3 (before, before, shift));
14001 convert_move (before_param, before, 1);
14004 if (used_mode != mode && after_param)
14006 emit_insn (gen_lshrsi3 (after, after, shift));
14007 convert_move (after_param, after, 1);
14010 /* The previous sequence will end with a branch that's dependent on
14011 the conditional store, so placing an isync will ensure that no
14012 other instructions (especially, no load or store instructions)
14013 can start before the atomic operation completes. */
14015 emit_insn (gen_isync ());
14018 /* A subroutine of the atomic operation splitters. Jump to LABEL if
14019 COND is true. Mark the jump as unlikely to be taken. */
14022 emit_unlikely_jump (rtx cond, rtx label)
14024 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
14027 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
14028 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
14029 add_reg_note (x, REG_BR_PROB, very_unlikely);
14032 /* A subroutine of the atomic operation splitters. Emit a load-locked
14033 instruction in MODE. */
14036 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
14038 rtx (*fn) (rtx, rtx) = NULL;
14039 if (mode == SImode)
14040 fn = gen_load_locked_si;
14041 else if (mode == DImode)
14042 fn = gen_load_locked_di;
14043 emit_insn (fn (reg, mem));
14046 /* A subroutine of the atomic operation splitters. Emit a store-conditional
14047 instruction in MODE. */
14050 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
14052 rtx (*fn) (rtx, rtx, rtx) = NULL;
14053 if (mode == SImode)
14054 fn = gen_store_conditional_si;
14055 else if (mode == DImode)
14056 fn = gen_store_conditional_di;
14058 /* Emit sync before stwcx. to address PPC405 Erratum. */
14059 if (PPC405_ERRATUM77)
14060 emit_insn (gen_memory_barrier ());
14062 emit_insn (fn (res, mem, val));
14065 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
14066 to perform. MEM is the memory on which to operate. VAL is the second
14067 operand of the binary operator. BEFORE and AFTER are optional locations to
14068 return the value of MEM either before or after the operation. SCRATCH is
14069 a scratch register. */
14072 rs6000_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
14073 rtx before, rtx after, rtx scratch)
14075 enum machine_mode mode = GET_MODE (mem);
14076 rtx label, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
14078 emit_insn (gen_lwsync ());
14080 label = gen_label_rtx ();
14081 emit_label (label);
14082 label = gen_rtx_LABEL_REF (VOIDmode, label);
14084 if (before == NULL_RTX)
14086 emit_load_locked (mode, before, mem);
14089 x = gen_rtx_IOR (mode,
14090 gen_rtx_NOT (mode, before),
14091 gen_rtx_NOT (mode, val));
14092 else if (code == AND)
14093 x = gen_rtx_UNSPEC (mode, gen_rtvec (2, before, val), UNSPEC_AND);
14095 x = gen_rtx_fmt_ee (code, mode, before, val);
14097 if (after != NULL_RTX)
14098 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
14099 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14101 emit_store_conditional (mode, cond, mem, scratch);
14103 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
14104 emit_unlikely_jump (x, label);
14106 emit_insn (gen_isync ());
14109 /* Expand an atomic compare and swap operation. MEM is the memory on which
14110 to operate. OLDVAL is the old value to be compared. NEWVAL is the new
14111 value to be stored. SCRATCH is a scratch GPR. */
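/* A sketch of the sequence this splitter produces (illustrative register
   names, SImode case):

	lwsync
     1:	lwarx   rRET,0,rMEM
	cmpw    cr0,rRET,rOLD
	bne-    2f
	mr      rSCRATCH,rNEW
	stwcx.  rSCRATCH,0,rMEM
	bne-    1b
	isync
     2:
   */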
14114 rs6000_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
14117 enum machine_mode mode = GET_MODE (mem);
14118 rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
14120 emit_insn (gen_lwsync ());
14122 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
14123 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
14124 emit_label (XEXP (label1, 0));
14126 emit_load_locked (mode, retval, mem);
14128 x = gen_rtx_COMPARE (CCmode, retval, oldval);
14129 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
14131 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
14132 emit_unlikely_jump (x, label2);
14134 emit_move_insn (scratch, newval);
14135 emit_store_conditional (mode, cond, mem, scratch);
14137 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
14138 emit_unlikely_jump (x, label1);
14140 emit_insn (gen_isync ());
14141 emit_label (XEXP (label2, 0));
14144 /* Expand an atomic test and set operation. MEM is the memory on which
14145 to operate. VAL is the value set. SCRATCH is a scratch GPR. */
14148 rs6000_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
14150 enum machine_mode mode = GET_MODE (mem);
14151 rtx label, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
14153 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
14154 emit_label (XEXP (label, 0));
14156 emit_load_locked (mode, retval, mem);
14157 emit_move_insn (scratch, val);
14158 emit_store_conditional (mode, cond, mem, scratch);
14160 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
14161 emit_unlikely_jump (x, label);
14163 emit_insn (gen_isync ());
14167 rs6000_expand_compare_and_swapqhi (rtx dst, rtx mem, rtx oldval, rtx newval)
14169 enum machine_mode mode = GET_MODE (mem);
14170 rtx addrSI, align, wdst, shift, mask;
14171 HOST_WIDE_INT shift_mask = mode == QImode ? 0x18 : 0x10;
14172 HOST_WIDE_INT imask = GET_MODE_MASK (mode);
14174 /* Shift amount for subword relative to aligned word. */
14175 addrSI = force_reg (GET_MODE (XEXP (mem, 0)), XEXP (mem, 0));
14176 addrSI = force_reg (SImode, gen_lowpart_common (SImode, addrSI));
14177 shift = gen_reg_rtx (SImode);
14178 emit_insn (gen_rlwinm (shift, addrSI, GEN_INT (3),
14179 GEN_INT (shift_mask)));
14180 emit_insn (gen_xorsi3 (shift, shift, GEN_INT (shift_mask)));
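/* e.g. for a QImode operand at big-endian byte offset 1 within its aligned
   word, SHIFT becomes ((1 * 8) & 0x18) ^ 0x18 = 16, so the subword ends up
   occupying bits 16..23 of the SImode word.  */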
14182 /* Shift and mask old value into position within word. */
14183 oldval = convert_modes (SImode, mode, oldval, 1);
14184 oldval = expand_binop (SImode, and_optab,
14185 oldval, GEN_INT (imask), NULL_RTX,
14186 1, OPTAB_LIB_WIDEN);
14187 emit_insn (gen_ashlsi3 (oldval, oldval, shift));
14189 /* Shift and mask new value into position within word. */
14190 newval = convert_modes (SImode, mode, newval, 1);
14191 newval = expand_binop (SImode, and_optab,
14192 newval, GEN_INT (imask), NULL_RTX,
14193 1, OPTAB_LIB_WIDEN);
14194 emit_insn (gen_ashlsi3 (newval, newval, shift));
14196 /* Mask for insertion. */
14197 mask = gen_reg_rtx (SImode);
14198 emit_move_insn (mask, GEN_INT (imask));
14199 emit_insn (gen_ashlsi3 (mask, mask, shift));
14201 /* Address of aligned word containing subword. */
14202 align = expand_binop (Pmode, and_optab, XEXP (mem, 0), GEN_INT (-4),
14203 NULL_RTX, 1, OPTAB_LIB_WIDEN);
14204 mem = change_address (mem, SImode, align);
14205 set_mem_align (mem, 32);
14206 MEM_VOLATILE_P (mem) = 1;
14208 wdst = gen_reg_rtx (SImode);
14209 emit_insn (gen_sync_compare_and_swapqhi_internal (wdst, mask,
14210 oldval, newval, mem));
14212 /* Shift the result back. */
14213 emit_insn (gen_lshrsi3 (wdst, wdst, shift));
14215 emit_move_insn (dst, gen_lowpart (mode, wdst));
14219 rs6000_split_compare_and_swapqhi (rtx dest, rtx mask,
14220 rtx oldval, rtx newval, rtx mem,
14223 rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
14225 emit_insn (gen_lwsync ());
14226 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
14227 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
14228 emit_label (XEXP (label1, 0));
14230 emit_load_locked (SImode, scratch, mem);
14232 /* Mask subword within loaded value for comparison with oldval.
14233 Use UNSPEC_AND to avoid clobber. */
14234 emit_insn (gen_rtx_SET (SImode, dest,
14235 gen_rtx_UNSPEC (SImode,
14236 gen_rtvec (2, scratch, mask),
14239 x = gen_rtx_COMPARE (CCmode, dest, oldval);
14240 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
14242 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
14243 emit_unlikely_jump (x, label2);
14245 /* Clear subword within loaded value for insertion of new value. */
14246 emit_insn (gen_rtx_SET (SImode, scratch,
14247 gen_rtx_AND (SImode,
14248 gen_rtx_NOT (SImode, mask), scratch)));
14249 emit_insn (gen_iorsi3 (scratch, scratch, newval));
14250 emit_store_conditional (SImode, cond, mem, scratch);
14252 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
14253 emit_unlikely_jump (x, label1);
14255 emit_insn (gen_isync ());
14256 emit_label (XEXP (label2, 0));
14260 /* Emit instructions to move SRC to DST. Called by splitters for
14261 multi-register moves. It will emit at most one instruction for
14262 each register that is accessed; that is, it won't emit li/lis pairs
14263 (or equivalent for 64-bit code). One of SRC or DST must be a hard register. */
14267 rs6000_split_multireg_move (rtx dst, rtx src)
14269 /* The register number of the first register being moved. */
14271 /* The mode that is to be moved. */
14272 enum machine_mode mode;
14273 /* The mode that the move is being done in, and its size. */
14274 enum machine_mode reg_mode;
14276 /* The number of registers that will be moved. */
14279 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
14280 mode = GET_MODE (dst);
14281 nregs = hard_regno_nregs[reg][mode];
14282 if (FP_REGNO_P (reg))
14283 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
14284 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
14285 else if (ALTIVEC_REGNO_P (reg))
14286 reg_mode = V16QImode;
14287 else if (TARGET_E500_DOUBLE && mode == TFmode)
14290 reg_mode = word_mode;
14291 reg_mode_size = GET_MODE_SIZE (reg_mode);
14293 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
14295 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
14297 /* Move register range backwards, if we might have destructive overlap. */
14300 for (i = nregs - 1; i >= 0; i--)
14301 emit_insn (gen_rtx_SET (VOIDmode,
14302 simplify_gen_subreg (reg_mode, dst, mode,
14303 i * reg_mode_size),
14304 simplify_gen_subreg (reg_mode, src, mode,
14305 i * reg_mode_size)));
14311 bool used_update = false;
14313 if (MEM_P (src) && INT_REGNO_P (reg))
14317 if (GET_CODE (XEXP (src, 0)) == PRE_INC
14318 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
14321 breg = XEXP (XEXP (src, 0), 0);
14322 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
14323 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
14324 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
14325 emit_insn (TARGET_32BIT
14326 ? gen_addsi3 (breg, breg, delta_rtx)
14327 : gen_adddi3 (breg, breg, delta_rtx));
14328 src = replace_equiv_address (src, breg);
14330 else if (! rs6000_offsettable_memref_p (src))
14333 basereg = gen_rtx_REG (Pmode, reg);
14334 emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
14335 src = replace_equiv_address (src, basereg);
14338 breg = XEXP (src, 0);
14339 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
14340 breg = XEXP (breg, 0);
14342 /* If the base register we are using to address memory is
14343 also a destination reg, then change that register last. */
14345 && REGNO (breg) >= REGNO (dst)
14346 && REGNO (breg) < REGNO (dst) + nregs)
14347 j = REGNO (breg) - REGNO (dst);
14350 if (GET_CODE (dst) == MEM && INT_REGNO_P (reg))
14354 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
14355 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
14358 breg = XEXP (XEXP (dst, 0), 0);
14359 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
14360 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
14361 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
14363 /* We have to update the breg before doing the store.
14364 Use store with update, if available. */
14368 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
14369 emit_insn (TARGET_32BIT
14370 ? (TARGET_POWERPC64
14371 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
14372 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
14373 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
14374 used_update = true;
14377 emit_insn (TARGET_32BIT
14378 ? gen_addsi3 (breg, breg, delta_rtx)
14379 : gen_adddi3 (breg, breg, delta_rtx));
14380 dst = replace_equiv_address (dst, breg);
14383 gcc_assert (rs6000_offsettable_memref_p (dst));
14386 for (i = 0; i < nregs; i++)
14388 /* Calculate index to next subword. */
14393 /* If compiler already emitted move of first word by
14394 store with update, no need to do anything. */
14395 if (j == 0 && used_update)
14398 emit_insn (gen_rtx_SET (VOIDmode,
14399 simplify_gen_subreg (reg_mode, dst, mode,
14400 j * reg_mode_size),
14401 simplify_gen_subreg (reg_mode, src, mode,
14402 j * reg_mode_size)));
14408 /* This page contains routines that are used to determine what the
14409 function prologue and epilogue code will do and write them out. */
14411 /* Return the first fixed-point register that is required to be
14412 saved. 32 if none. */
14415 first_reg_to_save (void)
14419 /* Find lowest numbered live register. */
14420 for (first_reg = 13; first_reg <= 31; first_reg++)
14421 if (df_regs_ever_live_p (first_reg)
14422 && (! call_used_regs[first_reg]
14423 || (first_reg == RS6000_PIC_OFFSET_TABLE_REGNUM
14424 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
14425 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
14426 || (TARGET_TOC && TARGET_MINIMAL_TOC)))))
14431 && crtl->uses_pic_offset_table
14432 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
14433 return RS6000_PIC_OFFSET_TABLE_REGNUM;
14439 /* Similar, for FP regs. */
14442 first_fp_reg_to_save (void)
14446 /* Find lowest numbered live register. */
14447 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
14448 if (df_regs_ever_live_p (first_reg))
14454 /* Similar, for AltiVec regs. */
14457 first_altivec_reg_to_save (void)
14461 /* Stack frame remains as is unless we are in AltiVec ABI. */
14462 if (! TARGET_ALTIVEC_ABI)
14463 return LAST_ALTIVEC_REGNO + 1;
14465 /* On Darwin, the unwind routines are compiled without
14466 TARGET_ALTIVEC, and use save_world to save/restore the
14467 altivec registers when necessary. */
14468 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
14469 && ! TARGET_ALTIVEC)
14470 return FIRST_ALTIVEC_REGNO + 20;
14472 /* Find lowest numbered live register. */
14473 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
14474 if (df_regs_ever_live_p (i))
14480 /* Return a 32-bit mask of the AltiVec registers we need to set in
14481 VRSAVE. Bit n of the return value is 1 if Vn is live; bit 0 is the
14482 most significant bit of the 32-bit word. */
14484 static unsigned int
14485 compute_vrsave_mask (void)
14487 unsigned int i, mask = 0;
14489 /* On Darwin, the unwind routines are compiled without
14490 TARGET_ALTIVEC, and use save_world to save/restore the
14491 call-saved altivec registers when necessary. */
14492 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
14493 && ! TARGET_ALTIVEC)
14496 /* First, find out if we use _any_ altivec registers. */
14497 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
14498 if (df_regs_ever_live_p (i))
14499 mask |= ALTIVEC_REG_BIT (i);
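/* e.g. if only v20 and v31 are ever live, the mask computed so far is
   (0x80000000 >> 20) | (0x80000000 >> 31) = 0x00000801, assuming
   ALTIVEC_REG_BIT uses the big-endian bit numbering described above.  */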
14504 /* Next, remove the argument registers from the set. These must
14505 be in the VRSAVE mask set by the caller, so we don't need to add
14506 them in again. More importantly, the mask we compute here is
14507 used to generate CLOBBERs in the set_vrsave insn, and we do not
14508 wish the argument registers to die. */
14509 for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
14510 mask &= ~ALTIVEC_REG_BIT (i);
14512 /* Similarly, remove the return value from the set. */
14515 diddle_return_value (is_altivec_return_reg, &yes);
14517 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
14523 /* For a very restricted set of circumstances, we can cut down the
14524 size of prologues/epilogues by calling our own save/restore-the-world routines. */
14528 compute_save_world_info (rs6000_stack_t *info_ptr)
14530 info_ptr->world_save_p = 1;
14531 info_ptr->world_save_p
14532 = (WORLD_SAVE_P (info_ptr)
14533 && DEFAULT_ABI == ABI_DARWIN
14534 && ! (cfun->calls_setjmp && flag_exceptions)
14535 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
14536 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
14537 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
14538 && info_ptr->cr_save_p);
14540 /* This will not work in conjunction with sibcalls. Make sure there
14541 are none. (This check is expensive, but seldom executed.) */
14542 if (WORLD_SAVE_P (info_ptr))
14545 for ( insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
14546 if ( GET_CODE (insn) == CALL_INSN
14547 && SIBLING_CALL_P (insn))
14549 info_ptr->world_save_p = 0;
14554 if (WORLD_SAVE_P (info_ptr))
14556 /* Even if we're not touching VRsave, make sure there's room on the
14557 stack for it, if it looks like we're calling SAVE_WORLD, which
14558 will attempt to save it. */
14559 info_ptr->vrsave_size = 4;
14561 /* If we are going to save the world, we need to save the link register too. */
14562 info_ptr->lr_save_p = 1;
14564 /* "Save" the VRsave register too if we're saving the world. */
14565 if (info_ptr->vrsave_mask == 0)
14566 info_ptr->vrsave_mask = compute_vrsave_mask ();
14568 /* Because the Darwin register save/restore routines only handle
14569 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency check. */
14571 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
14572 && (info_ptr->first_altivec_reg_save
14573 >= FIRST_SAVED_ALTIVEC_REGNO));
14580 is_altivec_return_reg (rtx reg, void *xyes)
14582 bool *yes = (bool *) xyes;
14583 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
14588 /* Calculate the stack information for the current function. This is
14589 complicated by having two separate calling sequences, the AIX calling
14590 sequence and the V.4 calling sequence.
14592 AIX (and Darwin/Mac OS X) stack frames look like:
14594 SP----> +---------------------------------------+
14595 | back chain to caller | 0 0
14596 +---------------------------------------+
14597 | saved CR | 4 8 (8-11)
14598 +---------------------------------------+
14600 +---------------------------------------+
14601 | reserved for compilers | 12 24
14602 +---------------------------------------+
14603 | reserved for binders | 16 32
14604 +---------------------------------------+
14605 | saved TOC pointer | 20 40
14606 +---------------------------------------+
14607 | Parameter save area (P) | 24 48
14608 +---------------------------------------+
14609 | Alloca space (A) | 24+P etc.
14610 +---------------------------------------+
14611 | Local variable space (L) | 24+P+A
14612 +---------------------------------------+
14613 | Float/int conversion temporary (X) | 24+P+A+L
14614 +---------------------------------------+
14615 | Save area for AltiVec registers (W) | 24+P+A+L+X
14616 +---------------------------------------+
14617 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
14618 +---------------------------------------+
14619 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
14620 +---------------------------------------+
14621 | Save area for GP registers (G) | 24+P+A+X+L+X+W+Y+Z
14622 +---------------------------------------+
14623 | Save area for FP registers (F) | 24+P+A+X+L+X+W+Y+Z+G
14624 +---------------------------------------+
14625 old SP->| back chain to caller's caller |
14626 +---------------------------------------+
14628 The required alignment for AIX configurations is two words (i.e., 8 or 16 bytes).
14632 V.4 stack frames look like:
14634 SP----> +---------------------------------------+
14635 | back chain to caller | 0
14636 +---------------------------------------+
14637 | caller's saved LR | 4
14638 +---------------------------------------+
14639 | Parameter save area (P) | 8
14640 +---------------------------------------+
14641 | Alloca space (A) | 8+P
14642 +---------------------------------------+
14643 | Varargs save area (V) | 8+P+A
14644 +---------------------------------------+
14645 | Local variable space (L) | 8+P+A+V
14646 +---------------------------------------+
14647 | Float/int conversion temporary (X) | 8+P+A+V+L
14648 +---------------------------------------+
14649 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
14650 +---------------------------------------+
14651 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
14652 +---------------------------------------+
14653 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
14654 +---------------------------------------+
14655 | SPE: area for 64-bit GP registers |
14656 +---------------------------------------+
14657 | SPE alignment padding |
14658 +---------------------------------------+
14659 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
14660 +---------------------------------------+
14661 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
14662 +---------------------------------------+
14663 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
14664 +---------------------------------------+
14665 old SP->| back chain to caller's caller |
14666 +---------------------------------------+
14668 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
14669 given. (But note below and in sysv4.h that we require only 8 and
14670 may round up the size of our stack frame anyway. The historical
14671 reason is early versions of powerpc-linux which didn't properly
14672 align the stack at program startup. A happy side-effect is that
14673 -mno-eabi libraries can be used with -meabi programs.)
14675 The EABI configuration defaults to the V.4 layout. However,
14676 the stack alignment requirements may differ. If -mno-eabi is not
14677 given, the required stack alignment is 8 bytes; if -mno-eabi is
14678 given, the required alignment is 16 bytes. (But see the V.4 comment above.) */
14681 #ifndef ABI_STACK_BOUNDARY
14682 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
14685 static rs6000_stack_t *
14686 rs6000_stack_info (void)
14688 static rs6000_stack_t info;
14689 rs6000_stack_t *info_ptr = &info;
14690 int reg_size = TARGET_32BIT ? 4 : 8;
14694 HOST_WIDE_INT non_fixed_size;
14696 memset (&info, 0, sizeof (info));
14700 /* Cache value so we don't rescan instruction chain over and over. */
14701 if (cfun->machine->insn_chain_scanned_p == 0)
14702 cfun->machine->insn_chain_scanned_p
14703 = spe_func_has_64bit_regs_p () + 1;
14704 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
14707 /* Select which calling sequence. */
14708 info_ptr->abi = DEFAULT_ABI;
14710 /* Calculate which registers need to be saved & save area size. */
14711 info_ptr->first_gp_reg_save = first_reg_to_save ();
14712 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
14713 even if it currently looks like we won't. Reload may need it to
14714 get at a constant; if so, it will have already created a constant
14715 pool entry for it. */
14716 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
14717 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
14718 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
14719 && crtl->uses_const_pool
14720 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
14721 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
14723 first_gp = info_ptr->first_gp_reg_save;
14725 info_ptr->gp_size = reg_size * (32 - first_gp);
14727 /* For the SPE, we have an additional upper 32-bits on each GPR.
14728 Ideally we should save the entire 64-bits only when the upper
14729 half is used in SIMD instructions. Since we only record
14730 registers live (not the size they are used in), this proves
14731 difficult because we'd have to traverse the instruction chain at
14732 the right time, taking reload into account. This is a real pain,
14733 so we opt to always save the GPRs in 64 bits if even one register
14734 gets used in 64 bits. Otherwise, all the registers in the frame
14735 get saved in 32 bits.
14737 So, when we save all GPRs (except the SP) in 64 bits, the
14738 traditional GP save area will be empty. */
14739 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
14740 info_ptr->gp_size = 0;
14742 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
14743 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
14745 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
14746 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
14747 - info_ptr->first_altivec_reg_save);
14749 /* Does this function call anything? */
14750 info_ptr->calls_p = (! current_function_is_leaf
14751 || cfun->machine->ra_needs_full_frame);
14753 /* Determine if we need to save the link register. */
14754 if ((DEFAULT_ABI == ABI_AIX
14756 && !TARGET_PROFILE_KERNEL)
14757 #ifdef TARGET_RELOCATABLE
14758 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
14760 || (info_ptr->first_fp_reg_save != 64
14761 && !FP_SAVE_INLINE (info_ptr->first_fp_reg_save))
14762 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
14763 || info_ptr->calls_p
14764 || rs6000_ra_ever_killed ())
14766 info_ptr->lr_save_p = 1;
14767 df_set_regs_ever_live (LR_REGNO, true);
14770 /* Determine if we need to save the condition code registers. */
14771 if (df_regs_ever_live_p (CR2_REGNO)
14772 || df_regs_ever_live_p (CR3_REGNO)
14773 || df_regs_ever_live_p (CR4_REGNO))
14775 info_ptr->cr_save_p = 1;
14776 if (DEFAULT_ABI == ABI_V4)
14777 info_ptr->cr_size = reg_size;
14780 /* If the current function calls __builtin_eh_return, then we need
14781 to allocate stack space for registers that will hold data for
14782 the exception handler. */
14783 if (crtl->calls_eh_return)
14786 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
14789 /* SPE saves EH registers in 64-bits. */
14790 ehrd_size = i * (TARGET_SPE_ABI
14791 && info_ptr->spe_64bit_regs_used != 0
14792 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
14797 /* Determine various sizes. */
14798 info_ptr->reg_size = reg_size;
14799 info_ptr->fixed_size = RS6000_SAVE_AREA;
14800 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
14801 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
14802 TARGET_ALTIVEC ? 16 : 8);
14803 if (FRAME_GROWS_DOWNWARD)
14804 info_ptr->vars_size
14805 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
14806 + info_ptr->parm_size,
14807 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
14808 - (info_ptr->fixed_size + info_ptr->vars_size
14809 + info_ptr->parm_size);
14811 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
14812 info_ptr->spe_gp_size = 8 * (32 - first_gp);
14814 info_ptr->spe_gp_size = 0;
14816 if (TARGET_ALTIVEC_ABI)
14817 info_ptr->vrsave_mask = compute_vrsave_mask ();
14819 info_ptr->vrsave_mask = 0;
14821 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
14822 info_ptr->vrsave_size = 4;
14824 info_ptr->vrsave_size = 0;
14826 compute_save_world_info (info_ptr);
14828 /* Calculate the offsets. */
14829 switch (DEFAULT_ABI)
14833 gcc_unreachable ();
14837 info_ptr->fp_save_offset = - info_ptr->fp_size;
14838 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
14840 if (TARGET_ALTIVEC_ABI)
14842 info_ptr->vrsave_save_offset
14843 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
14845 /* Align stack so vector save area is on a quadword boundary.
14846 The padding goes above the vectors. */
14847 if (info_ptr->altivec_size != 0)
14848 info_ptr->altivec_padding_size
14849 = info_ptr->vrsave_save_offset & 0xF;
14851 info_ptr->altivec_padding_size = 0;
14853 info_ptr->altivec_save_offset
14854 = info_ptr->vrsave_save_offset
14855 - info_ptr->altivec_padding_size
14856 - info_ptr->altivec_size;
14857 gcc_assert (info_ptr->altivec_size == 0
14858 || info_ptr->altivec_save_offset % 16 == 0);
14860 /* Adjust for AltiVec case. */
14861 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
14864 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
14865 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
14866 info_ptr->lr_save_offset = 2*reg_size;
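/* Worked example (illustrative): a 64-bit AIX function that saves r28-r31
   and f30-f31 gets gp_size = 32 and fp_size = 16, so fp_save_offset = -16
   and gp_save_offset = -48, while cr_save_offset = 8 and lr_save_offset = 16
   refer to the fixed slots in the frame header shown in the diagram above.  */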
14870 info_ptr->fp_save_offset = - info_ptr->fp_size;
14871 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
14872 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
14874 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
14876 /* Align stack so SPE GPR save area is aligned on a
14877 double-word boundary. */
14878 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
14879 info_ptr->spe_padding_size
14880 = 8 - (-info_ptr->cr_save_offset % 8);
14882 info_ptr->spe_padding_size = 0;
14884 info_ptr->spe_gp_save_offset
14885 = info_ptr->cr_save_offset
14886 - info_ptr->spe_padding_size
14887 - info_ptr->spe_gp_size;
14889 /* Adjust for SPE case. */
14890 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
14892 else if (TARGET_ALTIVEC_ABI)
14894 info_ptr->vrsave_save_offset
14895 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
14897 /* Align stack so vector save area is on a quadword boundary. */
14898 if (info_ptr->altivec_size != 0)
14899 info_ptr->altivec_padding_size
14900 = 16 - (-info_ptr->vrsave_save_offset % 16);
14902 info_ptr->altivec_padding_size = 0;
14904 info_ptr->altivec_save_offset
14905 = info_ptr->vrsave_save_offset
14906 - info_ptr->altivec_padding_size
14907 - info_ptr->altivec_size;
14909 /* Adjust for AltiVec case. */
14910 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
14913 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
14914 info_ptr->ehrd_offset -= ehrd_size;
14915 info_ptr->lr_save_offset = reg_size;
14919 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
14920 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
14921 + info_ptr->gp_size
14922 + info_ptr->altivec_size
14923 + info_ptr->altivec_padding_size
14924 + info_ptr->spe_gp_size
14925 + info_ptr->spe_padding_size
14927 + info_ptr->cr_size
14928 + info_ptr->vrsave_size,
14931 non_fixed_size = (info_ptr->vars_size
14932 + info_ptr->parm_size
14933 + info_ptr->save_size);
14935 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
14936 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
14938 /* Determine if we need to allocate any stack frame:
14940 For AIX we need to push the stack if a frame pointer is needed
14941 (because the stack might be dynamically adjusted), if we are
14942 debugging, if we make calls, or if the sum of fp_save, gp_save,
14943 and local variables are more than the space needed to save all
14944 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
14945 + 18*8 = 288 (GPR13 reserved).
14947 For V.4 we don't have the stack cushion that AIX uses, but assume
14948 that the debugger can handle stackless frames. */
14950 if (info_ptr->calls_p)
14951 info_ptr->push_p = 1;
14953 else if (DEFAULT_ABI == ABI_V4)
14954 info_ptr->push_p = non_fixed_size != 0;
14956 else if (frame_pointer_needed)
14957 info_ptr->push_p = 1;
14959 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
14960 info_ptr->push_p = 1;
14963 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
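/* Illustrative check, not part of the original sources: on 32-bit the cushion
   tested above is 18 FPRs * 8 + 19 GPRs * 4 = 144 + 76 = 220 bytes, and on
   64-bit it is 18 * 8 + 18 * 8 = 288 bytes (GPR13 is reserved).  So a leaf
   function on AIX whose vars_size + parm_size + save_size comes to, say,
   200 bytes leaves push_p at 0 and runs without allocating a frame, while the
   same function with 240 bytes of non-fixed data exceeds the cushion and
   pushes the stack.  */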
14965 /* Zero offsets if we're not saving those registers. */
14966 if (info_ptr->fp_size == 0)
14967 info_ptr->fp_save_offset = 0;
14969 if (info_ptr->gp_size == 0)
14970 info_ptr->gp_save_offset = 0;
14972 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
14973 info_ptr->altivec_save_offset = 0;
14975 if (! TARGET_ALTIVEC_ABI || info_ptr->vrsave_mask == 0)
14976 info_ptr->vrsave_save_offset = 0;
14978 if (! TARGET_SPE_ABI
14979 || info_ptr->spe_64bit_regs_used == 0
14980 || info_ptr->spe_gp_size == 0)
14981 info_ptr->spe_gp_save_offset = 0;
14983 if (! info_ptr->lr_save_p)
14984 info_ptr->lr_save_offset = 0;
14986 if (! info_ptr->cr_save_p)
14987 info_ptr->cr_save_offset = 0;
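/* A worked example of the offset chaining above (illustrative only; the
   numbers are made up and assume the 32-bit ABI_V4 path with AltiVec):

     fp_size = 0, gp_size = 12 (r29..r31), cr_size = 4, vrsave_size = 4,
     altivec_size = 16 (one vector register)

     fp_save_offset      =   0    (later zeroed, no FPRs saved)
     gp_save_offset      = -12
     cr_save_offset      = -16
     vrsave_save_offset  = -20
     altivec_padding     =  16 - (20 % 16) = 12
     altivec_save_offset = -20 - 12 - 16  = -48   (16-byte aligned)
     ehrd_offset         = altivec_save_offset - ehrd_size

   All offsets are relative to the incoming stack pointer and grow downward.  */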
14992 /* Return true if the current function uses any GPRs in 64-bit SIMD mode.  */
14996 spe_func_has_64bit_regs_p (void)
15000 /* Functions that save and restore all the call-saved registers will
15001 need to save/restore the registers in 64-bits. */
15002 if (crtl->calls_eh_return
15003 || cfun->calls_setjmp
15004 || crtl->has_nonlocal_goto)
15007 insns = get_insns ();
15009 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
15015 /* FIXME: This should be implemented with attributes...
15017 (set_attr "spe64" "true")....then,
15018 if (get_spe64(insn)) return true;
15020 It's the only reliable way to do the stuff below. */
15022 i = PATTERN (insn);
15023 if (GET_CODE (i) == SET)
15025 enum machine_mode mode = GET_MODE (SET_SRC (i));
15027 if (SPE_VECTOR_MODE (mode))
15029 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
15039 debug_stack_info (rs6000_stack_t *info)
15041 const char *abi_string;
15044 info = rs6000_stack_info ();
15046 fprintf (stderr, "\nStack information for function %s:\n",
15047 ((current_function_decl && DECL_NAME (current_function_decl))
15048 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
15053 default: abi_string = "Unknown"; break;
15054 case ABI_NONE: abi_string = "NONE"; break;
15055 case ABI_AIX: abi_string = "AIX"; break;
15056 case ABI_DARWIN: abi_string = "Darwin"; break;
15057 case ABI_V4: abi_string = "V.4"; break;
15060 fprintf (stderr, "\tABI = %5s\n", abi_string);
15062 if (TARGET_ALTIVEC_ABI)
15063 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
15065 if (TARGET_SPE_ABI)
15066 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
15068 if (info->first_gp_reg_save != 32)
15069 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
15071 if (info->first_fp_reg_save != 64)
15072 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
15074 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
15075 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
15076 info->first_altivec_reg_save);
15078 if (info->lr_save_p)
15079 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
15081 if (info->cr_save_p)
15082 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
15084 if (info->vrsave_mask)
15085 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
15088 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
15091 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
15093 if (info->gp_save_offset)
15094 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
15096 if (info->fp_save_offset)
15097 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
15099 if (info->altivec_save_offset)
15100 fprintf (stderr, "\taltivec_save_offset = %5d\n",
15101 info->altivec_save_offset);
15103 if (info->spe_gp_save_offset)
15104 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
15105 info->spe_gp_save_offset);
15107 if (info->vrsave_save_offset)
15108 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
15109 info->vrsave_save_offset);
15111 if (info->lr_save_offset)
15112 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
15114 if (info->cr_save_offset)
15115 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
15117 if (info->varargs_save_offset)
15118 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
15120 if (info->total_size)
15121 fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
15124 if (info->vars_size)
15125 fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
15128 if (info->parm_size)
15129 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
15131 if (info->fixed_size)
15132 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
15135 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
15137 if (info->spe_gp_size)
15138 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
15141 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
15143 if (info->altivec_size)
15144 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
15146 if (info->vrsave_size)
15147 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
15149 if (info->altivec_padding_size)
15150 fprintf (stderr, "\taltivec_padding_size= %5d\n",
15151 info->altivec_padding_size);
15153 if (info->spe_padding_size)
15154 fprintf (stderr, "\tspe_padding_size = %5d\n",
15155 info->spe_padding_size);
15158 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
15160 if (info->save_size)
15161 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
15163 if (info->reg_size != 4)
15164 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
15166 fprintf (stderr, "\n");
15170 rs6000_return_addr (int count, rtx frame)
15172 /* Currently we don't optimize very well between prolog and body
15173 code, and for PIC code the result can actually be quite bad, so
15174 don't try to be too clever here. */
15175 if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
15177 cfun->machine->ra_needs_full_frame = 1;
15184 plus_constant (copy_to_reg
15185 (gen_rtx_MEM (Pmode,
15186 memory_address (Pmode, frame))),
15187 RETURN_ADDRESS_OFFSET)));
15190 cfun->machine->ra_need_lr = 1;
15191 return get_hard_reg_initial_val (Pmode, LR_REGNO);
15194 /* Say whether a function is a candidate for sibcall handling or not.
15195 We do not allow indirect calls to be optimized into sibling calls.
15196 Also, we can't do it if there are any vector parameters; there's
15197 nowhere to put the VRsave code so it works; note that functions with
15198 vector parameters are required to have a prototype, so the argument
15199 type info must be available here. (The tail recursion case can work
15200 with vector parameters, but there's no way to distinguish here.) */
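/* Illustrative example (hypothetical user code, not from this file):

     vector int vcallee (vector int);
     vector int caller (vector int x) { return vcallee (x); }

   With TARGET_ALTIVEC_VRSAVE the VECTOR_TYPE scan below rejects the tail
   call to vcallee because of the vector parameter, whereas a tail call that
   passes only scalar arguments to a locally bound, non-longcall function can
   still be turned into a sibcall.  */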
15202 rs6000_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
15207 if (TARGET_ALTIVEC_VRSAVE)
15209 for (type = TYPE_ARG_TYPES (TREE_TYPE (decl));
15210 type; type = TREE_CHAIN (type))
15212 if (TREE_CODE (TREE_VALUE (type)) == VECTOR_TYPE)
15216 if (DEFAULT_ABI == ABI_DARWIN
15217 || ((*targetm.binds_local_p) (decl)
15218 && (DEFAULT_ABI != ABI_AIX || !DECL_EXTERNAL (decl))))
15220 tree attr_list = TYPE_ATTRIBUTES (TREE_TYPE (decl));
15222 if (!lookup_attribute ("longcall", attr_list)
15223 || lookup_attribute ("shortcall", attr_list))
15230 /* Return NULL if INSN is valid within a low-overhead loop.
15231 Otherwise return why doloop cannot be applied.
15232 PowerPC uses the COUNT register for branch on table instructions. */
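/* Illustrative example (hypothetical user code): a switch statement that is
   lowered to a jump table inside the loop body produces an ADDR_VEC or
   ADDR_DIFF_VEC pattern, and the dispatch branch goes through the COUNT
   register, so the low-overhead-loop (bdnz) transformation must be refused:

     for (i = 0; i < n; i++)
       switch (v[i]) { case 0: x += 1; break; case 1: x -= 1; break; }

   A CALL_P insn inside the loop is rejected for the same underlying reason,
   since a call may clobber CTR.  */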
15234 static const char *
15235 rs6000_invalid_within_doloop (const_rtx insn)
15238 return "Function call in the loop.";
15241 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
15242 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
15243 return "Computed branch in the loop.";
15249 rs6000_ra_ever_killed (void)
15255 if (cfun->is_thunk)
15258 /* regs_ever_live has LR marked as used if any sibcalls are present,
15259 but this should not force saving and restoring in the
15260 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
15261 clobbers LR, so that is inappropriate. */
15263 /* Also, the prologue can generate a store into LR that
15264 doesn't really count, like this:
15267 bcl to set PIC register
15271 When we're called from the epilogue, we need to avoid counting
15272 this as a store. */
15274 push_topmost_sequence ();
15275 top = get_insns ();
15276 pop_topmost_sequence ();
15277 reg = gen_rtx_REG (Pmode, LR_REGNO);
15279 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
15285 if (!SIBLING_CALL_P (insn))
15288 else if (find_regno_note (insn, REG_INC, LR_REGNO))
15290 else if (set_of (reg, insn) != NULL_RTX
15291 && !prologue_epilogue_contains (insn))
15298 /* Emit instructions needed to load the TOC register.
15299 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
15300 a constant pool; or for SVR4 -fpic. */
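/* Descriptive note, added for orientation (not in the original sources);
   roughly, the cases below generate:
     - secure-PLT PIC:        a bcl/mflr pair plus the load_toc_v4_PIC_3b/3c
                              patterns to form the GOT/TOC pointer from the
                              .LCF label;
     - -fpic on V.4:          load_toc_v4_pic_si, a short call sequence that
                              leaves the GOT address in LR;
     - -fPIC on V.4:          the .LCL/.LCF label difference added to the
                              value copied out of LR;
     - non-PIC -mminimal-toc: lis/addi (elf_high/elf_low) of the .LCTOC label
                              into the TOC register;
     - AIX:                   the load_toc_aix_si/di patterns.  */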
15303 rs6000_emit_load_toc_table (int fromprolog)
15306 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
15308 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
15311 rtx lab, tmp1, tmp2, got;
15313 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
15314 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
15316 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
15318 got = rs6000_got_sym ();
15319 tmp1 = tmp2 = dest;
15322 tmp1 = gen_reg_rtx (Pmode);
15323 tmp2 = gen_reg_rtx (Pmode);
15325 emit_insn (gen_load_toc_v4_PIC_1 (lab));
15326 emit_move_insn (tmp1,
15327 gen_rtx_REG (Pmode, LR_REGNO));
15328 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
15329 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
15331 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
15333 emit_insn (gen_load_toc_v4_pic_si ());
15334 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
15336 else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
15339 rtx temp0 = (fromprolog
15340 ? gen_rtx_REG (Pmode, 0)
15341 : gen_reg_rtx (Pmode));
15347 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
15348 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
15350 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
15351 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
15353 emit_insn (gen_load_toc_v4_PIC_1 (symF));
15354 emit_move_insn (dest,
15355 gen_rtx_REG (Pmode, LR_REGNO));
15356 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
15362 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
15363 emit_insn (gen_load_toc_v4_PIC_1b (tocsym));
15364 emit_move_insn (dest,
15365 gen_rtx_REG (Pmode, LR_REGNO));
15366 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
15368 emit_insn (gen_addsi3 (dest, temp0, dest));
15370 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
15372 /* This is for AIX code running in non-PIC ELF32. */
15375 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
15376 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
15378 emit_insn (gen_elf_high (dest, realsym));
15379 emit_insn (gen_elf_low (dest, dest, realsym));
15383 gcc_assert (DEFAULT_ABI == ABI_AIX);
15386 emit_insn (gen_load_toc_aix_si (dest));
15388 emit_insn (gen_load_toc_aix_di (dest));
15392 /* Emit instructions to restore the link register after determining where
15393 its value has been stored. */
15396 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
15398 rs6000_stack_t *info = rs6000_stack_info ();
15401 operands[0] = source;
15402 operands[1] = scratch;
15404 if (info->lr_save_p)
15406 rtx frame_rtx = stack_pointer_rtx;
15407 HOST_WIDE_INT sp_offset = 0;
15410 if (frame_pointer_needed
15411 || cfun->calls_alloca
15412 || info->total_size > 32767)
15414 tmp = gen_frame_mem (Pmode, frame_rtx);
15415 emit_move_insn (operands[1], tmp);
15416 frame_rtx = operands[1];
15418 else if (info->push_p)
15419 sp_offset = info->total_size;
15421 tmp = plus_constant (frame_rtx, info->lr_save_offset + sp_offset);
15422 tmp = gen_frame_mem (Pmode, tmp);
15423 emit_move_insn (tmp, operands[0]);
15426 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
15429 static GTY(()) alias_set_type set = -1;
15432 get_TOC_alias_set (void)
15435 set = new_alias_set ();
15439 /* This returns nonzero if the current function uses the TOC. This is
15440 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
15441 is generated by the ABI_V4 load_toc_* patterns. */
15448 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
15451 rtx pat = PATTERN (insn);
15454 if (GET_CODE (pat) == PARALLEL)
15455 for (i = 0; i < XVECLEN (pat, 0); i++)
15457 rtx sub = XVECEXP (pat, 0, i);
15458 if (GET_CODE (sub) == USE)
15460 sub = XEXP (sub, 0);
15461 if (GET_CODE (sub) == UNSPEC
15462 && XINT (sub, 1) == UNSPEC_TOC)
15472 create_TOC_reference (rtx symbol)
15474 if (!can_create_pseudo_p ())
15475 df_set_regs_ever_live (TOC_REGISTER, true);
15476 return gen_rtx_PLUS (Pmode,
15477 gen_rtx_REG (Pmode, TOC_REGISTER),
15478 gen_rtx_CONST (Pmode,
15479 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_TOCREL)));
15482 /* If _Unwind_* has been called from within the same module,
15483 toc register is not guaranteed to be saved to 40(1) on function
15484 entry. Save it there in that case. */
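/* Added note (instruction decoding done by hand, worth re-checking): the two
   magic constants compared below are the TOC-reload instruction a caller
   emits after a cross-module call:
     0x80410014  ==  lwz r2,20(r1)   (opcode 32, RT=2, RA=1, D=20)
     0xE8410028  ==  ld  r2,40(r1)   (opcode 58, RT=2, RA=1, DS=40)
   The code follows the back chain, picks the saved return address out of the
   LR slot (2 * word-size into the caller's frame) and inspects the
   instruction at that address; if it is not the r2 reload shown above, the
   caller will not restore the TOC, so r2 is stored into the reserved slot at
   5 * word-size, i.e. 20(1) on 32-bit or 40(1) on 64-bit.  */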
15487 rs6000_aix_emit_builtin_unwind_init (void)
15490 rtx stack_top = gen_reg_rtx (Pmode);
15491 rtx opcode_addr = gen_reg_rtx (Pmode);
15492 rtx opcode = gen_reg_rtx (SImode);
15493 rtx tocompare = gen_reg_rtx (SImode);
15494 rtx no_toc_save_needed = gen_label_rtx ();
15496 mem = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
15497 emit_move_insn (stack_top, mem);
15499 mem = gen_frame_mem (Pmode,
15500 gen_rtx_PLUS (Pmode, stack_top,
15501 GEN_INT (2 * GET_MODE_SIZE (Pmode))));
15502 emit_move_insn (opcode_addr, mem);
15503 emit_move_insn (opcode, gen_rtx_MEM (SImode, opcode_addr));
15504 emit_move_insn (tocompare, gen_int_mode (TARGET_32BIT ? 0x80410014
15505 : 0xE8410028, SImode));
15507 do_compare_rtx_and_jump (opcode, tocompare, EQ, 1,
15508 SImode, NULL_RTX, NULL_RTX,
15509 no_toc_save_needed);
15511 mem = gen_frame_mem (Pmode,
15512 gen_rtx_PLUS (Pmode, stack_top,
15513 GEN_INT (5 * GET_MODE_SIZE (Pmode))));
15514 emit_move_insn (mem, gen_rtx_REG (Pmode, 2));
15515 emit_label (no_toc_save_needed);
15518 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
15519 and the change to the stack pointer. */
15522 rs6000_emit_stack_tie (void)
15524 rtx mem = gen_frame_mem (BLKmode,
15525 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
15527 emit_insn (gen_stack_tie (mem));
15530 /* Emit the correct code for allocating stack space, as insns.
15531 If COPY_R12, make sure a copy of the old frame is left in r12.
15532 If COPY_R11, make sure a copy of the old frame is left in r11,
15533 in preference to r12 if COPY_R12.
15534 The generated code may use hard register 0 as a temporary. */
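/* Rough sketch of what this expands to (added note, not authoritative): for
   a frame that fits in a signed 16-bit displacement the stack is allocated
   with a single store-with-update,
       stwu r1,-SIZE(r1)        (stdu on 64-bit)
   while for larger frames -SIZE is first loaded into r0 and the indexed
   update form is used,
       stwux r1,r1,r0           (stdux on 64-bit)
   which is what the movsi_update_stack / movdi_di_update_stack expansions
   below boil down to.  The store also writes the old stack pointer as the
   back chain word of the new frame.  */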
15537 rs6000_emit_allocate_stack (HOST_WIDE_INT size, int copy_r12, int copy_r11)
15540 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
15541 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
15542 rtx todec = gen_int_mode (-size, Pmode);
15545 if (INTVAL (todec) != -size)
15547 warning (0, "stack frame too large");
15548 emit_insn (gen_trap ());
15552 if (crtl->limit_stack)
15554 if (REG_P (stack_limit_rtx)
15555 && REGNO (stack_limit_rtx) > 1
15556 && REGNO (stack_limit_rtx) <= 31)
15558 emit_insn (TARGET_32BIT
15559 ? gen_addsi3 (tmp_reg,
15562 : gen_adddi3 (tmp_reg,
15566 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
15569 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
15571 && DEFAULT_ABI == ABI_V4)
15573 rtx toload = gen_rtx_CONST (VOIDmode,
15574 gen_rtx_PLUS (Pmode,
15578 emit_insn (gen_elf_high (tmp_reg, toload));
15579 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
15580 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
15584 warning (0, "stack limit expression is not supported");
15587 if (copy_r12 || copy_r11)
15588 emit_move_insn (copy_r11
15589 ? gen_rtx_REG (Pmode, 11)
15590 : gen_rtx_REG (Pmode, 12),
15595 /* Need a note here so that try_split doesn't get confused. */
15596 if (get_last_insn () == NULL_RTX)
15597 emit_note (NOTE_INSN_DELETED);
15598 insn = emit_move_insn (tmp_reg, todec);
15599 try_split (PATTERN (insn), insn, 0);
15603 insn = emit_insn (TARGET_32BIT
15604 ? gen_movsi_update_stack (stack_reg, stack_reg,
15606 : gen_movdi_di_update_stack (stack_reg, stack_reg,
15607 todec, stack_reg));
15608 /* Since we didn't use gen_frame_mem to generate the MEM, grab
15609 it now and set the alias set/attributes. The above gen_*_update
15610 calls will generate a PARALLEL with the MEM set being the first operation.  */
15612 par = PATTERN (insn);
15613 gcc_assert (GET_CODE (par) == PARALLEL);
15614 set = XVECEXP (par, 0, 0);
15615 gcc_assert (GET_CODE (set) == SET);
15616 mem = SET_DEST (set);
15617 gcc_assert (MEM_P (mem));
15618 MEM_NOTRAP_P (mem) = 1;
15619 set_mem_alias_set (mem, get_frame_alias_set ());
15621 RTX_FRAME_RELATED_P (insn) = 1;
15622 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
15623 gen_rtx_SET (VOIDmode, stack_reg,
15624 gen_rtx_PLUS (Pmode, stack_reg,
15625 GEN_INT (-size))));
15628 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
15629 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
15630 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
15631 deduce these equivalences by itself so it wasn't necessary to hold
15632 its hand so much. */
15635 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
15636 rtx reg2, rtx rreg)
15640 /* copy_rtx will not make unique copies of registers, so we need to
15641 ensure we don't have unwanted sharing here. */
15643 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
15646 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
15648 real = copy_rtx (PATTERN (insn));
15650 if (reg2 != NULL_RTX)
15651 real = replace_rtx (real, reg2, rreg);
15653 real = replace_rtx (real, reg,
15654 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
15655 STACK_POINTER_REGNUM),
15658 /* We expect that 'real' is either a SET or a PARALLEL containing
15659 SETs (and possibly other stuff). In a PARALLEL, all the SETs
15660 are important so they all have to be marked RTX_FRAME_RELATED_P. */
15662 if (GET_CODE (real) == SET)
15666 temp = simplify_rtx (SET_SRC (set));
15668 SET_SRC (set) = temp;
15669 temp = simplify_rtx (SET_DEST (set));
15671 SET_DEST (set) = temp;
15672 if (GET_CODE (SET_DEST (set)) == MEM)
15674 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
15676 XEXP (SET_DEST (set), 0) = temp;
15683 gcc_assert (GET_CODE (real) == PARALLEL);
15684 for (i = 0; i < XVECLEN (real, 0); i++)
15685 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
15687 rtx set = XVECEXP (real, 0, i);
15689 temp = simplify_rtx (SET_SRC (set));
15691 SET_SRC (set) = temp;
15692 temp = simplify_rtx (SET_DEST (set));
15694 SET_DEST (set) = temp;
15695 if (GET_CODE (SET_DEST (set)) == MEM)
15697 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
15699 XEXP (SET_DEST (set), 0) = temp;
15701 RTX_FRAME_RELATED_P (set) = 1;
15705 RTX_FRAME_RELATED_P (insn) = 1;
15706 add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
15709 /* Returns an insn that has a vrsave set operation with the
15710 appropriate CLOBBERs. */
15713 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
15716 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
15717 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
15720 = gen_rtx_SET (VOIDmode,
15722 gen_rtx_UNSPEC_VOLATILE (SImode,
15723 gen_rtvec (2, reg, vrsave),
15724 UNSPECV_SET_VRSAVE));
15728 /* We need to clobber the registers in the mask so the scheduler
15729 does not move sets to VRSAVE before sets of AltiVec registers.
15731 However, if the function receives nonlocal gotos, reload will set
15732 all call saved registers live. We will end up with:
15734 (set (reg 999) (mem))
15735 (parallel [ (set (reg vrsave) (unspec blah))
15736 (clobber (reg 999))])
15738 The clobber will cause the store into reg 999 to be dead, and
15739 flow will attempt to delete an epilogue insn. In this case, we
15740 need an unspec use/set of the register. */
15742 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
15743 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
15745 if (!epiloguep || call_used_regs [i])
15746 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
15747 gen_rtx_REG (V4SImode, i));
15750 rtx reg = gen_rtx_REG (V4SImode, i);
15753 = gen_rtx_SET (VOIDmode,
15755 gen_rtx_UNSPEC (V4SImode,
15756 gen_rtvec (1, reg), 27));
15760 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
15762 for (i = 0; i < nclobs; ++i)
15763 XVECEXP (insn, 0, i) = clobs[i];
15768 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
15769 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
15772 emit_frame_save (rtx frame_reg, rtx frame_ptr, enum machine_mode mode,
15773 unsigned int regno, int offset, HOST_WIDE_INT total_size)
15775 rtx reg, offset_rtx, insn, mem, addr, int_rtx;
15776 rtx replacea, replaceb;
15778 int_rtx = GEN_INT (offset);
15780 /* Some cases that need register indexed addressing. */
15781 if ((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
15782 || (TARGET_E500_DOUBLE && mode == DFmode)
15784 && SPE_VECTOR_MODE (mode)
15785 && !SPE_CONST_OFFSET_OK (offset)))
15787 /* Whoever calls us must make sure r11 is available in the
15788 flow path of instructions in the prologue. */
15789 offset_rtx = gen_rtx_REG (Pmode, 11);
15790 emit_move_insn (offset_rtx, int_rtx);
15792 replacea = offset_rtx;
15793 replaceb = int_rtx;
15797 offset_rtx = int_rtx;
15798 replacea = NULL_RTX;
15799 replaceb = NULL_RTX;
15802 reg = gen_rtx_REG (mode, regno);
15803 addr = gen_rtx_PLUS (Pmode, frame_reg, offset_rtx);
15804 mem = gen_frame_mem (mode, addr);
15806 insn = emit_move_insn (mem, reg);
15808 rs6000_frame_related (insn, frame_ptr, total_size, replacea, replaceb);
15811 /* Emit an offset memory reference suitable for a frame store, while
15812 converting to a valid addressing mode. */
15815 gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
15817 rtx int_rtx, offset_rtx;
15819 int_rtx = GEN_INT (offset);
15821 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode))
15822 || (TARGET_E500_DOUBLE && mode == DFmode))
15824 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
15825 emit_move_insn (offset_rtx, int_rtx);
15828 offset_rtx = int_rtx;
15830 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
15833 /* Look for user-defined global regs. We should not save and restore these,
15834 and cannot use stmw/lmw if there are any in its range. */
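/* Illustrative example (hypothetical user code): a global register variable
   such as

     register int reserved asm ("r30");

   makes global_regs[30] nonzero.  Such a register must not be saved or
   restored here, and a stmw/lmw whose range covers r30 (i.e. one starting at
   or below it) cannot be used either, which is what the check below
   enforces.  */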
15837 no_global_regs_above (int first, bool gpr)
15840 for (i = first; i < (gpr ? 32 : 64); i++)
15841 if (global_regs[i])
15846 #ifndef TARGET_FIX_AND_CONTINUE
15847 #define TARGET_FIX_AND_CONTINUE 0
15850 /* It's really GPR 13 and FPR 14, but we need the smaller of the two. */
15851 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
15852 #define LAST_SAVRES_REGISTER 31
15853 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
15855 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][8];
15857 /* Return the symbol for an out-of-line register save/restore routine.
15858 We are saving/restoring GPRs if GPR is true. */
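/* Example names produced by the sprintf in this function (illustrative):
     _savegpr_29      save GPRs starting at r29, ordinary return
     _restgpr_29_x    restore the same GPRs and return ("exit" variant)
     _savefpr_14      save FPRs starting at f14 (regno is biased by -32)
     _rest64gpr_24_x  SPE 64-bit GPR restore-and-exit variant
   The (regno, select) pair indexes the savres_routine_syms cache above so a
   given routine symbol is only created once.  */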
15861 rs6000_savres_routine_sym (rs6000_stack_t *info, bool savep, bool gpr, bool exitp)
15863 int regno = gpr ? info->first_gp_reg_save : (info->first_fp_reg_save - 32);
15865 int select = ((savep ? 1 : 0) << 2
15867 /* On the SPE, we never have any FPRs, but we do have
15868 32/64-bit versions of the routines. */
15869 ? (TARGET_SPE_ABI && info->spe_64bit_regs_used ? 1 : 0)
15873 /* Don't generate bogus routine names. */
15874 gcc_assert (FIRST_SAVRES_REGISTER <= regno && regno <= LAST_SAVRES_REGISTER);
15876 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
15881 const char *action;
15882 const char *regkind;
15883 const char *exit_suffix;
15885 action = savep ? "save" : "rest";
15887 /* SPE has slightly different names for its routines depending on
15888 whether we are saving 32-bit or 64-bit registers. */
15889 if (TARGET_SPE_ABI)
15891 /* No floating point saves on the SPE. */
15894 regkind = info->spe_64bit_regs_used ? "64gpr" : "32gpr";
15897 regkind = gpr ? "gpr" : "fpr";
15899 exit_suffix = exitp ? "_x" : "";
15901 sprintf (name, "_%s%s_%d%s", action, regkind, regno, exit_suffix);
15903 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
15904 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
15910 /* Emit a sequence of insns, including a stack tie if needed, for
15911 resetting the stack pointer. If SAVRES is true, then don't reset the
15912 stack pointer, but move the base of the frame into r11 for use by
15913 out-of-line register restore routines. */
15916 rs6000_emit_stack_reset (rs6000_stack_t *info,
15917 rtx sp_reg_rtx, rtx frame_reg_rtx,
15918 int sp_offset, bool savres)
15920 /* This blockage is needed so that sched doesn't decide to move
15921 the sp change before the register restores. */
15922 if (frame_reg_rtx != sp_reg_rtx
15924 && info->spe_64bit_regs_used != 0
15925 && info->first_gp_reg_save != 32))
15926 rs6000_emit_stack_tie ();
15928 if (frame_reg_rtx != sp_reg_rtx)
15930 if (sp_offset != 0)
15931 return emit_insn (gen_addsi3 (sp_reg_rtx, frame_reg_rtx,
15932 GEN_INT (sp_offset)));
15934 return emit_move_insn (sp_reg_rtx, frame_reg_rtx);
15936 else if (sp_offset != 0)
15938 /* If we are restoring registers out-of-line, we will be using the
15939 "exit" variants of the restore routines, which will reset the
15940 stack for us. But we do need to point r11 into the right place
15941 for those routines. */
15942 rtx dest_reg = (savres
15943 ? gen_rtx_REG (Pmode, 11)
15946 rtx insn = emit_insn (gen_add3_insn (dest_reg, sp_reg_rtx,
15947 GEN_INT (sp_offset)));
15954 /* Construct a parallel rtx describing the effect of a call to an
15955 out-of-line register save/restore routine. */
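/* Added sketch, register numbers and offsets illustrative: for an "exit"
   GPR restore the PARALLEL built below looks roughly like

     (parallel [(return)
                (clobber (reg:P 65))                       ; LR
                (use (symbol_ref "_restgpr_29_x"))
                (use (reg:P 11))                           ; frame base
                (set (reg 29) (mem (plus (reg 11) (const_int OFF))))
                (set (reg 30) (mem (plus (reg 11) (const_int OFF+4))))
                (set (reg 31) (mem (plus (reg 11) (const_int OFF+8))))])

   For a save the SETs run the other way (memory <- register) and the
   (return) element is omitted.  */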
15958 rs6000_make_savres_rtx (rs6000_stack_t *info,
15959 rtx frame_reg_rtx, int save_area_offset,
15960 enum machine_mode reg_mode,
15961 bool savep, bool gpr, bool exitp)
15964 int offset, start_reg, end_reg, n_regs;
15965 int reg_size = GET_MODE_SIZE (reg_mode);
15971 ? info->first_gp_reg_save
15972 : info->first_fp_reg_save);
15973 end_reg = gpr ? 32 : 64;
15974 n_regs = end_reg - start_reg;
15975 p = rtvec_alloc ((exitp ? 4 : 3) + n_regs);
15977 /* If we're saving registers, then we should never say we're exiting. */
15978 gcc_assert ((savep && !exitp) || !savep);
15981 RTVEC_ELT (p, offset++) = gen_rtx_RETURN (VOIDmode);
15983 RTVEC_ELT (p, offset++)
15984 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 65));
15986 sym = rs6000_savres_routine_sym (info, savep, gpr, exitp);
15987 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
15988 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 11));
15990 for (i = 0; i < end_reg - start_reg; i++)
15992 rtx addr, reg, mem;
15993 reg = gen_rtx_REG (reg_mode, start_reg + i);
15994 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
15995 GEN_INT (save_area_offset + reg_size*i));
15996 mem = gen_frame_mem (reg_mode, addr);
15998 RTVEC_ELT (p, i + offset) = gen_rtx_SET (VOIDmode,
16000 savep ? reg : mem);
16003 return gen_rtx_PARALLEL (VOIDmode, p);
16006 /* Determine whether the gp REG is really used. */
16009 rs6000_reg_live_or_pic_offset_p (int reg)
16011 return ((df_regs_ever_live_p (reg)
16012 && (!call_used_regs[reg]
16013 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
16014 && TARGET_TOC && TARGET_MINIMAL_TOC)))
16015 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
16016 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
16017 || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
16021 SAVRES_MULTIPLE = 0x1,
16022 SAVRES_INLINE_FPRS = 0x2,
16023 SAVRES_INLINE_GPRS = 0x4
16026 /* Determine the strategy for saving/restoring registers. */
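/* Added usage sketch: the return value is a small bit mask built from the
   SAVRES_* enumerators above, e.g.

     int strategy = rs6000_savres_strategy (info, true,
                                            using_static_chain_p, sibcall);
     if (strategy & SAVRES_MULTIPLE)        use stmw/lmw;
     if (strategy & SAVRES_INLINE_GPRS)     save or restore GPRs inline;

   which is how the prologue and epilogue emitters below decode it.  */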
16029 rs6000_savres_strategy (rs6000_stack_t *info, bool savep,
16030 int using_static_chain_p, int sibcall)
16032 bool using_multiple_p;
16034 bool savres_fprs_inline;
16035 bool savres_gprs_inline;
16036 bool noclobber_global_gprs
16037 = no_global_regs_above (info->first_gp_reg_save, /*gpr=*/true);
16039 using_multiple_p = (TARGET_MULTIPLE && ! TARGET_POWERPC64
16040 && (!TARGET_SPE_ABI
16041 || info->spe_64bit_regs_used == 0)
16042 && info->first_gp_reg_save < 31
16043 && noclobber_global_gprs);
16044 /* Don't bother to try to save things out-of-line if r11 is occupied
16045 by the static chain. It would require too much fiddling and the
16046 static chain is rarely used anyway. */
16047 common = (using_static_chain_p
16049 || crtl->calls_eh_return
16050 || !info->lr_save_p
16051 || cfun->machine->ra_need_lr
16052 || info->total_size > 32767);
16053 savres_fprs_inline = (common
16054 || info->first_fp_reg_save == 64
16055 || !no_global_regs_above (info->first_fp_reg_save,
16057 || FP_SAVE_INLINE (info->first_fp_reg_save));
16058 savres_gprs_inline = (common
16059 /* Saving CR interferes with the exit routines
16060 used on the SPE, so just punt here. */
16063 && info->spe_64bit_regs_used != 0
16064 && info->cr_save_p != 0)
16065 || info->first_gp_reg_save == 32
16066 || !noclobber_global_gprs
16067 || GP_SAVE_INLINE (info->first_gp_reg_save));
16070 /* If we are going to use store multiple, then don't even bother
16071 with the out-of-line routines, since the store-multiple instruction
16072 will always be smaller. */
16073 savres_gprs_inline = savres_gprs_inline || using_multiple_p;
16076 /* The situation is more complicated with load multiple. We'd
16077 prefer to use the out-of-line routines for restores, since the
16078 "exit" out-of-line routines can handle the restore of LR and
16079 the frame teardown. But we can only use the out-of-line
16080 routines if we know that we've used store multiple or
16081 out-of-line routines in the prologue, i.e. if we've saved all
16082 the registers from first_gp_reg_save. Otherwise, we risk
16083 loading garbage from the stack. Furthermore, we can only use
16084 the "exit" out-of-line gpr restore if we haven't saved any
16086 bool saved_all = !savres_gprs_inline || using_multiple_p;
16088 if (saved_all && info->first_fp_reg_save != 64)
16089 /* We can't use the exit routine; use load multiple if it's available.  */
16091 savres_gprs_inline = savres_gprs_inline || using_multiple_p;
16094 return (using_multiple_p
16095 | (savres_fprs_inline << 1)
16096 | (savres_gprs_inline << 2));
16099 /* Emit function prologue as insns. */
16102 rs6000_emit_prologue (void)
16104 rs6000_stack_t *info = rs6000_stack_info ();
16105 enum machine_mode reg_mode = Pmode;
16106 int reg_size = TARGET_32BIT ? 4 : 8;
16107 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
16108 rtx frame_ptr_rtx = gen_rtx_REG (Pmode, 12);
16109 rtx frame_reg_rtx = sp_reg_rtx;
16110 rtx cr_save_rtx = NULL_RTX;
16113 int saving_FPRs_inline;
16114 int saving_GPRs_inline;
16115 int using_store_multiple;
16116 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
16117 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
16118 && !call_used_regs[STATIC_CHAIN_REGNUM]);
16119 HOST_WIDE_INT sp_offset = 0;
16121 if (TARGET_FIX_AND_CONTINUE)
16123 /* gdb on darwin arranges to forward a function from the old
16124 address by modifying the first 5 instructions of the function
16125 to branch to the overriding function. This is necessary to
16126 permit function pointers that point to the old function to
16127 actually forward to the new function. */
16128 emit_insn (gen_nop ());
16129 emit_insn (gen_nop ());
16130 emit_insn (gen_nop ());
16131 emit_insn (gen_nop ());
16132 emit_insn (gen_nop ());
16135 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
16137 reg_mode = V2SImode;
16141 strategy = rs6000_savres_strategy (info, /*savep=*/true,
16142 /*static_chain_p=*/using_static_chain_p,
16144 using_store_multiple = strategy & SAVRES_MULTIPLE;
16145 saving_FPRs_inline = strategy & SAVRES_INLINE_FPRS;
16146 saving_GPRs_inline = strategy & SAVRES_INLINE_GPRS;
16148 /* For V.4, update stack before we do any saving and set back pointer. */
16149 if (! WORLD_SAVE_P (info)
16151 && (DEFAULT_ABI == ABI_V4
16152 || crtl->calls_eh_return))
16154 bool need_r11 = (TARGET_SPE
16155 ? (!saving_GPRs_inline
16156 && info->spe_64bit_regs_used == 0)
16157 : (!saving_FPRs_inline || !saving_GPRs_inline));
16158 if (info->total_size < 32767)
16159 sp_offset = info->total_size;
16161 frame_reg_rtx = (need_r11
16162 ? gen_rtx_REG (Pmode, 11)
16164 rs6000_emit_allocate_stack (info->total_size,
16165 (frame_reg_rtx != sp_reg_rtx
16166 && (info->cr_save_p
16168 || info->first_fp_reg_save < 64
16169 || info->first_gp_reg_save < 32
16172 if (frame_reg_rtx != sp_reg_rtx)
16173 rs6000_emit_stack_tie ();
16176 /* Handle world saves specially here. */
16177 if (WORLD_SAVE_P (info))
16184 /* save_world expects lr in r0. */
16185 reg0 = gen_rtx_REG (Pmode, 0);
16186 if (info->lr_save_p)
16188 insn = emit_move_insn (reg0,
16189 gen_rtx_REG (Pmode, LR_REGNO));
16190 RTX_FRAME_RELATED_P (insn) = 1;
16193 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
16194 assumptions about the offsets of various bits of the stack frame.  */
16196 gcc_assert (info->gp_save_offset == -220
16197 && info->fp_save_offset == -144
16198 && info->lr_save_offset == 8
16199 && info->cr_save_offset == 4
16202 && (!crtl->calls_eh_return
16203 || info->ehrd_offset == -432)
16204 && info->vrsave_save_offset == -224
16205 && info->altivec_save_offset == -416);
16207 treg = gen_rtx_REG (SImode, 11);
16208 emit_move_insn (treg, GEN_INT (-info->total_size));
16210 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
16211 in R11. It also clobbers R12, so beware! */
16213 /* Preserve CR2 for save_world prologues */
16215 sz += 32 - info->first_gp_reg_save;
16216 sz += 64 - info->first_fp_reg_save;
16217 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
16218 p = rtvec_alloc (sz);
16220 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
16221 gen_rtx_REG (SImode,
16223 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
16224 gen_rtx_SYMBOL_REF (Pmode,
16226 /* We do floats first so that the instruction pattern matches
16228 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
16230 rtx reg = gen_rtx_REG (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
16231 ? DFmode : SFmode),
16232 info->first_fp_reg_save + i);
16233 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16234 GEN_INT (info->fp_save_offset
16235 + sp_offset + 8 * i));
16236 rtx mem = gen_frame_mem (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
16237 ? DFmode : SFmode), addr);
16239 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
16241 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
16243 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
16244 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16245 GEN_INT (info->altivec_save_offset
16246 + sp_offset + 16 * i));
16247 rtx mem = gen_frame_mem (V4SImode, addr);
16249 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
16251 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
16253 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
16254 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16255 GEN_INT (info->gp_save_offset
16256 + sp_offset + reg_size * i));
16257 rtx mem = gen_frame_mem (reg_mode, addr);
16259 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
16263 /* CR register traditionally saved as CR2. */
16264 rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
16265 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16266 GEN_INT (info->cr_save_offset
16268 rtx mem = gen_frame_mem (reg_mode, addr);
16270 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
16272 /* Explain about use of R0. */
16273 if (info->lr_save_p)
16275 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16276 GEN_INT (info->lr_save_offset
16278 rtx mem = gen_frame_mem (reg_mode, addr);
16280 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg0);
16282 /* Explain what happens to the stack pointer. */
16284 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
16285 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
16288 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
16289 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16290 treg, GEN_INT (-info->total_size));
16291 sp_offset = info->total_size;
16294 /* If we use the link register, get it into r0. */
16295 if (!WORLD_SAVE_P (info) && info->lr_save_p)
16297 rtx addr, reg, mem;
16299 insn = emit_move_insn (gen_rtx_REG (Pmode, 0),
16300 gen_rtx_REG (Pmode, LR_REGNO));
16301 RTX_FRAME_RELATED_P (insn) = 1;
16303 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16304 GEN_INT (info->lr_save_offset + sp_offset));
16305 reg = gen_rtx_REG (Pmode, 0);
16306 mem = gen_rtx_MEM (Pmode, addr);
16307 /* This should not be of rs6000_sr_alias_set, because of
16308 __builtin_return_address. */
16310 insn = emit_move_insn (mem, reg);
16311 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16312 NULL_RTX, NULL_RTX);
16315 /* If we need to save CR, put it into r12. */
16316 if (!WORLD_SAVE_P (info) && info->cr_save_p && frame_reg_rtx != frame_ptr_rtx)
16320 cr_save_rtx = gen_rtx_REG (SImode, 12);
16321 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
16322 RTX_FRAME_RELATED_P (insn) = 1;
16323 /* Now, there's no way that dwarf2out_frame_debug_expr is going
16324 to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
16325 But that's OK. All we have to do is specify that _one_ condition
16326 code register is saved in this stack slot. The thrower's epilogue
16327 will then restore all the call-saved registers.
16328 We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
16329 set = gen_rtx_SET (VOIDmode, cr_save_rtx,
16330 gen_rtx_REG (SImode, CR2_REGNO));
16331 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
16334 /* Do any required saving of fpr's. If only one or two to save, do
16335 it ourselves. Otherwise, call function. */
16336 if (!WORLD_SAVE_P (info) && saving_FPRs_inline)
16339 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
16340 if ((df_regs_ever_live_p (info->first_fp_reg_save+i)
16341 && ! call_used_regs[info->first_fp_reg_save+i]))
16342 emit_frame_save (frame_reg_rtx, frame_ptr_rtx,
16343 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
16345 info->first_fp_reg_save + i,
16346 info->fp_save_offset + sp_offset + 8 * i,
16349 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
16353 par = rs6000_make_savres_rtx (info, frame_reg_rtx,
16354 info->fp_save_offset + sp_offset,
16356 /*savep=*/true, /*gpr=*/false,
16358 insn = emit_insn (par);
16359 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16360 NULL_RTX, NULL_RTX);
16363 /* Save GPRs. This is done as a PARALLEL if we are using
16364 the store-multiple instructions. */
16365 if (!WORLD_SAVE_P (info)
16367 && info->spe_64bit_regs_used != 0
16368 && info->first_gp_reg_save != 32)
16371 rtx spe_save_area_ptr;
16373 /* Determine whether we can address all of the registers that need
16374 to be saved with an offset from the stack pointer that fits in
16375 the small const field for SPE memory instructions. */
16376 int spe_regs_addressable_via_sp
16377 = (SPE_CONST_OFFSET_OK(info->spe_gp_save_offset + sp_offset
16378 + (32 - info->first_gp_reg_save - 1) * reg_size)
16379 && saving_GPRs_inline);
16382 if (spe_regs_addressable_via_sp)
16384 spe_save_area_ptr = frame_reg_rtx;
16385 spe_offset = info->spe_gp_save_offset + sp_offset;
16389 /* Make r11 point to the start of the SPE save area. We need
16390 to be careful here if r11 is holding the static chain. If
16391 it is, then temporarily save it in r0. We would use r0 as
16392 our base register here, but using r0 as a base register in
16393 loads and stores means something different from what we intended.  */
16395 int ool_adjust = (saving_GPRs_inline
16397 : (info->first_gp_reg_save
16398 - (FIRST_SAVRES_REGISTER+1))*8);
16399 HOST_WIDE_INT offset = (info->spe_gp_save_offset
16400 + sp_offset - ool_adjust);
16402 if (using_static_chain_p)
16404 rtx r0 = gen_rtx_REG (Pmode, 0);
16405 gcc_assert (info->first_gp_reg_save > 11);
16407 emit_move_insn (r0, gen_rtx_REG (Pmode, 11));
16410 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
16411 insn = emit_insn (gen_addsi3 (spe_save_area_ptr,
16413 GEN_INT (offset)));
16414 /* We need to make sure the move to r11 gets noted for
16415 properly outputting unwind information. */
16416 if (!saving_GPRs_inline)
16417 rs6000_frame_related (insn, frame_reg_rtx, offset,
16418 NULL_RTX, NULL_RTX);
16422 if (saving_GPRs_inline)
16424 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
16425 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
16427 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
16428 rtx offset, addr, mem;
16430 /* We're doing all this to ensure that the offset fits into
16431 the immediate offset of 'evstdd'. */
16432 gcc_assert (SPE_CONST_OFFSET_OK (reg_size * i + spe_offset));
16434 offset = GEN_INT (reg_size * i + spe_offset);
16435 addr = gen_rtx_PLUS (Pmode, spe_save_area_ptr, offset);
16436 mem = gen_rtx_MEM (V2SImode, addr);
16438 insn = emit_move_insn (mem, reg);
16440 rs6000_frame_related (insn, spe_save_area_ptr,
16441 info->spe_gp_save_offset
16442 + sp_offset + reg_size * i,
16443 offset, const0_rtx);
16450 par = rs6000_make_savres_rtx (info, gen_rtx_REG (Pmode, 11),
16452 /*savep=*/true, /*gpr=*/true,
16454 insn = emit_insn (par);
16455 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16456 NULL_RTX, NULL_RTX);
16460 /* Move the static chain pointer back. */
16461 if (using_static_chain_p && !spe_regs_addressable_via_sp)
16462 emit_move_insn (gen_rtx_REG (Pmode, 11), gen_rtx_REG (Pmode, 0));
16464 else if (!WORLD_SAVE_P (info) && !saving_GPRs_inline)
16468 /* Need to adjust r11 if we saved any FPRs. */
16469 if (info->first_fp_reg_save != 64)
16471 rtx r11 = gen_rtx_REG (reg_mode, 11);
16472 rtx offset = GEN_INT (info->total_size
16473 + (-8 * (64-info->first_fp_reg_save)));
16474 rtx ptr_reg = (sp_reg_rtx == frame_reg_rtx
16475 ? sp_reg_rtx : r11);
16477 emit_insn (TARGET_32BIT
16478 ? gen_addsi3 (r11, ptr_reg, offset)
16479 : gen_adddi3 (r11, ptr_reg, offset));
16482 par = rs6000_make_savres_rtx (info, frame_reg_rtx,
16483 info->gp_save_offset + sp_offset,
16485 /*savep=*/true, /*gpr=*/true,
16487 insn = emit_insn (par);
16488 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16489 NULL_RTX, NULL_RTX);
16491 else if (!WORLD_SAVE_P (info) && using_store_multiple)
16495 p = rtvec_alloc (32 - info->first_gp_reg_save);
16496 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
16498 rtx addr, reg, mem;
16499 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
16500 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16501 GEN_INT (info->gp_save_offset
16504 mem = gen_frame_mem (reg_mode, addr);
16506 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, reg);
16508 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
16509 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16510 NULL_RTX, NULL_RTX);
16512 else if (!WORLD_SAVE_P (info))
16515 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
16516 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
16518 rtx addr, reg, mem;
16519 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
16521 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16522 GEN_INT (info->gp_save_offset
16525 mem = gen_frame_mem (reg_mode, addr);
16527 insn = emit_move_insn (mem, reg);
16528 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16529 NULL_RTX, NULL_RTX);
16533 /* ??? There's no need to emit actual instructions here, but it's the
16534 easiest way to get the frame unwind information emitted. */
16535 if (crtl->calls_eh_return)
16537 unsigned int i, regno;
16539 /* In AIX ABI we need to pretend we save r2 here. */
16542 rtx addr, reg, mem;
16544 reg = gen_rtx_REG (reg_mode, 2);
16545 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16546 GEN_INT (sp_offset + 5 * reg_size));
16547 mem = gen_frame_mem (reg_mode, addr);
16549 insn = emit_move_insn (mem, reg);
16550 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16551 NULL_RTX, NULL_RTX);
16552 PATTERN (insn) = gen_blockage ();
16557 regno = EH_RETURN_DATA_REGNO (i);
16558 if (regno == INVALID_REGNUM)
16561 emit_frame_save (frame_reg_rtx, frame_ptr_rtx, reg_mode, regno,
16562 info->ehrd_offset + sp_offset
16563 + reg_size * (int) i,
16568 /* Save CR if we use any that must be preserved. */
16569 if (!WORLD_SAVE_P (info) && info->cr_save_p)
16571 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16572 GEN_INT (info->cr_save_offset + sp_offset));
16573 rtx mem = gen_frame_mem (SImode, addr);
16574 /* See the large comment above about why CR2_REGNO is used. */
16575 rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);
16577 /* If r12 was used to hold the original sp, copy cr into r0 now that it's free.  */
16579 if (REGNO (frame_reg_rtx) == 12)
16583 cr_save_rtx = gen_rtx_REG (SImode, 0);
16584 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
16585 RTX_FRAME_RELATED_P (insn) = 1;
16586 set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
16587 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
16589 insn = emit_move_insn (mem, cr_save_rtx);
16591 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16592 NULL_RTX, NULL_RTX);
16595 /* Update stack and set back pointer unless this is V.4,
16596 for which it was done previously. */
16597 if (!WORLD_SAVE_P (info) && info->push_p
16598 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
16600 if (info->total_size < 32767)
16601 sp_offset = info->total_size;
16603 frame_reg_rtx = frame_ptr_rtx;
16604 rs6000_emit_allocate_stack (info->total_size,
16605 (frame_reg_rtx != sp_reg_rtx
16606 && ((info->altivec_size != 0)
16607 || (info->vrsave_mask != 0)
16610 if (frame_reg_rtx != sp_reg_rtx)
16611 rs6000_emit_stack_tie ();
16614 /* Set frame pointer, if needed. */
16615 if (frame_pointer_needed)
16617 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
16619 RTX_FRAME_RELATED_P (insn) = 1;
16622 /* Save AltiVec registers if needed. Save here because the red zone does
16623 not include AltiVec registers. */
16624 if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI && info->altivec_size != 0)
16628 /* There should be a non-inline version of this, for when we
16629 are saving lots of vector registers. */
16630 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
16631 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
16633 rtx areg, savereg, mem;
16636 offset = info->altivec_save_offset + sp_offset
16637 + 16 * (i - info->first_altivec_reg_save);
16639 savereg = gen_rtx_REG (V4SImode, i);
16641 areg = gen_rtx_REG (Pmode, 0);
16642 emit_move_insn (areg, GEN_INT (offset));
16644 /* AltiVec addressing mode is [reg+reg]. */
16645 mem = gen_frame_mem (V4SImode,
16646 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
16648 insn = emit_move_insn (mem, savereg);
16650 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
16651 areg, GEN_INT (offset));
16655 /* VRSAVE is a bit vector representing which AltiVec registers
16656 are used. The OS uses this to determine which vector
16657 registers to save on a context switch. We need to save
16658 VRSAVE on the stack frame, add whatever AltiVec registers we
16659 used in this function, and do the corresponding magic in the epilogue.  */
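/* Added worked example (assumes ALTIVEC_REG_BIT maps v0 to the most
   significant bit, as elsewhere in this port): if the function uses v20 and
   v31, info->vrsave_mask is
     (0x80000000 >> 20) | (0x80000000 >> 31) = 0x00000800 | 0x00000001
                                             = 0x00000801
   and that value is ORed into the live VRSAVE below so the OS knows to
   preserve those vector registers across context switches.  */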
16662 if (TARGET_ALTIVEC && TARGET_ALTIVEC_VRSAVE
16663 && info->vrsave_mask != 0)
16665 rtx reg, mem, vrsave;
16668 /* Get VRSAVE onto a GPR. Note that ABI_V4 might be using r12
16669 as frame_reg_rtx and r11 as the static chain pointer for
16670 nested functions. */
16671 reg = gen_rtx_REG (SImode, 0);
16672 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
16674 emit_insn (gen_get_vrsave_internal (reg));
16676 emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
16678 if (!WORLD_SAVE_P (info))
16681 offset = info->vrsave_save_offset + sp_offset;
16682 mem = gen_frame_mem (SImode,
16683 gen_rtx_PLUS (Pmode, frame_reg_rtx,
16684 GEN_INT (offset)));
16685 insn = emit_move_insn (mem, reg);
16688 /* Include the registers in the mask. */
16689 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
16691 insn = emit_insn (generate_set_vrsave (reg, info, 0));
16694 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
16695 if ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
16696 || (DEFAULT_ABI == ABI_V4
16697 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
16698 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM)))
16700 /* If emit_load_toc_table will use the link register, we need to save
16701 it. We use R12 for this purpose because emit_load_toc_table
16702 can use register 0. This allows us to use a plain 'blr' to return
16703 from the procedure more often. */
16704 int save_LR_around_toc_setup = (TARGET_ELF
16705 && DEFAULT_ABI != ABI_AIX
16707 && ! info->lr_save_p
16708 && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
16709 if (save_LR_around_toc_setup)
16711 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
16713 insn = emit_move_insn (frame_ptr_rtx, lr);
16714 RTX_FRAME_RELATED_P (insn) = 1;
16716 rs6000_emit_load_toc_table (TRUE);
16718 insn = emit_move_insn (lr, frame_ptr_rtx);
16719 RTX_FRAME_RELATED_P (insn) = 1;
16722 rs6000_emit_load_toc_table (TRUE);
16726 if (DEFAULT_ABI == ABI_DARWIN
16727 && flag_pic && crtl->uses_pic_offset_table)
16729 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
16730 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
16732 /* Save and restore LR locally around this call (in R0). */
16733 if (!info->lr_save_p)
16734 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
16736 emit_insn (gen_load_macho_picbase (src));
16738 emit_move_insn (gen_rtx_REG (Pmode,
16739 RS6000_PIC_OFFSET_TABLE_REGNUM),
16742 if (!info->lr_save_p)
16743 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
16748 /* Write function prologue. */
16751 rs6000_output_function_prologue (FILE *file,
16752 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
16754 rs6000_stack_t *info = rs6000_stack_info ();
16756 if (TARGET_DEBUG_STACK)
16757 debug_stack_info (info);
16759 /* Write .extern for any function we will call to save and restore FP values.  */
16761 if (info->first_fp_reg_save < 64
16762 && !FP_SAVE_INLINE (info->first_fp_reg_save))
16763 fprintf (file, "\t.extern %s%d%s\n\t.extern %s%d%s\n",
16764 SAVE_FP_PREFIX, info->first_fp_reg_save - 32, SAVE_FP_SUFFIX,
16765 RESTORE_FP_PREFIX, info->first_fp_reg_save - 32, RESTORE_FP_SUFFIX);
16767 /* Write .extern for AIX common mode routines, if needed. */
16768 if (! TARGET_POWER && ! TARGET_POWERPC && ! common_mode_defined)
16770 fputs ("\t.extern __mulh\n", file);
16771 fputs ("\t.extern __mull\n", file);
16772 fputs ("\t.extern __divss\n", file);
16773 fputs ("\t.extern __divus\n", file);
16774 fputs ("\t.extern __quoss\n", file);
16775 fputs ("\t.extern __quous\n", file);
16776 common_mode_defined = 1;
16779 if (! HAVE_prologue)
16783 /* A NOTE_INSN_DELETED is supposed to be at the start and end of
16784 the "toplevel" insn chain. */
16785 emit_note (NOTE_INSN_DELETED);
16786 rs6000_emit_prologue ();
16787 emit_note (NOTE_INSN_DELETED);
16789 /* Expand INSN_ADDRESSES so final() doesn't crash. */
16793 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
16795 INSN_ADDRESSES_NEW (insn, addr);
16800 if (TARGET_DEBUG_STACK)
16801 debug_rtx_list (get_insns (), 100);
16802 final (get_insns (), file, FALSE);
16806 rs6000_pic_labelno++;
16809 /* Non-zero if vmx regs are restored before the frame pop, zero if
16810 we restore after the pop when possible. */
16811 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
16813 /* Reload CR from REG. */
16816 rs6000_restore_saved_cr (rtx reg, int using_mfcr_multiple)
16821 if (using_mfcr_multiple)
16823 for (i = 0; i < 8; i++)
16824 if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
16826 gcc_assert (count);
16829 if (using_mfcr_multiple && count > 1)
16834 p = rtvec_alloc (count);
16837 for (i = 0; i < 8; i++)
16838 if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
16840 rtvec r = rtvec_alloc (2);
16841 RTVEC_ELT (r, 0) = reg;
16842 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
16843 RTVEC_ELT (p, ndx) =
16844 gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO+i),
16845 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
16848 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
16849 gcc_assert (ndx == count);
16852 for (i = 0; i < 8; i++)
16853 if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
16855 emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode,
16861 /* Return true if OFFSET from stack pointer can be clobbered by signals.
16862 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
16863 below the stack pointer that are not clobbered by signals. */
16866 offset_below_red_zone_p (HOST_WIDE_INT offset)
16868 return offset < (DEFAULT_ABI == ABI_V4
16870 : TARGET_32BIT ? -220 : -288);
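/* Worked example (added): on 32-bit AIX/Darwin, offset_below_red_zone_p (-200)
   is false, since -200 >= -220 keeps the slot inside the 220-byte red zone
   that signal handlers must not touch, while offset_below_red_zone_p (-240)
   is true and such a slot has to be reloaded before the stack pointer is
   popped.  Under V.4 there is no red zone, so any negative offset counts as
   exposed.  */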
16873 /* Emit function epilogue as insns. */
16876 rs6000_emit_epilogue (int sibcall)
16878 rs6000_stack_t *info;
16879 int restoring_GPRs_inline;
16880 int restoring_FPRs_inline;
16881 int using_load_multiple;
16882 int using_mtcr_multiple;
16883 int use_backchain_to_restore_sp;
16887 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
16888 rtx frame_reg_rtx = sp_reg_rtx;
16889 rtx cfa_restores = NULL_RTX;
16891 enum machine_mode reg_mode = Pmode;
16892 int reg_size = TARGET_32BIT ? 4 : 8;
16895 info = rs6000_stack_info ();
16897 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
16899 reg_mode = V2SImode;
16903 strategy = rs6000_savres_strategy (info, /*savep=*/false,
16904 /*static_chain_p=*/0, sibcall);
16905 using_load_multiple = strategy & SAVRES_MULTIPLE;
16906 restoring_FPRs_inline = strategy & SAVRES_INLINE_FPRS;
16907 restoring_GPRs_inline = strategy & SAVRES_INLINE_GPRS;
16908 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
16909 || rs6000_cpu == PROCESSOR_PPC603
16910 || rs6000_cpu == PROCESSOR_PPC750
16912 /* Restore via the backchain when we have a large frame, since this
16913 is more efficient than an addis, addi pair. The second condition
16914 here will not trigger at the moment; we don't actually need a
16915 frame pointer for alloca, but the generic parts of the compiler
16916 give us one anyway. */
16917 use_backchain_to_restore_sp = (info->total_size > 32767
16918 || info->total_size
16919 + (info->lr_save_p ? info->lr_save_offset : 0)
16921 || (cfun->calls_alloca
16922 && !frame_pointer_needed));
16923 restore_lr = (info->lr_save_p
16924 && restoring_GPRs_inline
16925 && restoring_FPRs_inline);
16927 if (WORLD_SAVE_P (info))
16931 const char *alloc_rname;
16934 /* eh_rest_world_r10 will return to the location saved in the LR
16935 stack slot (which is not likely to be our caller.)
16936 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
16937 rest_world is similar, except any R10 parameter is ignored.
16938 The exception-handling stuff that was here in 2.95 is no
16939 longer necessary. */
16943 + 32 - info->first_gp_reg_save
16944 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
16945 + 63 + 1 - info->first_fp_reg_save);
16947 strcpy (rname, ((crtl->calls_eh_return) ?
16948 "*eh_rest_world_r10" : "*rest_world"));
16949 alloc_rname = ggc_strdup (rname);
16952 RTVEC_ELT (p, j++) = gen_rtx_RETURN (VOIDmode);
16953 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
16954 gen_rtx_REG (Pmode,
16957 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
16958 /* The instruction pattern requires a clobber here;
16959 it is shared with the restVEC helper. */
16961 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
16964 /* CR register traditionally saved as CR2. */
16965 rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
16966 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16967 GEN_INT (info->cr_save_offset));
16968 rtx mem = gen_frame_mem (reg_mode, addr);
16970 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
16973 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
16975 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
16976 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16977 GEN_INT (info->gp_save_offset
16979 rtx mem = gen_frame_mem (reg_mode, addr);
16981 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
16983 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
16985 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
16986 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16987 GEN_INT (info->altivec_save_offset
16989 rtx mem = gen_frame_mem (V4SImode, addr);
16991 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
16993 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
16995 rtx reg = gen_rtx_REG (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
16996 ? DFmode : SFmode),
16997 info->first_fp_reg_save + i);
16998 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
16999 GEN_INT (info->fp_save_offset
17001 rtx mem = gen_frame_mem (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
17002 ? DFmode : SFmode), addr);
17004 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
17006 RTVEC_ELT (p, j++)
17007 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
17008 RTVEC_ELT (p, j++)
17009 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
17010 RTVEC_ELT (p, j++)
17011 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
17012 RTVEC_ELT (p, j++)
17013 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
17014 RTVEC_ELT (p, j++)
17015 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
17016 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
17021 /* frame_reg_rtx + sp_offset points to the top of this stack frame. */
17023 sp_offset = info->total_size;
17025 /* Restore AltiVec registers if we must do so before adjusting the
17026 stack. */
17027 if (TARGET_ALTIVEC_ABI
17028 && info->altivec_size != 0
17029 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
17030 || (DEFAULT_ABI != ABI_V4
17031 && offset_below_red_zone_p (info->altivec_save_offset))))
17035 if (use_backchain_to_restore_sp)
17037 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
17038 emit_move_insn (frame_reg_rtx,
17039 gen_rtx_MEM (Pmode, sp_reg_rtx));
17042 else if (frame_pointer_needed)
17043 frame_reg_rtx = hard_frame_pointer_rtx;
17045 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
17046 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
17048 rtx addr, areg, mem, reg;
17050 areg = gen_rtx_REG (Pmode, 0);
17052 (areg, GEN_INT (info->altivec_save_offset
17054 + 16 * (i - info->first_altivec_reg_save)));
17056 /* AltiVec addressing mode is [reg+reg]. */
17057 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
17058 mem = gen_frame_mem (V4SImode, addr);
17060 reg = gen_rtx_REG (V4SImode, i);
17061 emit_move_insn (reg, mem);
17062 if (offset_below_red_zone_p (info->altivec_save_offset
17063 + (i - info->first_altivec_reg_save)
17065 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
17070 /* Restore VRSAVE if we must do so before adjusting the stack. */
17071 if (TARGET_ALTIVEC
17072 && TARGET_ALTIVEC_VRSAVE
17073 && info->vrsave_mask != 0
17074 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
17075 || (DEFAULT_ABI != ABI_V4
17076 && offset_below_red_zone_p (info->vrsave_save_offset))))
17078 rtx addr, mem, reg;
17080 if (frame_reg_rtx == sp_reg_rtx)
17082 if (use_backchain_to_restore_sp)
17084 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
17085 emit_move_insn (frame_reg_rtx,
17086 gen_rtx_MEM (Pmode, sp_reg_rtx));
17089 else if (frame_pointer_needed)
17090 frame_reg_rtx = hard_frame_pointer_rtx;
17093 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
17094 GEN_INT (info->vrsave_save_offset + sp_offset));
17095 mem = gen_frame_mem (SImode, addr);
17096 reg = gen_rtx_REG (SImode, 12);
17097 emit_move_insn (reg, mem);
17099 emit_insn (generate_set_vrsave (reg, info, 1));
17103 /* If we have a large stack frame, restore the old stack pointer
17104 using the backchain. */
17105 if (use_backchain_to_restore_sp)
17107 if (frame_reg_rtx == sp_reg_rtx)
17109 /* Under V.4, don't reset the stack pointer until after we're done
17110 loading the saved registers. */
17111 if (DEFAULT_ABI == ABI_V4)
17112 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
17114 insn = emit_move_insn (frame_reg_rtx,
17115 gen_rtx_MEM (Pmode, sp_reg_rtx));
17118 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
17119 && DEFAULT_ABI == ABI_V4)
17120 /* frame_reg_rtx has been set up by the altivec restore. */
17124 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
17125 frame_reg_rtx = sp_reg_rtx;
17128 /* If we have a frame pointer, we can restore the old stack pointer
17129 from it. */
17130 else if (frame_pointer_needed)
17132 frame_reg_rtx = sp_reg_rtx;
17133 if (DEFAULT_ABI == ABI_V4)
17134 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
17136 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
17137 GEN_INT (info->total_size)));
17140 else if (info->push_p
17141 && DEFAULT_ABI != ABI_V4
17142 && !crtl->calls_eh_return)
17144 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
17145 GEN_INT (info->total_size)));
17148 if (insn && frame_reg_rtx == sp_reg_rtx)
17152 REG_NOTES (insn) = cfa_restores;
17153 cfa_restores = NULL_RTX;
17155 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
17156 RTX_FRAME_RELATED_P (insn) = 1;
17159 /* Restore AltiVec registers if we have not done so already. */
17160 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
17161 && TARGET_ALTIVEC_ABI
17162 && info->altivec_size != 0
17163 && (DEFAULT_ABI == ABI_V4
17164 || !offset_below_red_zone_p (info->altivec_save_offset)))
17168 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
17169 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
17171 rtx addr, areg, mem, reg;
17173 areg = gen_rtx_REG (Pmode, 0);
17175 (areg, GEN_INT (info->altivec_save_offset
17177 + 16 * (i - info->first_altivec_reg_save)));
17179 /* AltiVec addressing mode is [reg+reg]. */
17180 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
17181 mem = gen_frame_mem (V4SImode, addr);
17183 reg = gen_rtx_REG (V4SImode, i);
17184 emit_move_insn (reg, mem);
17185 if (DEFAULT_ABI == ABI_V4)
17186 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
17191 /* Restore VRSAVE if we have not done so already. */
17192 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
17193 && TARGET_ALTIVEC
17194 && TARGET_ALTIVEC_VRSAVE
17195 && info->vrsave_mask != 0
17196 && (DEFAULT_ABI == ABI_V4
17197 || !offset_below_red_zone_p (info->vrsave_save_offset)))
17199 rtx addr, mem, reg;
17201 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
17202 GEN_INT (info->vrsave_save_offset + sp_offset));
17203 mem = gen_frame_mem (SImode, addr);
17204 reg = gen_rtx_REG (SImode, 12);
17205 emit_move_insn (reg, mem);
17207 emit_insn (generate_set_vrsave (reg, info, 1));
17210 /* Get the old lr if we saved it. If we are restoring registers
17211 out-of-line, then the out-of-line routines can do this for us. */
17214 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx,
17215 info->lr_save_offset + sp_offset);
17217 emit_move_insn (gen_rtx_REG (Pmode, 0), mem);
17220 /* Get the old cr if we saved it. */
17221 if (info->cr_save_p)
17223 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
17224 GEN_INT (info->cr_save_offset + sp_offset));
17225 rtx mem = gen_frame_mem (SImode, addr);
17227 emit_move_insn (gen_rtx_REG (SImode, 12), mem);
17230 /* Set LR here to try to overlap restores below. LR is always saved
17231 above the incoming stack, so it never needs REG_CFA_RESTORE. */
17233 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO),
17234 gen_rtx_REG (Pmode, 0));
17236 /* Load exception handler data registers, if needed. */
17237 if (crtl->calls_eh_return)
17239 unsigned int i, regno;
17243 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
17244 GEN_INT (sp_offset + 5 * reg_size));
17245 rtx mem = gen_frame_mem (reg_mode, addr);
17247 emit_move_insn (gen_rtx_REG (reg_mode, 2), mem);
17254 regno = EH_RETURN_DATA_REGNO (i);
17255 if (regno == INVALID_REGNUM)
17258 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
17259 info->ehrd_offset + sp_offset
17260 + reg_size * (int) i);
17262 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
17266 /* Restore GPRs. This is done as a PARALLEL if we are using
17267 the load-multiple instructions. */
17268 if (TARGET_SPE_ABI
17269 && info->spe_64bit_regs_used != 0
17270 && info->first_gp_reg_save != 32)
17272 /* Determine whether we can address all of the registers that need
17273 to be saved with an offset from the stack pointer that fits in
17274 the small const field for SPE memory instructions. */
17275 int spe_regs_addressable_via_sp
17276 = (SPE_CONST_OFFSET_OK(info->spe_gp_save_offset + sp_offset
17277 + (32 - info->first_gp_reg_save - 1) * reg_size)
17278 && restoring_GPRs_inline);
17281 if (spe_regs_addressable_via_sp)
17282 spe_offset = info->spe_gp_save_offset + sp_offset;
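/* Worked example (illustrative): evldd's offset field is 5 bits
   scaled by 8, so only the doubleword-aligned offsets 0..248 are
   encodable and SPE_CONST_OFFSET_OK accepts exactly those.  With
   spe_gp_save_offset + sp_offset == 184 and three 8-byte registers
   to restore, the last offset used is 184 + 2*8 == 200, which still
   fits, so no temporary base register is needed.  */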
17285 rtx old_frame_reg_rtx = frame_reg_rtx;
17286 /* Make r11 point to the start of the SPE save area. We worried about
17287 not clobbering it when we were saving registers in the prologue.
17288 There's no need to worry here because the static chain is passed
17289 anew to every function. */
17290 int ool_adjust = (restoring_GPRs_inline
17292 : (info->first_gp_reg_save
17293 - (FIRST_SAVRES_REGISTER+1))*8);
17295 if (frame_reg_rtx == sp_reg_rtx)
17296 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
17297 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
17298 GEN_INT (info->spe_gp_save_offset
17301 /* Keep the invariant that frame_reg_rtx + sp_offset points
17302 at the top of the stack frame. */
17303 sp_offset = -info->spe_gp_save_offset;
17308 if (restoring_GPRs_inline)
17310 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
17311 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
17313 rtx offset, addr, mem, reg;
17315 /* We're doing all this to ensure that the immediate offset
17316 fits into the immediate field of 'evldd'. */
17317 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
17319 offset = GEN_INT (spe_offset + reg_size * i);
17320 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
17321 mem = gen_rtx_MEM (V2SImode, addr);
17322 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
17324 insn = emit_move_insn (reg, mem);
17325 if (DEFAULT_ABI == ABI_V4)
17327 if (frame_pointer_needed
17328 && info->first_gp_reg_save + i
17329 == HARD_FRAME_POINTER_REGNUM)
17331 add_reg_note (insn, REG_CFA_DEF_CFA,
17332 plus_constant (frame_reg_rtx,
17334 RTX_FRAME_RELATED_P (insn) = 1;
17337 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
17346 par = rs6000_make_savres_rtx (info, gen_rtx_REG (Pmode, 11),
17348 /*savep=*/false, /*gpr=*/true,
17350 emit_jump_insn (par);
17351 /* We don't want anybody else emitting things after we jumped
17352 back. */
17356 else if (!restoring_GPRs_inline)
17358 /* We are jumping to an out-of-line function. */
17359 bool can_use_exit = info->first_fp_reg_save == 64;
17362 /* Emit stack reset code if we need it. */
17364 rs6000_emit_stack_reset (info, sp_reg_rtx, frame_reg_rtx,
17365 sp_offset, can_use_exit);
17367 emit_insn (gen_addsi3 (gen_rtx_REG (Pmode, 11),
17369 GEN_INT (sp_offset - info->fp_size)));
17371 par = rs6000_make_savres_rtx (info, frame_reg_rtx,
17372 info->gp_save_offset, reg_mode,
17373 /*savep=*/false, /*gpr=*/true,
17374 /*exitp=*/can_use_exit);
17378 if (info->cr_save_p)
17380 rs6000_restore_saved_cr (gen_rtx_REG (SImode, 12),
17381 using_mtcr_multiple);
17382 if (DEFAULT_ABI == ABI_V4)
17383 cfa_restores
17384 = alloc_reg_note (REG_CFA_RESTORE,
17385 gen_rtx_REG (SImode, CR2_REGNO),
17386 cfa_restores);
17389 emit_jump_insn (par);
17391 /* We don't want anybody else emitting things after we jumped
17392 back. */
17396 insn = emit_insn (par);
17397 if (DEFAULT_ABI == ABI_V4)
17399 if (frame_pointer_needed)
17401 add_reg_note (insn, REG_CFA_DEF_CFA,
17402 plus_constant (frame_reg_rtx, sp_offset));
17403 RTX_FRAME_RELATED_P (insn) = 1;
17406 for (i = info->first_gp_reg_save; i < 32; i++)
17407 cfa_restores
17408 = alloc_reg_note (REG_CFA_RESTORE,
17409 gen_rtx_REG (reg_mode, i), cfa_restores);
17412 else if (using_load_multiple)
17415 p = rtvec_alloc (32 - info->first_gp_reg_save);
17416 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
17418 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
17419 GEN_INT (info->gp_save_offset
17422 rtx mem = gen_frame_mem (reg_mode, addr);
17423 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
17425 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, reg, mem);
17426 if (DEFAULT_ABI == ABI_V4)
17427 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
17430 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
17431 if (DEFAULT_ABI == ABI_V4 && frame_pointer_needed)
17433 add_reg_note (insn, REG_CFA_DEF_CFA,
17434 plus_constant (frame_reg_rtx, sp_offset));
17435 RTX_FRAME_RELATED_P (insn) = 1;
17440 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
17441 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
17443 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
17444 GEN_INT (info->gp_save_offset
17447 rtx mem = gen_frame_mem (reg_mode, addr);
17448 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
17450 insn = emit_move_insn (reg, mem);
17451 if (DEFAULT_ABI == ABI_V4)
17453 if (frame_pointer_needed
17454 && info->first_gp_reg_save + i
17455 == HARD_FRAME_POINTER_REGNUM)
17457 add_reg_note (insn, REG_CFA_DEF_CFA,
17458 plus_constant (frame_reg_rtx, sp_offset));
17459 RTX_FRAME_RELATED_P (insn) = 1;
17462 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
17468 /* Restore fpr's if we need to do it without calling a function. */
17469 if (restoring_FPRs_inline)
17470 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
17471 if ((df_regs_ever_live_p (info->first_fp_reg_save+i)
17472 && ! call_used_regs[info->first_fp_reg_save+i]))
17474 rtx addr, mem, reg;
17475 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
17476 GEN_INT (info->fp_save_offset
17479 mem = gen_frame_mem (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
17480 ? DFmode : SFmode), addr);
17481 reg = gen_rtx_REG (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
17482 ? DFmode : SFmode),
17483 info->first_fp_reg_save + i);
17485 emit_move_insn (reg, mem);
17486 if (DEFAULT_ABI == ABI_V4)
17487 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
17491 /* If we saved cr, restore it here. Just those that were used. */
17492 if (info->cr_save_p)
17494 rs6000_restore_saved_cr (gen_rtx_REG (SImode, 12), using_mtcr_multiple);
17495 if (DEFAULT_ABI == ABI_V4)
17496 cfa_restores
17497 = alloc_reg_note (REG_CFA_RESTORE, gen_rtx_REG (SImode, CR2_REGNO), cfa_restores);
17501 /* If this is V.4, unwind the stack pointer after all of the loads
17502 have been done. */
17503 insn = rs6000_emit_stack_reset (info, sp_reg_rtx, frame_reg_rtx,
17504 sp_offset, !restoring_FPRs_inline);
17509 REG_NOTES (insn) = cfa_restores;
17510 cfa_restores = NULL_RTX;
17512 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
17513 RTX_FRAME_RELATED_P (insn) = 1;
17516 if (crtl->calls_eh_return)
17518 rtx sa = EH_RETURN_STACKADJ_RTX;
17519 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
17525 if (! restoring_FPRs_inline)
17526 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
17527 else
17528 p = rtvec_alloc (2);
17530 RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
17531 RTVEC_ELT (p, 1) = (restoring_FPRs_inline
17532 ? gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 65))
17533 : gen_rtx_CLOBBER (VOIDmode,
17534 gen_rtx_REG (Pmode, 65)));
17536 /* If we have to restore more than two FP registers, branch to the
17537 restore function. It will return to our caller. */
17538 if (! restoring_FPRs_inline)
17543 sym = rs6000_savres_routine_sym (info,
17547 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
17548 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
17549 gen_rtx_REG (Pmode, 11));
17550 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
17553 addr = gen_rtx_PLUS (Pmode, sp_reg_rtx,
17554 GEN_INT (info->fp_save_offset + 8*i));
17555 mem = gen_frame_mem (DFmode, addr);
17557 RTVEC_ELT (p, i+4) =
17558 gen_rtx_SET (VOIDmode,
17559 gen_rtx_REG (DFmode, info->first_fp_reg_save + i),
17564 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
17568 /* Write function epilogue. */
17571 rs6000_output_function_epilogue (FILE *file,
17572 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
17574 if (! HAVE_epilogue)
17576 rtx insn = get_last_insn ();
17577 /* If the last insn was a BARRIER, we don't have to write anything except
17578 the trace table. */
17579 if (GET_CODE (insn) == NOTE)
17580 insn = prev_nonnote_insn (insn);
17581 if (insn == 0 || GET_CODE (insn) != BARRIER)
17583 /* This is slightly ugly, but at least we don't have two
17584 copies of the epilogue-emitting code. */
17587 /* A NOTE_INSN_DELETED is supposed to be at the start
17588 and end of the "toplevel" insn chain. */
17589 emit_note (NOTE_INSN_DELETED);
17590 rs6000_emit_epilogue (FALSE);
17591 emit_note (NOTE_INSN_DELETED);
17593 /* Expand INSN_ADDRESSES so final() doesn't crash. */
17597 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
17599 INSN_ADDRESSES_NEW (insn, addr);
17604 if (TARGET_DEBUG_STACK)
17605 debug_rtx_list (get_insns (), 100);
17606 final (get_insns (), file, FALSE);
17612 macho_branch_islands ();
17613 /* Mach-O doesn't support labels at the end of objects, so if
17614 it looks like we might want one, insert a NOP. */
17616 rtx insn = get_last_insn ();
17619 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
17620 insn = PREV_INSN (insn);
17624 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
17625 fputs ("\tnop\n", file);
17629 /* Output a traceback table here. See /usr/include/sys/debug.h for info
17630 on its format.
17632 We don't output a traceback table if -finhibit-size-directive was
17633 used. The documentation for -finhibit-size-directive reads
17634 ``don't output a @code{.size} assembler directive, or anything
17635 else that would cause trouble if the function is split in the
17636 middle, and the two halves are placed at locations far apart in
17637 memory.'' The traceback table has this property, since it
17638 includes the offset from the start of the function to the
17639 traceback table itself.
17641 System V.4 PowerPC (and the embedded ABI derived from it) uses a
17642 different traceback table. */
17643 if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
17644 && rs6000_traceback != traceback_none && !cfun->is_thunk)
17646 const char *fname = NULL;
17647 const char *language_string = lang_hooks.name;
17648 int fixed_parms = 0, float_parms = 0, parm_info = 0;
17650 int optional_tbtab;
17651 rs6000_stack_t *info = rs6000_stack_info ();
17653 if (rs6000_traceback == traceback_full)
17654 optional_tbtab = 1;
17655 else if (rs6000_traceback == traceback_part)
17656 optional_tbtab = 0;
17657 else
17658 optional_tbtab = !optimize_size && !TARGET_ELF;
17660 if (optional_tbtab)
17662 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
17663 while (*fname == '.') /* V.4 encodes . in the name */
17664 fname++;
17666 /* Need label immediately before tbtab, so we can compute
17667 its offset from the function start. */
17668 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
17669 ASM_OUTPUT_LABEL (file, fname);
17672 /* The .tbtab pseudo-op can only be used for the first eight
17673 expressions, since it can't handle the possibly variable
17674 length fields that follow. However, if you omit the optional
17675 fields, the assembler outputs zeros for all optional fields
17676 anyway, giving each variable length field its minimum length
17677 (as defined in sys/debug.h). Thus we cannot use the .tbtab
17678 pseudo-op at all. */
17680 /* An all-zero word flags the start of the tbtab, for debuggers
17681 that have to find it by searching forward from the entry
17682 point or from the current pc. */
17683 fputs ("\t.long 0\n", file);
17685 /* Tbtab format type. Use format type 0. */
17686 fputs ("\t.byte 0,", file);
17688 /* Language type. Unfortunately, there does not seem to be any
17689 official way to discover the language being compiled, so we
17690 use language_string.
17691 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
17692 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
17693 a number, so for now use 9. */
17694 if (! strcmp (language_string, "GNU C"))
17696 else if (! strcmp (language_string, "GNU F77")
17697 || ! strcmp (language_string, "GNU Fortran"))
17699 else if (! strcmp (language_string, "GNU Pascal"))
17701 else if (! strcmp (language_string, "GNU Ada"))
17703 else if (! strcmp (language_string, "GNU C++")
17704 || ! strcmp (language_string, "GNU Objective-C++"))
17706 else if (! strcmp (language_string, "GNU Java"))
17708 else if (! strcmp (language_string, "GNU Objective-C"))
17711 gcc_unreachable ();
17712 fprintf (file, "%d,", i);
17714 /* 8 single bit fields: global linkage (not set for C extern linkage,
17715 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
17716 from start of procedure stored in tbtab, internal function, function
17717 has controlled storage, function has no toc, function uses fp,
17718 function logs/aborts fp operations. */
17719 /* Assume that fp operations are used if any fp reg must be saved. */
17720 fprintf (file, "%d,",
17721 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
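/* Worked example (illustrative): a full traceback (optional_tbtab
   == 1) for a function that saves FPRs starting at f14
   (first_fp_reg_save != 64) emits (1 << 5) | (1 << 1) == 34, i.e.
   0x22.  */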
17723 /* 6 bitfields: function is interrupt handler, name present in
17724 proc table, function calls alloca, on condition directives
17725 (controls stack walks, 3 bits), saves condition reg, saves
17726 return reg. */
17727 /* The `function calls alloca' bit seems to be set whenever reg 31 is
17728 set up as a frame pointer, even when there is no alloca call. */
17729 fprintf (file, "%d,",
17730 ((optional_tbtab << 6)
17731 | ((optional_tbtab & frame_pointer_needed) << 5)
17732 | (info->cr_save_p << 1)
17733 | (info->lr_save_p)));
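/* Worked example (illustrative): with optional_tbtab == 1,
   frame_pointer_needed == 1, cr_save_p == 1 and lr_save_p == 1 the
   byte emitted is (1 << 6) | (1 << 5) | (1 << 1) | 1 == 99 (0x63).  */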
17735 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
17736 (6 bits). */
17737 fprintf (file, "%d,",
17738 (info->push_p << 7) | (64 - info->first_fp_reg_save));
17740 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
17741 fprintf (file, "%d,", (32 - first_reg_to_save ()));
17743 if (optional_tbtab)
17745 /* Compute the parameter info from the function decl argument
17746 list. */
17747 tree decl;
17748 int next_parm_info_bit = 31;
17750 for (decl = DECL_ARGUMENTS (current_function_decl);
17751 decl; decl = TREE_CHAIN (decl))
17753 rtx parameter = DECL_INCOMING_RTL (decl);
17754 enum machine_mode mode = GET_MODE (parameter);
17756 if (GET_CODE (parameter) == REG)
17758 if (SCALAR_FLOAT_MODE_P (mode))
17779 gcc_unreachable ();
17782 /* If only one bit will fit, don't OR in this entry. */
17783 if (next_parm_info_bit > 0)
17784 parm_info |= (bits << (next_parm_info_bit - 1));
17785 next_parm_info_bit -= 2;
17789 fixed_parms += ((GET_MODE_SIZE (mode)
17790 + (UNITS_PER_WORD - 1))
17792 next_parm_info_bit -= 1;
17798 /* Number of fixed point parameters. */
17799 /* This is actually the number of words of fixed point parameters; thus
17800 an 8-byte struct counts as 2, and thus the maximum value is 8. */
17801 fprintf (file, "%d,", fixed_parms);
17803 /* 2 bitfields: number of floating point parameters (7 bits), parameters
17804 on stack (1 bit). */
17805 /* This is actually the number of fp registers that hold parameters;
17806 and thus the maximum value is 13. */
17807 /* Set parameters on stack bit if parameters are not in their original
17808 registers, regardless of whether they are on the stack? Xlc
17809 seems to set the bit when not optimizing. */
17810 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
17812 if (! optional_tbtab)
17815 /* Optional fields follow. Some are variable length. */
17817 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
17818 11 double float. */
17819 /* There is an entry for each parameter in a register, in the order that
17820 they occur in the parameter list. Any intervening arguments on the
17821 stack are ignored. If the list overflows a long (max possible length
17822 34 bits) then completely leave off all elements that don't fit. */
17823 /* Only emit this long if there was at least one parameter. */
17824 if (fixed_parms || float_parms)
17825 fprintf (file, "\t.long %d\n", parm_info);
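/* Worked example (illustrative): for f (int, double, float) the
   packing above yields bit 31 == 0 (fixed), bits 30-29 == 11
   (double float), bits 28-27 == 10 (single float), so parm_info ==
   0x70000000 and the directive reads ".long 1879048192".  */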
17827 /* Offset from start of code to tb table. */
17828 fputs ("\t.long ", file);
17829 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
17830 if (TARGET_AIX)
17831 RS6000_OUTPUT_BASENAME (file, fname);
17832 else
17833 assemble_name (file, fname);
17834 putc ('-', file);
17835 rs6000_output_function_entry (file, fname);
17838 /* Interrupt handler mask. */
17839 /* Omit this long, since we never set the interrupt handler bit
17840 above. */
17842 /* Number of CTL (controlled storage) anchors. */
17843 /* Omit this long, since the has_ctl bit is never set above. */
17845 /* Displacement into stack of each CTL anchor. */
17846 /* Omit this list of longs, because there are no CTL anchors. */
17848 /* Length of function name. */
17851 fprintf (file, "\t.short %d\n", (int) strlen (fname));
17853 /* Function name. */
17854 assemble_string (fname, strlen (fname));
17856 /* Register for alloca automatic storage; this is always reg 31.
17857 Only emit this if the alloca bit was set above. */
17858 if (frame_pointer_needed)
17859 fputs ("\t.byte 31\n", file);
17861 fputs ("\t.align 2\n", file);
17865 /* A C compound statement that outputs the assembler code for a thunk
17866 function, used to implement C++ virtual function calls with
17867 multiple inheritance. The thunk acts as a wrapper around a virtual
17868 function, adjusting the implicit object parameter before handing
17869 control off to the real function.
17871 First, emit code to add the integer DELTA to the location that
17872 contains the incoming first argument. Assume that this argument
17873 contains a pointer, and is the one used to pass the `this' pointer
17874 in C++. This is the incoming argument *before* the function
17875 prologue, e.g. `%o0' on a sparc. The addition must preserve the
17876 values of all other incoming arguments.
17878 After the addition, emit code to jump to FUNCTION, which is a
17879 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
17880 not touch the return address. Hence returning from FUNCTION will
17881 return to whoever called the current `thunk'.
17883 The effect must be as if FUNCTION had been called directly with the
17884 adjusted first argument. This macro is responsible for emitting
17885 all of the code for a thunk function; output_function_prologue()
17886 and output_function_epilogue() are not invoked.
17888 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
17889 been extracted from it.) It might possibly be useful on some
17890 targets, but probably not.
17892 If you do not define this macro, the target-independent code in the
17893 C++ frontend will generate a less efficient heavyweight thunk that
17894 calls FUNCTION instead of jumping to it. The generic approach does
17895 not support varargs. */
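/* A sketch of the net effect of the code emitted below (illustrative
   C, not part of the original source):

     this += delta;
     if (vcall_offset)
       this += *(ptrdiff_t *) (*(char **) this + vcall_offset);
     goto function;

   The vtable pointer is fetched from the already delta-adjusted
   this, matching the order of the two adjustments below.  */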
17898 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
17899 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
17902 rtx this_rtx, insn, funexp;
17904 reload_completed = 1;
17905 epilogue_completed = 1;
17907 /* Mark the end of the (empty) prologue. */
17908 emit_note (NOTE_INSN_PROLOGUE_END);
17910 /* Find the "this" pointer. If the function returns a structure,
17911 the structure return pointer is in r3. */
17912 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
17913 this_rtx = gen_rtx_REG (Pmode, 4);
17914 else
17915 this_rtx = gen_rtx_REG (Pmode, 3);
17917 /* Apply the constant offset, if required. */
17920 rtx delta_rtx = GEN_INT (delta);
17921 emit_insn (TARGET_32BIT
17922 ? gen_addsi3 (this_rtx, this_rtx, delta_rtx)
17923 : gen_adddi3 (this_rtx, this_rtx, delta_rtx));
17926 /* Apply the offset from the vtable, if required. */
17929 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
17930 rtx tmp = gen_rtx_REG (Pmode, 12);
17932 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
17933 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
17935 emit_insn (TARGET_32BIT
17936 ? gen_addsi3 (tmp, tmp, vcall_offset_rtx)
17937 : gen_adddi3 (tmp, tmp, vcall_offset_rtx));
17938 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
17942 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
17944 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
17946 emit_insn (TARGET_32BIT
17947 ? gen_addsi3 (this_rtx, this_rtx, tmp)
17948 : gen_adddi3 (this_rtx, this_rtx, tmp));
17951 /* Generate a tail call to the target function. */
17952 if (!TREE_USED (function))
17954 assemble_external (function);
17955 TREE_USED (function) = 1;
17957 funexp = XEXP (DECL_RTL (function), 0);
17958 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
17961 if (MACHOPIC_INDIRECT)
17962 funexp = machopic_indirect_call_target (funexp);
17965 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
17966 generate sibcall RTL explicitly. */
17967 insn = emit_call_insn (
17968 gen_rtx_PARALLEL (VOIDmode,
17970 gen_rtx_CALL (VOIDmode,
17971 funexp, const0_rtx),
17972 gen_rtx_USE (VOIDmode, const0_rtx),
17973 gen_rtx_USE (VOIDmode,
17974 gen_rtx_REG (SImode,
17976 gen_rtx_RETURN (VOIDmode))));
17977 SIBLING_CALL_P (insn) = 1;
17980 /* Run just enough of rest_of_compilation to get the insns emitted.
17981 There's not really enough bulk here to make other passes such as
17982 instruction scheduling worthwhile. Note that use_thunk calls
17983 assemble_start_function and assemble_end_function. */
17984 insn = get_insns ();
17985 insn_locators_alloc ();
17986 shorten_branches (insn);
17987 final_start_function (insn, file, 1);
17988 final (insn, file, 1);
17989 final_end_function ();
17990 free_after_compilation (cfun);
17992 reload_completed = 0;
17993 epilogue_completed = 0;
17996 /* A quick summary of the various types of 'constant-pool tables'
17999 Target          Flags           Name             One table per
18000 AIX             (none)          AIX TOC          object file
18001 AIX             -mfull-toc      AIX TOC          object file
18002 AIX             -mminimal-toc   AIX minimal TOC  translation unit
18003 SVR4/EABI       (none)          SVR4 SDATA       object file
18004 SVR4/EABI       -fpic           SVR4 pic         object file
18005 SVR4/EABI       -fPIC           SVR4 PIC         translation unit
18006 SVR4/EABI       -mrelocatable   EABI TOC         function
18007 SVR4/EABI       -maix           AIX TOC          object file
18008 SVR4/EABI       -maix -mminimal-toc
18009                                 AIX minimal TOC  translation unit
18011 Name              Reg.  Set by  entries  contains:
18012                                 made by  addrs?  fp?      sum?
18014 AIX TOC            2    crt0    as       Y       option   option
18015 AIX minimal TOC   30    prolog  gcc      Y       Y        option
18016 SVR4 SDATA        13    crt0    gcc      N       Y        N
18017 SVR4 pic          30    prolog  ld       Y       not yet  N
18018 SVR4 PIC          30    prolog  gcc      Y       option   option
18019 EABI TOC          30    prolog  gcc      Y       option   option
18023 /* Hash functions for the hash table. */
18026 rs6000_hash_constant (rtx k)
18028 enum rtx_code code = GET_CODE (k);
18029 enum machine_mode mode = GET_MODE (k);
18030 unsigned result = (code << 3) ^ mode;
18031 const char *format;
18034 format = GET_RTX_FORMAT (code);
18035 flen = strlen (format);
18041 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
18044 if (mode != VOIDmode)
18045 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
18057 for (; fidx < flen; fidx++)
18058 switch (format[fidx])
18063 const char *str = XSTR (k, fidx);
18064 len = strlen (str);
18065 result = result * 613 + len;
18066 for (i = 0; i < len; i++)
18067 result = result * 613 + (unsigned) str[i];
18072 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
18076 result = result * 613 + (unsigned) XINT (k, fidx);
18079 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
18080 result = result * 613 + (unsigned) XWINT (k, fidx);
18084 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
18085 result = result * 613 + (unsigned) (XWINT (k, fidx)
18092 gcc_unreachable ();
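/* Illustration (not from the source): hashing (const_int 5) starts
   from (CONST_INT << 3) ^ VOIDmode, then the single 'w' operand is
   folded in as result = result * 613 + 5 on hosts where unsigned is
   at least as wide as HOST_WIDE_INT.  */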
18099 toc_hash_function (const void *hash_entry)
18101 const struct toc_hash_struct *thc =
18102 (const struct toc_hash_struct *) hash_entry;
18103 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
18106 /* Compare H1 and H2 for equivalence. */
18109 toc_hash_eq (const void *h1, const void *h2)
18111 rtx r1 = ((const struct toc_hash_struct *) h1)->key;
18112 rtx r2 = ((const struct toc_hash_struct *) h2)->key;
18114 if (((const struct toc_hash_struct *) h1)->key_mode
18115 != ((const struct toc_hash_struct *) h2)->key_mode)
18118 return rtx_equal_p (r1, r2);
18121 /* These are the names given by the C++ front-end to vtables, and
18122 vtable-like objects. Ideally, this logic should not be here;
18123 instead, there should be some programmatic way of inquiring as
18124 to whether or not an object is a vtable. */
18126 #define VTABLE_NAME_P(NAME) \
18127 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
18128 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
18129 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
18130 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
18131 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
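/* For instance (illustrative): "_ZTV3Foo" (the Itanium C++ ABI
   vtable for Foo) and "_ZTI3Foo" (its type_info) match, while an
   ordinary function symbol such as "_Z3barv" does not.  */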
18133 #ifdef NO_DOLLAR_IN_LABEL
18134 /* Return a GGC-allocated character string translating dollar signs in
18135 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
18138 rs6000_xcoff_strip_dollar (const char *name)
18143 p = strchr (name, '$');
18145 if (p == 0 || p == name)
18148 len = strlen (name);
18149 strip = (char *) alloca (len + 1);
18150 strcpy (strip, name);
18151 p = strchr (strip, '$');
18155 p = strchr (p + 1, '$');
18158 return ggc_alloc_string (strip, len);
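/* For instance (illustrative): "foo$bar$baz" becomes "foo_bar_baz",
   while a name containing no '$', or one whose first character is
   '$', is returned unchanged.  */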
18163 rs6000_output_symbol_ref (FILE *file, rtx x)
18165 /* Currently C++ toc references to vtables can be emitted before it
18166 is decided whether the vtable is public or private. If this is
18167 the case, then the linker will eventually complain that there is
18168 a reference to an unknown section. Thus, for vtables only,
18169 we emit the TOC reference to reference the symbol and not the
18170 section. */
18171 const char *name = XSTR (x, 0);
18173 if (VTABLE_NAME_P (name))
18175 RS6000_OUTPUT_BASENAME (file, name);
18178 assemble_name (file, name);
18181 /* Output a TOC entry. We derive the entry name from what is being
18185 output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
18188 const char *name = buf;
18190 HOST_WIDE_INT offset = 0;
18192 gcc_assert (!TARGET_NO_TOC);
18194 /* When the linker won't eliminate them, don't output duplicate
18195 TOC entries (this happens on AIX if there is any kind of TOC,
18196 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
18197 LABEL_REFs. */
18198 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
18200 struct toc_hash_struct *h;
18203 /* Create toc_hash_table. This can't be done at OVERRIDE_OPTIONS
18204 time because GGC is not initialized at that point. */
18205 if (toc_hash_table == NULL)
18206 toc_hash_table = htab_create_ggc (1021, toc_hash_function,
18207 toc_hash_eq, NULL);
18209 h = GGC_NEW (struct toc_hash_struct);
18211 h->key_mode = mode;
18212 h->labelno = labelno;
18214 found = htab_find_slot (toc_hash_table, h, INSERT);
18215 if (*found == NULL)
18217 else /* This is indeed a duplicate.
18218 Set this label equal to that label. */
18220 fputs ("\t.set ", file);
18221 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
18222 fprintf (file, "%d,", labelno);
18223 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
18224 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
18230 /* If we're going to put a double constant in the TOC, make sure it's
18231 aligned properly when strict alignment is on. */
18232 if (GET_CODE (x) == CONST_DOUBLE
18233 && STRICT_ALIGNMENT
18234 && GET_MODE_BITSIZE (mode) >= 64
18235 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
18236 ASM_OUTPUT_ALIGN (file, 3);
18239 (*targetm.asm_out.internal_label) (file, "LC", labelno);
18241 /* Handle FP constants specially. Note that if we have a minimal
18242 TOC, things we put here aren't actually in the TOC, so we can allow
18243 FP constants. */
18244 if (GET_CODE (x) == CONST_DOUBLE &&
18245 (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
18247 REAL_VALUE_TYPE rv;
18250 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
18251 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
18252 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
18254 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
18258 if (TARGET_MINIMAL_TOC)
18259 fputs (DOUBLE_INT_ASM_OP, file);
18261 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
18262 k[0] & 0xffffffff, k[1] & 0xffffffff,
18263 k[2] & 0xffffffff, k[3] & 0xffffffff);
18264 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
18265 k[0] & 0xffffffff, k[1] & 0xffffffff,
18266 k[2] & 0xffffffff, k[3] & 0xffffffff);
18271 if (TARGET_MINIMAL_TOC)
18272 fputs ("\t.long ", file);
18274 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
18275 k[0] & 0xffffffff, k[1] & 0xffffffff,
18276 k[2] & 0xffffffff, k[3] & 0xffffffff);
18277 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
18278 k[0] & 0xffffffff, k[1] & 0xffffffff,
18279 k[2] & 0xffffffff, k[3] & 0xffffffff);
18283 else if (GET_CODE (x) == CONST_DOUBLE &&
18284 (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
18286 REAL_VALUE_TYPE rv;
18289 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
18291 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
18292 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
18294 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
18298 if (TARGET_MINIMAL_TOC)
18299 fputs (DOUBLE_INT_ASM_OP, file);
18301 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
18302 k[0] & 0xffffffff, k[1] & 0xffffffff);
18303 fprintf (file, "0x%lx%08lx\n",
18304 k[0] & 0xffffffff, k[1] & 0xffffffff);
18309 if (TARGET_MINIMAL_TOC)
18310 fputs ("\t.long ", file);
18312 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
18313 k[0] & 0xffffffff, k[1] & 0xffffffff);
18314 fprintf (file, "0x%lx,0x%lx\n",
18315 k[0] & 0xffffffff, k[1] & 0xffffffff);
18319 else if (GET_CODE (x) == CONST_DOUBLE &&
18320 (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
18322 REAL_VALUE_TYPE rv;
18325 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
18326 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
18327 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
18329 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
18333 if (TARGET_MINIMAL_TOC)
18334 fputs (DOUBLE_INT_ASM_OP, file);
18336 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
18337 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
18342 if (TARGET_MINIMAL_TOC)
18343 fputs ("\t.long ", file);
18345 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
18346 fprintf (file, "0x%lx\n", l & 0xffffffff);
18350 else if (GET_MODE (x) == VOIDmode
18351 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
18353 unsigned HOST_WIDE_INT low;
18354 HOST_WIDE_INT high;
18356 if (GET_CODE (x) == CONST_DOUBLE)
18358 low = CONST_DOUBLE_LOW (x);
18359 high = CONST_DOUBLE_HIGH (x);
18362 #if HOST_BITS_PER_WIDE_INT == 32
18365 high = (low & 0x80000000) ? ~0 : 0;
18369 low = INTVAL (x) & 0xffffffff;
18370 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
18374 /* TOC entries are always Pmode-sized, but since this
18375 is a bigendian machine then if we're putting smaller
18376 integer constants in the TOC we have to pad them.
18377 (This is still a win over putting the constants in
18378 a separate constant pool, because then we'd have
18379 to have both a TOC entry _and_ the actual constant.)
18381 For a 32-bit target, CONST_INT values are loaded and shifted
18382 entirely within `low' and can be stored in one TOC entry. */
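/* For instance (illustrative): an SImode constant 0x1234 in the TOC
   of a 64-bit target is shifted left by 64 - 32 == 32 bits, so the
   doubleword entry reads 0x0000123400000000 with the value in the
   most significant half.  */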
18384 /* It would be easy to make this work, but it doesn't now. */
18385 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
18387 if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
18389 #if HOST_BITS_PER_WIDE_INT == 32
18390 lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
18391 POINTER_SIZE, &low, &high, 0);
18394 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
18395 high = (HOST_WIDE_INT) low >> 32;
18402 if (TARGET_MINIMAL_TOC)
18403 fputs (DOUBLE_INT_ASM_OP, file);
18405 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
18406 (long) high & 0xffffffff, (long) low & 0xffffffff);
18407 fprintf (file, "0x%lx%08lx\n",
18408 (long) high & 0xffffffff, (long) low & 0xffffffff);
18413 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
18415 if (TARGET_MINIMAL_TOC)
18416 fputs ("\t.long ", file);
18418 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
18419 (long) high & 0xffffffff, (long) low & 0xffffffff);
18420 fprintf (file, "0x%lx,0x%lx\n",
18421 (long) high & 0xffffffff, (long) low & 0xffffffff);
18425 if (TARGET_MINIMAL_TOC)
18426 fputs ("\t.long ", file);
18428 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
18429 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
18435 if (GET_CODE (x) == CONST)
18437 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS);
18439 base = XEXP (XEXP (x, 0), 0);
18440 offset = INTVAL (XEXP (XEXP (x, 0), 1));
18443 switch (GET_CODE (base))
18446 name = XSTR (base, 0);
18450 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
18451 CODE_LABEL_NUMBER (XEXP (base, 0)));
18455 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
18459 gcc_unreachable ();
18462 if (TARGET_MINIMAL_TOC)
18463 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
18466 fputs ("\t.tc ", file);
18467 RS6000_OUTPUT_BASENAME (file, name);
18470 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
18472 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
18474 fputs ("[TC],", file);
18477 /* Currently C++ toc references to vtables can be emitted before it
18478 is decided whether the vtable is public or private. If this is
18479 the case, then the linker will eventually complain that there is
18480 a TOC reference to an unknown section. Thus, for vtables only,
18481 we emit the TOC reference to reference the symbol and not the
18482 section. */
18483 if (VTABLE_NAME_P (name))
18485 RS6000_OUTPUT_BASENAME (file, name);
18487 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
18488 else if (offset > 0)
18489 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
18492 output_addr_const (file, x);
18496 /* Output an assembler pseudo-op to write an ASCII string of N characters
18497 starting at P to FILE.
18499 On the RS/6000, we have to do this using the .byte operation and
18500 write out special characters outside the quoted string.
18501 Also, the assembler is broken; very long strings are truncated,
18502 so we must artificially break them up early. */
18505 output_ascii (FILE *file, const char *p, int n)
18508 int i, count_string;
18509 const char *for_string = "\t.byte \"";
18510 const char *for_decimal = "\t.byte ";
18511 const char *to_close = NULL;
18514 for (i = 0; i < n; i++)
18517 if (c >= ' ' && c < 0177)
18520 fputs (for_string, file);
18523 /* Write two quotes to get one. */
18531 for_decimal = "\"\n\t.byte ";
18535 if (count_string >= 512)
18537 fputs (to_close, file);
18539 for_string = "\t.byte \"";
18540 for_decimal = "\t.byte ";
18548 fputs (for_decimal, file);
18549 fprintf (file, "%d", c);
18551 for_string = "\n\t.byte \"";
18552 for_decimal = ", ";
18558 /* Now close the string if we have written one. Then end the line. */
18560 fputs (to_close, file);
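/* Sample output (illustrative): for the three bytes "Hi\n" the loop
   above emits

	.byte "Hi"
	.byte 10

   printable runs share one quoted .byte directive while each
   non-printable byte becomes a decimal operand.  */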
18563 /* Generate a unique section name for FILENAME for a section type
18564 represented by SECTION_DESC. Output goes into BUF.
18566 SECTION_DESC can be any string, as long as it is different for each
18567 possible section type.
18569 We name the section in the same manner as xlc. The name begins with an
18570 underscore followed by the filename (after stripping any leading directory
18571 names) with the last period replaced by the string SECTION_DESC. If
18572 FILENAME does not contain a period, SECTION_DESC is appended to the end of
18573 the name. */
18576 rs6000_gen_section_name (char **buf, const char *filename,
18577 const char *section_desc)
18579 const char *q, *after_last_slash, *last_period = 0;
18583 after_last_slash = filename;
18584 for (q = filename; *q; q++)
18587 after_last_slash = q + 1;
18588 else if (*q == '.')
18592 len = strlen (after_last_slash) + strlen (section_desc) + 2;
18593 *buf = (char *) xmalloc (len);
18598 for (q = after_last_slash; *q; q++)
18600 if (q == last_period)
18602 strcpy (p, section_desc);
18603 p += strlen (section_desc);
18607 else if (ISALNUM (*q))
18611 if (last_period == 0)
18612 strcpy (p, section_desc);
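/* For instance (illustrative): FILENAME "src/foo.c" with
   SECTION_DESC ".ro_" yields "_foo.ro_"; non-alphanumeric characters
   before the last period are dropped, and a FILENAME without any
   period simply gets SECTION_DESC appended.  */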
18617 /* Emit profile function. */
18620 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
18622 /* Non-standard profiling for kernels, which just saves LR then calls
18623 _mcount without worrying about arg saves. The idea is to change
18624 the function prologue as little as possible as it isn't easy to
18625 account for arg save/restore code added just for _mcount. */
18626 if (TARGET_PROFILE_KERNEL)
18629 if (DEFAULT_ABI == ABI_AIX)
18631 #ifndef NO_PROFILE_COUNTERS
18632 # define NO_PROFILE_COUNTERS 0
18634 if (NO_PROFILE_COUNTERS)
18635 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
18636 LCT_NORMAL, VOIDmode, 0);
18640 const char *label_name;
18643 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
18644 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
18645 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
18647 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
18648 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
18651 else if (DEFAULT_ABI == ABI_DARWIN)
18653 const char *mcount_name = RS6000_MCOUNT;
18654 int caller_addr_regno = LR_REGNO;
18656 /* Be conservative and always set this, at least for now. */
18657 crtl->uses_pic_offset_table = 1;
18660 /* For PIC code, set up a stub and collect the caller's address
18661 from r0, which is where the prologue puts it. */
18662 if (MACHOPIC_INDIRECT
18663 && crtl->uses_pic_offset_table)
18664 caller_addr_regno = 0;
18666 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
18667 LCT_NORMAL, VOIDmode, 1,
18668 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
18672 /* Write function profiler code. */
18675 output_function_profiler (FILE *file, int labelno)
18679 switch (DEFAULT_ABI)
18682 gcc_unreachable ();
18687 warning (0, "no profiling of 64-bit code for this ABI");
18690 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
18691 fprintf (file, "\tmflr %s\n", reg_names[0]);
18692 if (NO_PROFILE_COUNTERS)
18694 asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
18695 reg_names[0], reg_names[1]);
18697 else if (TARGET_SECURE_PLT && flag_pic)
18699 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n\t{st|stw} %s,4(%s)\n",
18700 reg_names[0], reg_names[1]);
18701 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
18702 asm_fprintf (file, "\t{cau|addis} %s,%s,",
18703 reg_names[12], reg_names[12]);
18704 assemble_name (file, buf);
18705 asm_fprintf (file, "-1b@ha\n\t{cal|la} %s,", reg_names[0]);
18706 assemble_name (file, buf);
18707 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
18709 else if (flag_pic == 1)
18711 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
18712 asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
18713 reg_names[0], reg_names[1]);
18714 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
18715 asm_fprintf (file, "\t{l|lwz} %s,", reg_names[0]);
18716 assemble_name (file, buf);
18717 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
18719 else if (flag_pic > 1)
18721 asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
18722 reg_names[0], reg_names[1]);
18723 /* Now, we need to get the address of the label. */
18724 fputs ("\tbcl 20,31,1f\n\t.long ", file);
18725 assemble_name (file, buf);
18726 fputs ("-.\n1:", file);
18727 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
18728 asm_fprintf (file, "\t{l|lwz} %s,0(%s)\n",
18729 reg_names[0], reg_names[11]);
18730 asm_fprintf (file, "\t{cax|add} %s,%s,%s\n",
18731 reg_names[0], reg_names[0], reg_names[11]);
18735 asm_fprintf (file, "\t{liu|lis} %s,", reg_names[12]);
18736 assemble_name (file, buf);
18737 fputs ("@ha\n", file);
18738 asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
18739 reg_names[0], reg_names[1]);
18740 asm_fprintf (file, "\t{cal|la} %s,", reg_names[0]);
18741 assemble_name (file, buf);
18742 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
18745 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
18746 fprintf (file, "\tbl %s%s\n",
18747 RS6000_MCOUNT, flag_pic ? "@plt" : "");
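/* Sample of the non-PIC sequence emitted above when profile counters
   are in use (illustrative; the exact label spelling depends on
   ASM_GENERATE_INTERNAL_LABEL):

	mflr 0
	lis 12,LPn@ha
	stw 0,4(1)
	la 0,LPn@l(12)
	bl _mcount
*/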
18752 if (!TARGET_PROFILE_KERNEL)
18754 /* Don't do anything, done in output_profile_hook (). */
18758 gcc_assert (!TARGET_32BIT);
18760 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
18761 asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
18763 if (cfun->static_chain_decl != NULL)
18765 asm_fprintf (file, "\tstd %s,24(%s)\n",
18766 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
18767 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
18768 asm_fprintf (file, "\tld %s,24(%s)\n",
18769 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
18772 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
18780 /* The following variable value is the last issued insn. */
18782 static rtx last_scheduled_insn;
18784 /* The following variable helps to balance issuing of load and
18785 store instructions */
18787 static int load_store_pendulum;
18789 /* Power4 load update and store update instructions are cracked into a
18790 load or store and an integer insn which are executed in the same cycle.
18791 Branches have their own dispatch slot which does not count against the
18792 GCC issue rate, but it changes the program flow so there are no other
18793 instructions to issue in this cycle. */
18796 rs6000_variable_issue (FILE *stream ATTRIBUTE_UNUSED,
18797 int verbose ATTRIBUTE_UNUSED,
18798 rtx insn, int more)
18800 last_scheduled_insn = insn;
18801 if (GET_CODE (PATTERN (insn)) == USE
18802 || GET_CODE (PATTERN (insn)) == CLOBBER)
18804 cached_can_issue_more = more;
18805 return cached_can_issue_more;
18808 if (insn_terminates_group_p (insn, current_group))
18810 cached_can_issue_more = 0;
18811 return cached_can_issue_more;
18814 /* If the insn has no reservation but we reach here anyway */
18815 if (recog_memoized (insn) < 0)
18818 if (rs6000_sched_groups)
18820 if (is_microcoded_insn (insn))
18821 cached_can_issue_more = 0;
18822 else if (is_cracked_insn (insn))
18823 cached_can_issue_more = more > 2 ? more - 2 : 0;
18825 cached_can_issue_more = more - 1;
18827 return cached_can_issue_more;
18830 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
18833 cached_can_issue_more = more - 1;
18834 return cached_can_issue_more;
18837 /* Adjust the cost of a scheduling dependency. Return the new cost of
18838 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
18841 rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
18843 enum attr_type attr_type;
18845 if (! recog_memoized (insn))
18848 switch (REG_NOTE_KIND (link))
18852 /* Data dependency; DEP_INSN writes a register that INSN reads
18853 some cycles later. */
18855 /* Separate a load from a narrower, dependent store. */
18856 if (rs6000_sched_groups
18857 && GET_CODE (PATTERN (insn)) == SET
18858 && GET_CODE (PATTERN (dep_insn)) == SET
18859 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
18860 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
18861 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
18862 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
18865 attr_type = get_attr_type (insn);
18870 /* Tell the first scheduling pass about the latency between
18871 a mtctr and bctr (and mtlr and br/blr). The first
18872 scheduling pass will not know about this latency since
18873 the mtctr instruction, which has the latency associated
18874 to it, will be generated by reload. */
18875 return TARGET_POWER ? 5 : 4;
18877 /* Leave some extra cycles between a compare and its
18878 dependent branch, to inhibit expensive mispredicts. */
18879 if ((rs6000_cpu_attr == CPU_PPC603
18880 || rs6000_cpu_attr == CPU_PPC604
18881 || rs6000_cpu_attr == CPU_PPC604E
18882 || rs6000_cpu_attr == CPU_PPC620
18883 || rs6000_cpu_attr == CPU_PPC630
18884 || rs6000_cpu_attr == CPU_PPC750
18885 || rs6000_cpu_attr == CPU_PPC7400
18886 || rs6000_cpu_attr == CPU_PPC7450
18887 || rs6000_cpu_attr == CPU_POWER4
18888 || rs6000_cpu_attr == CPU_POWER5
18889 || rs6000_cpu_attr == CPU_CELL)
18890 && recog_memoized (dep_insn)
18891 && (INSN_CODE (dep_insn) >= 0))
18893 switch (get_attr_type (dep_insn))
18897 case TYPE_DELAYED_COMPARE:
18898 case TYPE_IMUL_COMPARE:
18899 case TYPE_LMUL_COMPARE:
18900 case TYPE_FPCOMPARE:
18901 case TYPE_CR_LOGICAL:
18902 case TYPE_DELAYED_CR:
18911 case TYPE_STORE_UX:
18913 case TYPE_FPSTORE_U:
18914 case TYPE_FPSTORE_UX:
18915 if ((rs6000_cpu == PROCESSOR_POWER6)
18916 && recog_memoized (dep_insn)
18917 && (INSN_CODE (dep_insn) >= 0))
18920 if (GET_CODE (PATTERN (insn)) != SET)
18921 /* If this happens, we have to extend this to schedule
18922 optimally. Return default for now. */
18925 /* Adjust the cost for the case where the value written
18926 by a fixed point operation is used as the address
18927 gen value on a store. */
18928 switch (get_attr_type (dep_insn))
18935 if (! store_data_bypass_p (dep_insn, insn))
18939 case TYPE_LOAD_EXT:
18940 case TYPE_LOAD_EXT_U:
18941 case TYPE_LOAD_EXT_UX:
18942 case TYPE_VAR_SHIFT_ROTATE:
18943 case TYPE_VAR_DELAYED_COMPARE:
18945 if (! store_data_bypass_p (dep_insn, insn))
18951 case TYPE_FAST_COMPARE:
18954 case TYPE_INSERT_WORD:
18955 case TYPE_INSERT_DWORD:
18956 case TYPE_FPLOAD_U:
18957 case TYPE_FPLOAD_UX:
18959 case TYPE_STORE_UX:
18960 case TYPE_FPSTORE_U:
18961 case TYPE_FPSTORE_UX:
18963 if (! store_data_bypass_p (dep_insn, insn))
18971 case TYPE_IMUL_COMPARE:
18972 case TYPE_LMUL_COMPARE:
18974 if (! store_data_bypass_p (dep_insn, insn))
18980 if (! store_data_bypass_p (dep_insn, insn))
18986 if (! store_data_bypass_p (dep_insn, insn))
18999 case TYPE_LOAD_EXT:
19000 case TYPE_LOAD_EXT_U:
19001 case TYPE_LOAD_EXT_UX:
19002 if ((rs6000_cpu == PROCESSOR_POWER6)
19003 && recog_memoized (dep_insn)
19004 && (INSN_CODE (dep_insn) >= 0))
19007 /* Adjust the cost for the case where the value written
19008 by a fixed point instruction is used within the address
19009 gen portion of a subsequent load(u)(x) */
19010 switch (get_attr_type (dep_insn))
19017 if (set_to_load_agen (dep_insn, insn))
19021 case TYPE_LOAD_EXT:
19022 case TYPE_LOAD_EXT_U:
19023 case TYPE_LOAD_EXT_UX:
19024 case TYPE_VAR_SHIFT_ROTATE:
19025 case TYPE_VAR_DELAYED_COMPARE:
19027 if (set_to_load_agen (dep_insn, insn))
19033 case TYPE_FAST_COMPARE:
19036 case TYPE_INSERT_WORD:
19037 case TYPE_INSERT_DWORD:
19038 case TYPE_FPLOAD_U:
19039 case TYPE_FPLOAD_UX:
19041 case TYPE_STORE_UX:
19042 case TYPE_FPSTORE_U:
19043 case TYPE_FPSTORE_UX:
19045 if (set_to_load_agen (dep_insn, insn))
19053 case TYPE_IMUL_COMPARE:
19054 case TYPE_LMUL_COMPARE:
19056 if (set_to_load_agen (dep_insn, insn))
19062 if (set_to_load_agen (dep_insn, insn))
19068 if (set_to_load_agen (dep_insn, insn))
19079 if ((rs6000_cpu == PROCESSOR_POWER6)
19080 && recog_memoized (dep_insn)
19081 && (INSN_CODE (dep_insn) >= 0)
19082 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
19089 /* Fall out to return default cost. */
19093 case REG_DEP_OUTPUT:
19094 /* Output dependency; DEP_INSN writes a register that INSN writes some
19095 cycles later. */
19096 if ((rs6000_cpu == PROCESSOR_POWER6)
19097 && recog_memoized (dep_insn)
19098 && (INSN_CODE (dep_insn) >= 0))
19100 attr_type = get_attr_type (insn);
19105 if (get_attr_type (dep_insn) == TYPE_FP)
19109 if (get_attr_type (dep_insn) == TYPE_MFFGPR)
19117 /* Anti dependency; DEP_INSN reads a register that INSN writes some
19118 cycles later. */
19122 gcc_unreachable ();
19128 /* The function returns true if INSN is microcoded.
19129 Return false otherwise. */
19132 is_microcoded_insn (rtx insn)
19134 if (!insn || !INSN_P (insn)
19135 || GET_CODE (PATTERN (insn)) == USE
19136 || GET_CODE (PATTERN (insn)) == CLOBBER)
19139 if (rs6000_cpu_attr == CPU_CELL)
19140 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
19142 if (rs6000_sched_groups)
19144 enum attr_type type = get_attr_type (insn);
19145 if (type == TYPE_LOAD_EXT_U
19146 || type == TYPE_LOAD_EXT_UX
19147 || type == TYPE_LOAD_UX
19148 || type == TYPE_STORE_UX
19149 || type == TYPE_MFCR)
19156 /* The function returns true if INSN is cracked into 2 instructions
19157 by the processor (and therefore occupies 2 issue slots). */
19160 is_cracked_insn (rtx insn)
19162 if (!insn || !INSN_P (insn)
19163 || GET_CODE (PATTERN (insn)) == USE
19164 || GET_CODE (PATTERN (insn)) == CLOBBER)
19167 if (rs6000_sched_groups)
19169 enum attr_type type = get_attr_type (insn);
19170 if (type == TYPE_LOAD_U || type == TYPE_STORE_U
19171 || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
19172 || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
19173 || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
19174 || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
19175 || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
19176 || type == TYPE_IDIV || type == TYPE_LDIV
19177 || type == TYPE_INSERT_WORD)
19184 /* The function returns true if INSN can be issued only from
19185 the branch slot. */
19188 is_branch_slot_insn (rtx insn)
19190 if (!insn || !INSN_P (insn)
19191 || GET_CODE (PATTERN (insn)) == USE
19192 || GET_CODE (PATTERN (insn)) == CLOBBER)
19195 if (rs6000_sched_groups)
19197 enum attr_type type = get_attr_type (insn);
19198 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
19206 /* The function returns true if out_insn sets a value that is
19207 used in the address generation computation of in_insn. */
19209 set_to_load_agen (rtx out_insn, rtx in_insn)
19211 rtx out_set, in_set;
19213 /* For performance reasons, only handle the simple case where
19214 both loads are a single_set. */
19215 out_set = single_set (out_insn);
19218 in_set = single_set (in_insn);
19220 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
19226 /* The function returns true if the target storage location of
19227 out_insn is adjacent to the target storage location of in_insn */
19228 /* Return 1 if memory locations are adjacent. */
19231 adjacent_mem_locations (rtx insn1, rtx insn2)
19234 rtx a = get_store_dest (PATTERN (insn1));
19235 rtx b = get_store_dest (PATTERN (insn2));
19237 if ((GET_CODE (XEXP (a, 0)) == REG
19238 || (GET_CODE (XEXP (a, 0)) == PLUS
19239 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
19240 && (GET_CODE (XEXP (b, 0)) == REG
19241 || (GET_CODE (XEXP (b, 0)) == PLUS
19242 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
19244 HOST_WIDE_INT val0 = 0, val1 = 0, val_diff;
19247 if (GET_CODE (XEXP (a, 0)) == PLUS)
19249 reg0 = XEXP (XEXP (a, 0), 0);
19250 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
19253 reg0 = XEXP (a, 0);
19255 if (GET_CODE (XEXP (b, 0)) == PLUS)
19257 reg1 = XEXP (XEXP (b, 0), 0);
19258 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
19261 reg1 = XEXP (b, 0);
19263 val_diff = val1 - val0;
19265 return ((REGNO (reg0) == REGNO (reg1))
19266 && ((MEM_SIZE (a) && val_diff == INTVAL (MEM_SIZE (a)))
19267 || (MEM_SIZE (b) && val_diff == -INTVAL (MEM_SIZE (b)))));
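/* Worked example (illustrative): two 4-byte stores to 4(r9) and
   8(r9) give val_diff == 4 == MEM_SIZE of the first store, so they
   are adjacent; stores to 4(r9) and 12(r9) (val_diff == 8) are
   not.  */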
19273 /* A C statement (sans semicolon) to update the integer scheduling
19274 priority INSN_PRIORITY (INSN). Increase the priority to execute the
19275 INSN earlier, reduce the priority to execute INSN later. Do not
19276 define this macro if you do not need to adjust the scheduling
19277 priorities of insns. */
19280 rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
19282 /* On machines (like the 750) which have asymmetric integer units,
19283 where one integer unit can do multiplies and divides and the other
19284 can't, reduce the priority of multiply/divide so it is scheduled
19285 before other integer operations. */
19288 if (! INSN_P (insn))
19291 if (GET_CODE (PATTERN (insn)) == USE)
19294 switch (rs6000_cpu_attr) {
19296 switch (get_attr_type (insn))
19303 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
19304 priority, priority);
19305 if (priority >= 0 && priority < 0x01000000)
19312 if (insn_must_be_first_in_group (insn)
19313 && reload_completed
19314 && current_sched_info->sched_max_insns_priority
19315 && rs6000_sched_restricted_insns_priority)
19318 /* Prioritize insns that can be dispatched only in the first
19320 if (rs6000_sched_restricted_insns_priority == 1)
19321 /* Attach highest priority to insn. This means that in
19322 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
19323 precede 'priority' (critical path) considerations. */
19324 return current_sched_info->sched_max_insns_priority;
19325 else if (rs6000_sched_restricted_insns_priority == 2)
19326 /* Increase priority of insn by a minimal amount. This means that in
19327 haifa-sched.c:ready_sort(), only 'priority' (critical path)
19328 considerations precede dispatch-slot restriction considerations. */
19329 return (priority + 1);
19332 if (rs6000_cpu == PROCESSOR_POWER6
19333 && ((load_store_pendulum == -2 && is_load_insn (insn))
19334 || (load_store_pendulum == 2 && is_store_insn (insn))))
19335 /* Attach highest priority to insn if the scheduler has just issued two
19336 stores and this instruction is a load, or two loads and this instruction
19337 is a store. Power6 wants loads and stores scheduled alternately
19339 return current_sched_info->sched_max_insns_priority;
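/* A minimal standalone sketch of the rs6000_sched_restricted_insns_priority
   handling above (it ignores the POWER6 load/store pendulum case at the end
   of the function, illustrated separately after rs6000_sched_reorder2).
   SCHEME plays the role of that variable; hypothetical helper, for
   illustration only.  */
static int
sketch_restricted_insn_priority (int scheme, int priority, int max_priority)
{
  if (scheme == 1)
    return max_priority;	/* dispatch-slot restrictions dominate
				   critical-path considerations         */
  if (scheme == 2)
    return priority + 1;	/* critical path still dominates, but the
				   restricted insn gets a minimal boost  */
  return priority;		/* scheme 0: leave the priority alone    */
}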
19344 /* Return true if the instruction is nonpipelined on the Cell. */
19346 is_nonpipeline_insn (rtx insn)
19348 enum attr_type type;
19349 if (!insn || !INSN_P (insn)
19350 || GET_CODE (PATTERN (insn)) == USE
19351 || GET_CODE (PATTERN (insn)) == CLOBBER)
19354 type = get_attr_type (insn);
19355 if (type == TYPE_IMUL
19356 || type == TYPE_IMUL2
19357 || type == TYPE_IMUL3
19358 || type == TYPE_LMUL
19359 || type == TYPE_IDIV
19360 || type == TYPE_LDIV
19361 || type == TYPE_SDIV
19362 || type == TYPE_DDIV
19363 || type == TYPE_SSQRT
19364 || type == TYPE_DSQRT
19365 || type == TYPE_MFCR
19366 || type == TYPE_MFCRF
19367 || type == TYPE_MFJMPR)
19375 /* Return how many instructions the machine can issue per cycle. */
19378 rs6000_issue_rate (void)
19380 /* Use issue rate of 1 for first scheduling pass to decrease degradation. */
19381 if (!reload_completed)
19384 switch (rs6000_cpu_attr) {
19385 case CPU_RIOS1: /* ? */
19387 case CPU_PPC601: /* ? */
19396 case CPU_PPCE300C2:
19397 case CPU_PPCE300C3:
19398 case CPU_PPCE500MC:
19415 /* Return how many instructions to look ahead for better insn
19419 rs6000_use_sched_lookahead (void)
19421 if (rs6000_cpu_attr == CPU_PPC8540)
19423 if (rs6000_cpu_attr == CPU_CELL)
19424 return (reload_completed ? 8 : 0);
19428 /* We are choosing insn from the ready queue. Return nonzero if INSN can be chosen. */
19430 rs6000_use_sched_lookahead_guard (rtx insn)
19432 if (rs6000_cpu_attr != CPU_CELL)
19435 if (insn == NULL_RTX || !INSN_P (insn))
19438 if (!reload_completed
19439 || is_nonpipeline_insn (insn)
19440 || is_microcoded_insn (insn))
19446 /* Determine if PAT refers to memory. */
19449 is_mem_ref (rtx pat)
19455 /* stack_tie does not produce any real memory traffic. */
19456 if (GET_CODE (pat) == UNSPEC
19457 && XINT (pat, 1) == UNSPEC_TIE)
19460 if (GET_CODE (pat) == MEM)
19463 /* Recursively process the pattern. */
19464 fmt = GET_RTX_FORMAT (GET_CODE (pat));
19466 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0 && !ret; i--)
19469 ret |= is_mem_ref (XEXP (pat, i));
19470 else if (fmt[i] == 'E')
19471 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
19472 ret |= is_mem_ref (XVECEXP (pat, i, j));
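/* A minimal standalone sketch of the recursive walk in is_mem_ref, using a
   simplified expression node in place of rtx: a node either is a memory
   reference itself or contains one among its operands (the 'e' and 'E'
   format slots walked above).  Hypothetical type, for illustration only.  */
struct expr_node
{
  int is_mem;			/* nonzero for a memory reference */
  int n_ops;			/* number of sub-expressions      */
  struct expr_node **ops;	/* the sub-expressions            */
};

static int
sketch_contains_mem (const struct expr_node *e)
{
  int i;

  if (e->is_mem)
    return 1;
  for (i = e->n_ops - 1; i >= 0; i--)
    if (sketch_contains_mem (e->ops[i]))
      return 1;
  return 0;
}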
19478 /* Determine if PAT is a PATTERN of a load insn. */
19481 is_load_insn1 (rtx pat)
19483 if (!pat || pat == NULL_RTX)
19486 if (GET_CODE (pat) == SET)
19487 return is_mem_ref (SET_SRC (pat));
19489 if (GET_CODE (pat) == PARALLEL)
19493 for (i = 0; i < XVECLEN (pat, 0); i++)
19494 if (is_load_insn1 (XVECEXP (pat, 0, i)))
19501 /* Determine if INSN loads from memory. */
19504 is_load_insn (rtx insn)
19506 if (!insn || !INSN_P (insn))
19509 if (GET_CODE (insn) == CALL_INSN)
19512 return is_load_insn1 (PATTERN (insn));
19515 /* Determine if PAT is a PATTERN of a store insn. */
19518 is_store_insn1 (rtx pat)
19520 if (!pat || pat == NULL_RTX)
19523 if (GET_CODE (pat) == SET)
19524 return is_mem_ref (SET_DEST (pat));
19526 if (GET_CODE (pat) == PARALLEL)
19530 for (i = 0; i < XVECLEN (pat, 0); i++)
19531 if (is_store_insn1 (XVECEXP (pat, 0, i)))
19538 /* Determine if INSN stores to memory. */
19541 is_store_insn (rtx insn)
19543 if (!insn || !INSN_P (insn))
19546 return is_store_insn1 (PATTERN (insn));
19549 /* Return the dest of a store insn. */
19552 get_store_dest (rtx pat)
19554 gcc_assert (is_store_insn1 (pat));
19556 if (GET_CODE (pat) == SET)
19557 return SET_DEST (pat);
19558 else if (GET_CODE (pat) == PARALLEL)
19562 for (i = 0; i < XVECLEN (pat, 0); i++)
19564 rtx inner_pat = XVECEXP (pat, 0, i);
19565 if (GET_CODE (inner_pat) == SET
19566 && is_mem_ref (SET_DEST (inner_pat)))
19570 /* We shouldn't get here, because we should have either a simple
19571 store insn or a store with update which are covered above. */
19575 /* Returns whether the dependence between INSN and NEXT is considered
19576 costly by the given target. */
19579 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
19584 /* If the flag is not enabled - no dependence is considered costly;
19585 allow all dependent insns in the same group.
19586 This is the most aggressive option. */
19587 if (rs6000_sched_costly_dep == no_dep_costly)
19590 /* If the flag is set to 1 - a dependence is always considered costly;
19591 do not allow dependent instructions in the same group.
19592 This is the most conservative option. */
19593 if (rs6000_sched_costly_dep == all_deps_costly)
19596 insn = DEP_PRO (dep);
19597 next = DEP_CON (dep);
19599 if (rs6000_sched_costly_dep == store_to_load_dep_costly
19600 && is_load_insn (next)
19601 && is_store_insn (insn))
19602 /* Prevent load after store in the same group. */
19605 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
19606 && is_load_insn (next)
19607 && is_store_insn (insn)
19608 && DEP_TYPE (dep) == REG_DEP_TRUE)
19609 /* Prevent load after store in the same group if it is a true
19613 /* The flag is set to X; dependences with latency >= X are considered costly,
19614 and will not be scheduled in the same group. */
19615 if (rs6000_sched_costly_dep <= max_dep_latency
19616 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
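/* A minimal standalone sketch of the decision above, with the policy spelled
   out as an explicit enum plus a separate latency threshold instead of the
   single overloaded rs6000_sched_costly_dep value.  Hypothetical names and
   parameters, for illustration only.  */
enum sketch_costly_policy
{
  SKETCH_NONE_COSTLY,			/* no dependence is costly            */
  SKETCH_ALL_COSTLY,			/* every dependence is costly         */
  SKETCH_STORE_TO_LOAD_COSTLY,		/* any store->load dependence         */
  SKETCH_TRUE_STORE_TO_LOAD_COSTLY,	/* only true store->load dependences  */
  SKETCH_LATENCY_COSTLY			/* remaining latency >= threshold     */
};

static int
sketch_is_costly_dependence (enum sketch_costly_policy policy, int threshold,
			     int cost, int distance,
			     int store_then_load, int is_true_dep)
{
  switch (policy)
    {
    case SKETCH_NONE_COSTLY:
      return 0;
    case SKETCH_ALL_COSTLY:
      return 1;
    case SKETCH_STORE_TO_LOAD_COSTLY:
      return store_then_load;
    case SKETCH_TRUE_STORE_TO_LOAD_COSTLY:
      return store_then_load && is_true_dep;
    case SKETCH_LATENCY_COSTLY:
      /* Costly when the latency not yet covered by the distance already
	 between the two insns reaches the threshold.  */
      return cost - distance >= threshold;
    }
  return 0;
}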
19622 /* Return the next insn after INSN that is found before TAIL is reached,
19623 skipping any "non-active" insns - insns that will not actually occupy
19624 an issue slot. Return NULL_RTX if such an insn is not found. */
19627 get_next_active_insn (rtx insn, rtx tail)
19629 if (insn == NULL_RTX || insn == tail)
19634 insn = NEXT_INSN (insn);
19635 if (insn == NULL_RTX || insn == tail)
19640 || (NONJUMP_INSN_P (insn)
19641 && GET_CODE (PATTERN (insn)) != USE
19642 && GET_CODE (PATTERN (insn)) != CLOBBER
19643 && INSN_CODE (insn) != CODE_FOR_stack_tie))
19649 /* We are about to begin issuing insns for this clock cycle. */
19652 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
19653 rtx *ready ATTRIBUTE_UNUSED,
19654 int *pn_ready ATTRIBUTE_UNUSED,
19655 int clock_var ATTRIBUTE_UNUSED)
19657 int n_ready = *pn_ready;
19660 fprintf (dump, "// rs6000_sched_reorder :\n");
19662 /* Reorder the ready list, if the second to last ready insn
19663 is a nonpipeline insn. */
19664 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
19666 if (is_nonpipeline_insn (ready[n_ready - 1])
19667 && (recog_memoized (ready[n_ready - 2]) > 0))
19668 /* Simply swap first two insns. */
19670 rtx tmp = ready[n_ready - 1];
19671 ready[n_ready - 1] = ready[n_ready - 2];
19672 ready[n_ready - 2] = tmp;
19676 if (rs6000_cpu == PROCESSOR_POWER6)
19677 load_store_pendulum = 0;
19679 return rs6000_issue_rate ();
19682 /* Like rs6000_sched_reorder, but called after issuing each insn. */
19685 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
19686 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
19689 fprintf (dump, "// rs6000_sched_reorder2 :\n");
19691 /* For Power6, we need to handle some special cases to try and keep the
19692 store queue from overflowing and triggering expensive flushes.
19694 This code monitors how load and store instructions are being issued
19695 and skews the ready list one way or the other to increase the likelihood
19696 that a desired instruction is issued at the proper time.
19698 A couple of things are done. First, we maintain a "load_store_pendulum"
19699 to track the current state of load/store issue.
19701 - If the pendulum is at zero, then no loads or stores have been
19702 issued in the current cycle so we do nothing.
19704 - If the pendulum is 1, then a single load has been issued in this
19705 cycle and we attempt to locate another load in the ready list to
19708 - If the pendulum is -2, then two stores have already been
19709 issued in this cycle, so we increase the priority of the first load
19710 in the ready list to increase its likelihood of being chosen first
19713 - If the pendulum is -1, then a single store has been issued in this
19714 cycle and we attempt to locate another store in the ready list to
19715 issue with it, preferring a store to an adjacent memory location to
19716 facilitate store pairing in the store queue.
19718 - If the pendulum is 2, then two loads have already been
19719 issued in this cycle, so we increase the priority of the first store
19720 in the ready list to increase its likelihood of being chosen first
19723 - If the pendulum < -2 or > 2, then do nothing.
19725 Note: This code covers the most common scenarios. There exist non
19726 load/store instructions which make use of the LSU and which
19727 would need to be accounted for to strictly model the behavior
19728 of the machine. Those instructions are currently unaccounted
19729 for to help minimize compile time overhead of this code.
19731 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
19737 if (is_store_insn (last_scheduled_insn))
19738 /* Issuing a store, swing the load_store_pendulum to the left */
19739 load_store_pendulum--;
19740 else if (is_load_insn (last_scheduled_insn))
19741 /* Issuing a load, swing the load_store_pendulum to the right */
19742 load_store_pendulum++;
19744 return cached_can_issue_more;
19746 /* If the pendulum is balanced, or there is only one instruction on
19747 the ready list, then all is well, so return. */
19748 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
19749 return cached_can_issue_more;
19751 if (load_store_pendulum == 1)
19753 /* A load has been issued in this cycle. Scan the ready list
19754 for another load to issue with it */
19759 if (is_load_insn (ready[pos]))
19761 /* Found a load. Move it to the head of the ready list,
19762 and adjust its priority so that it is more likely to
19765 for (i=pos; i<*pn_ready-1; i++)
19766 ready[i] = ready[i + 1];
19767 ready[*pn_ready-1] = tmp;
19769 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
19770 INSN_PRIORITY (tmp)++;
19776 else if (load_store_pendulum == -2)
19778 /* Two stores have been issued in this cycle. Increase the
19779 priority of the first load in the ready list to favor it for
19780 issuing in the next cycle. */
19785 if (is_load_insn (ready[pos])
19787 && INSN_PRIORITY_KNOWN (ready[pos]))
19789 INSN_PRIORITY (ready[pos])++;
19791 /* Adjust the pendulum to account for the fact that a load
19792 was found and increased in priority. This is to prevent
19793 increasing the priority of multiple loads */
19794 load_store_pendulum--;
19801 else if (load_store_pendulum == -1)
19803 /* A store has been issued in this cycle. Scan the ready list for
19804 another store to issue with it, preferring a store to an adjacent
19806 int first_store_pos = -1;
19812 if (is_store_insn (ready[pos]))
19814 /* Maintain the index of the first store found on the
19816 if (first_store_pos == -1)
19817 first_store_pos = pos;
19819 if (is_store_insn (last_scheduled_insn)
19820 && adjacent_mem_locations (last_scheduled_insn,ready[pos]))
19822 /* Found an adjacent store. Move it to the head of the
19823 ready list, and adjust its priority so that it is
19824 more likely to stay there */
19826 for (i=pos; i<*pn_ready-1; i++)
19827 ready[i] = ready[i + 1];
19828 ready[*pn_ready-1] = tmp;
19830 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
19831 INSN_PRIORITY (tmp)++;
19833 first_store_pos = -1;
19841 if (first_store_pos >= 0)
19843 /* An adjacent store wasn't found, but a non-adjacent store was,
19844 so move the non-adjacent store to the front of the ready
19845 list, and adjust its priority so that it is more likely to
19847 tmp = ready[first_store_pos];
19848 for (i=first_store_pos; i<*pn_ready-1; i++)
19849 ready[i] = ready[i + 1];
19850 ready[*pn_ready-1] = tmp;
19851 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
19852 INSN_PRIORITY (tmp)++;
19855 else if (load_store_pendulum == 2)
19857 /* Two loads have been issued in this cycle. Increase the priority
19858 of the first store in the ready list to favor it for issuing in
19864 if (is_store_insn (ready[pos])
19866 && INSN_PRIORITY_KNOWN (ready[pos]))
19868 INSN_PRIORITY (ready[pos])++;
19870 /* Adjust the pendulum to account for the fact that a store
19871 was found and increased in priority. This is to prevent
19872 increasing the priority of multiple stores */
19873 load_store_pendulum++;
19882 return cached_can_issue_more;
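/* A minimal standalone sketch of the POWER6 load/store pendulum described in
   the comment above, reduced to the counter itself: a store swings it left,
   a load swings it right, and its value tells the next call what to look for
   in the ready list.  Hypothetical helper, for illustration only.  */
static int
sketch_pendulum_update (int pendulum, int issued_store, int issued_load)
{
  if (issued_store)
    pendulum--;			/* swing left  */
  else if (issued_load)
    pendulum++;			/* swing right */
  return pendulum;
}

/* How rs6000_sched_reorder2 then reacts to the value:
      0   balanced, nothing to do
      1   look for a second load to pair with the one just issued
     -1   look for a second (preferably adjacent) store
      2   bump the priority of the first store in the ready list
     -2   bump the priority of the first load in the ready list
    else  out of range, nothing to do.  */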
19885 /* Return whether the presence of INSN causes a dispatch group termination
19886 of group WHICH_GROUP.
19888 If WHICH_GROUP == current_group, this function will return true if INSN
19889 causes the termination of the current group (i.e., the dispatch group to
19890 which INSN belongs). This means that INSN will be the last insn in the
19891 group it belongs to.
19893 If WHICH_GROUP == previous_group, this function will return true if INSN
19894 causes the termination of the previous group (i.e., the dispatch group that
19895 precedes the group to which INSN belongs). This means that INSN will be
19896 the first insn in the group it belongs to. */
19899 insn_terminates_group_p (rtx insn, enum group_termination which_group)
19906 first = insn_must_be_first_in_group (insn);
19907 last = insn_must_be_last_in_group (insn);
19912 if (which_group == current_group)
19914 else if (which_group == previous_group)
19922 insn_must_be_first_in_group (rtx insn)
19924 enum attr_type type;
19927 || insn == NULL_RTX
19928 || GET_CODE (insn) == NOTE
19929 || GET_CODE (PATTERN (insn)) == USE
19930 || GET_CODE (PATTERN (insn)) == CLOBBER)
19933 switch (rs6000_cpu)
19935 case PROCESSOR_POWER5:
19936 if (is_cracked_insn (insn))
19938 case PROCESSOR_POWER4:
19939 if (is_microcoded_insn (insn))
19942 if (!rs6000_sched_groups)
19945 type = get_attr_type (insn);
19952 case TYPE_DELAYED_CR:
19953 case TYPE_CR_LOGICAL:
19967 case PROCESSOR_POWER6:
19968 type = get_attr_type (insn);
19972 case TYPE_INSERT_DWORD:
19976 case TYPE_VAR_SHIFT_ROTATE:
19983 case TYPE_INSERT_WORD:
19984 case TYPE_DELAYED_COMPARE:
19985 case TYPE_IMUL_COMPARE:
19986 case TYPE_LMUL_COMPARE:
19987 case TYPE_FPCOMPARE:
19998 case TYPE_LOAD_EXT_UX:
20000 case TYPE_STORE_UX:
20001 case TYPE_FPLOAD_U:
20002 case TYPE_FPLOAD_UX:
20003 case TYPE_FPSTORE_U:
20004 case TYPE_FPSTORE_UX:
20018 insn_must_be_last_in_group (rtx insn)
20020 enum attr_type type;
20023 || insn == NULL_RTX
20024 || GET_CODE (insn) == NOTE
20025 || GET_CODE (PATTERN (insn)) == USE
20026 || GET_CODE (PATTERN (insn)) == CLOBBER)
20029 switch (rs6000_cpu) {
20030 case PROCESSOR_POWER4:
20031 case PROCESSOR_POWER5:
20032 if (is_microcoded_insn (insn))
20035 if (is_branch_slot_insn (insn))
20039 case PROCESSOR_POWER6:
20040 type = get_attr_type (insn);
20047 case TYPE_VAR_SHIFT_ROTATE:
20054 case TYPE_DELAYED_COMPARE:
20055 case TYPE_IMUL_COMPARE:
20056 case TYPE_LMUL_COMPARE:
20057 case TYPE_FPCOMPARE:
20078 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
20079 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
20082 is_costly_group (rtx *group_insns, rtx next_insn)
20085 int issue_rate = rs6000_issue_rate ();
20087 for (i = 0; i < issue_rate; i++)
20089 sd_iterator_def sd_it;
20091 rtx insn = group_insns[i];
20096 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
20098 rtx next = DEP_CON (dep);
20100 if (next == next_insn
20101 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
20109 /* Utility function used by redefine_groups.
20110 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
20111 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
20112 to keep it "far" (in a separate group) from GROUP_INSNS, following
20113 one of the following schemes, depending on the value of the flag
20114 -minsert-sched-nops = X:
20115 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
20116 in order to force NEXT_INSN into a separate group.
20117 (2) X < sched_finish_regroup_exact: insert exactly X nops.
20118 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
20119 insertion (has a group just ended, how many vacant issue slots remain in the
20120 last group, and how many dispatch groups were encountered so far). */
20123 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
20124 rtx next_insn, bool *group_end, int can_issue_more,
20129 int issue_rate = rs6000_issue_rate ();
20130 bool end = *group_end;
20133 if (next_insn == NULL_RTX)
20134 return can_issue_more;
20136 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
20137 return can_issue_more;
20139 force = is_costly_group (group_insns, next_insn);
20141 return can_issue_more;
20143 if (sched_verbose > 6)
20144 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
20145 *group_count ,can_issue_more);
20147 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
20150 can_issue_more = 0;
20152 /* Since only a branch can be issued in the last issue_slot, it is
20153 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
20154 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
20155 in this case the last nop will start a new group and the branch
20156 will be forced to the new group. */
20157 if (can_issue_more && !is_branch_slot_insn (next_insn))
20160 while (can_issue_more > 0)
20163 emit_insn_before (nop, next_insn);
20171 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
20173 int n_nops = rs6000_sched_insert_nops;
20175 /* Nops can't be issued from the branch slot, so the effective
20176 issue_rate for nops is 'issue_rate - 1'. */
20177 if (can_issue_more == 0)
20178 can_issue_more = issue_rate;
20180 if (can_issue_more == 0)
20182 can_issue_more = issue_rate - 1;
20185 for (i = 0; i < issue_rate; i++)
20187 group_insns[i] = 0;
20194 emit_insn_before (nop, next_insn);
20195 if (can_issue_more == issue_rate - 1) /* new group begins */
20198 if (can_issue_more == 0)
20200 can_issue_more = issue_rate - 1;
20203 for (i = 0; i < issue_rate; i++)
20205 group_insns[i] = 0;
20211 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
20214 /* Is next_insn going to start a new group? */
20217 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
20218 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
20219 || (can_issue_more < issue_rate &&
20220 insn_terminates_group_p (next_insn, previous_group)));
20221 if (*group_end && end)
20224 if (sched_verbose > 6)
20225 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
20226 *group_count, can_issue_more);
20227 return can_issue_more;
20230 return can_issue_more;
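/* A minimal standalone sketch of the nop counting in the
   sched_finish_regroup_exact case above: with CAN_ISSUE_MORE slots still
   vacant, 'can_issue_more - 1' nops are enough to push a non-branch
   NEXT_INSN into a fresh group (a nop cannot fill the branch slot), while a
   branch needs 'can_issue_more' nops so that the last nop itself opens the
   group the branch is forced into.  Hypothetical helper, for illustration
   only.  */
static int
sketch_nops_to_force_new_group (int can_issue_more, int next_is_branch)
{
  if (can_issue_more == 0)
    return 0;			/* the current group is already full */
  return next_is_branch ? can_issue_more : can_issue_more - 1;
}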
20233 /* This function tries to synch the dispatch groups that the compiler "sees"
20234 with the dispatch groups that the processor dispatcher is expected to
20235 form in practice. It tries to achieve this synchronization by forcing the
20236 estimated processor grouping on the compiler (as opposed to the function
20237 'pad_groups' which tries to force the scheduler's grouping on the processor).
20239 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
20240 examines the (estimated) dispatch groups that will be formed by the processor
20241 dispatcher. It marks these group boundaries to reflect the estimated
20242 processor grouping, overriding the grouping that the scheduler had marked.
20243 Depending on the value of the flag '-minsert-sched-nops' this function can
20244 force certain insns into separate groups or force a certain distance between
20245 them by inserting nops, for example, if there exists a "costly dependence"
20248 The function estimates the group boundaries that the processor will form as
20249 follows: It keeps track of how many vacant issue slots are available after
20250 each insn. A subsequent insn will start a new group if one of the following
20252 - no more vacant issue slots remain in the current dispatch group.
20253 - only the last issue slot, which is the branch slot, is vacant, but the next
20254 insn is not a branch.
20255 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
20256 which means that a cracked insn (which occupies two issue slots) can't be
20257 issued in this group.
20258 - less than 'issue_rate' slots are vacant, and the next insn always needs to
20259 start a new group. */
20262 redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
20264 rtx insn, next_insn;
20266 int can_issue_more;
20269 int group_count = 0;
20273 issue_rate = rs6000_issue_rate ();
20274 group_insns = XALLOCAVEC (rtx, issue_rate);
20275 for (i = 0; i < issue_rate; i++)
20277 group_insns[i] = 0;
20279 can_issue_more = issue_rate;
20281 insn = get_next_active_insn (prev_head_insn, tail);
20284 while (insn != NULL_RTX)
20286 slot = (issue_rate - can_issue_more);
20287 group_insns[slot] = insn;
20289 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
20290 if (insn_terminates_group_p (insn, current_group))
20291 can_issue_more = 0;
20293 next_insn = get_next_active_insn (insn, tail);
20294 if (next_insn == NULL_RTX)
20295 return group_count + 1;
20297 /* Is next_insn going to start a new group? */
20299 = (can_issue_more == 0
20300 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
20301 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
20302 || (can_issue_more < issue_rate &&
20303 insn_terminates_group_p (next_insn, previous_group)));
20305 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
20306 next_insn, &group_end, can_issue_more,
20312 can_issue_more = 0;
20313 for (i = 0; i < issue_rate; i++)
20315 group_insns[i] = 0;
20319 if (GET_MODE (next_insn) == TImode && can_issue_more)
20320 PUT_MODE (next_insn, VOIDmode);
20321 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
20322 PUT_MODE (next_insn, TImode);
20325 if (can_issue_more == 0)
20326 can_issue_more = issue_rate;
20329 return group_count;
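/* A minimal standalone sketch of the "does NEXT_INSN start a new group"
   test used both in redefine_groups above and in force_new_group: the four
   conditions listed in the comment before redefine_groups, with the insn
   properties passed in as flags.  Hypothetical helper, for illustration
   only.  */
static int
sketch_starts_new_group (int can_issue_more, int issue_rate,
			 int next_is_branch, int next_is_cracked,
			 int next_must_be_first)
{
  return (can_issue_more == 0
	  || (can_issue_more == 1 && !next_is_branch)
	  || (can_issue_more <= 2 && next_is_cracked)
	  || (can_issue_more < issue_rate && next_must_be_first));
}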
20332 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
20333 dispatch group boundaries that the scheduler had marked. Pad with nops
20334 any dispatch groups which have vacant issue slots, in order to force the
20335 scheduler's grouping on the processor dispatcher. The function
20336 returns the number of dispatch groups found. */
20339 pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
20341 rtx insn, next_insn;
20344 int can_issue_more;
20346 int group_count = 0;
20348 /* Initialize issue_rate. */
20349 issue_rate = rs6000_issue_rate ();
20350 can_issue_more = issue_rate;
20352 insn = get_next_active_insn (prev_head_insn, tail);
20353 next_insn = get_next_active_insn (insn, tail);
20355 while (insn != NULL_RTX)
20358 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
20360 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
20362 if (next_insn == NULL_RTX)
20367 /* If the scheduler had marked group termination at this location
20368 (between insn and next_insn), and neither insn nor next_insn will
20369 force group termination, pad the group with nops to force group
20372 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
20373 && !insn_terminates_group_p (insn, current_group)
20374 && !insn_terminates_group_p (next_insn, previous_group))
20376 if (!is_branch_slot_insn (next_insn))
20379 while (can_issue_more)
20382 emit_insn_before (nop, next_insn);
20387 can_issue_more = issue_rate;
20392 next_insn = get_next_active_insn (insn, tail);
20395 return group_count;
20398 /* We're beginning a new block. Initialize data structures as necessary. */
20401 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
20402 int sched_verbose ATTRIBUTE_UNUSED,
20403 int max_ready ATTRIBUTE_UNUSED)
20405 last_scheduled_insn = NULL_RTX;
20406 load_store_pendulum = 0;
20409 /* The following function is called at the end of scheduling BB.
20410 After reload, it inserts nops to enforce insn group bundling. */
20413 rs6000_sched_finish (FILE *dump, int sched_verbose)
20418 fprintf (dump, "=== Finishing schedule.\n");
20420 if (reload_completed && rs6000_sched_groups)
20422 /* Do not run sched_finish hook when selective scheduling enabled. */
20423 if (sel_sched_p ())
20426 if (rs6000_sched_insert_nops == sched_finish_none)
20429 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
20430 n_groups = pad_groups (dump, sched_verbose,
20431 current_sched_info->prev_head,
20432 current_sched_info->next_tail);
20434 n_groups = redefine_groups (dump, sched_verbose,
20435 current_sched_info->prev_head,
20436 current_sched_info->next_tail);
20438 if (sched_verbose >= 6)
20440 fprintf (dump, "ngroups = %d\n", n_groups);
20441 print_rtl (dump, current_sched_info->prev_head);
20442 fprintf (dump, "Done finish_sched\n");
20447 struct _rs6000_sched_context
20449 short cached_can_issue_more;
20450 rtx last_scheduled_insn;
20451 int load_store_pendulum;
20454 typedef struct _rs6000_sched_context rs6000_sched_context_def;
20455 typedef rs6000_sched_context_def *rs6000_sched_context_t;
20457 /* Allocate store for new scheduling context. */
20459 rs6000_alloc_sched_context (void)
20461 return xmalloc (sizeof (rs6000_sched_context_def));
20464 /* If CLEAN_P is true, initialize _SC with clean data;
20465 otherwise initialize it from the global context. */
20467 rs6000_init_sched_context (void *_sc, bool clean_p)
20469 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
20473 sc->cached_can_issue_more = 0;
20474 sc->last_scheduled_insn = NULL_RTX;
20475 sc->load_store_pendulum = 0;
20479 sc->cached_can_issue_more = cached_can_issue_more;
20480 sc->last_scheduled_insn = last_scheduled_insn;
20481 sc->load_store_pendulum = load_store_pendulum;
20485 /* Sets the global scheduling context to the one pointed to by _SC. */
20487 rs6000_set_sched_context (void *_sc)
20489 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
20491 gcc_assert (sc != NULL);
20493 cached_can_issue_more = sc->cached_can_issue_more;
20494 last_scheduled_insn = sc->last_scheduled_insn;
20495 load_store_pendulum = sc->load_store_pendulum;
20500 rs6000_free_sched_context (void *_sc)
20502 gcc_assert (_sc != NULL);
20508 /* Length in units of the trampoline for entering a nested function. */
20511 rs6000_trampoline_size (void)
20515 switch (DEFAULT_ABI)
20518 gcc_unreachable ();
20521 ret = (TARGET_32BIT) ? 12 : 24;
20526 ret = (TARGET_32BIT) ? 40 : 48;
20533 /* Emit RTL insns to initialize the variable parts of a trampoline.
20534 FNADDR is an RTX for the address of the function's pure code.
20535 CXT is an RTX for the static chain value for the function. */
20538 rs6000_initialize_trampoline (rtx addr, rtx fnaddr, rtx cxt)
20540 int regsize = (TARGET_32BIT) ? 4 : 8;
20541 rtx ctx_reg = force_reg (Pmode, cxt);
20543 switch (DEFAULT_ABI)
20546 gcc_unreachable ();
20548 /* Macros to shorten the code expansions below. */
20549 #define MEM_DEREF(addr) gen_rtx_MEM (Pmode, memory_address (Pmode, addr))
20550 #define MEM_PLUS(addr,offset) \
20551 gen_rtx_MEM (Pmode, memory_address (Pmode, plus_constant (addr, offset)))
20553 /* Under AIX, just build the 3-word function descriptor. */
20556 rtx fn_reg = gen_reg_rtx (Pmode);
20557 rtx toc_reg = gen_reg_rtx (Pmode);
20558 emit_move_insn (fn_reg, MEM_DEREF (fnaddr));
20559 emit_move_insn (toc_reg, MEM_PLUS (fnaddr, regsize));
20560 emit_move_insn (MEM_DEREF (addr), fn_reg);
20561 emit_move_insn (MEM_PLUS (addr, regsize), toc_reg);
20562 emit_move_insn (MEM_PLUS (addr, 2*regsize), ctx_reg);
20566 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
20569 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
20570 LCT_NORMAL, VOIDmode, 4,
20572 GEN_INT (rs6000_trampoline_size ()), SImode,
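/* A minimal standalone sketch (plain C, not RTL) of what the AIX branch of
   rs6000_initialize_trampoline above builds: the trampoline is itself a
   three-word function descriptor whose entry point and TOC are copied from
   the target function's descriptor, and whose third word carries the static
   chain.  The word size follows TARGET_32BIT (4 or 8 bytes); 'void *'
   stands in for a machine word here.  Hypothetical types, for illustration
   only.  */
struct aix_func_desc
{
  void *entry;			/* address of the function's code */
  void *toc;			/* TOC (r2) value for the callee  */
  void *static_chain;		/* environment pointer            */
};

static void
sketch_init_trampoline (struct aix_func_desc *tramp,
			const struct aix_func_desc *target, void *chain)
{
  tramp->entry = target->entry;		/* MEM_DEREF (fnaddr)          */
  tramp->toc = target->toc;		/* MEM_PLUS (fnaddr, regsize)  */
  tramp->static_chain = chain;		/* ctx_reg at addr + 2*regsize */
}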
20582 /* Table of valid machine attributes. */
20584 const struct attribute_spec rs6000_attribute_table[] =
20586 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
20587 { "altivec", 1, 1, false, true, false, rs6000_handle_altivec_attribute },
20588 { "longcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
20589 { "shortcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
20590 { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
20591 { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
20592 #ifdef SUBTARGET_ATTRIBUTE_TABLE
20593 SUBTARGET_ATTRIBUTE_TABLE,
20595 { NULL, 0, 0, false, false, false, NULL }
20598 /* Handle the "altivec" attribute. The attribute may have
20599 arguments as follows:
20601 __attribute__((altivec(vector__)))
20602 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
20603 __attribute__((altivec(bool__))) (always followed by 'unsigned')
20605 and may appear more than once (e.g., 'vector bool char') in a
20606 given declaration. */
20609 rs6000_handle_altivec_attribute (tree *node,
20610 tree name ATTRIBUTE_UNUSED,
20612 int flags ATTRIBUTE_UNUSED,
20613 bool *no_add_attrs)
20615 tree type = *node, result = NULL_TREE;
20616 enum machine_mode mode;
20619 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
20620 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
20621 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
20624 while (POINTER_TYPE_P (type)
20625 || TREE_CODE (type) == FUNCTION_TYPE
20626 || TREE_CODE (type) == METHOD_TYPE
20627 || TREE_CODE (type) == ARRAY_TYPE)
20628 type = TREE_TYPE (type);
20630 mode = TYPE_MODE (type);
20632 /* Check for invalid AltiVec type qualifiers. */
20633 if (type == long_unsigned_type_node || type == long_integer_type_node)
20636 error ("use of %<long%> in AltiVec types is invalid for 64-bit code");
20637 else if (rs6000_warn_altivec_long)
20638 warning (0, "use of %<long%> in AltiVec types is deprecated; use %<int%>");
20640 else if (type == long_long_unsigned_type_node
20641 || type == long_long_integer_type_node)
20642 error ("use of %<long long%> in AltiVec types is invalid");
20643 else if (type == double_type_node)
20644 error ("use of %<double%> in AltiVec types is invalid");
20645 else if (type == long_double_type_node)
20646 error ("use of %<long double%> in AltiVec types is invalid");
20647 else if (type == boolean_type_node)
20648 error ("use of boolean types in AltiVec types is invalid");
20649 else if (TREE_CODE (type) == COMPLEX_TYPE)
20650 error ("use of %<complex%> in AltiVec types is invalid");
20651 else if (DECIMAL_FLOAT_MODE_P (mode))
20652 error ("use of decimal floating point types in AltiVec types is invalid");
20654 switch (altivec_type)
20657 unsigned_p = TYPE_UNSIGNED (type);
20661 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
20664 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
20667 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
20669 case SFmode: result = V4SF_type_node; break;
20670 /* If the user says 'vector int bool', we may be handed the 'bool'
20671 attribute _before_ the 'vector' attribute, and so select the
20672 proper type in the 'b' case below. */
20673 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
20681 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
20682 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
20683 case QImode: case V16QImode: result = bool_V16QI_type_node;
20690 case V8HImode: result = pixel_V8HI_type_node;
20696 /* Propagate qualifiers attached to the element type
20697 onto the vector type. */
20698 if (result && result != type && TYPE_QUALS (type))
20699 result = build_qualified_type (result, TYPE_QUALS (type));
20701 *no_add_attrs = true; /* No need to hang on to the attribute. */
20704 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
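/* Hypothetical illustration of the attribute forms listed in the comment
   before rs6000_handle_altivec_attribute; in practice the <altivec.h>
   keywords 'vector', 'bool' and 'pixel' are the supported spellings, and
   these typedef names are made up.  They only show which vector type each
   scalar element type selects in the handler above.  */
typedef int sketch_v4si __attribute__ ((altivec (vector__)));			/* V4SI           */
typedef unsigned char sketch_v16qi __attribute__ ((altivec (vector__)));	/* unsigned V16QI */
typedef unsigned int sketch_bool_v4si __attribute__ ((altivec (bool__)));	/* bool V4SI      */
typedef unsigned short sketch_pixel_v8hi __attribute__ ((altivec (pixel__)));	/* pixel V8HI     */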
20709 /* AltiVec defines four built-in scalar types that serve as vector
20710 elements; we must teach the compiler how to mangle them. */
20712 static const char *
20713 rs6000_mangle_type (const_tree type)
20715 type = TYPE_MAIN_VARIANT (type);
20717 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
20718 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
20721 if (type == bool_char_type_node) return "U6__boolc";
20722 if (type == bool_short_type_node) return "U6__bools";
20723 if (type == pixel_type_node) return "u7__pixel";
20724 if (type == bool_int_type_node) return "U6__booli";
20726 /* Mangle IBM extended float long double as `g' (__float128) on
20727 powerpc*-linux where long-double-64 previously was the default. */
20728 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
20730 && TARGET_LONG_DOUBLE_128
20731 && !TARGET_IEEEQUAD)
20734 /* For all other types, use normal C++ mangling. */
20738 /* Handle a "longcall" or "shortcall" attribute; arguments as in
20739 struct attribute_spec.handler. */
20742 rs6000_handle_longcall_attribute (tree *node, tree name,
20743 tree args ATTRIBUTE_UNUSED,
20744 int flags ATTRIBUTE_UNUSED,
20745 bool *no_add_attrs)
20747 if (TREE_CODE (*node) != FUNCTION_TYPE
20748 && TREE_CODE (*node) != FIELD_DECL
20749 && TREE_CODE (*node) != TYPE_DECL)
20751 warning (OPT_Wattributes, "%qE attribute only applies to functions",
20753 *no_add_attrs = true;
20759 /* Set longcall attributes on all functions declared when
20760 rs6000_default_long_calls is true. */
20762 rs6000_set_default_type_attributes (tree type)
20764 if (rs6000_default_long_calls
20765 && (TREE_CODE (type) == FUNCTION_TYPE
20766 || TREE_CODE (type) == METHOD_TYPE))
20767 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
20769 TYPE_ATTRIBUTES (type));
20772 darwin_set_default_type_attributes (type);
20776 /* Return a reference suitable for calling a function with the
20777 longcall attribute. */
20780 rs6000_longcall_ref (rtx call_ref)
20782 const char *call_name;
20785 if (GET_CODE (call_ref) != SYMBOL_REF)
20788 /* System V adds '.' to the internal name, so skip them. */
20789 call_name = XSTR (call_ref, 0);
20790 if (*call_name == '.')
20792 while (*call_name == '.')
20795 node = get_identifier (call_name);
20796 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
20799 return force_reg (Pmode, call_ref);
20802 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
20803 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
20806 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
20807 struct attribute_spec.handler. */
20809 rs6000_handle_struct_attribute (tree *node, tree name,
20810 tree args ATTRIBUTE_UNUSED,
20811 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
20814 if (DECL_P (*node))
20816 if (TREE_CODE (*node) == TYPE_DECL)
20817 type = &TREE_TYPE (*node);
20822 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
20823 || TREE_CODE (*type) == UNION_TYPE)))
20825 warning (OPT_Wattributes, "%qE attribute ignored", name);
20826 *no_add_attrs = true;
20829 else if ((is_attribute_p ("ms_struct", name)
20830 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
20831 || ((is_attribute_p ("gcc_struct", name)
20832 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
20834 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
20836 *no_add_attrs = true;
20843 rs6000_ms_bitfield_layout_p (const_tree record_type)
20845 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
20846 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
20847 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
20850 #ifdef USING_ELFOS_H
20852 /* A get_unnamed_section callback, used for switching to toc_section. */
20855 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
20857 if (DEFAULT_ABI == ABI_AIX
20858 && TARGET_MINIMAL_TOC
20859 && !TARGET_RELOCATABLE)
20861 if (!toc_initialized)
20863 toc_initialized = 1;
20864 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
20865 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
20866 fprintf (asm_out_file, "\t.tc ");
20867 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
20868 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
20869 fprintf (asm_out_file, "\n");
20871 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
20872 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
20873 fprintf (asm_out_file, " = .+32768\n");
20876 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
20878 else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
20879 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
20882 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
20883 if (!toc_initialized)
20885 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
20886 fprintf (asm_out_file, " = .+32768\n");
20887 toc_initialized = 1;
20892 /* Implement TARGET_ASM_INIT_SECTIONS. */
20895 rs6000_elf_asm_init_sections (void)
20898 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
20901 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
20902 SDATA2_SECTION_ASM_OP);
20905 /* Implement TARGET_SELECT_RTX_SECTION. */
20908 rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
20909 unsigned HOST_WIDE_INT align)
20911 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
20912 return toc_section;
20914 return default_elf_select_rtx_section (mode, x, align);
20917 /* For a SYMBOL_REF, set generic flags and then perform some
20918 target-specific processing.
20920 When the AIX ABI is requested on a non-AIX system, replace the
20921 function name with the real name (with a leading .) rather than the
20922 function descriptor name. This saves a lot of overriding code to
20923 read the prefixes. */
20926 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
20928 default_encode_section_info (decl, rtl, first);
20931 && TREE_CODE (decl) == FUNCTION_DECL
20933 && DEFAULT_ABI == ABI_AIX)
20935 rtx sym_ref = XEXP (rtl, 0);
20936 size_t len = strlen (XSTR (sym_ref, 0));
20937 char *str = XALLOCAVEC (char, len + 2);
20939 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
20940 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
20945 compare_section_name (const char *section, const char *templ)
20949 len = strlen (templ);
20950 return (strncmp (section, templ, len) == 0
20951 && (section[len] == 0 || section[len] == '.'));
20955 rs6000_elf_in_small_data_p (const_tree decl)
20957 if (rs6000_sdata == SDATA_NONE)
20960 /* We want to merge strings, so we never consider them small data. */
20961 if (TREE_CODE (decl) == STRING_CST)
20964 /* Functions are never in the small data area. */
20965 if (TREE_CODE (decl) == FUNCTION_DECL)
20968 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
20970 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
20971 if (compare_section_name (section, ".sdata")
20972 || compare_section_name (section, ".sdata2")
20973 || compare_section_name (section, ".gnu.linkonce.s")
20974 || compare_section_name (section, ".sbss")
20975 || compare_section_name (section, ".sbss2")
20976 || compare_section_name (section, ".gnu.linkonce.sb")
20977 || strcmp (section, ".PPC.EMB.sdata0") == 0
20978 || strcmp (section, ".PPC.EMB.sbss0") == 0)
20983 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
20986 && (unsigned HOST_WIDE_INT) size <= g_switch_value
20987 /* If it's not public, and we're not going to reference it there,
20988 there's no need to put it in the small data section. */
20989 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
20996 #endif /* USING_ELFOS_H */
20998 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
21001 rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
21003 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
21006 /* Return a REG that occurs in ADDR with coefficient 1.
21007 ADDR can be effectively incremented by incrementing REG.
21009 r0 is special and we must not select it as an address
21010 register by this routine since our caller will try to
21011 increment the returned register via an "la" instruction. */
21014 find_addr_reg (rtx addr)
21016 while (GET_CODE (addr) == PLUS)
21018 if (GET_CODE (XEXP (addr, 0)) == REG
21019 && REGNO (XEXP (addr, 0)) != 0)
21020 addr = XEXP (addr, 0);
21021 else if (GET_CODE (XEXP (addr, 1)) == REG
21022 && REGNO (XEXP (addr, 1)) != 0)
21023 addr = XEXP (addr, 1);
21024 else if (CONSTANT_P (XEXP (addr, 0)))
21025 addr = XEXP (addr, 1);
21026 else if (CONSTANT_P (XEXP (addr, 1)))
21027 addr = XEXP (addr, 0);
21029 gcc_unreachable ();
21031 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
21036 rs6000_fatal_bad_address (rtx op)
21038 fatal_insn ("bad address", op);
21043 static tree branch_island_list = 0;
21045 /* Remember to generate a branch island for far calls to the given
21049 add_compiler_branch_island (tree label_name, tree function_name,
21052 tree branch_island = build_tree_list (function_name, label_name);
21053 TREE_TYPE (branch_island) = build_int_cst (NULL_TREE, line_number);
21054 TREE_CHAIN (branch_island) = branch_island_list;
21055 branch_island_list = branch_island;
21058 #define BRANCH_ISLAND_LABEL_NAME(BRANCH_ISLAND) TREE_VALUE (BRANCH_ISLAND)
21059 #define BRANCH_ISLAND_FUNCTION_NAME(BRANCH_ISLAND) TREE_PURPOSE (BRANCH_ISLAND)
21060 #define BRANCH_ISLAND_LINE_NUMBER(BRANCH_ISLAND) \
21061 TREE_INT_CST_LOW (TREE_TYPE (BRANCH_ISLAND))
21063 /* Generate far-jump branch islands for everything on the
21064 branch_island_list. Invoked immediately after the last instruction
21065 of the epilogue has been emitted; the branch-islands must be
21066 appended to, and contiguous with, the function body. Mach-O stubs
21067 are generated in machopic_output_stub(). */
21070 macho_branch_islands (void)
21073 tree branch_island;
21075 for (branch_island = branch_island_list;
21077 branch_island = TREE_CHAIN (branch_island))
21079 const char *label =
21080 IDENTIFIER_POINTER (BRANCH_ISLAND_LABEL_NAME (branch_island));
21082 IDENTIFIER_POINTER (BRANCH_ISLAND_FUNCTION_NAME (branch_island));
21083 char name_buf[512];
21084 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
21085 if (name[0] == '*' || name[0] == '&')
21086 strcpy (name_buf, name+1);
21090 strcpy (name_buf+1, name);
21092 strcpy (tmp_buf, "\n");
21093 strcat (tmp_buf, label);
21094 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
21095 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
21096 dbxout_stabd (N_SLINE, BRANCH_ISLAND_LINE_NUMBER (branch_island));
21097 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
21100 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
21101 strcat (tmp_buf, label);
21102 strcat (tmp_buf, "_pic\n");
21103 strcat (tmp_buf, label);
21104 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
21106 strcat (tmp_buf, "\taddis r11,r11,ha16(");
21107 strcat (tmp_buf, name_buf);
21108 strcat (tmp_buf, " - ");
21109 strcat (tmp_buf, label);
21110 strcat (tmp_buf, "_pic)\n");
21112 strcat (tmp_buf, "\tmtlr r0\n");
21114 strcat (tmp_buf, "\taddi r12,r11,lo16(");
21115 strcat (tmp_buf, name_buf);
21116 strcat (tmp_buf, " - ");
21117 strcat (tmp_buf, label);
21118 strcat (tmp_buf, "_pic)\n");
21120 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
21124 strcat (tmp_buf, ":\nlis r12,hi16(");
21125 strcat (tmp_buf, name_buf);
21126 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
21127 strcat (tmp_buf, name_buf);
21128 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
21130 output_asm_insn (tmp_buf, 0);
21131 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
21132 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
21133 dbxout_stabd (N_SLINE, BRANCH_ISLAND_LINE_NUMBER (branch_island));
21134 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
21137 branch_island_list = 0;
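/* For illustration, the island emitted above for a far call to a function
   "_foo" through a compiler-generated label "L42" comes out roughly as
   follows (reconstructed from the strcat calls above; exact spacing is
   hypothetical).  PIC case:

	L42:
		mflr r0
		bcl 20,31,L42_pic
	L42_pic:
		mflr r11
		addis r11,r11,ha16(_foo - L42_pic)
		mtlr r0
		addi r12,r11,lo16(_foo - L42_pic)
		mtctr r12
		bctr

   non-PIC case:

	L42:
		lis r12,hi16(_foo)
		ori r12,r12,lo16(_foo)
		mtctr r12
		bctr  */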
21140 /* NO_PREVIOUS_DEF checks in the linked list whether the function name is
21141 already there or not. */
21144 no_previous_def (tree function_name)
21146 tree branch_island;
21147 for (branch_island = branch_island_list;
21149 branch_island = TREE_CHAIN (branch_island))
21150 if (function_name == BRANCH_ISLAND_FUNCTION_NAME (branch_island))
21155 /* GET_PREV_LABEL gets the label name from the previous definition of
21159 get_prev_label (tree function_name)
21161 tree branch_island;
21162 for (branch_island = branch_island_list;
21164 branch_island = TREE_CHAIN (branch_island))
21165 if (function_name == BRANCH_ISLAND_FUNCTION_NAME (branch_island))
21166 return BRANCH_ISLAND_LABEL_NAME (branch_island);
21170 #ifndef DARWIN_LINKER_GENERATES_ISLANDS
21171 #define DARWIN_LINKER_GENERATES_ISLANDS 0
21174 /* KEXTs still need branch islands. */
21175 #define DARWIN_GENERATE_ISLANDS (!DARWIN_LINKER_GENERATES_ISLANDS \
21176 || flag_mkernel || flag_apple_kext)
21178 /* INSN is either a function call or a millicode call. It may have an
21179 unconditional jump in its delay slot.
21181 CALL_DEST is the routine we are calling. */
21184 output_call (rtx insn, rtx *operands, int dest_operand_number,
21185 int cookie_operand_number)
21187 static char buf[256];
21188 if (DARWIN_GENERATE_ISLANDS
21189 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
21190 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
21193 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
21195 if (no_previous_def (funname))
21197 rtx label_rtx = gen_label_rtx ();
21198 char *label_buf, temp_buf[256];
21199 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
21200 CODE_LABEL_NUMBER (label_rtx));
21201 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
21202 labelname = get_identifier (label_buf);
21203 add_compiler_branch_island (labelname, funname, insn_line (insn));
21206 labelname = get_prev_label (funname);
21208 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
21209 instruction will reach 'foo', otherwise link as 'bl L42'".
21210 "L42" should be a 'branch island', that will do a far jump to
21211 'foo'. Branch islands are generated in
21212 macho_branch_islands(). */
21213 sprintf (buf, "jbsr %%z%d,%.246s",
21214 dest_operand_number, IDENTIFIER_POINTER (labelname));
21217 sprintf (buf, "bl %%z%d", dest_operand_number);
21221 /* Generate PIC and indirect symbol stubs. */
21224 machopic_output_stub (FILE *file, const char *symb, const char *stub)
21226 unsigned int length;
21227 char *symbol_name, *lazy_ptr_name;
21228 char *local_label_0;
21229 static int label = 0;
21231 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
21232 symb = (*targetm.strip_name_encoding) (symb);
21235 length = strlen (symb);
21236 symbol_name = XALLOCAVEC (char, length + 32);
21237 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
21239 lazy_ptr_name = XALLOCAVEC (char, length + 32);
21240 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
21243 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
21245 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
21249 fprintf (file, "\t.align 5\n");
21251 fprintf (file, "%s:\n", stub);
21252 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
21255 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
21256 sprintf (local_label_0, "\"L%011d$spb\"", label);
21258 fprintf (file, "\tmflr r0\n");
21259 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
21260 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
21261 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
21262 lazy_ptr_name, local_label_0);
21263 fprintf (file, "\tmtlr r0\n");
21264 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
21265 (TARGET_64BIT ? "ldu" : "lwzu"),
21266 lazy_ptr_name, local_label_0);
21267 fprintf (file, "\tmtctr r12\n");
21268 fprintf (file, "\tbctr\n");
21272 fprintf (file, "\t.align 4\n");
21274 fprintf (file, "%s:\n", stub);
21275 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
21277 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
21278 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
21279 (TARGET_64BIT ? "ldu" : "lwzu"),
21281 fprintf (file, "\tmtctr r12\n");
21282 fprintf (file, "\tbctr\n");
21285 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
21286 fprintf (file, "%s:\n", lazy_ptr_name);
21287 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
21288 fprintf (file, "%sdyld_stub_binding_helper\n",
21289 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
21292 /* Legitimize PIC addresses. If the address is already
21293 position-independent, we return ORIG. Newly generated
21294 position-independent addresses go into a reg. This is REG if
21295 nonzero; otherwise we allocate register(s) as necessary.
21297 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
21300 rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
21305 if (reg == NULL && ! reload_in_progress && ! reload_completed)
21306 reg = gen_reg_rtx (Pmode);
21308 if (GET_CODE (orig) == CONST)
21312 if (GET_CODE (XEXP (orig, 0)) == PLUS
21313 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
21316 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
21318 /* Use a different reg for the intermediate value, as
21319 it will be marked UNCHANGING. */
21320 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
21321 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
21324 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
21327 if (GET_CODE (offset) == CONST_INT)
21329 if (SMALL_INT (offset))
21330 return plus_constant (base, INTVAL (offset));
21331 else if (! reload_in_progress && ! reload_completed)
21332 offset = force_reg (Pmode, offset);
21335 rtx mem = force_const_mem (Pmode, orig);
21336 return machopic_legitimize_pic_address (mem, Pmode, reg);
21339 return gen_rtx_PLUS (Pmode, base, offset);
21342 /* Fall back on generic machopic code. */
21343 return machopic_legitimize_pic_address (orig, mode, reg);
21346 /* Output a .machine directive for the Darwin assembler, and call
21347 the generic start_file routine. */
21350 rs6000_darwin_file_start (void)
21352 static const struct
21358 { "ppc64", "ppc64", MASK_64BIT },
21359 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
21360 { "power4", "ppc970", 0 },
21361 { "G5", "ppc970", 0 },
21362 { "7450", "ppc7450", 0 },
21363 { "7400", "ppc7400", MASK_ALTIVEC },
21364 { "G4", "ppc7400", 0 },
21365 { "750", "ppc750", 0 },
21366 { "740", "ppc750", 0 },
21367 { "G3", "ppc750", 0 },
21368 { "604e", "ppc604e", 0 },
21369 { "604", "ppc604", 0 },
21370 { "603e", "ppc603", 0 },
21371 { "603", "ppc603", 0 },
21372 { "601", "ppc601", 0 },
21373 { NULL, "ppc", 0 } };
21374 const char *cpu_id = "";
21377 rs6000_file_start ();
21378 darwin_file_start ();
21380 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
21381 for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
21382 if (rs6000_select[i].set_arch_p && rs6000_select[i].string
21383 && rs6000_select[i].string[0] != '\0')
21384 cpu_id = rs6000_select[i].string;
21386 /* Look through the mapping array. Pick the first name that either
21387 matches the argument, has a bit set in IF_SET that is also set
21388 in the target flags, or has a NULL name. */
21391 while (mapping[i].arg != NULL
21392 && strcmp (mapping[i].arg, cpu_id) != 0
21393 && (mapping[i].if_set & target_flags) == 0)
21396 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
21399 #endif /* TARGET_MACHO */
21403 rs6000_elf_reloc_rw_mask (void)
21407 else if (DEFAULT_ABI == ABI_AIX)
21413 /* Record an element in the table of global constructors. SYMBOL is
21414 a SYMBOL_REF of the function to be called; PRIORITY is a number
21415 between 0 and MAX_INIT_PRIORITY.
21417 This differs from default_named_section_asm_out_constructor in
21418 that we have special handling for -mrelocatable. */
21421 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
21423 const char *section = ".ctors";
21426 if (priority != DEFAULT_INIT_PRIORITY)
21428 sprintf (buf, ".ctors.%.5u",
21429 /* Invert the numbering so the linker puts us in the proper
21430 order; constructors are run from right to left, and the
21431 linker sorts in increasing order. */
21432 MAX_INIT_PRIORITY - priority);
21436 switch_to_section (get_section (section, SECTION_WRITE, NULL));
21437 assemble_align (POINTER_SIZE);
21439 if (TARGET_RELOCATABLE)
21441 fputs ("\t.long (", asm_out_file);
21442 output_addr_const (asm_out_file, symbol);
21443 fputs (")@fixup\n", asm_out_file);
21446 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
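/* A minimal standalone sketch of the priority inversion above, assuming
   MAX_INIT_PRIORITY is 65535 (its usual value in GCC); hypothetical helper,
   for illustration only.  A constructor given the small priority number 101
   (which must run early) ends up in ".ctors.65434"; the linker's ascending
   sort places that section near the end of the list, and the right-to-left
   execution order therefore reaches it first.  */
static unsigned int
sketch_inverted_ctor_suffix (unsigned int priority)
{
  return 65535 - priority;	/* printed as ".ctors.%.5u" above */
}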
21450 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
21452 const char *section = ".dtors";
21455 if (priority != DEFAULT_INIT_PRIORITY)
21457 sprintf (buf, ".dtors.%.5u",
21458 /* Invert the numbering so the linker puts us in the proper
21459 order; constructors are run from right to left, and the
21460 linker sorts in increasing order. */
21461 MAX_INIT_PRIORITY - priority);
21465 switch_to_section (get_section (section, SECTION_WRITE, NULL));
21466 assemble_align (POINTER_SIZE);
21468 if (TARGET_RELOCATABLE)
21470 fputs ("\t.long (", asm_out_file);
21471 output_addr_const (asm_out_file, symbol);
21472 fputs (")@fixup\n", asm_out_file);
21475 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
21479 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
21483 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
21484 ASM_OUTPUT_LABEL (file, name);
21485 fputs (DOUBLE_INT_ASM_OP, file);
21486 rs6000_output_function_entry (file, name);
21487 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
21490 fputs ("\t.size\t", file);
21491 assemble_name (file, name);
21492 fputs (",24\n\t.type\t.", file);
21493 assemble_name (file, name);
21494 fputs (",@function\n", file);
21495 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
21497 fputs ("\t.globl\t.", file);
21498 assemble_name (file, name);
21503 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
21504 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
21505 rs6000_output_function_entry (file, name);
21506 fputs (":\n", file);
21510 if (TARGET_RELOCATABLE
21511 && !TARGET_SECURE_PLT
21512 && (get_pool_size () != 0 || crtl->profile)
21517 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
21519 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
21520 fprintf (file, "\t.long ");
21521 assemble_name (file, buf);
21523 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
21524 assemble_name (file, buf);
21528 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
21529 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
21531 if (DEFAULT_ABI == ABI_AIX)
21533 const char *desc_name, *orig_name;
21535 orig_name = (*targetm.strip_name_encoding) (name);
21536 desc_name = orig_name;
21537 while (*desc_name == '.')
21540 if (TREE_PUBLIC (decl))
21541 fprintf (file, "\t.globl %s\n", desc_name);
21543 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
21544 fprintf (file, "%s:\n", desc_name);
21545 fprintf (file, "\t.long %s\n", orig_name);
21546 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
21547 if (DEFAULT_ABI == ABI_AIX)
21548 fputs ("\t.long 0\n", file);
21549 fprintf (file, "\t.previous\n");
21551 ASM_OUTPUT_LABEL (file, name);
21555 rs6000_elf_end_indicate_exec_stack (void)
21558 file_end_indicate_exec_stack ();
21564 rs6000_xcoff_asm_output_anchor (rtx symbol)
21568 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
21569 SYMBOL_REF_BLOCK_OFFSET (symbol));
21570 ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
21574 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
21576 fputs (GLOBAL_ASM_OP, stream);
21577 RS6000_OUTPUT_BASENAME (stream, name);
21578 putc ('\n', stream);
21581 /* A get_unnamed_decl callback, used for read-only sections. PTR
21582 points to the section string variable. */
21585 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
21587 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
21588 *(const char *const *) directive,
21589 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
21592 /* Likewise for read-write sections. */
21595 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
21597 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
21598 *(const char *const *) directive,
21599 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
21602 /* A get_unnamed_section callback, used for switching to toc_section. */
21605 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
21607 if (TARGET_MINIMAL_TOC)
21609 /* toc_section is always selected at least once from
21610 rs6000_xcoff_file_start, so this is guaranteed to
21611 be defined exactly once in each file. */
21612 if (!toc_initialized)
21614 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
21615 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
21616 toc_initialized = 1;
21618 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
21619 (TARGET_32BIT ? "" : ",3"));
21622 fputs ("\t.toc\n", asm_out_file);
21625 /* Implement TARGET_ASM_INIT_SECTIONS. */
21628 rs6000_xcoff_asm_init_sections (void)
21630 read_only_data_section
21631 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
21632 &xcoff_read_only_section_name);
21634 private_data_section
21635 = get_unnamed_section (SECTION_WRITE,
21636 rs6000_xcoff_output_readwrite_section_asm_op,
21637 &xcoff_private_data_section_name);
21639 read_only_private_data_section
21640 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
21641 &xcoff_private_data_section_name);
21644 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
21646 readonly_data_section = read_only_data_section;
21647 exception_section = data_section;
21651 rs6000_xcoff_reloc_rw_mask (void)
21657 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
21658 tree decl ATTRIBUTE_UNUSED)
21661 static const char * const suffix[3] = { "PR", "RO", "RW" };
21663 if (flags & SECTION_CODE)
21665 else if (flags & SECTION_WRITE)
21670 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
21671 (flags & SECTION_CODE) ? "." : "",
21672 name, suffix[smclass], flags & SECTION_ENTSIZE);
21676 rs6000_xcoff_select_section (tree decl, int reloc,
21677 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
21679 if (decl_readonly_section (decl, reloc))
21681 if (TREE_PUBLIC (decl))
21682 return read_only_data_section;
21684 return read_only_private_data_section;
21688 if (TREE_PUBLIC (decl))
21689 return data_section;
21691 return private_data_section;
21696 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
21700 /* Use select_section for private and uninitialized data. */
21701 if (!TREE_PUBLIC (decl)
21702 || DECL_COMMON (decl)
21703 || DECL_INITIAL (decl) == NULL_TREE
21704 || DECL_INITIAL (decl) == error_mark_node
21705 || (flag_zero_initialized_in_bss
21706 && initializer_zerop (DECL_INITIAL (decl))))
21709 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
21710 name = (*targetm.strip_name_encoding) (name);
21711 DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
21714 /* Select section for constant in constant pool.
21716 On RS/6000, all constants are in the private read-only data area.
21717 However, if this is being placed in the TOC it must be output as a TOC entry. */
21721 rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
21722 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
21724 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
21725 return toc_section;
21727 return read_only_private_data_section;
21730 /* Remove any trailing [DS] or the like from the symbol name. */
21732 static const char *
21733 rs6000_xcoff_strip_name_encoding (const char *name)
21738 len = strlen (name);
21739 if (name[len - 1] == ']')
21740 return ggc_alloc_string (name, len - 4);
21745 /* Section attributes. AIX is always PIC. */
21747 static unsigned int
21748 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
21750 unsigned int align;
21751 unsigned int flags = default_section_type_flags (decl, name, reloc);
21753 /* Align to at least UNIT size. */
21754 if (flags & SECTION_CODE)
21755 align = MIN_UNITS_PER_WORD;
21757 /* Increase alignment of large objects if not already stricter. */
21758 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
21759 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
21760 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
21762 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
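/* For instance, a doubleword-aligned object larger than a word ends up
   with align == 8, and exact_log2 (8) == 3 is carried in the
   SECTION_ENTSIZE bits of the flags; rs6000_xcoff_asm_named_section
   prints those bits back out as the csect alignment operand.  */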
21765 /* Output at beginning of assembler file.
21767 Initialize the section names for the RS/6000 at this point.
21769 Specify filename, including full path, to assembler.
21771 We want to go into the TOC section so at least one .toc will be emitted.
21772 Also, in order to output proper .bs/.es pairs, we need at least one static
21773 [RW] section emitted.
21775 Finally, declare mcount when profiling to make the assembler happy. */
21778 rs6000_xcoff_file_start (void)
21780 rs6000_gen_section_name (&xcoff_bss_section_name,
21781 main_input_filename, ".bss_");
21782 rs6000_gen_section_name (&xcoff_private_data_section_name,
21783 main_input_filename, ".rw_");
21784 rs6000_gen_section_name (&xcoff_read_only_section_name,
21785 main_input_filename, ".ro_");
21787 fputs ("\t.file\t", asm_out_file);
21788 output_quoted_string (asm_out_file, main_input_filename);
21789 fputc ('\n', asm_out_file);
21790 if (write_symbols != NO_DEBUG)
21791 switch_to_section (private_data_section);
21792 switch_to_section (text_section);
21794 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
21795 rs6000_file_start ();
21798 /* Output at end of assembler file.
21799 On the RS/6000, referencing data should automatically pull in text. */
21802 rs6000_xcoff_file_end (void)
21804 switch_to_section (text_section);
21805 fputs ("_section_.text:\n", asm_out_file);
21806 switch_to_section (data_section);
21807 fputs (TARGET_32BIT
21808 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
21811 #endif /* TARGET_XCOFF */
21813 /* Compute a (partial) cost for rtx X. Return true if the complete
21814 cost has been computed, and false if subexpressions should be
21815 scanned. In either case, *TOTAL contains the cost result. */
21818 rs6000_rtx_costs (rtx x, int code, int outer_code, int *total,
21821 enum machine_mode mode = GET_MODE (x);
21825 /* On the RS/6000, if it is valid in the insn, it is free. */
21827 if (((outer_code == SET
21828 || outer_code == PLUS
21829 || outer_code == MINUS)
21830 && (satisfies_constraint_I (x)
21831 || satisfies_constraint_L (x)))
21832 || (outer_code == AND
21833 && (satisfies_constraint_K (x)
21835 ? satisfies_constraint_L (x)
21836 : satisfies_constraint_J (x))
21837 || mask_operand (x, mode)
21839 && mask64_operand (x, DImode))))
21840 || ((outer_code == IOR || outer_code == XOR)
21841 && (satisfies_constraint_K (x)
21843 ? satisfies_constraint_L (x)
21844 : satisfies_constraint_J (x))))
21845 || outer_code == ASHIFT
21846 || outer_code == ASHIFTRT
21847 || outer_code == LSHIFTRT
21848 || outer_code == ROTATE
21849 || outer_code == ROTATERT
21850 || outer_code == ZERO_EXTRACT
21851 || (outer_code == MULT
21852 && satisfies_constraint_I (x))
21853 || ((outer_code == DIV || outer_code == UDIV
21854 || outer_code == MOD || outer_code == UMOD)
21855 && exact_log2 (INTVAL (x)) >= 0)
21856 || (outer_code == COMPARE
21857 && (satisfies_constraint_I (x)
21858 || satisfies_constraint_K (x)))
21859 || (outer_code == EQ
21860 && (satisfies_constraint_I (x)
21861 || satisfies_constraint_K (x)
21863 ? satisfies_constraint_L (x)
21864 : satisfies_constraint_J (x))))
21865 || (outer_code == GTU
21866 && satisfies_constraint_I (x))
21867 || (outer_code == LTU
21868 && satisfies_constraint_P (x)))
21873 else if ((outer_code == PLUS
21874 && reg_or_add_cint_operand (x, VOIDmode))
21875 || (outer_code == MINUS
21876 && reg_or_sub_cint_operand (x, VOIDmode))
21877 || ((outer_code == SET
21878 || outer_code == IOR
21879 || outer_code == XOR)
21881 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
21883 *total = COSTS_N_INSNS (1);
21889 if (mode == DImode && code == CONST_DOUBLE)
21891 if ((outer_code == IOR || outer_code == XOR)
21892 && CONST_DOUBLE_HIGH (x) == 0
21893 && (CONST_DOUBLE_LOW (x)
21894 & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)
21899 else if ((outer_code == AND && and64_2_operand (x, DImode))
21900 || ((outer_code == SET
21901 || outer_code == IOR
21902 || outer_code == XOR)
21903 && CONST_DOUBLE_HIGH (x) == 0))
21905 *total = COSTS_N_INSNS (1);
21915 /* When optimizing for size, MEM should be slightly more expensive
21916 than generating an address, e.g., (plus (reg) (const)).
21917 L1 cache latency is about two instructions. */
21918 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
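/* With the usual COSTS_N_INSNS (N) == N * 4 scaling this charges 5 when
   optimizing for size versus 8 when optimizing for speed.  */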
21926 if (mode == DFmode)
21928 if (GET_CODE (XEXP (x, 0)) == MULT)
21930 /* FNMA accounted in outer NEG. */
21931 if (outer_code == NEG)
21932 *total = rs6000_cost->dmul - rs6000_cost->fp;
21934 *total = rs6000_cost->dmul;
21937 *total = rs6000_cost->fp;
21939 else if (mode == SFmode)
21941 /* FNMA accounted in outer NEG. */
21942 if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
21945 *total = rs6000_cost->fp;
21948 *total = COSTS_N_INSNS (1);
21952 if (mode == DFmode)
21954 if (GET_CODE (XEXP (x, 0)) == MULT
21955 || GET_CODE (XEXP (x, 1)) == MULT)
21957 /* FNMA accounted in outer NEG. */
21958 if (outer_code == NEG)
21959 *total = rs6000_cost->dmul - rs6000_cost->fp;
21961 *total = rs6000_cost->dmul;
21964 *total = rs6000_cost->fp;
21966 else if (mode == SFmode)
21968 /* FNMA accounted in outer NEG. */
21969 if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
21972 *total = rs6000_cost->fp;
21975 *total = COSTS_N_INSNS (1);
21979 if (GET_CODE (XEXP (x, 1)) == CONST_INT
21980 && satisfies_constraint_I (XEXP (x, 1)))
21982 if (INTVAL (XEXP (x, 1)) >= -256
21983 && INTVAL (XEXP (x, 1)) <= 255)
21984 *total = rs6000_cost->mulsi_const9;
21986 *total = rs6000_cost->mulsi_const;
21988 /* FMA accounted in outer PLUS/MINUS. */
21989 else if ((mode == DFmode || mode == SFmode)
21990 && (outer_code == PLUS || outer_code == MINUS))
21992 else if (mode == DFmode)
21993 *total = rs6000_cost->dmul;
21994 else if (mode == SFmode)
21995 *total = rs6000_cost->fp;
21996 else if (mode == DImode)
21997 *total = rs6000_cost->muldi;
21999 *total = rs6000_cost->mulsi;
22004 if (FLOAT_MODE_P (mode))
22006 *total = mode == DFmode ? rs6000_cost->ddiv
22007 : rs6000_cost->sdiv;
22014 if (GET_CODE (XEXP (x, 1)) == CONST_INT
22015 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
22017 if (code == DIV || code == MOD)
22019 *total = COSTS_N_INSNS (2);
22022 *total = COSTS_N_INSNS (1);
22026 if (GET_MODE (XEXP (x, 1)) == DImode)
22027 *total = rs6000_cost->divdi;
22029 *total = rs6000_cost->divsi;
22031 /* Add in shift and subtract for MOD. */
22032 if (code == MOD || code == UMOD)
22033 *total += COSTS_N_INSNS (2);
22038 *total = COSTS_N_INSNS (4);
22042 *total = COSTS_N_INSNS (6);
22046 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
22058 *total = COSTS_N_INSNS (1);
22066 /* Handle mul_highpart. */
22067 if (outer_code == TRUNCATE
22068 && GET_CODE (XEXP (x, 0)) == MULT)
22070 if (mode == DImode)
22071 *total = rs6000_cost->muldi;
22073 *total = rs6000_cost->mulsi;
22076 else if (outer_code == AND)
22079 *total = COSTS_N_INSNS (1);
22084 if (GET_CODE (XEXP (x, 0)) == MEM)
22087 *total = COSTS_N_INSNS (1);
22093 if (!FLOAT_MODE_P (mode))
22095 *total = COSTS_N_INSNS (1);
22101 case UNSIGNED_FLOAT:
22104 case FLOAT_TRUNCATE:
22105 *total = rs6000_cost->fp;
22109 if (mode == DFmode)
22112 *total = rs6000_cost->fp;
22116 switch (XINT (x, 1))
22119 *total = rs6000_cost->fp;
22131 *total = COSTS_N_INSNS (1);
22134 else if (FLOAT_MODE_P (mode)
22135 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
22137 *total = rs6000_cost->fp;
22145 /* Carry bit requires mode == Pmode.
22146 NEG or PLUS already counted so only add one. */
22148 && (outer_code == NEG || outer_code == PLUS))
22150 *total = COSTS_N_INSNS (1);
22153 if (outer_code == SET)
22155 if (XEXP (x, 1) == const0_rtx)
22157 *total = COSTS_N_INSNS (2);
22160 else if (mode == Pmode)
22162 *total = COSTS_N_INSNS (3);
22171 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
22173 *total = COSTS_N_INSNS (2);
22177 if (outer_code == COMPARE)
22191 /* A C expression returning the cost of moving data from a register of class
22192 CLASS1 to one of CLASS2. */
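/* For example, an FPR-to-FPR move costs 2 (4 for TFmode/TDmode), a
   GPR-to-GPR move costs 2 per GPR, and an FPR<->GPR or AltiVec<->GPR
   move is priced as a round trip through memory, i.e. the sum of the
   two memory move costs computed below.  */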
22195 rs6000_register_move_cost (enum machine_mode mode,
22196 enum reg_class from, enum reg_class to)
22198 /* Moves from/to GENERAL_REGS. */
22199 if (reg_classes_intersect_p (to, GENERAL_REGS)
22200 || reg_classes_intersect_p (from, GENERAL_REGS))
22202 if (! reg_classes_intersect_p (to, GENERAL_REGS))
22205 if (from == FLOAT_REGS || from == ALTIVEC_REGS)
22206 return (rs6000_memory_move_cost (mode, from, 0)
22207 + rs6000_memory_move_cost (mode, GENERAL_REGS, 0));
22209 /* It's more expensive to move CR_REGS than CR0_REGS because of the shift. */
22211 else if (from == CR_REGS)
22214 /* Power6 has slower LR/CTR moves so make them more expensive than
22215 memory in order to bias spills to memory. */
22216 else if (rs6000_cpu == PROCESSOR_POWER6
22217 && reg_classes_intersect_p (from, LINK_OR_CTR_REGS))
22218 return 6 * hard_regno_nregs[0][mode];
22221 /* A move will cost one instruction per GPR moved. */
22222 return 2 * hard_regno_nregs[0][mode];
22225 /* Moving between two similar registers is just one instruction. */
22226 else if (reg_classes_intersect_p (to, from))
22227 return (mode == TFmode || mode == TDmode) ? 4 : 2;
22229 /* Everything else has to go through GENERAL_REGS. */
22231 return (rs6000_register_move_cost (mode, GENERAL_REGS, to)
22232 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
22235 /* A C expression returning the cost of moving data of MODE from a register to or from memory. */
22239 rs6000_memory_move_cost (enum machine_mode mode, enum reg_class rclass,
22240 int in ATTRIBUTE_UNUSED)
22242 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
22243 return 4 * hard_regno_nregs[0][mode];
22244 else if (reg_classes_intersect_p (rclass, FLOAT_REGS))
22245 return 4 * hard_regno_nregs[32][mode];
22246 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
22247 return 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
22249 return 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
22252 /* Returns a code for a target-specific builtin that implements
22253 reciprocal of the function, or NULL_TREE if not available. */
22256 rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
22257 bool sqrt ATTRIBUTE_UNUSED)
22259 if (! (TARGET_RECIP && TARGET_PPC_GFXOPT && !optimize_size
22260 && flag_finite_math_only && !flag_trapping_math
22261 && flag_unsafe_math_optimizations))
22269 case BUILT_IN_SQRTF:
22270 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];
22277 /* Newton-Raphson approximation of single-precision floating point divide n/d.
22278 Assumes no trapping math and finite arguments. */
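/* Sketch of the math, assuming exact intermediate arithmetic: with
   e0 = 1 - d*x0 we have 1/d = x0 / (1 - e0) = x0 * (1 + e0 + e0^2 + ...).
   The code forms e1 = e0 + e0*e0 and y1 = x0 + e1*x0 = x0 * (1 + e0 + e0^2),
   a reciprocal estimate with error O(e0^3), and then applies one final
   correction to the quotient itself: dst = u0 + (n - d*u0) * y1.  */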
22281 rs6000_emit_swdivsf (rtx dst, rtx n, rtx d)
22283 rtx x0, e0, e1, y1, u0, v0, one;
22285 x0 = gen_reg_rtx (SFmode);
22286 e0 = gen_reg_rtx (SFmode);
22287 e1 = gen_reg_rtx (SFmode);
22288 y1 = gen_reg_rtx (SFmode);
22289 u0 = gen_reg_rtx (SFmode);
22290 v0 = gen_reg_rtx (SFmode);
22291 one = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, SFmode));
22293 /* x0 = 1./d estimate */
22294 emit_insn (gen_rtx_SET (VOIDmode, x0,
22295 gen_rtx_UNSPEC (SFmode, gen_rtvec (1, d),
22297 /* e0 = 1. - d * x0 */
22298 emit_insn (gen_rtx_SET (VOIDmode, e0,
22299 gen_rtx_MINUS (SFmode, one,
22300 gen_rtx_MULT (SFmode, d, x0))));
22301 /* e1 = e0 + e0 * e0 */
22302 emit_insn (gen_rtx_SET (VOIDmode, e1,
22303 gen_rtx_PLUS (SFmode,
22304 gen_rtx_MULT (SFmode, e0, e0), e0)));
22305 /* y1 = x0 + e1 * x0 */
22306 emit_insn (gen_rtx_SET (VOIDmode, y1,
22307 gen_rtx_PLUS (SFmode,
22308 gen_rtx_MULT (SFmode, e1, x0), x0)));
22310 emit_insn (gen_rtx_SET (VOIDmode, u0,
22311 gen_rtx_MULT (SFmode, n, y1)));
22312 /* v0 = n - d * u0 */
22313 emit_insn (gen_rtx_SET (VOIDmode, v0,
22314 gen_rtx_MINUS (SFmode, n,
22315 gen_rtx_MULT (SFmode, d, u0))));
22316 /* dst = u0 + v0 * y1 */
22317 emit_insn (gen_rtx_SET (VOIDmode, dst,
22318 gen_rtx_PLUS (SFmode,
22319 gen_rtx_MULT (SFmode, v0, y1), u0)));
22322 /* Newton-Raphson approximation of double-precision floating point divide n/d.
22323 Assumes no trapping math and finite arguments. */
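/* Same idea as the single-precision version, but with more refinement
   steps: with e0 = 1 - d*x0 the code forms e1 = e0^2 and e2 = e0^4, so
   y3 = x0 * (1 + e0) * (1 + e0^2) * (1 + e0^4) = x0 * (1 + e0 + ... + e0^7),
   reducing the reciprocal error to O(e0^8) before the final quotient
   correction (again assuming exact intermediate arithmetic).  */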
22326 rs6000_emit_swdivdf (rtx dst, rtx n, rtx d)
22328 rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;
22330 x0 = gen_reg_rtx (DFmode);
22331 e0 = gen_reg_rtx (DFmode);
22332 e1 = gen_reg_rtx (DFmode);
22333 e2 = gen_reg_rtx (DFmode);
22334 y1 = gen_reg_rtx (DFmode);
22335 y2 = gen_reg_rtx (DFmode);
22336 y3 = gen_reg_rtx (DFmode);
22337 u0 = gen_reg_rtx (DFmode);
22338 v0 = gen_reg_rtx (DFmode);
22339 one = force_reg (DFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, DFmode));
22341 /* x0 = 1./d estimate */
22342 emit_insn (gen_rtx_SET (VOIDmode, x0,
22343 gen_rtx_UNSPEC (DFmode, gen_rtvec (1, d),
22345 /* e0 = 1. - d * x0 */
22346 emit_insn (gen_rtx_SET (VOIDmode, e0,
22347 gen_rtx_MINUS (DFmode, one,
22348 gen_rtx_MULT (DFmode, d, x0))));
22349 /* y1 = x0 + e0 * x0 */
22350 emit_insn (gen_rtx_SET (VOIDmode, y1,
22351 gen_rtx_PLUS (DFmode,
22352 gen_rtx_MULT (DFmode, e0, x0), x0)));
22354 emit_insn (gen_rtx_SET (VOIDmode, e1,
22355 gen_rtx_MULT (DFmode, e0, e0)));
22356 /* y2 = y1 + e1 * y1 */
22357 emit_insn (gen_rtx_SET (VOIDmode, y2,
22358 gen_rtx_PLUS (DFmode,
22359 gen_rtx_MULT (DFmode, e1, y1), y1)));
22361 emit_insn (gen_rtx_SET (VOIDmode, e2,
22362 gen_rtx_MULT (DFmode, e1, e1)));
22363 /* y3 = y2 + e2 * y2 */
22364 emit_insn (gen_rtx_SET (VOIDmode, y3,
22365 gen_rtx_PLUS (DFmode,
22366 gen_rtx_MULT (DFmode, e2, y2), y2)));
22368 emit_insn (gen_rtx_SET (VOIDmode, u0,
22369 gen_rtx_MULT (DFmode, n, y3)));
22370 /* v0 = n - d * u0 */
22371 emit_insn (gen_rtx_SET (VOIDmode, v0,
22372 gen_rtx_MINUS (DFmode, n,
22373 gen_rtx_MULT (DFmode, d, u0))));
22374 /* dst = u0 + v0 * y3 */
22375 emit_insn (gen_rtx_SET (VOIDmode, dst,
22376 gen_rtx_PLUS (DFmode,
22377 gen_rtx_MULT (DFmode, v0, y3), u0)));
22381 /* Newton-Raphson approximation of single-precision floating point rsqrt.
22382 Assumes no trapping math and finite arguments. */
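/* This is the standard Newton-Raphson iteration for 1/sqrt(a),
   x' = x * (3 - a*x*x) / 2, written below as x' = x * (1.5 - y1 * (x*x))
   with y1 = 0.5*a (itself computed as 1.5*a - a to reuse the 1.5
   constant), applied three times to the hardware estimate.  Each step
   roughly doubles the number of correct bits, assuming exact
   intermediate arithmetic.  */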
22385 rs6000_emit_swrsqrtsf (rtx dst, rtx src)
22387 rtx x0, x1, x2, y1, u0, u1, u2, v0, v1, v2, t0,
22388 half, one, halfthree, c1, cond, label;
22390 x0 = gen_reg_rtx (SFmode);
22391 x1 = gen_reg_rtx (SFmode);
22392 x2 = gen_reg_rtx (SFmode);
22393 y1 = gen_reg_rtx (SFmode);
22394 u0 = gen_reg_rtx (SFmode);
22395 u1 = gen_reg_rtx (SFmode);
22396 u2 = gen_reg_rtx (SFmode);
22397 v0 = gen_reg_rtx (SFmode);
22398 v1 = gen_reg_rtx (SFmode);
22399 v2 = gen_reg_rtx (SFmode);
22400 t0 = gen_reg_rtx (SFmode);
22401 halfthree = gen_reg_rtx (SFmode);
22402 cond = gen_rtx_REG (CCFPmode, CR1_REGNO);
22403 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
22405 /* Check for 0.0, 1.0, NaN and Inf by testing src * src == src. */
22406 emit_insn (gen_rtx_SET (VOIDmode, t0,
22407 gen_rtx_MULT (SFmode, src, src)));
22409 emit_insn (gen_rtx_SET (VOIDmode, cond,
22410 gen_rtx_COMPARE (CCFPmode, t0, src)));
22411 c1 = gen_rtx_EQ (VOIDmode, cond, const0_rtx);
22412 emit_unlikely_jump (c1, label);
22414 half = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconsthalf, SFmode));
22415 one = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, SFmode));
22417 /* halfthree = 1.5 = 1.0 + 0.5 */
22418 emit_insn (gen_rtx_SET (VOIDmode, halfthree,
22419 gen_rtx_PLUS (SFmode, one, half)));
22421 /* x0 = rsqrt estimate */
22422 emit_insn (gen_rtx_SET (VOIDmode, x0,
22423 gen_rtx_UNSPEC (SFmode, gen_rtvec (1, src),
22426 /* y1 = 0.5 * src = 1.5 * src - src -> fewer constants */
22427 emit_insn (gen_rtx_SET (VOIDmode, y1,
22428 gen_rtx_MINUS (SFmode,
22429 gen_rtx_MULT (SFmode, src, halfthree),
22432 /* x1 = x0 * (1.5 - y1 * (x0 * x0)) */
22433 emit_insn (gen_rtx_SET (VOIDmode, u0,
22434 gen_rtx_MULT (SFmode, x0, x0)));
22435 emit_insn (gen_rtx_SET (VOIDmode, v0,
22436 gen_rtx_MINUS (SFmode,
22438 gen_rtx_MULT (SFmode, y1, u0))));
22439 emit_insn (gen_rtx_SET (VOIDmode, x1,
22440 gen_rtx_MULT (SFmode, x0, v0)));
22442 /* x2 = x1 * (1.5 - y1 * (x1 * x1)) */
22443 emit_insn (gen_rtx_SET (VOIDmode, u1,
22444 gen_rtx_MULT (SFmode, x1, x1)));
22445 emit_insn (gen_rtx_SET (VOIDmode, v1,
22446 gen_rtx_MINUS (SFmode,
22448 gen_rtx_MULT (SFmode, y1, u1))));
22449 emit_insn (gen_rtx_SET (VOIDmode, x2,
22450 gen_rtx_MULT (SFmode, x1, v1)));
22452 /* dst = x2 * (1.5 - y1 * (x2 * x2)) */
22453 emit_insn (gen_rtx_SET (VOIDmode, u2,
22454 gen_rtx_MULT (SFmode, x2, x2)));
22455 emit_insn (gen_rtx_SET (VOIDmode, v2,
22456 gen_rtx_MINUS (SFmode,
22458 gen_rtx_MULT (SFmode, y1, u2))));
22459 emit_insn (gen_rtx_SET (VOIDmode, dst,
22460 gen_rtx_MULT (SFmode, x2, v2)));
22462 emit_label (XEXP (label, 0));
22465 /* Emit popcount intrinsic on TARGET_POPCNTB targets. DST is the
22466 target, and SRC is the argument operand. */
22469 rs6000_emit_popcount (rtx dst, rtx src)
22471 enum machine_mode mode = GET_MODE (dst);
22474 tmp1 = gen_reg_rtx (mode);
22476 if (mode == SImode)
22478 emit_insn (gen_popcntbsi2 (tmp1, src));
22479 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
22481 tmp2 = force_reg (SImode, tmp2);
22482 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
22486 emit_insn (gen_popcntbdi2 (tmp1, src));
22487 tmp2 = expand_mult (DImode, tmp1,
22488 GEN_INT ((HOST_WIDE_INT)
22489 0x01010101 << 32 | 0x01010101),
22491 tmp2 = force_reg (DImode, tmp2);
22492 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
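/* Editorial sketch, not part of the generated sequence above: the same
   byte-summing trick written in plain C, assuming a 32-bit unsigned int.
   The first three lines compute the per-byte population counts that
   popcntb provides in hardware; the multiply by 0x01010101 then
   accumulates all four byte counts into the most significant byte, which
   the final shift extracts.  For example, 0x00FF00FF has per-byte counts
   0x00080008, and 0x00080008 * 0x01010101 == 0x10100808 (mod 2^32),
   whose top byte is 0x10 == 16.  */
static unsigned int ATTRIBUTE_UNUSED
rs6000_popcount32_sketch (unsigned int x)
{
  x = x - ((x >> 1) & 0x55555555u);                  /* 2-bit field counts */
  x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);  /* 4-bit field counts */
  x = (x + (x >> 4)) & 0x0f0f0f0fu;                  /* per-byte counts */
  return (x * 0x01010101u) >> 24;                    /* sum into top byte */
}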
22497 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
22498 target, and SRC is the argument operand. */
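/* As in rs6000_emit_popcount, the fast path below starts from popcntb's
   per-byte counts; XOR-folding the word onto itself (by 16 bits, then by
   8 bits) preserves the low bit of the total count, so the final AND
   with 1 yields the parity without needing the multiply.  */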
22501 rs6000_emit_parity (rtx dst, rtx src)
22503 enum machine_mode mode = GET_MODE (dst);
22506 tmp = gen_reg_rtx (mode);
22507 if (mode == SImode)
22509 /* Is mult+shift >= shift+xor+shift+xor? */
22510 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
22512 rtx tmp1, tmp2, tmp3, tmp4;
22514 tmp1 = gen_reg_rtx (SImode);
22515 emit_insn (gen_popcntbsi2 (tmp1, src));
22517 tmp2 = gen_reg_rtx (SImode);
22518 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
22519 tmp3 = gen_reg_rtx (SImode);
22520 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
22522 tmp4 = gen_reg_rtx (SImode);
22523 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
22524 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
22527 rs6000_emit_popcount (tmp, src);
22528 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
22532 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
22533 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
22535 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
22537 tmp1 = gen_reg_rtx (DImode);
22538 emit_insn (gen_popcntbdi2 (tmp1, src));
22540 tmp2 = gen_reg_rtx (DImode);
22541 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
22542 tmp3 = gen_reg_rtx (DImode);
22543 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
22545 tmp4 = gen_reg_rtx (DImode);
22546 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
22547 tmp5 = gen_reg_rtx (DImode);
22548 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
22550 tmp6 = gen_reg_rtx (DImode);
22551 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
22552 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
22555 rs6000_emit_popcount (tmp, src);
22556 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
22560 /* Return an RTX representing where to find the function value of a
22561 function returning MODE. */
22563 rs6000_complex_function_value (enum machine_mode mode)
22565 unsigned int regno;
22567 enum machine_mode inner = GET_MODE_INNER (mode);
22568 unsigned int inner_bytes = GET_MODE_SIZE (inner);
22570 if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
22571 regno = FP_ARG_RETURN;
22574 regno = GP_ARG_RETURN;
22576 /* 32-bit is OK since it'll go in r3/r4. */
22577 if (TARGET_32BIT && inner_bytes >= 4)
22578 return gen_rtx_REG (mode, regno);
22581 if (inner_bytes >= 8)
22582 return gen_rtx_REG (mode, regno);
22584 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
22586 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
22587 GEN_INT (inner_bytes));
22588 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
22591 /* Define how to find the value returned by a function.
22592 VALTYPE is the data type of the value (as a tree).
22593 If the precise function being called is known, FUNC is its FUNCTION_DECL;
22594 otherwise, FUNC is 0.
22596 On the SPE, both FPs and vectors are returned in r3.
22598 On RS/6000 an integer value is in r3 and a floating-point value is in
22599 fp1, unless -msoft-float. */
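/* For example, under the 32-bit ABI with -mpowerpc64 a 64-bit integer is
   returned split across r3/r4, which is why the DImode case below builds
   a PARALLEL of two SImode registers starting at GP_ARG_RETURN.  */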
22602 rs6000_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
22604 enum machine_mode mode;
22605 unsigned int regno;
22607 /* Special handling for structs in darwin64. */
22608 if (rs6000_darwin64_abi
22609 && TYPE_MODE (valtype) == BLKmode
22610 && TREE_CODE (valtype) == RECORD_TYPE
22611 && int_size_in_bytes (valtype) > 0)
22613 CUMULATIVE_ARGS valcum;
22617 valcum.fregno = FP_ARG_MIN_REG;
22618 valcum.vregno = ALTIVEC_ARG_MIN_REG;
22619 /* Do a trial code generation as if this were going to be passed as
22620 an argument; if any part goes in memory, we return NULL. */
22621 valret = rs6000_darwin64_record_arg (&valcum, valtype, 1, true);
22624 /* Otherwise fall through to standard ABI rules. */
22627 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
22629 /* A long long return value needs to be split under the 32-bit ABI with -mpowerpc64. */
22630 return gen_rtx_PARALLEL (DImode,
22632 gen_rtx_EXPR_LIST (VOIDmode,
22633 gen_rtx_REG (SImode, GP_ARG_RETURN),
22635 gen_rtx_EXPR_LIST (VOIDmode,
22636 gen_rtx_REG (SImode,
22637 GP_ARG_RETURN + 1),
22640 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
22642 return gen_rtx_PARALLEL (DCmode,
22644 gen_rtx_EXPR_LIST (VOIDmode,
22645 gen_rtx_REG (SImode, GP_ARG_RETURN),
22647 gen_rtx_EXPR_LIST (VOIDmode,
22648 gen_rtx_REG (SImode,
22649 GP_ARG_RETURN + 1),
22651 gen_rtx_EXPR_LIST (VOIDmode,
22652 gen_rtx_REG (SImode,
22653 GP_ARG_RETURN + 2),
22655 gen_rtx_EXPR_LIST (VOIDmode,
22656 gen_rtx_REG (SImode,
22657 GP_ARG_RETURN + 3),
22661 mode = TYPE_MODE (valtype);
22662 if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
22663 || POINTER_TYPE_P (valtype))
22664 mode = TARGET_32BIT ? SImode : DImode;
22666 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
22667 /* _Decimal128 must use an even/odd register pair. */
22668 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
22669 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
22670 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
22671 regno = FP_ARG_RETURN;
22672 else if (TREE_CODE (valtype) == COMPLEX_TYPE
22673 && targetm.calls.split_complex_arg)
22674 return rs6000_complex_function_value (mode);
22675 else if (TREE_CODE (valtype) == VECTOR_TYPE
22676 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
22677 && ALTIVEC_VECTOR_MODE (mode))
22678 regno = ALTIVEC_ARG_RETURN;
22679 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
22680 && (mode == DFmode || mode == DCmode
22681 || mode == TFmode || mode == TCmode))
22682 return spe_build_register_parallel (mode, GP_ARG_RETURN);
22684 regno = GP_ARG_RETURN;
22686 return gen_rtx_REG (mode, regno);
22689 /* Define how to find the value returned by a library function
22690 assuming the value has mode MODE. */
22692 rs6000_libcall_value (enum machine_mode mode)
22694 unsigned int regno;
22696 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
22698 /* A long long return value needs to be split under the 32-bit ABI with -mpowerpc64. */
22699 return gen_rtx_PARALLEL (DImode,
22701 gen_rtx_EXPR_LIST (VOIDmode,
22702 gen_rtx_REG (SImode, GP_ARG_RETURN),
22704 gen_rtx_EXPR_LIST (VOIDmode,
22705 gen_rtx_REG (SImode,
22706 GP_ARG_RETURN + 1),
22710 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
22711 /* _Decimal128 must use an even/odd register pair. */
22712 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
22713 else if (SCALAR_FLOAT_MODE_P (mode)
22714 && TARGET_HARD_FLOAT && TARGET_FPRS
22715 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
22716 regno = FP_ARG_RETURN;
22717 else if (ALTIVEC_VECTOR_MODE (mode)
22718 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
22719 regno = ALTIVEC_ARG_RETURN;
22720 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
22721 return rs6000_complex_function_value (mode);
22722 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
22723 && (mode == DFmode || mode == DCmode
22724 || mode == TFmode || mode == TCmode))
22725 return spe_build_register_parallel (mode, GP_ARG_RETURN);
22727 regno = GP_ARG_RETURN;
22729 return gen_rtx_REG (mode, regno);
22732 /* Define the offset between two registers, FROM to be eliminated and its
22733 replacement TO, at the start of a routine. */
22735 rs6000_initial_elimination_offset (int from, int to)
22737 rs6000_stack_t *info = rs6000_stack_info ();
22738 HOST_WIDE_INT offset;
22740 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
22741 offset = info->push_p ? 0 : -info->total_size;
22742 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
22744 offset = info->push_p ? 0 : -info->total_size;
22745 if (FRAME_GROWS_DOWNWARD)
22746 offset += info->fixed_size + info->vars_size + info->parm_size;
22748 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
22749 offset = FRAME_GROWS_DOWNWARD
22750 ? info->fixed_size + info->vars_size + info->parm_size
22752 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
22753 offset = info->total_size;
22754 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
22755 offset = info->push_p ? info->total_size : 0;
22756 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
22759 gcc_unreachable ();
22765 rs6000_dwarf_register_span (rtx reg)
22769 unsigned regno = REGNO (reg);
22770 enum machine_mode mode = GET_MODE (reg);
22774 && (SPE_VECTOR_MODE (GET_MODE (reg))
22775 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
22776 && mode != SFmode && mode != SDmode && mode != SCmode)))
22781 regno = REGNO (reg);
22783 /* The duality of the SPE register size wreaks all kinds of havoc.
22784 This is a way of distinguishing r0 in 32-bit mode from r0 in 64-bit mode. */
22786 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
22787 gcc_assert (words <= 4);
22788 for (i = 0; i < words; i++, regno++)
22790 if (BYTES_BIG_ENDIAN)
22792 parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
22793 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
22797 parts[2 * i] = gen_rtx_REG (SImode, regno);
22798 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
22802 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
22805 /* Fill in sizes for SPE register high parts in table used by unwinder. */
22808 rs6000_init_dwarf_reg_sizes_extra (tree address)
22813 enum machine_mode mode = TYPE_MODE (char_type_node);
22814 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
22815 rtx mem = gen_rtx_MEM (BLKmode, addr);
22816 rtx value = gen_int_mode (4, mode);
22818 for (i = 1201; i < 1232; i++)
22820 int column = DWARF_REG_TO_UNWIND_COLUMN (i);
22821 HOST_WIDE_INT offset
22822 = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);
22824 emit_move_insn (adjust_address (mem, mode, offset), value);
22829 /* Map internal gcc register numbers to DWARF2 register numbers. */
22832 rs6000_dbx_register_number (unsigned int regno)
22834 if (regno <= 63 || write_symbols != DWARF2_DEBUG)
22836 if (regno == MQ_REGNO)
22838 if (regno == LR_REGNO)
22840 if (regno == CTR_REGNO)
22842 if (CR_REGNO_P (regno))
22843 return regno - CR0_REGNO + 86;
22844 if (regno == XER_REGNO)
22846 if (ALTIVEC_REGNO_P (regno))
22847 return regno - FIRST_ALTIVEC_REGNO + 1124;
22848 if (regno == VRSAVE_REGNO)
22850 if (regno == VSCR_REGNO)
22852 if (regno == SPE_ACC_REGNO)
22854 if (regno == SPEFSCR_REGNO)
22856 /* SPE high reg number. We get these values of regno from
22857 rs6000_dwarf_register_span. */
22858 gcc_assert (regno >= 1200 && regno < 1232);
22862 /* Target hook for eh_return_filter_mode. */
22863 static enum machine_mode
22864 rs6000_eh_return_filter_mode (void)
22866 return TARGET_32BIT ? SImode : word_mode;
22869 /* Target hook for scalar_mode_supported_p. */
22871 rs6000_scalar_mode_supported_p (enum machine_mode mode)
22873 if (DECIMAL_FLOAT_MODE_P (mode))
22876 return default_scalar_mode_supported_p (mode);
22879 /* Target hook for vector_mode_supported_p. */
22881 rs6000_vector_mode_supported_p (enum machine_mode mode)
22884 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
22887 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
22890 else if (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (mode))
22897 /* Target hook for invalid_arg_for_unprototyped_fn. */
22898 static const char *
22899 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
22901 return (!rs6000_darwin64_abi
22903 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
22904 && (funcdecl == NULL_TREE
22905 || (TREE_CODE (funcdecl) == FUNCTION_DECL
22906 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
22907 ? N_("AltiVec argument passed to unprototyped function")
22911 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
22912 setup by using __stack_chk_fail_local hidden function instead of
22913 calling __stack_chk_fail directly. Otherwise it is better to call
22914 __stack_chk_fail directly. */
22917 rs6000_stack_protect_fail (void)
22919 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
22920 ? default_hidden_stack_protect_fail ()
22921 : default_external_stack_protect_fail ();
22925 rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
22926 int num_operands ATTRIBUTE_UNUSED)
22928 if (rs6000_warn_cell_microcode)
22931 int insn_code_number = recog_memoized (insn);
22932 location_t location = locator_location (INSN_LOCATOR (insn));
22934 /* Punt on insns we cannot recognize. */
22935 if (insn_code_number < 0)
22938 temp = get_insn_template (insn_code_number, insn);
22940 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
22941 warning_at (location, OPT_mwarn_cell_microcode,
22942 "emitting microcode insn %s\t[%s] #%d",
22943 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
22944 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
22945 warning_at (location, OPT_mwarn_cell_microcode,
22946 "emitting conditional microcode insn %s\t[%s] #%d",
22947 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
22951 #include "gt-rs6000.h"