1 /* Subroutines used for code generation on IBM RS/6000.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it
10 under the terms of the GNU General Public License as published
11 by the Free Software Foundation; either version 3, or (at your
12 option) any later version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT
15 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
17 License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
29 #include "hard-reg-set.h"
30 #include "insn-config.h"
31 #include "conditions.h"
32 #include "insn-attr.h"
42 #include "basic-block.h"
43 #include "integrate.h"
49 #include "target-def.h"
50 #include "langhooks.h"
52 #include "cfglayout.h"
53 #include "sched-int.h"
55 #include "tree-flow.h"
58 #include "tm-constrs.h"
60 #include "xcoffout.h" /* get declarations of xcoff_*_section_name */
63 #include "gstab.h" /* for N_SLINE */
66 #ifndef TARGET_NO_PROTOTYPE
67 #define TARGET_NO_PROTOTYPE 0
70 #define min(A,B) ((A) < (B) ? (A) : (B))
71 #define max(A,B) ((A) > (B) ? (A) : (B))
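/* Note: as with any function-like macro, the arguments may be evaluated
   more than once, so these should not be used with expressions that have
   side effects (e.g. min (i++, n)).  */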
73 /* Structure used to define the rs6000 stack */
74 typedef struct rs6000_stack {
75 int first_gp_reg_save; /* first callee saved GP register used */
76 int first_fp_reg_save; /* first callee saved FP register used */
77 int first_altivec_reg_save; /* first callee saved AltiVec register used */
78 int lr_save_p; /* true if the link reg needs to be saved */
79 int cr_save_p; /* true if the CR reg needs to be saved */
80 unsigned int vrsave_mask; /* mask of vec registers to save */
81 int push_p; /* true if we need to allocate stack space */
82 int calls_p; /* true if the function makes any calls */
83 int world_save_p; /* true if we're saving *everything*:
84 r13-r31, cr, f14-f31, vrsave, v20-v31 */
85 enum rs6000_abi abi; /* which ABI to use */
86 int gp_save_offset; /* offset to save GP regs from initial SP */
87 int fp_save_offset; /* offset to save FP regs from initial SP */
88 int altivec_save_offset; /* offset to save AltiVec regs from initial SP */
89 int lr_save_offset; /* offset to save LR from initial SP */
90 int cr_save_offset; /* offset to save CR from initial SP */
91 int vrsave_save_offset; /* offset to save VRSAVE from initial SP */
92 int spe_gp_save_offset; /* offset to save spe 64-bit gprs */
93 int varargs_save_offset; /* offset to save the varargs registers */
94 int ehrd_offset; /* offset to EH return data */
95 int reg_size; /* register size (4 or 8) */
96 HOST_WIDE_INT vars_size; /* variable save area size */
97 int parm_size; /* outgoing parameter size */
98 int save_size; /* save area size */
99 int fixed_size; /* fixed size of stack frame */
100 int gp_size; /* size of saved GP registers */
101 int fp_size; /* size of saved FP registers */
102 int altivec_size; /* size of saved AltiVec registers */
103 int cr_size; /* size to hold CR if not in save_size */
104 int vrsave_size; /* size to hold VRSAVE if not in save_size */
105 int altivec_padding_size; /* size of altivec alignment padding if
107 int spe_gp_size; /* size of 64-bit GPR save size for SPE */
108 int spe_padding_size;
109 HOST_WIDE_INT total_size; /* total bytes allocated for stack */
110 int spe_64bit_regs_used;
113 /* A C structure for machine-specific, per-function data.
114 This is added to the cfun structure. */
115 typedef struct GTY(()) machine_function
117 /* Some local-dynamic symbol. */
118 const char *some_ld_name;
119 /* Whether the instruction chain has been scanned already. */
120 int insn_chain_scanned_p;
121 /* Flags if __builtin_return_address (n) with n >= 1 was used. */
122 int ra_needs_full_frame;
123 /* Flags if __builtin_return_address (0) was used. */
125 /* Cache lr_save_p after expansion of builtin_eh_return. */
127 /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
128 varargs save area. */
129 HOST_WIDE_INT varargs_save_offset;
130 /* Temporary stack slot to use for SDmode copies. This slot is
131 64-bits wide and is allocated early enough so that the offset
132 does not overflow the 16-bit load/store offset field. */
133 rtx sdmode_stack_slot;
136 /* Target cpu type */
138 enum processor_type rs6000_cpu;
139 struct rs6000_cpu_select rs6000_select[3] =
141 /* switch name, tune arch */
142 { (const char *)0, "--with-cpu=", 1, 1 },
143 { (const char *)0, "-mcpu=", 1, 1 },
144 { (const char *)0, "-mtune=", 1, 0 },
147 /* Always emit branch hint bits. */
148 static GTY(()) bool rs6000_always_hint;
150 /* Schedule instructions for group formation. */
151 static GTY(()) bool rs6000_sched_groups;
153 /* Align branch targets. */
154 static GTY(()) bool rs6000_align_branch_targets;
156 /* Support for -msched-costly-dep option. */
157 const char *rs6000_sched_costly_dep_str;
158 enum rs6000_dependence_cost rs6000_sched_costly_dep;
160 /* Support for -minsert-sched-nops option. */
161 const char *rs6000_sched_insert_nops_str;
162 enum rs6000_nop_insertion rs6000_sched_insert_nops;
164 /* Support targetm.vectorize.builtin_mask_for_load. */
165 static GTY(()) tree altivec_builtin_mask_for_load;
167 /* Size of long double. */
168 int rs6000_long_double_type_size;
170 /* IEEE quad extended precision long double. */
173 /* Nonzero to use AltiVec ABI. */
174 int rs6000_altivec_abi;
176 /* Nonzero if we want SPE SIMD instructions. */
179 /* Nonzero if we want SPE ABI extensions. */
182 /* Nonzero if floating point operations are done in the GPRs. */
183 int rs6000_float_gprs = 0;
185 /* Nonzero if we want Darwin's struct-by-value-in-regs ABI. */
186 int rs6000_darwin64_abi;
188 /* Set to nonzero once AIX common-mode calls have been defined. */
189 static GTY(()) int common_mode_defined;
191 /* Label number of the label created for -mrelocatable, which is called
192 so we can get the address of the GOT section. */
193 int rs6000_pic_labelno;
196 /* Which ABI to adhere to. */
197 const char *rs6000_abi_name;
199 /* Semantics of the small data area */
200 enum rs6000_sdata_type rs6000_sdata = SDATA_DATA;
202 /* Which small data model to use */
203 const char *rs6000_sdata_name = (char *)0;
205 /* Counter for labels which are to be placed in .fixup. */
206 int fixuplabelno = 0;
209 /* Bit size of immediate TLS offsets and string from which it is decoded. */
210 int rs6000_tls_size = 32;
211 const char *rs6000_tls_size_string;
213 /* ABI enumeration available for subtarget to use. */
214 enum rs6000_abi rs6000_current_abi;
216 /* Whether to use variant of AIX ABI for PowerPC64 Linux. */
220 const char *rs6000_debug_name;
221 int rs6000_debug_stack; /* debug stack applications */
222 int rs6000_debug_arg; /* debug argument handling */
223 int rs6000_debug_reg; /* debug register classes */
224 int rs6000_debug_addr; /* debug memory addressing */
225 int rs6000_debug_cost; /* debug rtx_costs */
227 /* Specify the machine mode that pointers have. After generation of rtl, the
228 compiler makes no further distinction between pointers and any other objects
229 of this machine mode. The type is unsigned since not all things that
230 include rs6000.h also include machmode.h. */
231 unsigned rs6000_pmode;
233 /* Width in bits of a pointer. */
234 unsigned rs6000_pointer_size;
237 /* Value is TRUE if register/mode pair is acceptable. */
238 bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
240 /* Maximum number of registers needed for a given register class and mode. */
241 unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];
243 /* How many registers are needed for a given register and mode. */
244 unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
246 /* Map register number to register class. */
247 enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];
249 /* Reload functions based on the type and the vector unit. */
250 static enum insn_code rs6000_vector_reload[NUM_MACHINE_MODES][2];
252 /* Built in types. */
253 tree rs6000_builtin_types[RS6000_BTI_MAX];
254 tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
256 const char *rs6000_traceback_name;
258 traceback_default = 0,
264 /* Flag to say the TOC is initialized */
266 char toc_label_name[10];
268 /* Cached value of rs6000_variable_issue. This is cached in
269 rs6000_variable_issue hook and returned from rs6000_sched_reorder2. */
270 static short cached_can_issue_more;
272 static GTY(()) section *read_only_data_section;
273 static GTY(()) section *private_data_section;
274 static GTY(()) section *read_only_private_data_section;
275 static GTY(()) section *sdata2_section;
276 static GTY(()) section *toc_section;
278 /* Control alignment for fields within structures. */
279 /* String from -malign-XXXXX. */
280 int rs6000_alignment_flags;
282 /* True for any options that were explicitly set. */
284 bool aix_struct_ret; /* True if -maix-struct-ret was used. */
285 bool alignment; /* True if -malign- was used. */
286 bool spe_abi; /* True if -mabi=spe/no-spe was used. */
287 bool altivec_abi; /* True if -mabi=altivec/no-altivec used. */
288 bool spe; /* True if -mspe= was used. */
289 bool float_gprs; /* True if -mfloat-gprs= was used. */
290 bool long_double; /* True if -mlong-double- was used. */
291 bool ieee; /* True if -mabi=ieee/ibmlongdouble used. */
292 bool vrsave; /* True if -mvrsave was used. */
293 } rs6000_explicit_options;
295 struct builtin_description
297 /* mask is not const because we're going to alter it below. This
298 nonsense will go away when we rewrite the -march infrastructure
299 to give us more target flag bits. */
301 const enum insn_code icode;
302 const char *const name;
303 const enum rs6000_builtins code;
306 /* Describe the vector unit used for modes. */
307 enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
308 enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];
310 /* Register classes for various constraints that are based on the target
312 enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];
314 /* Describe the alignment of a vector. */
315 int rs6000_vector_align[NUM_MACHINE_MODES];
317 /* Map selected modes to types for builtins. */
318 static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];
320 /* Target cpu costs. */
322 struct processor_costs {
323 const int mulsi; /* cost of SImode multiplication. */
324 const int mulsi_const; /* cost of SImode multiplication by constant. */
325 const int mulsi_const9; /* cost of SImode mult by short constant. */
326 const int muldi; /* cost of DImode multiplication. */
327 const int divsi; /* cost of SImode division. */
328 const int divdi; /* cost of DImode division. */
329 const int fp; /* cost of simple SFmode and DFmode insns. */
330 const int dmul; /* cost of DFmode multiplication (and fmadd). */
331 const int sdiv; /* cost of SFmode division (fdivs). */
332 const int ddiv; /* cost of DFmode division (fdiv). */
333 const int cache_line_size; /* cache line size in bytes. */
334 const int l1_cache_size; /* size of l1 cache, in kilobytes. */
335 const int l2_cache_size; /* size of l2 cache, in kilobytes. */
336 const int simultaneous_prefetches; /* number of parallel prefetch
340 const struct processor_costs *rs6000_cost;
342 /* Processor costs (relative to an add) */
344 /* Instruction size costs on 32-bit processors. */
346 struct processor_costs size32_cost = {
347 COSTS_N_INSNS (1), /* mulsi */
348 COSTS_N_INSNS (1), /* mulsi_const */
349 COSTS_N_INSNS (1), /* mulsi_const9 */
350 COSTS_N_INSNS (1), /* muldi */
351 COSTS_N_INSNS (1), /* divsi */
352 COSTS_N_INSNS (1), /* divdi */
353 COSTS_N_INSNS (1), /* fp */
354 COSTS_N_INSNS (1), /* dmul */
355 COSTS_N_INSNS (1), /* sdiv */
356 COSTS_N_INSNS (1), /* ddiv */
363 /* Instruction size costs on 64-bit processors. */
365 struct processor_costs size64_cost = {
366 COSTS_N_INSNS (1), /* mulsi */
367 COSTS_N_INSNS (1), /* mulsi_const */
368 COSTS_N_INSNS (1), /* mulsi_const9 */
369 COSTS_N_INSNS (1), /* muldi */
370 COSTS_N_INSNS (1), /* divsi */
371 COSTS_N_INSNS (1), /* divdi */
372 COSTS_N_INSNS (1), /* fp */
373 COSTS_N_INSNS (1), /* dmul */
374 COSTS_N_INSNS (1), /* sdiv */
375 COSTS_N_INSNS (1), /* ddiv */
382 /* Instruction costs on RIOS1 processors. */
384 struct processor_costs rios1_cost = {
385 COSTS_N_INSNS (5), /* mulsi */
386 COSTS_N_INSNS (4), /* mulsi_const */
387 COSTS_N_INSNS (3), /* mulsi_const9 */
388 COSTS_N_INSNS (5), /* muldi */
389 COSTS_N_INSNS (19), /* divsi */
390 COSTS_N_INSNS (19), /* divdi */
391 COSTS_N_INSNS (2), /* fp */
392 COSTS_N_INSNS (2), /* dmul */
393 COSTS_N_INSNS (19), /* sdiv */
394 COSTS_N_INSNS (19), /* ddiv */
395 128, /* cache line size */
401 /* Instruction costs on RIOS2 processors. */
403 struct processor_costs rios2_cost = {
404 COSTS_N_INSNS (2), /* mulsi */
405 COSTS_N_INSNS (2), /* mulsi_const */
406 COSTS_N_INSNS (2), /* mulsi_const9 */
407 COSTS_N_INSNS (2), /* muldi */
408 COSTS_N_INSNS (13), /* divsi */
409 COSTS_N_INSNS (13), /* divdi */
410 COSTS_N_INSNS (2), /* fp */
411 COSTS_N_INSNS (2), /* dmul */
412 COSTS_N_INSNS (17), /* sdiv */
413 COSTS_N_INSNS (17), /* ddiv */
414 256, /* cache line size */
420 /* Instruction costs on RS64A processors. */
422 struct processor_costs rs64a_cost = {
423 COSTS_N_INSNS (20), /* mulsi */
424 COSTS_N_INSNS (12), /* mulsi_const */
425 COSTS_N_INSNS (8), /* mulsi_const9 */
426 COSTS_N_INSNS (34), /* muldi */
427 COSTS_N_INSNS (65), /* divsi */
428 COSTS_N_INSNS (67), /* divdi */
429 COSTS_N_INSNS (4), /* fp */
430 COSTS_N_INSNS (4), /* dmul */
431 COSTS_N_INSNS (31), /* sdiv */
432 COSTS_N_INSNS (31), /* ddiv */
433 128, /* cache line size */
439 /* Instruction costs on MPCCORE processors. */
441 struct processor_costs mpccore_cost = {
442 COSTS_N_INSNS (2), /* mulsi */
443 COSTS_N_INSNS (2), /* mulsi_const */
444 COSTS_N_INSNS (2), /* mulsi_const9 */
445 COSTS_N_INSNS (2), /* muldi */
446 COSTS_N_INSNS (6), /* divsi */
447 COSTS_N_INSNS (6), /* divdi */
448 COSTS_N_INSNS (4), /* fp */
449 COSTS_N_INSNS (5), /* dmul */
450 COSTS_N_INSNS (10), /* sdiv */
451 COSTS_N_INSNS (17), /* ddiv */
452 32, /* cache line size */
458 /* Instruction costs on PPC403 processors. */
460 struct processor_costs ppc403_cost = {
461 COSTS_N_INSNS (4), /* mulsi */
462 COSTS_N_INSNS (4), /* mulsi_const */
463 COSTS_N_INSNS (4), /* mulsi_const9 */
464 COSTS_N_INSNS (4), /* muldi */
465 COSTS_N_INSNS (33), /* divsi */
466 COSTS_N_INSNS (33), /* divdi */
467 COSTS_N_INSNS (11), /* fp */
468 COSTS_N_INSNS (11), /* dmul */
469 COSTS_N_INSNS (11), /* sdiv */
470 COSTS_N_INSNS (11), /* ddiv */
471 32, /* cache line size */
477 /* Instruction costs on PPC405 processors. */
479 struct processor_costs ppc405_cost = {
480 COSTS_N_INSNS (5), /* mulsi */
481 COSTS_N_INSNS (4), /* mulsi_const */
482 COSTS_N_INSNS (3), /* mulsi_const9 */
483 COSTS_N_INSNS (5), /* muldi */
484 COSTS_N_INSNS (35), /* divsi */
485 COSTS_N_INSNS (35), /* divdi */
486 COSTS_N_INSNS (11), /* fp */
487 COSTS_N_INSNS (11), /* dmul */
488 COSTS_N_INSNS (11), /* sdiv */
489 COSTS_N_INSNS (11), /* ddiv */
490 32, /* cache line size */
496 /* Instruction costs on PPC440 processors. */
498 struct processor_costs ppc440_cost = {
499 COSTS_N_INSNS (3), /* mulsi */
500 COSTS_N_INSNS (2), /* mulsi_const */
501 COSTS_N_INSNS (2), /* mulsi_const9 */
502 COSTS_N_INSNS (3), /* muldi */
503 COSTS_N_INSNS (34), /* divsi */
504 COSTS_N_INSNS (34), /* divdi */
505 COSTS_N_INSNS (5), /* fp */
506 COSTS_N_INSNS (5), /* dmul */
507 COSTS_N_INSNS (19), /* sdiv */
508 COSTS_N_INSNS (33), /* ddiv */
509 32, /* cache line size */
515 /* Instruction costs on PPC476 processors. */
517 struct processor_costs ppc476_cost = {
518 COSTS_N_INSNS (4), /* mulsi */
519 COSTS_N_INSNS (4), /* mulsi_const */
520 COSTS_N_INSNS (4), /* mulsi_const9 */
521 COSTS_N_INSNS (4), /* muldi */
522 COSTS_N_INSNS (11), /* divsi */
523 COSTS_N_INSNS (11), /* divdi */
524 COSTS_N_INSNS (6), /* fp */
525 COSTS_N_INSNS (6), /* dmul */
526 COSTS_N_INSNS (19), /* sdiv */
527 COSTS_N_INSNS (33), /* ddiv */
528 32, /* l1 cache line size */
534 /* Instruction costs on PPC601 processors. */
536 struct processor_costs ppc601_cost = {
537 COSTS_N_INSNS (5), /* mulsi */
538 COSTS_N_INSNS (5), /* mulsi_const */
539 COSTS_N_INSNS (5), /* mulsi_const9 */
540 COSTS_N_INSNS (5), /* muldi */
541 COSTS_N_INSNS (36), /* divsi */
542 COSTS_N_INSNS (36), /* divdi */
543 COSTS_N_INSNS (4), /* fp */
544 COSTS_N_INSNS (5), /* dmul */
545 COSTS_N_INSNS (17), /* sdiv */
546 COSTS_N_INSNS (31), /* ddiv */
547 32, /* cache line size */
553 /* Instruction costs on PPC603 processors. */
555 struct processor_costs ppc603_cost = {
556 COSTS_N_INSNS (5), /* mulsi */
557 COSTS_N_INSNS (3), /* mulsi_const */
558 COSTS_N_INSNS (2), /* mulsi_const9 */
559 COSTS_N_INSNS (5), /* muldi */
560 COSTS_N_INSNS (37), /* divsi */
561 COSTS_N_INSNS (37), /* divdi */
562 COSTS_N_INSNS (3), /* fp */
563 COSTS_N_INSNS (4), /* dmul */
564 COSTS_N_INSNS (18), /* sdiv */
565 COSTS_N_INSNS (33), /* ddiv */
566 32, /* cache line size */
572 /* Instruction costs on PPC604 processors. */
574 struct processor_costs ppc604_cost = {
575 COSTS_N_INSNS (4), /* mulsi */
576 COSTS_N_INSNS (4), /* mulsi_const */
577 COSTS_N_INSNS (4), /* mulsi_const9 */
578 COSTS_N_INSNS (4), /* muldi */
579 COSTS_N_INSNS (20), /* divsi */
580 COSTS_N_INSNS (20), /* divdi */
581 COSTS_N_INSNS (3), /* fp */
582 COSTS_N_INSNS (3), /* dmul */
583 COSTS_N_INSNS (18), /* sdiv */
584 COSTS_N_INSNS (32), /* ddiv */
585 32, /* cache line size */
591 /* Instruction costs on PPC604e processors. */
593 struct processor_costs ppc604e_cost = {
594 COSTS_N_INSNS (2), /* mulsi */
595 COSTS_N_INSNS (2), /* mulsi_const */
596 COSTS_N_INSNS (2), /* mulsi_const9 */
597 COSTS_N_INSNS (2), /* muldi */
598 COSTS_N_INSNS (20), /* divsi */
599 COSTS_N_INSNS (20), /* divdi */
600 COSTS_N_INSNS (3), /* fp */
601 COSTS_N_INSNS (3), /* dmul */
602 COSTS_N_INSNS (18), /* sdiv */
603 COSTS_N_INSNS (32), /* ddiv */
604 32, /* cache line size */
610 /* Instruction costs on PPC620 processors. */
612 struct processor_costs ppc620_cost = {
613 COSTS_N_INSNS (5), /* mulsi */
614 COSTS_N_INSNS (4), /* mulsi_const */
615 COSTS_N_INSNS (3), /* mulsi_const9 */
616 COSTS_N_INSNS (7), /* muldi */
617 COSTS_N_INSNS (21), /* divsi */
618 COSTS_N_INSNS (37), /* divdi */
619 COSTS_N_INSNS (3), /* fp */
620 COSTS_N_INSNS (3), /* dmul */
621 COSTS_N_INSNS (18), /* sdiv */
622 COSTS_N_INSNS (32), /* ddiv */
623 128, /* cache line size */
629 /* Instruction costs on PPC630 processors. */
631 struct processor_costs ppc630_cost = {
632 COSTS_N_INSNS (5), /* mulsi */
633 COSTS_N_INSNS (4), /* mulsi_const */
634 COSTS_N_INSNS (3), /* mulsi_const9 */
635 COSTS_N_INSNS (7), /* muldi */
636 COSTS_N_INSNS (21), /* divsi */
637 COSTS_N_INSNS (37), /* divdi */
638 COSTS_N_INSNS (3), /* fp */
639 COSTS_N_INSNS (3), /* dmul */
640 COSTS_N_INSNS (17), /* sdiv */
641 COSTS_N_INSNS (21), /* ddiv */
642 128, /* cache line size */
648 /* Instruction costs on Cell processor. */
649 /* COSTS_N_INSNS (1) ~ one add. */
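/* A rough worked example, assuming the usual COSTS_N_INSNS (N) == (N) * 4
   scaling: the Cell entries below use integer division, so
   COSTS_N_INSNS (9/2) + 2 is COSTS_N_INSNS (4) + 2 == 18, i.e. an SImode
   multiply is modelled as roughly 4.5 times the cost of an add.  */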
651 struct processor_costs ppccell_cost = {
652 COSTS_N_INSNS (9/2)+2, /* mulsi */
653 COSTS_N_INSNS (6/2), /* mulsi_const */
654 COSTS_N_INSNS (6/2), /* mulsi_const9 */
655 COSTS_N_INSNS (15/2)+2, /* muldi */
656 COSTS_N_INSNS (38/2), /* divsi */
657 COSTS_N_INSNS (70/2), /* divdi */
658 COSTS_N_INSNS (10/2), /* fp */
659 COSTS_N_INSNS (10/2), /* dmul */
660 COSTS_N_INSNS (74/2), /* sdiv */
661 COSTS_N_INSNS (74/2), /* ddiv */
662 128, /* cache line size */
668 /* Instruction costs on PPC750 and PPC7400 processors. */
670 struct processor_costs ppc750_cost = {
671 COSTS_N_INSNS (5), /* mulsi */
672 COSTS_N_INSNS (3), /* mulsi_const */
673 COSTS_N_INSNS (2), /* mulsi_const9 */
674 COSTS_N_INSNS (5), /* muldi */
675 COSTS_N_INSNS (17), /* divsi */
676 COSTS_N_INSNS (17), /* divdi */
677 COSTS_N_INSNS (3), /* fp */
678 COSTS_N_INSNS (3), /* dmul */
679 COSTS_N_INSNS (17), /* sdiv */
680 COSTS_N_INSNS (31), /* ddiv */
681 32, /* cache line size */
687 /* Instruction costs on PPC7450 processors. */
689 struct processor_costs ppc7450_cost = {
690 COSTS_N_INSNS (4), /* mulsi */
691 COSTS_N_INSNS (3), /* mulsi_const */
692 COSTS_N_INSNS (3), /* mulsi_const9 */
693 COSTS_N_INSNS (4), /* muldi */
694 COSTS_N_INSNS (23), /* divsi */
695 COSTS_N_INSNS (23), /* divdi */
696 COSTS_N_INSNS (5), /* fp */
697 COSTS_N_INSNS (5), /* dmul */
698 COSTS_N_INSNS (21), /* sdiv */
699 COSTS_N_INSNS (35), /* ddiv */
700 32, /* cache line size */
706 /* Instruction costs on PPC8540 processors. */
708 struct processor_costs ppc8540_cost = {
709 COSTS_N_INSNS (4), /* mulsi */
710 COSTS_N_INSNS (4), /* mulsi_const */
711 COSTS_N_INSNS (4), /* mulsi_const9 */
712 COSTS_N_INSNS (4), /* muldi */
713 COSTS_N_INSNS (19), /* divsi */
714 COSTS_N_INSNS (19), /* divdi */
715 COSTS_N_INSNS (4), /* fp */
716 COSTS_N_INSNS (4), /* dmul */
717 COSTS_N_INSNS (29), /* sdiv */
718 COSTS_N_INSNS (29), /* ddiv */
719 32, /* cache line size */
722 1, /* prefetch streams */
725 /* Instruction costs on E300C2 and E300C3 cores. */
727 struct processor_costs ppce300c2c3_cost = {
728 COSTS_N_INSNS (4), /* mulsi */
729 COSTS_N_INSNS (4), /* mulsi_const */
730 COSTS_N_INSNS (4), /* mulsi_const9 */
731 COSTS_N_INSNS (4), /* muldi */
732 COSTS_N_INSNS (19), /* divsi */
733 COSTS_N_INSNS (19), /* divdi */
734 COSTS_N_INSNS (3), /* fp */
735 COSTS_N_INSNS (4), /* dmul */
736 COSTS_N_INSNS (18), /* sdiv */
737 COSTS_N_INSNS (33), /* ddiv */
741 1, /* prefetch streams */
744 /* Instruction costs on PPCE500MC processors. */
746 struct processor_costs ppce500mc_cost = {
747 COSTS_N_INSNS (4), /* mulsi */
748 COSTS_N_INSNS (4), /* mulsi_const */
749 COSTS_N_INSNS (4), /* mulsi_const9 */
750 COSTS_N_INSNS (4), /* muldi */
751 COSTS_N_INSNS (14), /* divsi */
752 COSTS_N_INSNS (14), /* divdi */
753 COSTS_N_INSNS (8), /* fp */
754 COSTS_N_INSNS (10), /* dmul */
755 COSTS_N_INSNS (36), /* sdiv */
756 COSTS_N_INSNS (66), /* ddiv */
757 64, /* cache line size */
760 1, /* prefetch streams */
763 /* Instruction costs on PPCE500MC64 processors. */
765 struct processor_costs ppce500mc64_cost = {
766 COSTS_N_INSNS (4), /* mulsi */
767 COSTS_N_INSNS (4), /* mulsi_const */
768 COSTS_N_INSNS (4), /* mulsi_const9 */
769 COSTS_N_INSNS (4), /* muldi */
770 COSTS_N_INSNS (14), /* divsi */
771 COSTS_N_INSNS (14), /* divdi */
772 COSTS_N_INSNS (4), /* fp */
773 COSTS_N_INSNS (10), /* dmul */
774 COSTS_N_INSNS (36), /* sdiv */
775 COSTS_N_INSNS (66), /* ddiv */
776 64, /* cache line size */
779 1, /* prefetch streams */
782 /* Instruction costs on POWER4 and POWER5 processors. */
784 struct processor_costs power4_cost = {
785 COSTS_N_INSNS (3), /* mulsi */
786 COSTS_N_INSNS (2), /* mulsi_const */
787 COSTS_N_INSNS (2), /* mulsi_const9 */
788 COSTS_N_INSNS (4), /* muldi */
789 COSTS_N_INSNS (18), /* divsi */
790 COSTS_N_INSNS (34), /* divdi */
791 COSTS_N_INSNS (3), /* fp */
792 COSTS_N_INSNS (3), /* dmul */
793 COSTS_N_INSNS (17), /* sdiv */
794 COSTS_N_INSNS (17), /* ddiv */
795 128, /* cache line size */
798 8, /* prefetch streams */
801 /* Instruction costs on POWER6 processors. */
803 struct processor_costs power6_cost = {
804 COSTS_N_INSNS (8), /* mulsi */
805 COSTS_N_INSNS (8), /* mulsi_const */
806 COSTS_N_INSNS (8), /* mulsi_const9 */
807 COSTS_N_INSNS (8), /* muldi */
808 COSTS_N_INSNS (22), /* divsi */
809 COSTS_N_INSNS (28), /* divdi */
810 COSTS_N_INSNS (3), /* fp */
811 COSTS_N_INSNS (3), /* dmul */
812 COSTS_N_INSNS (13), /* sdiv */
813 COSTS_N_INSNS (16), /* ddiv */
814 128, /* cache line size */
817 16, /* prefetch streams */
820 /* Instruction costs on POWER7 processors. */
822 struct processor_costs power7_cost = {
823 COSTS_N_INSNS (2), /* mulsi */
824 COSTS_N_INSNS (2), /* mulsi_const */
825 COSTS_N_INSNS (2), /* mulsi_const9 */
826 COSTS_N_INSNS (2), /* muldi */
827 COSTS_N_INSNS (18), /* divsi */
828 COSTS_N_INSNS (34), /* divdi */
829 COSTS_N_INSNS (3), /* fp */
830 COSTS_N_INSNS (3), /* dmul */
831 COSTS_N_INSNS (13), /* sdiv */
832 COSTS_N_INSNS (16), /* ddiv */
833 128, /* cache line size */
836 12, /* prefetch streams */
839 /* Instruction costs on POWER A2 processors. */
841 struct processor_costs ppca2_cost = {
842 COSTS_N_INSNS (16), /* mulsi */
843 COSTS_N_INSNS (16), /* mulsi_const */
844 COSTS_N_INSNS (16), /* mulsi_const9 */
845 COSTS_N_INSNS (16), /* muldi */
846 COSTS_N_INSNS (22), /* divsi */
847 COSTS_N_INSNS (28), /* divdi */
848 COSTS_N_INSNS (3), /* fp */
849 COSTS_N_INSNS (3), /* dmul */
850 COSTS_N_INSNS (59), /* sdiv */
851 COSTS_N_INSNS (72), /* ddiv */
855 16, /* prefetch streams */
859 /* Table that classifies rs6000 builtin functions (pure, const, etc.). */
860 #undef RS6000_BUILTIN
861 #undef RS6000_BUILTIN_EQUATE
862 #define RS6000_BUILTIN(NAME, TYPE) TYPE,
863 #define RS6000_BUILTIN_EQUATE(NAME, VALUE)
865 static const enum rs6000_btc builtin_classify[(int)RS6000_BUILTIN_COUNT] =
867 #include "rs6000-builtin.def"
870 #undef RS6000_BUILTIN
871 #undef RS6000_BUILTIN_EQUATE
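/* Sketch of the X-macro expansion above (the entry shown is hypothetical):
   a line such as RS6000_BUILTIN (ALTIVEC_BUILTIN_VADDUBM, RS6000_BTC_CONST)
   in rs6000-builtin.def contributes just "RS6000_BTC_CONST," to the
   initializer, and RS6000_BUILTIN_EQUATE lines expand to nothing, so the
   array is indexed by builtin code and holds only the classification.  */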
874 static bool rs6000_function_ok_for_sibcall (tree, tree);
875 static const char *rs6000_invalid_within_doloop (const_rtx);
876 static bool rs6000_legitimate_address_p (enum machine_mode, rtx, bool);
877 static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
878 static rtx rs6000_generate_compare (rtx, enum machine_mode);
879 static void rs6000_emit_stack_tie (void);
880 static void rs6000_frame_related (rtx, rtx, HOST_WIDE_INT, rtx, rtx);
881 static bool spe_func_has_64bit_regs_p (void);
882 static void emit_frame_save (rtx, rtx, enum machine_mode, unsigned int,
884 static rtx gen_frame_mem_offset (enum machine_mode, rtx, int);
885 static unsigned rs6000_hash_constant (rtx);
886 static unsigned toc_hash_function (const void *);
887 static int toc_hash_eq (const void *, const void *);
888 static bool reg_offset_addressing_ok_p (enum machine_mode);
889 static bool virtual_stack_registers_memory_p (rtx);
890 static bool constant_pool_expr_p (rtx);
891 static bool legitimate_small_data_p (enum machine_mode, rtx);
892 static bool legitimate_lo_sum_address_p (enum machine_mode, rtx, int);
893 static struct machine_function * rs6000_init_machine_status (void);
894 static bool rs6000_assemble_integer (rtx, unsigned int, int);
895 static bool no_global_regs_above (int, bool);
896 #ifdef HAVE_GAS_HIDDEN
897 static void rs6000_assemble_visibility (tree, int);
899 static int rs6000_ra_ever_killed (void);
900 static bool rs6000_attribute_takes_identifier_p (const_tree);
901 static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
902 static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
903 static bool rs6000_ms_bitfield_layout_p (const_tree);
904 static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
905 static void rs6000_eliminate_indexed_memrefs (rtx operands[2]);
906 static const char *rs6000_mangle_type (const_tree);
907 static void rs6000_set_default_type_attributes (tree);
908 static rtx rs6000_savres_routine_sym (rs6000_stack_t *, bool, bool, bool);
909 static rtx rs6000_emit_stack_reset (rs6000_stack_t *, rtx, rtx, int, bool);
910 static rtx rs6000_make_savres_rtx (rs6000_stack_t *, rtx, int,
911 enum machine_mode, bool, bool, bool);
912 static bool rs6000_reg_live_or_pic_offset_p (int);
913 static tree rs6000_builtin_vectorized_function (tree, tree, tree);
914 static int rs6000_savres_strategy (rs6000_stack_t *, bool, int, int);
915 static void rs6000_restore_saved_cr (rtx, int);
916 static void rs6000_output_function_prologue (FILE *, HOST_WIDE_INT);
917 static void rs6000_output_function_epilogue (FILE *, HOST_WIDE_INT);
918 static void rs6000_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
920 static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
921 static bool rs6000_return_in_memory (const_tree, const_tree);
922 static rtx rs6000_function_value (const_tree, const_tree, bool);
923 static void rs6000_file_start (void);
925 static int rs6000_elf_reloc_rw_mask (void);
926 static void rs6000_elf_asm_out_constructor (rtx, int);
927 static void rs6000_elf_asm_out_destructor (rtx, int);
928 static void rs6000_elf_end_indicate_exec_stack (void) ATTRIBUTE_UNUSED;
929 static void rs6000_elf_asm_init_sections (void);
930 static section *rs6000_elf_select_rtx_section (enum machine_mode, rtx,
931 unsigned HOST_WIDE_INT);
932 static void rs6000_elf_encode_section_info (tree, rtx, int)
935 static bool rs6000_use_blocks_for_constant_p (enum machine_mode, const_rtx);
936 static void rs6000_alloc_sdmode_stack_slot (void);
937 static void rs6000_instantiate_decls (void);
939 static void rs6000_xcoff_asm_output_anchor (rtx);
940 static void rs6000_xcoff_asm_globalize_label (FILE *, const char *);
941 static void rs6000_xcoff_asm_init_sections (void);
942 static int rs6000_xcoff_reloc_rw_mask (void);
943 static void rs6000_xcoff_asm_named_section (const char *, unsigned int, tree);
944 static section *rs6000_xcoff_select_section (tree, int,
945 unsigned HOST_WIDE_INT);
946 static void rs6000_xcoff_unique_section (tree, int);
947 static section *rs6000_xcoff_select_rtx_section
948 (enum machine_mode, rtx, unsigned HOST_WIDE_INT);
949 static const char * rs6000_xcoff_strip_name_encoding (const char *);
950 static unsigned int rs6000_xcoff_section_type_flags (tree, const char *, int);
951 static void rs6000_xcoff_file_start (void);
952 static void rs6000_xcoff_file_end (void);
954 static int rs6000_variable_issue (FILE *, int, rtx, int);
955 static bool rs6000_rtx_costs (rtx, int, int, int *, bool);
956 static bool rs6000_debug_rtx_costs (rtx, int, int, int *, bool);
957 static int rs6000_debug_address_cost (rtx, bool);
958 static int rs6000_adjust_cost (rtx, rtx, rtx, int);
959 static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
960 static void rs6000_sched_init (FILE *, int, int);
961 static bool is_microcoded_insn (rtx);
962 static bool is_nonpipeline_insn (rtx);
963 static bool is_cracked_insn (rtx);
964 static bool is_branch_slot_insn (rtx);
965 static bool is_load_insn (rtx);
966 static rtx get_store_dest (rtx pat);
967 static bool is_store_insn (rtx);
968 static bool set_to_load_agen (rtx,rtx);
969 static bool adjacent_mem_locations (rtx,rtx);
970 static int rs6000_adjust_priority (rtx, int);
971 static int rs6000_issue_rate (void);
972 static bool rs6000_is_costly_dependence (dep_t, int, int);
973 static rtx get_next_active_insn (rtx, rtx);
974 static bool insn_terminates_group_p (rtx , enum group_termination);
975 static bool insn_must_be_first_in_group (rtx);
976 static bool insn_must_be_last_in_group (rtx);
977 static bool is_costly_group (rtx *, rtx);
978 static int force_new_group (int, FILE *, rtx *, rtx, bool *, int, int *);
979 static int redefine_groups (FILE *, int, rtx, rtx);
980 static int pad_groups (FILE *, int, rtx, rtx);
981 static void rs6000_sched_finish (FILE *, int);
982 static int rs6000_sched_reorder (FILE *, int, rtx *, int *, int);
983 static int rs6000_sched_reorder2 (FILE *, int, rtx *, int *, int);
984 static int rs6000_use_sched_lookahead (void);
985 static int rs6000_use_sched_lookahead_guard (rtx);
986 static void * rs6000_alloc_sched_context (void);
987 static void rs6000_init_sched_context (void *, bool);
988 static void rs6000_set_sched_context (void *);
989 static void rs6000_free_sched_context (void *);
990 static tree rs6000_builtin_reciprocal (unsigned int, bool, bool);
991 static tree rs6000_builtin_mask_for_load (void);
992 static tree rs6000_builtin_mul_widen_even (tree);
993 static tree rs6000_builtin_mul_widen_odd (tree);
994 static tree rs6000_builtin_conversion (unsigned int, tree, tree);
995 static tree rs6000_builtin_vec_perm (tree, tree *);
996 static bool rs6000_builtin_support_vector_misalignment (enum
1001 static void def_builtin (int, const char *, tree, int);
1002 static bool rs6000_vector_alignment_reachable (const_tree, bool);
1003 static void rs6000_init_builtins (void);
1004 static tree rs6000_builtin_decl (unsigned, bool);
1006 static rtx rs6000_expand_unop_builtin (enum insn_code, tree, rtx);
1007 static rtx rs6000_expand_binop_builtin (enum insn_code, tree, rtx);
1008 static rtx rs6000_expand_ternop_builtin (enum insn_code, tree, rtx);
1009 static rtx rs6000_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
1010 static void altivec_init_builtins (void);
1011 static unsigned builtin_hash_function (const void *);
1012 static int builtin_hash_eq (const void *, const void *);
1013 static tree builtin_function_type (enum machine_mode, enum machine_mode,
1014 enum machine_mode, enum machine_mode,
1015 enum rs6000_builtins, const char *name);
1016 static void rs6000_common_init_builtins (void);
1017 static void rs6000_init_libfuncs (void);
1019 static void paired_init_builtins (void);
1020 static rtx paired_expand_builtin (tree, rtx, bool *);
1021 static rtx paired_expand_lv_builtin (enum insn_code, tree, rtx);
1022 static rtx paired_expand_stv_builtin (enum insn_code, tree);
1023 static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
1025 static void enable_mask_for_builtins (struct builtin_description *, int,
1026 enum rs6000_builtins,
1027 enum rs6000_builtins);
1028 static void spe_init_builtins (void);
1029 static rtx spe_expand_builtin (tree, rtx, bool *);
1030 static rtx spe_expand_stv_builtin (enum insn_code, tree);
1031 static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
1032 static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
1033 static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
1034 static rs6000_stack_t *rs6000_stack_info (void);
1035 static void debug_stack_info (rs6000_stack_t *);
1037 static rtx altivec_expand_builtin (tree, rtx, bool *);
1038 static rtx altivec_expand_ld_builtin (tree, rtx, bool *);
1039 static rtx altivec_expand_st_builtin (tree, rtx, bool *);
1040 static rtx altivec_expand_dst_builtin (tree, rtx, bool *);
1041 static rtx altivec_expand_abs_builtin (enum insn_code, tree, rtx);
1042 static rtx altivec_expand_predicate_builtin (enum insn_code, tree, rtx);
1043 static rtx altivec_expand_stv_builtin (enum insn_code, tree);
1044 static rtx altivec_expand_vec_init_builtin (tree, tree, rtx);
1045 static rtx altivec_expand_vec_set_builtin (tree);
1046 static rtx altivec_expand_vec_ext_builtin (tree, rtx);
1047 static int get_element_number (tree, tree);
1048 static bool rs6000_handle_option (size_t, const char *, int);
1049 static void rs6000_parse_tls_size_option (void);
1050 static void rs6000_parse_yes_no_option (const char *, const char *, int *);
1051 static int first_altivec_reg_to_save (void);
1052 static unsigned int compute_vrsave_mask (void);
1053 static void compute_save_world_info (rs6000_stack_t *info_ptr);
1054 static void is_altivec_return_reg (rtx, void *);
1055 static rtx generate_set_vrsave (rtx, rs6000_stack_t *, int);
1056 int easy_vector_constant (rtx, enum machine_mode);
1057 static rtx rs6000_dwarf_register_span (rtx);
1058 static void rs6000_init_dwarf_reg_sizes_extra (tree);
1059 static rtx rs6000_legitimize_address (rtx, rtx, enum machine_mode);
1060 static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
1061 static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
1062 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
1063 static rtx rs6000_delegitimize_address (rtx);
1064 static rtx rs6000_tls_get_addr (void);
1065 static rtx rs6000_got_sym (void);
1066 static int rs6000_tls_symbol_ref_1 (rtx *, void *);
1067 static const char *rs6000_get_some_local_dynamic_name (void);
1068 static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
1069 static rtx rs6000_complex_function_value (enum machine_mode);
1070 static rtx rs6000_spe_function_arg (CUMULATIVE_ARGS *,
1071 enum machine_mode, tree);
1072 static void rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *,
1074 static void rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *,
1075 tree, HOST_WIDE_INT);
1076 static void rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *,
1079 static void rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *,
1080 const_tree, HOST_WIDE_INT,
1082 static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree, int, bool);
1083 static rtx rs6000_mixed_function_arg (enum machine_mode, tree, int);
1084 static void rs6000_move_block_from_reg (int regno, rtx x, int nregs);
1085 static void setup_incoming_varargs (CUMULATIVE_ARGS *,
1086 enum machine_mode, tree,
1088 static bool rs6000_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
1090 static int rs6000_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
1092 static const char *invalid_arg_for_unprototyped_fn (const_tree, const_tree, const_tree);
1094 static void macho_branch_islands (void);
1095 static int no_previous_def (tree function_name);
1096 static tree get_prev_label (tree function_name);
1097 static void rs6000_darwin_file_start (void);
1100 static tree rs6000_build_builtin_va_list (void);
1101 static void rs6000_va_start (tree, rtx);
1102 static tree rs6000_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
1103 static bool rs6000_must_pass_in_stack (enum machine_mode, const_tree);
1104 static bool rs6000_scalar_mode_supported_p (enum machine_mode);
1105 static bool rs6000_vector_mode_supported_p (enum machine_mode);
1106 static rtx rs6000_emit_vector_compare_inner (enum rtx_code, rtx, rtx);
1107 static rtx rs6000_emit_vector_compare (enum rtx_code, rtx, rtx,
1109 static tree rs6000_stack_protect_fail (void);
1111 static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
1114 static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
1117 rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
1119 = rs6000_legitimize_reload_address;
1121 static bool rs6000_mode_dependent_address_p (const_rtx);
1122 static bool rs6000_mode_dependent_address (const_rtx);
1123 static bool rs6000_debug_mode_dependent_address (const_rtx);
1124 static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
1125 = rs6000_mode_dependent_address;
1127 static enum reg_class rs6000_secondary_reload_class (enum reg_class,
1128 enum machine_mode, rtx);
1129 static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
1132 enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
1133 enum machine_mode, rtx)
1134 = rs6000_secondary_reload_class;
1136 static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
1137 static enum reg_class rs6000_debug_preferred_reload_class (rtx,
1139 enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
1140 = rs6000_preferred_reload_class;
1142 static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
1145 static bool rs6000_debug_secondary_memory_needed (enum reg_class,
1149 bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
1151 = rs6000_secondary_memory_needed;
1153 static bool rs6000_cannot_change_mode_class (enum machine_mode,
1156 static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
1160 bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
1163 = rs6000_cannot_change_mode_class;
1165 static enum reg_class rs6000_secondary_reload (bool, rtx, enum reg_class,
1167 struct secondary_reload_info *);
1169 static const enum reg_class *rs6000_ira_cover_classes (void);
1171 const int INSN_NOT_AVAILABLE = -1;
1172 static enum machine_mode rs6000_eh_return_filter_mode (void);
1173 static bool rs6000_can_eliminate (const int, const int);
1174 static void rs6000_trampoline_init (rtx, tree, rtx);
1176 /* Hash table stuff for keeping track of TOC entries. */
1178 struct GTY(()) toc_hash_struct
1180 /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
1181 ASM_OUTPUT_SPECIAL_POOL_ENTRY_P. */
1183 enum machine_mode key_mode;
1187 static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;
1189 /* Hash table to keep track of the argument types for builtin functions. */
1191 struct GTY(()) builtin_hash_struct
1194 enum machine_mode mode[4]; /* return value + 3 arguments. */
1195 unsigned char uns_p[4]; /* and whether the types are unsigned. */
1198 static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;
1200 /* Default register names. */
1201 char rs6000_reg_names[][8] =
1203 "0", "1", "2", "3", "4", "5", "6", "7",
1204 "8", "9", "10", "11", "12", "13", "14", "15",
1205 "16", "17", "18", "19", "20", "21", "22", "23",
1206 "24", "25", "26", "27", "28", "29", "30", "31",
1207 "0", "1", "2", "3", "4", "5", "6", "7",
1208 "8", "9", "10", "11", "12", "13", "14", "15",
1209 "16", "17", "18", "19", "20", "21", "22", "23",
1210 "24", "25", "26", "27", "28", "29", "30", "31",
1211 "mq", "lr", "ctr","ap",
1212 "0", "1", "2", "3", "4", "5", "6", "7",
1214 /* AltiVec registers. */
1215 "0", "1", "2", "3", "4", "5", "6", "7",
1216 "8", "9", "10", "11", "12", "13", "14", "15",
1217 "16", "17", "18", "19", "20", "21", "22", "23",
1218 "24", "25", "26", "27", "28", "29", "30", "31",
1220 /* SPE registers. */
1221 "spe_acc", "spefscr",
1222 /* Soft frame pointer. */
1226 #ifdef TARGET_REGNAMES
1227 static const char alt_reg_names[][8] =
1229 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
1230 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
1231 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
1232 "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
1233 "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
1234 "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
1235 "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
1236 "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
1237 "mq", "lr", "ctr", "ap",
1238 "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
1240 /* AltiVec registers. */
1241 "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
1242 "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
1243 "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
1244 "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
1246 /* SPE registers. */
1247 "spe_acc", "spefscr",
1248 /* Soft frame pointer. */
1253 /* Table of valid machine attributes. */
1255 static const struct attribute_spec rs6000_attribute_table[] =
1257 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1258 { "altivec", 1, 1, false, true, false, rs6000_handle_altivec_attribute },
1259 { "longcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
1260 { "shortcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
1261 { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
1262 { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
1263 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1264 SUBTARGET_ATTRIBUTE_TABLE,
1266 { NULL, 0, 0, false, false, false, NULL }
1269 #ifndef MASK_STRICT_ALIGN
1270 #define MASK_STRICT_ALIGN 0
1272 #ifndef TARGET_PROFILE_KERNEL
1273 #define TARGET_PROFILE_KERNEL 0
1276 /* The VRSAVE bitmask puts bit %v0 as the most significant bit. */
1277 #define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
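/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0 in
   the most significant bit) and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31)
   is 0x00000001 (%v31 in the least significant bit).  */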
1279 /* Initialize the GCC target structure. */
1280 #undef TARGET_ATTRIBUTE_TABLE
1281 #define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
1282 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
1283 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
1284 #undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
1285 #define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p
1287 #undef TARGET_ASM_ALIGNED_DI_OP
1288 #define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
1290 /* Default unaligned ops are only provided for ELF. Find the ops needed
1291 for non-ELF systems. */
1292 #ifndef OBJECT_FORMAT_ELF
1294 /* For XCOFF. rs6000_assemble_integer will handle unaligned DIs on
1296 #undef TARGET_ASM_UNALIGNED_HI_OP
1297 #define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
1298 #undef TARGET_ASM_UNALIGNED_SI_OP
1299 #define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
1300 #undef TARGET_ASM_UNALIGNED_DI_OP
1301 #define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
1304 #undef TARGET_ASM_UNALIGNED_HI_OP
1305 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
1306 #undef TARGET_ASM_UNALIGNED_SI_OP
1307 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
1308 #undef TARGET_ASM_UNALIGNED_DI_OP
1309 #define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
1310 #undef TARGET_ASM_ALIGNED_DI_OP
1311 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
1315 /* This hook deals with fixups for relocatable code and DI-mode objects
1317 #undef TARGET_ASM_INTEGER
1318 #define TARGET_ASM_INTEGER rs6000_assemble_integer
1320 #ifdef HAVE_GAS_HIDDEN
1321 #undef TARGET_ASM_ASSEMBLE_VISIBILITY
1322 #define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
1325 #undef TARGET_HAVE_TLS
1326 #define TARGET_HAVE_TLS HAVE_AS_TLS
1328 #undef TARGET_CANNOT_FORCE_CONST_MEM
1329 #define TARGET_CANNOT_FORCE_CONST_MEM rs6000_tls_referenced_p
1331 #undef TARGET_DELEGITIMIZE_ADDRESS
1332 #define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address
1334 #undef TARGET_ASM_FUNCTION_PROLOGUE
1335 #define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
1336 #undef TARGET_ASM_FUNCTION_EPILOGUE
1337 #define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue
1339 #undef TARGET_LEGITIMIZE_ADDRESS
1340 #define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address
1342 #undef TARGET_SCHED_VARIABLE_ISSUE
1343 #define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue
1345 #undef TARGET_SCHED_ISSUE_RATE
1346 #define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
1347 #undef TARGET_SCHED_ADJUST_COST
1348 #define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
1349 #undef TARGET_SCHED_ADJUST_PRIORITY
1350 #define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
1351 #undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
1352 #define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
1353 #undef TARGET_SCHED_INIT
1354 #define TARGET_SCHED_INIT rs6000_sched_init
1355 #undef TARGET_SCHED_FINISH
1356 #define TARGET_SCHED_FINISH rs6000_sched_finish
1357 #undef TARGET_SCHED_REORDER
1358 #define TARGET_SCHED_REORDER rs6000_sched_reorder
1359 #undef TARGET_SCHED_REORDER2
1360 #define TARGET_SCHED_REORDER2 rs6000_sched_reorder2
1362 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1363 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead
1365 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
1366 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard
1368 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
1369 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
1370 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
1371 #define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
1372 #undef TARGET_SCHED_SET_SCHED_CONTEXT
1373 #define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
1374 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
1375 #define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1377 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1378 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1379 #undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN
1380 #define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN rs6000_builtin_mul_widen_even
1381 #undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD
1382 #define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD rs6000_builtin_mul_widen_odd
1383 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
1384 #define TARGET_VECTORIZE_BUILTIN_CONVERSION rs6000_builtin_conversion
1385 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
1386 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM rs6000_builtin_vec_perm
1387 #undef TARGET_SUPPORT_VECTOR_MISALIGNMENT
1388 #define TARGET_SUPPORT_VECTOR_MISALIGNMENT \
1389 rs6000_builtin_support_vector_misalignment
1390 #undef TARGET_VECTOR_ALIGNMENT_REACHABLE
1391 #define TARGET_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1393 #undef TARGET_INIT_BUILTINS
1394 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1395 #undef TARGET_BUILTIN_DECL
1396 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1398 #undef TARGET_EXPAND_BUILTIN
1399 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1401 #undef TARGET_MANGLE_TYPE
1402 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1404 #undef TARGET_INIT_LIBFUNCS
1405 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1408 #undef TARGET_BINDS_LOCAL_P
1409 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1412 #undef TARGET_MS_BITFIELD_LAYOUT_P
1413 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1415 #undef TARGET_ASM_OUTPUT_MI_THUNK
1416 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1418 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1419 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1421 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1422 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1424 #undef TARGET_INVALID_WITHIN_DOLOOP
1425 #define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop
1427 #undef TARGET_RTX_COSTS
1428 #define TARGET_RTX_COSTS rs6000_rtx_costs
1429 #undef TARGET_ADDRESS_COST
1430 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
1432 #undef TARGET_DWARF_REGISTER_SPAN
1433 #define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span
1435 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1436 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1438 /* On rs6000, function arguments are promoted, as are function return
1440 #undef TARGET_PROMOTE_FUNCTION_MODE
1441 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
1443 #undef TARGET_RETURN_IN_MEMORY
1444 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1446 #undef TARGET_SETUP_INCOMING_VARARGS
1447 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1449 /* Always strict argument naming on rs6000. */
1450 #undef TARGET_STRICT_ARGUMENT_NAMING
1451 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1452 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1453 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1454 #undef TARGET_SPLIT_COMPLEX_ARG
1455 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1456 #undef TARGET_MUST_PASS_IN_STACK
1457 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1458 #undef TARGET_PASS_BY_REFERENCE
1459 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1460 #undef TARGET_ARG_PARTIAL_BYTES
1461 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1463 #undef TARGET_BUILD_BUILTIN_VA_LIST
1464 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1466 #undef TARGET_EXPAND_BUILTIN_VA_START
1467 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1469 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1470 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1472 #undef TARGET_EH_RETURN_FILTER_MODE
1473 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1475 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1476 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1478 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1479 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1481 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1482 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1484 #undef TARGET_HANDLE_OPTION
1485 #define TARGET_HANDLE_OPTION rs6000_handle_option
1487 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1488 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1489 rs6000_builtin_vectorized_function
1491 #undef TARGET_DEFAULT_TARGET_FLAGS
1492 #define TARGET_DEFAULT_TARGET_FLAGS \
1495 #undef TARGET_STACK_PROTECT_FAIL
1496 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1498 /* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
1499 The PowerPC architecture requires only weak consistency among
1500 processors--that is, memory accesses between processors need not be
1501 sequentially consistent and memory accesses among processors can occur
1502 in any order. The ability to order memory accesses weakly provides
1503 opportunities for more efficient use of the system bus. Unless a
1504 dependency exists, the 604e allows read operations to precede store
1506 #undef TARGET_RELAXED_ORDERING
1507 #define TARGET_RELAXED_ORDERING true
1510 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1511 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1514 /* Use a 32-bit anchor range. This leads to sequences like:
1516 addis tmp,anchor,high
1519 where tmp itself acts as an anchor, and can be shared between
1520 accesses to the same 64k page. */
1521 #undef TARGET_MIN_ANCHOR_OFFSET
1522 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1523 #undef TARGET_MAX_ANCHOR_OFFSET
1524 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
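/* These limits permit a full signed 32-bit offset from a section anchor:
   the high 16 bits go in the addis shown above and the low 16 bits in the
   final add or load/store.  */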
1525 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1526 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1528 #undef TARGET_BUILTIN_RECIPROCAL
1529 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1531 #undef TARGET_EXPAND_TO_RTL_HOOK
1532 #define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot
1534 #undef TARGET_INSTANTIATE_DECLS
1535 #define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls
1537 #undef TARGET_SECONDARY_RELOAD
1538 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1540 #undef TARGET_IRA_COVER_CLASSES
1541 #define TARGET_IRA_COVER_CLASSES rs6000_ira_cover_classes
1543 #undef TARGET_LEGITIMATE_ADDRESS_P
1544 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1546 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1547 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1549 #undef TARGET_CAN_ELIMINATE
1550 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1552 #undef TARGET_TRAMPOLINE_INIT
1553 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1555 #undef TARGET_FUNCTION_VALUE
1556 #define TARGET_FUNCTION_VALUE rs6000_function_value
1558 struct gcc_target targetm = TARGET_INITIALIZER;
1560 /* Return number of consecutive hard regs needed starting at reg REGNO
1561 to hold something of mode MODE.
1562 This is ordinarily the length in words of a value of mode MODE
1563 but can be less for certain modes in special long registers.
1565 For the SPE, GPRs are 64 bits but only 32 bits are visible in
1566 scalar instructions. The upper 32 bits are only available to the
1569 POWER and PowerPC GPRs hold 32 bits worth;
1570 PowerPC64 GPRs and FPRs hold 64 bits worth. */
1573 rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
1575 unsigned HOST_WIDE_INT reg_size;
1577 if (FP_REGNO_P (regno))
1578 reg_size = (VECTOR_MEM_VSX_P (mode)
1579 ? UNITS_PER_VSX_WORD
1580 : UNITS_PER_FP_WORD);
1582 else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1583 reg_size = UNITS_PER_SPE_WORD;
1585 else if (ALTIVEC_REGNO_P (regno))
1586 reg_size = UNITS_PER_ALTIVEC_WORD;
1588 /* The value returned for SCmode in the E500 double case is 2 for
1589 ABI compatibility; storing an SCmode value in a single register
1590 would require function_arg and rs6000_spe_function_arg to handle
1591 SCmode so as to pass the value correctly in a pair of
1593 else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
1594 && !DECIMAL_FLOAT_MODE_P (mode))
1595 reg_size = UNITS_PER_FP_WORD;
1598 reg_size = UNITS_PER_WORD;
1600 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
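/* The division above rounds up, e.g. a 16-byte vector mode held in 8-byte
   registers needs (16 + 8 - 1) / 8 == 2 hard registers.  */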
1603 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1606 rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
1608 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1610 /* VSX registers that overlap the FPR registers are larger than in non-VSX
1611 implementations. Don't allow an item to be split between an FP register
1612 and an AltiVec register. */
1613 if (VECTOR_MEM_VSX_P (mode))
1615 if (FP_REGNO_P (regno))
1616 return FP_REGNO_P (last_regno);
1618 if (ALTIVEC_REGNO_P (regno))
1619 return ALTIVEC_REGNO_P (last_regno);
1622 /* The GPRs can hold any mode, but values bigger than one register
1623 cannot go past R31. */
1624 if (INT_REGNO_P (regno))
1625 return INT_REGNO_P (last_regno);
1627 /* The float registers (except for VSX vector modes) can only hold floating
1628 modes and DImode. This excludes the 32-bit decimal float mode for
1630 if (FP_REGNO_P (regno))
1632 if (SCALAR_FLOAT_MODE_P (mode)
1633 && (mode != TDmode || (regno % 2) == 0)
1634 && FP_REGNO_P (last_regno))
1637 if (GET_MODE_CLASS (mode) == MODE_INT
1638 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1641 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1642 && PAIRED_VECTOR_MODE (mode))
1648 /* The CR register can only hold CC modes. */
1649 if (CR_REGNO_P (regno))
1650 return GET_MODE_CLASS (mode) == MODE_CC;
1652 if (CA_REGNO_P (regno))
1653 return mode == BImode;
1655 /* AltiVec only in AltiVec registers. */
1656 if (ALTIVEC_REGNO_P (regno))
1657 return VECTOR_MEM_ALTIVEC_OR_VSX_P (mode);
1659 /* ...but GPRs can hold SIMD data on the SPE in one register. */
1660 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1663 /* We cannot put TImode anywhere except general registers, and it must be able
1664 to fit within the register set. In the future, allow TImode in the
1665 Altivec or VSX registers. */
1667 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
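/* For instance, a TDmode (128-bit decimal float) value is only accepted in
   the FP registers when it starts at an even-numbered FPR, per the
   (regno % 2) == 0 test above; anything wider than a word is rejected for
   the remaining registers by this final size check.  */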
1670 /* Print interesting facts about registers. */
1672 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
1676 for (r = first_regno; r <= last_regno; ++r)
1678 const char *comma = "";
1681 if (first_regno == last_regno)
1682 fprintf (stderr, "%s:\t", reg_name);
1684 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
1687 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1688 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
1692 fprintf (stderr, ",\n\t");
1697 if (rs6000_hard_regno_nregs[m][r] > 1)
1698 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
1699 rs6000_hard_regno_nregs[m][r]);
1701 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
1706 if (call_used_regs[r])
1710 fprintf (stderr, ",\n\t");
1715 len += fprintf (stderr, "%s%s", comma, "call-used");
1723 fprintf (stderr, ",\n\t");
1728 len += fprintf (stderr, "%s%s", comma, "fixed");
1734 fprintf (stderr, ",\n\t");
1738 fprintf (stderr, "%sregno = %d\n", comma, r);
1742 /* Print various interesting information with -mdebug=reg. */
1744 rs6000_debug_reg_global (void)
1746 const char *nl = (const char *)0;
1748 char costly_num[20];
1750 const char *costly_str;
1751 const char *nop_str;
1753 /* Map enum rs6000_vector to string. */
1754 static const char *rs6000_debug_vector_unit[] = {
1763 fprintf (stderr, "Register information: (last virtual reg = %d)\n",
1764 LAST_VIRTUAL_REGISTER);
1765 rs6000_debug_reg_print (0, 31, "gr");
1766 rs6000_debug_reg_print (32, 63, "fp");
1767 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
1770 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
1771 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
1772 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
1773 rs6000_debug_reg_print (MQ_REGNO, MQ_REGNO, "mq");
1774 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
1775 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
1776 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
1777 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
1778 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
1782 "d reg_class = %s\n"
1783 "f reg_class = %s\n"
1784 "v reg_class = %s\n"
1785 "wa reg_class = %s\n"
1786 "wd reg_class = %s\n"
1787 "wf reg_class = %s\n"
1788 "ws reg_class = %s\n\n",
1789 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
1790 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
1791 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
1792 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
1793 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
1794 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
1795 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]]);
1797 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1798 if (rs6000_vector_unit[m] || rs6000_vector_mem[m])
1801 fprintf (stderr, "Vector mode: %-5s arithmetic: %-8s move: %-8s\n",
1803 rs6000_debug_vector_unit[ rs6000_vector_unit[m] ],
1804 rs6000_debug_vector_unit[ rs6000_vector_mem[m] ]);
1810 switch (rs6000_sched_costly_dep)
1812 case max_dep_latency:
1813 costly_str = "max_dep_latency";
1817 costly_str = "no_dep_costly";
1820 case all_deps_costly:
1821 costly_str = "all_deps_costly";
1824 case true_store_to_load_dep_costly:
1825 costly_str = "true_store_to_load_dep_costly";
1828 case store_to_load_dep_costly:
1829 costly_str = "store_to_load_dep_costly";
1833 costly_str = costly_num;
1834 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
1838 switch (rs6000_sched_insert_nops)
1840 case sched_finish_regroup_exact:
1841 nop_str = "sched_finish_regroup_exact";
1844 case sched_finish_pad_groups:
1845 nop_str = "sched_finish_pad_groups";
1848 case sched_finish_none:
1849 nop_str = "sched_finish_none";
1854 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
1859 "always_hint = %s\n"
1860 "align_branch_targets = %s\n"
1861 "sched_restricted_insns_priority = %d\n"
1862 "sched_costly_dep = %s\n"
1863 "sched_insert_nops = %s\n\n",
1864 rs6000_always_hint ? "true" : "false",
1865 rs6000_align_branch_targets ? "true" : "false",
1866 (int)rs6000_sched_restricted_insns_priority,
1867 costly_str, nop_str);
1870 /* Initialize the various global tables that are based on register size. */
1872 rs6000_init_hard_regno_mode_ok (void)
1878 /* Precalculate REGNO_REG_CLASS. */
1879 rs6000_regno_regclass[0] = GENERAL_REGS;
1880 for (r = 1; r < 32; ++r)
1881 rs6000_regno_regclass[r] = BASE_REGS;
1883 for (r = 32; r < 64; ++r)
1884 rs6000_regno_regclass[r] = FLOAT_REGS;
1886 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
1887 rs6000_regno_regclass[r] = NO_REGS;
1889 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
1890 rs6000_regno_regclass[r] = ALTIVEC_REGS;
1892 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
1893 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
1894 rs6000_regno_regclass[r] = CR_REGS;
1896 rs6000_regno_regclass[MQ_REGNO] = MQ_REGS;
1897 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
1898 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
1899 rs6000_regno_regclass[CA_REGNO] = CA_REGS;
1900 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
1901 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
1902 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
1903 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
1904 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
1905 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
1907 /* Precalculate vector information; this must be set up before the
1908 rs6000_hard_regno_nregs_internal below. */
1909 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1911 rs6000_vector_unit[m] = rs6000_vector_mem[m] = VECTOR_NONE;
1912 rs6000_vector_reload[m][0] = CODE_FOR_nothing;
1913 rs6000_vector_reload[m][1] = CODE_FOR_nothing;
1916 for (c = 0; c < (int)RS6000_CONSTRAINT_MAX; c++)
1917 rs6000_constraints[c] = NO_REGS;
1919 /* The VSX hardware allows native alignment for vectors, but TARGET_VSX_ALIGN_128
1920 controls whether the compiler assumes native alignment or still uses 128-bit alignment. */
1921 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
1932 /* V2DF mode, VSX only. */
1935 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
1936 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
1937 rs6000_vector_align[V2DFmode] = align64;
1940 /* V4SF mode, either VSX or Altivec. */
1943 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
1944 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
1945 rs6000_vector_align[V4SFmode] = align32;
1947 else if (TARGET_ALTIVEC)
1949 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
1950 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
1951 rs6000_vector_align[V4SFmode] = align32;
1954 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads and stores. */
1958 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
1959 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
1960 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
1961 rs6000_vector_align[V4SImode] = align32;
1962 rs6000_vector_align[V8HImode] = align32;
1963 rs6000_vector_align[V16QImode] = align32;
1967 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
1968 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
1969 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
1973 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
1974 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
1975 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
1979 /* V2DImode, only allow under VSX, which can do V2DI insert/splat/extract.
1980 Altivec doesn't have 64-bit support. */
1983 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
1984 rs6000_vector_unit[V2DImode] = VECTOR_NONE;
1985 rs6000_vector_align[V2DImode] = align64;
1988 /* DFmode, see if we want to use the VSX unit. */
1989 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
1991 rs6000_vector_unit[DFmode] = VECTOR_VSX;
1992 rs6000_vector_mem[DFmode]
1993 = (TARGET_VSX_SCALAR_MEMORY ? VECTOR_VSX : VECTOR_NONE);
1994 rs6000_vector_align[DFmode] = align64;
1997 /* TODO add SPE and paired floating point vector support. */
1999 /* Register class constraints for the constraints that depend on compile switches. */
2001 if (TARGET_HARD_FLOAT && TARGET_FPRS)
2002 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;
2004 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
2005 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;
2009 /* At present, we just use VSX_REGS, but we have different constraints
2010 based on the use, in case we want to fine tune the default register
2011 class used. wa = any VSX register, wf = register class to use for
2012 V4SF, wd = register class to use for V2DF, and ws = register class to
2013 use for DF scalars. */
2014 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
2015 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;
2016 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
2017 if (TARGET_VSX_SCALAR_DOUBLE)
2018 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;
2022 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
2024 /* Set up the reload helper functions. */
2025 if (TARGET_VSX || TARGET_ALTIVEC)
2029 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_di_store;
2030 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_di_load;
2031 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_di_store;
2032 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_di_load;
2033 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_di_store;
2034 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_di_load;
2035 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_di_store;
2036 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_di_load;
2037 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_di_store;
2038 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_di_load;
2039 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_di_store;
2040 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_di_load;
2044 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_si_store;
2045 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_si_load;
2046 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_si_store;
2047 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_si_load;
2048 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_si_store;
2049 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_si_load;
2050 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_si_store;
2051 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_si_load;
2052 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_si_store;
2053 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_si_load;
2054 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_si_store;
2055 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_si_load;
2059 /* Precalculate HARD_REGNO_NREGS. */
2060 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2061 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2062 rs6000_hard_regno_nregs[m][r]
2063 = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);
2065 /* Precalculate HARD_REGNO_MODE_OK. */
2066 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2067 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2068 if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
2069 rs6000_hard_regno_mode_ok_p[m][r] = true;
2071 /* Precalculate CLASS_MAX_NREGS sizes. */
2072 for (c = 0; c < LIM_REG_CLASSES; ++c)
2076 if (TARGET_VSX && VSX_REG_CLASS_P (c))
2077 reg_size = UNITS_PER_VSX_WORD;
2079 else if (c == ALTIVEC_REGS)
2080 reg_size = UNITS_PER_ALTIVEC_WORD;
2082 else if (c == FLOAT_REGS)
2083 reg_size = UNITS_PER_FP_WORD;
2086 reg_size = UNITS_PER_WORD;
2088 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2089 rs6000_class_max_nregs[m][c]
2090 = (GET_MODE_SIZE (m) + reg_size - 1) / reg_size;
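/* As a concrete example: V4SFmode is 16 bytes, so it needs a single
   ALTIVEC_REGS register (UNITS_PER_ALTIVEC_WORD == 16), but two FLOAT_REGS
   registers when VSX is off (UNITS_PER_FP_WORD == 8).  */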
2093 if (TARGET_E500_DOUBLE)
2094 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
2096 if (TARGET_DEBUG_REG)
2097 rs6000_debug_reg_global ();
2101 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
2104 darwin_rs6000_override_options (void)
2106 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned off. */
2108 rs6000_altivec_abi = 1;
2109 TARGET_ALTIVEC_VRSAVE = 1;
2110 if (DEFAULT_ABI == ABI_DARWIN)
2112 if (MACHO_DYNAMIC_NO_PIC_P)
2115 warning (0, "-mdynamic-no-pic overrides -fpic or -fPIC");
2118 else if (flag_pic == 1)
2123 if (TARGET_64BIT && ! TARGET_POWERPC64)
2125 target_flags |= MASK_POWERPC64;
2126 warning (0, "-m64 requires PowerPC64 architecture, enabling");
2130 rs6000_default_long_calls = 1;
2131 target_flags |= MASK_SOFT_FLOAT;
2134 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes AltiVec. */
2136 if (!flag_mkernel && !flag_apple_kext
2138 && ! (target_flags_explicit & MASK_ALTIVEC))
2139 target_flags |= MASK_ALTIVEC;
2141 /* Unless the user (not the configurer) has explicitly overridden
2142 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
2143 G4 unless targeting the kernel. */
2146 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
2147 && ! (target_flags_explicit & MASK_ALTIVEC)
2148 && ! rs6000_select[1].string)
2150 target_flags |= MASK_ALTIVEC;
2155 /* If not otherwise specified by a target, make 'long double' equivalent to 'double'. */
2158 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
2159 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
2162 /* Override command line options. Mostly we process the processor
2163 type and sometimes adjust other TARGET_ options. */
2166 rs6000_override_options (const char *default_cpu)
2169 struct rs6000_cpu_select *ptr;
2172 /* Simplifications for entries below. */
2175 POWERPC_BASE_MASK = MASK_POWERPC | MASK_NEW_MNEMONICS,
2176 POWERPC_7400_MASK = POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_ALTIVEC
2179 /* This table occasionally claims that a processor does not support
2180 a particular feature even though it does, but the feature is slower
2181 than the alternative. Thus, it shouldn't be relied on as a
2182 complete description of the processor's support.
2184 Please keep this list in order, and don't forget to update the
2185 documentation in invoke.texi when adding a new processor or configuration. */
2189 const char *const name; /* Canonical processor name. */
2190 const enum processor_type processor; /* Processor type enum value. */
2191 const int target_enable; /* Target flags to enable. */
2192 } const processor_target_table[]
2193 = {{"401", PROCESSOR_PPC403, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
2194 {"403", PROCESSOR_PPC403,
2195 POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_STRICT_ALIGN},
2196 {"405", PROCESSOR_PPC405,
2197 POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
2198 {"405fp", PROCESSOR_PPC405,
2199 POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
2200 {"440", PROCESSOR_PPC440,
2201 POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
2202 {"440fp", PROCESSOR_PPC440,
2203 POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
2204 {"464", PROCESSOR_PPC440,
2205 POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
2206 {"464fp", PROCESSOR_PPC440,
2207 POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
2208 {"476", PROCESSOR_PPC476,
2209 POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_PPC_GFXOPT | MASK_MFCRF
2210 | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_MULHW | MASK_DLMZB},
2211 {"476fp", PROCESSOR_PPC476,
2212 POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_MFCRF | MASK_POPCNTB
2213 | MASK_FPRND | MASK_CMPB | MASK_MULHW | MASK_DLMZB},
2214 {"505", PROCESSOR_MPCCORE, POWERPC_BASE_MASK},
2215 {"601", PROCESSOR_PPC601,
2216 MASK_POWER | POWERPC_BASE_MASK | MASK_MULTIPLE | MASK_STRING},
2217 {"602", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
2218 {"603", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
2219 {"603e", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
2220 {"604", PROCESSOR_PPC604, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
2221 {"604e", PROCESSOR_PPC604e, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
2222 {"620", PROCESSOR_PPC620,
2223 POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
2224 {"630", PROCESSOR_PPC630,
2225 POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
2226 {"740", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
2227 {"7400", PROCESSOR_PPC7400, POWERPC_7400_MASK},
2228 {"7450", PROCESSOR_PPC7450, POWERPC_7400_MASK},
2229 {"750", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
2230 {"801", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
2231 {"821", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
2232 {"823", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
2233 {"8540", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN
2235 /* 8548 has a dummy entry for now. */
2236 {"8548", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN
2238 {"a2", PROCESSOR_PPCA2,
2239 POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_POPCNTB
2240 | MASK_CMPB | MASK_NO_UPDATE },
2241 {"e300c2", PROCESSOR_PPCE300C2, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
2242 {"e300c3", PROCESSOR_PPCE300C3, POWERPC_BASE_MASK},
2243 {"e500mc", PROCESSOR_PPCE500MC, POWERPC_BASE_MASK | MASK_PPC_GFXOPT
2245 {"e500mc64", PROCESSOR_PPCE500MC64, POWERPC_BASE_MASK | MASK_POWERPC64
2246 | MASK_PPC_GFXOPT | MASK_ISEL},
2247 {"860", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
2248 {"970", PROCESSOR_POWER4,
2249 POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
2250 {"cell", PROCESSOR_CELL,
2251 POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
2252 {"common", PROCESSOR_COMMON, MASK_NEW_MNEMONICS},
2253 {"ec603e", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
2254 {"G3", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
2255 {"G4", PROCESSOR_PPC7450, POWERPC_7400_MASK},
2256 {"G5", PROCESSOR_POWER4,
2257 POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
2258 {"power", PROCESSOR_POWER, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
2259 {"power2", PROCESSOR_POWER,
2260 MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING},
2261 {"power3", PROCESSOR_PPC630,
2262 POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
2263 {"power4", PROCESSOR_POWER4,
2264 POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
2266 {"power5", PROCESSOR_POWER5,
2267 POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
2268 | MASK_MFCRF | MASK_POPCNTB},
2269 {"power5+", PROCESSOR_POWER5,
2270 POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
2271 | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND},
2272 {"power6", PROCESSOR_POWER6,
2273 POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
2274 | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP},
2275 {"power6x", PROCESSOR_POWER6,
2276 POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
2277 | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP
2279 {"power7", PROCESSOR_POWER7,
2280 POWERPC_7400_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_MFCRF
2281 | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP | MASK_POPCNTD
2282 | MASK_VSX}, /* Don't add MASK_ISEL by default */
2283 {"powerpc", PROCESSOR_POWERPC, POWERPC_BASE_MASK},
2284 {"powerpc64", PROCESSOR_POWERPC64,
2285 POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
2286 {"rios", PROCESSOR_RIOS1, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
2287 {"rios1", PROCESSOR_RIOS1, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
2288 {"rios2", PROCESSOR_RIOS2,
2289 MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING},
2290 {"rsc", PROCESSOR_PPC601, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
2291 {"rsc1", PROCESSOR_PPC601, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
2292 {"rs64", PROCESSOR_RS64A,
2293 POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64}
2296 const size_t ptt_size = ARRAY_SIZE (processor_target_table);
2298 /* Some OSs don't support saving the high part of 64-bit registers on
2299 context switch. Other OSs don't support saving Altivec registers.
2300 On those OSs, we don't touch the MASK_POWERPC64 or MASK_ALTIVEC
2301 settings; if the user wants either, the user must explicitly specify
2302 them and we won't interfere with the user's specification. */
2305 POWER_MASKS = MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING,
2306 POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT | MASK_STRICT_ALIGN
2307 | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
2308 | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
2309 | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP
2310 | MASK_POPCNTD | MASK_VSX | MASK_ISEL | MASK_NO_UPDATE)
2313 /* Numerous experiments show that IRA-based loop pressure
2314 calculation works better for RTL loop invariant motion on targets
2315 with enough (>= 32) registers. It is an expensive optimization.
2316 So it is on only for peak performance. */
2318 flag_ira_loop_pressure = 1;
2320 /* Set the pointer size. */
2323 rs6000_pmode = (int)DImode;
2324 rs6000_pointer_size = 64;
2328 rs6000_pmode = (int)SImode;
2329 rs6000_pointer_size = 32;
2332 set_masks = POWER_MASKS | POWERPC_MASKS | MASK_SOFT_FLOAT;
2333 #ifdef OS_MISSING_POWERPC64
2334 if (OS_MISSING_POWERPC64)
2335 set_masks &= ~MASK_POWERPC64;
2337 #ifdef OS_MISSING_ALTIVEC
2338 if (OS_MISSING_ALTIVEC)
2339 set_masks &= ~MASK_ALTIVEC;
2342 /* Don't let the processor default override flags that were given explicitly. */
2343 set_masks &= ~target_flags_explicit;
2345 /* Identify the processor type. */
2346 rs6000_select[0].string = default_cpu;
2347 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
2349 for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
2351 ptr = &rs6000_select[i];
2352 if (ptr->string != (char *)0 && ptr->string[0] != '\0')
2354 for (j = 0; j < ptt_size; j++)
2355 if (! strcmp (ptr->string, processor_target_table[j].name))
2357 if (ptr->set_tune_p)
2358 rs6000_cpu = processor_target_table[j].processor;
2360 if (ptr->set_arch_p)
2362 target_flags &= ~set_masks;
2363 target_flags |= (processor_target_table[j].target_enable
2370 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
2374 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
2375 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64)
2378 error ("AltiVec not supported in this target");
2380 error ("SPE not supported in this target");
2383 /* Disable Cell microcode if we are optimizing for the Cell
2384 and not optimizing for size. */
2385 if (rs6000_gen_cell_microcode == -1)
2386 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
2389 /* If we are optimizing big endian systems for space and it's OK to
2390 use instructions that would be microcoded on the Cell, use the
2391 load/store multiple and string instructions. */
2392 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
2393 target_flags |= ~target_flags_explicit & (MASK_MULTIPLE | MASK_STRING);
2395 /* Don't allow -mmultiple or -mstring on little endian systems
2396 unless the cpu is a 750, because the hardware doesn't support the
2397 instructions used in little endian mode, and causes an alignment
2398 trap. The 750 does not cause an alignment trap (except when the
2399 target of the load is unaligned). */
2401 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
2403 if (TARGET_MULTIPLE)
2405 target_flags &= ~MASK_MULTIPLE;
2406 if ((target_flags_explicit & MASK_MULTIPLE) != 0)
2407 warning (0, "-mmultiple is not supported on little endian systems");
2412 target_flags &= ~MASK_STRING;
2413 if ((target_flags_explicit & MASK_STRING) != 0)
2414 warning (0, "-mstring is not supported on little endian systems");
2418 /* Add some warnings for VSX. */
2421 const char *msg = NULL;
2422 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
2423 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
2425 if (target_flags_explicit & MASK_VSX)
2426 msg = N_("-mvsx requires hardware floating point");
2428 target_flags &= ~ MASK_VSX;
2430 else if (TARGET_PAIRED_FLOAT)
2431 msg = N_("-mvsx and -mpaired are incompatible");
2432 /* The hardware will allow VSX and little endian, but until we make sure
2433 things like vector select, etc., work, don't allow VSX on little endian
2434 systems at this point. */
2435 else if (!BYTES_BIG_ENDIAN)
2436 msg = N_("-mvsx used with little endian code");
2437 else if (TARGET_AVOID_XFORM > 0)
2438 msg = N_("-mvsx needs indexed addressing");
2439 else if (!TARGET_ALTIVEC && (target_flags_explicit & MASK_ALTIVEC))
2441 if (target_flags_explicit & MASK_VSX)
2442 msg = N_("-mvsx and -mno-altivec are incompatible");
2444 msg = N_("-mno-altivec disables vsx");
2450 target_flags &= ~ MASK_VSX;
2452 else if (TARGET_VSX && !TARGET_ALTIVEC)
2453 target_flags |= MASK_ALTIVEC;
2456 /* Set debug flags */
2457 if (rs6000_debug_name)
2459 if (! strcmp (rs6000_debug_name, "all"))
2460 rs6000_debug_stack = rs6000_debug_arg = rs6000_debug_reg
2461 = rs6000_debug_addr = rs6000_debug_cost = 1;
2462 else if (! strcmp (rs6000_debug_name, "stack"))
2463 rs6000_debug_stack = 1;
2464 else if (! strcmp (rs6000_debug_name, "arg"))
2465 rs6000_debug_arg = 1;
2466 else if (! strcmp (rs6000_debug_name, "reg"))
2467 rs6000_debug_reg = 1;
2468 else if (! strcmp (rs6000_debug_name, "addr"))
2469 rs6000_debug_addr = 1;
2470 else if (! strcmp (rs6000_debug_name, "cost"))
2471 rs6000_debug_cost = 1;
2473 error ("unknown -mdebug-%s switch", rs6000_debug_name);
2475 /* If the appropriate debug option is enabled, replace the target hooks
2476 with debug versions that call the real version and then print
2477 debugging information. */
2478 if (TARGET_DEBUG_COST)
2480 targetm.rtx_costs = rs6000_debug_rtx_costs;
2481 targetm.address_cost = rs6000_debug_address_cost;
2482 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
2485 if (TARGET_DEBUG_ADDR)
2487 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
2488 targetm.legitimize_address = rs6000_debug_legitimize_address;
2489 rs6000_secondary_reload_class_ptr
2490 = rs6000_debug_secondary_reload_class;
2491 rs6000_secondary_memory_needed_ptr
2492 = rs6000_debug_secondary_memory_needed;
2493 rs6000_cannot_change_mode_class_ptr
2494 = rs6000_debug_cannot_change_mode_class;
2495 rs6000_preferred_reload_class_ptr
2496 = rs6000_debug_preferred_reload_class;
2497 rs6000_legitimize_reload_address_ptr
2498 = rs6000_debug_legitimize_reload_address;
2499 rs6000_mode_dependent_address_ptr
2500 = rs6000_debug_mode_dependent_address;
2504 if (rs6000_traceback_name)
2506 if (! strncmp (rs6000_traceback_name, "full", 4))
2507 rs6000_traceback = traceback_full;
2508 else if (! strncmp (rs6000_traceback_name, "part", 4))
2509 rs6000_traceback = traceback_part;
2510 else if (! strncmp (rs6000_traceback_name, "no", 2))
2511 rs6000_traceback = traceback_none;
2513 error ("unknown -mtraceback arg %qs; expecting %<full%>, %<partial%> or %<none%>",
2514 rs6000_traceback_name);
2517 if (!rs6000_explicit_options.long_double)
2518 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
2520 #ifndef POWERPC_LINUX
2521 if (!rs6000_explicit_options.ieee)
2522 rs6000_ieeequad = 1;
2525 /* Enable Altivec ABI for AIX -maltivec. */
2526 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
2527 rs6000_altivec_abi = 1;
2529 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
2530 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
2531 be explicitly overridden in either case. */
2534 if (!rs6000_explicit_options.altivec_abi
2535 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
2536 rs6000_altivec_abi = 1;
2538 /* Enable VRSAVE for AltiVec ABI, unless explicitly overridden. */
2539 if (!rs6000_explicit_options.vrsave)
2540 TARGET_ALTIVEC_VRSAVE = rs6000_altivec_abi;
2543 /* Set the Darwin64 ABI as default for 64-bit Darwin. */
2544 if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
2546 rs6000_darwin64_abi = 1;
2548 darwin_one_byte_bool = 1;
2550 /* Default to natural alignment, for better performance. */
2551 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
2554 /* Place FP constants in the constant pool instead of TOC
2555 if section anchors are enabled. */
2556 if (flag_section_anchors)
2557 TARGET_NO_FP_IN_TOC = 1;
2559 /* Handle -mtls-size option. */
2560 rs6000_parse_tls_size_option ();
2562 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2563 SUBTARGET_OVERRIDE_OPTIONS;
2565 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2566 SUBSUBTARGET_OVERRIDE_OPTIONS;
2568 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
2569 SUB3TARGET_OVERRIDE_OPTIONS;
2572 if (TARGET_E500 || rs6000_cpu == PROCESSOR_PPCE500MC
2573 || rs6000_cpu == PROCESSOR_PPCE500MC64)
2575 /* The e500 and e500mc do not have string instructions, and we set
2576 MASK_STRING above when optimizing for size. */
2577 if ((target_flags & MASK_STRING) != 0)
2578 target_flags = target_flags & ~MASK_STRING;
2580 else if (rs6000_select[1].string != NULL)
2582 /* For the powerpc-eabispe configuration, we set all these by
2583 default, so let's unset them if we manually set another
2584 CPU that is not the E500. */
2585 if (!rs6000_explicit_options.spe_abi)
2587 if (!rs6000_explicit_options.spe)
2589 if (!rs6000_explicit_options.float_gprs)
2590 rs6000_float_gprs = 0;
2591 if (!(target_flags_explicit & MASK_ISEL))
2592 target_flags &= ~MASK_ISEL;
2595 /* Detect invalid option combinations with E500. */
2598 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
2599 && rs6000_cpu != PROCESSOR_POWER5
2600 && rs6000_cpu != PROCESSOR_POWER6
2601 && rs6000_cpu != PROCESSOR_POWER7
2602 && rs6000_cpu != PROCESSOR_PPCA2
2603 && rs6000_cpu != PROCESSOR_CELL);
2604 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
2605 || rs6000_cpu == PROCESSOR_POWER5
2606 || rs6000_cpu == PROCESSOR_POWER7);
2607 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
2608 || rs6000_cpu == PROCESSOR_POWER5
2609 || rs6000_cpu == PROCESSOR_POWER6
2610 || rs6000_cpu == PROCESSOR_POWER7
2611 || rs6000_cpu == PROCESSOR_PPCE500MC
2612 || rs6000_cpu == PROCESSOR_PPCE500MC64);
2614 /* Allow debug switches to override the above settings. */
2615 if (TARGET_ALWAYS_HINT > 0)
2616 rs6000_always_hint = TARGET_ALWAYS_HINT;
2618 if (TARGET_SCHED_GROUPS > 0)
2619 rs6000_sched_groups = TARGET_SCHED_GROUPS;
2621 if (TARGET_ALIGN_BRANCH_TARGETS > 0)
2622 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
2624 rs6000_sched_restricted_insns_priority
2625 = (rs6000_sched_groups ? 1 : 0);
2627 /* Handle -msched-costly-dep option. */
2628 rs6000_sched_costly_dep
2629 = (rs6000_sched_groups ? store_to_load_dep_costly : no_dep_costly);
2631 if (rs6000_sched_costly_dep_str)
2633 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
2634 rs6000_sched_costly_dep = no_dep_costly;
2635 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
2636 rs6000_sched_costly_dep = all_deps_costly;
2637 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
2638 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
2639 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
2640 rs6000_sched_costly_dep = store_to_load_dep_costly;
2642 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
2643 atoi (rs6000_sched_costly_dep_str));
2646 /* Handle -minsert-sched-nops option. */
2647 rs6000_sched_insert_nops
2648 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
2650 if (rs6000_sched_insert_nops_str)
2652 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
2653 rs6000_sched_insert_nops = sched_finish_none;
2654 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
2655 rs6000_sched_insert_nops = sched_finish_pad_groups;
2656 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
2657 rs6000_sched_insert_nops = sched_finish_regroup_exact;
2659 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
2660 atoi (rs6000_sched_insert_nops_str));
2663 #ifdef TARGET_REGNAMES
2664 /* If the user desires alternate register names, copy in the
2665 alternate names now. */
2666 if (TARGET_REGNAMES)
2667 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
2670 /* Set aix_struct_return last, after the ABI is determined.
2671 If -maix-struct-return or -msvr4-struct-return was explicitly
2672 used, don't override with the ABI default. */
2673 if (!rs6000_explicit_options.aix_struct_ret)
2674 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
2676 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
2677 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
2680 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
2682 /* We can only guarantee the availability of DI pseudo-ops when
2683 assembling for 64-bit targets. */
2686 targetm.asm_out.aligned_op.di = NULL;
2687 targetm.asm_out.unaligned_op.di = NULL;
2690 /* Set branch target alignment, if not optimizing for size. */
2693 /* Cell wants to be aligned to 8 bytes for dual issue. */
2694 if (rs6000_cpu == PROCESSOR_CELL)
2696 if (align_functions <= 0)
2697 align_functions = 8;
2698 if (align_jumps <= 0)
2700 if (align_loops <= 0)
2703 if (rs6000_align_branch_targets)
2705 if (align_functions <= 0)
2706 align_functions = 16;
2707 if (align_jumps <= 0)
2709 if (align_loops <= 0)
2712 if (align_jumps_max_skip <= 0)
2713 align_jumps_max_skip = 15;
2714 if (align_loops_max_skip <= 0)
2715 align_loops_max_skip = 15;
2718 /* Arrange to save and restore machine status around nested functions. */
2719 init_machine_status = rs6000_init_machine_status;
2721 /* We should always be splitting complex arguments, but we can't break
2722 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
2723 if (DEFAULT_ABI != ABI_AIX)
2724 targetm.calls.split_complex_arg = NULL;
2726 /* Initialize rs6000_cost with the appropriate target costs. */
2728 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
2732 case PROCESSOR_RIOS1:
2733 rs6000_cost = &rios1_cost;
2736 case PROCESSOR_RIOS2:
2737 rs6000_cost = &rios2_cost;
2740 case PROCESSOR_RS64A:
2741 rs6000_cost = &rs64a_cost;
2744 case PROCESSOR_MPCCORE:
2745 rs6000_cost = &mpccore_cost;
2748 case PROCESSOR_PPC403:
2749 rs6000_cost = &ppc403_cost;
2752 case PROCESSOR_PPC405:
2753 rs6000_cost = &ppc405_cost;
2756 case PROCESSOR_PPC440:
2757 rs6000_cost = &ppc440_cost;
2760 case PROCESSOR_PPC476:
2761 rs6000_cost = &ppc476_cost;
2764 case PROCESSOR_PPC601:
2765 rs6000_cost = &ppc601_cost;
2768 case PROCESSOR_PPC603:
2769 rs6000_cost = &ppc603_cost;
2772 case PROCESSOR_PPC604:
2773 rs6000_cost = &ppc604_cost;
2776 case PROCESSOR_PPC604e:
2777 rs6000_cost = &ppc604e_cost;
2780 case PROCESSOR_PPC620:
2781 rs6000_cost = &ppc620_cost;
2784 case PROCESSOR_PPC630:
2785 rs6000_cost = &ppc630_cost;
2788 case PROCESSOR_CELL:
2789 rs6000_cost = &ppccell_cost;
2792 case PROCESSOR_PPC750:
2793 case PROCESSOR_PPC7400:
2794 rs6000_cost = &ppc750_cost;
2797 case PROCESSOR_PPC7450:
2798 rs6000_cost = &ppc7450_cost;
2801 case PROCESSOR_PPC8540:
2802 rs6000_cost = &ppc8540_cost;
2805 case PROCESSOR_PPCE300C2:
2806 case PROCESSOR_PPCE300C3:
2807 rs6000_cost = &ppce300c2c3_cost;
2810 case PROCESSOR_PPCE500MC:
2811 rs6000_cost = &ppce500mc_cost;
2814 case PROCESSOR_PPCE500MC64:
2815 rs6000_cost = &ppce500mc64_cost;
2818 case PROCESSOR_POWER4:
2819 case PROCESSOR_POWER5:
2820 rs6000_cost = &power4_cost;
2823 case PROCESSOR_POWER6:
2824 rs6000_cost = &power6_cost;
2827 case PROCESSOR_POWER7:
2828 rs6000_cost = &power7_cost;
2831 case PROCESSOR_PPCA2:
2832 rs6000_cost = &ppca2_cost;
2839 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
2840 set_param_value ("simultaneous-prefetches",
2841 rs6000_cost->simultaneous_prefetches);
2842 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
2843 set_param_value ("l1-cache-size", rs6000_cost->l1_cache_size);
2844 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
2845 set_param_value ("l1-cache-line-size", rs6000_cost->cache_line_size);
2846 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
2847 set_param_value ("l2-cache-size", rs6000_cost->l2_cache_size);
2849 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
2850 can be optimized to ap = __builtin_next_arg (0). */
2851 if (DEFAULT_ABI != ABI_V4)
2852 targetm.expand_builtin_va_start = NULL;
2854 /* Set up single/double float flags.
2855 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
2856 then set both flags. */
2857 if (TARGET_HARD_FLOAT && TARGET_FPRS
2858 && rs6000_single_float == 0 && rs6000_double_float == 0)
2859 rs6000_single_float = rs6000_double_float = 1;
2861 /* Reset single and double FP flags if target is E500. */
2864 rs6000_single_float = rs6000_double_float = 0;
2865 if (TARGET_E500_SINGLE)
2866 rs6000_single_float = 1;
2867 if (TARGET_E500_DOUBLE)
2868 rs6000_single_float = rs6000_double_float = 1;
2871 /* If not explicitly specified via option, decide whether to generate indexed
2872 load/store instructions. */
2873 if (TARGET_AVOID_XFORM == -1)
2874 /* Avoid indexed addressing when targeting Power6 in order to avoid
2875 the DERAT mispredict penalty. */
2876 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB);
2878 rs6000_init_hard_regno_mode_ok ();
2881 /* Implement targetm.vectorize.builtin_mask_for_load. */
2883 rs6000_builtin_mask_for_load (void)
2885 if (TARGET_ALTIVEC || TARGET_VSX)
2886 return altivec_builtin_mask_for_load;
2891 /* Implement targetm.vectorize.builtin_conversion.
2892 Returns a decl of a function that implements conversion of an integer vector
2893 into a floating-point vector, or vice-versa. DEST_TYPE is the
2894 destination type and SRC_TYPE the source type of the conversion.
2895 Return NULL_TREE if it is not available. */
2897 rs6000_builtin_conversion (unsigned int tcode, tree dest_type, tree src_type)
2899 enum tree_code code = (enum tree_code) tcode;
2903 case FIX_TRUNC_EXPR:
2904 switch (TYPE_MODE (dest_type))
2907 if (!VECTOR_UNIT_VSX_P (V2DFmode))
2910 return TYPE_UNSIGNED (dest_type)
2911 ? rs6000_builtin_decls[VSX_BUILTIN_XVCVDPUXDS_UNS]
2912 : rs6000_builtin_decls[VSX_BUILTIN_XVCVDPSXDS];
2915 if (VECTOR_UNIT_NONE_P (V4SImode) || VECTOR_UNIT_NONE_P (V4SFmode))
2918 return TYPE_UNSIGNED (dest_type)
2919 ? rs6000_builtin_decls[VECTOR_BUILTIN_FIXUNS_V4SF_V4SI]
2920 : rs6000_builtin_decls[VECTOR_BUILTIN_FIX_V4SF_V4SI];
2927 switch (TYPE_MODE (src_type))
2930 if (!VECTOR_UNIT_VSX_P (V2DFmode))
2933 return TYPE_UNSIGNED (src_type)
2934 ? rs6000_builtin_decls[VSX_BUILTIN_XVCVUXDDP]
2935 : rs6000_builtin_decls[VSX_BUILTIN_XVCVSXDDP];
2938 if (VECTOR_UNIT_NONE_P (V4SImode) || VECTOR_UNIT_NONE_P (V4SFmode))
2941 return TYPE_UNSIGNED (src_type)
2942 ? rs6000_builtin_decls[VECTOR_BUILTIN_UNSFLOAT_V4SI_V4SF]
2943 : rs6000_builtin_decls[VECTOR_BUILTIN_FLOAT_V4SI_V4SF];
2954 /* Implement targetm.vectorize.builtin_mul_widen_even. */
2956 rs6000_builtin_mul_widen_even (tree type)
2958 if (!TARGET_ALTIVEC)
2961 switch (TYPE_MODE (type))
2964 return TYPE_UNSIGNED (type)
2965 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULEUH_UNS]
2966 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULESH];
2969 return TYPE_UNSIGNED (type)
2970 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULEUB_UNS]
2971 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULESB];
2977 /* Implement targetm.vectorize.builtin_mul_widen_odd. */
2979 rs6000_builtin_mul_widen_odd (tree type)
2981 if (!TARGET_ALTIVEC)
2984 switch (TYPE_MODE (type))
2987 return TYPE_UNSIGNED (type)
2988 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOUH_UNS]
2989 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOSH];
2992 return TYPE_UNSIGNED (type)
2993 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOUB_UNS]
2994 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOSB];
3001 /* Return true iff a data reference of TYPE can reach vector alignment (16)
3002 after applying N iterations. This routine does not determine
3003 how many iterations are required to reach the desired alignment. */
3006 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
3013 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
3016 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
3026 /* Assuming that all other types are naturally aligned. CHECKME! */
3031 /* Return true if the vector misalignment factor is supported by the target. */
3034 rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
3041 /* Return false if the movmisalign pattern is not supported for this mode. */
3042 if (optab_handler (movmisalign_optab, mode)->insn_code ==
3046 if (misalignment == -1)
3048 /* misalignment factor is unknown at compile time but we know
3049 it's word aligned. */
3050 if (rs6000_vector_alignment_reachable (type, is_packed))
3054 /* VSX supports word-aligned vectors. */
3055 if (misalignment % 4 == 0)
3061 /* Implement targetm.vectorize.builtin_vec_perm. */
3063 rs6000_builtin_vec_perm (tree type, tree *mask_element_type)
3065 tree inner_type = TREE_TYPE (type);
3066 bool uns_p = TYPE_UNSIGNED (inner_type);
3069 *mask_element_type = unsigned_char_type_node;
3071 switch (TYPE_MODE (type))
3075 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_16QI_UNS]
3076 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_16QI]);
3081 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_8HI_UNS]
3082 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_8HI]);
3087 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_4SI_UNS]
3088 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_4SI]);
3092 d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_4SF];
3096 if (!TARGET_ALLOW_DF_PERMUTE)
3099 d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_2DF];
3103 if (!TARGET_ALLOW_DF_PERMUTE)
3107 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_2DI_UNS]
3108 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_2DI]);
3119 /* Handle generic options of the form -mfoo=yes/no.
3120 NAME is the option name.
3121 VALUE is the option value.
3122 FLAG is a pointer to the flag in which to store 1 or 0, depending on
3123 whether the option value is 'yes' or 'no' respectively. */
3125 rs6000_parse_yes_no_option (const char *name, const char *value, int *flag)
3129 else if (!strcmp (value, "yes"))
3131 else if (!strcmp (value, "no"))
3134 error ("unknown -m%s= option specified: '%s'", name, value);
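/* This helper is used further down for options such as -mvrsave= and
   -mspe=, e.g. rs6000_parse_yes_no_option ("vrsave", arg,
   &(TARGET_ALTIVEC_VRSAVE)) in rs6000_handle_option.  */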
3137 /* Validate and record the size specified with the -mtls-size option. */
3140 rs6000_parse_tls_size_option (void)
3142 if (rs6000_tls_size_string == 0)
3144 else if (strcmp (rs6000_tls_size_string, "16") == 0)
3145 rs6000_tls_size = 16;
3146 else if (strcmp (rs6000_tls_size_string, "32") == 0)
3147 rs6000_tls_size = 32;
3148 else if (strcmp (rs6000_tls_size_string, "64") == 0)
3149 rs6000_tls_size = 64;
3151 error ("bad value %qs for -mtls-size switch", rs6000_tls_size_string);
3155 optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
3157 if (DEFAULT_ABI == ABI_DARWIN)
3158 /* The Darwin libraries never set errno, so we might as well
3159 avoid calling them when that's the only reason we would. */
3160 flag_errno_math = 0;
3162 /* Double growth factor to counter reduced min jump length. */
3163 set_param_value ("max-grow-copy-bb-insns", 16);
3165 /* Enable section anchors by default.
3166 Skip section anchors for Objective C and Objective C++
3167 until the front ends are fixed. */
3168 if (!TARGET_MACHO && lang_hooks.name[4] != 'O')
3169 flag_section_anchors = 2;
3172 static enum fpu_type_t
3173 rs6000_parse_fpu_option (const char *option)
3175 if (!strcmp("none", option)) return FPU_NONE;
3176 if (!strcmp("sp_lite", option)) return FPU_SF_LITE;
3177 if (!strcmp("dp_lite", option)) return FPU_DF_LITE;
3178 if (!strcmp("sp_full", option)) return FPU_SF_FULL;
3179 if (!strcmp("dp_full", option)) return FPU_DF_FULL;
3180 error("unknown value %s for -mfpu", option);
3184 /* Returns a function decl for a vectorized version of the builtin function
3185 with builtin function code FN and the result vector type TYPE, or NULL_TREE
3186 if it is not available. */
3189 rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
3192 enum machine_mode in_mode, out_mode;
3194 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
3196 if (TREE_CODE (type_out) != VECTOR_TYPE
3197 || TREE_CODE (type_in) != VECTOR_TYPE
3198 || !TARGET_VECTORIZE_BUILTINS
3199 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
3202 out_mode = TYPE_MODE (TREE_TYPE (type_out));
3203 out_n = TYPE_VECTOR_SUBPARTS (type_out);
3204 in_mode = TYPE_MODE (TREE_TYPE (type_in));
3205 in_n = TYPE_VECTOR_SUBPARTS (type_in);
3209 case BUILT_IN_COPYSIGN:
3210 if (VECTOR_UNIT_VSX_P (V2DFmode)
3211 && out_mode == DFmode && out_n == 2
3212 && in_mode == DFmode && in_n == 2)
3213 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
3215 case BUILT_IN_COPYSIGNF:
3216 if (out_mode != SFmode || out_n != 4
3217 || in_mode != SFmode || in_n != 4)
3219 if (VECTOR_UNIT_VSX_P (V4SFmode))
3220 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
3221 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3222 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
3225 if (VECTOR_UNIT_VSX_P (V2DFmode)
3226 && out_mode == DFmode && out_n == 2
3227 && in_mode == DFmode && in_n == 2)
3228 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
3230 case BUILT_IN_SQRTF:
3231 if (VECTOR_UNIT_VSX_P (V4SFmode)
3232 && out_mode == SFmode && out_n == 4
3233 && in_mode == SFmode && in_n == 4)
3234 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
3237 if (VECTOR_UNIT_VSX_P (V2DFmode)
3238 && out_mode == DFmode && out_n == 2
3239 && in_mode == DFmode && in_n == 2)
3240 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
3242 case BUILT_IN_CEILF:
3243 if (out_mode != SFmode || out_n != 4
3244 || in_mode != SFmode || in_n != 4)
3246 if (VECTOR_UNIT_VSX_P (V4SFmode))
3247 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
3248 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3249 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
3251 case BUILT_IN_FLOOR:
3252 if (VECTOR_UNIT_VSX_P (V2DFmode)
3253 && out_mode == DFmode && out_n == 2
3254 && in_mode == DFmode && in_n == 2)
3255 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
3257 case BUILT_IN_FLOORF:
3258 if (out_mode != SFmode || out_n != 4
3259 || in_mode != SFmode || in_n != 4)
3261 if (VECTOR_UNIT_VSX_P (V4SFmode))
3262 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
3263 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3264 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
3266 case BUILT_IN_TRUNC:
3267 if (VECTOR_UNIT_VSX_P (V2DFmode)
3268 && out_mode == DFmode && out_n == 2
3269 && in_mode == DFmode && in_n == 2)
3270 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
3272 case BUILT_IN_TRUNCF:
3273 if (out_mode != SFmode || out_n != 4
3274 || in_mode != SFmode || in_n != 4)
3276 if (VECTOR_UNIT_VSX_P (V4SFmode))
3277 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
3278 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3279 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
3281 case BUILT_IN_NEARBYINT:
3282 if (VECTOR_UNIT_VSX_P (V2DFmode)
3283 && flag_unsafe_math_optimizations
3284 && out_mode == DFmode && out_n == 2
3285 && in_mode == DFmode && in_n == 2)
3286 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
3288 case BUILT_IN_NEARBYINTF:
3289 if (VECTOR_UNIT_VSX_P (V4SFmode)
3290 && flag_unsafe_math_optimizations
3291 && out_mode == SFmode && out_n == 4
3292 && in_mode == SFmode && in_n == 4)
3293 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
3296 if (VECTOR_UNIT_VSX_P (V2DFmode)
3297 && !flag_trapping_math
3298 && out_mode == DFmode && out_n == 2
3299 && in_mode == DFmode && in_n == 2)
3300 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
3302 case BUILT_IN_RINTF:
3303 if (VECTOR_UNIT_VSX_P (V4SFmode)
3304 && !flag_trapping_math
3305 && out_mode == SFmode && out_n == 4
3306 && in_mode == SFmode && in_n == 4)
3307 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
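/* For example, a truncf call over four floats (BUILT_IN_TRUNCF with
   V4SFmode in and out) is vectorized to the VSX xvrspiz builtin when VSX
   is available, falling back to the Altivec vrfiz builtin otherwise, as
   handled in the BUILT_IN_TRUNCF case above.  */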
3316 /* Implement TARGET_HANDLE_OPTION. */
3319 rs6000_handle_option (size_t code, const char *arg, int value)
3321 enum fpu_type_t fpu_type = FPU_NONE;
3327 target_flags &= ~(MASK_POWER | MASK_POWER2
3328 | MASK_MULTIPLE | MASK_STRING);
3329 target_flags_explicit |= (MASK_POWER | MASK_POWER2
3330 | MASK_MULTIPLE | MASK_STRING);
3332 case OPT_mno_powerpc:
3333 target_flags &= ~(MASK_POWERPC | MASK_PPC_GPOPT
3334 | MASK_PPC_GFXOPT | MASK_POWERPC64);
3335 target_flags_explicit |= (MASK_POWERPC | MASK_PPC_GPOPT
3336 | MASK_PPC_GFXOPT | MASK_POWERPC64);
3339 target_flags &= ~MASK_MINIMAL_TOC;
3340 TARGET_NO_FP_IN_TOC = 0;
3341 TARGET_NO_SUM_IN_TOC = 0;
3342 target_flags_explicit |= MASK_MINIMAL_TOC;
3343 #ifdef TARGET_USES_SYSV4_OPT
3344 /* Note, V.4 no longer uses a normal TOC, so make -mfull-toc behave
3345 just the same as -mminimal-toc. */
3346 target_flags |= MASK_MINIMAL_TOC;
3347 target_flags_explicit |= MASK_MINIMAL_TOC;
3351 #ifdef TARGET_USES_SYSV4_OPT
3353 /* Make -mtoc behave like -mminimal-toc. */
3354 target_flags |= MASK_MINIMAL_TOC;
3355 target_flags_explicit |= MASK_MINIMAL_TOC;
3359 #ifdef TARGET_USES_AIX64_OPT
3364 target_flags |= MASK_POWERPC64 | MASK_POWERPC;
3365 target_flags |= ~target_flags_explicit & MASK_PPC_GFXOPT;
3366 target_flags_explicit |= MASK_POWERPC64 | MASK_POWERPC;
3369 #ifdef TARGET_USES_AIX64_OPT
3374 target_flags &= ~MASK_POWERPC64;
3375 target_flags_explicit |= MASK_POWERPC64;
3378 case OPT_minsert_sched_nops_:
3379 rs6000_sched_insert_nops_str = arg;
3382 case OPT_mminimal_toc:
3385 TARGET_NO_FP_IN_TOC = 0;
3386 TARGET_NO_SUM_IN_TOC = 0;
3393 target_flags |= (MASK_MULTIPLE | MASK_STRING);
3394 target_flags_explicit |= (MASK_MULTIPLE | MASK_STRING);
3401 target_flags |= (MASK_POWER | MASK_MULTIPLE | MASK_STRING);
3402 target_flags_explicit |= (MASK_POWER | MASK_MULTIPLE | MASK_STRING);
3406 case OPT_mpowerpc_gpopt:
3407 case OPT_mpowerpc_gfxopt:
3410 target_flags |= MASK_POWERPC;
3411 target_flags_explicit |= MASK_POWERPC;
3415 case OPT_maix_struct_return:
3416 case OPT_msvr4_struct_return:
3417 rs6000_explicit_options.aix_struct_ret = true;
3421 rs6000_explicit_options.vrsave = true;
3422 TARGET_ALTIVEC_VRSAVE = value;
3426 rs6000_explicit_options.vrsave = true;
3427 rs6000_parse_yes_no_option ("vrsave", arg, &(TARGET_ALTIVEC_VRSAVE));
3431 target_flags_explicit |= MASK_ISEL;
3433 rs6000_parse_yes_no_option ("isel", arg, &isel);
3435 target_flags |= MASK_ISEL;
3437 target_flags &= ~MASK_ISEL;
3441 rs6000_explicit_options.spe = true;
3446 rs6000_explicit_options.spe = true;
3447 rs6000_parse_yes_no_option ("spe", arg, &(rs6000_spe));
3451 rs6000_debug_name = arg;
3454 #ifdef TARGET_USES_SYSV4_OPT
3456 rs6000_abi_name = arg;
3460 rs6000_sdata_name = arg;
3463 case OPT_mtls_size_:
3464 rs6000_tls_size_string = arg;
3467 case OPT_mrelocatable:
3470 target_flags |= MASK_MINIMAL_TOC;
3471 target_flags_explicit |= MASK_MINIMAL_TOC;
3472 TARGET_NO_FP_IN_TOC = 1;
3476 case OPT_mrelocatable_lib:
3479 target_flags |= MASK_RELOCATABLE | MASK_MINIMAL_TOC;
3480 target_flags_explicit |= MASK_RELOCATABLE | MASK_MINIMAL_TOC;
3481 TARGET_NO_FP_IN_TOC = 1;
3485 target_flags &= ~MASK_RELOCATABLE;
3486 target_flags_explicit |= MASK_RELOCATABLE;
3492 if (!strcmp (arg, "altivec"))
3494 rs6000_explicit_options.altivec_abi = true;
3495 rs6000_altivec_abi = 1;
3497 /* Enabling the AltiVec ABI turns off the SPE ABI. */
3500 else if (! strcmp (arg, "no-altivec"))
3502 rs6000_explicit_options.altivec_abi = true;
3503 rs6000_altivec_abi = 0;
3505 else if (! strcmp (arg, "spe"))
3507 rs6000_explicit_options.spe_abi = true;
3509 rs6000_altivec_abi = 0;
3510 if (!TARGET_SPE_ABI)
3511 error ("not configured for ABI: '%s'", arg);
3513 else if (! strcmp (arg, "no-spe"))
3515 rs6000_explicit_options.spe_abi = true;
3519 /* These are here for testing during development only; please do not
3520 document them in the manual. */
3521 else if (! strcmp (arg, "d64"))
3523 rs6000_darwin64_abi = 1;
3524 warning (0, "Using darwin64 ABI");
3526 else if (! strcmp (arg, "d32"))
3528 rs6000_darwin64_abi = 0;
3529 warning (0, "Using old darwin ABI");
3532 else if (! strcmp (arg, "ibmlongdouble"))
3534 rs6000_explicit_options.ieee = true;
3535 rs6000_ieeequad = 0;
3536 warning (0, "Using IBM extended precision long double");
3538 else if (! strcmp (arg, "ieeelongdouble"))
3540 rs6000_explicit_options.ieee = true;
3541 rs6000_ieeequad = 1;
3542 warning (0, "Using IEEE extended precision long double");
3547 error ("unknown ABI specified: '%s'", arg);
3553 rs6000_select[1].string = arg;
3557 rs6000_select[2].string = arg;
3560 case OPT_mtraceback_:
3561 rs6000_traceback_name = arg;
3564 case OPT_mfloat_gprs_:
3565 rs6000_explicit_options.float_gprs = true;
3566 if (! strcmp (arg, "yes") || ! strcmp (arg, "single"))
3567 rs6000_float_gprs = 1;
3568 else if (! strcmp (arg, "double"))
3569 rs6000_float_gprs = 2;
3570 else if (! strcmp (arg, "no"))
3571 rs6000_float_gprs = 0;
3574 error ("invalid option for -mfloat-gprs: '%s'", arg);
3579 case OPT_mlong_double_:
3580 rs6000_explicit_options.long_double = true;
3581 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
3582 if (value != 64 && value != 128)
3584 error ("Unknown switch -mlong-double-%s", arg);
3585 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
3589 rs6000_long_double_type_size = value;
3592 case OPT_msched_costly_dep_:
3593 rs6000_sched_costly_dep_str = arg;
3597 rs6000_explicit_options.alignment = true;
3598 if (! strcmp (arg, "power"))
3600 /* On 64-bit Darwin, power alignment is ABI-incompatible with
3601 some C library functions, so warn about it. The flag may be
3602 useful for performance studies from time to time though, so
3603 don't disable it entirely. */
3604 if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
3605 warning (0, "-malign-power is not supported for 64-bit Darwin;"
3606 " it is incompatible with the installed C and C++ libraries");
3607 rs6000_alignment_flags = MASK_ALIGN_POWER;
3609 else if (! strcmp (arg, "natural"))
3610 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
3613 error ("unknown -malign-XXXXX option specified: '%s'", arg);
3618 case OPT_msingle_float:
3619 if (!TARGET_SINGLE_FPU)
3620 warning (0, "-msingle-float option equivalent to -mhard-float");
3621 /* -msingle-float implies -mno-double-float and TARGET_HARD_FLOAT. */
3622 rs6000_double_float = 0;
3623 target_flags &= ~MASK_SOFT_FLOAT;
3624 target_flags_explicit |= MASK_SOFT_FLOAT;
3627 case OPT_mdouble_float:
3628 /* -mdouble-float implies -msingle-float and TARGET_HARD_FLOAT. */
3629 rs6000_single_float = 1;
3630 target_flags &= ~MASK_SOFT_FLOAT;
3631 target_flags_explicit |= MASK_SOFT_FLOAT;
3634 case OPT_msimple_fpu:
3635 if (!TARGET_SINGLE_FPU)
3636 warning (0, "-msimple-fpu option ignored");
3639 case OPT_mhard_float:
3640 /* -mhard-float implies -msingle-float and -mdouble-float. */
3641 rs6000_single_float = rs6000_double_float = 1;
3644 case OPT_msoft_float:
3645 /* -msoft-float implies -mno-single-float and -mno-double-float. */
3646 rs6000_single_float = rs6000_double_float = 0;
3650 fpu_type = rs6000_parse_fpu_option(arg);
3651 if (fpu_type != FPU_NONE)
3652 /* If -mfpu is not none, then turn off SOFT_FLOAT, turn on HARD_FLOAT. */
3654 target_flags &= ~MASK_SOFT_FLOAT;
3655 target_flags_explicit |= MASK_SOFT_FLOAT;
3656 rs6000_xilinx_fpu = 1;
3657 if (fpu_type == FPU_SF_LITE || fpu_type == FPU_SF_FULL)
3658 rs6000_single_float = 1;
3659 if (fpu_type == FPU_DF_LITE || fpu_type == FPU_DF_FULL)
3660 rs6000_single_float = rs6000_double_float = 1;
3661 if (fpu_type == FPU_SF_LITE || fpu_type == FPU_DF_LITE)
3662 rs6000_simple_fpu = 1;
3666 /* -mfpu=none is equivalent to -msoft-float. */
3667 target_flags |= MASK_SOFT_FLOAT;
3668 target_flags_explicit |= MASK_SOFT_FLOAT;
3669 rs6000_single_float = rs6000_double_float = 0;
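/* For example (illustrative): -mfpu=sp_lite selects a single-precision
   Xilinx FPU, enabling hard float with single precision only and setting
   rs6000_simple_fpu, whereas -mfpu=dp_full enables both single and double
   precision without the "simple" restriction.  */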
3676 /* Do anything needed at the start of the asm file. */
3679 rs6000_file_start (void)
3683 const char *start = buffer;
3684 struct rs6000_cpu_select *ptr;
3685 const char *default_cpu = TARGET_CPU_DEFAULT;
3686 FILE *file = asm_out_file;
3688 default_file_start ();
3690 #ifdef TARGET_BI_ARCH
3691 if ((TARGET_DEFAULT ^ target_flags) & MASK_64BIT)
3695 if (flag_verbose_asm)
3697 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
3698 rs6000_select[0].string = default_cpu;
3700 for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
3702 ptr = &rs6000_select[i];
3703 if (ptr->string != (char *)0 && ptr->string[0] != '\0')
3705 fprintf (file, "%s %s%s", start, ptr->name, ptr->string);
3710 if (PPC405_ERRATUM77)
3712 fprintf (file, "%s PPC405CR_ERRATUM77", start);
3716 #ifdef USING_ELFOS_H
3717 switch (rs6000_sdata)
3719 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
3720 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
3721 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
3722 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
3725 if (rs6000_sdata && g_switch_value)
3727 fprintf (file, "%s -G " HOST_WIDE_INT_PRINT_UNSIGNED, start,
3737 #ifdef HAVE_AS_GNU_ATTRIBUTE
3738 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
3740 fprintf (file, "\t.gnu_attribute 4, %d\n",
3741 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
3742 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
3744 fprintf (file, "\t.gnu_attribute 8, %d\n",
3745 (TARGET_ALTIVEC_ABI ? 2
3746 : TARGET_SPE_ABI ? 3
3748 fprintf (file, "\t.gnu_attribute 12, %d\n",
3749 aix_struct_return ? 2 : 1);
3754 if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
3756 switch_to_section (toc_section);
3757 switch_to_section (text_section);
3762 /* Return nonzero if this function is known to have a null epilogue. */
3765 direct_return (void)
3767 if (reload_completed)
3769 rs6000_stack_t *info = rs6000_stack_info ();
3771 if (info->first_gp_reg_save == 32
3772 && info->first_fp_reg_save == 64
3773 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
3774 && ! info->lr_save_p
3775 && ! info->cr_save_p
3776 && info->vrsave_mask == 0
3784 /* Return the number of instructions it takes to form a constant in an
3785 integer register. */
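/* Illustrative examples (not from the original source): 0x00007fff passes
the first test below and needs a single li/addi; 0x12340000 has a zero low
halfword and needs a single lis/addis; 0x12345678 satisfies neither test
and takes two instructions (lis followed by ori).  */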
3788 num_insns_constant_wide (HOST_WIDE_INT value)
3790 /* signed constant loadable with {cal|addi} */
3791 if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
3794 /* constant loadable with {cau|addis} */
3795 else if ((value & 0xffff) == 0
3796 && (value >> 31 == -1 || value >> 31 == 0))
3799 #if HOST_BITS_PER_WIDE_INT == 64
3800 else if (TARGET_POWERPC64)
3802 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
3803 HOST_WIDE_INT high = value >> 31;
3805 if (high == 0 || high == -1)
3811 return num_insns_constant_wide (high) + 1;
3813 return num_insns_constant_wide (low) + 1;
3815 return (num_insns_constant_wide (high)
3816 + num_insns_constant_wide (low) + 1);
3825 num_insns_constant (rtx op, enum machine_mode mode)
3827 HOST_WIDE_INT low, high;
3829 switch (GET_CODE (op))
3832 #if HOST_BITS_PER_WIDE_INT == 64
3833 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
3834 && mask64_operand (op, mode))
3838 return num_insns_constant_wide (INTVAL (op));
3841 if (mode == SFmode || mode == SDmode)
3846 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
3847 if (DECIMAL_FLOAT_MODE_P (mode))
3848 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
3850 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
3851 return num_insns_constant_wide ((HOST_WIDE_INT) l);
3854 if (mode == VOIDmode || mode == DImode)
3856 high = CONST_DOUBLE_HIGH (op);
3857 low = CONST_DOUBLE_LOW (op);
3864 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
3865 if (DECIMAL_FLOAT_MODE_P (mode))
3866 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
3868 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
3869 high = l[WORDS_BIG_ENDIAN == 0];
3870 low = l[WORDS_BIG_ENDIAN != 0];
3874 return (num_insns_constant_wide (low)
3875 + num_insns_constant_wide (high));
3878 if ((high == 0 && low >= 0)
3879 || (high == -1 && low < 0))
3880 return num_insns_constant_wide (low);
3882 else if (mask64_operand (op, mode))
3886 return num_insns_constant_wide (high) + 1;
3889 return (num_insns_constant_wide (high)
3890 + num_insns_constant_wide (low) + 1);
3898 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
3899 If the mode of OP is MODE_VECTOR_INT, this simply returns the
3900 corresponding element of the vector, but for V4SFmode and V2SFmode,
3901 the corresponding "float" is interpreted as an SImode integer. */
3904 const_vector_elt_as_int (rtx op, unsigned int elt)
3906 rtx tmp = CONST_VECTOR_ELT (op, elt);
3907 if (GET_MODE (op) == V4SFmode
3908 || GET_MODE (op) == V2SFmode)
3909 tmp = gen_lowpart (SImode, tmp);
3910 return INTVAL (tmp);
3913 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
3914 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
3915 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
3916 all items are set to the same value and contain COPIES replicas of the
3917 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
3918 operand and the others are set to the value of the operand's msb. */
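/* Worked examples (illustrative, assuming big-endian element numbering):
the V16QImode constant { 0,0,0,5, 0,0,0,5, 0,0,0,5, 0,0,0,5 } matches a
vspltisw of 5 with STEP == 4 and COPIES == 1, since every fourth byte holds
the value and the rest hold its sign bits; a V4SImode constant with every
element equal to 0x00070007 matches a vspltish of 7 with STEP == 1 and
COPIES == 2, since each word holds two copies of the 16-bit splat value.  */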
3921 vspltis_constant (rtx op, unsigned step, unsigned copies)
3923 enum machine_mode mode = GET_MODE (op);
3924 enum machine_mode inner = GET_MODE_INNER (mode);
3927 unsigned nunits = GET_MODE_NUNITS (mode);
3928 unsigned bitsize = GET_MODE_BITSIZE (inner);
3929 unsigned mask = GET_MODE_MASK (inner);
3931 HOST_WIDE_INT val = const_vector_elt_as_int (op, nunits - 1);
3932 HOST_WIDE_INT splat_val = val;
3933 HOST_WIDE_INT msb_val = val > 0 ? 0 : -1;
3935 /* Construct the value to be splatted, if possible. If not, return 0. */
3936 for (i = 2; i <= copies; i *= 2)
3938 HOST_WIDE_INT small_val;
3940 small_val = splat_val >> bitsize;
3942 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
3944 splat_val = small_val;
3947 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
3948 if (EASY_VECTOR_15 (splat_val))
3951 /* Also check if we can splat, and then add the result to itself. Do so if
3952 the value is positive, or if the splat instruction is using OP's mode;
3953 for splat_val < 0, the splat and the add should use the same mode. */
3954 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
3955 && (splat_val >= 0 || (step == 1 && copies == 1)))
3958 /* Also check if we are loading up the most significant bit, which can be done by
3959 loading up -1 and shifting the value left by -1. */
3960 else if (EASY_VECTOR_MSB (splat_val, inner))
3966 /* Check if VAL is present in every STEP-th element, and the
3967 other elements are filled with its most significant bit. */
3968 for (i = 0; i < nunits - 1; ++i)
3970 HOST_WIDE_INT desired_val;
3971 if (((i + 1) & (step - 1)) == 0)
3974 desired_val = msb_val;
3976 if (desired_val != const_vector_elt_as_int (op, i))
3984 /* Return true if OP is of the given MODE and can be synthesized
3985 with a vspltisb, vspltish or vspltisw. */
3988 easy_altivec_constant (rtx op, enum machine_mode mode)
3990 unsigned step, copies;
3992 if (mode == VOIDmode)
3993 mode = GET_MODE (op);
3994 else if (mode != GET_MODE (op))
3997 /* Start with a vspltisw. */
3998 step = GET_MODE_NUNITS (mode) / 4;
4001 if (vspltis_constant (op, step, copies))
4004 /* Then try with a vspltish. */
4010 if (vspltis_constant (op, step, copies))
4013 /* And finally a vspltisb. */
4019 if (vspltis_constant (op, step, copies))
4025 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
4026 result is OP. Abort if it is not possible. */
4029 gen_easy_altivec_constant (rtx op)
4031 enum machine_mode mode = GET_MODE (op);
4032 int nunits = GET_MODE_NUNITS (mode);
4033 rtx last = CONST_VECTOR_ELT (op, nunits - 1);
4034 unsigned step = nunits / 4;
4035 unsigned copies = 1;
4037 /* Start with a vspltisw. */
4038 if (vspltis_constant (op, step, copies))
4039 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));
4041 /* Then try with a vspltish. */
4047 if (vspltis_constant (op, step, copies))
4048 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));
4050 /* And finally a vspltisb. */
4056 if (vspltis_constant (op, step, copies))
4057 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));
4063 output_vec_const_move (rtx *operands)
4066 enum machine_mode mode;
4071 mode = GET_MODE (dest);
4073 if (TARGET_VSX && zero_constant (vec, mode))
4074 return "xxlxor %x0,%x0,%x0";
4079 if (zero_constant (vec, mode))
4080 return "vxor %0,%0,%0";
4082 splat_vec = gen_easy_altivec_constant (vec);
4083 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
4084 operands[1] = XEXP (splat_vec, 0);
4085 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
4088 switch (GET_MODE (splat_vec))
4091 return "vspltisw %0,%1";
4094 return "vspltish %0,%1";
4097 return "vspltisb %0,%1";
4104 gcc_assert (TARGET_SPE);
4106 /* Vector constant 0 is handled as a splitter of V2SI, and in the
4107 pattern of V1DI, V4HI, and V2SF.
4109 FIXME: We should probably return # and add post reload
4110 splitters for these, but this way is so easy ;-). */
4111 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
4112 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
4113 operands[1] = CONST_VECTOR_ELT (vec, 0);
4114 operands[2] = CONST_VECTOR_ELT (vec, 1);
4116 return "li %0,%1\n\tevmergelo %0,%0,%0";
4118 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
4121 /* Initialize the paired-single vector TARGET to VALS. */
4124 paired_expand_vector_init (rtx target, rtx vals)
4126 enum machine_mode mode = GET_MODE (target);
4127 int n_elts = GET_MODE_NUNITS (mode);
4129 rtx x, new_rtx, tmp, constant_op, op1, op2;
4132 for (i = 0; i < n_elts; ++i)
4134 x = XVECEXP (vals, 0, i);
4135 if (!CONSTANT_P (x))
4140 /* Load from constant pool. */
4141 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
4147 /* The vector is initialized only with non-constants. */
4148 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
4149 XVECEXP (vals, 0, 1));
4151 emit_move_insn (target, new_rtx);
4155 /* One field is non-constant and the other one is a constant. Load the
4156 constant from the constant pool and use the ps_merge instruction to
4157 construct the whole vector. */
4158 op1 = XVECEXP (vals, 0, 0);
4159 op2 = XVECEXP (vals, 0, 1);
4161 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
4163 tmp = gen_reg_rtx (GET_MODE (constant_op));
4164 emit_move_insn (tmp, constant_op);
4166 if (CONSTANT_P (op1))
4167 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
4169 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
4171 emit_move_insn (target, new_rtx);
4175 paired_expand_vector_move (rtx operands[])
4177 rtx op0 = operands[0], op1 = operands[1];
4179 emit_move_insn (op0, op1);
4182 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
4183 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
4184 operands for the relation operation COND. This is a recursive
4188 paired_emit_vector_compare (enum rtx_code rcode,
4189 rtx dest, rtx op0, rtx op1,
4190 rtx cc_op0, rtx cc_op1)
4192 rtx tmp = gen_reg_rtx (V2SFmode);
4195 gcc_assert (TARGET_PAIRED_FLOAT);
4196 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
4202 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
4206 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4207 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
4211 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
4214 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4217 tmp1 = gen_reg_rtx (V2SFmode);
4218 max = gen_reg_rtx (V2SFmode);
4219 min = gen_reg_rtx (V2SFmode);
4220 gen_reg_rtx (V2SFmode);
4222 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4223 emit_insn (gen_selv2sf4
4224 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4225 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
4226 emit_insn (gen_selv2sf4
4227 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4228 emit_insn (gen_subv2sf3 (tmp1, min, max));
4229 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
4232 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
4235 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4238 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
4241 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
4244 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
4253 /* Emit vector conditional expression.
4254 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
4255 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
4258 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
4259 rtx cond, rtx cc_op0, rtx cc_op1)
4261 enum rtx_code rcode = GET_CODE (cond);
4263 if (!TARGET_PAIRED_FLOAT)
4266 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
4271 /* Initialize vector TARGET to VALS. */
4274 rs6000_expand_vector_init (rtx target, rtx vals)
4276 enum machine_mode mode = GET_MODE (target);
4277 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4278 int n_elts = GET_MODE_NUNITS (mode);
4279 int n_var = 0, one_var = -1;
4280 bool all_same = true, all_const_zero = true;
4284 for (i = 0; i < n_elts; ++i)
4286 x = XVECEXP (vals, 0, i);
4287 if (!CONSTANT_P (x))
4288 ++n_var, one_var = i;
4289 else if (x != CONST0_RTX (inner_mode))
4290 all_const_zero = false;
4292 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
4298 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
4299 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
4300 if ((int_vector_p || TARGET_VSX) && all_const_zero)
4302 /* Zero register. */
4303 emit_insn (gen_rtx_SET (VOIDmode, target,
4304 gen_rtx_XOR (mode, target, target)));
4307 else if (int_vector_p && easy_vector_constant (const_vec, mode))
4309 /* Splat immediate. */
4310 emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
4315 /* Load from constant pool. */
4316 emit_move_insn (target, const_vec);
4321 /* Double word values on VSX can use xxpermdi or lxvdsx. */
4322 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
4326 rtx element = XVECEXP (vals, 0, 0);
4327 if (mode == V2DFmode)
4328 emit_insn (gen_vsx_splat_v2df (target, element));
4330 emit_insn (gen_vsx_splat_v2di (target, element));
4334 rtx op0 = copy_to_reg (XVECEXP (vals, 0, 0));
4335 rtx op1 = copy_to_reg (XVECEXP (vals, 0, 1));
4336 if (mode == V2DFmode)
4337 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
4339 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
4344 /* With single precision floating point on VSX, note that internally single
4345 precision is actually represented as a double.  Either make two V2DF
4346 vectors and convert those vectors to single precision, or do one
4347 conversion and splat the result to the other elements. */
4348 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
4352 rtx freg = gen_reg_rtx (V4SFmode);
4353 rtx sreg = copy_to_reg (XVECEXP (vals, 0, 0));
4355 emit_insn (gen_vsx_xscvdpsp_scalar (freg, sreg));
4356 emit_insn (gen_vsx_xxspltw_v4sf (target, freg, const0_rtx));
4360 rtx dbl_even = gen_reg_rtx (V2DFmode);
4361 rtx dbl_odd = gen_reg_rtx (V2DFmode);
4362 rtx flt_even = gen_reg_rtx (V4SFmode);
4363 rtx flt_odd = gen_reg_rtx (V4SFmode);
4365 emit_insn (gen_vsx_concat_v2sf (dbl_even,
4366 copy_to_reg (XVECEXP (vals, 0, 0)),
4367 copy_to_reg (XVECEXP (vals, 0, 1))));
4368 emit_insn (gen_vsx_concat_v2sf (dbl_odd,
4369 copy_to_reg (XVECEXP (vals, 0, 2)),
4370 copy_to_reg (XVECEXP (vals, 0, 3))));
4371 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
4372 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
4373 emit_insn (gen_vec_extract_evenv4sf (target, flt_even, flt_odd));
4378 /* Store value to stack temp. Load vector element. Splat. However, splat
4379 of 64-bit items is not supported on Altivec. */
4380 if (all_same && GET_MODE_SIZE (mode) <= 4)
4382 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
4383 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
4384 XVECEXP (vals, 0, 0));
4385 x = gen_rtx_UNSPEC (VOIDmode,
4386 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
4387 emit_insn (gen_rtx_PARALLEL (VOIDmode,
4389 gen_rtx_SET (VOIDmode,
4392 x = gen_rtx_VEC_SELECT (inner_mode, target,
4393 gen_rtx_PARALLEL (VOIDmode,
4394 gen_rtvec (1, const0_rtx)));
4395 emit_insn (gen_rtx_SET (VOIDmode, target,
4396 gen_rtx_VEC_DUPLICATE (mode, x)));
4400 /* One field is non-constant. Load constant then overwrite
4404 rtx copy = copy_rtx (vals);
4406 /* Load constant part of vector, substitute neighboring value for
4408 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
4409 rs6000_expand_vector_init (target, copy);
4411 /* Insert variable. */
4412 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
4416 /* Construct the vector in memory one field at a time
4417 and load the whole vector. */
4418 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
4419 for (i = 0; i < n_elts; i++)
4420 emit_move_insn (adjust_address_nv (mem, inner_mode,
4421 i * GET_MODE_SIZE (inner_mode)),
4422 XVECEXP (vals, 0, i));
4423 emit_move_insn (target, mem);
4426 /* Set field ELT of TARGET to VAL. */
4429 rs6000_expand_vector_set (rtx target, rtx val, int elt)
4431 enum machine_mode mode = GET_MODE (target);
4432 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4433 rtx reg = gen_reg_rtx (mode);
4435 int width = GET_MODE_SIZE (inner_mode);
4438 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
4440 rtx (*set_func) (rtx, rtx, rtx, rtx)
4441 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
4442 emit_insn (set_func (target, target, val, GEN_INT (elt)));
4446 /* Load single variable value. */
4447 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
4448 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
4449 x = gen_rtx_UNSPEC (VOIDmode,
4450 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
4451 emit_insn (gen_rtx_PARALLEL (VOIDmode,
4453 gen_rtx_SET (VOIDmode,
4457 /* Linear sequence. */
4458 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
4459 for (i = 0; i < 16; ++i)
4460 XVECEXP (mask, 0, i) = GEN_INT (i);
4462 /* Set permute mask to insert element into target. */
4463 for (i = 0; i < width; ++i)
4464 XVECEXP (mask, 0, elt*width + i)
4465 = GEN_INT (i + 0x10);
4466 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
4467 x = gen_rtx_UNSPEC (mode,
4468 gen_rtvec (3, target, reg,
4469 force_reg (V16QImode, x)),
4471 emit_insn (gen_rtx_SET (VOIDmode, target, x));
4474 /* Extract field ELT from VEC into TARGET. */
4477 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
4479 enum machine_mode mode = GET_MODE (vec);
4480 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4483 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
4485 rtx (*extract_func) (rtx, rtx, rtx)
4486 = ((mode == V2DFmode) ? gen_vsx_extract_v2df : gen_vsx_extract_v2di);
4487 emit_insn (extract_func (target, vec, GEN_INT (elt)));
4491 /* Allocate mode-sized buffer. */
4492 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
4494 /* Add offset to field within buffer matching vector element. */
4495 mem = adjust_address_nv (mem, mode, elt * GET_MODE_SIZE (inner_mode));
4497 /* Store single field into mode-sized buffer. */
4498 x = gen_rtx_UNSPEC (VOIDmode,
4499 gen_rtvec (1, const0_rtx), UNSPEC_STVE);
4500 emit_insn (gen_rtx_PARALLEL (VOIDmode,
4502 gen_rtx_SET (VOIDmode,
4505 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
4508 /* Generates shifts and masks for a pair of rldicl or rldicr insns to
4509 implement ANDing by the mask IN. */
4511 build_mask64_2_operands (rtx in, rtx *out)
4513 #if HOST_BITS_PER_WIDE_INT >= 64
4514 unsigned HOST_WIDE_INT c, lsb, m1, m2;
4517 gcc_assert (GET_CODE (in) == CONST_INT);
4522 /* Assume c initially something like 0x00fff000000fffff. The idea
4523 is to rotate the word so that the middle ^^^^^^ group of zeros
4524 is at the MS end and can be cleared with an rldicl mask. We then
4525 rotate back and clear off the MS ^^ group of zeros with a
4527 c = ~c; /* c == 0xff000ffffff00000 */
4528 lsb = c & -c; /* lsb == 0x0000000000100000 */
4529 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
4530 c = ~c; /* c == 0x00fff000000fffff */
4531 c &= -lsb; /* c == 0x00fff00000000000 */
4532 lsb = c & -c; /* lsb == 0x0000100000000000 */
4533 c = ~c; /* c == 0xff000fffffffffff */
4534 c &= -lsb; /* c == 0xff00000000000000 */
4536 while ((lsb >>= 1) != 0)
4537 shift++; /* shift == 44 on exit from loop */
4538 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
4539 m1 = ~m1; /* m1 == 0x000000ffffffffff */
4540 m2 = ~c; /* m2 == 0x00ffffffffffffff */
4544 /* Assume c initially something like 0xff000f0000000000. The idea
4545 is to rotate the word so that the ^^^ middle group of zeros
4546 is at the LS end and can be cleared with an rldicr mask. We then
4547 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
4549 lsb = c & -c; /* lsb == 0x0000010000000000 */
4550 m2 = -lsb; /* m2 == 0xffffff0000000000 */
4551 c = ~c; /* c == 0x00fff0ffffffffff */
4552 c &= -lsb; /* c == 0x00fff00000000000 */
4553 lsb = c & -c; /* lsb == 0x0000100000000000 */
4554 c = ~c; /* c == 0xff000fffffffffff */
4555 c &= -lsb; /* c == 0xff00000000000000 */
4557 while ((lsb >>= 1) != 0)
4558 shift++; /* shift == 44 on exit from loop */
4559 m1 = ~c; /* m1 == 0x00ffffffffffffff */
4560 m1 >>= shift; /* m1 == 0x0000000000000fff */
4561 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
4564 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
4565 masks will be all 1's. We are guaranteed more than one transition. */
4566 out[0] = GEN_INT (64 - shift);
4567 out[1] = GEN_INT (m1);
4568 out[2] = GEN_INT (shift);
4569 out[3] = GEN_INT (m2);
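/* Illustrative trace of the first example above: for IN ==
0x00fff000000fffff this yields OUT == { 20, 0x000000ffffffffff, 44,
0x00ffffffffffffff }; rotating left by OUT[0] and ANDing with OUT[1],
then rotating left by OUT[2] (back to the original position) and ANDing
with OUT[3] reproduces X & IN.  */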
4577 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
4580 invalid_e500_subreg (rtx op, enum machine_mode mode)
4582 if (TARGET_E500_DOUBLE)
4584 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
4585 subreg:TI and reg:TF. Decimal float modes are like integer
4586 modes (only low part of each register used) for this
4588 if (GET_CODE (op) == SUBREG
4589 && (mode == SImode || mode == DImode || mode == TImode
4590 || mode == DDmode || mode == TDmode)
4591 && REG_P (SUBREG_REG (op))
4592 && (GET_MODE (SUBREG_REG (op)) == DFmode
4593 || GET_MODE (SUBREG_REG (op)) == TFmode))
4596 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
4598 if (GET_CODE (op) == SUBREG
4599 && (mode == DFmode || mode == TFmode)
4600 && REG_P (SUBREG_REG (op))
4601 && (GET_MODE (SUBREG_REG (op)) == DImode
4602 || GET_MODE (SUBREG_REG (op)) == TImode
4603 || GET_MODE (SUBREG_REG (op)) == DDmode
4604 || GET_MODE (SUBREG_REG (op)) == TDmode))
4609 && GET_CODE (op) == SUBREG
4611 && REG_P (SUBREG_REG (op))
4612 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
4618 /* AIX increases natural record alignment to doubleword if the first
4619 field is an FP double while the FP fields remain word aligned. */
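/* Illustration (editorial example, not from the original source): under
this rule a struct { double d; int i; } is given 64-bit alignment because
its first field is a double, while a struct { int i; double d; } keeps
word alignment and its double member remains only word aligned.  */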
4622 rs6000_special_round_type_align (tree type, unsigned int computed,
4623 unsigned int specified)
4625 unsigned int align = MAX (computed, specified);
4626 tree field = TYPE_FIELDS (type);
4628 /* Skip all non-field decls.  */
4629 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
4630 field = TREE_CHAIN (field);
4632 if (field != NULL && field != type)
4634 type = TREE_TYPE (field);
4635 while (TREE_CODE (type) == ARRAY_TYPE)
4636 type = TREE_TYPE (type);
4638 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
4639 align = MAX (align, 64);
4645 /* Darwin increases record alignment to the natural alignment of
4649 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
4650 unsigned int specified)
4652 unsigned int align = MAX (computed, specified);
4654 if (TYPE_PACKED (type))
4657 /* Find the first field, looking down into aggregates. */
4659 tree field = TYPE_FIELDS (type);
4660 /* Skip all non-field decls.  */
4661 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
4662 field = TREE_CHAIN (field);
4665 /* A packed field does not contribute any extra alignment. */
4666 if (DECL_PACKED (field))
4668 type = TREE_TYPE (field);
4669 while (TREE_CODE (type) == ARRAY_TYPE)
4670 type = TREE_TYPE (type);
4671 } while (AGGREGATE_TYPE_P (type));
4673 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
4674 align = MAX (align, TYPE_ALIGN (type));
4679 /* Return 1 for an operand in small memory on V.4/eabi. */
4682 small_data_operand (rtx op ATTRIBUTE_UNUSED,
4683 enum machine_mode mode ATTRIBUTE_UNUSED)
4688 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
4691 if (DEFAULT_ABI != ABI_V4)
4694 /* Vector and float memory instructions have a limited offset on the
4695 SPE, so using a vector or float variable directly as an operand is
4698 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
4701 if (GET_CODE (op) == SYMBOL_REF)
4704 else if (GET_CODE (op) != CONST
4705 || GET_CODE (XEXP (op, 0)) != PLUS
4706 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
4707 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
4712 rtx sum = XEXP (op, 0);
4713 HOST_WIDE_INT summand;
4715 /* We have to be careful here, because it is the referenced address
4716 that must be 32k from _SDA_BASE_, not just the symbol. */
4717 summand = INTVAL (XEXP (sum, 1));
4718 if (summand < 0 || (unsigned HOST_WIDE_INT) summand > g_switch_value)
4721 sym_ref = XEXP (sum, 0);
4724 return SYMBOL_REF_SMALL_P (sym_ref);
4730 /* Return true if either operand is a general purpose register. */
4733 gpr_or_gpr_p (rtx op0, rtx op1)
4735 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
4736 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
4740 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
4743 reg_offset_addressing_ok_p (enum machine_mode mode)
4753 /* AltiVec/VSX vector modes. Only reg+reg addressing is valid. */
4754 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
4762 /* Paired vector modes. Only reg+reg addressing is valid. */
4763 if (TARGET_PAIRED_FLOAT)
4775 virtual_stack_registers_memory_p (rtx op)
4779 if (GET_CODE (op) == REG)
4780 regnum = REGNO (op);
4782 else if (GET_CODE (op) == PLUS
4783 && GET_CODE (XEXP (op, 0)) == REG
4784 && GET_CODE (XEXP (op, 1)) == CONST_INT)
4785 regnum = REGNO (XEXP (op, 0));
4790 return (regnum >= FIRST_VIRTUAL_REGISTER
4791 && regnum <= LAST_VIRTUAL_REGISTER);
4795 constant_pool_expr_p (rtx op)
4799 split_const (op, &base, &offset);
4800 return (GET_CODE (base) == SYMBOL_REF
4801 && CONSTANT_POOL_ADDRESS_P (base)
4802 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
4806 toc_relative_expr_p (rtx op)
4810 if (GET_CODE (op) != CONST)
4813 split_const (op, &base, &offset);
4814 return (GET_CODE (base) == UNSPEC
4815 && XINT (base, 1) == UNSPEC_TOCREL);
4819 legitimate_constant_pool_address_p (rtx x)
4822 && GET_CODE (x) == PLUS
4823 && GET_CODE (XEXP (x, 0)) == REG
4824 && (TARGET_MINIMAL_TOC || REGNO (XEXP (x, 0)) == TOC_REGISTER)
4825 && toc_relative_expr_p (XEXP (x, 1)));
4829 legitimate_small_data_p (enum machine_mode mode, rtx x)
4831 return (DEFAULT_ABI == ABI_V4
4832 && !flag_pic && !TARGET_TOC
4833 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
4834 && small_data_operand (x, mode));
4837 /* SPE offset addressing is limited to 5-bits worth of double words. */
4838 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
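/* For example, the macro above accepts the offsets 0, 8, 16, ..., 248
(any multiple of 8 expressible in the 5-bit field) and rejects offsets
such as 4 or 256.  */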
4841 rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x, int strict)
4843 unsigned HOST_WIDE_INT offset, extra;
4845 if (GET_CODE (x) != PLUS)
4847 if (GET_CODE (XEXP (x, 0)) != REG)
4849 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
4851 if (!reg_offset_addressing_ok_p (mode))
4852 return virtual_stack_registers_memory_p (x);
4853 if (legitimate_constant_pool_address_p (x))
4855 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4858 offset = INTVAL (XEXP (x, 1));
4866 /* SPE vector modes. */
4867 return SPE_CONST_OFFSET_OK (offset);
4870 if (TARGET_E500_DOUBLE)
4871 return SPE_CONST_OFFSET_OK (offset);
4873 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
4875 if (VECTOR_MEM_VSX_P (DFmode))
4880 /* On e500v2, we may have:
4882 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
4884 Which gets addressed with evldd instructions. */
4885 if (TARGET_E500_DOUBLE)
4886 return SPE_CONST_OFFSET_OK (offset);
4888 if (mode == DFmode || mode == DDmode || !TARGET_POWERPC64)
4890 else if (offset & 3)
4895 if (TARGET_E500_DOUBLE)
4896 return (SPE_CONST_OFFSET_OK (offset)
4897 && SPE_CONST_OFFSET_OK (offset + 8));
4901 if (mode == TFmode || mode == TDmode || !TARGET_POWERPC64)
4903 else if (offset & 3)
4914 return (offset < 0x10000) && (offset + extra < 0x10000);
4918 legitimate_indexed_address_p (rtx x, int strict)
4922 if (GET_CODE (x) != PLUS)
4928 /* Recognize the rtl generated by reload which we know will later be
4929 replaced with proper base and index regs. */
4931 && reload_in_progress
4932 && (REG_P (op0) || GET_CODE (op0) == PLUS)
4936 return (REG_P (op0) && REG_P (op1)
4937 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
4938 && INT_REG_OK_FOR_INDEX_P (op1, strict))
4939 || (INT_REG_OK_FOR_BASE_P (op1, strict)
4940 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
4944 avoiding_indexed_address_p (enum machine_mode mode)
4946 /* Avoid indexed addressing for modes that have non-indexed
4947 load/store instruction forms. */
4948 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
4952 legitimate_indirect_address_p (rtx x, int strict)
4954 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
4958 macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
4960 if (!TARGET_MACHO || !flag_pic
4961 || mode != SImode || GET_CODE (x) != MEM)
4965 if (GET_CODE (x) != LO_SUM)
4967 if (GET_CODE (XEXP (x, 0)) != REG)
4969 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
4973 return CONSTANT_P (x);
4977 legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
4979 if (GET_CODE (x) != LO_SUM)
4981 if (GET_CODE (XEXP (x, 0)) != REG)
4983 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
4985 /* Restrict addressing for DI because of our SUBREG hackery. */
4986 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
4987 || mode == DDmode || mode == TDmode
4992 if (TARGET_ELF || TARGET_MACHO)
4994 if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
4998 if (GET_MODE_NUNITS (mode) != 1)
5000 if (GET_MODE_BITSIZE (mode) > 64
5001 || (GET_MODE_BITSIZE (mode) > 32 && !TARGET_POWERPC64
5002 && !(TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
5003 && (mode == DFmode || mode == DDmode))))
5006 return CONSTANT_P (x);
5013 /* Try machine-dependent ways of modifying an illegitimate address
5014 to be legitimate. If we find one, return the new, valid address.
5015 This is used from only one place: `memory_address' in explow.c.
5017 OLDX is the address as it was before break_out_memory_refs was
5018 called. In some cases it is useful to look at this to decide what
5021 It is always safe for this function to do nothing. It exists to
5022 recognize opportunities to optimize the output.
5024 On RS/6000, first check for the sum of a register with a constant
5025 integer that is out of range. If so, generate code to add the
5026 constant with the low-order 16 bits masked to the register and force
5027 this result into another register (this can be done with `cau').
5028 Then generate an address of REG+(CONST&0xffff), allowing for the
5029 possibility of bit 16 being a one.
5031 Then check for the sum of a register and something not constant, try to
5032 load the other things into a register and return the sum. */
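/* Worked example (illustrative): for the address
(plus (reg) (const_int 0x12348)) the code below computes low_int == 0x2348
and high_int == 0x10000, forces (reg + 0x10000) into a new register with a
single addis, and returns that register plus 0x2348.  */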
5035 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5036 enum machine_mode mode)
5038 unsigned int extra = 0;
5040 if (!reg_offset_addressing_ok_p (mode))
5042 if (virtual_stack_registers_memory_p (x))
5045 /* In theory we should not be seeing addresses of the form reg+0,
5046 but just in case it is generated, optimize it away. */
5047 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
5048 return force_reg (Pmode, XEXP (x, 0));
5050 /* Make sure both operands are registers. */
5051 else if (GET_CODE (x) == PLUS)
5052 return gen_rtx_PLUS (Pmode,
5053 force_reg (Pmode, XEXP (x, 0)),
5054 force_reg (Pmode, XEXP (x, 1)));
5056 return force_reg (Pmode, x);
5058 if (GET_CODE (x) == SYMBOL_REF)
5060 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
5062 return rs6000_legitimize_tls_address (x, model);
5072 if (!TARGET_POWERPC64)
5080 extra = TARGET_POWERPC64 ? 8 : 12;
5086 if (GET_CODE (x) == PLUS
5087 && GET_CODE (XEXP (x, 0)) == REG
5088 && GET_CODE (XEXP (x, 1)) == CONST_INT
5089 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
5091 && !((TARGET_POWERPC64
5092 && (mode == DImode || mode == TImode)
5093 && (INTVAL (XEXP (x, 1)) & 3) != 0)
5094 || SPE_VECTOR_MODE (mode)
5095 || (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
5096 || mode == DImode || mode == DDmode
5097 || mode == TDmode))))
5099 HOST_WIDE_INT high_int, low_int;
5101 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
5102 if (low_int >= 0x8000 - extra)
5104 high_int = INTVAL (XEXP (x, 1)) - low_int;
5105 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
5106 GEN_INT (high_int)), 0);
5107 return plus_constant (sum, low_int);
5109 else if (GET_CODE (x) == PLUS
5110 && GET_CODE (XEXP (x, 0)) == REG
5111 && GET_CODE (XEXP (x, 1)) != CONST_INT
5112 && GET_MODE_NUNITS (mode) == 1
5113 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5115 || ((mode != DImode && mode != DFmode && mode != DDmode)
5116 || (TARGET_E500_DOUBLE && mode != DDmode)))
5117 && (TARGET_POWERPC64 || mode != DImode)
5118 && !avoiding_indexed_address_p (mode)
5123 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
5124 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
5126 else if (SPE_VECTOR_MODE (mode)
5127 || (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
5128 || mode == DDmode || mode == TDmode
5129 || mode == DImode)))
5133 /* We accept [reg + reg] and [reg + OFFSET]. */
5135 if (GET_CODE (x) == PLUS)
5137 rtx op1 = XEXP (x, 0);
5138 rtx op2 = XEXP (x, 1);
5141 op1 = force_reg (Pmode, op1);
5143 if (GET_CODE (op2) != REG
5144 && (GET_CODE (op2) != CONST_INT
5145 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
5146 || (GET_MODE_SIZE (mode) > 8
5147 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
5148 op2 = force_reg (Pmode, op2);
5150 /* We can't always do [reg + reg] for these, because [reg +
5151 reg + offset] is not a legitimate addressing mode. */
5152 y = gen_rtx_PLUS (Pmode, op1, op2);
5154 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
5155 return force_reg (Pmode, y);
5160 return force_reg (Pmode, x);
5166 && GET_CODE (x) != CONST_INT
5167 && GET_CODE (x) != CONST_DOUBLE
5169 && GET_MODE_NUNITS (mode) == 1
5170 && (GET_MODE_BITSIZE (mode) <= 32
5171 || ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5172 && (mode == DFmode || mode == DDmode))))
5174 rtx reg = gen_reg_rtx (Pmode);
5175 emit_insn (gen_elf_high (reg, x));
5176 return gen_rtx_LO_SUM (Pmode, reg, x);
5178 else if (TARGET_MACHO && TARGET_32BIT && TARGET_NO_TOC
5181 && ! MACHO_DYNAMIC_NO_PIC_P
5183 && GET_CODE (x) != CONST_INT
5184 && GET_CODE (x) != CONST_DOUBLE
5186 && GET_MODE_NUNITS (mode) == 1
5187 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5188 || (mode != DFmode && mode != DDmode))
5192 rtx reg = gen_reg_rtx (Pmode);
5193 emit_insn (gen_macho_high (reg, x));
5194 return gen_rtx_LO_SUM (Pmode, reg, x);
5197 && GET_CODE (x) == SYMBOL_REF
5198 && constant_pool_expr_p (x)
5199 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
5201 return create_TOC_reference (x);
5207 /* Debug version of rs6000_legitimize_address. */
5209 rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
5215 ret = rs6000_legitimize_address (x, oldx, mode);
5216 insns = get_insns ();
5222 "\nrs6000_legitimize_address: mode %s, old code %s, "
5223 "new code %s, modified\n",
5224 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
5225 GET_RTX_NAME (GET_CODE (ret)));
5227 fprintf (stderr, "Original address:\n");
5230 fprintf (stderr, "oldx:\n");
5233 fprintf (stderr, "New address:\n");
5238 fprintf (stderr, "Insns added:\n");
5239 debug_rtx_list (insns, 20);
5245 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
5246 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
5257 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5258 We need to emit DTP-relative relocations. */
5261 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
5266 fputs ("\t.long\t", file);
5269 fputs (DOUBLE_INT_ASM_OP, file);
5274 output_addr_const (file, x);
5275 fputs ("@dtprel+0x8000", file);
5278 /* In the name of slightly smaller debug output, and to cater to
5279 general assembler lossage, recognize various UNSPEC sequences
5280 and turn them back into a direct symbol reference. */
5283 rs6000_delegitimize_address (rtx orig_x)
5287 orig_x = delegitimize_mem_from_attrs (orig_x);
5292 if (GET_CODE (x) == PLUS
5293 && GET_CODE (XEXP (x, 1)) == CONST
5294 && GET_CODE (XEXP (x, 0)) == REG
5295 && REGNO (XEXP (x, 0)) == TOC_REGISTER)
5297 y = XEXP (XEXP (x, 1), 0);
5298 if (GET_CODE (y) == UNSPEC
5299 && XINT (y, 1) == UNSPEC_TOCREL)
5301 y = XVECEXP (y, 0, 0);
5302 if (!MEM_P (orig_x))
5305 return replace_equiv_address_nv (orig_x, y);
5311 && GET_CODE (orig_x) == LO_SUM
5312 && GET_CODE (XEXP (x, 1)) == CONST)
5314 y = XEXP (XEXP (x, 1), 0);
5315 if (GET_CODE (y) == UNSPEC
5316 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
5317 return XVECEXP (y, 0, 0);
5323 /* Construct the SYMBOL_REF for the tls_get_addr function. */
5325 static GTY(()) rtx rs6000_tls_symbol;
5327 rs6000_tls_get_addr (void)
5329 if (!rs6000_tls_symbol)
5330 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
5332 return rs6000_tls_symbol;
5335 /* Construct the SYMBOL_REF for TLS GOT references. */
5337 static GTY(()) rtx rs6000_got_symbol;
5339 rs6000_got_sym (void)
5341 if (!rs6000_got_symbol)
5343 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
5344 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
5345 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
5348 return rs6000_got_symbol;
5351 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
5352 this (thread-local) address. */
5355 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
5359 dest = gen_reg_rtx (Pmode);
5360 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
5366 tlsreg = gen_rtx_REG (Pmode, 13);
5367 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
5371 tlsreg = gen_rtx_REG (Pmode, 2);
5372 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
5376 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
5380 tmp = gen_reg_rtx (Pmode);
5383 tlsreg = gen_rtx_REG (Pmode, 13);
5384 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
5388 tlsreg = gen_rtx_REG (Pmode, 2);
5389 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
5393 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
5395 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
5400 rtx r3, got, tga, tmp1, tmp2, call_insn;
5402 /* We currently use relocations like @got@tlsgd for tls, which
5403 means the linker will handle allocation of tls entries, placing
5404 them in the .got section. So use a pointer to the .got section,
5405 not one to secondary TOC sections used by 64-bit -mminimal-toc,
5406 or to secondary GOT sections used by 32-bit -fPIC. */
5408 got = gen_rtx_REG (Pmode, 2);
5412 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
5415 rtx gsym = rs6000_got_sym ();
5416 got = gen_reg_rtx (Pmode);
5418 rs6000_emit_move (got, gsym, Pmode);
5424 tmp1 = gen_reg_rtx (Pmode);
5425 tmp2 = gen_reg_rtx (Pmode);
5426 tmp3 = gen_reg_rtx (Pmode);
5427 mem = gen_const_mem (Pmode, tmp1);
5429 emit_insn (gen_load_toc_v4_PIC_1b (gsym));
5430 emit_move_insn (tmp1,
5431 gen_rtx_REG (Pmode, LR_REGNO));
5432 emit_move_insn (tmp2, mem);
5433 emit_insn (gen_addsi3 (tmp3, tmp1, tmp2));
5434 last = emit_move_insn (got, tmp3);
5435 set_unique_reg_note (last, REG_EQUAL, gsym);
5440 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
5442 r3 = gen_rtx_REG (Pmode, 3);
5443 tga = rs6000_tls_get_addr ();
5444 emit_library_call_value (tga, dest, LCT_CONST, Pmode, 1, r3, Pmode);
5446 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
5447 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
5448 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
5449 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
5450 else if (DEFAULT_ABI == ABI_V4)
5451 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
5454 call_insn = last_call_insn ();
5455 PATTERN (call_insn) = insn;
5456 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
5457 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
5458 pic_offset_table_rtx);
5460 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
5462 r3 = gen_rtx_REG (Pmode, 3);
5463 tga = rs6000_tls_get_addr ();
5464 tmp1 = gen_reg_rtx (Pmode);
5465 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode, 1, r3, Pmode);
5467 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
5468 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
5469 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
5470 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
5471 else if (DEFAULT_ABI == ABI_V4)
5472 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
5475 call_insn = last_call_insn ();
5476 PATTERN (call_insn) = insn;
5477 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
5478 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
5479 pic_offset_table_rtx);
5481 if (rs6000_tls_size == 16)
5484 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
5486 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
5488 else if (rs6000_tls_size == 32)
5490 tmp2 = gen_reg_rtx (Pmode);
5492 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
5494 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
5497 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
5499 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
5503 tmp2 = gen_reg_rtx (Pmode);
5505 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
5507 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
5509 insn = gen_rtx_SET (Pmode, dest,
5510 gen_rtx_PLUS (Pmode, tmp2, tmp1));
5516 /* IE, or 64-bit offset LE. */
5517 tmp2 = gen_reg_rtx (Pmode);
5519 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
5521 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
5524 insn = gen_tls_tls_64 (dest, tmp2, addr);
5526 insn = gen_tls_tls_32 (dest, tmp2, addr);
5534 /* Return 1 if X contains a thread-local symbol. */
5537 rs6000_tls_referenced_p (rtx x)
5539 if (! TARGET_HAVE_TLS)
5542 return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
5545 /* Return 1 if *X is a thread-local symbol. This is the same as
5546 rs6000_tls_symbol_ref except for the type of the unused argument. */
5549 rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
5551 return RS6000_SYMBOL_REF_TLS_P (*x);
5554 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
5555 replace the input X, or the original X if no replacement is called for.
5556 The output parameter *WIN is 1 if the calling macro should goto WIN,
5559 For RS/6000, we wish to handle large displacements off a base
5560 register by splitting the addend across an addi/addis and the mem insn.
5561 This cuts the number of extra insns needed from 3 to 1.
5563 On Darwin, we use this to generate code for floating point constants.
5564 A movsf_low is generated so we wind up with 2 instructions rather than 3.
5565 The Darwin code is inside #if TARGET_MACHO because only then are the
5566 machopic_* functions defined. */
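/* Worked example (illustrative): (plus (reg) (const_int 0x12348)) is
rewritten below as (plus (plus (reg) (const_int 0x10000)) (const_int 0x2348));
reload then loads the inner PLUS into a base register with one addis, and
the memory access keeps the 0x2348 displacement directly.  */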
5568 rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
5569 int opnum, int type,
5570 int ind_levels ATTRIBUTE_UNUSED, int *win)
5572 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
5574 /* We must recognize output that we have already generated ourselves. */
5575 if (GET_CODE (x) == PLUS
5576 && GET_CODE (XEXP (x, 0)) == PLUS
5577 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5578 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
5579 && GET_CODE (XEXP (x, 1)) == CONST_INT)
5581 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
5582 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
5583 opnum, (enum reload_type)type);
5589 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
5590 && GET_CODE (x) == LO_SUM
5591 && GET_CODE (XEXP (x, 0)) == PLUS
5592 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
5593 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
5594 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
5595 && machopic_operand_p (XEXP (x, 1)))
5597 /* Result of previous invocation of this function on Darwin
5598 floating point constant. */
5599 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
5600 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5601 opnum, (enum reload_type)type);
5607 /* Force ld/std non-word aligned offset into base register by wrapping
5609 if (GET_CODE (x) == PLUS
5610 && GET_CODE (XEXP (x, 0)) == REG
5611 && REGNO (XEXP (x, 0)) < 32
5612 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
5613 && GET_CODE (XEXP (x, 1)) == CONST_INT
5615 && (INTVAL (XEXP (x, 1)) & 3) != 0
5616 && VECTOR_MEM_NONE_P (mode)
5617 && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
5618 && TARGET_POWERPC64)
5620 x = gen_rtx_PLUS (GET_MODE (x), x, GEN_INT (0));
5621 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
5622 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
5623 opnum, (enum reload_type) type);
5628 if (GET_CODE (x) == PLUS
5629 && GET_CODE (XEXP (x, 0)) == REG
5630 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
5631 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
5632 && GET_CODE (XEXP (x, 1)) == CONST_INT
5634 && !SPE_VECTOR_MODE (mode)
5635 && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
5636 || mode == DDmode || mode == TDmode
5638 && VECTOR_MEM_NONE_P (mode))
5640 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
5641 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
5643 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
5645 /* Check for 32-bit overflow. */
5646 if (high + low != val)
5652 /* Reload the high part into a base reg; leave the low part
5653 in the mem directly. */
5655 x = gen_rtx_PLUS (GET_MODE (x),
5656 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
5660 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
5661 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
5662 opnum, (enum reload_type)type);
5667 if (GET_CODE (x) == SYMBOL_REF
5669 && VECTOR_MEM_NONE_P (mode)
5670 && !SPE_VECTOR_MODE (mode)
5672 && DEFAULT_ABI == ABI_DARWIN
5673 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
5675 && DEFAULT_ABI == ABI_V4
5678 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
5679 The same goes for DImode without 64-bit gprs and DFmode and DDmode
5683 && (mode != DImode || TARGET_POWERPC64)
5684 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
5685 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
5690 rtx offset = machopic_gen_offset (x);
5691 x = gen_rtx_LO_SUM (GET_MODE (x),
5692 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
5693 gen_rtx_HIGH (Pmode, offset)), offset);
5697 x = gen_rtx_LO_SUM (GET_MODE (x),
5698 gen_rtx_HIGH (Pmode, x), x);
5700 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
5701 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5702 opnum, (enum reload_type)type);
5707 /* Reload an offset address wrapped by an AND that represents the
5708 masking of the lower bits. Strip the outer AND and let reload
5709 convert the offset address into an indirect address. For VSX,
5710 force reload to create the address with an AND in a separate
5711 register, because we can't guarantee an altivec register will
5713 if (VECTOR_MEM_ALTIVEC_P (mode)
5714 && GET_CODE (x) == AND
5715 && GET_CODE (XEXP (x, 0)) == PLUS
5716 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5717 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
5718 && GET_CODE (XEXP (x, 1)) == CONST_INT
5719 && INTVAL (XEXP (x, 1)) == -16)
5728 && GET_CODE (x) == SYMBOL_REF
5729 && constant_pool_expr_p (x)
5730 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), mode))
5732 x = create_TOC_reference (x);
5740 /* Debug version of rs6000_legitimize_reload_address. */
5742 rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
5743 int opnum, int type,
5744 int ind_levels, int *win)
5746 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
5749 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
5750 "type = %d, ind_levels = %d, win = %d, original addr:\n",
5751 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
5755 fprintf (stderr, "Same address returned\n");
5757 fprintf (stderr, "NULL returned\n");
5760 fprintf (stderr, "New address:\n");
5767 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
5768 that is a valid memory address for an instruction.
5769 The MODE argument is the machine mode for the MEM expression
5770 that wants to use this address.
5772 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
5773 refers to a constant pool entry of an address (or the sum of it
5774 plus a constant), a short (16-bit signed) constant plus a register,
5775 the sum of two registers, or a register indirect, possibly with an
5776 auto-increment. For DFmode, DDmode and DImode with a constant plus
5777 register, we must ensure that both words are addressable or PowerPC64
5778 with offset word aligned.
5780 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
5781 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
5782 because adjacent memory cells are accessed by adding word-sized offsets
5783 during assembly output. */
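/* For illustration (arbitrary register numbers): (mem:SI (reg:SI 9)),
(mem:SI (plus:SI (reg:SI 9) (const_int 16))) and
(mem:SI (plus:SI (reg:SI 9) (reg:SI 10))) are all legitimate, whereas a
displacement such as (const_int 100000) is not, since it does not fit in a
signed 16-bit offset.  */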
5785 rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
5787 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
5789 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
5790 if (VECTOR_MEM_ALTIVEC_P (mode)
5791 && GET_CODE (x) == AND
5792 && GET_CODE (XEXP (x, 1)) == CONST_INT
5793 && INTVAL (XEXP (x, 1)) == -16)
5796 if (RS6000_SYMBOL_REF_TLS_P (x))
5798 if (legitimate_indirect_address_p (x, reg_ok_strict))
5800 if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
5801 && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
5802 && !SPE_VECTOR_MODE (mode)
5805 /* Restrict addressing for DI because of our SUBREG hackery. */
5806 && !(TARGET_E500_DOUBLE
5807 && (mode == DFmode || mode == DDmode || mode == DImode))
5809 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
5811 if (virtual_stack_registers_memory_p (x))
5813 if (reg_offset_p && legitimate_small_data_p (mode, x))
5815 if (reg_offset_p && legitimate_constant_pool_address_p (x))
5817 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
5820 && GET_CODE (x) == PLUS
5821 && GET_CODE (XEXP (x, 0)) == REG
5822 && (XEXP (x, 0) == virtual_stack_vars_rtx
5823 || XEXP (x, 0) == arg_pointer_rtx)
5824 && GET_CODE (XEXP (x, 1)) == CONST_INT)
5826 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict))
5831 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5833 || (mode != DFmode && mode != DDmode)
5834 || (TARGET_E500_DOUBLE && mode != DDmode))
5835 && (TARGET_POWERPC64 || mode != DImode)
5836 && !avoiding_indexed_address_p (mode)
5837 && legitimate_indexed_address_p (x, reg_ok_strict))
5839 if (GET_CODE (x) == PRE_MODIFY
5843 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5845 || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
5846 && (TARGET_POWERPC64 || mode != DImode)
5847 && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
5848 && !SPE_VECTOR_MODE (mode)
5849 /* Restrict addressing for DI because of our SUBREG hackery. */
5850 && !(TARGET_E500_DOUBLE
5851 && (mode == DFmode || mode == DDmode || mode == DImode))
5853 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
5854 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1), reg_ok_strict)
5855 || (!avoiding_indexed_address_p (mode)
5856 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
5857 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
5859 if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
5864 /* Debug version of rs6000_legitimate_address_p. */
5866 rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
5869 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
5871 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
5872 "strict = %d, code = %s\n",
5873 ret ? "true" : "false",
5874 GET_MODE_NAME (mode),
5876 GET_RTX_NAME (GET_CODE (x)));
5882 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
5885 rs6000_mode_dependent_address_p (const_rtx addr)
5887 return rs6000_mode_dependent_address_ptr (addr);
5890 /* Go to LABEL if ADDR (a legitimate address expression)
5891 has an effect that depends on the machine mode it is used for.
5893 On the RS/6000 this is true of all integral offsets (since AltiVec
5894 and VSX modes don't allow them), and of pre-increment and pre-decrement addresses.
5896 ??? Except that due to conceptual problems in offsettable_address_p
5897 we can't really report the problems of integral offsets. So leave
5898 this assuming that the adjustable offset must be valid for the
5899 sub-words of a TFmode operand, which is what we had before. */
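/* For example, an offset of 32760 is reported as mode dependent below,
because 32760 + 12 no longer fits in a signed 16-bit displacement, so the
last word of a 16-byte (TFmode) operand at that offset could not be
addressed; see also rs6000_offsettable_memref_p.  */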
5902 rs6000_mode_dependent_address (const_rtx addr)
5904 switch (GET_CODE (addr))
5907 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
5908 is considered a legitimate address before reload, so there
5909 are no offset restrictions in that case. Note that this
5910 condition is safe in strict mode because any address involving
5911 virtual_stack_vars_rtx or arg_pointer_rtx would already have
5912 been rejected as illegitimate. */
5913 if (XEXP (addr, 0) != virtual_stack_vars_rtx
5914 && XEXP (addr, 0) != arg_pointer_rtx
5915 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5917 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
5918 return val + 12 + 0x8000 >= 0x10000;
5925 /* Auto-increment cases are now treated generically in recog.c. */
5927 return TARGET_UPDATE;
5929 /* AND is only allowed in Altivec loads. */
5940 /* Debug version of rs6000_mode_dependent_address. */
5942 rs6000_debug_mode_dependent_address (const_rtx addr)
5944 bool ret = rs6000_mode_dependent_address (addr);
5946 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
5947 ret ? "true" : "false");
5953 /* Implement FIND_BASE_TERM. */
5956 rs6000_find_base_term (rtx op)
5960 split_const (op, &base, &offset);
5961 if (GET_CODE (base) == UNSPEC)
5962 switch (XINT (base, 1))
5965 case UNSPEC_MACHOPIC_OFFSET:
5966 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
5967 for aliasing purposes. */
5968 return XVECEXP (base, 0, 0);
5974 /* More elaborate version of recog's offsettable_memref_p predicate
5975 that works around the ??? note of rs6000_mode_dependent_address.
5976 In particular it accepts
5978 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
5980 in 32-bit mode, which the recog predicate rejects.
5983 rs6000_offsettable_memref_p (rtx op)
5988 /* First mimic offsettable_memref_p. */
5989 if (offsettable_address_p (1, GET_MODE (op), XEXP (op, 0)))
5992 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
5993 the latter predicate knows nothing about the mode of the memory
5994 reference and, therefore, assumes that it is the largest supported
5995 mode (TFmode). As a consequence, legitimate offsettable memory
5996 references are rejected. rs6000_legitimate_offset_address_p contains
5997 the correct logic for the PLUS case of rs6000_mode_dependent_address. */
5998 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0), 1);
6001 /* Change register usage conditional on target flags. */
6003 rs6000_conditional_register_usage (void)
6007 /* Set MQ register fixed (already call_used) if not POWER
6008 architecture (RIOS1, RIOS2, RSC, and PPC601) so that it will not
6013 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
6015 fixed_regs[13] = call_used_regs[13]
6016 = call_really_used_regs[13] = 1;
6018 /* Conditionally disable FPRs. */
6019 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
6020 for (i = 32; i < 64; i++)
6021 fixed_regs[i] = call_used_regs[i]
6022 = call_really_used_regs[i] = 1;
6024 /* The TOC register is not killed across calls in a way that is
6025 visible to the compiler. */
6026 if (DEFAULT_ABI == ABI_AIX)
6027 call_really_used_regs[2] = 0;
6029 if (DEFAULT_ABI == ABI_V4
6030 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
6032 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6034 if (DEFAULT_ABI == ABI_V4
6035 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
6037 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6038 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6039 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6041 if (DEFAULT_ABI == ABI_DARWIN
6042 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
6043 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6044 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6045 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6047 if (TARGET_TOC && TARGET_MINIMAL_TOC)
6048 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6049 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6053 global_regs[SPEFSCR_REGNO] = 1;
6054 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
6055 registers in prologues and epilogues. We no longer use r14
6056 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
6057 pool for link-compatibility with older versions of GCC. Once
6058 "old" code has died out, we can return r14 to the allocation
6061 = call_used_regs[14]
6062 = call_really_used_regs[14] = 1;
6065 if (!TARGET_ALTIVEC && !TARGET_VSX)
6067 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
6068 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
6069 call_really_used_regs[VRSAVE_REGNO] = 1;
6072 if (TARGET_ALTIVEC || TARGET_VSX)
6073 global_regs[VSCR_REGNO] = 1;
6075 if (TARGET_ALTIVEC_ABI)
6077 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
6078 call_used_regs[i] = call_really_used_regs[i] = 1;
6080 /* AIX reserves VR20:31 in non-extended ABI mode. */
6082 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
6083 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
6087 /* Try to output insns to set TARGET equal to the constant C if it can
6088 be done in less than N insns. Do all computations in MODE.
6089 Returns the place where the output has been placed if it can be
6090 done and the insns have been emitted. If it would take more than N
6091 insns, zero is returned and no insns are emitted. */
6094 rs6000_emit_set_const (rtx dest, enum machine_mode mode,
6095 rtx source, int n ATTRIBUTE_UNUSED)
6097 rtx result, insn, set;
6098 HOST_WIDE_INT c0, c1;
6105 dest = gen_reg_rtx (mode);
6106 emit_insn (gen_rtx_SET (VOIDmode, dest, source));
6110 result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
6112 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
6113 GEN_INT (INTVAL (source)
6114 & (~ (HOST_WIDE_INT) 0xffff))));
6115 emit_insn (gen_rtx_SET (VOIDmode, dest,
6116 gen_rtx_IOR (SImode, copy_rtx (result),
6117 GEN_INT (INTVAL (source) & 0xffff))));
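/* Worked example, for illustration only (value chosen arbitrarily):
   for SOURCE = 0x12345678 the first SET above loads 0x12340000 into
   RESULT and the IOR then merges in the low half 0x5678, i.e. the
   familiar lis/ori pair used to build a 32-bit constant.  */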
6122 switch (GET_CODE (source))
6125 c0 = INTVAL (source);
6130 #if HOST_BITS_PER_WIDE_INT >= 64
6131 c0 = CONST_DOUBLE_LOW (source);
6134 c0 = CONST_DOUBLE_LOW (source);
6135 c1 = CONST_DOUBLE_HIGH (source);
6143 result = rs6000_emit_set_long_const (dest, c0, c1);
6150 insn = get_last_insn ();
6151 set = single_set (insn);
6152 if (! CONSTANT_P (SET_SRC (set)))
6153 set_unique_reg_note (insn, REG_EQUAL, source);
6158 /* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
6159 fall back to a straightforward decomposition. We do this to avoid
6160 exponential run times encountered when looking for longer sequences
6161 with rs6000_emit_set_const. */
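/* Illustrative sketch (not part of the original code): the constant is
   split into 16-bit halfwords ud1..ud4, least significant first.  For a
   value with all four halfwords significant, e.g. 0x123456789abcdef0,
   the emitted sequence is roughly

       lis   dest, 0x1234          # ud4, sign-adjusted
       ori   dest, dest, 0x5678    # ud3
       sldi  dest, dest, 32
       oris  dest, dest, 0x9abc    # ud2
       ori   dest, dest, 0xdef0    # ud1

   Smaller constants take the shorter paths below.  */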
6163 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
6165 if (!TARGET_POWERPC64)
6167 rtx operand1, operand2;
6169 operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
6171 operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
6173 emit_move_insn (operand1, GEN_INT (c1));
6174 emit_move_insn (operand2, GEN_INT (c2));
6178 HOST_WIDE_INT ud1, ud2, ud3, ud4;
6181 ud2 = (c1 & 0xffff0000) >> 16;
6182 #if HOST_BITS_PER_WIDE_INT >= 64
6186 ud4 = (c2 & 0xffff0000) >> 16;
6188 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
6189 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
6192 emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
6194 emit_move_insn (dest, GEN_INT (ud1));
6197 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
6198 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
6201 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
6204 emit_move_insn (dest, GEN_INT (ud2 << 16));
6206 emit_move_insn (copy_rtx (dest),
6207 gen_rtx_IOR (DImode, copy_rtx (dest),
6210 else if (ud3 == 0 && ud4 == 0)
6212 gcc_assert (ud2 & 0x8000);
6213 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
6216 emit_move_insn (copy_rtx (dest),
6217 gen_rtx_IOR (DImode, copy_rtx (dest),
6219 emit_move_insn (copy_rtx (dest),
6220 gen_rtx_ZERO_EXTEND (DImode,
6221 gen_lowpart (SImode,
6224 else if ((ud4 == 0xffff && (ud3 & 0x8000))
6225 || (ud4 == 0 && ! (ud3 & 0x8000)))
6228 emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
6231 emit_move_insn (dest, GEN_INT (ud3 << 16));
6234 emit_move_insn (copy_rtx (dest),
6235 gen_rtx_IOR (DImode, copy_rtx (dest),
6237 emit_move_insn (copy_rtx (dest),
6238 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
6241 emit_move_insn (copy_rtx (dest),
6242 gen_rtx_IOR (DImode, copy_rtx (dest),
6248 emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
6251 emit_move_insn (dest, GEN_INT (ud4 << 16));
6254 emit_move_insn (copy_rtx (dest),
6255 gen_rtx_IOR (DImode, copy_rtx (dest),
6258 emit_move_insn (copy_rtx (dest),
6259 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
6262 emit_move_insn (copy_rtx (dest),
6263 gen_rtx_IOR (DImode, copy_rtx (dest),
6264 GEN_INT (ud2 << 16)));
6266 emit_move_insn (copy_rtx (dest),
6267 gen_rtx_IOR (DImode, copy_rtx (dest), GEN_INT (ud1)));
6273 /* Helper for the following. Get rid of [r+r] memory refs
6274 in cases where they won't work (TImode, TFmode, TDmode). */
6277 rs6000_eliminate_indexed_memrefs (rtx operands[2])
6279 if (GET_CODE (operands[0]) == MEM
6280 && GET_CODE (XEXP (operands[0], 0)) != REG
6281 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0))
6282 && ! reload_in_progress)
6284 = replace_equiv_address (operands[0],
6285 copy_addr_to_reg (XEXP (operands[0], 0)));
6287 if (GET_CODE (operands[1]) == MEM
6288 && GET_CODE (XEXP (operands[1], 0)) != REG
6289 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0))
6290 && ! reload_in_progress)
6292 = replace_equiv_address (operands[1],
6293 copy_addr_to_reg (XEXP (operands[1], 0)));
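/* For illustration only: given a TImode store whose destination is
   (mem (plus (reg 3) (reg 4))), the address is first copied into a
   fresh pseudo here, so the caller's move then operates on a simple
   (mem (reg)) instead of the unsupported [r+r] form.  */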
6296 /* Emit a move from SOURCE to DEST in mode MODE. */
6298 rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
6302 operands[1] = source;
6304 if (TARGET_DEBUG_ADDR)
6307 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
6308 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
6309 GET_MODE_NAME (mode),
6312 can_create_pseudo_p ());
6314 fprintf (stderr, "source:\n");
6318 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
6319 if (GET_CODE (operands[1]) == CONST_DOUBLE
6320 && ! FLOAT_MODE_P (mode)
6321 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
6323 /* FIXME. This should never happen. */
6324 /* Since it seems that it does, do the safe thing and convert
6326 operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
6328 gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
6329 || FLOAT_MODE_P (mode)
6330 || ((CONST_DOUBLE_HIGH (operands[1]) != 0
6331 || CONST_DOUBLE_LOW (operands[1]) < 0)
6332 && (CONST_DOUBLE_HIGH (operands[1]) != -1
6333 || CONST_DOUBLE_LOW (operands[1]) >= 0)));
6335 /* Check if GCC is setting up a block move that will end up using FP
6336 registers as temporaries. We must make sure this is acceptable. */
6337 if (GET_CODE (operands[0]) == MEM
6338 && GET_CODE (operands[1]) == MEM
6340 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
6341 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
6342 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
6343 ? 32 : MEM_ALIGN (operands[0])))
6344 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
6346 : MEM_ALIGN (operands[1]))))
6347 && ! MEM_VOLATILE_P (operands [0])
6348 && ! MEM_VOLATILE_P (operands [1]))
6350 emit_move_insn (adjust_address (operands[0], SImode, 0),
6351 adjust_address (operands[1], SImode, 0));
6352 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
6353 adjust_address (copy_rtx (operands[1]), SImode, 4));
6357 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
6358 && !gpc_reg_operand (operands[1], mode))
6359 operands[1] = force_reg (mode, operands[1]);
6361 if (mode == SFmode && ! TARGET_POWERPC
6362 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
6363 && GET_CODE (operands[0]) == MEM)
6367 if (reload_in_progress || reload_completed)
6368 regnum = true_regnum (operands[1]);
6369 else if (GET_CODE (operands[1]) == REG)
6370 regnum = REGNO (operands[1]);
6374 /* If operands[1] is a register, on POWER it may have
6375 double-precision data in it, so truncate it to single
6377 if (FP_REGNO_P (regnum) || regnum >= FIRST_PSEUDO_REGISTER)
6380 newreg = (!can_create_pseudo_p () ? copy_rtx (operands[1])
6381 : gen_reg_rtx (mode));
6382 emit_insn (gen_aux_truncdfsf2 (newreg, operands[1]));
6383 operands[1] = newreg;
6387 /* Recognize the case where operand[1] is a reference to thread-local
6388 data and load its address to a register. */
6389 if (rs6000_tls_referenced_p (operands[1]))
6391 enum tls_model model;
6392 rtx tmp = operands[1];
6395 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
6397 addend = XEXP (XEXP (tmp, 0), 1);
6398 tmp = XEXP (XEXP (tmp, 0), 0);
6401 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
6402 model = SYMBOL_REF_TLS_MODEL (tmp);
6403 gcc_assert (model != 0);
6405 tmp = rs6000_legitimize_tls_address (tmp, model);
6408 tmp = gen_rtx_PLUS (mode, tmp, addend);
6409 tmp = force_operand (tmp, operands[0]);
6414 /* Handle the case where reload calls us with an invalid address. */
6415 if (reload_in_progress && mode == Pmode
6416 && (! general_operand (operands[1], mode)
6417 || ! nonimmediate_operand (operands[0], mode)))
6420 /* 128-bit constant floating-point values on Darwin should really be
6421 loaded as two parts. */
6422 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
6423 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
6425 /* DImode is used, not DFmode, because simplify_gen_subreg doesn't
6426 know how to get a DFmode SUBREG of a TFmode. */
6427 enum machine_mode imode = (TARGET_E500_DOUBLE ? DFmode : DImode);
6428 rs6000_emit_move (simplify_gen_subreg (imode, operands[0], mode, 0),
6429 simplify_gen_subreg (imode, operands[1], mode, 0),
6431 rs6000_emit_move (simplify_gen_subreg (imode, operands[0], mode,
6432 GET_MODE_SIZE (imode)),
6433 simplify_gen_subreg (imode, operands[1], mode,
6434 GET_MODE_SIZE (imode)),
6439 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
6440 cfun->machine->sdmode_stack_slot =
6441 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
6443 if (reload_in_progress
6445 && MEM_P (operands[0])
6446 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
6447 && REG_P (operands[1]))
6449 if (FP_REGNO_P (REGNO (operands[1])))
6451 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
6452 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
6453 emit_insn (gen_movsd_store (mem, operands[1]));
6455 else if (INT_REGNO_P (REGNO (operands[1])))
6457 rtx mem = adjust_address_nv (operands[0], mode, 4);
6458 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
6459 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
6465 if (reload_in_progress
6467 && REG_P (operands[0])
6468 && MEM_P (operands[1])
6469 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
6471 if (FP_REGNO_P (REGNO (operands[0])))
6473 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
6474 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
6475 emit_insn (gen_movsd_load (operands[0], mem));
6477 else if (INT_REGNO_P (REGNO (operands[0])))
6479 rtx mem = adjust_address_nv (operands[1], mode, 4);
6480 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
6481 emit_insn (gen_movsd_hardfloat (operands[0], mem));
6488 /* FIXME: In the long term, this switch statement should go away
6489 and be replaced by a sequence of tests based on things like
6495 if (CONSTANT_P (operands[1])
6496 && GET_CODE (operands[1]) != CONST_INT)
6497 operands[1] = force_const_mem (mode, operands[1]);
6502 rs6000_eliminate_indexed_memrefs (operands);
6509 if (CONSTANT_P (operands[1])
6510 && ! easy_fp_constant (operands[1], mode))
6511 operands[1] = force_const_mem (mode, operands[1]);
6524 if (CONSTANT_P (operands[1])
6525 && !easy_vector_constant (operands[1], mode))
6526 operands[1] = force_const_mem (mode, operands[1]);
6531 /* Use default pattern for address of ELF small data */
6534 && DEFAULT_ABI == ABI_V4
6535 && (GET_CODE (operands[1]) == SYMBOL_REF
6536 || GET_CODE (operands[1]) == CONST)
6537 && small_data_operand (operands[1], mode))
6539 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
6543 if (DEFAULT_ABI == ABI_V4
6544 && mode == Pmode && mode == SImode
6545 && flag_pic == 1 && got_operand (operands[1], mode))
6547 emit_insn (gen_movsi_got (operands[0], operands[1]));
6551 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
6555 && CONSTANT_P (operands[1])
6556 && GET_CODE (operands[1]) != HIGH
6557 && GET_CODE (operands[1]) != CONST_INT)
6559 rtx target = (!can_create_pseudo_p ()
6561 : gen_reg_rtx (mode));
6563 /* If this is a function address on -mcall-aixdesc,
6564 convert it to the address of the descriptor. */
6565 if (DEFAULT_ABI == ABI_AIX
6566 && GET_CODE (operands[1]) == SYMBOL_REF
6567 && XSTR (operands[1], 0)[0] == '.')
6569 const char *name = XSTR (operands[1], 0);
6571 while (*name == '.')
6573 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
6574 CONSTANT_POOL_ADDRESS_P (new_ref)
6575 = CONSTANT_POOL_ADDRESS_P (operands[1]);
6576 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
6577 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
6578 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
6579 operands[1] = new_ref;
6582 if (DEFAULT_ABI == ABI_DARWIN)
6585 if (MACHO_DYNAMIC_NO_PIC_P)
6587 /* Take care of any required data indirection. */
6588 operands[1] = rs6000_machopic_legitimize_pic_address (
6589 operands[1], mode, operands[0]);
6590 if (operands[0] != operands[1])
6591 emit_insn (gen_rtx_SET (VOIDmode,
6592 operands[0], operands[1]));
6596 emit_insn (gen_macho_high (target, operands[1]));
6597 emit_insn (gen_macho_low (operands[0], target, operands[1]));
6601 emit_insn (gen_elf_high (target, operands[1]));
6602 emit_insn (gen_elf_low (operands[0], target, operands[1]));
6606 /* If this is a SYMBOL_REF that refers to a constant pool entry,
6607 and we have put it in the TOC, we just need to make a TOC-relative
6610 && GET_CODE (operands[1]) == SYMBOL_REF
6611 && constant_pool_expr_p (operands[1])
6612 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (operands[1]),
6613 get_pool_mode (operands[1])))
6615 operands[1] = create_TOC_reference (operands[1]);
6617 else if (mode == Pmode
6618 && CONSTANT_P (operands[1])
6619 && ((GET_CODE (operands[1]) != CONST_INT
6620 && ! easy_fp_constant (operands[1], mode))
6621 || (GET_CODE (operands[1]) == CONST_INT
6622 && num_insns_constant (operands[1], mode) > 2)
6623 || (GET_CODE (operands[0]) == REG
6624 && FP_REGNO_P (REGNO (operands[0]))))
6625 && GET_CODE (operands[1]) != HIGH
6626 && ! legitimate_constant_pool_address_p (operands[1])
6627 && ! toc_relative_expr_p (operands[1]))
6631 /* Darwin uses a special PIC legitimizer. */
6632 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
6635 rs6000_machopic_legitimize_pic_address (operands[1], mode,
6637 if (operands[0] != operands[1])
6638 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
6643 /* If we are to limit the number of things we put in the TOC and
6644 this is a symbol plus a constant we can add in one insn,
6645 just put the symbol in the TOC and add the constant. Don't do
6646 this if reload is in progress. */
6647 if (GET_CODE (operands[1]) == CONST
6648 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
6649 && GET_CODE (XEXP (operands[1], 0)) == PLUS
6650 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
6651 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
6652 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
6653 && ! side_effects_p (operands[0]))
6656 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
6657 rtx other = XEXP (XEXP (operands[1], 0), 1);
6659 sym = force_reg (mode, sym);
6660 emit_insn (gen_add3_insn (operands[0], sym, other));
6664 operands[1] = force_const_mem (mode, operands[1]);
6667 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
6668 && constant_pool_expr_p (XEXP (operands[1], 0))
6669 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
6670 get_pool_constant (XEXP (operands[1], 0)),
6671 get_pool_mode (XEXP (operands[1], 0))))
6674 = gen_const_mem (mode,
6675 create_TOC_reference (XEXP (operands[1], 0)));
6676 set_mem_alias_set (operands[1], get_TOC_alias_set ());
6682 rs6000_eliminate_indexed_memrefs (operands);
6686 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6688 gen_rtx_SET (VOIDmode,
6689 operands[0], operands[1]),
6690 gen_rtx_CLOBBER (VOIDmode,
6691 gen_rtx_SCRATCH (SImode)))));
6697 fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
6700 /* Above, we may have called force_const_mem which may have returned
6701 an invalid address. If we can, fix this up; otherwise, reload will
6702 have to deal with it. */
6703 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
6704 operands[1] = validize_mem (operands[1]);
6707 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
6710 /* Nonzero if we can use a floating-point register to pass this arg. */
6711 #define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
6712 (SCALAR_FLOAT_MODE_P (MODE) \
6713 && (CUM)->fregno <= FP_ARG_MAX_REG \
6714 && TARGET_HARD_FLOAT && TARGET_FPRS)
6716 /* Nonzero if we can use an AltiVec register to pass this arg. */
6717 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED) \
6718 ((ALTIVEC_VECTOR_MODE (MODE) || VSX_VECTOR_MODE (MODE)) \
6719 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
6720 && TARGET_ALTIVEC_ABI \
6723 /* Return a nonzero value to indicate that the function value should be
6724 returned in memory, just as large structures always are. TYPE will be
6725 the data type of the value, and FNTYPE will be the type of the
6726 function doing the returning, or @code{NULL} for libcalls.
6728 The AIX ABI for the RS/6000 specifies that all structures are
6729 returned in memory. The Darwin ABI does the same. The SVR4 ABI
6730 specifies that structures <= 8 bytes are returned in r3/r4, but a
6731 draft put them in memory, and GCC used to implement the draft
6732 instead of the final standard. Therefore, aix_struct_return
6733 controls this instead of DEFAULT_ABI; V.4 targets needing backward
6734 compatibility can change DRAFT_V4_STRUCT_RET to override the
6735 default, and -m switches get the final word. See
6736 rs6000_override_options for more details.
6738 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
6739 long double support is enabled. These values are returned in memory.
6741 int_size_in_bytes returns -1 for variable size objects, which go in
6742 memory always. The cast to unsigned makes -1 > 8. */
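/* For illustration (sizes chosen arbitrarily): under the SVR4 rules an
   8-byte struct such as { int a; int b; } comes back in r3/r4 when
   aix_struct_return is clear, while a 12-byte struct goes to memory.
   A variable sized object has int_size_in_bytes == -1, and the cast to
   unsigned HOST_WIDE_INT turns that into the largest value, so the
   "> 8" test below also sends it to memory.  */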
6745 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6747 /* In the darwin64 abi, try to use registers for larger structs
6749 if (rs6000_darwin64_abi
6750 && TREE_CODE (type) == RECORD_TYPE
6751 && int_size_in_bytes (type) > 0)
6753 CUMULATIVE_ARGS valcum;
6757 valcum.fregno = FP_ARG_MIN_REG;
6758 valcum.vregno = ALTIVEC_ARG_MIN_REG;
6759 /* Do a trial code generation as if this were going to be passed
6760 as an argument; if any part goes in memory, we return NULL. */
6761 valret = rs6000_darwin64_record_arg (&valcum, type, 1, true);
6764 /* Otherwise fall through to more conventional ABI rules. */
6767 if (AGGREGATE_TYPE_P (type)
6768 && (aix_struct_return
6769 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
6772 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
6773 modes only exist for GCC vector types if -maltivec. */
6774 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
6775 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
6778 /* Return synthetic vectors in memory. */
6779 if (TREE_CODE (type) == VECTOR_TYPE
6780 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
6782 static bool warned_for_return_big_vectors = false;
6783 if (!warned_for_return_big_vectors)
6785 warning (0, "GCC vector returned by reference: "
6786 "non-standard ABI extension with no compatibility guarantee");
6787 warned_for_return_big_vectors = true;
6792 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
6798 /* Initialize a variable CUM of type CUMULATIVE_ARGS
6799 for a call to a function whose data type is FNTYPE.
6800 For a library call, FNTYPE is 0.
6802 For incoming args we set the number of arguments in the prototype large
6803 so we never return a PARALLEL. */
6806 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
6807 rtx libname ATTRIBUTE_UNUSED, int incoming,
6808 int libcall, int n_named_args)
6810 static CUMULATIVE_ARGS zero_cumulative;
6812 *cum = zero_cumulative;
6814 cum->fregno = FP_ARG_MIN_REG;
6815 cum->vregno = ALTIVEC_ARG_MIN_REG;
6816 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
6817 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
6818 ? CALL_LIBCALL : CALL_NORMAL);
6819 cum->sysv_gregno = GP_ARG_MIN_REG;
6820 cum->stdarg = fntype
6821 && (TYPE_ARG_TYPES (fntype) != 0
6822 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
6823 != void_type_node));
6825 cum->nargs_prototype = 0;
6826 if (incoming || cum->prototype)
6827 cum->nargs_prototype = n_named_args;
6829 /* Check for a longcall attribute. */
6830 if ((!fntype && rs6000_default_long_calls)
6832 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
6833 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
6834 cum->call_cookie |= CALL_LONG;
6836 if (TARGET_DEBUG_ARG)
6838 fprintf (stderr, "\ninit_cumulative_args:");
6841 tree ret_type = TREE_TYPE (fntype);
6842 fprintf (stderr, " ret code = %s,",
6843 tree_code_name[ (int)TREE_CODE (ret_type) ]);
6846 if (cum->call_cookie & CALL_LONG)
6847 fprintf (stderr, " longcall,");
6849 fprintf (stderr, " proto = %d, nargs = %d\n",
6850 cum->prototype, cum->nargs_prototype);
6855 && TARGET_ALTIVEC_ABI
6856 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
6858 error ("cannot return value in vector register because"
6859 " altivec instructions are disabled, use -maltivec"
6864 /* Return true if TYPE must be passed on the stack and not in registers. */
6867 rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
6869 if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
6870 return must_pass_in_stack_var_size (mode, type);
6872 return must_pass_in_stack_var_size_or_pad (mode, type);
6875 /* If defined, a C expression which determines whether, and in which
6876 direction, to pad out an argument with extra space. The value
6877 should be of type `enum direction': either `upward' to pad above
6878 the argument, `downward' to pad below, or `none' to inhibit
6881 For the AIX ABI, structs are always stored left-shifted in their
6885 function_arg_padding (enum machine_mode mode, const_tree type)
6887 #ifndef AGGREGATE_PADDING_FIXED
6888 #define AGGREGATE_PADDING_FIXED 0
6890 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
6891 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
6894 if (!AGGREGATE_PADDING_FIXED)
6896 /* GCC used to pass structures of the same size as integer types as
6897 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
6898 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
6899 passed padded downward, except that -mstrict-align further
6900 muddied the water in that multi-component structures of 2 and 4
6901 bytes in size were passed padded upward.
6903 The following arranges for best compatibility with previous
6904 versions of gcc, but removes the -mstrict-align dependency. */
6905 if (BYTES_BIG_ENDIAN)
6907 HOST_WIDE_INT size = 0;
6909 if (mode == BLKmode)
6911 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
6912 size = int_size_in_bytes (type);
6915 size = GET_MODE_SIZE (mode);
6917 if (size == 1 || size == 2 || size == 4)
6923 if (AGGREGATES_PAD_UPWARD_ALWAYS)
6925 if (type != 0 && AGGREGATE_TYPE_P (type))
6929 /* Fall back to the default. */
6930 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
6933 /* If defined, a C expression that gives the alignment boundary, in bits,
6934 of an argument with the specified mode and type. If it is not defined,
6935 PARM_BOUNDARY is used for all arguments.
6937 V.4 wants long longs and doubles to be double word aligned. Just
6938 testing the mode size is a boneheaded way to do this as it means
6939 that other types such as complex int are also double word aligned.
6940 However, we're stuck with this because changing the ABI might break
6941 existing library interfaces.
6943 Doubleword align SPE vectors.
6944 Quadword align Altivec vectors.
6945 Quadword align large synthetic vector types. */
6948 function_arg_boundary (enum machine_mode mode, tree type)
6950 if (DEFAULT_ABI == ABI_V4
6951 && (GET_MODE_SIZE (mode) == 8
6952 || (TARGET_HARD_FLOAT
6954 && (mode == TFmode || mode == TDmode))))
6956 else if (SPE_VECTOR_MODE (mode)
6957 || (type && TREE_CODE (type) == VECTOR_TYPE
6958 && int_size_in_bytes (type) >= 8
6959 && int_size_in_bytes (type) < 16))
6961 else if ((ALTIVEC_VECTOR_MODE (mode) || VSX_VECTOR_MODE (mode))
6962 || (type && TREE_CODE (type) == VECTOR_TYPE
6963 && int_size_in_bytes (type) >= 16))
6965 else if (rs6000_darwin64_abi && mode == BLKmode
6966 && type && TYPE_ALIGN (type) > 64)
6969 return PARM_BOUNDARY;
6972 /* For a function parm of MODE and TYPE, return the starting word in
6973 the parameter area. NWORDS of the parameter area are already used. */
6976 rs6000_parm_start (enum machine_mode mode, tree type, unsigned int nwords)
6979 unsigned int parm_offset;
6981 align = function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
6982 parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
6983 return nwords + (-(parm_offset + nwords) & align);
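/* Worked example, for illustration only: for a 16-byte aligned vector
   argument under the 32-bit AIX/Darwin layout, function_arg_boundary
   returns 128 and PARM_BOUNDARY is 32, so ALIGN is 3 and PARM_OFFSET
   is 6 (the save area starts 24 bytes from the SP).  With NWORDS = 0
   the expression above yields word 2, i.e. byte 24 + 8 = 32 from the
   SP, which is indeed 16-byte aligned.  */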
6986 /* Compute the size (in words) of a function argument. */
6988 static unsigned long
6989 rs6000_arg_size (enum machine_mode mode, tree type)
6993 if (mode != BLKmode)
6994 size = GET_MODE_SIZE (mode);
6996 size = int_size_in_bytes (type);
6999 return (size + 3) >> 2;
7001 return (size + 7) >> 3;
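/* For illustration: a 10-byte BLKmode argument occupies 3 words here in
   32-bit mode ((10 + 3) >> 2) and 2 doublewords in 64-bit mode
   ((10 + 7) >> 3).  */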
7004 /* Use this to flush pending int fields. */
7007 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
7008 HOST_WIDE_INT bitpos)
7010 unsigned int startbit, endbit;
7011 int intregs, intoffset;
7012 enum machine_mode mode;
7014 if (cum->intoffset == -1)
7017 intoffset = cum->intoffset;
7018 cum->intoffset = -1;
7020 if (intoffset % BITS_PER_WORD != 0)
7022 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
7024 if (mode == BLKmode)
7026 /* We couldn't find an appropriate mode, which happens,
7027 e.g., in packed structs when there are 3 bytes to load.
7028 Move intoffset back to the beginning of the word in this
7030 intoffset = intoffset & -BITS_PER_WORD;
7034 startbit = intoffset & -BITS_PER_WORD;
7035 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
7036 intregs = (endbit - startbit) / BITS_PER_WORD;
7037 cum->words += intregs;
7040 /* The darwin64 ABI calls for us to recurse down through structs,
7041 looking for elements passed in registers. Unfortunately, we have
7042 to track int register count here also because of misalignments
7043 in powerpc alignment mode. */
7046 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
7048 HOST_WIDE_INT startbitpos)
7052 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
7053 if (TREE_CODE (f) == FIELD_DECL)
7055 HOST_WIDE_INT bitpos = startbitpos;
7056 tree ftype = TREE_TYPE (f);
7057 enum machine_mode mode;
7058 if (ftype == error_mark_node)
7060 mode = TYPE_MODE (ftype);
7062 if (DECL_SIZE (f) != 0
7063 && host_integerp (bit_position (f), 1))
7064 bitpos += int_bit_position (f);
7066 /* ??? FIXME: else assume zero offset. */
7068 if (TREE_CODE (ftype) == RECORD_TYPE)
7069 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
7070 else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
7072 rs6000_darwin64_record_arg_advance_flush (cum, bitpos);
7073 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
7074 cum->words += (GET_MODE_SIZE (mode) + 7) >> 3;
7076 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
7078 rs6000_darwin64_record_arg_advance_flush (cum, bitpos);
7082 else if (cum->intoffset == -1)
7083 cum->intoffset = bitpos;
7087 /* Update the data in CUM to advance over an argument
7088 of mode MODE and data type TYPE.
7089 (TYPE is null for libcalls where that information may not be available.)
7091 Note that for args passed by reference, function_arg will be called
7092 with MODE and TYPE set to that of the pointer to the arg, not the arg
7096 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7097 tree type, int named, int depth)
7101 /* Only tick off an argument if we're not recursing. */
7103 cum->nargs_prototype--;
7105 if (TARGET_ALTIVEC_ABI
7106 && (ALTIVEC_VECTOR_MODE (mode)
7107 || VSX_VECTOR_MODE (mode)
7108 || (type && TREE_CODE (type) == VECTOR_TYPE
7109 && int_size_in_bytes (type) == 16)))
7113 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
7116 if (!TARGET_ALTIVEC)
7117 error ("cannot pass argument in vector register because"
7118 " altivec instructions are disabled, use -maltivec"
7121 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
7122 even if it is going to be passed in a vector register.
7123 Darwin does the same for variable-argument functions. */
7124 if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
7125 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
7135 /* Vector parameters must be 16-byte aligned. This places
7136 them at 2 mod 4 in terms of words in 32-bit mode, since
7137 the parameter save area starts at offset 24 from the
7138 stack. In 64-bit mode, they just have to start on an
7139 even word, since the parameter save area is 16-byte
7140 aligned. Space for GPRs is reserved even if the argument
7141 will be passed in memory. */
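/* Worked example, for illustration only: in 32-bit mode with
   cum->words == 3, ALIGN below is (2 - 3) & 3 == 3, so the vector
   starts at word 6.  The save area begins 24 bytes from the SP, and
   24 + 6*4 == 48 is a multiple of 16, matching the "2 mod 4" rule
   described above.  */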
7143 align = (2 - cum->words) & 3;
7145 align = cum->words & 1;
7146 cum->words += align + rs6000_arg_size (mode, type);
7148 if (TARGET_DEBUG_ARG)
7150 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
7152 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
7153 cum->nargs_prototype, cum->prototype,
7154 GET_MODE_NAME (mode));
7158 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
7160 && cum->sysv_gregno <= GP_ARG_MAX_REG)
7163 else if (rs6000_darwin64_abi
7165 && TREE_CODE (type) == RECORD_TYPE
7166 && (size = int_size_in_bytes (type)) > 0)
7168 /* Variable sized types have size == -1 and are
7169 treated as if consisting entirely of ints.
7170 Pad to 16 byte boundary if needed. */
7171 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
7172 && (cum->words % 2) != 0)
7174 /* For varargs, we can just go up by the size of the struct. */
7176 cum->words += (size + 7) / 8;
7179 /* It is tempting to say int register count just goes up by
7180 sizeof(type)/8, but this is wrong in a case such as
7181 { int; double; int; } [powerpc alignment]. We have to
7182 grovel through the fields for these too. */
7184 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
7185 rs6000_darwin64_record_arg_advance_flush (cum,
7186 size * BITS_PER_UNIT);
7189 else if (DEFAULT_ABI == ABI_V4)
7191 if (TARGET_HARD_FLOAT && TARGET_FPRS
7192 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
7193 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
7194 || (mode == TFmode && !TARGET_IEEEQUAD)
7195 || mode == SDmode || mode == DDmode || mode == TDmode))
7197 /* _Decimal128 must use an even/odd register pair. This assumes
7198 that the register number is odd when fregno is odd. */
7199 if (mode == TDmode && (cum->fregno % 2) == 1)
7202 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
7203 <= FP_ARG_V4_MAX_REG)
7204 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
7207 cum->fregno = FP_ARG_V4_MAX_REG + 1;
7208 if (mode == DFmode || mode == TFmode
7209 || mode == DDmode || mode == TDmode)
7210 cum->words += cum->words & 1;
7211 cum->words += rs6000_arg_size (mode, type);
7216 int n_words = rs6000_arg_size (mode, type);
7217 int gregno = cum->sysv_gregno;
7219 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
7220 (r7,r8) or (r9,r10). So is any other 2-word item such
7221 as complex int, due to a historical mistake. */
7223 gregno += (1 - gregno) & 1;
7225 /* Multi-reg args are not split between registers and stack. */
7226 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
7228 /* Long long and SPE vectors are aligned on the stack.
7229 So are other 2-word items such as complex int, due to
7230 a historical mistake. */
7232 cum->words += cum->words & 1;
7233 cum->words += n_words;
7236 /* Note: we continue to accumulate gregno even after we've started
7237 spilling to the stack; the overflow is what tells
7238 expand_builtin_saveregs that spilling has started. */
7239 cum->sysv_gregno = gregno + n_words;
7242 if (TARGET_DEBUG_ARG)
7244 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
7245 cum->words, cum->fregno);
7246 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
7247 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
7248 fprintf (stderr, "mode = %4s, named = %d\n",
7249 GET_MODE_NAME (mode), named);
7254 int n_words = rs6000_arg_size (mode, type);
7255 int start_words = cum->words;
7256 int align_words = rs6000_parm_start (mode, type, start_words);
7258 cum->words = align_words + n_words;
7260 if (SCALAR_FLOAT_MODE_P (mode)
7261 && TARGET_HARD_FLOAT && TARGET_FPRS)
7263 /* _Decimal128 must be passed in an even/odd float register pair.
7264 This assumes that the register number is odd when fregno is
7266 if (mode == TDmode && (cum->fregno % 2) == 1)
7268 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
7271 if (TARGET_DEBUG_ARG)
7273 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
7274 cum->words, cum->fregno);
7275 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
7276 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
7277 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
7278 named, align_words - start_words, depth);
7284 spe_build_register_parallel (enum machine_mode mode, int gregno)
7291 r1 = gen_rtx_REG (DImode, gregno);
7292 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
7293 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
7297 r1 = gen_rtx_REG (DImode, gregno);
7298 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
7299 r3 = gen_rtx_REG (DImode, gregno + 2);
7300 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
7301 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
7304 r1 = gen_rtx_REG (DImode, gregno);
7305 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
7306 r3 = gen_rtx_REG (DImode, gregno + 2);
7307 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
7308 r5 = gen_rtx_REG (DImode, gregno + 4);
7309 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
7310 r7 = gen_rtx_REG (DImode, gregno + 6);
7311 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
7312 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
7319 /* Determine where to put a SIMD argument on the SPE. */
7321 rs6000_spe_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7324 int gregno = cum->sysv_gregno;
7326 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
7327 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
7328 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
7329 || mode == DCmode || mode == TCmode))
7331 int n_words = rs6000_arg_size (mode, type);
7333 /* Doubles go in an odd/even register pair (r5/r6, etc). */
7335 gregno += (1 - gregno) & 1;
7337 /* Multi-reg args are not split between registers and stack. */
7338 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
7341 return spe_build_register_parallel (mode, gregno);
7345 int n_words = rs6000_arg_size (mode, type);
7347 /* SPE vectors are put in odd registers. */
7348 if (n_words == 2 && (gregno & 1) == 0)
7351 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
7354 enum machine_mode m = SImode;
7356 r1 = gen_rtx_REG (m, gregno);
7357 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
7358 r2 = gen_rtx_REG (m, gregno + 1);
7359 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
7360 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
7367 if (gregno <= GP_ARG_MAX_REG)
7368 return gen_rtx_REG (mode, gregno);
7374 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
7375 structure between cum->intoffset and bitpos to integer registers. */
7378 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
7379 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
7381 enum machine_mode mode;
7383 unsigned int startbit, endbit;
7384 int this_regno, intregs, intoffset;
7387 if (cum->intoffset == -1)
7390 intoffset = cum->intoffset;
7391 cum->intoffset = -1;
7393 /* If this is the trailing part of a word, try to only load that
7394 much into the register. Otherwise load the whole register. Note
7395 that in the latter case we may pick up unwanted bits. It's not a
7396 problem at the moment, but we may wish to revisit it. */
7398 if (intoffset % BITS_PER_WORD != 0)
7400 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
7402 if (mode == BLKmode)
7404 /* We couldn't find an appropriate mode, which happens,
7405 e.g., in packed structs when there are 3 bytes to load.
7406 Move intoffset back to the beginning of the word in this
7408 intoffset = intoffset & -BITS_PER_WORD;
7415 startbit = intoffset & -BITS_PER_WORD;
7416 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
7417 intregs = (endbit - startbit) / BITS_PER_WORD;
7418 this_regno = cum->words + intoffset / BITS_PER_WORD;
7420 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
7423 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
7427 intoffset /= BITS_PER_UNIT;
7430 regno = GP_ARG_MIN_REG + this_regno;
7431 reg = gen_rtx_REG (mode, regno);
7433 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
7436 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
7440 while (intregs > 0);
7443 /* Recursive workhorse for the following. */
7446 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
7447 HOST_WIDE_INT startbitpos, rtx rvec[],
7452 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
7453 if (TREE_CODE (f) == FIELD_DECL)
7455 HOST_WIDE_INT bitpos = startbitpos;
7456 tree ftype = TREE_TYPE (f);
7457 enum machine_mode mode;
7458 if (ftype == error_mark_node)
7460 mode = TYPE_MODE (ftype);
7462 if (DECL_SIZE (f) != 0
7463 && host_integerp (bit_position (f), 1))
7464 bitpos += int_bit_position (f);
7466 /* ??? FIXME: else assume zero offset. */
7468 if (TREE_CODE (ftype) == RECORD_TYPE)
7469 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
7470 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
7475 case SCmode: mode = SFmode; break;
7476 case DCmode: mode = DFmode; break;
7477 case TCmode: mode = TFmode; break;
7481 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
7483 = gen_rtx_EXPR_LIST (VOIDmode,
7484 gen_rtx_REG (mode, cum->fregno++),
7485 GEN_INT (bitpos / BITS_PER_UNIT));
7486 if (mode == TFmode || mode == TDmode)
7489 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
7491 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
7493 = gen_rtx_EXPR_LIST (VOIDmode,
7494 gen_rtx_REG (mode, cum->vregno++),
7495 GEN_INT (bitpos / BITS_PER_UNIT));
7497 else if (cum->intoffset == -1)
7498 cum->intoffset = bitpos;
7502 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
7503 the register(s) to be used for each field and subfield of a struct
7504 being passed by value, along with the offset of where the
7505 register's value may be found in the block. FP fields go in FP
7506 registers, vector fields go in vector registers, and everything
7507 else goes in int registers, packed as in memory.
7509 This code is also used for function return values. RETVAL indicates
7510 whether this is the case.
7512 Much of this is taken from the SPARC V9 port, which has a similar
7513 calling convention. */
7516 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
7517 int named, bool retval)
7519 rtx rvec[FIRST_PSEUDO_REGISTER];
7520 int k = 1, kbase = 1;
7521 HOST_WIDE_INT typesize = int_size_in_bytes (type);
7522 /* This is a copy; modifications are not visible to our caller. */
7523 CUMULATIVE_ARGS copy_cum = *orig_cum;
7524 CUMULATIVE_ARGS *cum = &copy_cum;
7526 /* Pad to 16 byte boundary if needed. */
7527 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
7528 && (cum->words % 2) != 0)
7535 /* Put entries into rvec[] for individual FP and vector fields, and
7536 for the chunks of memory that go in int regs. Note we start at
7537 element 1; 0 is reserved for an indication of using memory, and
7538 may or may not be filled in below. */
7539 rs6000_darwin64_record_arg_recurse (cum, type, 0, rvec, &k);
7540 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
7542 /* If any part of the struct went on the stack, put all of it there.
7543 This hack is because the generic code for
7544 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
7545 parts of the struct are not at the beginning. */
7549 return NULL_RTX; /* doesn't go in registers at all */
7551 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
7553 if (k > 1 || cum->use_stack)
7554 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
7559 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
7562 rs6000_mixed_function_arg (enum machine_mode mode, tree type, int align_words)
7566 rtx rvec[GP_ARG_NUM_REG + 1];
7568 if (align_words >= GP_ARG_NUM_REG)
7571 n_units = rs6000_arg_size (mode, type);
7573 /* Optimize the simple case where the arg fits in one gpr, except in
7574 the case of BLKmode due to assign_parms assuming that registers are
7575 BITS_PER_WORD wide. */
7577 || (n_units == 1 && mode != BLKmode))
7578 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
7581 if (align_words + n_units > GP_ARG_NUM_REG)
7582 /* Not all of the arg fits in gprs. Say that it goes in memory too,
7583 using a magic NULL_RTX component.
7584 This is not strictly correct. Only some of the arg belongs in
7585 memory, not all of it. However, the normal scheme using
7586 function_arg_partial_nregs can result in unusual subregs, e.g.
7587 (subreg:SI (reg:DF) 4), which are not handled well. The code to
7588 store the whole arg to memory is often more efficient than code
7589 to store pieces, and we know that space is available in the right
7590 place for the whole arg. */
7591 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
7596 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
7597 rtx off = GEN_INT (i++ * 4);
7598 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
7600 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
7602 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
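/* Illustrative example (not from the original source): a DImode argument
   with align_words == 7 occupies two SImode units, but only one GPR
   (r10) is left, so the PARALLEL built above contains the NULL_RTX
   memory marker followed by a single (reg:SI 10) at offset 0; the
   remaining word is passed in memory.  */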
7605 /* Determine where to put an argument to a function.
7606 Value is zero to push the argument on the stack,
7607 or a hard register in which to store the argument.
7609 MODE is the argument's machine mode.
7610 TYPE is the data type of the argument (as a tree).
7611 This is null for libcalls where that information may
7613 CUM is a variable of type CUMULATIVE_ARGS which gives info about
7614 the preceding args and about the function being called. It is
7615 not modified in this routine.
7616 NAMED is nonzero if this argument is a named parameter
7617 (otherwise it is an extra parameter matching an ellipsis).
7619 On RS/6000 the first eight words of non-FP are normally in registers
7620 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
7621 Under V.4, the first 8 FP args are in registers.
7623 If this is floating-point and no prototype is specified, we use
7624 both an FP and integer register (or possibly FP reg and stack). Library
7625 functions (when CALL_LIBCALL is set) always have the proper types for args,
7626 so we can pass the FP value just in one register. emit_library_function
7627 doesn't support PARALLEL anyway.
7629 Note that for args passed by reference, function_arg will be called
7630 with MODE and TYPE set to that of the pointer to the arg, not the arg
7634 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7635 tree type, int named)
7637 enum rs6000_abi abi = DEFAULT_ABI;
7639 /* Return a marker to indicate whether CR1 needs to set or clear the
7640 bit that V.4 uses to say fp args were passed in registers.
7641 Assume that we don't need the marker for software floating point,
7642 or compiler generated library calls. */
7643 if (mode == VOIDmode)
7646 && (cum->call_cookie & CALL_LIBCALL) == 0
7648 || (cum->nargs_prototype < 0
7649 && (cum->prototype || TARGET_NO_PROTOTYPE))))
7651 /* For the SPE, we need to crxor CR6 always. */
7653 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
7654 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
7655 return GEN_INT (cum->call_cookie
7656 | ((cum->fregno == FP_ARG_MIN_REG)
7657 ? CALL_V4_SET_FP_ARGS
7658 : CALL_V4_CLEAR_FP_ARGS));
7661 return GEN_INT (cum->call_cookie);
7664 if (rs6000_darwin64_abi && mode == BLKmode
7665 && TREE_CODE (type) == RECORD_TYPE)
7667 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, false);
7668 if (rslt != NULL_RTX)
7670 /* Else fall through to usual handling. */
7673 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
7674 if (TARGET_64BIT && ! cum->prototype)
7676 /* Vector parameters get passed in a vector register
7677 and also in GPRs or memory, in the absence of a prototype. */
7680 align_words = (cum->words + 1) & ~1;
7682 if (align_words >= GP_ARG_NUM_REG)
7688 slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
7690 return gen_rtx_PARALLEL (mode,
7692 gen_rtx_EXPR_LIST (VOIDmode,
7694 gen_rtx_EXPR_LIST (VOIDmode,
7695 gen_rtx_REG (mode, cum->vregno),
7699 return gen_rtx_REG (mode, cum->vregno);
7700 else if (TARGET_ALTIVEC_ABI
7701 && (ALTIVEC_VECTOR_MODE (mode)
7702 || VSX_VECTOR_MODE (mode)
7703 || (type && TREE_CODE (type) == VECTOR_TYPE
7704 && int_size_in_bytes (type) == 16)))
7706 if (named || abi == ABI_V4)
7710 /* Vector parameters to varargs functions under AIX or Darwin
7711 get passed in memory and possibly also in GPRs. */
7712 int align, align_words, n_words;
7713 enum machine_mode part_mode;
7715 /* Vector parameters must be 16-byte aligned. This places them at
7716 2 mod 4 in terms of words in 32-bit mode, since the parameter
7717 save area starts at offset 24 from the stack. In 64-bit mode,
7718 they just have to start on an even word, since the parameter
7719 save area is 16-byte aligned. */
7721 align = (2 - cum->words) & 3;
7723 align = cum->words & 1;
7724 align_words = cum->words + align;
7726 /* Out of registers? Memory, then. */
7727 if (align_words >= GP_ARG_NUM_REG)
7730 if (TARGET_32BIT && TARGET_POWERPC64)
7731 return rs6000_mixed_function_arg (mode, type, align_words);
7733 /* The vector value goes in GPRs. Only the part of the
7734 value in GPRs is reported here. */
7736 n_words = rs6000_arg_size (mode, type);
7737 if (align_words + n_words > GP_ARG_NUM_REG)
7738 /* Fortunately, there are only two possibilities: the value
7739 is either wholly in GPRs or half in GPRs and half not. */
7742 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
7745 else if (TARGET_SPE_ABI && TARGET_SPE
7746 && (SPE_VECTOR_MODE (mode)
7747 || (TARGET_E500_DOUBLE && (mode == DFmode
7750 || mode == TCmode))))
7751 return rs6000_spe_function_arg (cum, mode, type);
7753 else if (abi == ABI_V4)
7755 if (TARGET_HARD_FLOAT && TARGET_FPRS
7756 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
7757 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
7758 || (mode == TFmode && !TARGET_IEEEQUAD)
7759 || mode == SDmode || mode == DDmode || mode == TDmode))
7761 /* _Decimal128 must use an even/odd register pair. This assumes
7762 that the register number is odd when fregno is odd. */
7763 if (mode == TDmode && (cum->fregno % 2) == 1)
7766 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
7767 <= FP_ARG_V4_MAX_REG)
7768 return gen_rtx_REG (mode, cum->fregno);
7774 int n_words = rs6000_arg_size (mode, type);
7775 int gregno = cum->sysv_gregno;
7777 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
7778 (r7,r8) or (r9,r10). So is any other 2-word item such
7779 as complex int, due to a historical mistake. */
7781 gregno += (1 - gregno) & 1;
7783 /* Multi-reg args are not split between registers and stack. */
7784 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
7787 if (TARGET_32BIT && TARGET_POWERPC64)
7788 return rs6000_mixed_function_arg (mode, type,
7789 gregno - GP_ARG_MIN_REG);
7790 return gen_rtx_REG (mode, gregno);
7795 int align_words = rs6000_parm_start (mode, type, cum->words);
7797 /* _Decimal128 must be passed in an even/odd float register pair.
7798 This assumes that the register number is odd when fregno is odd. */
7799 if (mode == TDmode && (cum->fregno % 2) == 1)
7802 if (USE_FP_FOR_ARG_P (cum, mode, type))
7804 rtx rvec[GP_ARG_NUM_REG + 1];
7808 enum machine_mode fmode = mode;
7809 unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
7811 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
7813 /* Currently, we only ever need one reg here because complex
7814 doubles are split. */
7815 gcc_assert (cum->fregno == FP_ARG_MAX_REG
7816 && (fmode == TFmode || fmode == TDmode));
7818 /* Long double or _Decimal128 split over regs and memory. */
7819 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
7822 /* Do we also need to pass this arg in the parameter save
7825 && (cum->nargs_prototype <= 0
7826 || (DEFAULT_ABI == ABI_AIX
7828 && align_words >= GP_ARG_NUM_REG)));
7830 if (!needs_psave && mode == fmode)
7831 return gen_rtx_REG (fmode, cum->fregno);
7836 /* Describe the part that goes in gprs or the stack.
7837 This piece must come first, before the fprs. */
7838 if (align_words < GP_ARG_NUM_REG)
7840 unsigned long n_words = rs6000_arg_size (mode, type);
7842 if (align_words + n_words > GP_ARG_NUM_REG
7843 || (TARGET_32BIT && TARGET_POWERPC64))
7845 /* If this is partially on the stack, then we only
7846 include the portion actually in registers here. */
7847 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
7850 if (align_words + n_words > GP_ARG_NUM_REG)
7851 /* Not all of the arg fits in gprs. Say that it
7852 goes in memory too, using a magic NULL_RTX
7853 component. Also see comment in
7854 rs6000_mixed_function_arg for why the normal
7855 function_arg_partial_nregs scheme doesn't work
7857 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
7861 r = gen_rtx_REG (rmode,
7862 GP_ARG_MIN_REG + align_words);
7863 off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
7864 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
7866 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
7870 /* The whole arg fits in gprs. */
7871 r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
7872 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
7876 /* It's entirely in memory. */
7877 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
7880 /* Describe where this piece goes in the fprs. */
7881 r = gen_rtx_REG (fmode, cum->fregno);
7882 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
7884 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
7886 else if (align_words < GP_ARG_NUM_REG)
7888 if (TARGET_32BIT && TARGET_POWERPC64)
7889 return rs6000_mixed_function_arg (mode, type, align_words);
7891 if (mode == BLKmode)
7894 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
7901 /* For an arg passed partly in registers and partly in memory, this is
7902 the number of bytes passed in registers. For args passed entirely in
7903 registers or entirely in memory, zero. When an arg is described by a
7904 PARALLEL, perhaps using more than one register type, this function
7905 returns the number of bytes used by the first element of the PARALLEL. */
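/* For illustration (an assumed prototyped call, not taken from the
   original sources): a TFmode long double that starts in the last FP
   argument register f13 needs two FPRs, so only 8 of its 16 bytes fit
   in FPRs; the fregno overflow check below then reports
   (FP_ARG_MAX_REG + 1 - fregno) * 8 == 8 partial bytes.  */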
7908 rs6000_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7909 tree type, bool named)
7914 if (DEFAULT_ABI == ABI_V4)
7917 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
7918 && cum->nargs_prototype >= 0)
7921 /* In this complicated case we just disable the partial_nregs code. */
7922 if (rs6000_darwin64_abi && mode == BLKmode
7923 && TREE_CODE (type) == RECORD_TYPE
7924 && int_size_in_bytes (type) > 0)
7927 align_words = rs6000_parm_start (mode, type, cum->words);
7929 if (USE_FP_FOR_ARG_P (cum, mode, type))
7931 /* If we are passing this arg in the fixed parameter save area
7932 (gprs or memory) as well as fprs, then this function should
7933 return the number of partial bytes passed in the parameter
7934 save area rather than partial bytes passed in fprs. */
7936 && (cum->nargs_prototype <= 0
7937 || (DEFAULT_ABI == ABI_AIX
7939 && align_words >= GP_ARG_NUM_REG)))
7941 else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
7942 > FP_ARG_MAX_REG + 1)
7943 ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
7944 else if (cum->nargs_prototype >= 0)
7948 if (align_words < GP_ARG_NUM_REG
7949 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
7950 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
7952 if (ret != 0 && TARGET_DEBUG_ARG)
7953 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
7958 /* A C expression that indicates when an argument must be passed by
7959 reference. If nonzero for an argument, a copy of that argument is
7960 made in memory and a pointer to the argument is passed instead of
7961 the argument itself. The pointer is passed in whatever way is
7962 appropriate for passing a pointer to that type.
7964 Under V.4, aggregates and long double are passed by reference.
7966 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
7967 reference unless the AltiVec vector extension ABI is in force.
7969 As an extension to all ABIs, variable sized types are passed by
7973 rs6000_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7974 enum machine_mode mode, const_tree type,
7975 bool named ATTRIBUTE_UNUSED)
7977 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
7979 if (TARGET_DEBUG_ARG)
7980 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
7987 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
7989 if (TARGET_DEBUG_ARG)
7990 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
7994 if (int_size_in_bytes (type) < 0)
7996 if (TARGET_DEBUG_ARG)
7997 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
8001 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
8002 modes only exist for GCC vector types if -maltivec. */
8003 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
8005 if (TARGET_DEBUG_ARG)
8006 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
8010 /* Pass synthetic vectors in memory. */
8011 if (TREE_CODE (type) == VECTOR_TYPE
8012 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
8014 static bool warned_for_pass_big_vectors = false;
8015 if (TARGET_DEBUG_ARG)
8016 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
8017 if (!warned_for_pass_big_vectors)
8019 warning (0, "GCC vector passed by reference: "
8020 "non-standard ABI extension with no compatibility guarantee");
8021 warned_for_pass_big_vectors = true;
8030 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
8033 enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
8038 for (i = 0; i < nregs; i++)
8040 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
8041 if (reload_completed)
8043 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
8046 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
8047 i * GET_MODE_SIZE (reg_mode));
8050 tem = replace_equiv_address (tem, XEXP (tem, 0));
8054 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
8058 /* Perform any actions needed for a function that is receiving a
8059 variable number of arguments.
8063 MODE and TYPE are the mode and type of the current parameter.
8065 PRETEND_SIZE is a variable that should be set to the amount of stack
8066 that must be pushed by the prolog to pretend that our caller pushed
8069 Normally, this macro will push all remaining incoming registers on the
8070 stack and set PRETEND_SIZE to the length of the registers pushed. */
8073 setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8074 tree type, int *pretend_size ATTRIBUTE_UNUSED,
8077 CUMULATIVE_ARGS next_cum;
8078 int reg_size = TARGET_32BIT ? 4 : 8;
8079 rtx save_area = NULL_RTX, mem;
8080 int first_reg_offset;
8083 /* Skip the last named argument. */
8085 function_arg_advance (&next_cum, mode, type, 1, 0);
8087 if (DEFAULT_ABI == ABI_V4)
8089 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
8093 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
8094 HOST_WIDE_INT offset = 0;
8096 /* Try to optimize the size of the varargs save area.
8097 The ABI requires that ap.reg_save_area is doubleword
8098 aligned, but we don't need to allocate space for all
8099 the bytes, only those to which we actually will save
8101 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
8102 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
8103 if (TARGET_HARD_FLOAT && TARGET_FPRS
8104 && next_cum.fregno <= FP_ARG_V4_MAX_REG
8105 && cfun->va_list_fpr_size)
8108 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
8109 * UNITS_PER_FP_WORD;
8110 if (cfun->va_list_fpr_size
8111 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
8112 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
8114 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
8115 * UNITS_PER_FP_WORD;
8119 offset = -((first_reg_offset * reg_size) & ~7);
8120 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
8122 gpr_reg_num = cfun->va_list_gpr_size;
8123 if (reg_size == 4 && (first_reg_offset & 1))
8126 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
8129 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
8131 - (int) (GP_ARG_NUM_REG * reg_size);
8133 if (gpr_size + fpr_size)
8136 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
8137 gcc_assert (GET_CODE (reg_save_area) == MEM);
8138 reg_save_area = XEXP (reg_save_area, 0);
8139 if (GET_CODE (reg_save_area) == PLUS)
8141 gcc_assert (XEXP (reg_save_area, 0)
8142 == virtual_stack_vars_rtx);
8143 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
8144 offset += INTVAL (XEXP (reg_save_area, 1));
8147 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
8150 cfun->machine->varargs_save_offset = offset;
8151 save_area = plus_constant (virtual_stack_vars_rtx, offset);
8156 first_reg_offset = next_cum.words;
8157 save_area = virtual_incoming_args_rtx;
8159 if (targetm.calls.must_pass_in_stack (mode, type))
8160 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
8163 set = get_varargs_alias_set ();
8164 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
8165 && cfun->va_list_gpr_size)
8167 int nregs = GP_ARG_NUM_REG - first_reg_offset;
8169 if (va_list_gpr_counter_field)
8171 /* V4 va_list_gpr_size counts number of registers needed. */
8172 if (nregs > cfun->va_list_gpr_size)
8173 nregs = cfun->va_list_gpr_size;
8177 /* char * va_list instead counts number of bytes needed. */
8178 if (nregs > cfun->va_list_gpr_size / reg_size)
8179 nregs = cfun->va_list_gpr_size / reg_size;
8182 mem = gen_rtx_MEM (BLKmode,
8183 plus_constant (save_area,
8184 first_reg_offset * reg_size));
8185 MEM_NOTRAP_P (mem) = 1;
8186 set_mem_alias_set (mem, set);
8187 set_mem_align (mem, BITS_PER_WORD);
8189 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
8193 /* Save FP registers if needed. */
8194 if (DEFAULT_ABI == ABI_V4
8195 && TARGET_HARD_FLOAT && TARGET_FPRS
8197 && next_cum.fregno <= FP_ARG_V4_MAX_REG
8198 && cfun->va_list_fpr_size)
8200 int fregno = next_cum.fregno, nregs;
8201 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
8202 rtx lab = gen_label_rtx ();
8203 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
8204 * UNITS_PER_FP_WORD);
8207 (gen_rtx_SET (VOIDmode,
8209 gen_rtx_IF_THEN_ELSE (VOIDmode,
8210 gen_rtx_NE (VOIDmode, cr1,
8212 gen_rtx_LABEL_REF (VOIDmode, lab),
8216 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
8217 fregno++, off += UNITS_PER_FP_WORD, nregs++)
8219 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8221 plus_constant (save_area, off));
8222 MEM_NOTRAP_P (mem) = 1;
8223 set_mem_alias_set (mem, set);
8224 set_mem_align (mem, GET_MODE_ALIGNMENT (
8225 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8226 ? DFmode : SFmode));
8227 emit_move_insn (mem, gen_rtx_REG (
8228 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8229 ? DFmode : SFmode, fregno));
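/* Worked example (a sketch under stated assumptions, not from the original
   sources): on a 32-bit SVR4 target with hard float, a variadic function
   such as

     #include <stdarg.h>

     int
     sum (int count, ...)
     {
       va_list ap;
       int i, total = 0;

       va_start (ap, count);
       for (i = 0; i < count; i++)
         total += va_arg (ap, int);
       va_end (ap);
       return total;
     }

   has its only named argument in r3, so first_reg_offset is 1 and the code
   above spills r4 through r10 into the register save area.  The FP block
   (f1 through f8) is dumped as well only when the function might fetch
   floating-point varargs, and even then the stores are skipped at run time
   via the CR1 test when the caller passed no FP arguments in registers.  */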
8236 /* Create the va_list data type. */
8239 rs6000_build_builtin_va_list (void)
8241 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
8243 /* For AIX, prefer 'char *' because that's what the system
8244 header files like. */
8245 if (DEFAULT_ABI != ABI_V4)
8246 return build_pointer_type (char_type_node);
8248 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
8249 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
8250 get_identifier ("__va_list_tag"), record);
8252 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
8253 unsigned_char_type_node);
8254 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
8255 unsigned_char_type_node);
8256 /* Give the two bytes of padding a name, so that -Wpadded won't warn on every user file.  */
8258 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
8259 get_identifier ("reserved"), short_unsigned_type_node);
8260 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
8261 get_identifier ("overflow_arg_area"),
8263 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
8264 get_identifier ("reg_save_area"),
8267 va_list_gpr_counter_field = f_gpr;
8268 va_list_fpr_counter_field = f_fpr;
8270 DECL_FIELD_CONTEXT (f_gpr) = record;
8271 DECL_FIELD_CONTEXT (f_fpr) = record;
8272 DECL_FIELD_CONTEXT (f_res) = record;
8273 DECL_FIELD_CONTEXT (f_ovf) = record;
8274 DECL_FIELD_CONTEXT (f_sav) = record;
8276 TREE_CHAIN (record) = type_decl;
8277 TYPE_NAME (record) = type_decl;
8278 TYPE_FIELDS (record) = f_gpr;
8279 TREE_CHAIN (f_gpr) = f_fpr;
8280 TREE_CHAIN (f_fpr) = f_res;
8281 TREE_CHAIN (f_res) = f_ovf;
8282 TREE_CHAIN (f_ovf) = f_sav;
8284 layout_type (record);
8286 /* The correct type is an array type of one element. */
8287 return build_array_type (record, build_index_type (size_zero_node));
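/* For reference, the record laid out above is the familiar SVR4/EABI
   va_list; written as plain C (a descriptive sketch of the tree built here,
   not code compiled at this point) it reads roughly:

     typedef struct __va_list_tag
     {
       unsigned char gpr;
       unsigned char fpr;
       unsigned short reserved;
       void *overflow_arg_area;
       void *reg_save_area;
     } __va_list_tag;

     typedef __va_list_tag __gnuc_va_list[1];

   gpr and fpr count the general and floating-point argument registers
   already consumed (0..8 each), reserved is the named padding that keeps
   -Wpadded quiet, overflow_arg_area points at arguments that spilled to
   the stack, and reg_save_area points at the block saved by
   setup_incoming_varargs.  The one-element array type mirrors the
   build_array_type call just above, so a va_list decays to a pointer when
   passed to callees.  */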
8290 /* Implement va_start. */
8293 rs6000_va_start (tree valist, rtx nextarg)
8295 HOST_WIDE_INT words, n_gpr, n_fpr;
8296 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
8297 tree gpr, fpr, ovf, sav, t;
8299 /* Only SVR4 needs something special. */
8300 if (DEFAULT_ABI != ABI_V4)
8302 std_expand_builtin_va_start (valist, nextarg);
8306 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8307 f_fpr = TREE_CHAIN (f_gpr);
8308 f_res = TREE_CHAIN (f_fpr);
8309 f_ovf = TREE_CHAIN (f_res);
8310 f_sav = TREE_CHAIN (f_ovf);
8312 valist = build_va_arg_indirect_ref (valist);
8313 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8314 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
8316 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
8318 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
8321 /* Count number of gp and fp argument registers used. */
8322 words = crtl->args.info.words;
8323 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
8325 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
8328 if (TARGET_DEBUG_ARG)
8329 fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
8330 HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
8331 words, n_gpr, n_fpr);
8333 if (cfun->va_list_gpr_size)
8335 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
8336 build_int_cst (NULL_TREE, n_gpr));
8337 TREE_SIDE_EFFECTS (t) = 1;
8338 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8341 if (cfun->va_list_fpr_size)
8343 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
8344 build_int_cst (NULL_TREE, n_fpr));
8345 TREE_SIDE_EFFECTS (t) = 1;
8346 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8349 /* Find the overflow area. */
8350 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
8352 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t,
8353 size_int (words * UNITS_PER_WORD));
8354 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
8355 TREE_SIDE_EFFECTS (t) = 1;
8356 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8358 /* If there were no va_arg invocations, don't set up the register save area.  */
8360 if (!cfun->va_list_gpr_size
8361 && !cfun->va_list_fpr_size
8362 && n_gpr < GP_ARG_NUM_REG
8363 && n_fpr < FP_ARG_V4_MAX_REG)
8366 /* Find the register save area. */
8367 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
8368 if (cfun->machine->varargs_save_offset)
8369 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
8370 size_int (cfun->machine->varargs_save_offset));
8371 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
8372 TREE_SIDE_EFFECTS (t) = 1;
8373 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
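/* Illustrative sketch (assumptions labelled, not taken from the original
   sources): for the SVR4 ABI the trees expanded above behave much like

     void
     example_va_start (__va_list_tag *ap)
     {
       ap->gpr = n_gpr;
       ap->fpr = n_fpr;
       ap->overflow_arg_area = incoming_args + words * UNITS_PER_WORD;
       ap->reg_save_area = frame_base + varargs_save_offset;
     }

   where n_gpr and n_fpr are the counts of named argument registers already
   consumed, and incoming_args and frame_base stand in for
   virtual_incoming_args_rtx and virtual_stack_vars_rtx.  The reg_save_area
   store is omitted entirely when no va_arg use needs it, matching the early
   exit above.  */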
8376 /* Implement va_arg. */
8379 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
8382 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
8383 tree gpr, fpr, ovf, sav, reg, t, u;
8384 int size, rsize, n_reg, sav_ofs, sav_scale;
8385 tree lab_false, lab_over, addr;
8387 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
8391 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
8393 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
8394 return build_va_arg_indirect_ref (t);
8397 if (DEFAULT_ABI != ABI_V4)
8399 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
8401 tree elem_type = TREE_TYPE (type);
8402 enum machine_mode elem_mode = TYPE_MODE (elem_type);
8403 int elem_size = GET_MODE_SIZE (elem_mode);
8405 if (elem_size < UNITS_PER_WORD)
8407 tree real_part, imag_part;
8408 gimple_seq post = NULL;
8410 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
8412 /* Copy the value into a temporary, lest the formal temporary
8413 be reused out from under us. */
8414 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
8415 gimple_seq_add_seq (pre_p, post);
8417 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
8420 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
8424 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
8427 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8428 f_fpr = TREE_CHAIN (f_gpr);
8429 f_res = TREE_CHAIN (f_fpr);
8430 f_ovf = TREE_CHAIN (f_res);
8431 f_sav = TREE_CHAIN (f_ovf);
8433 valist = build_va_arg_indirect_ref (valist);
8434 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8435 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
8437 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
8439 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
8442 size = int_size_in_bytes (type);
8443 rsize = (size + 3) / 4;
8446 if (TARGET_HARD_FLOAT && TARGET_FPRS
8447 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
8448 || (TARGET_DOUBLE_FLOAT
8449 && (TYPE_MODE (type) == DFmode
8450 || TYPE_MODE (type) == TFmode
8451 || TYPE_MODE (type) == SDmode
8452 || TYPE_MODE (type) == DDmode
8453 || TYPE_MODE (type) == TDmode))))
8455 /* FP args go in FP registers, if present. */
8457 n_reg = (size + 7) / 8;
8458 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
8459 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
8460 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
8465 /* Otherwise into GP registers. */
8474 /* Pull the value out of the saved registers.... */
8477 addr = create_tmp_var (ptr_type_node, "addr");
8479 /* AltiVec vectors never go in registers when -mabi=altivec. */
8480 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
8484 lab_false = create_artificial_label (input_location);
8485 lab_over = create_artificial_label (input_location);
8487 /* Long long and SPE vectors are aligned in the registers.
8488 As are any other 2-gpr items, such as complex int, due to a
8489 historical mistake.  */
8491 if (n_reg == 2 && reg == gpr)
8494 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
8495 build_int_cst (TREE_TYPE (reg), n_reg - 1));
8496 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
8497 unshare_expr (reg), u);
8499 /* _Decimal128 is passed in even/odd fpr pairs; the stored
8500 reg number is 0 for f1, so we want to make it odd. */
8501 else if (reg == fpr && TYPE_MODE (type) == TDmode)
8503 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
8504 build_int_cst (TREE_TYPE (reg), 1));
8505 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
8508 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
8509 t = build2 (GE_EXPR, boolean_type_node, u, t);
8510 u = build1 (GOTO_EXPR, void_type_node, lab_false);
8511 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
8512 gimplify_and_add (t, pre_p);
8516 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, size_int (sav_ofs));
8518 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
8519 build_int_cst (TREE_TYPE (reg), n_reg));
8520 u = fold_convert (sizetype, u);
8521 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
8522 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, u);
8524 /* _Decimal32 varargs are located in the second word of the 64-bit
8525 FP register for 32-bit binaries. */
8526 if (!TARGET_POWERPC64
8527 && TARGET_HARD_FLOAT && TARGET_FPRS
8528 && TYPE_MODE (type) == SDmode)
8529 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (size));
8531 gimplify_assign (addr, t, pre_p);
8533 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
8535 stmt = gimple_build_label (lab_false);
8536 gimple_seq_add_stmt (pre_p, stmt);
8538 if ((n_reg == 2 && !regalign) || n_reg > 2)
8540 /* Ensure that we don't find any more args in regs.
8541 Alignment has already been taken care of for the special cases.  */
8542 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
8546 /* ... otherwise out of the overflow area. */
8548 /* Care for on-stack alignment if needed. */
8552 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (align - 1));
8553 t = fold_convert (sizetype, t);
8554 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
8556 t = fold_convert (TREE_TYPE (ovf), t);
8558 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
8560 gimplify_assign (unshare_expr (addr), t, pre_p);
8562 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (size));
8563 gimplify_assign (unshare_expr (ovf), t, pre_p);
8567 stmt = gimple_build_label (lab_over);
8568 gimple_seq_add_stmt (pre_p, stmt);
8571 if (STRICT_ALIGNMENT
8572 && (TYPE_ALIGN (type)
8573 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
8575 /* The value (of type complex double, for example) may not be
8576 aligned in memory in the saved registers, so copy via a
8577 temporary. (This is the same code as used for SPARC.) */
8578 tree tmp = create_tmp_var (type, "va_arg_tmp");
8579 tree dest_addr = build_fold_addr_expr (tmp);
8581 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
8582 3, dest_addr, addr, size_int (rsize * 4));
8584 gimplify_and_add (copy, pre_p);
8588 addr = fold_convert (ptrtype, addr);
8589 return build_va_arg_indirect_ref (addr);
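/* A rough C rendering of the GIMPLE produced above for a small scalar
   argument (an illustrative sketch; n_reg, sav_ofs, sav_scale and align are
   the values computed earlier in this function):

     if (reg >= 8 - n_reg + 1)
       goto on_stack;
     addr = sav + sav_ofs + reg * sav_scale;
     reg += n_reg;
     goto done;

   on_stack:
     addr = (ovf + align - 1) & -align;
     ovf = addr + size;

   done:
     result = *(TYPE *) addr;

   with the extra twists handled above: two-register items round reg up to
   an even register first, TDmode values force an odd FP register, SDmode
   values in 32-bit code read the second word of the 64-bit FP slot, and reg
   is bumped to 8 on the stack path when needed so no further arguments are
   taken from registers.  */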
8595 def_builtin (int mask, const char *name, tree type, int code)
8597 if ((mask & target_flags) || TARGET_PAIRED_FLOAT)
8600 if (rs6000_builtin_decls[code])
8601 fatal_error ("internal error: builtin function %s already processed.",
8604 rs6000_builtin_decls[code] = t =
8605 add_builtin_function (name, type, code, BUILT_IN_MD,
8608 gcc_assert (code >= 0 && code < (int)RS6000_BUILTIN_COUNT);
8609 switch (builtin_classify[code])
8614 /* Assume the builtin can do anything.  */
8615 case RS6000_BTC_MISC:
8618 /* Const function: the result depends only on the inputs.  */
8619 case RS6000_BTC_CONST:
8620 TREE_READONLY (t) = 1;
8621 TREE_NOTHROW (t) = 1;
8624 /* Pure function: the function can read global memory.  */
8625 case RS6000_BTC_PURE:
8626 DECL_PURE_P (t) = 1;
8627 TREE_NOTHROW (t) = 1;
8630 /* Function is a math function. If rounding mode is on, then treat
8631 the function as not reading global memory, but it can have
8632 arbitrary side effects. If it is off, then assume the function is
8633 a const function. This mimics the ATTR_MATHFN_FPROUNDING
8634 attribute in builtin-attribute.def that is used for the math functions.  */
8636 case RS6000_BTC_FP_PURE:
8637 TREE_NOTHROW (t) = 1;
8638 if (flag_rounding_math)
8640 DECL_PURE_P (t) = 1;
8641 DECL_IS_NOVOPS (t) = 1;
8644 TREE_READONLY (t) = 1;
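/* Usage sketch: the tables below are walked elsewhere in this file and end
   up in calls of the shape

     def_builtin (MASK_ALTIVEC, "__builtin_altivec_vmaddfp",
                  v4sf_ftype_v4sf_v4sf_v4sf, ALTIVEC_BUILTIN_VMADDFP);

   (the function-type node name is illustrative).  Each call registers the
   user-visible name, records the decl in rs6000_builtin_decls, and applies
   the const/pure/nothrow bits that builtin_classify dictates for that
   builtin code.  */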
8650 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
8652 static const struct builtin_description bdesc_3arg[] =
8654 { MASK_ALTIVEC, CODE_FOR_altivec_vmaddfp, "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP },
8655 { MASK_ALTIVEC, CODE_FOR_altivec_vmhaddshs, "__builtin_altivec_vmhaddshs", ALTIVEC_BUILTIN_VMHADDSHS },
8656 { MASK_ALTIVEC, CODE_FOR_altivec_vmhraddshs, "__builtin_altivec_vmhraddshs", ALTIVEC_BUILTIN_VMHRADDSHS },
8657 { MASK_ALTIVEC, CODE_FOR_altivec_vmladduhm, "__builtin_altivec_vmladduhm", ALTIVEC_BUILTIN_VMLADDUHM},
8658 { MASK_ALTIVEC, CODE_FOR_altivec_vmsumubm, "__builtin_altivec_vmsumubm", ALTIVEC_BUILTIN_VMSUMUBM },
8659 { MASK_ALTIVEC, CODE_FOR_altivec_vmsummbm, "__builtin_altivec_vmsummbm", ALTIVEC_BUILTIN_VMSUMMBM },
8660 { MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhm, "__builtin_altivec_vmsumuhm", ALTIVEC_BUILTIN_VMSUMUHM },
8661 { MASK_ALTIVEC, CODE_FOR_altivec_vmsumshm, "__builtin_altivec_vmsumshm", ALTIVEC_BUILTIN_VMSUMSHM },
8662 { MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhs, "__builtin_altivec_vmsumuhs", ALTIVEC_BUILTIN_VMSUMUHS },
8663 { MASK_ALTIVEC, CODE_FOR_altivec_vmsumshs, "__builtin_altivec_vmsumshs", ALTIVEC_BUILTIN_VMSUMSHS },
8664 { MASK_ALTIVEC, CODE_FOR_altivec_vnmsubfp, "__builtin_altivec_vnmsubfp", ALTIVEC_BUILTIN_VNMSUBFP },
8665 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v2df, "__builtin_altivec_vperm_2df", ALTIVEC_BUILTIN_VPERM_2DF },
8666 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v2di, "__builtin_altivec_vperm_2di", ALTIVEC_BUILTIN_VPERM_2DI },
8667 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4sf, "__builtin_altivec_vperm_4sf", ALTIVEC_BUILTIN_VPERM_4SF },
8668 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4si, "__builtin_altivec_vperm_4si", ALTIVEC_BUILTIN_VPERM_4SI },
8669 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v8hi, "__builtin_altivec_vperm_8hi", ALTIVEC_BUILTIN_VPERM_8HI },
8670 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v16qi_uns, "__builtin_altivec_vperm_16qi", ALTIVEC_BUILTIN_VPERM_16QI },
8671 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v2di_uns, "__builtin_altivec_vperm_2di_uns", ALTIVEC_BUILTIN_VPERM_2DI_UNS },
8672 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4si_uns, "__builtin_altivec_vperm_4si_uns", ALTIVEC_BUILTIN_VPERM_4SI_UNS },
8673 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v8hi_uns, "__builtin_altivec_vperm_8hi_uns", ALTIVEC_BUILTIN_VPERM_8HI_UNS },
8674 { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v16qi_uns, "__builtin_altivec_vperm_16qi_uns", ALTIVEC_BUILTIN_VPERM_16QI_UNS },
8675 { MASK_ALTIVEC, CODE_FOR_vector_select_v4sf, "__builtin_altivec_vsel_4sf", ALTIVEC_BUILTIN_VSEL_4SF },
8676 { MASK_ALTIVEC, CODE_FOR_vector_select_v4si, "__builtin_altivec_vsel_4si", ALTIVEC_BUILTIN_VSEL_4SI },
8677 { MASK_ALTIVEC, CODE_FOR_vector_select_v8hi, "__builtin_altivec_vsel_8hi", ALTIVEC_BUILTIN_VSEL_8HI },
8678 { MASK_ALTIVEC, CODE_FOR_vector_select_v16qi, "__builtin_altivec_vsel_16qi", ALTIVEC_BUILTIN_VSEL_16QI },
8679 { MASK_ALTIVEC, CODE_FOR_vector_select_v2df, "__builtin_altivec_vsel_2df", ALTIVEC_BUILTIN_VSEL_2DF },
8680 { MASK_ALTIVEC, CODE_FOR_vector_select_v2di, "__builtin_altivec_vsel_2di", ALTIVEC_BUILTIN_VSEL_2DI },
8681 { MASK_ALTIVEC, CODE_FOR_vector_select_v4si_uns, "__builtin_altivec_vsel_4si_uns", ALTIVEC_BUILTIN_VSEL_4SI_UNS },
8682 { MASK_ALTIVEC, CODE_FOR_vector_select_v8hi_uns, "__builtin_altivec_vsel_8hi_uns", ALTIVEC_BUILTIN_VSEL_8HI_UNS },
8683 { MASK_ALTIVEC, CODE_FOR_vector_select_v16qi_uns, "__builtin_altivec_vsel_16qi_uns", ALTIVEC_BUILTIN_VSEL_16QI_UNS },
8684 { MASK_ALTIVEC, CODE_FOR_vector_select_v2di_uns, "__builtin_altivec_vsel_2di_uns", ALTIVEC_BUILTIN_VSEL_2DI_UNS },
8685 { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v16qi, "__builtin_altivec_vsldoi_16qi", ALTIVEC_BUILTIN_VSLDOI_16QI },
8686 { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v8hi, "__builtin_altivec_vsldoi_8hi", ALTIVEC_BUILTIN_VSLDOI_8HI },
8687 { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v4si, "__builtin_altivec_vsldoi_4si", ALTIVEC_BUILTIN_VSLDOI_4SI },
8688 { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v4sf, "__builtin_altivec_vsldoi_4sf", ALTIVEC_BUILTIN_VSLDOI_4SF },
8690 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_madd", ALTIVEC_BUILTIN_VEC_MADD },
8691 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_madds", ALTIVEC_BUILTIN_VEC_MADDS },
8692 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mladd", ALTIVEC_BUILTIN_VEC_MLADD },
8693 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mradds", ALTIVEC_BUILTIN_VEC_MRADDS },
8694 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_msum", ALTIVEC_BUILTIN_VEC_MSUM },
8695 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumshm", ALTIVEC_BUILTIN_VEC_VMSUMSHM },
8696 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumuhm", ALTIVEC_BUILTIN_VEC_VMSUMUHM },
8697 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsummbm", ALTIVEC_BUILTIN_VEC_VMSUMMBM },
8698 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumubm", ALTIVEC_BUILTIN_VEC_VMSUMUBM },
8699 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_msums", ALTIVEC_BUILTIN_VEC_MSUMS },
8700 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumshs", ALTIVEC_BUILTIN_VEC_VMSUMSHS },
8701 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumuhs", ALTIVEC_BUILTIN_VEC_VMSUMUHS },
8702 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_nmsub", ALTIVEC_BUILTIN_VEC_NMSUB },
8703 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_perm", ALTIVEC_BUILTIN_VEC_PERM },
8704 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sel", ALTIVEC_BUILTIN_VEC_SEL },
8706 { MASK_VSX, CODE_FOR_vsx_fmaddv2df4, "__builtin_vsx_xvmadddp", VSX_BUILTIN_XVMADDDP },
8707 { MASK_VSX, CODE_FOR_vsx_fmsubv2df4, "__builtin_vsx_xvmsubdp", VSX_BUILTIN_XVMSUBDP },
8708 { MASK_VSX, CODE_FOR_vsx_fnmaddv2df4, "__builtin_vsx_xvnmadddp", VSX_BUILTIN_XVNMADDDP },
8709 { MASK_VSX, CODE_FOR_vsx_fnmsubv2df4, "__builtin_vsx_xvnmsubdp", VSX_BUILTIN_XVNMSUBDP },
8711 { MASK_VSX, CODE_FOR_vsx_fmaddv4sf4, "__builtin_vsx_xvmaddsp", VSX_BUILTIN_XVMADDSP },
8712 { MASK_VSX, CODE_FOR_vsx_fmsubv4sf4, "__builtin_vsx_xvmsubsp", VSX_BUILTIN_XVMSUBSP },
8713 { MASK_VSX, CODE_FOR_vsx_fnmaddv4sf4, "__builtin_vsx_xvnmaddsp", VSX_BUILTIN_XVNMADDSP },
8714 { MASK_VSX, CODE_FOR_vsx_fnmsubv4sf4, "__builtin_vsx_xvnmsubsp", VSX_BUILTIN_XVNMSUBSP },
8716 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_msub", VSX_BUILTIN_VEC_MSUB },
8717 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_nmadd", VSX_BUILTIN_VEC_NMADD },
8719 { MASK_VSX, CODE_FOR_vector_select_v2di, "__builtin_vsx_xxsel_2di", VSX_BUILTIN_XXSEL_2DI },
8720 { MASK_VSX, CODE_FOR_vector_select_v2df, "__builtin_vsx_xxsel_2df", VSX_BUILTIN_XXSEL_2DF },
8721 { MASK_VSX, CODE_FOR_vector_select_v4sf, "__builtin_vsx_xxsel_4sf", VSX_BUILTIN_XXSEL_4SF },
8722 { MASK_VSX, CODE_FOR_vector_select_v4si, "__builtin_vsx_xxsel_4si", VSX_BUILTIN_XXSEL_4SI },
8723 { MASK_VSX, CODE_FOR_vector_select_v8hi, "__builtin_vsx_xxsel_8hi", VSX_BUILTIN_XXSEL_8HI },
8724 { MASK_VSX, CODE_FOR_vector_select_v16qi, "__builtin_vsx_xxsel_16qi", VSX_BUILTIN_XXSEL_16QI },
8725 { MASK_VSX, CODE_FOR_vector_select_v2di_uns, "__builtin_vsx_xxsel_2di_uns", VSX_BUILTIN_XXSEL_2DI_UNS },
8726 { MASK_VSX, CODE_FOR_vector_select_v4si_uns, "__builtin_vsx_xxsel_4si_uns", VSX_BUILTIN_XXSEL_4SI_UNS },
8727 { MASK_VSX, CODE_FOR_vector_select_v8hi_uns, "__builtin_vsx_xxsel_8hi_uns", VSX_BUILTIN_XXSEL_8HI_UNS },
8728 { MASK_VSX, CODE_FOR_vector_select_v16qi_uns, "__builtin_vsx_xxsel_16qi_uns", VSX_BUILTIN_XXSEL_16QI_UNS },
8730 { MASK_VSX, CODE_FOR_altivec_vperm_v2di, "__builtin_vsx_vperm_2di", VSX_BUILTIN_VPERM_2DI },
8731 { MASK_VSX, CODE_FOR_altivec_vperm_v2df, "__builtin_vsx_vperm_2df", VSX_BUILTIN_VPERM_2DF },
8732 { MASK_VSX, CODE_FOR_altivec_vperm_v4sf, "__builtin_vsx_vperm_4sf", VSX_BUILTIN_VPERM_4SF },
8733 { MASK_VSX, CODE_FOR_altivec_vperm_v4si, "__builtin_vsx_vperm_4si", VSX_BUILTIN_VPERM_4SI },
8734 { MASK_VSX, CODE_FOR_altivec_vperm_v8hi, "__builtin_vsx_vperm_8hi", VSX_BUILTIN_VPERM_8HI },
8735 { MASK_VSX, CODE_FOR_altivec_vperm_v16qi, "__builtin_vsx_vperm_16qi", VSX_BUILTIN_VPERM_16QI },
8736 { MASK_VSX, CODE_FOR_altivec_vperm_v2di_uns, "__builtin_vsx_vperm_2di_uns", VSX_BUILTIN_VPERM_2DI_UNS },
8737 { MASK_VSX, CODE_FOR_altivec_vperm_v4si_uns, "__builtin_vsx_vperm_4si_uns", VSX_BUILTIN_VPERM_4SI_UNS },
8738 { MASK_VSX, CODE_FOR_altivec_vperm_v8hi_uns, "__builtin_vsx_vperm_8hi_uns", VSX_BUILTIN_VPERM_8HI_UNS },
8739 { MASK_VSX, CODE_FOR_altivec_vperm_v16qi_uns, "__builtin_vsx_vperm_16qi_uns", VSX_BUILTIN_VPERM_16QI_UNS },
8741 { MASK_VSX, CODE_FOR_vsx_xxpermdi_v2df, "__builtin_vsx_xxpermdi_2df", VSX_BUILTIN_XXPERMDI_2DF },
8742 { MASK_VSX, CODE_FOR_vsx_xxpermdi_v2di, "__builtin_vsx_xxpermdi_2di", VSX_BUILTIN_XXPERMDI_2DI },
8743 { MASK_VSX, CODE_FOR_vsx_xxpermdi_v4sf, "__builtin_vsx_xxpermdi_4sf", VSX_BUILTIN_XXPERMDI_4SF },
8744 { MASK_VSX, CODE_FOR_vsx_xxpermdi_v4si, "__builtin_vsx_xxpermdi_4si", VSX_BUILTIN_XXPERMDI_4SI },
8745 { MASK_VSX, CODE_FOR_vsx_xxpermdi_v8hi, "__builtin_vsx_xxpermdi_8hi", VSX_BUILTIN_XXPERMDI_8HI },
8746 { MASK_VSX, CODE_FOR_vsx_xxpermdi_v16qi, "__builtin_vsx_xxpermdi_16qi", VSX_BUILTIN_XXPERMDI_16QI },
8747 { MASK_VSX, CODE_FOR_nothing, "__builtin_vsx_xxpermdi", VSX_BUILTIN_VEC_XXPERMDI },
8748 { MASK_VSX, CODE_FOR_vsx_set_v2df, "__builtin_vsx_set_2df", VSX_BUILTIN_SET_2DF },
8749 { MASK_VSX, CODE_FOR_vsx_set_v2di, "__builtin_vsx_set_2di", VSX_BUILTIN_SET_2DI },
8751 { MASK_VSX, CODE_FOR_vsx_xxsldwi_v2di, "__builtin_vsx_xxsldwi_2di", VSX_BUILTIN_XXSLDWI_2DI },
8752 { MASK_VSX, CODE_FOR_vsx_xxsldwi_v2df, "__builtin_vsx_xxsldwi_2df", VSX_BUILTIN_XXSLDWI_2DF },
8753 { MASK_VSX, CODE_FOR_vsx_xxsldwi_v4sf, "__builtin_vsx_xxsldwi_4sf", VSX_BUILTIN_XXSLDWI_4SF },
8754 { MASK_VSX, CODE_FOR_vsx_xxsldwi_v4si, "__builtin_vsx_xxsldwi_4si", VSX_BUILTIN_XXSLDWI_4SI },
8755 { MASK_VSX, CODE_FOR_vsx_xxsldwi_v8hi, "__builtin_vsx_xxsldwi_8hi", VSX_BUILTIN_XXSLDWI_8HI },
8756 { MASK_VSX, CODE_FOR_vsx_xxsldwi_v16qi, "__builtin_vsx_xxsldwi_16qi", VSX_BUILTIN_XXSLDWI_16QI },
8757 { MASK_VSX, CODE_FOR_nothing, "__builtin_vsx_xxsldwi", VSX_BUILTIN_VEC_XXSLDWI },
8759 { 0, CODE_FOR_paired_msub, "__builtin_paired_msub", PAIRED_BUILTIN_MSUB },
8760 { 0, CODE_FOR_paired_madd, "__builtin_paired_madd", PAIRED_BUILTIN_MADD },
8761 { 0, CODE_FOR_paired_madds0, "__builtin_paired_madds0", PAIRED_BUILTIN_MADDS0 },
8762 { 0, CODE_FOR_paired_madds1, "__builtin_paired_madds1", PAIRED_BUILTIN_MADDS1 },
8763 { 0, CODE_FOR_paired_nmsub, "__builtin_paired_nmsub", PAIRED_BUILTIN_NMSUB },
8764 { 0, CODE_FOR_paired_nmadd, "__builtin_paired_nmadd", PAIRED_BUILTIN_NMADD },
8765 { 0, CODE_FOR_paired_sum0, "__builtin_paired_sum0", PAIRED_BUILTIN_SUM0 },
8766 { 0, CODE_FOR_paired_sum1, "__builtin_paired_sum1", PAIRED_BUILTIN_SUM1 },
8767 { 0, CODE_FOR_selv2sf4, "__builtin_paired_selv2sf4", PAIRED_BUILTIN_SELV2SF4 },
8770 /* DST operations: void foo (void *, const int, const char). */
8772 static const struct builtin_description bdesc_dst[] =
8774 { MASK_ALTIVEC, CODE_FOR_altivec_dst, "__builtin_altivec_dst", ALTIVEC_BUILTIN_DST },
8775 { MASK_ALTIVEC, CODE_FOR_altivec_dstt, "__builtin_altivec_dstt", ALTIVEC_BUILTIN_DSTT },
8776 { MASK_ALTIVEC, CODE_FOR_altivec_dstst, "__builtin_altivec_dstst", ALTIVEC_BUILTIN_DSTST },
8777 { MASK_ALTIVEC, CODE_FOR_altivec_dststt, "__builtin_altivec_dststt", ALTIVEC_BUILTIN_DSTSTT },
8779 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dst", ALTIVEC_BUILTIN_VEC_DST },
8780 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dstt", ALTIVEC_BUILTIN_VEC_DSTT },
8781 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dstst", ALTIVEC_BUILTIN_VEC_DSTST },
8782 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dststt", ALTIVEC_BUILTIN_VEC_DSTSTT }
8785 /* Simple binary operations: VECc = foo (VECa, VECb). */
8787 static struct builtin_description bdesc_2arg[] =
8789 { MASK_ALTIVEC, CODE_FOR_addv16qi3, "__builtin_altivec_vaddubm", ALTIVEC_BUILTIN_VADDUBM },
8790 { MASK_ALTIVEC, CODE_FOR_addv8hi3, "__builtin_altivec_vadduhm", ALTIVEC_BUILTIN_VADDUHM },
8791 { MASK_ALTIVEC, CODE_FOR_addv4si3, "__builtin_altivec_vadduwm", ALTIVEC_BUILTIN_VADDUWM },
8792 { MASK_ALTIVEC, CODE_FOR_addv4sf3, "__builtin_altivec_vaddfp", ALTIVEC_BUILTIN_VADDFP },
8793 { MASK_ALTIVEC, CODE_FOR_altivec_vaddcuw, "__builtin_altivec_vaddcuw", ALTIVEC_BUILTIN_VADDCUW },
8794 { MASK_ALTIVEC, CODE_FOR_altivec_vaddubs, "__builtin_altivec_vaddubs", ALTIVEC_BUILTIN_VADDUBS },
8795 { MASK_ALTIVEC, CODE_FOR_altivec_vaddsbs, "__builtin_altivec_vaddsbs", ALTIVEC_BUILTIN_VADDSBS },
8796 { MASK_ALTIVEC, CODE_FOR_altivec_vadduhs, "__builtin_altivec_vadduhs", ALTIVEC_BUILTIN_VADDUHS },
8797 { MASK_ALTIVEC, CODE_FOR_altivec_vaddshs, "__builtin_altivec_vaddshs", ALTIVEC_BUILTIN_VADDSHS },
8798 { MASK_ALTIVEC, CODE_FOR_altivec_vadduws, "__builtin_altivec_vadduws", ALTIVEC_BUILTIN_VADDUWS },
8799 { MASK_ALTIVEC, CODE_FOR_altivec_vaddsws, "__builtin_altivec_vaddsws", ALTIVEC_BUILTIN_VADDSWS },
8800 { MASK_ALTIVEC, CODE_FOR_andv4si3, "__builtin_altivec_vand", ALTIVEC_BUILTIN_VAND },
8801 { MASK_ALTIVEC, CODE_FOR_andcv4si3, "__builtin_altivec_vandc", ALTIVEC_BUILTIN_VANDC },
8802 { MASK_ALTIVEC, CODE_FOR_altivec_vavgub, "__builtin_altivec_vavgub", ALTIVEC_BUILTIN_VAVGUB },
8803 { MASK_ALTIVEC, CODE_FOR_altivec_vavgsb, "__builtin_altivec_vavgsb", ALTIVEC_BUILTIN_VAVGSB },
8804 { MASK_ALTIVEC, CODE_FOR_altivec_vavguh, "__builtin_altivec_vavguh", ALTIVEC_BUILTIN_VAVGUH },
8805 { MASK_ALTIVEC, CODE_FOR_altivec_vavgsh, "__builtin_altivec_vavgsh", ALTIVEC_BUILTIN_VAVGSH },
8806 { MASK_ALTIVEC, CODE_FOR_altivec_vavguw, "__builtin_altivec_vavguw", ALTIVEC_BUILTIN_VAVGUW },
8807 { MASK_ALTIVEC, CODE_FOR_altivec_vavgsw, "__builtin_altivec_vavgsw", ALTIVEC_BUILTIN_VAVGSW },
8808 { MASK_ALTIVEC, CODE_FOR_altivec_vcfux, "__builtin_altivec_vcfux", ALTIVEC_BUILTIN_VCFUX },
8809 { MASK_ALTIVEC, CODE_FOR_altivec_vcfsx, "__builtin_altivec_vcfsx", ALTIVEC_BUILTIN_VCFSX },
8810 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpbfp, "__builtin_altivec_vcmpbfp", ALTIVEC_BUILTIN_VCMPBFP },
8811 { MASK_ALTIVEC, CODE_FOR_vector_eqv16qi, "__builtin_altivec_vcmpequb", ALTIVEC_BUILTIN_VCMPEQUB },
8812 { MASK_ALTIVEC, CODE_FOR_vector_eqv8hi, "__builtin_altivec_vcmpequh", ALTIVEC_BUILTIN_VCMPEQUH },
8813 { MASK_ALTIVEC, CODE_FOR_vector_eqv4si, "__builtin_altivec_vcmpequw", ALTIVEC_BUILTIN_VCMPEQUW },
8814 { MASK_ALTIVEC, CODE_FOR_vector_eqv4sf, "__builtin_altivec_vcmpeqfp", ALTIVEC_BUILTIN_VCMPEQFP },
8815 { MASK_ALTIVEC, CODE_FOR_vector_gev4sf, "__builtin_altivec_vcmpgefp", ALTIVEC_BUILTIN_VCMPGEFP },
8816 { MASK_ALTIVEC, CODE_FOR_vector_gtuv16qi, "__builtin_altivec_vcmpgtub", ALTIVEC_BUILTIN_VCMPGTUB },
8817 { MASK_ALTIVEC, CODE_FOR_vector_gtv16qi, "__builtin_altivec_vcmpgtsb", ALTIVEC_BUILTIN_VCMPGTSB },
8818 { MASK_ALTIVEC, CODE_FOR_vector_gtuv8hi, "__builtin_altivec_vcmpgtuh", ALTIVEC_BUILTIN_VCMPGTUH },
8819 { MASK_ALTIVEC, CODE_FOR_vector_gtv8hi, "__builtin_altivec_vcmpgtsh", ALTIVEC_BUILTIN_VCMPGTSH },
8820 { MASK_ALTIVEC, CODE_FOR_vector_gtuv4si, "__builtin_altivec_vcmpgtuw", ALTIVEC_BUILTIN_VCMPGTUW },
8821 { MASK_ALTIVEC, CODE_FOR_vector_gtv4si, "__builtin_altivec_vcmpgtsw", ALTIVEC_BUILTIN_VCMPGTSW },
8822 { MASK_ALTIVEC, CODE_FOR_vector_gtv4sf, "__builtin_altivec_vcmpgtfp", ALTIVEC_BUILTIN_VCMPGTFP },
8823 { MASK_ALTIVEC, CODE_FOR_altivec_vctsxs, "__builtin_altivec_vctsxs", ALTIVEC_BUILTIN_VCTSXS },
8824 { MASK_ALTIVEC, CODE_FOR_altivec_vctuxs, "__builtin_altivec_vctuxs", ALTIVEC_BUILTIN_VCTUXS },
8825 { MASK_ALTIVEC, CODE_FOR_umaxv16qi3, "__builtin_altivec_vmaxub", ALTIVEC_BUILTIN_VMAXUB },
8826 { MASK_ALTIVEC, CODE_FOR_smaxv16qi3, "__builtin_altivec_vmaxsb", ALTIVEC_BUILTIN_VMAXSB },
8827 { MASK_ALTIVEC, CODE_FOR_umaxv8hi3, "__builtin_altivec_vmaxuh", ALTIVEC_BUILTIN_VMAXUH },
8828 { MASK_ALTIVEC, CODE_FOR_smaxv8hi3, "__builtin_altivec_vmaxsh", ALTIVEC_BUILTIN_VMAXSH },
8829 { MASK_ALTIVEC, CODE_FOR_umaxv4si3, "__builtin_altivec_vmaxuw", ALTIVEC_BUILTIN_VMAXUW },
8830 { MASK_ALTIVEC, CODE_FOR_smaxv4si3, "__builtin_altivec_vmaxsw", ALTIVEC_BUILTIN_VMAXSW },
8831 { MASK_ALTIVEC, CODE_FOR_smaxv4sf3, "__builtin_altivec_vmaxfp", ALTIVEC_BUILTIN_VMAXFP },
8832 { MASK_ALTIVEC, CODE_FOR_altivec_vmrghb, "__builtin_altivec_vmrghb", ALTIVEC_BUILTIN_VMRGHB },
8833 { MASK_ALTIVEC, CODE_FOR_altivec_vmrghh, "__builtin_altivec_vmrghh", ALTIVEC_BUILTIN_VMRGHH },
8834 { MASK_ALTIVEC, CODE_FOR_altivec_vmrghw, "__builtin_altivec_vmrghw", ALTIVEC_BUILTIN_VMRGHW },
8835 { MASK_ALTIVEC, CODE_FOR_altivec_vmrglb, "__builtin_altivec_vmrglb", ALTIVEC_BUILTIN_VMRGLB },
8836 { MASK_ALTIVEC, CODE_FOR_altivec_vmrglh, "__builtin_altivec_vmrglh", ALTIVEC_BUILTIN_VMRGLH },
8837 { MASK_ALTIVEC, CODE_FOR_altivec_vmrglw, "__builtin_altivec_vmrglw", ALTIVEC_BUILTIN_VMRGLW },
8838 { MASK_ALTIVEC, CODE_FOR_uminv16qi3, "__builtin_altivec_vminub", ALTIVEC_BUILTIN_VMINUB },
8839 { MASK_ALTIVEC, CODE_FOR_sminv16qi3, "__builtin_altivec_vminsb", ALTIVEC_BUILTIN_VMINSB },
8840 { MASK_ALTIVEC, CODE_FOR_uminv8hi3, "__builtin_altivec_vminuh", ALTIVEC_BUILTIN_VMINUH },
8841 { MASK_ALTIVEC, CODE_FOR_sminv8hi3, "__builtin_altivec_vminsh", ALTIVEC_BUILTIN_VMINSH },
8842 { MASK_ALTIVEC, CODE_FOR_uminv4si3, "__builtin_altivec_vminuw", ALTIVEC_BUILTIN_VMINUW },
8843 { MASK_ALTIVEC, CODE_FOR_sminv4si3, "__builtin_altivec_vminsw", ALTIVEC_BUILTIN_VMINSW },
8844 { MASK_ALTIVEC, CODE_FOR_sminv4sf3, "__builtin_altivec_vminfp", ALTIVEC_BUILTIN_VMINFP },
8845 { MASK_ALTIVEC, CODE_FOR_altivec_vmuleub, "__builtin_altivec_vmuleub", ALTIVEC_BUILTIN_VMULEUB },
8846 { MASK_ALTIVEC, CODE_FOR_altivec_vmuleub, "__builtin_altivec_vmuleub_uns", ALTIVEC_BUILTIN_VMULEUB_UNS },
8847 { MASK_ALTIVEC, CODE_FOR_altivec_vmulesb, "__builtin_altivec_vmulesb", ALTIVEC_BUILTIN_VMULESB },
8848 { MASK_ALTIVEC, CODE_FOR_altivec_vmuleuh, "__builtin_altivec_vmuleuh", ALTIVEC_BUILTIN_VMULEUH },
8849 { MASK_ALTIVEC, CODE_FOR_altivec_vmuleuh, "__builtin_altivec_vmuleuh_uns", ALTIVEC_BUILTIN_VMULEUH_UNS },
8850 { MASK_ALTIVEC, CODE_FOR_altivec_vmulesh, "__builtin_altivec_vmulesh", ALTIVEC_BUILTIN_VMULESH },
8851 { MASK_ALTIVEC, CODE_FOR_altivec_vmuloub, "__builtin_altivec_vmuloub", ALTIVEC_BUILTIN_VMULOUB },
8852 { MASK_ALTIVEC, CODE_FOR_altivec_vmuloub, "__builtin_altivec_vmuloub_uns", ALTIVEC_BUILTIN_VMULOUB_UNS },
8853 { MASK_ALTIVEC, CODE_FOR_altivec_vmulosb, "__builtin_altivec_vmulosb", ALTIVEC_BUILTIN_VMULOSB },
8854 { MASK_ALTIVEC, CODE_FOR_altivec_vmulouh, "__builtin_altivec_vmulouh", ALTIVEC_BUILTIN_VMULOUH },
8855 { MASK_ALTIVEC, CODE_FOR_altivec_vmulouh, "__builtin_altivec_vmulouh_uns", ALTIVEC_BUILTIN_VMULOUH_UNS },
8856 { MASK_ALTIVEC, CODE_FOR_altivec_vmulosh, "__builtin_altivec_vmulosh", ALTIVEC_BUILTIN_VMULOSH },
8857 { MASK_ALTIVEC, CODE_FOR_norv4si3, "__builtin_altivec_vnor", ALTIVEC_BUILTIN_VNOR },
8858 { MASK_ALTIVEC, CODE_FOR_iorv4si3, "__builtin_altivec_vor", ALTIVEC_BUILTIN_VOR },
8859 { MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum, "__builtin_altivec_vpkuhum", ALTIVEC_BUILTIN_VPKUHUM },
8860 { MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum, "__builtin_altivec_vpkuwum", ALTIVEC_BUILTIN_VPKUWUM },
8861 { MASK_ALTIVEC, CODE_FOR_altivec_vpkpx, "__builtin_altivec_vpkpx", ALTIVEC_BUILTIN_VPKPX },
8862 { MASK_ALTIVEC, CODE_FOR_altivec_vpkshss, "__builtin_altivec_vpkshss", ALTIVEC_BUILTIN_VPKSHSS },
8863 { MASK_ALTIVEC, CODE_FOR_altivec_vpkswss, "__builtin_altivec_vpkswss", ALTIVEC_BUILTIN_VPKSWSS },
8864 { MASK_ALTIVEC, CODE_FOR_altivec_vpkuhus, "__builtin_altivec_vpkuhus", ALTIVEC_BUILTIN_VPKUHUS },
8865 { MASK_ALTIVEC, CODE_FOR_altivec_vpkshus, "__builtin_altivec_vpkshus", ALTIVEC_BUILTIN_VPKSHUS },
8866 { MASK_ALTIVEC, CODE_FOR_altivec_vpkuwus, "__builtin_altivec_vpkuwus", ALTIVEC_BUILTIN_VPKUWUS },
8867 { MASK_ALTIVEC, CODE_FOR_altivec_vpkswus, "__builtin_altivec_vpkswus", ALTIVEC_BUILTIN_VPKSWUS },
8868 { MASK_ALTIVEC, CODE_FOR_vrotlv16qi3, "__builtin_altivec_vrlb", ALTIVEC_BUILTIN_VRLB },
8869 { MASK_ALTIVEC, CODE_FOR_vrotlv8hi3, "__builtin_altivec_vrlh", ALTIVEC_BUILTIN_VRLH },
8870 { MASK_ALTIVEC, CODE_FOR_vrotlv4si3, "__builtin_altivec_vrlw", ALTIVEC_BUILTIN_VRLW },
8871 { MASK_ALTIVEC, CODE_FOR_vashlv16qi3, "__builtin_altivec_vslb", ALTIVEC_BUILTIN_VSLB },
8872 { MASK_ALTIVEC, CODE_FOR_vashlv8hi3, "__builtin_altivec_vslh", ALTIVEC_BUILTIN_VSLH },
8873 { MASK_ALTIVEC, CODE_FOR_vashlv4si3, "__builtin_altivec_vslw", ALTIVEC_BUILTIN_VSLW },
8874 { MASK_ALTIVEC, CODE_FOR_altivec_vsl, "__builtin_altivec_vsl", ALTIVEC_BUILTIN_VSL },
8875 { MASK_ALTIVEC, CODE_FOR_altivec_vslo, "__builtin_altivec_vslo", ALTIVEC_BUILTIN_VSLO },
8876 { MASK_ALTIVEC, CODE_FOR_altivec_vspltb, "__builtin_altivec_vspltb", ALTIVEC_BUILTIN_VSPLTB },
8877 { MASK_ALTIVEC, CODE_FOR_altivec_vsplth, "__builtin_altivec_vsplth", ALTIVEC_BUILTIN_VSPLTH },
8878 { MASK_ALTIVEC, CODE_FOR_altivec_vspltw, "__builtin_altivec_vspltw", ALTIVEC_BUILTIN_VSPLTW },
8879 { MASK_ALTIVEC, CODE_FOR_vlshrv16qi3, "__builtin_altivec_vsrb", ALTIVEC_BUILTIN_VSRB },
8880 { MASK_ALTIVEC, CODE_FOR_vlshrv8hi3, "__builtin_altivec_vsrh", ALTIVEC_BUILTIN_VSRH },
8881 { MASK_ALTIVEC, CODE_FOR_vlshrv4si3, "__builtin_altivec_vsrw", ALTIVEC_BUILTIN_VSRW },
8882 { MASK_ALTIVEC, CODE_FOR_vashrv16qi3, "__builtin_altivec_vsrab", ALTIVEC_BUILTIN_VSRAB },
8883 { MASK_ALTIVEC, CODE_FOR_vashrv8hi3, "__builtin_altivec_vsrah", ALTIVEC_BUILTIN_VSRAH },
8884 { MASK_ALTIVEC, CODE_FOR_vashrv4si3, "__builtin_altivec_vsraw", ALTIVEC_BUILTIN_VSRAW },
8885 { MASK_ALTIVEC, CODE_FOR_altivec_vsr, "__builtin_altivec_vsr", ALTIVEC_BUILTIN_VSR },
8886 { MASK_ALTIVEC, CODE_FOR_altivec_vsro, "__builtin_altivec_vsro", ALTIVEC_BUILTIN_VSRO },
8887 { MASK_ALTIVEC, CODE_FOR_subv16qi3, "__builtin_altivec_vsububm", ALTIVEC_BUILTIN_VSUBUBM },
8888 { MASK_ALTIVEC, CODE_FOR_subv8hi3, "__builtin_altivec_vsubuhm", ALTIVEC_BUILTIN_VSUBUHM },
8889 { MASK_ALTIVEC, CODE_FOR_subv4si3, "__builtin_altivec_vsubuwm", ALTIVEC_BUILTIN_VSUBUWM },
8890 { MASK_ALTIVEC, CODE_FOR_subv4sf3, "__builtin_altivec_vsubfp", ALTIVEC_BUILTIN_VSUBFP },
8891 { MASK_ALTIVEC, CODE_FOR_altivec_vsubcuw, "__builtin_altivec_vsubcuw", ALTIVEC_BUILTIN_VSUBCUW },
8892 { MASK_ALTIVEC, CODE_FOR_altivec_vsububs, "__builtin_altivec_vsububs", ALTIVEC_BUILTIN_VSUBUBS },
8893 { MASK_ALTIVEC, CODE_FOR_altivec_vsubsbs, "__builtin_altivec_vsubsbs", ALTIVEC_BUILTIN_VSUBSBS },
8894 { MASK_ALTIVEC, CODE_FOR_altivec_vsubuhs, "__builtin_altivec_vsubuhs", ALTIVEC_BUILTIN_VSUBUHS },
8895 { MASK_ALTIVEC, CODE_FOR_altivec_vsubshs, "__builtin_altivec_vsubshs", ALTIVEC_BUILTIN_VSUBSHS },
8896 { MASK_ALTIVEC, CODE_FOR_altivec_vsubuws, "__builtin_altivec_vsubuws", ALTIVEC_BUILTIN_VSUBUWS },
8897 { MASK_ALTIVEC, CODE_FOR_altivec_vsubsws, "__builtin_altivec_vsubsws", ALTIVEC_BUILTIN_VSUBSWS },
8898 { MASK_ALTIVEC, CODE_FOR_altivec_vsum4ubs, "__builtin_altivec_vsum4ubs", ALTIVEC_BUILTIN_VSUM4UBS },
8899 { MASK_ALTIVEC, CODE_FOR_altivec_vsum4sbs, "__builtin_altivec_vsum4sbs", ALTIVEC_BUILTIN_VSUM4SBS },
8900 { MASK_ALTIVEC, CODE_FOR_altivec_vsum4shs, "__builtin_altivec_vsum4shs", ALTIVEC_BUILTIN_VSUM4SHS },
8901 { MASK_ALTIVEC, CODE_FOR_altivec_vsum2sws, "__builtin_altivec_vsum2sws", ALTIVEC_BUILTIN_VSUM2SWS },
8902 { MASK_ALTIVEC, CODE_FOR_altivec_vsumsws, "__builtin_altivec_vsumsws", ALTIVEC_BUILTIN_VSUMSWS },
8903 { MASK_ALTIVEC, CODE_FOR_xorv4si3, "__builtin_altivec_vxor", ALTIVEC_BUILTIN_VXOR },
8904 { MASK_ALTIVEC, CODE_FOR_vector_copysignv4sf3, "__builtin_altivec_copysignfp", ALTIVEC_BUILTIN_COPYSIGN_V4SF },
8906 { MASK_VSX, CODE_FOR_addv2df3, "__builtin_vsx_xvadddp", VSX_BUILTIN_XVADDDP },
8907 { MASK_VSX, CODE_FOR_subv2df3, "__builtin_vsx_xvsubdp", VSX_BUILTIN_XVSUBDP },
8908 { MASK_VSX, CODE_FOR_mulv2df3, "__builtin_vsx_xvmuldp", VSX_BUILTIN_XVMULDP },
8909 { MASK_VSX, CODE_FOR_divv2df3, "__builtin_vsx_xvdivdp", VSX_BUILTIN_XVDIVDP },
8910 { MASK_VSX, CODE_FOR_sminv2df3, "__builtin_vsx_xvmindp", VSX_BUILTIN_XVMINDP },
8911 { MASK_VSX, CODE_FOR_smaxv2df3, "__builtin_vsx_xvmaxdp", VSX_BUILTIN_XVMAXDP },
8912 { MASK_VSX, CODE_FOR_vsx_tdivv2df3_fe, "__builtin_vsx_xvtdivdp_fe", VSX_BUILTIN_XVTDIVDP_FE },
8913 { MASK_VSX, CODE_FOR_vsx_tdivv2df3_fg, "__builtin_vsx_xvtdivdp_fg", VSX_BUILTIN_XVTDIVDP_FG },
8914 { MASK_VSX, CODE_FOR_vector_eqv2df, "__builtin_vsx_xvcmpeqdp", VSX_BUILTIN_XVCMPEQDP },
8915 { MASK_VSX, CODE_FOR_vector_gtv2df, "__builtin_vsx_xvcmpgtdp", VSX_BUILTIN_XVCMPGTDP },
8916 { MASK_VSX, CODE_FOR_vector_gev2df, "__builtin_vsx_xvcmpgedp", VSX_BUILTIN_XVCMPGEDP },
8918 { MASK_VSX, CODE_FOR_addv4sf3, "__builtin_vsx_xvaddsp", VSX_BUILTIN_XVADDSP },
8919 { MASK_VSX, CODE_FOR_subv4sf3, "__builtin_vsx_xvsubsp", VSX_BUILTIN_XVSUBSP },
8920 { MASK_VSX, CODE_FOR_mulv4sf3, "__builtin_vsx_xvmulsp", VSX_BUILTIN_XVMULSP },
8921 { MASK_VSX, CODE_FOR_divv4sf3, "__builtin_vsx_xvdivsp", VSX_BUILTIN_XVDIVSP },
8922 { MASK_VSX, CODE_FOR_sminv4sf3, "__builtin_vsx_xvminsp", VSX_BUILTIN_XVMINSP },
8923 { MASK_VSX, CODE_FOR_smaxv4sf3, "__builtin_vsx_xvmaxsp", VSX_BUILTIN_XVMAXSP },
8924 { MASK_VSX, CODE_FOR_vsx_tdivv4sf3_fe, "__builtin_vsx_xvtdivsp_fe", VSX_BUILTIN_XVTDIVSP_FE },
8925 { MASK_VSX, CODE_FOR_vsx_tdivv4sf3_fg, "__builtin_vsx_xvtdivsp_fg", VSX_BUILTIN_XVTDIVSP_FG },
8926 { MASK_VSX, CODE_FOR_vector_eqv4sf, "__builtin_vsx_xvcmpeqsp", VSX_BUILTIN_XVCMPEQSP },
8927 { MASK_VSX, CODE_FOR_vector_gtv4sf, "__builtin_vsx_xvcmpgtsp", VSX_BUILTIN_XVCMPGTSP },
8928 { MASK_VSX, CODE_FOR_vector_gev4sf, "__builtin_vsx_xvcmpgesp", VSX_BUILTIN_XVCMPGESP },
8930 { MASK_VSX, CODE_FOR_smindf3, "__builtin_vsx_xsmindp", VSX_BUILTIN_XSMINDP },
8931 { MASK_VSX, CODE_FOR_smaxdf3, "__builtin_vsx_xsmaxdp", VSX_BUILTIN_XSMAXDP },
8932 { MASK_VSX, CODE_FOR_vsx_tdivdf3_fe, "__builtin_vsx_xstdivdp_fe", VSX_BUILTIN_XSTDIVDP_FE },
8933 { MASK_VSX, CODE_FOR_vsx_tdivdf3_fg, "__builtin_vsx_xstdivdp_fg", VSX_BUILTIN_XSTDIVDP_FG },
8934 { MASK_VSX, CODE_FOR_vector_copysignv2df3, "__builtin_vsx_cpsgndp", VSX_BUILTIN_CPSGNDP },
8935 { MASK_VSX, CODE_FOR_vector_copysignv4sf3, "__builtin_vsx_cpsgnsp", VSX_BUILTIN_CPSGNSP },
8937 { MASK_VSX, CODE_FOR_vsx_concat_v2df, "__builtin_vsx_concat_2df", VSX_BUILTIN_CONCAT_2DF },
8938 { MASK_VSX, CODE_FOR_vsx_concat_v2di, "__builtin_vsx_concat_2di", VSX_BUILTIN_CONCAT_2DI },
8939 { MASK_VSX, CODE_FOR_vsx_splat_v2df, "__builtin_vsx_splat_2df", VSX_BUILTIN_SPLAT_2DF },
8940 { MASK_VSX, CODE_FOR_vsx_splat_v2di, "__builtin_vsx_splat_2di", VSX_BUILTIN_SPLAT_2DI },
8941 { MASK_VSX, CODE_FOR_vsx_xxmrghw_v4sf, "__builtin_vsx_xxmrghw", VSX_BUILTIN_XXMRGHW_4SF },
8942 { MASK_VSX, CODE_FOR_vsx_xxmrghw_v4si, "__builtin_vsx_xxmrghw_4si", VSX_BUILTIN_XXMRGHW_4SI },
8943 { MASK_VSX, CODE_FOR_vsx_xxmrglw_v4sf, "__builtin_vsx_xxmrglw", VSX_BUILTIN_XXMRGLW_4SF },
8944 { MASK_VSX, CODE_FOR_vsx_xxmrglw_v4si, "__builtin_vsx_xxmrglw_4si", VSX_BUILTIN_XXMRGLW_4SI },
8945 { MASK_VSX, CODE_FOR_vec_interleave_lowv2df, "__builtin_vsx_mergel_2df", VSX_BUILTIN_VEC_MERGEL_V2DF },
8946 { MASK_VSX, CODE_FOR_vec_interleave_lowv2di, "__builtin_vsx_mergel_2di", VSX_BUILTIN_VEC_MERGEL_V2DI },
8947 { MASK_VSX, CODE_FOR_vec_interleave_highv2df, "__builtin_vsx_mergeh_2df", VSX_BUILTIN_VEC_MERGEH_V2DF },
8948 { MASK_VSX, CODE_FOR_vec_interleave_highv2di, "__builtin_vsx_mergeh_2di", VSX_BUILTIN_VEC_MERGEH_V2DI },
8950 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_add", ALTIVEC_BUILTIN_VEC_ADD },
8951 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_vaddfp", ALTIVEC_BUILTIN_VEC_VADDFP },
8952 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduwm", ALTIVEC_BUILTIN_VEC_VADDUWM },
8953 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduhm", ALTIVEC_BUILTIN_VEC_VADDUHM },
8954 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddubm", ALTIVEC_BUILTIN_VEC_VADDUBM },
8955 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_addc", ALTIVEC_BUILTIN_VEC_ADDC },
8956 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_adds", ALTIVEC_BUILTIN_VEC_ADDS },
8957 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddsws", ALTIVEC_BUILTIN_VEC_VADDSWS },
8958 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduws", ALTIVEC_BUILTIN_VEC_VADDUWS },
8959 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddshs", ALTIVEC_BUILTIN_VEC_VADDSHS },
8960 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduhs", ALTIVEC_BUILTIN_VEC_VADDUHS },
8961 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddsbs", ALTIVEC_BUILTIN_VEC_VADDSBS },
8962 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddubs", ALTIVEC_BUILTIN_VEC_VADDUBS },
8963 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_and", ALTIVEC_BUILTIN_VEC_AND },
8964 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_andc", ALTIVEC_BUILTIN_VEC_ANDC },
8965 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_avg", ALTIVEC_BUILTIN_VEC_AVG },
8966 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsw", ALTIVEC_BUILTIN_VEC_VAVGSW },
8967 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavguw", ALTIVEC_BUILTIN_VEC_VAVGUW },
8968 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsh", ALTIVEC_BUILTIN_VEC_VAVGSH },
8969 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavguh", ALTIVEC_BUILTIN_VEC_VAVGUH },
8970 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsb", ALTIVEC_BUILTIN_VEC_VAVGSB },
8971 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgub", ALTIVEC_BUILTIN_VEC_VAVGUB },
8972 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpb", ALTIVEC_BUILTIN_VEC_CMPB },
8973 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpeq", ALTIVEC_BUILTIN_VEC_CMPEQ },
8974 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpeqfp", ALTIVEC_BUILTIN_VEC_VCMPEQFP },
8975 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequw", ALTIVEC_BUILTIN_VEC_VCMPEQUW },
8976 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequh", ALTIVEC_BUILTIN_VEC_VCMPEQUH },
8977 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequb", ALTIVEC_BUILTIN_VEC_VCMPEQUB },
8978 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpge", ALTIVEC_BUILTIN_VEC_CMPGE },
8979 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpgt", ALTIVEC_BUILTIN_VEC_CMPGT },
8980 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtfp", ALTIVEC_BUILTIN_VEC_VCMPGTFP },
8981 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsw", ALTIVEC_BUILTIN_VEC_VCMPGTSW },
8982 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtuw", ALTIVEC_BUILTIN_VEC_VCMPGTUW },
8983 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsh", ALTIVEC_BUILTIN_VEC_VCMPGTSH },
8984 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtuh", ALTIVEC_BUILTIN_VEC_VCMPGTUH },
8985 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsb", ALTIVEC_BUILTIN_VEC_VCMPGTSB },
8986 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtub", ALTIVEC_BUILTIN_VEC_VCMPGTUB },
8987 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmple", ALTIVEC_BUILTIN_VEC_CMPLE },
8988 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmplt", ALTIVEC_BUILTIN_VEC_CMPLT },
8989 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_copysign", ALTIVEC_BUILTIN_VEC_COPYSIGN },
8990 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_max", ALTIVEC_BUILTIN_VEC_MAX },
8991 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_vmaxfp", ALTIVEC_BUILTIN_VEC_VMAXFP },
8992 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsw", ALTIVEC_BUILTIN_VEC_VMAXSW },
8993 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxuw", ALTIVEC_BUILTIN_VEC_VMAXUW },
8994 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsh", ALTIVEC_BUILTIN_VEC_VMAXSH },
8995 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxuh", ALTIVEC_BUILTIN_VEC_VMAXUH },
8996 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsb", ALTIVEC_BUILTIN_VEC_VMAXSB },
8997 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxub", ALTIVEC_BUILTIN_VEC_VMAXUB },
8998 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mergeh", ALTIVEC_BUILTIN_VEC_MERGEH },
8999 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghw", ALTIVEC_BUILTIN_VEC_VMRGHW },
9000 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghh", ALTIVEC_BUILTIN_VEC_VMRGHH },
9001 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghb", ALTIVEC_BUILTIN_VEC_VMRGHB },
9002 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mergel", ALTIVEC_BUILTIN_VEC_MERGEL },
9003 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglw", ALTIVEC_BUILTIN_VEC_VMRGLW },
9004 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglh", ALTIVEC_BUILTIN_VEC_VMRGLH },
9005 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglb", ALTIVEC_BUILTIN_VEC_VMRGLB },
9006 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_min", ALTIVEC_BUILTIN_VEC_MIN },
9007 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_vminfp", ALTIVEC_BUILTIN_VEC_VMINFP },
9008 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsw", ALTIVEC_BUILTIN_VEC_VMINSW },
9009 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminuw", ALTIVEC_BUILTIN_VEC_VMINUW },
9010 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsh", ALTIVEC_BUILTIN_VEC_VMINSH },
9011 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminuh", ALTIVEC_BUILTIN_VEC_VMINUH },
9012 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsb", ALTIVEC_BUILTIN_VEC_VMINSB },
9013 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminub", ALTIVEC_BUILTIN_VEC_VMINUB },
9014 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mule", ALTIVEC_BUILTIN_VEC_MULE },
9015 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuleub", ALTIVEC_BUILTIN_VEC_VMULEUB },
9016 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulesb", ALTIVEC_BUILTIN_VEC_VMULESB },
9017 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuleuh", ALTIVEC_BUILTIN_VEC_VMULEUH },
9018 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulesh", ALTIVEC_BUILTIN_VEC_VMULESH },
9019 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mulo", ALTIVEC_BUILTIN_VEC_MULO },
9020 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulosh", ALTIVEC_BUILTIN_VEC_VMULOSH },
9021 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulouh", ALTIVEC_BUILTIN_VEC_VMULOUH },
9022 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulosb", ALTIVEC_BUILTIN_VEC_VMULOSB },
9023 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuloub", ALTIVEC_BUILTIN_VEC_VMULOUB },
9024 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_nor", ALTIVEC_BUILTIN_VEC_NOR },
9025 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_or", ALTIVEC_BUILTIN_VEC_OR },
9026 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_pack", ALTIVEC_BUILTIN_VEC_PACK },
9027 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuwum", ALTIVEC_BUILTIN_VEC_VPKUWUM },
9028 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuhum", ALTIVEC_BUILTIN_VEC_VPKUHUM },
9029 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packpx", ALTIVEC_BUILTIN_VEC_PACKPX },
9030 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packs", ALTIVEC_BUILTIN_VEC_PACKS },
9031 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkswss", ALTIVEC_BUILTIN_VEC_VPKSWSS },
9032 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuwus", ALTIVEC_BUILTIN_VEC_VPKUWUS },
9033 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkshss", ALTIVEC_BUILTIN_VEC_VPKSHSS },
9034 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuhus", ALTIVEC_BUILTIN_VEC_VPKUHUS },
9035 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packsu", ALTIVEC_BUILTIN_VEC_PACKSU },
9036 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkswus", ALTIVEC_BUILTIN_VEC_VPKSWUS },
9037 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkshus", ALTIVEC_BUILTIN_VEC_VPKSHUS },
9038 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_rl", ALTIVEC_BUILTIN_VEC_RL },
9039 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlw", ALTIVEC_BUILTIN_VEC_VRLW },
9040 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlh", ALTIVEC_BUILTIN_VEC_VRLH },
9041 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlb", ALTIVEC_BUILTIN_VEC_VRLB },
9042 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sl", ALTIVEC_BUILTIN_VEC_SL },
9043 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslw", ALTIVEC_BUILTIN_VEC_VSLW },
9044 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslh", ALTIVEC_BUILTIN_VEC_VSLH },
9045 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslb", ALTIVEC_BUILTIN_VEC_VSLB },
9046 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sll", ALTIVEC_BUILTIN_VEC_SLL },
9047 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_slo", ALTIVEC_BUILTIN_VEC_SLO },
9048 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sr", ALTIVEC_BUILTIN_VEC_SR },
9049 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrw", ALTIVEC_BUILTIN_VEC_VSRW },
9050 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrh", ALTIVEC_BUILTIN_VEC_VSRH },
9051 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrb", ALTIVEC_BUILTIN_VEC_VSRB },
9052 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sra", ALTIVEC_BUILTIN_VEC_SRA },
9053 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsraw", ALTIVEC_BUILTIN_VEC_VSRAW },
9054 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrah", ALTIVEC_BUILTIN_VEC_VSRAH },
9055 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrab", ALTIVEC_BUILTIN_VEC_VSRAB },
9056 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_srl", ALTIVEC_BUILTIN_VEC_SRL },
9057 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sro", ALTIVEC_BUILTIN_VEC_SRO },
9058 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_sub", ALTIVEC_BUILTIN_VEC_SUB },
9059 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_vsubfp", ALTIVEC_BUILTIN_VEC_VSUBFP },
9060 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuwm", ALTIVEC_BUILTIN_VEC_VSUBUWM },
9061 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuhm", ALTIVEC_BUILTIN_VEC_VSUBUHM },
9062 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsububm", ALTIVEC_BUILTIN_VEC_VSUBUBM },
9063 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_subc", ALTIVEC_BUILTIN_VEC_SUBC },
9064 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_subs", ALTIVEC_BUILTIN_VEC_SUBS },
9065 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubsws", ALTIVEC_BUILTIN_VEC_VSUBSWS },
9066 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuws", ALTIVEC_BUILTIN_VEC_VSUBUWS },
9067 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubshs", ALTIVEC_BUILTIN_VEC_VSUBSHS },
9068 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuhs", ALTIVEC_BUILTIN_VEC_VSUBUHS },
9069 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubsbs", ALTIVEC_BUILTIN_VEC_VSUBSBS },
9070 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsububs", ALTIVEC_BUILTIN_VEC_VSUBUBS },
9071 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sum4s", ALTIVEC_BUILTIN_VEC_SUM4S },
9072 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4shs", ALTIVEC_BUILTIN_VEC_VSUM4SHS },
9073 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4sbs", ALTIVEC_BUILTIN_VEC_VSUM4SBS },
9074 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4ubs", ALTIVEC_BUILTIN_VEC_VSUM4UBS },
9075 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sum2s", ALTIVEC_BUILTIN_VEC_SUM2S },
9076 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sums", ALTIVEC_BUILTIN_VEC_SUMS },
9077 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_xor", ALTIVEC_BUILTIN_VEC_XOR },
9079 { MASK_VSX, CODE_FOR_nothing, "__builtin_vec_mul", VSX_BUILTIN_VEC_MUL },
9080 { MASK_VSX, CODE_FOR_nothing, "__builtin_vec_div", VSX_BUILTIN_VEC_DIV },
9082 { 0, CODE_FOR_paired_divv2sf3, "__builtin_paired_divv2sf3", PAIRED_BUILTIN_DIVV2SF3 },
9083 { 0, CODE_FOR_paired_addv2sf3, "__builtin_paired_addv2sf3", PAIRED_BUILTIN_ADDV2SF3 },
9084 { 0, CODE_FOR_paired_subv2sf3, "__builtin_paired_subv2sf3", PAIRED_BUILTIN_SUBV2SF3 },
9085 { 0, CODE_FOR_paired_mulv2sf3, "__builtin_paired_mulv2sf3", PAIRED_BUILTIN_MULV2SF3 },
9086 { 0, CODE_FOR_paired_muls0, "__builtin_paired_muls0", PAIRED_BUILTIN_MULS0 },
9087 { 0, CODE_FOR_paired_muls1, "__builtin_paired_muls1", PAIRED_BUILTIN_MULS1 },
9088 { 0, CODE_FOR_paired_merge00, "__builtin_paired_merge00", PAIRED_BUILTIN_MERGE00 },
9089 { 0, CODE_FOR_paired_merge01, "__builtin_paired_merge01", PAIRED_BUILTIN_MERGE01 },
9090 { 0, CODE_FOR_paired_merge10, "__builtin_paired_merge10", PAIRED_BUILTIN_MERGE10 },
9091 { 0, CODE_FOR_paired_merge11, "__builtin_paired_merge11", PAIRED_BUILTIN_MERGE11 },
9093 /* Placeholder; leave as the first SPE builtin.  */
9094 { 0, CODE_FOR_addv2si3, "__builtin_spe_evaddw", SPE_BUILTIN_EVADDW },
9095 { 0, CODE_FOR_andv2si3, "__builtin_spe_evand", SPE_BUILTIN_EVAND },
9096 { 0, CODE_FOR_spe_evandc, "__builtin_spe_evandc", SPE_BUILTIN_EVANDC },
9097 { 0, CODE_FOR_divv2si3, "__builtin_spe_evdivws", SPE_BUILTIN_EVDIVWS },
9098 { 0, CODE_FOR_spe_evdivwu, "__builtin_spe_evdivwu", SPE_BUILTIN_EVDIVWU },
9099 { 0, CODE_FOR_spe_eveqv, "__builtin_spe_eveqv", SPE_BUILTIN_EVEQV },
9100 { 0, CODE_FOR_spe_evfsadd, "__builtin_spe_evfsadd", SPE_BUILTIN_EVFSADD },
9101 { 0, CODE_FOR_spe_evfsdiv, "__builtin_spe_evfsdiv", SPE_BUILTIN_EVFSDIV },
9102 { 0, CODE_FOR_spe_evfsmul, "__builtin_spe_evfsmul", SPE_BUILTIN_EVFSMUL },
9103 { 0, CODE_FOR_spe_evfssub, "__builtin_spe_evfssub", SPE_BUILTIN_EVFSSUB },
9104 { 0, CODE_FOR_spe_evmergehi, "__builtin_spe_evmergehi", SPE_BUILTIN_EVMERGEHI },
9105 { 0, CODE_FOR_spe_evmergehilo, "__builtin_spe_evmergehilo", SPE_BUILTIN_EVMERGEHILO },
9106 { 0, CODE_FOR_spe_evmergelo, "__builtin_spe_evmergelo", SPE_BUILTIN_EVMERGELO },
9107 { 0, CODE_FOR_spe_evmergelohi, "__builtin_spe_evmergelohi", SPE_BUILTIN_EVMERGELOHI },
9108 { 0, CODE_FOR_spe_evmhegsmfaa, "__builtin_spe_evmhegsmfaa", SPE_BUILTIN_EVMHEGSMFAA },
9109 { 0, CODE_FOR_spe_evmhegsmfan, "__builtin_spe_evmhegsmfan", SPE_BUILTIN_EVMHEGSMFAN },
9110 { 0, CODE_FOR_spe_evmhegsmiaa, "__builtin_spe_evmhegsmiaa", SPE_BUILTIN_EVMHEGSMIAA },
9111 { 0, CODE_FOR_spe_evmhegsmian, "__builtin_spe_evmhegsmian", SPE_BUILTIN_EVMHEGSMIAN },
9112 { 0, CODE_FOR_spe_evmhegumiaa, "__builtin_spe_evmhegumiaa", SPE_BUILTIN_EVMHEGUMIAA },
9113 { 0, CODE_FOR_spe_evmhegumian, "__builtin_spe_evmhegumian", SPE_BUILTIN_EVMHEGUMIAN },
9114 { 0, CODE_FOR_spe_evmhesmf, "__builtin_spe_evmhesmf", SPE_BUILTIN_EVMHESMF },
9115 { 0, CODE_FOR_spe_evmhesmfa, "__builtin_spe_evmhesmfa", SPE_BUILTIN_EVMHESMFA },
9116 { 0, CODE_FOR_spe_evmhesmfaaw, "__builtin_spe_evmhesmfaaw", SPE_BUILTIN_EVMHESMFAAW },
9117 { 0, CODE_FOR_spe_evmhesmfanw, "__builtin_spe_evmhesmfanw", SPE_BUILTIN_EVMHESMFANW },
9118 { 0, CODE_FOR_spe_evmhesmi, "__builtin_spe_evmhesmi", SPE_BUILTIN_EVMHESMI },
9119 { 0, CODE_FOR_spe_evmhesmia, "__builtin_spe_evmhesmia", SPE_BUILTIN_EVMHESMIA },
9120 { 0, CODE_FOR_spe_evmhesmiaaw, "__builtin_spe_evmhesmiaaw", SPE_BUILTIN_EVMHESMIAAW },
9121 { 0, CODE_FOR_spe_evmhesmianw, "__builtin_spe_evmhesmianw", SPE_BUILTIN_EVMHESMIANW },
9122 { 0, CODE_FOR_spe_evmhessf, "__builtin_spe_evmhessf", SPE_BUILTIN_EVMHESSF },
9123 { 0, CODE_FOR_spe_evmhessfa, "__builtin_spe_evmhessfa", SPE_BUILTIN_EVMHESSFA },
9124 { 0, CODE_FOR_spe_evmhessfaaw, "__builtin_spe_evmhessfaaw", SPE_BUILTIN_EVMHESSFAAW },
9125 { 0, CODE_FOR_spe_evmhessfanw, "__builtin_spe_evmhessfanw", SPE_BUILTIN_EVMHESSFANW },
9126 { 0, CODE_FOR_spe_evmhessiaaw, "__builtin_spe_evmhessiaaw", SPE_BUILTIN_EVMHESSIAAW },
9127 { 0, CODE_FOR_spe_evmhessianw, "__builtin_spe_evmhessianw", SPE_BUILTIN_EVMHESSIANW },
9128 { 0, CODE_FOR_spe_evmheumi, "__builtin_spe_evmheumi", SPE_BUILTIN_EVMHEUMI },
9129 { 0, CODE_FOR_spe_evmheumia, "__builtin_spe_evmheumia", SPE_BUILTIN_EVMHEUMIA },
9130 { 0, CODE_FOR_spe_evmheumiaaw, "__builtin_spe_evmheumiaaw", SPE_BUILTIN_EVMHEUMIAAW },
9131 { 0, CODE_FOR_spe_evmheumianw, "__builtin_spe_evmheumianw", SPE_BUILTIN_EVMHEUMIANW },
9132 { 0, CODE_FOR_spe_evmheusiaaw, "__builtin_spe_evmheusiaaw", SPE_BUILTIN_EVMHEUSIAAW },
9133 { 0, CODE_FOR_spe_evmheusianw, "__builtin_spe_evmheusianw", SPE_BUILTIN_EVMHEUSIANW },
9134 { 0, CODE_FOR_spe_evmhogsmfaa, "__builtin_spe_evmhogsmfaa", SPE_BUILTIN_EVMHOGSMFAA },
9135 { 0, CODE_FOR_spe_evmhogsmfan, "__builtin_spe_evmhogsmfan", SPE_BUILTIN_EVMHOGSMFAN },
9136 { 0, CODE_FOR_spe_evmhogsmiaa, "__builtin_spe_evmhogsmiaa", SPE_BUILTIN_EVMHOGSMIAA },
9137 { 0, CODE_FOR_spe_evmhogsmian, "__builtin_spe_evmhogsmian", SPE_BUILTIN_EVMHOGSMIAN },
9138 { 0, CODE_FOR_spe_evmhogumiaa, "__builtin_spe_evmhogumiaa", SPE_BUILTIN_EVMHOGUMIAA },
9139 { 0, CODE_FOR_spe_evmhogumian, "__builtin_spe_evmhogumian", SPE_BUILTIN_EVMHOGUMIAN },
9140 { 0, CODE_FOR_spe_evmhosmf, "__builtin_spe_evmhosmf", SPE_BUILTIN_EVMHOSMF },
9141 { 0, CODE_FOR_spe_evmhosmfa, "__builtin_spe_evmhosmfa", SPE_BUILTIN_EVMHOSMFA },
9142 { 0, CODE_FOR_spe_evmhosmfaaw, "__builtin_spe_evmhosmfaaw", SPE_BUILTIN_EVMHOSMFAAW },
9143 { 0, CODE_FOR_spe_evmhosmfanw, "__builtin_spe_evmhosmfanw", SPE_BUILTIN_EVMHOSMFANW },
9144 { 0, CODE_FOR_spe_evmhosmi, "__builtin_spe_evmhosmi", SPE_BUILTIN_EVMHOSMI },
9145 { 0, CODE_FOR_spe_evmhosmia, "__builtin_spe_evmhosmia", SPE_BUILTIN_EVMHOSMIA },
9146 { 0, CODE_FOR_spe_evmhosmiaaw, "__builtin_spe_evmhosmiaaw", SPE_BUILTIN_EVMHOSMIAAW },
9147 { 0, CODE_FOR_spe_evmhosmianw, "__builtin_spe_evmhosmianw", SPE_BUILTIN_EVMHOSMIANW },
9148 { 0, CODE_FOR_spe_evmhossf, "__builtin_spe_evmhossf", SPE_BUILTIN_EVMHOSSF },
9149 { 0, CODE_FOR_spe_evmhossfa, "__builtin_spe_evmhossfa", SPE_BUILTIN_EVMHOSSFA },
9150 { 0, CODE_FOR_spe_evmhossfaaw, "__builtin_spe_evmhossfaaw", SPE_BUILTIN_EVMHOSSFAAW },
9151 { 0, CODE_FOR_spe_evmhossfanw, "__builtin_spe_evmhossfanw", SPE_BUILTIN_EVMHOSSFANW },
9152 { 0, CODE_FOR_spe_evmhossiaaw, "__builtin_spe_evmhossiaaw", SPE_BUILTIN_EVMHOSSIAAW },
9153 { 0, CODE_FOR_spe_evmhossianw, "__builtin_spe_evmhossianw", SPE_BUILTIN_EVMHOSSIANW },
9154 { 0, CODE_FOR_spe_evmhoumi, "__builtin_spe_evmhoumi", SPE_BUILTIN_EVMHOUMI },
9155 { 0, CODE_FOR_spe_evmhoumia, "__builtin_spe_evmhoumia", SPE_BUILTIN_EVMHOUMIA },
9156 { 0, CODE_FOR_spe_evmhoumiaaw, "__builtin_spe_evmhoumiaaw", SPE_BUILTIN_EVMHOUMIAAW },
9157 { 0, CODE_FOR_spe_evmhoumianw, "__builtin_spe_evmhoumianw", SPE_BUILTIN_EVMHOUMIANW },
9158 { 0, CODE_FOR_spe_evmhousiaaw, "__builtin_spe_evmhousiaaw", SPE_BUILTIN_EVMHOUSIAAW },
9159 { 0, CODE_FOR_spe_evmhousianw, "__builtin_spe_evmhousianw", SPE_BUILTIN_EVMHOUSIANW },
9160 { 0, CODE_FOR_spe_evmwhsmf, "__builtin_spe_evmwhsmf", SPE_BUILTIN_EVMWHSMF },
9161 { 0, CODE_FOR_spe_evmwhsmfa, "__builtin_spe_evmwhsmfa", SPE_BUILTIN_EVMWHSMFA },
9162 { 0, CODE_FOR_spe_evmwhsmi, "__builtin_spe_evmwhsmi", SPE_BUILTIN_EVMWHSMI },
9163 { 0, CODE_FOR_spe_evmwhsmia, "__builtin_spe_evmwhsmia", SPE_BUILTIN_EVMWHSMIA },
9164 { 0, CODE_FOR_spe_evmwhssf, "__builtin_spe_evmwhssf", SPE_BUILTIN_EVMWHSSF },
9165 { 0, CODE_FOR_spe_evmwhssfa, "__builtin_spe_evmwhssfa", SPE_BUILTIN_EVMWHSSFA },
9166 { 0, CODE_FOR_spe_evmwhumi, "__builtin_spe_evmwhumi", SPE_BUILTIN_EVMWHUMI },
9167 { 0, CODE_FOR_spe_evmwhumia, "__builtin_spe_evmwhumia", SPE_BUILTIN_EVMWHUMIA },
9168 { 0, CODE_FOR_spe_evmwlsmiaaw, "__builtin_spe_evmwlsmiaaw", SPE_BUILTIN_EVMWLSMIAAW },
9169 { 0, CODE_FOR_spe_evmwlsmianw, "__builtin_spe_evmwlsmianw", SPE_BUILTIN_EVMWLSMIANW },
9170 { 0, CODE_FOR_spe_evmwlssiaaw, "__builtin_spe_evmwlssiaaw", SPE_BUILTIN_EVMWLSSIAAW },
9171 { 0, CODE_FOR_spe_evmwlssianw, "__builtin_spe_evmwlssianw", SPE_BUILTIN_EVMWLSSIANW },
9172 { 0, CODE_FOR_spe_evmwlumi, "__builtin_spe_evmwlumi", SPE_BUILTIN_EVMWLUMI },
9173 { 0, CODE_FOR_spe_evmwlumia, "__builtin_spe_evmwlumia", SPE_BUILTIN_EVMWLUMIA },
9174 { 0, CODE_FOR_spe_evmwlumiaaw, "__builtin_spe_evmwlumiaaw", SPE_BUILTIN_EVMWLUMIAAW },
9175 { 0, CODE_FOR_spe_evmwlumianw, "__builtin_spe_evmwlumianw", SPE_BUILTIN_EVMWLUMIANW },
9176 { 0, CODE_FOR_spe_evmwlusiaaw, "__builtin_spe_evmwlusiaaw", SPE_BUILTIN_EVMWLUSIAAW },
9177 { 0, CODE_FOR_spe_evmwlusianw, "__builtin_spe_evmwlusianw", SPE_BUILTIN_EVMWLUSIANW },
9178 { 0, CODE_FOR_spe_evmwsmf, "__builtin_spe_evmwsmf", SPE_BUILTIN_EVMWSMF },
9179 { 0, CODE_FOR_spe_evmwsmfa, "__builtin_spe_evmwsmfa", SPE_BUILTIN_EVMWSMFA },
9180 { 0, CODE_FOR_spe_evmwsmfaa, "__builtin_spe_evmwsmfaa", SPE_BUILTIN_EVMWSMFAA },
9181 { 0, CODE_FOR_spe_evmwsmfan, "__builtin_spe_evmwsmfan", SPE_BUILTIN_EVMWSMFAN },
9182 { 0, CODE_FOR_spe_evmwsmi, "__builtin_spe_evmwsmi", SPE_BUILTIN_EVMWSMI },
9183 { 0, CODE_FOR_spe_evmwsmia, "__builtin_spe_evmwsmia", SPE_BUILTIN_EVMWSMIA },
9184 { 0, CODE_FOR_spe_evmwsmiaa, "__builtin_spe_evmwsmiaa", SPE_BUILTIN_EVMWSMIAA },
9185 { 0, CODE_FOR_spe_evmwsmian, "__builtin_spe_evmwsmian", SPE_BUILTIN_EVMWSMIAN },
9186 { 0, CODE_FOR_spe_evmwssf, "__builtin_spe_evmwssf", SPE_BUILTIN_EVMWSSF },
9187 { 0, CODE_FOR_spe_evmwssfa, "__builtin_spe_evmwssfa", SPE_BUILTIN_EVMWSSFA },
9188 { 0, CODE_FOR_spe_evmwssfaa, "__builtin_spe_evmwssfaa", SPE_BUILTIN_EVMWSSFAA },
9189 { 0, CODE_FOR_spe_evmwssfan, "__builtin_spe_evmwssfan", SPE_BUILTIN_EVMWSSFAN },
9190 { 0, CODE_FOR_spe_evmwumi, "__builtin_spe_evmwumi", SPE_BUILTIN_EVMWUMI },
9191 { 0, CODE_FOR_spe_evmwumia, "__builtin_spe_evmwumia", SPE_BUILTIN_EVMWUMIA },
9192 { 0, CODE_FOR_spe_evmwumiaa, "__builtin_spe_evmwumiaa", SPE_BUILTIN_EVMWUMIAA },
9193 { 0, CODE_FOR_spe_evmwumian, "__builtin_spe_evmwumian", SPE_BUILTIN_EVMWUMIAN },
9194 { 0, CODE_FOR_spe_evnand, "__builtin_spe_evnand", SPE_BUILTIN_EVNAND },
9195 { 0, CODE_FOR_spe_evnor, "__builtin_spe_evnor", SPE_BUILTIN_EVNOR },
9196 { 0, CODE_FOR_spe_evor, "__builtin_spe_evor", SPE_BUILTIN_EVOR },
9197 { 0, CODE_FOR_spe_evorc, "__builtin_spe_evorc", SPE_BUILTIN_EVORC },
9198 { 0, CODE_FOR_spe_evrlw, "__builtin_spe_evrlw", SPE_BUILTIN_EVRLW },
9199 { 0, CODE_FOR_spe_evslw, "__builtin_spe_evslw", SPE_BUILTIN_EVSLW },
9200 { 0, CODE_FOR_spe_evsrws, "__builtin_spe_evsrws", SPE_BUILTIN_EVSRWS },
9201 { 0, CODE_FOR_spe_evsrwu, "__builtin_spe_evsrwu", SPE_BUILTIN_EVSRWU },
9202 { 0, CODE_FOR_subv2si3, "__builtin_spe_evsubfw", SPE_BUILTIN_EVSUBFW },
9204 /* SPE binary operations expecting a 5-bit unsigned literal. */
9205 { 0, CODE_FOR_spe_evaddiw, "__builtin_spe_evaddiw", SPE_BUILTIN_EVADDIW },
9207 { 0, CODE_FOR_spe_evrlwi, "__builtin_spe_evrlwi", SPE_BUILTIN_EVRLWI },
9208 { 0, CODE_FOR_spe_evslwi, "__builtin_spe_evslwi", SPE_BUILTIN_EVSLWI },
9209 { 0, CODE_FOR_spe_evsrwis, "__builtin_spe_evsrwis", SPE_BUILTIN_EVSRWIS },
9210 { 0, CODE_FOR_spe_evsrwiu, "__builtin_spe_evsrwiu", SPE_BUILTIN_EVSRWIU },
9211 { 0, CODE_FOR_spe_evsubifw, "__builtin_spe_evsubifw", SPE_BUILTIN_EVSUBIFW },
9212 { 0, CODE_FOR_spe_evmwhssfaa, "__builtin_spe_evmwhssfaa", SPE_BUILTIN_EVMWHSSFAA },
9213 { 0, CODE_FOR_spe_evmwhssmaa, "__builtin_spe_evmwhssmaa", SPE_BUILTIN_EVMWHSSMAA },
9214 { 0, CODE_FOR_spe_evmwhsmfaa, "__builtin_spe_evmwhsmfaa", SPE_BUILTIN_EVMWHSMFAA },
9215 { 0, CODE_FOR_spe_evmwhsmiaa, "__builtin_spe_evmwhsmiaa", SPE_BUILTIN_EVMWHSMIAA },
9216 { 0, CODE_FOR_spe_evmwhusiaa, "__builtin_spe_evmwhusiaa", SPE_BUILTIN_EVMWHUSIAA },
9217 { 0, CODE_FOR_spe_evmwhumiaa, "__builtin_spe_evmwhumiaa", SPE_BUILTIN_EVMWHUMIAA },
9218 { 0, CODE_FOR_spe_evmwhssfan, "__builtin_spe_evmwhssfan", SPE_BUILTIN_EVMWHSSFAN },
9219 { 0, CODE_FOR_spe_evmwhssian, "__builtin_spe_evmwhssian", SPE_BUILTIN_EVMWHSSIAN },
9220 { 0, CODE_FOR_spe_evmwhsmfan, "__builtin_spe_evmwhsmfan", SPE_BUILTIN_EVMWHSMFAN },
9221 { 0, CODE_FOR_spe_evmwhsmian, "__builtin_spe_evmwhsmian", SPE_BUILTIN_EVMWHSMIAN },
9222 { 0, CODE_FOR_spe_evmwhusian, "__builtin_spe_evmwhusian", SPE_BUILTIN_EVMWHUSIAN },
9223 { 0, CODE_FOR_spe_evmwhumian, "__builtin_spe_evmwhumian", SPE_BUILTIN_EVMWHUMIAN },
9224 { 0, CODE_FOR_spe_evmwhgssfaa, "__builtin_spe_evmwhgssfaa", SPE_BUILTIN_EVMWHGSSFAA },
9225 { 0, CODE_FOR_spe_evmwhgsmfaa, "__builtin_spe_evmwhgsmfaa", SPE_BUILTIN_EVMWHGSMFAA },
9226 { 0, CODE_FOR_spe_evmwhgsmiaa, "__builtin_spe_evmwhgsmiaa", SPE_BUILTIN_EVMWHGSMIAA },
9227 { 0, CODE_FOR_spe_evmwhgumiaa, "__builtin_spe_evmwhgumiaa", SPE_BUILTIN_EVMWHGUMIAA },
9228 { 0, CODE_FOR_spe_evmwhgssfan, "__builtin_spe_evmwhgssfan", SPE_BUILTIN_EVMWHGSSFAN },
9229 { 0, CODE_FOR_spe_evmwhgsmfan, "__builtin_spe_evmwhgsmfan", SPE_BUILTIN_EVMWHGSMFAN },
9230 { 0, CODE_FOR_spe_evmwhgsmian, "__builtin_spe_evmwhgsmian", SPE_BUILTIN_EVMWHGSMIAN },
9231 { 0, CODE_FOR_spe_evmwhgumian, "__builtin_spe_evmwhgumian", SPE_BUILTIN_EVMWHGUMIAN },
9232 { 0, CODE_FOR_spe_brinc, "__builtin_spe_brinc", SPE_BUILTIN_BRINC },
9234 /* Place-holder. Leave as last binary SPE builtin. */
9235 { 0, CODE_FOR_xorv2si3, "__builtin_spe_evxor", SPE_BUILTIN_EVXOR }
9238 /* AltiVec predicates. */
9240 struct builtin_description_predicates
9242 const unsigned int mask;           /* target flag mask required for the builtin */
9243 const enum insn_code icode;        /* insn pattern, or CODE_FOR_nothing for overloads */
9244 const char *const name;            /* builtin function name */
9245 const enum rs6000_builtins code;   /* rs6000 builtin code */
9248 static const struct builtin_description_predicates bdesc_altivec_preds[] =
9250 { MASK_ALTIVEC, CODE_FOR_altivec_vcmpbfp_p, "__builtin_altivec_vcmpbfp_p",
9251 ALTIVEC_BUILTIN_VCMPBFP_P },
9252 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_vector_eq_v4sf_p,
9253 "__builtin_altivec_vcmpeqfp_p", ALTIVEC_BUILTIN_VCMPEQFP_P },
9254 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_vector_ge_v4sf_p,
9255 "__builtin_altivec_vcmpgefp_p", ALTIVEC_BUILTIN_VCMPGEFP_P },
9256 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_vector_gt_v4sf_p,
9257 "__builtin_altivec_vcmpgtfp_p", ALTIVEC_BUILTIN_VCMPGTFP_P },
9258 { MASK_ALTIVEC, CODE_FOR_vector_eq_v4si_p, "__builtin_altivec_vcmpequw_p",
9259 ALTIVEC_BUILTIN_VCMPEQUW_P },
9260 { MASK_ALTIVEC, CODE_FOR_vector_gt_v4si_p, "__builtin_altivec_vcmpgtsw_p",
9261 ALTIVEC_BUILTIN_VCMPGTSW_P },
9262 { MASK_ALTIVEC, CODE_FOR_vector_gtu_v4si_p, "__builtin_altivec_vcmpgtuw_p",
9263 ALTIVEC_BUILTIN_VCMPGTUW_P },
9264 { MASK_ALTIVEC, CODE_FOR_vector_eq_v8hi_p, "__builtin_altivec_vcmpequh_p",
9265 ALTIVEC_BUILTIN_VCMPEQUH_P },
9266 { MASK_ALTIVEC, CODE_FOR_vector_gt_v8hi_p, "__builtin_altivec_vcmpgtsh_p",
9267 ALTIVEC_BUILTIN_VCMPGTSH_P },
9268 { MASK_ALTIVEC, CODE_FOR_vector_gtu_v8hi_p, "__builtin_altivec_vcmpgtuh_p",
9269 ALTIVEC_BUILTIN_VCMPGTUH_P },
9270 { MASK_ALTIVEC, CODE_FOR_vector_eq_v16qi_p, "__builtin_altivec_vcmpequb_p",
9271 ALTIVEC_BUILTIN_VCMPEQUB_P },
9272 { MASK_ALTIVEC, CODE_FOR_vector_gt_v16qi_p, "__builtin_altivec_vcmpgtsb_p",
9273 ALTIVEC_BUILTIN_VCMPGTSB_P },
9274 { MASK_ALTIVEC, CODE_FOR_vector_gtu_v16qi_p, "__builtin_altivec_vcmpgtub_p",
9275 ALTIVEC_BUILTIN_VCMPGTUB_P },
9277 { MASK_VSX, CODE_FOR_vector_eq_v4sf_p, "__builtin_vsx_xvcmpeqsp_p",
9278 VSX_BUILTIN_XVCMPEQSP_P },
9279 { MASK_VSX, CODE_FOR_vector_ge_v4sf_p, "__builtin_vsx_xvcmpgesp_p",
9280 VSX_BUILTIN_XVCMPGESP_P },
9281 { MASK_VSX, CODE_FOR_vector_gt_v4sf_p, "__builtin_vsx_xvcmpgtsp_p",
9282 VSX_BUILTIN_XVCMPGTSP_P },
9283 { MASK_VSX, CODE_FOR_vector_eq_v2df_p, "__builtin_vsx_xvcmpeqdp_p",
9284 VSX_BUILTIN_XVCMPEQDP_P },
9285 { MASK_VSX, CODE_FOR_vector_ge_v2df_p, "__builtin_vsx_xvcmpgedp_p",
9286 VSX_BUILTIN_XVCMPGEDP_P },
9287 { MASK_VSX, CODE_FOR_vector_gt_v2df_p, "__builtin_vsx_xvcmpgtdp_p",
9288 VSX_BUILTIN_XVCMPGTDP_P },
9290 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_vcmpeq_p",
9291 ALTIVEC_BUILTIN_VCMPEQ_P },
9292 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_vcmpgt_p",
9293 ALTIVEC_BUILTIN_VCMPGT_P },
9294 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_vcmpge_p",
9295 ALTIVEC_BUILTIN_VCMPGE_P }
9298 /* SPE predicates. */
9299 static struct builtin_description bdesc_spe_predicates[] =
9301 /* Place-holder. Leave as first. */
9302 { 0, CODE_FOR_spe_evcmpeq, "__builtin_spe_evcmpeq", SPE_BUILTIN_EVCMPEQ },
9303 { 0, CODE_FOR_spe_evcmpgts, "__builtin_spe_evcmpgts", SPE_BUILTIN_EVCMPGTS },
9304 { 0, CODE_FOR_spe_evcmpgtu, "__builtin_spe_evcmpgtu", SPE_BUILTIN_EVCMPGTU },
9305 { 0, CODE_FOR_spe_evcmplts, "__builtin_spe_evcmplts", SPE_BUILTIN_EVCMPLTS },
9306 { 0, CODE_FOR_spe_evcmpltu, "__builtin_spe_evcmpltu", SPE_BUILTIN_EVCMPLTU },
9307 { 0, CODE_FOR_spe_evfscmpeq, "__builtin_spe_evfscmpeq", SPE_BUILTIN_EVFSCMPEQ },
9308 { 0, CODE_FOR_spe_evfscmpgt, "__builtin_spe_evfscmpgt", SPE_BUILTIN_EVFSCMPGT },
9309 { 0, CODE_FOR_spe_evfscmplt, "__builtin_spe_evfscmplt", SPE_BUILTIN_EVFSCMPLT },
9310 { 0, CODE_FOR_spe_evfststeq, "__builtin_spe_evfststeq", SPE_BUILTIN_EVFSTSTEQ },
9311 { 0, CODE_FOR_spe_evfststgt, "__builtin_spe_evfststgt", SPE_BUILTIN_EVFSTSTGT },
9312 /* Place-holder. Leave as last. */
9313 { 0, CODE_FOR_spe_evfststlt, "__builtin_spe_evfststlt", SPE_BUILTIN_EVFSTSTLT },
9316 /* SPE evsel predicates. */
9317 static struct builtin_description bdesc_spe_evsel[] =
9319 /* Place-holder. Leave as first. */
9320 { 0, CODE_FOR_spe_evcmpgts, "__builtin_spe_evsel_gts", SPE_BUILTIN_EVSEL_CMPGTS },
9321 { 0, CODE_FOR_spe_evcmpgtu, "__builtin_spe_evsel_gtu", SPE_BUILTIN_EVSEL_CMPGTU },
9322 { 0, CODE_FOR_spe_evcmplts, "__builtin_spe_evsel_lts", SPE_BUILTIN_EVSEL_CMPLTS },
9323 { 0, CODE_FOR_spe_evcmpltu, "__builtin_spe_evsel_ltu", SPE_BUILTIN_EVSEL_CMPLTU },
9324 { 0, CODE_FOR_spe_evcmpeq, "__builtin_spe_evsel_eq", SPE_BUILTIN_EVSEL_CMPEQ },
9325 { 0, CODE_FOR_spe_evfscmpgt, "__builtin_spe_evsel_fsgt", SPE_BUILTIN_EVSEL_FSCMPGT },
9326 { 0, CODE_FOR_spe_evfscmplt, "__builtin_spe_evsel_fslt", SPE_BUILTIN_EVSEL_FSCMPLT },
9327 { 0, CODE_FOR_spe_evfscmpeq, "__builtin_spe_evsel_fseq", SPE_BUILTIN_EVSEL_FSCMPEQ },
9328 { 0, CODE_FOR_spe_evfststgt, "__builtin_spe_evsel_fststgt", SPE_BUILTIN_EVSEL_FSTSTGT },
9329 { 0, CODE_FOR_spe_evfststlt, "__builtin_spe_evsel_fststlt", SPE_BUILTIN_EVSEL_FSTSTLT },
9330 /* Place-holder. Leave as last. */
9331 { 0, CODE_FOR_spe_evfststeq, "__builtin_spe_evsel_fststeq", SPE_BUILTIN_EVSEL_FSTSTEQ },
9334 /* PAIRED predicates. */
9335 static const struct builtin_description bdesc_paired_preds[] =
9337 /* Place-holder. Leave as first. */
9338 { 0, CODE_FOR_paired_cmpu0, "__builtin_paired_cmpu0", PAIRED_BUILTIN_CMPU0 },
9339 /* Place-holder. Leave as last. */
9340 { 0, CODE_FOR_paired_cmpu1, "__builtin_paired_cmpu1", PAIRED_BUILTIN_CMPU1 },
9343 /* ABS* operations. */
9345 static const struct builtin_description bdesc_abs[] =
9347 { MASK_ALTIVEC, CODE_FOR_absv4si2, "__builtin_altivec_abs_v4si", ALTIVEC_BUILTIN_ABS_V4SI },
9348 { MASK_ALTIVEC, CODE_FOR_absv8hi2, "__builtin_altivec_abs_v8hi", ALTIVEC_BUILTIN_ABS_V8HI },
9349 { MASK_ALTIVEC, CODE_FOR_absv4sf2, "__builtin_altivec_abs_v4sf", ALTIVEC_BUILTIN_ABS_V4SF },
9350 { MASK_ALTIVEC, CODE_FOR_absv16qi2, "__builtin_altivec_abs_v16qi", ALTIVEC_BUILTIN_ABS_V16QI },
9351 { MASK_ALTIVEC, CODE_FOR_altivec_abss_v4si, "__builtin_altivec_abss_v4si", ALTIVEC_BUILTIN_ABSS_V4SI },
9352 { MASK_ALTIVEC, CODE_FOR_altivec_abss_v8hi, "__builtin_altivec_abss_v8hi", ALTIVEC_BUILTIN_ABSS_V8HI },
9353 { MASK_ALTIVEC, CODE_FOR_altivec_abss_v16qi, "__builtin_altivec_abss_v16qi", ALTIVEC_BUILTIN_ABSS_V16QI },
9354 { MASK_VSX, CODE_FOR_absv2df2, "__builtin_vsx_xvabsdp", VSX_BUILTIN_XVABSDP },
9355 { MASK_VSX, CODE_FOR_vsx_nabsv2df2, "__builtin_vsx_xvnabsdp", VSX_BUILTIN_XVNABSDP },
9356 { MASK_VSX, CODE_FOR_absv4sf2, "__builtin_vsx_xvabssp", VSX_BUILTIN_XVABSSP },
9357 { MASK_VSX, CODE_FOR_vsx_nabsv4sf2, "__builtin_vsx_xvnabssp", VSX_BUILTIN_XVNABSSP },
9360 /* Simple unary operations: VECb = foo (unsigned literal) or VECb = foo (VECa). */
9363 static struct builtin_description bdesc_1arg[] =
9365 { MASK_ALTIVEC, CODE_FOR_altivec_vexptefp, "__builtin_altivec_vexptefp", ALTIVEC_BUILTIN_VEXPTEFP },
9366 { MASK_ALTIVEC, CODE_FOR_altivec_vlogefp, "__builtin_altivec_vlogefp", ALTIVEC_BUILTIN_VLOGEFP },
9367 { MASK_ALTIVEC, CODE_FOR_altivec_vrefp, "__builtin_altivec_vrefp", ALTIVEC_BUILTIN_VREFP },
9368 { MASK_ALTIVEC, CODE_FOR_vector_floorv4sf2, "__builtin_altivec_vrfim", ALTIVEC_BUILTIN_VRFIM },
9369 { MASK_ALTIVEC, CODE_FOR_altivec_vrfin, "__builtin_altivec_vrfin", ALTIVEC_BUILTIN_VRFIN },
9370 { MASK_ALTIVEC, CODE_FOR_vector_ceilv4sf2, "__builtin_altivec_vrfip", ALTIVEC_BUILTIN_VRFIP },
9371 { MASK_ALTIVEC, CODE_FOR_vector_btruncv4sf2, "__builtin_altivec_vrfiz", ALTIVEC_BUILTIN_VRFIZ },
9372 { MASK_ALTIVEC, CODE_FOR_altivec_vrsqrtefp, "__builtin_altivec_vrsqrtefp", ALTIVEC_BUILTIN_VRSQRTEFP },
9373 { MASK_ALTIVEC, CODE_FOR_altivec_vspltisb, "__builtin_altivec_vspltisb", ALTIVEC_BUILTIN_VSPLTISB },
9374 { MASK_ALTIVEC, CODE_FOR_altivec_vspltish, "__builtin_altivec_vspltish", ALTIVEC_BUILTIN_VSPLTISH },
9375 { MASK_ALTIVEC, CODE_FOR_altivec_vspltisw, "__builtin_altivec_vspltisw", ALTIVEC_BUILTIN_VSPLTISW },
9376 { MASK_ALTIVEC, CODE_FOR_altivec_vupkhsb, "__builtin_altivec_vupkhsb", ALTIVEC_BUILTIN_VUPKHSB },
9377 { MASK_ALTIVEC, CODE_FOR_altivec_vupkhpx, "__builtin_altivec_vupkhpx", ALTIVEC_BUILTIN_VUPKHPX },
9378 { MASK_ALTIVEC, CODE_FOR_altivec_vupkhsh, "__builtin_altivec_vupkhsh", ALTIVEC_BUILTIN_VUPKHSH },
9379 { MASK_ALTIVEC, CODE_FOR_altivec_vupklsb, "__builtin_altivec_vupklsb", ALTIVEC_BUILTIN_VUPKLSB },
9380 { MASK_ALTIVEC, CODE_FOR_altivec_vupklpx, "__builtin_altivec_vupklpx", ALTIVEC_BUILTIN_VUPKLPX },
9381 { MASK_ALTIVEC, CODE_FOR_altivec_vupklsh, "__builtin_altivec_vupklsh", ALTIVEC_BUILTIN_VUPKLSH },
9383 { MASK_VSX, CODE_FOR_negv2df2, "__builtin_vsx_xvnegdp", VSX_BUILTIN_XVNEGDP },
9384 { MASK_VSX, CODE_FOR_sqrtv2df2, "__builtin_vsx_xvsqrtdp", VSX_BUILTIN_XVSQRTDP },
9385 { MASK_VSX, CODE_FOR_vsx_rsqrtev2df2, "__builtin_vsx_xvrsqrtedp", VSX_BUILTIN_XVRSQRTEDP },
9386 { MASK_VSX, CODE_FOR_vsx_tsqrtv2df2_fe, "__builtin_vsx_xvtsqrtdp_fe", VSX_BUILTIN_XVTSQRTDP_FE },
9387 { MASK_VSX, CODE_FOR_vsx_tsqrtv2df2_fg, "__builtin_vsx_xvtsqrtdp_fg", VSX_BUILTIN_XVTSQRTDP_FG },
9388 { MASK_VSX, CODE_FOR_vsx_frev2df2, "__builtin_vsx_xvredp", VSX_BUILTIN_XVREDP },
9390 { MASK_VSX, CODE_FOR_negv4sf2, "__builtin_vsx_xvnegsp", VSX_BUILTIN_XVNEGSP },
9391 { MASK_VSX, CODE_FOR_sqrtv4sf2, "__builtin_vsx_xvsqrtsp", VSX_BUILTIN_XVSQRTSP },
9392 { MASK_VSX, CODE_FOR_vsx_rsqrtev4sf2, "__builtin_vsx_xvrsqrtesp", VSX_BUILTIN_XVRSQRTESP },
9393 { MASK_VSX, CODE_FOR_vsx_tsqrtv4sf2_fe, "__builtin_vsx_xvtsqrtsp_fe", VSX_BUILTIN_XVTSQRTSP_FE },
9394 { MASK_VSX, CODE_FOR_vsx_tsqrtv4sf2_fg, "__builtin_vsx_xvtsqrtsp_fg", VSX_BUILTIN_XVTSQRTSP_FG },
9395 { MASK_VSX, CODE_FOR_vsx_frev4sf2, "__builtin_vsx_xvresp", VSX_BUILTIN_XVRESP },
9397 { MASK_VSX, CODE_FOR_vsx_xscvdpsp, "__builtin_vsx_xscvdpsp", VSX_BUILTIN_XSCVDPSP },
9398 { MASK_VSX, CODE_FOR_vsx_xscvdpsp, "__builtin_vsx_xscvspdp", VSX_BUILTIN_XSCVSPDP },
9399 { MASK_VSX, CODE_FOR_vsx_xvcvdpsp, "__builtin_vsx_xvcvdpsp", VSX_BUILTIN_XVCVDPSP },
9400 { MASK_VSX, CODE_FOR_vsx_xvcvspdp, "__builtin_vsx_xvcvspdp", VSX_BUILTIN_XVCVSPDP },
9401 { MASK_VSX, CODE_FOR_vsx_tsqrtdf2_fe, "__builtin_vsx_xstsqrtdp_fe", VSX_BUILTIN_XSTSQRTDP_FE },
9402 { MASK_VSX, CODE_FOR_vsx_tsqrtdf2_fg, "__builtin_vsx_xstsqrtdp_fg", VSX_BUILTIN_XSTSQRTDP_FG },
9404 { MASK_VSX, CODE_FOR_vsx_fix_truncv2dfv2di2, "__builtin_vsx_xvcvdpsxds", VSX_BUILTIN_XVCVDPSXDS },
9405 { MASK_VSX, CODE_FOR_vsx_fixuns_truncv2dfv2di2, "__builtin_vsx_xvcvdpuxds", VSX_BUILTIN_XVCVDPUXDS },
9406 { MASK_VSX, CODE_FOR_vsx_fixuns_truncv2dfv2di2, "__builtin_vsx_xvcvdpuxds_uns", VSX_BUILTIN_XVCVDPUXDS_UNS },
9407 { MASK_VSX, CODE_FOR_vsx_floatv2div2df2, "__builtin_vsx_xvcvsxddp", VSX_BUILTIN_XVCVSXDDP },
9408 { MASK_VSX, CODE_FOR_vsx_floatunsv2div2df2, "__builtin_vsx_xvcvuxddp", VSX_BUILTIN_XVCVUXDDP },
9409 { MASK_VSX, CODE_FOR_vsx_floatunsv2div2df2, "__builtin_vsx_xvcvuxddp_uns", VSX_BUILTIN_XVCVUXDDP_UNS },
9411 { MASK_VSX, CODE_FOR_vsx_fix_truncv4sfv4si2, "__builtin_vsx_xvcvspsxws", VSX_BUILTIN_XVCVSPSXWS },
9412 { MASK_VSX, CODE_FOR_vsx_fixuns_truncv4sfv4si2, "__builtin_vsx_xvcvspuxws", VSX_BUILTIN_XVCVSPUXWS },
9413 { MASK_VSX, CODE_FOR_vsx_floatv4siv4sf2, "__builtin_vsx_xvcvsxwsp", VSX_BUILTIN_XVCVSXWSP },
9414 { MASK_VSX, CODE_FOR_vsx_floatunsv4siv4sf2, "__builtin_vsx_xvcvuxwsp", VSX_BUILTIN_XVCVUXWSP },
9416 { MASK_VSX, CODE_FOR_vsx_xvcvdpsxws, "__builtin_vsx_xvcvdpsxws", VSX_BUILTIN_XVCVDPSXWS },
9417 { MASK_VSX, CODE_FOR_vsx_xvcvdpuxws, "__builtin_vsx_xvcvdpuxws", VSX_BUILTIN_XVCVDPUXWS },
9418 { MASK_VSX, CODE_FOR_vsx_xvcvsxwdp, "__builtin_vsx_xvcvsxwdp", VSX_BUILTIN_XVCVSXWDP },
9419 { MASK_VSX, CODE_FOR_vsx_xvcvuxwdp, "__builtin_vsx_xvcvuxwdp", VSX_BUILTIN_XVCVUXWDP },
9420 { MASK_VSX, CODE_FOR_vsx_xvrdpi, "__builtin_vsx_xvrdpi", VSX_BUILTIN_XVRDPI },
9421 { MASK_VSX, CODE_FOR_vsx_xvrdpic, "__builtin_vsx_xvrdpic", VSX_BUILTIN_XVRDPIC },
9422 { MASK_VSX, CODE_FOR_vsx_floorv2df2, "__builtin_vsx_xvrdpim", VSX_BUILTIN_XVRDPIM },
9423 { MASK_VSX, CODE_FOR_vsx_ceilv2df2, "__builtin_vsx_xvrdpip", VSX_BUILTIN_XVRDPIP },
9424 { MASK_VSX, CODE_FOR_vsx_btruncv2df2, "__builtin_vsx_xvrdpiz", VSX_BUILTIN_XVRDPIZ },
9426 { MASK_VSX, CODE_FOR_vsx_xvcvspsxds, "__builtin_vsx_xvcvspsxds", VSX_BUILTIN_XVCVSPSXDS },
9427 { MASK_VSX, CODE_FOR_vsx_xvcvspuxds, "__builtin_vsx_xvcvspuxds", VSX_BUILTIN_XVCVSPUXDS },
9428 { MASK_VSX, CODE_FOR_vsx_xvcvsxdsp, "__builtin_vsx_xvcvsxdsp", VSX_BUILTIN_XVCVSXDSP },
9429 { MASK_VSX, CODE_FOR_vsx_xvcvuxdsp, "__builtin_vsx_xvcvuxdsp", VSX_BUILTIN_XVCVUXDSP },
9430 { MASK_VSX, CODE_FOR_vsx_xvrspi, "__builtin_vsx_xvrspi", VSX_BUILTIN_XVRSPI },
9431 { MASK_VSX, CODE_FOR_vsx_xvrspic, "__builtin_vsx_xvrspic", VSX_BUILTIN_XVRSPIC },
9432 { MASK_VSX, CODE_FOR_vsx_floorv4sf2, "__builtin_vsx_xvrspim", VSX_BUILTIN_XVRSPIM },
9433 { MASK_VSX, CODE_FOR_vsx_ceilv4sf2, "__builtin_vsx_xvrspip", VSX_BUILTIN_XVRSPIP },
9434 { MASK_VSX, CODE_FOR_vsx_btruncv4sf2, "__builtin_vsx_xvrspiz", VSX_BUILTIN_XVRSPIZ },
9436 { MASK_VSX, CODE_FOR_vsx_xsrdpi, "__builtin_vsx_xsrdpi", VSX_BUILTIN_XSRDPI },
9437 { MASK_VSX, CODE_FOR_vsx_xsrdpic, "__builtin_vsx_xsrdpic", VSX_BUILTIN_XSRDPIC },
9438 { MASK_VSX, CODE_FOR_vsx_floordf2, "__builtin_vsx_xsrdpim", VSX_BUILTIN_XSRDPIM },
9439 { MASK_VSX, CODE_FOR_vsx_ceildf2, "__builtin_vsx_xsrdpip", VSX_BUILTIN_XSRDPIP },
9440 { MASK_VSX, CODE_FOR_vsx_btruncdf2, "__builtin_vsx_xsrdpiz", VSX_BUILTIN_XSRDPIZ },
9442 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abs", ALTIVEC_BUILTIN_VEC_ABS },
9443 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abss", ALTIVEC_BUILTIN_VEC_ABSS },
9444 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_ceil", ALTIVEC_BUILTIN_VEC_CEIL },
9445 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_expte", ALTIVEC_BUILTIN_VEC_EXPTE },
9446 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_floor", ALTIVEC_BUILTIN_VEC_FLOOR },
9447 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_loge", ALTIVEC_BUILTIN_VEC_LOGE },
9448 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mtvscr", ALTIVEC_BUILTIN_VEC_MTVSCR },
9449 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_re", ALTIVEC_BUILTIN_VEC_RE },
9450 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_round", ALTIVEC_BUILTIN_VEC_ROUND },
9451 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_rsqrte", ALTIVEC_BUILTIN_VEC_RSQRTE },
9452 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_trunc", ALTIVEC_BUILTIN_VEC_TRUNC },
9453 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_unpackh", ALTIVEC_BUILTIN_VEC_UNPACKH },
9454 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhsh", ALTIVEC_BUILTIN_VEC_VUPKHSH },
9455 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhpx", ALTIVEC_BUILTIN_VEC_VUPKHPX },
9456 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhsb", ALTIVEC_BUILTIN_VEC_VUPKHSB },
9457 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_unpackl", ALTIVEC_BUILTIN_VEC_UNPACKL },
9458 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklpx", ALTIVEC_BUILTIN_VEC_VUPKLPX },
9459 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsh", ALTIVEC_BUILTIN_VEC_VUPKLSH },
9460 { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsb", ALTIVEC_BUILTIN_VEC_VUPKLSB },
9462 { MASK_VSX, CODE_FOR_nothing, "__builtin_vec_nearbyint", ALTIVEC_BUILTIN_VEC_NEARBYINT },
9463 { MASK_VSX, CODE_FOR_nothing, "__builtin_vec_rint", ALTIVEC_BUILTIN_VEC_RINT },
9464 { MASK_VSX, CODE_FOR_nothing, "__builtin_vec_sqrt", ALTIVEC_BUILTIN_VEC_SQRT },
9466 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_floatv4siv4sf2, "__builtin_vec_float_sisf", VECTOR_BUILTIN_FLOAT_V4SI_V4SF },
9467 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_unsigned_floatv4siv4sf2, "__builtin_vec_uns_float_sisf", VECTOR_BUILTIN_UNSFLOAT_V4SI_V4SF },
9468 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_fix_truncv4sfv4si2, "__builtin_vec_fix_sfsi", VECTOR_BUILTIN_FIX_V4SF_V4SI },
9469 { MASK_ALTIVEC|MASK_VSX, CODE_FOR_fixuns_truncv4sfv4si2, "__builtin_vec_fixuns_sfsi", VECTOR_BUILTIN_FIXUNS_V4SF_V4SI },
9471 /* The SPE unary builtins must start with SPE_BUILTIN_EVABS and
9472 end with SPE_BUILTIN_EVSUBFUSIAAW. */
9473 { 0, CODE_FOR_absv2si2, "__builtin_spe_evabs", SPE_BUILTIN_EVABS },
9474 { 0, CODE_FOR_spe_evaddsmiaaw, "__builtin_spe_evaddsmiaaw", SPE_BUILTIN_EVADDSMIAAW },
9475 { 0, CODE_FOR_spe_evaddssiaaw, "__builtin_spe_evaddssiaaw", SPE_BUILTIN_EVADDSSIAAW },
9476 { 0, CODE_FOR_spe_evaddumiaaw, "__builtin_spe_evaddumiaaw", SPE_BUILTIN_EVADDUMIAAW },
9477 { 0, CODE_FOR_spe_evaddusiaaw, "__builtin_spe_evaddusiaaw", SPE_BUILTIN_EVADDUSIAAW },
9478 { 0, CODE_FOR_spe_evcntlsw, "__builtin_spe_evcntlsw", SPE_BUILTIN_EVCNTLSW },
9479 { 0, CODE_FOR_spe_evcntlzw, "__builtin_spe_evcntlzw", SPE_BUILTIN_EVCNTLZW },
9480 { 0, CODE_FOR_spe_evextsb, "__builtin_spe_evextsb", SPE_BUILTIN_EVEXTSB },
9481 { 0, CODE_FOR_spe_evextsh, "__builtin_spe_evextsh", SPE_BUILTIN_EVEXTSH },
9482 { 0, CODE_FOR_spe_evfsabs, "__builtin_spe_evfsabs", SPE_BUILTIN_EVFSABS },
9483 { 0, CODE_FOR_spe_evfscfsf, "__builtin_spe_evfscfsf", SPE_BUILTIN_EVFSCFSF },
9484 { 0, CODE_FOR_spe_evfscfsi, "__builtin_spe_evfscfsi", SPE_BUILTIN_EVFSCFSI },
9485 { 0, CODE_FOR_spe_evfscfuf, "__builtin_spe_evfscfuf", SPE_BUILTIN_EVFSCFUF },
9486 { 0, CODE_FOR_spe_evfscfui, "__builtin_spe_evfscfui", SPE_BUILTIN_EVFSCFUI },
9487 { 0, CODE_FOR_spe_evfsctsf, "__builtin_spe_evfsctsf", SPE_BUILTIN_EVFSCTSF },
9488 { 0, CODE_FOR_spe_evfsctsi, "__builtin_spe_evfsctsi", SPE_BUILTIN_EVFSCTSI },
9489 { 0, CODE_FOR_spe_evfsctsiz, "__builtin_spe_evfsctsiz", SPE_BUILTIN_EVFSCTSIZ },
9490 { 0, CODE_FOR_spe_evfsctuf, "__builtin_spe_evfsctuf", SPE_BUILTIN_EVFSCTUF },
9491 { 0, CODE_FOR_spe_evfsctui, "__builtin_spe_evfsctui", SPE_BUILTIN_EVFSCTUI },
9492 { 0, CODE_FOR_spe_evfsctuiz, "__builtin_spe_evfsctuiz", SPE_BUILTIN_EVFSCTUIZ },
9493 { 0, CODE_FOR_spe_evfsnabs, "__builtin_spe_evfsnabs", SPE_BUILTIN_EVFSNABS },
9494 { 0, CODE_FOR_spe_evfsneg, "__builtin_spe_evfsneg", SPE_BUILTIN_EVFSNEG },
9495 { 0, CODE_FOR_spe_evmra, "__builtin_spe_evmra", SPE_BUILTIN_EVMRA },
9496 { 0, CODE_FOR_negv2si2, "__builtin_spe_evneg", SPE_BUILTIN_EVNEG },
9497 { 0, CODE_FOR_spe_evrndw, "__builtin_spe_evrndw", SPE_BUILTIN_EVRNDW },
9498 { 0, CODE_FOR_spe_evsubfsmiaaw, "__builtin_spe_evsubfsmiaaw", SPE_BUILTIN_EVSUBFSMIAAW },
9499 { 0, CODE_FOR_spe_evsubfssiaaw, "__builtin_spe_evsubfssiaaw", SPE_BUILTIN_EVSUBFSSIAAW },
9500 { 0, CODE_FOR_spe_evsubfumiaaw, "__builtin_spe_evsubfumiaaw", SPE_BUILTIN_EVSUBFUMIAAW },
9502 /* Place-holder. Leave as last unary SPE builtin. */
9503 { 0, CODE_FOR_spe_evsubfusiaaw, "__builtin_spe_evsubfusiaaw", SPE_BUILTIN_EVSUBFUSIAAW },
9505 { 0, CODE_FOR_paired_absv2sf2, "__builtin_paired_absv2sf2", PAIRED_BUILTIN_ABSV2SF2 },
9506 { 0, CODE_FOR_nabsv2sf2, "__builtin_paired_nabsv2sf2", PAIRED_BUILTIN_NABSV2SF2 },
9507 { 0, CODE_FOR_paired_negv2sf2, "__builtin_paired_negv2sf2", PAIRED_BUILTIN_NEGV2SF2 },
9508 { 0, CODE_FOR_sqrtv2sf2, "__builtin_paired_sqrtv2sf2", PAIRED_BUILTIN_SQRTV2SF2 },
9509 { 0, CODE_FOR_resv2sf2, "__builtin_paired_resv2sf2", PAIRED_BUILTIN_RESV2SF2 }
9513 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
9516 tree arg0 = CALL_EXPR_ARG (exp, 0);
9517 rtx op0 = expand_normal (arg0);
9518 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9519 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9521 if (icode == CODE_FOR_nothing)
9522 /* Builtin not supported on this processor. */
9525 /* If we got invalid arguments bail out before generating bad rtl. */
9526 if (arg0 == error_mark_node)
9529 if (icode == CODE_FOR_altivec_vspltisb
9530 || icode == CODE_FOR_altivec_vspltish
9531 || icode == CODE_FOR_altivec_vspltisw
9532 || icode == CODE_FOR_spe_evsplatfi
9533 || icode == CODE_FOR_spe_evsplati)
9535 /* Only allow 5-bit *signed* literals. */
9536 if (GET_CODE (op0) != CONST_INT
9537 || INTVAL (op0) > 15
9538 || INTVAL (op0) < -16)
9540 error ("argument 1 must be a 5-bit signed literal");
9546 || GET_MODE (target) != tmode
9547 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9548 target = gen_reg_rtx (tmode);
9550 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9551 op0 = copy_to_mode_reg (mode0, op0);
9553 pat = GEN_FCN (icode) (target, op0);
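/* Editorial note, not in the original source: the check above limits the
   splat immediate to the signed 5-bit range -16..15.  For example,
   __builtin_altivec_vspltisw (15) expands to a single vspltisw insn,
   while __builtin_altivec_vspltisw (16) is rejected with the
   "argument 1 must be a 5-bit signed literal" error before any RTL is
   emitted.  */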
9562 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
9564 rtx pat, scratch1, scratch2;
9565 tree arg0 = CALL_EXPR_ARG (exp, 0);
9566 rtx op0 = expand_normal (arg0);
9567 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9568 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9570 /* If we have invalid arguments, bail out before generating bad rtl. */
9571 if (arg0 == error_mark_node)
9575 || GET_MODE (target) != tmode
9576 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9577 target = gen_reg_rtx (tmode);
9579 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9580 op0 = copy_to_mode_reg (mode0, op0);
9582 scratch1 = gen_reg_rtx (mode0);
9583 scratch2 = gen_reg_rtx (mode0);
9585 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
9594 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
9597 tree arg0 = CALL_EXPR_ARG (exp, 0);
9598 tree arg1 = CALL_EXPR_ARG (exp, 1);
9599 rtx op0 = expand_normal (arg0);
9600 rtx op1 = expand_normal (arg1);
9601 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9602 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9603 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
9605 if (icode == CODE_FOR_nothing)
9606 /* Builtin not supported on this processor. */
9609 /* If we got invalid arguments bail out before generating bad rtl. */
9610 if (arg0 == error_mark_node || arg1 == error_mark_node)
9613 if (icode == CODE_FOR_altivec_vcfux
9614 || icode == CODE_FOR_altivec_vcfsx
9615 || icode == CODE_FOR_altivec_vctsxs
9616 || icode == CODE_FOR_altivec_vctuxs
9617 || icode == CODE_FOR_altivec_vspltb
9618 || icode == CODE_FOR_altivec_vsplth
9619 || icode == CODE_FOR_altivec_vspltw
9620 || icode == CODE_FOR_spe_evaddiw
9621 || icode == CODE_FOR_spe_evldd
9622 || icode == CODE_FOR_spe_evldh
9623 || icode == CODE_FOR_spe_evldw
9624 || icode == CODE_FOR_spe_evlhhesplat
9625 || icode == CODE_FOR_spe_evlhhossplat
9626 || icode == CODE_FOR_spe_evlhhousplat
9627 || icode == CODE_FOR_spe_evlwhe
9628 || icode == CODE_FOR_spe_evlwhos
9629 || icode == CODE_FOR_spe_evlwhou
9630 || icode == CODE_FOR_spe_evlwhsplat
9631 || icode == CODE_FOR_spe_evlwwsplat
9632 || icode == CODE_FOR_spe_evrlwi
9633 || icode == CODE_FOR_spe_evslwi
9634 || icode == CODE_FOR_spe_evsrwis
9635 || icode == CODE_FOR_spe_evsubifw
9636 || icode == CODE_FOR_spe_evsrwiu)
9638 /* Only allow 5-bit unsigned literals. */
9640 if (TREE_CODE (arg1) != INTEGER_CST
9641 || TREE_INT_CST_LOW (arg1) & ~0x1f)
9643 error ("argument 2 must be a 5-bit unsigned literal");
9649 || GET_MODE (target) != tmode
9650 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9651 target = gen_reg_rtx (tmode);
9653 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9654 op0 = copy_to_mode_reg (mode0, op0);
9655 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
9656 op1 = copy_to_mode_reg (mode1, op1);
9658 pat = GEN_FCN (icode) (target, op0, op1);
9667 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
9670 tree cr6_form = CALL_EXPR_ARG (exp, 0);
9671 tree arg0 = CALL_EXPR_ARG (exp, 1);
9672 tree arg1 = CALL_EXPR_ARG (exp, 2);
9673 rtx op0 = expand_normal (arg0);
9674 rtx op1 = expand_normal (arg1);
9675 enum machine_mode tmode = SImode;
9676 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9677 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
9680 if (TREE_CODE (cr6_form) != INTEGER_CST)
9682 error ("argument 1 of __builtin_altivec_predicate must be a constant");
9686 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
9688 gcc_assert (mode0 == mode1);
9690 /* If we have invalid arguments, bail out before generating bad rtl. */
9691 if (arg0 == error_mark_node || arg1 == error_mark_node)
9695 || GET_MODE (target) != tmode
9696 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9697 target = gen_reg_rtx (tmode);
9699 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9700 op0 = copy_to_mode_reg (mode0, op0);
9701 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
9702 op1 = copy_to_mode_reg (mode1, op1);
9704 scratch = gen_reg_rtx (mode0);
9706 pat = GEN_FCN (icode) (scratch, op0, op1);
9711 /* The vec_any* and vec_all* predicates use the same opcodes for two
9712 different operations, but the bits in CR6 will be different
9713 depending on what information we want. So we have to play tricks
9714 with CR6 to get the right bits out.
9716 If you think this is disgusting, look at the specs for the
9717 AltiVec predicates. */
9719 switch (cr6_form_int)
9722 emit_insn (gen_cr6_test_for_zero (target));
9725 emit_insn (gen_cr6_test_for_zero_reverse (target));
9728 emit_insn (gen_cr6_test_for_lt (target));
9731 emit_insn (gen_cr6_test_for_lt_reverse (target));
9734 error ("argument 1 of __builtin_altivec_predicate is out of range");
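/* Editorial illustration (an assumption, not taken from this source): the
   <altivec.h> wrappers encode the desired CR6 test in the hidden first
   argument, e.g. roughly

     vec_all_eq (a, b) -> __builtin_altivec_vcmpequw_p (__CR6_LT, a, b)
     vec_any_eq (a, b) -> __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, a, b)

   which selects gen_cr6_test_for_lt vs. gen_cr6_test_for_zero_reverse in
   the switch above.  */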
9742 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
9745 tree arg0 = CALL_EXPR_ARG (exp, 0);
9746 tree arg1 = CALL_EXPR_ARG (exp, 1);
9747 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9748 enum machine_mode mode0 = Pmode;
9749 enum machine_mode mode1 = Pmode;
9750 rtx op0 = expand_normal (arg0);
9751 rtx op1 = expand_normal (arg1);
9753 if (icode == CODE_FOR_nothing)
9754 /* Builtin not supported on this processor. */
9757 /* If we got invalid arguments bail out before generating bad rtl. */
9758 if (arg0 == error_mark_node || arg1 == error_mark_node)
9762 || GET_MODE (target) != tmode
9763 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9764 target = gen_reg_rtx (tmode);
9766 op1 = copy_to_mode_reg (mode1, op1);
9768 if (op0 == const0_rtx)
9770 addr = gen_rtx_MEM (tmode, op1);
9774 op0 = copy_to_mode_reg (mode0, op0);
9775 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
9778 pat = GEN_FCN (icode) (target, addr);
9788 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
9791 tree arg0 = CALL_EXPR_ARG (exp, 0);
9792 tree arg1 = CALL_EXPR_ARG (exp, 1);
9793 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9794 enum machine_mode mode0 = Pmode;
9795 enum machine_mode mode1 = Pmode;
9796 rtx op0 = expand_normal (arg0);
9797 rtx op1 = expand_normal (arg1);
9799 if (icode == CODE_FOR_nothing)
9800 /* Builtin not supported on this processor. */
9803 /* If we got invalid arguments bail out before generating bad rtl. */
9804 if (arg0 == error_mark_node || arg1 == error_mark_node)
9808 || GET_MODE (target) != tmode
9809 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9810 target = gen_reg_rtx (tmode);
9812 op1 = copy_to_mode_reg (mode1, op1);
9814 if (op0 == const0_rtx)
9816 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
9820 op0 = copy_to_mode_reg (mode0, op0);
9821 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
9824 pat = GEN_FCN (icode) (target, addr);
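/* Editorial note (an assumption, not taken from this source): BLK is true
   for the lvlx/lvrx family (see the callers in altivec_expand_builtin),
   where only part of the 16-byte block may actually be loaded, so the MEM
   is created with BLKmode rather than the vector mode to keep its size and
   alias information conservative.  */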
9834 spe_expand_stv_builtin (enum insn_code icode, tree exp)
9836 tree arg0 = CALL_EXPR_ARG (exp, 0);
9837 tree arg1 = CALL_EXPR_ARG (exp, 1);
9838 tree arg2 = CALL_EXPR_ARG (exp, 2);
9839 rtx op0 = expand_normal (arg0);
9840 rtx op1 = expand_normal (arg1);
9841 rtx op2 = expand_normal (arg2);
9843 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
9844 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
9845 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
9847 /* Invalid arguments. Bail before doing anything stoopid! */
9848 if (arg0 == error_mark_node
9849 || arg1 == error_mark_node
9850 || arg2 == error_mark_node)
9853 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
9854 op0 = copy_to_mode_reg (mode2, op0);
9855 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
9856 op1 = copy_to_mode_reg (mode0, op1);
9857 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
9858 op2 = copy_to_mode_reg (mode1, op2);
9860 pat = GEN_FCN (icode) (op1, op2, op0);
9867 paired_expand_stv_builtin (enum insn_code icode, tree exp)
9869 tree arg0 = CALL_EXPR_ARG (exp, 0);
9870 tree arg1 = CALL_EXPR_ARG (exp, 1);
9871 tree arg2 = CALL_EXPR_ARG (exp, 2);
9872 rtx op0 = expand_normal (arg0);
9873 rtx op1 = expand_normal (arg1);
9874 rtx op2 = expand_normal (arg2);
9876 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9877 enum machine_mode mode1 = Pmode;
9878 enum machine_mode mode2 = Pmode;
9880 /* Invalid arguments. Bail before doing anything stoopid! */
9881 if (arg0 == error_mark_node
9882 || arg1 == error_mark_node
9883 || arg2 == error_mark_node)
9886 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
9887 op0 = copy_to_mode_reg (tmode, op0);
9889 op2 = copy_to_mode_reg (mode2, op2);
9891 if (op1 == const0_rtx)
9893 addr = gen_rtx_MEM (tmode, op2);
9897 op1 = copy_to_mode_reg (mode1, op1);
9898 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
9901 pat = GEN_FCN (icode) (addr, op0);
9908 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
9910 tree arg0 = CALL_EXPR_ARG (exp, 0);
9911 tree arg1 = CALL_EXPR_ARG (exp, 1);
9912 tree arg2 = CALL_EXPR_ARG (exp, 2);
9913 rtx op0 = expand_normal (arg0);
9914 rtx op1 = expand_normal (arg1);
9915 rtx op2 = expand_normal (arg2);
9917 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9918 enum machine_mode mode1 = Pmode;
9919 enum machine_mode mode2 = Pmode;
9921 /* Invalid arguments. Bail before doing anything stoopid! */
9922 if (arg0 == error_mark_node
9923 || arg1 == error_mark_node
9924 || arg2 == error_mark_node)
9927 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
9928 op0 = copy_to_mode_reg (tmode, op0);
9930 op2 = copy_to_mode_reg (mode2, op2);
9932 if (op1 == const0_rtx)
9934 addr = gen_rtx_MEM (tmode, op2);
9938 op1 = copy_to_mode_reg (mode1, op1);
9939 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
9942 pat = GEN_FCN (icode) (addr, op0);
9949 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
9952 tree arg0 = CALL_EXPR_ARG (exp, 0);
9953 tree arg1 = CALL_EXPR_ARG (exp, 1);
9954 tree arg2 = CALL_EXPR_ARG (exp, 2);
9955 rtx op0 = expand_normal (arg0);
9956 rtx op1 = expand_normal (arg1);
9957 rtx op2 = expand_normal (arg2);
9958 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9959 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9960 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
9961 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
9963 if (icode == CODE_FOR_nothing)
9964 /* Builtin not supported on this processor. */
9967 /* If we got invalid arguments bail out before generating bad rtl. */
9968 if (arg0 == error_mark_node
9969 || arg1 == error_mark_node
9970 || arg2 == error_mark_node)
9975 case CODE_FOR_altivec_vsldoi_v4sf:
9976 case CODE_FOR_altivec_vsldoi_v4si:
9977 case CODE_FOR_altivec_vsldoi_v8hi:
9978 case CODE_FOR_altivec_vsldoi_v16qi:
9979 /* Only allow 4-bit unsigned literals. */
9981 if (TREE_CODE (arg2) != INTEGER_CST
9982 || TREE_INT_CST_LOW (arg2) & ~0xf)
9984 error ("argument 3 must be a 4-bit unsigned literal");
9989 case CODE_FOR_vsx_xxpermdi_v2df:
9990 case CODE_FOR_vsx_xxpermdi_v2di:
9991 case CODE_FOR_vsx_xxsldwi_v16qi:
9992 case CODE_FOR_vsx_xxsldwi_v8hi:
9993 case CODE_FOR_vsx_xxsldwi_v4si:
9994 case CODE_FOR_vsx_xxsldwi_v4sf:
9995 case CODE_FOR_vsx_xxsldwi_v2di:
9996 case CODE_FOR_vsx_xxsldwi_v2df:
9997 /* Only allow 2-bit unsigned literals. */
9999 if (TREE_CODE (arg2) != INTEGER_CST
10000 || TREE_INT_CST_LOW (arg2) & ~0x3)
10002 error ("argument 3 must be a 2-bit unsigned literal");
10007 case CODE_FOR_vsx_set_v2df:
10008 case CODE_FOR_vsx_set_v2di:
10009 /* Only allow 1-bit unsigned literals. */
10011 if (TREE_CODE (arg2) != INTEGER_CST
10012 || TREE_INT_CST_LOW (arg2) & ~0x1)
10014 error ("argument 3 must be a 1-bit unsigned literal");
10024 || GET_MODE (target) != tmode
10025 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10026 target = gen_reg_rtx (tmode);
10028 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10029 op0 = copy_to_mode_reg (mode0, op0);
10030 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10031 op1 = copy_to_mode_reg (mode1, op1);
10032 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
10033 op2 = copy_to_mode_reg (mode2, op2);
10035 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
10036 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
10038 pat = GEN_FCN (icode) (target, op0, op1, op2);
10046 /* Expand the lvx builtins. */
10048 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
10050 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10051 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10053 enum machine_mode tmode, mode0;
10055 enum insn_code icode;
10059 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
10060 icode = CODE_FOR_vector_load_v16qi;
10062 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
10063 icode = CODE_FOR_vector_load_v8hi;
10065 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
10066 icode = CODE_FOR_vector_load_v4si;
10068 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
10069 icode = CODE_FOR_vector_load_v4sf;
10072 *expandedp = false;
10078 arg0 = CALL_EXPR_ARG (exp, 0);
10079 op0 = expand_normal (arg0);
10080 tmode = insn_data[icode].operand[0].mode;
10081 mode0 = insn_data[icode].operand[1].mode;
10084 || GET_MODE (target) != tmode
10085 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10086 target = gen_reg_rtx (tmode);
10088 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10089 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
10091 pat = GEN_FCN (icode) (target, op0);
10098 /* Expand the stvx builtins. */
10100 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
10103 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10104 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10106 enum machine_mode mode0, mode1;
10108 enum insn_code icode;
10112 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
10113 icode = CODE_FOR_vector_store_v16qi;
10115 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
10116 icode = CODE_FOR_vector_store_v8hi;
10118 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
10119 icode = CODE_FOR_vector_store_v4si;
10121 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
10122 icode = CODE_FOR_vector_store_v4sf;
10125 *expandedp = false;
10129 arg0 = CALL_EXPR_ARG (exp, 0);
10130 arg1 = CALL_EXPR_ARG (exp, 1);
10131 op0 = expand_normal (arg0);
10132 op1 = expand_normal (arg1);
10133 mode0 = insn_data[icode].operand[0].mode;
10134 mode1 = insn_data[icode].operand[1].mode;
10136 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10137 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
10138 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
10139 op1 = copy_to_mode_reg (mode1, op1);
10141 pat = GEN_FCN (icode) (op0, op1);
10149 /* Expand the dst builtins. */
10151 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
10154 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10155 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10156 tree arg0, arg1, arg2;
10157 enum machine_mode mode0, mode1;
10158 rtx pat, op0, op1, op2;
10159 const struct builtin_description *d;
10162 *expandedp = false;
10164 /* Handle DST variants. */
10166 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
10167 if (d->code == fcode)
10169 arg0 = CALL_EXPR_ARG (exp, 0);
10170 arg1 = CALL_EXPR_ARG (exp, 1);
10171 arg2 = CALL_EXPR_ARG (exp, 2);
10172 op0 = expand_normal (arg0);
10173 op1 = expand_normal (arg1);
10174 op2 = expand_normal (arg2);
10175 mode0 = insn_data[d->icode].operand[0].mode;
10176 mode1 = insn_data[d->icode].operand[1].mode;
10178 /* Invalid arguments, bail out before generating bad rtl. */
10179 if (arg0 == error_mark_node
10180 || arg1 == error_mark_node
10181 || arg2 == error_mark_node)
10186 if (TREE_CODE (arg2) != INTEGER_CST
10187 || TREE_INT_CST_LOW (arg2) & ~0x3)
10189 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
10193 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
10194 op0 = copy_to_mode_reg (Pmode, op0);
10195 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
10196 op1 = copy_to_mode_reg (mode1, op1);
10198 pat = GEN_FCN (d->icode) (op0, op1, op2);
10208 /* Expand vec_init builtin. */
10210 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
10212 enum machine_mode tmode = TYPE_MODE (type);
10213 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
10214 int i, n_elt = GET_MODE_NUNITS (tmode);
10215 rtvec v = rtvec_alloc (n_elt);
10217 gcc_assert (VECTOR_MODE_P (tmode));
10218 gcc_assert (n_elt == call_expr_nargs (exp));
10220 for (i = 0; i < n_elt; ++i)
10222 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
10223 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
10226 if (!target || !register_operand (target, tmode))
10227 target = gen_reg_rtx (tmode);
10229 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
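/* Editorial illustration (an assumption, not taken from this source): a
   call such as __builtin_vec_init_v4si (a, b, c, d) arrives here with
   n_elt == 4; each argument is taken as a lowpart in the element mode and
   becomes one slot of the PARALLEL handed to rs6000_expand_vector_init.  */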
10233 /* Return the integer constant in ARG. Constrain it to be in the range
10234 of the subparts of VEC_TYPE; issue an error if not. */
10237 get_element_number (tree vec_type, tree arg)
10239 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
10241 if (!host_integerp (arg, 1)
10242 || (elt = tree_low_cst (arg, 1), elt > max))
10244 error ("selector must be an integer constant in the range 0..%wi", max);
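/* Editorial illustration: for a V4SI vector TYPE_VECTOR_SUBPARTS is 4, so
   max is 3 and a selector of 7 in, say, __builtin_vec_ext_v4si (v, 7)
   triggers the error above, while selectors 0..3 are accepted (the builtin
   name used here is an assumption, not taken from this source).  */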
10251 /* Expand vec_set builtin. */
10253 altivec_expand_vec_set_builtin (tree exp)
10255 enum machine_mode tmode, mode1;
10256 tree arg0, arg1, arg2;
10260 arg0 = CALL_EXPR_ARG (exp, 0);
10261 arg1 = CALL_EXPR_ARG (exp, 1);
10262 arg2 = CALL_EXPR_ARG (exp, 2);
10264 tmode = TYPE_MODE (TREE_TYPE (arg0));
10265 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
10266 gcc_assert (VECTOR_MODE_P (tmode));
10268 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
10269 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
10270 elt = get_element_number (TREE_TYPE (arg0), arg2);
10272 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
10273 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
10275 op0 = force_reg (tmode, op0);
10276 op1 = force_reg (mode1, op1);
10278 rs6000_expand_vector_set (op0, op1, elt);
10283 /* Expand vec_ext builtin. */
10285 altivec_expand_vec_ext_builtin (tree exp, rtx target)
10287 enum machine_mode tmode, mode0;
10292 arg0 = CALL_EXPR_ARG (exp, 0);
10293 arg1 = CALL_EXPR_ARG (exp, 1);
10295 op0 = expand_normal (arg0);
10296 elt = get_element_number (TREE_TYPE (arg0), arg1);
10298 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
10299 mode0 = TYPE_MODE (TREE_TYPE (arg0));
10300 gcc_assert (VECTOR_MODE_P (mode0));
10302 op0 = force_reg (mode0, op0);
10304 if (optimize || !target || !register_operand (target, tmode))
10305 target = gen_reg_rtx (tmode);
10307 rs6000_expand_vector_extract (target, op0, elt);
10312 /* Expand the builtin in EXP and store the result in TARGET. Store
10313 true in *EXPANDEDP if we found a builtin to expand. */
10315 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
10317 const struct builtin_description *d;
10318 const struct builtin_description_predicates *dp;
10320 enum insn_code icode;
10321 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10324 enum machine_mode tmode, mode0;
10325 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10327 if ((fcode >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
10328 && fcode <= ALTIVEC_BUILTIN_OVERLOADED_LAST)
10329 || (fcode >= VSX_BUILTIN_OVERLOADED_FIRST
10330 && fcode <= VSX_BUILTIN_OVERLOADED_LAST))
10333 error ("unresolved overload for Altivec builtin %qF", fndecl);
10337 target = altivec_expand_ld_builtin (exp, target, expandedp);
10341 target = altivec_expand_st_builtin (exp, target, expandedp);
10345 target = altivec_expand_dst_builtin (exp, target, expandedp);
10353 case ALTIVEC_BUILTIN_STVX:
10354 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx, exp);
10355 case ALTIVEC_BUILTIN_STVEBX:
10356 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
10357 case ALTIVEC_BUILTIN_STVEHX:
10358 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
10359 case ALTIVEC_BUILTIN_STVEWX:
10360 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
10361 case ALTIVEC_BUILTIN_STVXL:
10362 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);
10364 case ALTIVEC_BUILTIN_STVLX:
10365 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
10366 case ALTIVEC_BUILTIN_STVLXL:
10367 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
10368 case ALTIVEC_BUILTIN_STVRX:
10369 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
10370 case ALTIVEC_BUILTIN_STVRXL:
10371 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
10373 case ALTIVEC_BUILTIN_MFVSCR:
10374 icode = CODE_FOR_altivec_mfvscr;
10375 tmode = insn_data[icode].operand[0].mode;
10378 || GET_MODE (target) != tmode
10379 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10380 target = gen_reg_rtx (tmode);
10382 pat = GEN_FCN (icode) (target);
10388 case ALTIVEC_BUILTIN_MTVSCR:
10389 icode = CODE_FOR_altivec_mtvscr;
10390 arg0 = CALL_EXPR_ARG (exp, 0);
10391 op0 = expand_normal (arg0);
10392 mode0 = insn_data[icode].operand[0].mode;
10394 /* If we got invalid arguments bail out before generating bad rtl. */
10395 if (arg0 == error_mark_node)
10398 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10399 op0 = copy_to_mode_reg (mode0, op0);
10401 pat = GEN_FCN (icode) (op0);
10406 case ALTIVEC_BUILTIN_DSSALL:
10407 emit_insn (gen_altivec_dssall ());
10410 case ALTIVEC_BUILTIN_DSS:
10411 icode = CODE_FOR_altivec_dss;
10412 arg0 = CALL_EXPR_ARG (exp, 0);
10414 op0 = expand_normal (arg0);
10415 mode0 = insn_data[icode].operand[0].mode;
10417 /* If we got invalid arguments bail out before generating bad rtl. */
10418 if (arg0 == error_mark_node)
10421 if (TREE_CODE (arg0) != INTEGER_CST
10422 || TREE_INT_CST_LOW (arg0) & ~0x3)
10424 error ("argument to dss must be a 2-bit unsigned literal");
10428 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10429 op0 = copy_to_mode_reg (mode0, op0);
10431 emit_insn (gen_altivec_dss (op0));
10434 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
10435 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
10436 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
10437 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
10438 case VSX_BUILTIN_VEC_INIT_V2DF:
10439 case VSX_BUILTIN_VEC_INIT_V2DI:
10440 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
10442 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
10443 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
10444 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
10445 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
10446 case VSX_BUILTIN_VEC_SET_V2DF:
10447 case VSX_BUILTIN_VEC_SET_V2DI:
10448 return altivec_expand_vec_set_builtin (exp);
10450 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
10451 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
10452 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
10453 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
10454 case VSX_BUILTIN_VEC_EXT_V2DF:
10455 case VSX_BUILTIN_VEC_EXT_V2DI:
10456 return altivec_expand_vec_ext_builtin (exp, target);
10460 /* Fall through. */
10463 /* Expand abs* operations. */
10465 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
10466 if (d->code == fcode)
10467 return altivec_expand_abs_builtin (d->icode, exp, target);
10469 /* Expand the AltiVec predicates. */
10470 dp = bdesc_altivec_preds;
10471 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, dp++)
10472 if (dp->code == fcode)
10473 return altivec_expand_predicate_builtin (dp->icode, exp, target);
10475 /* LV* are funky. We initialized them differently. */
10478 case ALTIVEC_BUILTIN_LVSL:
10479 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
10480 exp, target, false);
10481 case ALTIVEC_BUILTIN_LVSR:
10482 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
10483 exp, target, false);
10484 case ALTIVEC_BUILTIN_LVEBX:
10485 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
10486 exp, target, false);
10487 case ALTIVEC_BUILTIN_LVEHX:
10488 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
10489 exp, target, false);
10490 case ALTIVEC_BUILTIN_LVEWX:
10491 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
10492 exp, target, false);
10493 case ALTIVEC_BUILTIN_LVXL:
10494 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
10495 exp, target, false);
10496 case ALTIVEC_BUILTIN_LVX:
10497 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx,
10498 exp, target, false);
10499 case ALTIVEC_BUILTIN_LVLX:
10500 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
10501 exp, target, true);
10502 case ALTIVEC_BUILTIN_LVLXL:
10503 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
10504 exp, target, true);
10505 case ALTIVEC_BUILTIN_LVRX:
10506 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
10507 exp, target, true);
10508 case ALTIVEC_BUILTIN_LVRXL:
10509 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
10510 exp, target, true);
10513 /* Fall through. */
10516 *expandedp = false;
10520 /* Expand the builtin in EXP and store the result in TARGET. Store
10521 true in *EXPANDEDP if we found a builtin to expand. */
10523 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
10525 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10526 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10527 const struct builtin_description *d;
10534 case PAIRED_BUILTIN_STX:
10535 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
10536 case PAIRED_BUILTIN_LX:
10537 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
10540 /* Fall through. */
10543 /* Expand the paired predicates. */
10544 d = bdesc_paired_preds;
10545 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
10546 if (d->code == fcode)
10547 return paired_expand_predicate_builtin (d->icode, exp, target);
10549 *expandedp = false;
10553 /* Binops that need to be initialized manually, but can be expanded
10554 automagically by rs6000_expand_binop_builtin. */
10555 static struct builtin_description bdesc_2arg_spe[] =
10557 { 0, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
10558 { 0, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
10559 { 0, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
10560 { 0, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
10561 { 0, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
10562 { 0, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
10563 { 0, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
10564 { 0, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
10565 { 0, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
10566 { 0, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
10567 { 0, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
10568 { 0, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
10569 { 0, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
10570 { 0, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
10571 { 0, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
10572 { 0, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
10573 { 0, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
10574 { 0, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
10575 { 0, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
10576 { 0, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
10577 { 0, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
10578 { 0, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
10581 /* Expand the builtin in EXP and store the result in TARGET. Store
10582 true in *EXPANDEDP if we found a builtin to expand.
10584 This expands the SPE builtins that are not simple unary and binary
10587 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
10589 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10591 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10592 enum insn_code icode;
10593 enum machine_mode tmode, mode0;
10595 struct builtin_description *d;
10600 /* Syntax check for a 5-bit unsigned immediate. */
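/* For illustration (hypothetical user code): the last operand of these
   store builtins is the offset literal, so

       __builtin_spe_evstdd (val, p, 31);

   passes the check below, while an operand of 32 -- or any non-constant --
   triggers the "5-bit unsigned literal" error.  */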
10603 case SPE_BUILTIN_EVSTDD:
10604 case SPE_BUILTIN_EVSTDH:
10605 case SPE_BUILTIN_EVSTDW:
10606 case SPE_BUILTIN_EVSTWHE:
10607 case SPE_BUILTIN_EVSTWHO:
10608 case SPE_BUILTIN_EVSTWWE:
10609 case SPE_BUILTIN_EVSTWWO:
10610 arg1 = CALL_EXPR_ARG (exp, 2);
10611 if (TREE_CODE (arg1) != INTEGER_CST
10612 || TREE_INT_CST_LOW (arg1) & ~0x1f)
10614 error ("argument 2 must be a 5-bit unsigned literal");
10622 /* The evsplat*i instructions are not quite generic. */
10625 case SPE_BUILTIN_EVSPLATFI:
10626 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
10628 case SPE_BUILTIN_EVSPLATI:
10629 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
10635 d = (struct builtin_description *) bdesc_2arg_spe;
10636 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
10637 if (d->code == fcode)
10638 return rs6000_expand_binop_builtin (d->icode, exp, target);
10640 d = (struct builtin_description *) bdesc_spe_predicates;
10641 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
10642 if (d->code == fcode)
10643 return spe_expand_predicate_builtin (d->icode, exp, target);
10645 d = (struct builtin_description *) bdesc_spe_evsel;
10646 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
10647 if (d->code == fcode)
10648 return spe_expand_evsel_builtin (d->icode, exp, target);
10652 case SPE_BUILTIN_EVSTDDX:
10653 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
10654 case SPE_BUILTIN_EVSTDHX:
10655 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
10656 case SPE_BUILTIN_EVSTDWX:
10657 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
10658 case SPE_BUILTIN_EVSTWHEX:
10659 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
10660 case SPE_BUILTIN_EVSTWHOX:
10661 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
10662 case SPE_BUILTIN_EVSTWWEX:
10663 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
10664 case SPE_BUILTIN_EVSTWWOX:
10665 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
10666 case SPE_BUILTIN_EVSTDD:
10667 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
10668 case SPE_BUILTIN_EVSTDH:
10669 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
10670 case SPE_BUILTIN_EVSTDW:
10671 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
10672 case SPE_BUILTIN_EVSTWHE:
10673 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
10674 case SPE_BUILTIN_EVSTWHO:
10675 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
10676 case SPE_BUILTIN_EVSTWWE:
10677 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
10678 case SPE_BUILTIN_EVSTWWO:
10679 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
10680 case SPE_BUILTIN_MFSPEFSCR:
10681 icode = CODE_FOR_spe_mfspefscr;
10682 tmode = insn_data[icode].operand[0].mode;
10685 || GET_MODE (target) != tmode
10686 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10687 target = gen_reg_rtx (tmode);
10689 pat = GEN_FCN (icode) (target);
10694 case SPE_BUILTIN_MTSPEFSCR:
10695 icode = CODE_FOR_spe_mtspefscr;
10696 arg0 = CALL_EXPR_ARG (exp, 0);
10697 op0 = expand_normal (arg0);
10698 mode0 = insn_data[icode].operand[0].mode;
10700 if (arg0 == error_mark_node)
10703 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10704 op0 = copy_to_mode_reg (mode0, op0);
10706 pat = GEN_FCN (icode) (op0);
10714 *expandedp = false;
10719 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
10721 rtx pat, scratch, tmp;
10722 tree form = CALL_EXPR_ARG (exp, 0);
10723 tree arg0 = CALL_EXPR_ARG (exp, 1);
10724 tree arg1 = CALL_EXPR_ARG (exp, 2);
10725 rtx op0 = expand_normal (arg0);
10726 rtx op1 = expand_normal (arg1);
10727 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10728 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10730 enum rtx_code code;
10732 if (TREE_CODE (form) != INTEGER_CST)
10734 error ("argument 1 of __builtin_paired_predicate must be a constant");
10738 form_int = TREE_INT_CST_LOW (form);
10740 gcc_assert (mode0 == mode1);
10742 if (arg0 == error_mark_node || arg1 == error_mark_node)
10746 || GET_MODE (target) != SImode
10747 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
10748 target = gen_reg_rtx (SImode);
10749 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
10750 op0 = copy_to_mode_reg (mode0, op0);
10751 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
10752 op1 = copy_to_mode_reg (mode1, op1);
10754 scratch = gen_reg_rtx (CCFPmode);
10756 pat = GEN_FCN (icode) (scratch, op0, op1);
10778 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
10781 error ("argument 1 of __builtin_paired_predicate is out of range");
10785 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
10786 emit_move_insn (target, tmp);
10791 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
10793 rtx pat, scratch, tmp;
10794 tree form = CALL_EXPR_ARG (exp, 0);
10795 tree arg0 = CALL_EXPR_ARG (exp, 1);
10796 tree arg1 = CALL_EXPR_ARG (exp, 2);
10797 rtx op0 = expand_normal (arg0);
10798 rtx op1 = expand_normal (arg1);
10799 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10800 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10802 enum rtx_code code;
10804 if (TREE_CODE (form) != INTEGER_CST)
10806 error ("argument 1 of __builtin_spe_predicate must be a constant");
10810 form_int = TREE_INT_CST_LOW (form);
10812 gcc_assert (mode0 == mode1);
10814 if (arg0 == error_mark_node || arg1 == error_mark_node)
10818 || GET_MODE (target) != SImode
10819 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
10820 target = gen_reg_rtx (SImode);
10822 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10823 op0 = copy_to_mode_reg (mode0, op0);
10824 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10825 op1 = copy_to_mode_reg (mode1, op1);
10827 scratch = gen_reg_rtx (CCmode);
10829 pat = GEN_FCN (icode) (scratch, op0, op1);
10834 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
10835 _lower_. We use one compare, but look in different bits of the
10836 CR for each variant.
10838 There are 2 elements in each SPE simd type (upper/lower). The CR
10839 bits are set as follows:
10841 BIT0 | BIT 1 | BIT 2 | BIT 3
10842 U | L | (U | L) | (U & L)
10844 So, for an "all" relationship, BIT 3 would be set.
10845 For an "any" relationship, BIT 2 would be set. Etc.
10847 Following traditional nomenclature, these bits map to:
10849 BIT0 | BIT 1 | BIT 2 | BIT 3
10850 LT | GT | EQ | OV
10852 Later, we will generate rtl to look in the LT/EQ/EQ/OV bits.
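For example, if the comparison holds for the upper element but not the
lower one, then U=1 and L=0: LT and EQ are set while GT and OV are clear,
so the "any" and "upper" forms succeed and the "all" and "lower" forms
fail.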
10857 /* All variant. OV bit. */
10859 /* We need to get to the OV bit, which is the ORDERED bit. We
10860 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
10861 that's ugly and will make validate_condition_mode die.
10862 So let's just use another pattern. */
10863 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
10865 /* Any variant. EQ bit. */
10869 /* Upper variant. LT bit. */
10873 /* Lower variant. GT bit. */
10878 error ("argument 1 of __builtin_spe_predicate is out of range");
10882 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
10883 emit_move_insn (target, tmp);
10888 /* The evsel builtins look like this:
10890 e = __builtin_spe_evsel_OP (a, b, c, d);
10892 and work like this:
10894 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
10895 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
10899 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
10902 tree arg0 = CALL_EXPR_ARG (exp, 0);
10903 tree arg1 = CALL_EXPR_ARG (exp, 1);
10904 tree arg2 = CALL_EXPR_ARG (exp, 2);
10905 tree arg3 = CALL_EXPR_ARG (exp, 3);
10906 rtx op0 = expand_normal (arg0);
10907 rtx op1 = expand_normal (arg1);
10908 rtx op2 = expand_normal (arg2);
10909 rtx op3 = expand_normal (arg3);
10910 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10911 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10913 gcc_assert (mode0 == mode1);
10915 if (arg0 == error_mark_node || arg1 == error_mark_node
10916 || arg2 == error_mark_node || arg3 == error_mark_node)
10920 || GET_MODE (target) != mode0
10921 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
10922 target = gen_reg_rtx (mode0);
10924 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10925 op0 = copy_to_mode_reg (mode0, op0);
10926 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
10927 op1 = copy_to_mode_reg (mode0, op1);
10928 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
10929 op2 = copy_to_mode_reg (mode0, op2);
10930 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
10931 op3 = copy_to_mode_reg (mode0, op3);
10933 /* Generate the compare. */
10934 scratch = gen_reg_rtx (CCmode);
10935 pat = GEN_FCN (icode) (scratch, op0, op1);
10940 if (mode0 == V2SImode)
10941 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
10943 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
10948 /* Expand an expression EXP that calls a built-in function,
10949 with result going to TARGET if that's convenient
10950 (and in mode MODE if that's convenient).
10951 SUBTARGET may be used as the target for computing one of EXP's operands.
10952 IGNORE is nonzero if the value is to be ignored. */
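/* Overview of the dispatch below: a handful of irregular builtins are
   handled inline first; then the target-specific expanders
   (altivec_expand_builtin, spe_expand_builtin, paired_expand_builtin) get
   a chance; finally the generic bdesc_1arg, bdesc_2arg and bdesc_3arg
   tables are searched.  */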
10955 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10956 enum machine_mode mode ATTRIBUTE_UNUSED,
10957 int ignore ATTRIBUTE_UNUSED)
10959 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10960 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10961 const struct builtin_description *d;
10966 if (fcode == RS6000_BUILTIN_RECIP)
10967 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
10969 if (fcode == RS6000_BUILTIN_RECIPF)
10970 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
10972 if (fcode == RS6000_BUILTIN_RSQRTF)
10973 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
10975 if (fcode == RS6000_BUILTIN_BSWAP_HI)
10976 return rs6000_expand_unop_builtin (CODE_FOR_bswaphi2, exp, target);
10978 if (fcode == POWER7_BUILTIN_BPERMD)
10979 return rs6000_expand_binop_builtin (((TARGET_64BIT)
10980 ? CODE_FOR_bpermd_di
10981 : CODE_FOR_bpermd_si), exp, target);
10983 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_LOAD
10984 || fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
10986 int icode = (int) CODE_FOR_altivec_lvsr;
10987 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10988 enum machine_mode mode = insn_data[icode].operand[1].mode;
10992 gcc_assert (TARGET_ALTIVEC);
10994 arg = CALL_EXPR_ARG (exp, 0);
10995 gcc_assert (TREE_CODE (TREE_TYPE (arg)) == POINTER_TYPE);
10996 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
10997 addr = memory_address (mode, op);
10998 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
11002 /* For the load case we need to negate the address. */
11003 op = gen_reg_rtx (GET_MODE (addr));
11004 emit_insn (gen_rtx_SET (VOIDmode, op,
11005 gen_rtx_NEG (GET_MODE (addr), addr)));
11007 op = gen_rtx_MEM (mode, op);
11010 || GET_MODE (target) != tmode
11011 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11012 target = gen_reg_rtx (tmode);
11014 /*pat = gen_altivec_lvsr (target, op);*/
11015 pat = GEN_FCN (icode) (target, op);
11023 /* FIXME: There's got to be a nicer way to handle this case than
11024 constructing a new CALL_EXPR. */
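/* These conversion builtins carry a literal scale factor as their second
   operand; if the call arrives with only the vector operand, append
   integer_zero_node so the expanders below always see both operands.  */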
11025 if (fcode == ALTIVEC_BUILTIN_VCFUX
11026 || fcode == ALTIVEC_BUILTIN_VCFSX
11027 || fcode == ALTIVEC_BUILTIN_VCTUXS
11028 || fcode == ALTIVEC_BUILTIN_VCTSXS)
11030 if (call_expr_nargs (exp) == 1)
11031 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
11032 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
11035 if (TARGET_ALTIVEC)
11037 ret = altivec_expand_builtin (exp, target, &success);
11044 ret = spe_expand_builtin (exp, target, &success);
11049 if (TARGET_PAIRED_FLOAT)
11051 ret = paired_expand_builtin (exp, target, &success);
11057 gcc_assert (TARGET_ALTIVEC || TARGET_VSX || TARGET_SPE || TARGET_PAIRED_FLOAT);
11059 /* Handle simple unary operations. */
11060 d = (struct builtin_description *) bdesc_1arg;
11061 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
11062 if (d->code == fcode)
11063 return rs6000_expand_unop_builtin (d->icode, exp, target);
11065 /* Handle simple binary operations. */
11066 d = (struct builtin_description *) bdesc_2arg;
11067 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11068 if (d->code == fcode)
11069 return rs6000_expand_binop_builtin (d->icode, exp, target);
11071 /* Handle simple ternary operations. */
11073 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
11074 if (d->code == fcode)
11075 return rs6000_expand_ternop_builtin (d->icode, exp, target);
11077 gcc_unreachable ();
11081 rs6000_init_builtins (void)
11085 V2SI_type_node = build_vector_type (intSI_type_node, 2);
11086 V2SF_type_node = build_vector_type (float_type_node, 2);
11087 V2DI_type_node = build_vector_type (intDI_type_node, 2);
11088 V2DF_type_node = build_vector_type (double_type_node, 2);
11089 V4HI_type_node = build_vector_type (intHI_type_node, 4);
11090 V4SI_type_node = build_vector_type (intSI_type_node, 4);
11091 V4SF_type_node = build_vector_type (float_type_node, 4);
11092 V8HI_type_node = build_vector_type (intHI_type_node, 8);
11093 V16QI_type_node = build_vector_type (intQI_type_node, 16);
11095 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
11096 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
11097 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
11098 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
11100 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
11101 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
11102 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
11103 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
11105 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
11106 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
11107 'vector unsigned short'. */
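/* For illustration (user-level sketch, not part of this file): with
   -maltivec in C++,

       vector unsigned char a, b;
       vector bool char m = vec_cmpeq (a, b);

   the comparison result has the distinct "vector bool char" type, so
   overload resolution and C++ name mangling can tell it apart from
   "vector unsigned char" even though both use unsigned QImode elements.  */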
11109 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
11110 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
11111 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
11112 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
11113 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
11115 long_integer_type_internal_node = long_integer_type_node;
11116 long_unsigned_type_internal_node = long_unsigned_type_node;
11117 intQI_type_internal_node = intQI_type_node;
11118 uintQI_type_internal_node = unsigned_intQI_type_node;
11119 intHI_type_internal_node = intHI_type_node;
11120 uintHI_type_internal_node = unsigned_intHI_type_node;
11121 intSI_type_internal_node = intSI_type_node;
11122 uintSI_type_internal_node = unsigned_intSI_type_node;
11123 intDI_type_internal_node = intDI_type_node;
11124 uintDI_type_internal_node = unsigned_intDI_type_node;
11125 float_type_internal_node = float_type_node;
11126 double_type_internal_node = double_type_node;
11127 void_type_internal_node = void_type_node;
11129 /* Initialize the modes for builtin_function_type, mapping a machine mode to
11131 builtin_mode_to_type[QImode][0] = integer_type_node;
11132 builtin_mode_to_type[HImode][0] = integer_type_node;
11133 builtin_mode_to_type[SImode][0] = intSI_type_node;
11134 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
11135 builtin_mode_to_type[DImode][0] = intDI_type_node;
11136 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
11137 builtin_mode_to_type[SFmode][0] = float_type_node;
11138 builtin_mode_to_type[DFmode][0] = double_type_node;
11139 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
11140 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
11141 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
11142 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
11143 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
11144 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
11145 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
11146 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
11147 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
11148 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
11149 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
11150 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
11151 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
11153 tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
11154 get_identifier ("__bool char"),
11155 bool_char_type_node);
11156 TYPE_NAME (bool_char_type_node) = tdecl;
11157 (*lang_hooks.decls.pushdecl) (tdecl);
11158 tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
11159 get_identifier ("__bool short"),
11160 bool_short_type_node);
11161 TYPE_NAME (bool_short_type_node) = tdecl;
11162 (*lang_hooks.decls.pushdecl) (tdecl);
11163 tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
11164 get_identifier ("__bool int"),
11165 bool_int_type_node);
11166 TYPE_NAME (bool_int_type_node) = tdecl;
11167 (*lang_hooks.decls.pushdecl) (tdecl);
11168 tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL, get_identifier ("__pixel"),
11170 TYPE_NAME (pixel_type_node) = tdecl;
11171 (*lang_hooks.decls.pushdecl) (tdecl);
11173 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
11174 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
11175 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
11176 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
11177 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
11179 tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
11180 get_identifier ("__vector unsigned char"),
11181 unsigned_V16QI_type_node);
11182 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
11183 (*lang_hooks.decls.pushdecl) (tdecl);
11184 tdecl = build_decl (BUILTINS_LOCATION,
11185 TYPE_DECL, get_identifier ("__vector signed char"),
11187 TYPE_NAME (V16QI_type_node) = tdecl;
11188 (*lang_hooks.decls.pushdecl) (tdecl);
11189 tdecl = build_decl (BUILTINS_LOCATION,
11190 TYPE_DECL, get_identifier ("__vector __bool char"),
11191 bool_V16QI_type_node);
11192 TYPE_NAME ( bool_V16QI_type_node) = tdecl;
11193 (*lang_hooks.decls.pushdecl) (tdecl);
11195 tdecl = build_decl (BUILTINS_LOCATION,
11196 TYPE_DECL, get_identifier ("__vector unsigned short"),
11197 unsigned_V8HI_type_node);
11198 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
11199 (*lang_hooks.decls.pushdecl) (tdecl);
11200 tdecl = build_decl (BUILTINS_LOCATION,
11201 TYPE_DECL, get_identifier ("__vector signed short"),
11203 TYPE_NAME (V8HI_type_node) = tdecl;
11204 (*lang_hooks.decls.pushdecl) (tdecl);
11205 tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
11206 get_identifier ("__vector __bool short"),
11207 bool_V8HI_type_node);
11208 TYPE_NAME (bool_V8HI_type_node) = tdecl;
11209 (*lang_hooks.decls.pushdecl) (tdecl);
11211 tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
11212 get_identifier ("__vector unsigned int"),
11213 unsigned_V4SI_type_node);
11214 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
11215 (*lang_hooks.decls.pushdecl) (tdecl);
11216 tdecl = build_decl (BUILTINS_LOCATION,
11217 TYPE_DECL, get_identifier ("__vector signed int"),
11219 TYPE_NAME (V4SI_type_node) = tdecl;
11220 (*lang_hooks.decls.pushdecl) (tdecl);
11221 tdecl = build_decl (BUILTINS_LOCATION,
11222 TYPE_DECL, get_identifier ("__vector __bool int"),
11223 bool_V4SI_type_node);
11224 TYPE_NAME (bool_V4SI_type_node) = tdecl;
11225 (*lang_hooks.decls.pushdecl) (tdecl);
11227 tdecl = build_decl (BUILTINS_LOCATION,
11228 TYPE_DECL, get_identifier ("__vector float"),
11230 TYPE_NAME (V4SF_type_node) = tdecl;
11231 (*lang_hooks.decls.pushdecl) (tdecl);
11232 tdecl = build_decl (BUILTINS_LOCATION,
11233 TYPE_DECL, get_identifier ("__vector __pixel"),
11234 pixel_V8HI_type_node);
11235 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
11236 (*lang_hooks.decls.pushdecl) (tdecl);
11240 tdecl = build_decl (BUILTINS_LOCATION,
11241 TYPE_DECL, get_identifier ("__vector double"),
11243 TYPE_NAME (V2DF_type_node) = tdecl;
11244 (*lang_hooks.decls.pushdecl) (tdecl);
11246 tdecl = build_decl (BUILTINS_LOCATION,
11247 TYPE_DECL, get_identifier ("__vector long"),
11249 TYPE_NAME (V2DI_type_node) = tdecl;
11250 (*lang_hooks.decls.pushdecl) (tdecl);
11252 tdecl = build_decl (BUILTINS_LOCATION,
11253 TYPE_DECL, get_identifier ("__vector unsigned long"),
11254 unsigned_V2DI_type_node);
11255 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
11256 (*lang_hooks.decls.pushdecl) (tdecl);
11258 tdecl = build_decl (BUILTINS_LOCATION,
11259 TYPE_DECL, get_identifier ("__vector __bool long"),
11260 bool_V2DI_type_node);
11261 TYPE_NAME (bool_V2DI_type_node) = tdecl;
11262 (*lang_hooks.decls.pushdecl) (tdecl);
11265 if (TARGET_PAIRED_FLOAT)
11266 paired_init_builtins ();
11268 spe_init_builtins ();
11269 if (TARGET_ALTIVEC)
11270 altivec_init_builtins ();
11271 if (TARGET_ALTIVEC || TARGET_SPE || TARGET_PAIRED_FLOAT || TARGET_VSX)
11272 rs6000_common_init_builtins ();
11273 if (TARGET_PPC_GFXOPT)
11275 tree ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
11276 RS6000_BUILTIN_RECIPF,
11277 "__builtin_recipdivf");
11278 def_builtin (MASK_PPC_GFXOPT, "__builtin_recipdivf", ftype,
11279 RS6000_BUILTIN_RECIPF);
11281 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
11282 RS6000_BUILTIN_RSQRTF,
11283 "__builtin_rsqrtf");
11284 def_builtin (MASK_PPC_GFXOPT, "__builtin_rsqrtf", ftype,
11285 RS6000_BUILTIN_RSQRTF);
11287 if (TARGET_POPCNTB)
11289 tree ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
11290 RS6000_BUILTIN_RECIP,
11291 "__builtin_recipdiv");
11292 def_builtin (MASK_POPCNTB, "__builtin_recipdiv", ftype,
11293 RS6000_BUILTIN_RECIP);
11296 if (TARGET_POPCNTD)
11298 enum machine_mode mode = (TARGET_64BIT) ? DImode : SImode;
11299 tree ftype = builtin_function_type (mode, mode, mode, VOIDmode,
11300 POWER7_BUILTIN_BPERMD,
11301 "__builtin_bpermd");
11302 def_builtin (MASK_POPCNTD, "__builtin_bpermd", ftype,
11303 POWER7_BUILTIN_BPERMD);
11305 if (TARGET_POWERPC)
11307 /* Don't use builtin_function_type here, as it maps HI/QI to SI. */
11308 tree ftype = build_function_type_list (unsigned_intHI_type_node,
11309 unsigned_intHI_type_node,
11311 def_builtin (MASK_POWERPC, "__builtin_bswap16", ftype,
11312 RS6000_BUILTIN_BSWAP_HI);
11316 /* AIX libm provides clog as __clog. */
11317 if (built_in_decls [BUILT_IN_CLOG])
11318 set_user_assembler_name (built_in_decls [BUILT_IN_CLOG], "__clog");
11321 #ifdef SUBTARGET_INIT_BUILTINS
11322 SUBTARGET_INIT_BUILTINS;
11326 /* Returns the rs6000 builtin decl for CODE. */
11329 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
11331 if (code >= RS6000_BUILTIN_COUNT)
11332 return error_mark_node;
11334 return rs6000_builtin_decls[code];
11337 /* Search through a set of builtins and enable the mask bits.
11338 DESC is an array of builtins.
11339 SIZE is the total number of builtins.
11340 START is the builtin enum at which to start.
11341 END is the builtin enum at which to end. */
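/* For instance, spe_init_builtins below calls this over bdesc_2arg with the
   range SPE_BUILTIN_EVADDW .. SPE_BUILTIN_EVXOR, turning on target_flags
   for just that slice of the table.  */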
11343 enable_mask_for_builtins (struct builtin_description *desc, int size,
11344 enum rs6000_builtins start,
11345 enum rs6000_builtins end)
11349 for (i = 0; i < size; ++i)
11350 if (desc[i].code == start)
11356 for (; i < size; ++i)
11358 /* Flip all the bits on. */
11359 desc[i].mask = target_flags;
11360 if (desc[i].code == end)
11366 spe_init_builtins (void)
11368 tree endlink = void_list_node;
11369 tree puint_type_node = build_pointer_type (unsigned_type_node);
11370 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
11371 struct builtin_description *d;
11374 tree v2si_ftype_4_v2si
11375 = build_function_type
11376 (opaque_V2SI_type_node,
11377 tree_cons (NULL_TREE, opaque_V2SI_type_node,
11378 tree_cons (NULL_TREE, opaque_V2SI_type_node,
11379 tree_cons (NULL_TREE, opaque_V2SI_type_node,
11380 tree_cons (NULL_TREE, opaque_V2SI_type_node,
11383 tree v2sf_ftype_4_v2sf
11384 = build_function_type
11385 (opaque_V2SF_type_node,
11386 tree_cons (NULL_TREE, opaque_V2SF_type_node,
11387 tree_cons (NULL_TREE, opaque_V2SF_type_node,
11388 tree_cons (NULL_TREE, opaque_V2SF_type_node,
11389 tree_cons (NULL_TREE, opaque_V2SF_type_node,
11392 tree int_ftype_int_v2si_v2si
11393 = build_function_type
11394 (integer_type_node,
11395 tree_cons (NULL_TREE, integer_type_node,
11396 tree_cons (NULL_TREE, opaque_V2SI_type_node,
11397 tree_cons (NULL_TREE, opaque_V2SI_type_node,
11400 tree int_ftype_int_v2sf_v2sf
11401 = build_function_type
11402 (integer_type_node,
11403 tree_cons (NULL_TREE, integer_type_node,
11404 tree_cons (NULL_TREE, opaque_V2SF_type_node,
11405 tree_cons (NULL_TREE, opaque_V2SF_type_node,
11408 tree void_ftype_v2si_puint_int
11409 = build_function_type (void_type_node,
11410 tree_cons (NULL_TREE, opaque_V2SI_type_node,
11411 tree_cons (NULL_TREE, puint_type_node,
11412 tree_cons (NULL_TREE,
11416 tree void_ftype_v2si_puint_char
11417 = build_function_type (void_type_node,
11418 tree_cons (NULL_TREE, opaque_V2SI_type_node,
11419 tree_cons (NULL_TREE, puint_type_node,
11420 tree_cons (NULL_TREE,
11424 tree void_ftype_v2si_pv2si_int
11425 = build_function_type (void_type_node,
11426 tree_cons (NULL_TREE, opaque_V2SI_type_node,
11427 tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
11428 tree_cons (NULL_TREE,
11432 tree void_ftype_v2si_pv2si_char
11433 = build_function_type (void_type_node,
11434 tree_cons (NULL_TREE, opaque_V2SI_type_node,
11435 tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
11436 tree_cons (NULL_TREE,
11440 tree void_ftype_int
11441 = build_function_type (void_type_node,
11442 tree_cons (NULL_TREE, integer_type_node, endlink));
11444 tree int_ftype_void
11445 = build_function_type (integer_type_node, endlink);
11447 tree v2si_ftype_pv2si_int
11448 = build_function_type (opaque_V2SI_type_node,
11449 tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
11450 tree_cons (NULL_TREE, integer_type_node,
11453 tree v2si_ftype_puint_int
11454 = build_function_type (opaque_V2SI_type_node,
11455 tree_cons (NULL_TREE, puint_type_node,
11456 tree_cons (NULL_TREE, integer_type_node,
11459 tree v2si_ftype_pushort_int
11460 = build_function_type (opaque_V2SI_type_node,
11461 tree_cons (NULL_TREE, pushort_type_node,
11462 tree_cons (NULL_TREE, integer_type_node,
11465 tree v2si_ftype_signed_char
11466 = build_function_type (opaque_V2SI_type_node,
11467 tree_cons (NULL_TREE, signed_char_type_node,
11470 /* The initialization of the simple binary and unary builtins is
11471 done in rs6000_common_init_builtins, but we have to enable the
11472 mask bits here manually because we have run out of `target_flags'
11473 bits. We really need to redesign this mask business. */
11475 enable_mask_for_builtins ((struct builtin_description *) bdesc_2arg,
11476 ARRAY_SIZE (bdesc_2arg),
11477 SPE_BUILTIN_EVADDW,
11478 SPE_BUILTIN_EVXOR);
11479 enable_mask_for_builtins ((struct builtin_description *) bdesc_1arg,
11480 ARRAY_SIZE (bdesc_1arg),
11482 SPE_BUILTIN_EVSUBFUSIAAW);
11483 enable_mask_for_builtins ((struct builtin_description *) bdesc_spe_predicates,
11484 ARRAY_SIZE (bdesc_spe_predicates),
11485 SPE_BUILTIN_EVCMPEQ,
11486 SPE_BUILTIN_EVFSTSTLT);
11487 enable_mask_for_builtins ((struct builtin_description *) bdesc_spe_evsel,
11488 ARRAY_SIZE (bdesc_spe_evsel),
11489 SPE_BUILTIN_EVSEL_CMPGTS,
11490 SPE_BUILTIN_EVSEL_FSTSTEQ);
11492 (*lang_hooks.decls.pushdecl)
11493 (build_decl (BUILTINS_LOCATION, TYPE_DECL,
11494 get_identifier ("__ev64_opaque__"),
11495 opaque_V2SI_type_node));
11497 /* Initialize irregular SPE builtins. */
11499 def_builtin (target_flags, "__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
11500 def_builtin (target_flags, "__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
11501 def_builtin (target_flags, "__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
11502 def_builtin (target_flags, "__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
11503 def_builtin (target_flags, "__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
11504 def_builtin (target_flags, "__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
11505 def_builtin (target_flags, "__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
11506 def_builtin (target_flags, "__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
11507 def_builtin (target_flags, "__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
11508 def_builtin (target_flags, "__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
11509 def_builtin (target_flags, "__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
11510 def_builtin (target_flags, "__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
11511 def_builtin (target_flags, "__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
11512 def_builtin (target_flags, "__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
11513 def_builtin (target_flags, "__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
11514 def_builtin (target_flags, "__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
11515 def_builtin (target_flags, "__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
11516 def_builtin (target_flags, "__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
11519 def_builtin (target_flags, "__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
11520 def_builtin (target_flags, "__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
11521 def_builtin (target_flags, "__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
11522 def_builtin (target_flags, "__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
11523 def_builtin (target_flags, "__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
11524 def_builtin (target_flags, "__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
11525 def_builtin (target_flags, "__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
11526 def_builtin (target_flags, "__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
11527 def_builtin (target_flags, "__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
11528 def_builtin (target_flags, "__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
11529 def_builtin (target_flags, "__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
11530 def_builtin (target_flags, "__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
11531 def_builtin (target_flags, "__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
11532 def_builtin (target_flags, "__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
11533 def_builtin (target_flags, "__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
11534 def_builtin (target_flags, "__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
11535 def_builtin (target_flags, "__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
11536 def_builtin (target_flags, "__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
11537 def_builtin (target_flags, "__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
11538 def_builtin (target_flags, "__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
11539 def_builtin (target_flags, "__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
11540 def_builtin (target_flags, "__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
11543 d = (struct builtin_description *) bdesc_spe_predicates;
11544 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
11548 switch (insn_data[d->icode].operand[1].mode)
11551 type = int_ftype_int_v2si_v2si;
11554 type = int_ftype_int_v2sf_v2sf;
11557 gcc_unreachable ();
11560 def_builtin (d->mask, d->name, type, d->code);
11563 /* Evsel predicates. */
11564 d = (struct builtin_description *) bdesc_spe_evsel;
11565 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
11569 switch (insn_data[d->icode].operand[1].mode)
11572 type = v2si_ftype_4_v2si;
11575 type = v2sf_ftype_4_v2sf;
11578 gcc_unreachable ();
11581 def_builtin (d->mask, d->name, type, d->code);
11586 paired_init_builtins (void)
11588 const struct builtin_description *d;
11590 tree endlink = void_list_node;
11592 tree int_ftype_int_v2sf_v2sf
11593 = build_function_type
11594 (integer_type_node,
11595 tree_cons (NULL_TREE, integer_type_node,
11596 tree_cons (NULL_TREE, V2SF_type_node,
11597 tree_cons (NULL_TREE, V2SF_type_node,
11599 tree pcfloat_type_node =
11600 build_pointer_type (build_qualified_type
11601 (float_type_node, TYPE_QUAL_CONST));
11603 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
11604 long_integer_type_node,
11607 tree void_ftype_v2sf_long_pcfloat =
11608 build_function_type_list (void_type_node,
11610 long_integer_type_node,
11615 def_builtin (0, "__builtin_paired_lx", v2sf_ftype_long_pcfloat,
11616 PAIRED_BUILTIN_LX);
11619 def_builtin (0, "__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
11620 PAIRED_BUILTIN_STX);
11623 d = bdesc_paired_preds;
11624 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
11628 switch (insn_data[d->icode].operand[1].mode)
11631 type = int_ftype_int_v2sf_v2sf;
11634 gcc_unreachable ();
11637 def_builtin (d->mask, d->name, type, d->code);
11642 altivec_init_builtins (void)
11644 const struct builtin_description *d;
11645 const struct builtin_description_predicates *dp;
11649 tree pfloat_type_node = build_pointer_type (float_type_node);
11650 tree pint_type_node = build_pointer_type (integer_type_node);
11651 tree pshort_type_node = build_pointer_type (short_integer_type_node);
11652 tree pchar_type_node = build_pointer_type (char_type_node);
11654 tree pvoid_type_node = build_pointer_type (void_type_node);
11656 tree pcfloat_type_node = build_pointer_type (build_qualified_type (float_type_node, TYPE_QUAL_CONST));
11657 tree pcint_type_node = build_pointer_type (build_qualified_type (integer_type_node, TYPE_QUAL_CONST));
11658 tree pcshort_type_node = build_pointer_type (build_qualified_type (short_integer_type_node, TYPE_QUAL_CONST));
11659 tree pcchar_type_node = build_pointer_type (build_qualified_type (char_type_node, TYPE_QUAL_CONST));
11661 tree pcvoid_type_node = build_pointer_type (build_qualified_type (void_type_node, TYPE_QUAL_CONST));
11663 tree int_ftype_opaque
11664 = build_function_type_list (integer_type_node,
11665 opaque_V4SI_type_node, NULL_TREE);
11666 tree opaque_ftype_opaque
11667 = build_function_type (integer_type_node,
11669 tree opaque_ftype_opaque_int
11670 = build_function_type_list (opaque_V4SI_type_node,
11671 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
11672 tree opaque_ftype_opaque_opaque_int
11673 = build_function_type_list (opaque_V4SI_type_node,
11674 opaque_V4SI_type_node, opaque_V4SI_type_node,
11675 integer_type_node, NULL_TREE);
11676 tree int_ftype_int_opaque_opaque
11677 = build_function_type_list (integer_type_node,
11678 integer_type_node, opaque_V4SI_type_node,
11679 opaque_V4SI_type_node, NULL_TREE);
11680 tree int_ftype_int_v4si_v4si
11681 = build_function_type_list (integer_type_node,
11682 integer_type_node, V4SI_type_node,
11683 V4SI_type_node, NULL_TREE);
11684 tree v4sf_ftype_pcfloat
11685 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
11686 tree void_ftype_pfloat_v4sf
11687 = build_function_type_list (void_type_node,
11688 pfloat_type_node, V4SF_type_node, NULL_TREE);
11689 tree v4si_ftype_pcint
11690 = build_function_type_list (V4SI_type_node, pcint_type_node, NULL_TREE);
11691 tree void_ftype_pint_v4si
11692 = build_function_type_list (void_type_node,
11693 pint_type_node, V4SI_type_node, NULL_TREE);
11694 tree v8hi_ftype_pcshort
11695 = build_function_type_list (V8HI_type_node, pcshort_type_node, NULL_TREE);
11696 tree void_ftype_pshort_v8hi
11697 = build_function_type_list (void_type_node,
11698 pshort_type_node, V8HI_type_node, NULL_TREE);
11699 tree v16qi_ftype_pcchar
11700 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
11701 tree void_ftype_pchar_v16qi
11702 = build_function_type_list (void_type_node,
11703 pchar_type_node, V16QI_type_node, NULL_TREE);
11704 tree void_ftype_v4si
11705 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
11706 tree v8hi_ftype_void
11707 = build_function_type (V8HI_type_node, void_list_node);
11708 tree void_ftype_void
11709 = build_function_type (void_type_node, void_list_node);
11710 tree void_ftype_int
11711 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
11713 tree opaque_ftype_long_pcvoid
11714 = build_function_type_list (opaque_V4SI_type_node,
11715 long_integer_type_node, pcvoid_type_node, NULL_TREE);
11716 tree v16qi_ftype_long_pcvoid
11717 = build_function_type_list (V16QI_type_node,
11718 long_integer_type_node, pcvoid_type_node, NULL_TREE);
11719 tree v8hi_ftype_long_pcvoid
11720 = build_function_type_list (V8HI_type_node,
11721 long_integer_type_node, pcvoid_type_node, NULL_TREE);
11722 tree v4si_ftype_long_pcvoid
11723 = build_function_type_list (V4SI_type_node,
11724 long_integer_type_node, pcvoid_type_node, NULL_TREE);
11726 tree void_ftype_opaque_long_pvoid
11727 = build_function_type_list (void_type_node,
11728 opaque_V4SI_type_node, long_integer_type_node,
11729 pvoid_type_node, NULL_TREE);
11730 tree void_ftype_v4si_long_pvoid
11731 = build_function_type_list (void_type_node,
11732 V4SI_type_node, long_integer_type_node,
11733 pvoid_type_node, NULL_TREE);
11734 tree void_ftype_v16qi_long_pvoid
11735 = build_function_type_list (void_type_node,
11736 V16QI_type_node, long_integer_type_node,
11737 pvoid_type_node, NULL_TREE);
11738 tree void_ftype_v8hi_long_pvoid
11739 = build_function_type_list (void_type_node,
11740 V8HI_type_node, long_integer_type_node,
11741 pvoid_type_node, NULL_TREE);
11742 tree int_ftype_int_v8hi_v8hi
11743 = build_function_type_list (integer_type_node,
11744 integer_type_node, V8HI_type_node,
11745 V8HI_type_node, NULL_TREE);
11746 tree int_ftype_int_v16qi_v16qi
11747 = build_function_type_list (integer_type_node,
11748 integer_type_node, V16QI_type_node,
11749 V16QI_type_node, NULL_TREE);
11750 tree int_ftype_int_v4sf_v4sf
11751 = build_function_type_list (integer_type_node,
11752 integer_type_node, V4SF_type_node,
11753 V4SF_type_node, NULL_TREE);
11754 tree int_ftype_int_v2df_v2df
11755 = build_function_type_list (integer_type_node,
11756 integer_type_node, V2DF_type_node,
11757 V2DF_type_node, NULL_TREE);
11758 tree v4si_ftype_v4si
11759 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
11760 tree v8hi_ftype_v8hi
11761 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
11762 tree v16qi_ftype_v16qi
11763 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
11764 tree v4sf_ftype_v4sf
11765 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
11766 tree v2df_ftype_v2df
11767 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
11768 tree void_ftype_pcvoid_int_int
11769 = build_function_type_list (void_type_node,
11770 pcvoid_type_node, integer_type_node,
11771 integer_type_node, NULL_TREE);
11773 def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4sf", v4sf_ftype_pcfloat,
11774 ALTIVEC_BUILTIN_LD_INTERNAL_4sf);
11775 def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_4sf", void_ftype_pfloat_v4sf,
11776 ALTIVEC_BUILTIN_ST_INTERNAL_4sf);
11777 def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4si", v4si_ftype_pcint,
11778 ALTIVEC_BUILTIN_LD_INTERNAL_4si);
11779 def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_4si", void_ftype_pint_v4si,
11780 ALTIVEC_BUILTIN_ST_INTERNAL_4si);
11781 def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_8hi", v8hi_ftype_pcshort,
11782 ALTIVEC_BUILTIN_LD_INTERNAL_8hi);
11783 def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_8hi", void_ftype_pshort_v8hi,
11784 ALTIVEC_BUILTIN_ST_INTERNAL_8hi);
11785 def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_16qi", v16qi_ftype_pcchar,
11786 ALTIVEC_BUILTIN_LD_INTERNAL_16qi);
11787 def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_16qi", void_ftype_pchar_v16qi,
11788 ALTIVEC_BUILTIN_ST_INTERNAL_16qi);
11789 def_builtin (MASK_ALTIVEC, "__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
11790 def_builtin (MASK_ALTIVEC, "__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
11791 def_builtin (MASK_ALTIVEC, "__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
11792 def_builtin (MASK_ALTIVEC, "__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
11793 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
11794 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
11795 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
11796 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
11797 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
11798 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
11799 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
11800 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
11801 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
11802 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
11803 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
11804 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
11805 def_builtin (MASK_ALTIVEC, "__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
11806 def_builtin (MASK_ALTIVEC, "__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
11807 def_builtin (MASK_ALTIVEC, "__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
11808 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
11809 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
11810 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
11811 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
11812 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
11813 def_builtin (MASK_ALTIVEC, "__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
11814 def_builtin (MASK_ALTIVEC, "__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
11815 def_builtin (MASK_ALTIVEC, "__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
11816 def_builtin (MASK_ALTIVEC, "__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
11817 def_builtin (MASK_ALTIVEC, "__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
11818 def_builtin (MASK_ALTIVEC, "__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
11820 if (rs6000_cpu == PROCESSOR_CELL)
11822 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
11823 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
11824 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
11825 def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
11827 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
11828 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
11829 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
11830 def_builtin (MASK_ALTIVEC, "__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
11832 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
11833 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
11834 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
11835 def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
11837 def_builtin (MASK_ALTIVEC, "__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
11838 def_builtin (MASK_ALTIVEC, "__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
11839 def_builtin (MASK_ALTIVEC, "__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
11840 def_builtin (MASK_ALTIVEC, "__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
11842 def_builtin (MASK_ALTIVEC, "__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
11843 def_builtin (MASK_ALTIVEC, "__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
11844 def_builtin (MASK_ALTIVEC, "__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
11846 def_builtin (MASK_ALTIVEC, "__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
11847 def_builtin (MASK_ALTIVEC, "__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
11848 def_builtin (MASK_ALTIVEC, "__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
11849 def_builtin (MASK_ALTIVEC, "__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
11850 def_builtin (MASK_ALTIVEC, "__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
11851 def_builtin (MASK_ALTIVEC, "__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
11852 def_builtin (MASK_ALTIVEC, "__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
11853 def_builtin (MASK_ALTIVEC, "__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
11854 def_builtin (MASK_ALTIVEC, "__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
11855 def_builtin (MASK_ALTIVEC, "__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
11856 def_builtin (MASK_ALTIVEC, "__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
11857 def_builtin (MASK_ALTIVEC, "__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
11859 /* Add the DST variants. */
11861 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
11862 def_builtin (d->mask, d->name, void_ftype_pcvoid_int_int, d->code);
11864 /* Initialize the predicates. */
11865 dp = bdesc_altivec_preds;
11866 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, dp++)
11868 enum machine_mode mode1;
11870 bool is_overloaded = ((dp->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
11871 && dp->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST)
11872 || (dp->code >= VSX_BUILTIN_OVERLOADED_FIRST
11873 && dp->code <= VSX_BUILTIN_OVERLOADED_LAST));
11878 mode1 = insn_data[dp->icode].operand[1].mode;
11883 type = int_ftype_int_opaque_opaque;
11886 type = int_ftype_int_v4si_v4si;
11889 type = int_ftype_int_v8hi_v8hi;
11892 type = int_ftype_int_v16qi_v16qi;
11895 type = int_ftype_int_v4sf_v4sf;
11898 type = int_ftype_int_v2df_v2df;
11901 gcc_unreachable ();
11904 def_builtin (dp->mask, dp->name, type, dp->code);
11907 /* Initialize the abs* operators. */
11909 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
11911 enum machine_mode mode0;
11914 mode0 = insn_data[d->icode].operand[0].mode;
11919 type = v4si_ftype_v4si;
11922 type = v8hi_ftype_v8hi;
11925 type = v16qi_ftype_v16qi;
11928 type = v4sf_ftype_v4sf;
11931 type = v2df_ftype_v2df;
11934 gcc_unreachable ();
11937 def_builtin (d->mask, d->name, type, d->code);
11940 if (TARGET_ALTIVEC)
11944 /* Initialize target builtin that implements
11945 targetm.vectorize.builtin_mask_for_load. */
11947 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
11948 v16qi_ftype_long_pcvoid,
11949 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
11950 BUILT_IN_MD, NULL, NULL_TREE);
11951 TREE_READONLY (decl) = 1;
11952 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
11953 altivec_builtin_mask_for_load = decl;
11956 /* Access to the vec_init patterns. */
11957 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
11958 integer_type_node, integer_type_node,
11959 integer_type_node, NULL_TREE);
11960 def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v4si", ftype,
11961 ALTIVEC_BUILTIN_VEC_INIT_V4SI);
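/* Illustrative use (hypothetical user code): __builtin_vec_init_v4si
   (1, 2, 3, 4) takes four scalar ints and yields the corresponding V4SI
   vector; the other __builtin_vec_init_* variants below follow the same
   pattern for their element types.  */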
11963 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
11964 short_integer_type_node,
11965 short_integer_type_node,
11966 short_integer_type_node,
11967 short_integer_type_node,
11968 short_integer_type_node,
11969 short_integer_type_node,
11970 short_integer_type_node, NULL_TREE);
11971 def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v8hi", ftype,
11972 ALTIVEC_BUILTIN_VEC_INIT_V8HI);
11974 ftype = build_function_type_list (V16QI_type_node, char_type_node,
11975 char_type_node, char_type_node,
11976 char_type_node, char_type_node,
11977 char_type_node, char_type_node,
11978 char_type_node, char_type_node,
11979 char_type_node, char_type_node,
11980 char_type_node, char_type_node,
11981 char_type_node, char_type_node,
11982 char_type_node, NULL_TREE);
11983 def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v16qi", ftype,
11984 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
11986 ftype = build_function_type_list (V4SF_type_node, float_type_node,
11987 float_type_node, float_type_node,
11988 float_type_node, NULL_TREE);
11989 def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v4sf", ftype,
11990 ALTIVEC_BUILTIN_VEC_INIT_V4SF);
11994 ftype = build_function_type_list (V2DF_type_node, double_type_node,
11995 double_type_node, NULL_TREE);
11996 def_builtin (MASK_VSX, "__builtin_vec_init_v2df", ftype,
11997 VSX_BUILTIN_VEC_INIT_V2DF);
11999 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
12000 intDI_type_node, NULL_TREE);
12001 def_builtin (MASK_VSX, "__builtin_vec_init_v2di", ftype,
12002 VSX_BUILTIN_VEC_INIT_V2DI);
12005 /* Access to the vec_set patterns. */
12006 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
12008 integer_type_node, NULL_TREE);
12009 def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v4si", ftype,
12010 ALTIVEC_BUILTIN_VEC_SET_V4SI);
12012 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
12014 integer_type_node, NULL_TREE);
12015 def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v8hi", ftype,
12016 ALTIVEC_BUILTIN_VEC_SET_V8HI);
12018 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
12020 integer_type_node, NULL_TREE);
12021 def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v16qi", ftype,
12022 ALTIVEC_BUILTIN_VEC_SET_V16QI);
12024 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
12026 integer_type_node, NULL_TREE);
12027 def_builtin (MASK_ALTIVEC|MASK_VSX, "__builtin_vec_set_v4sf", ftype,
12028 ALTIVEC_BUILTIN_VEC_SET_V4SF);
12032 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
12034 integer_type_node, NULL_TREE);
12035 def_builtin (MASK_VSX, "__builtin_vec_set_v2df", ftype,
12036 VSX_BUILTIN_VEC_SET_V2DF);
12038 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
12040 integer_type_node, NULL_TREE);
12041 def_builtin (MASK_VSX, "__builtin_vec_set_v2di", ftype,
12042 VSX_BUILTIN_VEC_SET_V2DI);
12045 /* Access to the vec_extract patterns. */
12046 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
12047 integer_type_node, NULL_TREE);
12048 def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v4si", ftype,
12049 ALTIVEC_BUILTIN_VEC_EXT_V4SI);
12051 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
12052 integer_type_node, NULL_TREE);
12053 def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v8hi", ftype,
12054 ALTIVEC_BUILTIN_VEC_EXT_V8HI);
12056 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
12057 integer_type_node, NULL_TREE);
12058 def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v16qi", ftype,
12059 ALTIVEC_BUILTIN_VEC_EXT_V16QI);
12061 ftype = build_function_type_list (float_type_node, V4SF_type_node,
12062 integer_type_node, NULL_TREE);
12063 def_builtin (MASK_ALTIVEC|MASK_VSX, "__builtin_vec_ext_v4sf", ftype,
12064 ALTIVEC_BUILTIN_VEC_EXT_V4SF);
12068 ftype = build_function_type_list (double_type_node, V2DF_type_node,
12069 integer_type_node, NULL_TREE);
12070 def_builtin (MASK_VSX, "__builtin_vec_ext_v2df", ftype,
12071 VSX_BUILTIN_VEC_EXT_V2DF);
12073 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
12074 integer_type_node, NULL_TREE);
12075 def_builtin (MASK_VSX, "__builtin_vec_ext_v2di", ftype,
12076 VSX_BUILTIN_VEC_EXT_V2DI);
12080 /* Hash function for builtin functions with up to 3 arguments and a return
12083 builtin_hash_function (const void *hash_entry)
12087 const struct builtin_hash_struct *bh =
12088 (const struct builtin_hash_struct *) hash_entry;
12090 for (i = 0; i < 4; i++)
12092 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
12093 ret = (ret * 2) + bh->uns_p[i];
12099 /* Compare builtin hash entries H1 and H2 for equivalence. */
12101 builtin_hash_eq (const void *h1, const void *h2)
12103 const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
12104 const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;
12106 return ((p1->mode[0] == p2->mode[0])
12107 && (p1->mode[1] == p2->mode[1])
12108 && (p1->mode[2] == p2->mode[2])
12109 && (p1->mode[3] == p2->mode[3])
12110 && (p1->uns_p[0] == p2->uns_p[0])
12111 && (p1->uns_p[1] == p2->uns_p[1])
12112 && (p1->uns_p[2] == p2->uns_p[2])
12113 && (p1->uns_p[3] == p2->uns_p[3]));
12116 /* Map types for builtin functions with an explicit return type and up to 3
12117 arguments. Functions with fewer than 3 arguments pass VOIDmode as the
12118 mode of each missing argument. */
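/* For example, the call made for "__builtin_recipdivf" in
   rs6000_init_builtins above,

       builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
                              RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");

   yields the function type "float (float, float)"; the trailing VOIDmode
   marks the absent third argument.  */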
12120 builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
12121 enum machine_mode mode_arg1, enum machine_mode mode_arg2,
12122 enum rs6000_builtins builtin, const char *name)
  struct builtin_hash_struct h;
  struct builtin_hash_struct *h2;
  void **found;
  int num_args = 3;
  int i;
  tree ret_type = NULL_TREE;
  tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
  tree args;
12133 /* Create builtin_hash_table. */
12134 if (builtin_hash_table == NULL)
12135 builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
12136 builtin_hash_eq, NULL);
12138 h.type = NULL_TREE;
12139 h.mode[0] = mode_ret;
12140 h.mode[1] = mode_arg0;
12141 h.mode[2] = mode_arg1;
12142 h.mode[3] = mode_arg2;
12148 /* If the builtin is a type that produces unsigned results or takes unsigned
12149 arguments, and it is returned as a decl for the vectorizer (such as
12150 widening multiplies, permute), make sure the arguments and return value
12151 are type correct. */
12154 /* unsigned 2 argument functions. */
12155 case ALTIVEC_BUILTIN_VMULEUB_UNS:
12156 case ALTIVEC_BUILTIN_VMULEUH_UNS:
12157 case ALTIVEC_BUILTIN_VMULOUB_UNS:
12158 case ALTIVEC_BUILTIN_VMULOUH_UNS:
12164 /* unsigned 3 argument functions. */
12165 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
12166 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
12167 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
12168 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
12169 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
12170 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
12171 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
12172 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
12173 case VSX_BUILTIN_VPERM_16QI_UNS:
12174 case VSX_BUILTIN_VPERM_8HI_UNS:
12175 case VSX_BUILTIN_VPERM_4SI_UNS:
12176 case VSX_BUILTIN_VPERM_2DI_UNS:
12177 case VSX_BUILTIN_XXSEL_16QI_UNS:
12178 case VSX_BUILTIN_XXSEL_8HI_UNS:
12179 case VSX_BUILTIN_XXSEL_4SI_UNS:
12180 case VSX_BUILTIN_XXSEL_2DI_UNS:
12187 /* signed permute functions with unsigned char mask. */
12188 case ALTIVEC_BUILTIN_VPERM_16QI:
12189 case ALTIVEC_BUILTIN_VPERM_8HI:
12190 case ALTIVEC_BUILTIN_VPERM_4SI:
12191 case ALTIVEC_BUILTIN_VPERM_4SF:
12192 case ALTIVEC_BUILTIN_VPERM_2DI:
12193 case ALTIVEC_BUILTIN_VPERM_2DF:
12194 case VSX_BUILTIN_VPERM_16QI:
12195 case VSX_BUILTIN_VPERM_8HI:
12196 case VSX_BUILTIN_VPERM_4SI:
12197 case VSX_BUILTIN_VPERM_4SF:
12198 case VSX_BUILTIN_VPERM_2DI:
12199 case VSX_BUILTIN_VPERM_2DF:
12203 /* unsigned args, signed return. */
12204 case VSX_BUILTIN_XVCVUXDDP_UNS:
12205 case VECTOR_BUILTIN_UNSFLOAT_V4SI_V4SF:
12209 /* signed args, unsigned return. */
12210 case VSX_BUILTIN_XVCVDPUXDS_UNS:
12211 case VECTOR_BUILTIN_FIXUNS_V4SF_V4SI:
12219 /* Figure out how many args are present. */
  while (num_args > 0 && h.mode[num_args] == VOIDmode)
    num_args--;

  if (num_args == 0)
    fatal_error ("internal error: builtin function %s had no type", name);
12226 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
12227 if (!ret_type && h.uns_p[0])
12228 ret_type = builtin_mode_to_type[h.mode[0]][0];
12231 fatal_error ("internal error: builtin function %s had an unexpected "
12232 "return type %s", name, GET_MODE_NAME (h.mode[0]));
12234 for (i = 0; i < num_args; i++)
12236 int m = (int) h.mode[i+1];
12237 int uns_p = h.uns_p[i+1];
12239 arg_type[i] = builtin_mode_to_type[m][uns_p];
12240 if (!arg_type[i] && uns_p)
12241 arg_type[i] = builtin_mode_to_type[m][0];
12244 fatal_error ("internal error: builtin function %s, argument %d "
12245 "had unexpected argument type %s", name, i,
12246 GET_MODE_NAME (m));
12249 found = htab_find_slot (builtin_hash_table, &h, INSERT);
12250 if (*found == NULL)
12252 h2 = GGC_NEW (struct builtin_hash_struct);
      *h2 = h;
      *found = (void *)h2;
12255 args = void_list_node;
12257 for (i = num_args - 1; i >= 0; i--)
12258 args = tree_cons (NULL_TREE, arg_type[i], args);
12260 h2->type = build_function_type (ret_type, args);
12263 return ((struct builtin_hash_struct *)(*found))->type;
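/* As an illustration (representative values, not tied to any one table entry
   below): the type for a signed vector add such as "v4si f (v4si, v4si)"
   comes from a call like

     builtin_function_type (V4SImode, V4SImode, V4SImode, VOIDmode,
			    ALTIVEC_BUILTIN_VADDUWM, "__builtin_altivec_vadduwm");

   and a second request with the same modes and signedness returns the cached
   type from builtin_hash_table rather than building a new one.  */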
static void
rs6000_common_init_builtins (void)
{
12269 const struct builtin_description *d;
12272 tree opaque_ftype_opaque = NULL_TREE;
12273 tree opaque_ftype_opaque_opaque = NULL_TREE;
12274 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
12275 tree v2si_ftype_qi = NULL_TREE;
12276 tree v2si_ftype_v2si_qi = NULL_TREE;
12277 tree v2si_ftype_int_qi = NULL_TREE;
12279 if (!TARGET_PAIRED_FLOAT)
12281 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
12282 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
12285 /* Add the ternary operators. */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
12290 int mask = d->mask;
12292 if ((mask != 0 && (mask & target_flags) == 0)
12293 || (mask == 0 && !TARGET_PAIRED_FLOAT))
12296 if ((d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
12297 && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST)
12298 || (d->code >= VSX_BUILTIN_OVERLOADED_FIRST
12299 && d->code <= VSX_BUILTIN_OVERLOADED_LAST))
12301 if (! (type = opaque_ftype_opaque_opaque_opaque))
12302 type = opaque_ftype_opaque_opaque_opaque
12303 = build_function_type_list (opaque_V4SI_type_node,
12304 opaque_V4SI_type_node,
12305 opaque_V4SI_type_node,
12306 opaque_V4SI_type_node,
12311 enum insn_code icode = d->icode;
12312 if (d->name == 0 || icode == CODE_FOR_nothing)
12315 type = builtin_function_type (insn_data[icode].operand[0].mode,
12316 insn_data[icode].operand[1].mode,
12317 insn_data[icode].operand[2].mode,
12318 insn_data[icode].operand[3].mode,
12322 def_builtin (d->mask, d->name, type, d->code);
12325 /* Add the binary operators. */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12329 enum machine_mode mode0, mode1, mode2;
12331 int mask = d->mask;
12333 if ((mask != 0 && (mask & target_flags) == 0)
12334 || (mask == 0 && !TARGET_PAIRED_FLOAT))
12337 if ((d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
12338 && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST)
12339 || (d->code >= VSX_BUILTIN_OVERLOADED_FIRST
12340 && d->code <= VSX_BUILTIN_OVERLOADED_LAST))
12342 if (! (type = opaque_ftype_opaque_opaque))
12343 type = opaque_ftype_opaque_opaque
12344 = build_function_type_list (opaque_V4SI_type_node,
12345 opaque_V4SI_type_node,
12346 opaque_V4SI_type_node,
12351 enum insn_code icode = d->icode;
12352 if (d->name == 0 || icode == CODE_FOR_nothing)
12355 mode0 = insn_data[icode].operand[0].mode;
12356 mode1 = insn_data[icode].operand[1].mode;
12357 mode2 = insn_data[icode].operand[2].mode;
12359 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
12361 if (! (type = v2si_ftype_v2si_qi))
12362 type = v2si_ftype_v2si_qi
12363 = build_function_type_list (opaque_V2SI_type_node,
12364 opaque_V2SI_type_node,
12369 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
12370 && mode2 == QImode)
12372 if (! (type = v2si_ftype_int_qi))
12373 type = v2si_ftype_int_qi
12374 = build_function_type_list (opaque_V2SI_type_node,
12381 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
12385 def_builtin (d->mask, d->name, type, d->code);
12388 /* Add the simple unary operators. */
12389 d = (struct builtin_description *) bdesc_1arg;
12390 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12392 enum machine_mode mode0, mode1;
12394 int mask = d->mask;
12396 if ((mask != 0 && (mask & target_flags) == 0)
12397 || (mask == 0 && !TARGET_PAIRED_FLOAT))
12400 if ((d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
12401 && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST)
12402 || (d->code >= VSX_BUILTIN_OVERLOADED_FIRST
12403 && d->code <= VSX_BUILTIN_OVERLOADED_LAST))
12405 if (! (type = opaque_ftype_opaque))
12406 type = opaque_ftype_opaque
12407 = build_function_type_list (opaque_V4SI_type_node,
12408 opaque_V4SI_type_node,
12413 enum insn_code icode = d->icode;
12414 if (d->name == 0 || icode == CODE_FOR_nothing)
12417 mode0 = insn_data[icode].operand[0].mode;
12418 mode1 = insn_data[icode].operand[1].mode;
12420 if (mode0 == V2SImode && mode1 == QImode)
12422 if (! (type = v2si_ftype_qi))
12423 type = v2si_ftype_qi
12424 = build_function_type_list (opaque_V2SI_type_node,
12430 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
12434 def_builtin (d->mask, d->name, type, d->code);
static void
rs6000_init_libfuncs (void)
{
12441 if (DEFAULT_ABI != ABI_V4 && TARGET_XCOFF
12442 && !TARGET_POWER2 && !TARGET_POWERPC)
12444 /* AIX library routines for float->int conversion. */
12445 set_conv_libfunc (sfix_optab, SImode, DFmode, "__itrunc");
12446 set_conv_libfunc (ufix_optab, SImode, DFmode, "__uitrunc");
12447 set_conv_libfunc (sfix_optab, SImode, TFmode, "_qitrunc");
12448 set_conv_libfunc (ufix_optab, SImode, TFmode, "_quitrunc");
12451 if (!TARGET_IEEEQUAD)
12452 /* AIX/Darwin/64-bit Linux quad floating point routines. */
12453 if (!TARGET_XL_COMPAT)
12455 set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
12456 set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
12457 set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
12458 set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
12460 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
12462 set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
12463 set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
12464 set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
12465 set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
12466 set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
12467 set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
12468 set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
12470 set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
12471 set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
12472 set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
12473 set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
12474 set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
12475 set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
12476 set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
12477 set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
12480 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
12481 set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
12485 set_optab_libfunc (add_optab, TFmode, "_xlqadd");
12486 set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
12487 set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
12488 set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
12492 /* 32-bit SVR4 quad floating point routines. */
12494 set_optab_libfunc (add_optab, TFmode, "_q_add");
12495 set_optab_libfunc (sub_optab, TFmode, "_q_sub");
12496 set_optab_libfunc (neg_optab, TFmode, "_q_neg");
12497 set_optab_libfunc (smul_optab, TFmode, "_q_mul");
12498 set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
12499 if (TARGET_PPC_GPOPT || TARGET_POWER2)
12500 set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
12502 set_optab_libfunc (eq_optab, TFmode, "_q_feq");
12503 set_optab_libfunc (ne_optab, TFmode, "_q_fne");
12504 set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
12505 set_optab_libfunc (ge_optab, TFmode, "_q_fge");
12506 set_optab_libfunc (lt_optab, TFmode, "_q_flt");
12507 set_optab_libfunc (le_optab, TFmode, "_q_fle");
12509 set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
12510 set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
12511 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
12512 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
12513 set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
12514 set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
12515 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
12516 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
12521 /* Expand a block clear operation, and return 1 if successful. Return 0
12522 if we should let the compiler generate normal code.
12524 operands[0] is the destination
12525 operands[1] is the length
12526 operands[3] is the alignment */
int
expand_block_clear (rtx operands[])
{
12531 rtx orig_dest = operands[0];
12532 rtx bytes_rtx = operands[1];
12533 rtx align_rtx = operands[3];
12534 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
12535 HOST_WIDE_INT align;
12536 HOST_WIDE_INT bytes;
  /* If this is not a fixed size clear, let the caller fall back to a
     memset call.  */
  if (! constp)
    return 0;
12545 /* This must be a fixed size alignment */
12546 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
12547 align = INTVAL (align_rtx) * BITS_PER_UNIT;
12549 /* Anything to clear? */
12550 bytes = INTVAL (bytes_rtx);
12554 /* Use the builtin memset after a point, to avoid huge code bloat.
12555 When optimize_size, avoid any significant code bloat; calling
12556 memset is about 4 instructions, so allow for one instruction to
12557 load zero and three to do clearing. */
  if (TARGET_ALTIVEC && align >= 128)
    clear_step = 16;
  else if (TARGET_POWERPC64 && align >= 32)
    clear_step = 8;
  else if (TARGET_SPE && align >= 64)
    clear_step = 8;
  else
    clear_step = 4;

  if (optimize_size && bytes > 3 * clear_step)
    return 0;
  if (! optimize_size && bytes > 8 * clear_step)
    return 0;
12572 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
12574 enum machine_mode mode = BLKmode;
12577 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
12582 else if (bytes >= 8 && TARGET_SPE && align >= 64)
12587 else if (bytes >= 8 && TARGET_POWERPC64
12588 /* 64-bit loads and stores require word-aligned
12590 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
12595 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
12596 { /* move 4 bytes */
12600 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
12601 { /* move 2 bytes */
12605 else /* move 1 byte at a time */
12611 dest = adjust_address (orig_dest, mode, offset);
12613 emit_move_insn (dest, CONST0_RTX (mode));
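/* As an illustration: clearing 32 bytes with 16-byte alignment on an Altivec
   target uses clear_step = 16 and emits two V4SImode stores of zero.  With
   only 4-byte alignment on a 32-bit target, clear_step = 4 and the same 32
   bytes would need eight word stores, which exceeds the 3 * clear_step limit
   when optimizing for size, so the caller falls back to memset instead.  */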
12620 /* Expand a block move operation, and return 1 if successful. Return 0
12621 if we should let the compiler generate normal code.
12623 operands[0] is the destination
12624 operands[1] is the source
12625 operands[2] is the length
12626 operands[3] is the alignment */
12628 #define MAX_MOVE_REG 4
int
expand_block_move (rtx operands[])
{
12633 rtx orig_dest = operands[0];
12634 rtx orig_src = operands[1];
12635 rtx bytes_rtx = operands[2];
12636 rtx align_rtx = operands[3];
12637 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
12642 rtx stores[MAX_MOVE_REG];
  /* If this is not a fixed size move, just call memcpy.  */
  if (! constp)
    return 0;
12649 /* This must be a fixed size alignment */
12650 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
12651 align = INTVAL (align_rtx) * BITS_PER_UNIT;
12653 /* Anything to move? */
12654 bytes = INTVAL (bytes_rtx);
12658 /* store_one_arg depends on expand_block_move to handle at least the size of
12659 reg_parm_stack_space. */
12660 if (bytes > (TARGET_POWERPC64 ? 64 : 32))
12663 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
12666 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
12667 rtx (*mov) (rtx, rtx);
12669 enum machine_mode mode = BLKmode;
12672 /* Altivec first, since it will be faster than a string move
12673 when it applies, and usually not significantly larger. */
12674 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
12678 gen_func.mov = gen_movv4si;
12680 else if (TARGET_SPE && bytes >= 8 && align >= 64)
12684 gen_func.mov = gen_movv2si;
12686 else if (TARGET_STRING
12687 && bytes > 24 /* move up to 32 bytes at a time */
12693 && ! fixed_regs[10]
12694 && ! fixed_regs[11]
12695 && ! fixed_regs[12])
12697 move_bytes = (bytes > 32) ? 32 : bytes;
12698 gen_func.movmemsi = gen_movmemsi_8reg;
12700 else if (TARGET_STRING
12701 && bytes > 16 /* move up to 24 bytes at a time */
12707 && ! fixed_regs[10])
12709 move_bytes = (bytes > 24) ? 24 : bytes;
12710 gen_func.movmemsi = gen_movmemsi_6reg;
12712 else if (TARGET_STRING
12713 && bytes > 8 /* move up to 16 bytes at a time */
12717 && ! fixed_regs[8])
12719 move_bytes = (bytes > 16) ? 16 : bytes;
12720 gen_func.movmemsi = gen_movmemsi_4reg;
12722 else if (bytes >= 8 && TARGET_POWERPC64
12723 /* 64-bit loads and stores require word-aligned
12725 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
12729 gen_func.mov = gen_movdi;
12731 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
12732 { /* move up to 8 bytes at a time */
12733 move_bytes = (bytes > 8) ? 8 : bytes;
12734 gen_func.movmemsi = gen_movmemsi_2reg;
12736 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
12737 { /* move 4 bytes */
12740 gen_func.mov = gen_movsi;
12742 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
12743 { /* move 2 bytes */
12746 gen_func.mov = gen_movhi;
12748 else if (TARGET_STRING && bytes > 1)
12749 { /* move up to 4 bytes at a time */
12750 move_bytes = (bytes > 4) ? 4 : bytes;
12751 gen_func.movmemsi = gen_movmemsi_1reg;
12753 else /* move 1 byte at a time */
12757 gen_func.mov = gen_movqi;
12760 src = adjust_address (orig_src, mode, offset);
12761 dest = adjust_address (orig_dest, mode, offset);
12763 if (mode != BLKmode)
12765 rtx tmp_reg = gen_reg_rtx (mode);
12767 emit_insn ((*gen_func.mov) (tmp_reg, src));
12768 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
12771 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
12774 for (i = 0; i < num_reg; i++)
12775 emit_insn (stores[i]);
12779 if (mode == BLKmode)
12781 /* Move the address into scratch registers. The movmemsi
12782 patterns require zero offset. */
12783 if (!REG_P (XEXP (src, 0)))
12785 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
12786 src = replace_equiv_address (src, src_reg);
12788 set_mem_size (src, GEN_INT (move_bytes));
12790 if (!REG_P (XEXP (dest, 0)))
12792 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
12793 dest = replace_equiv_address (dest, dest_reg);
12795 set_mem_size (dest, GEN_INT (move_bytes));
12797 emit_insn ((*gen_func.movmemsi) (dest, src,
12798 GEN_INT (move_bytes & 31),
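/* As an illustration: copying 10 bytes with 4-byte alignment and no string
   instructions takes three passes through the loop above: two SImode
   load/store pairs for the first 8 bytes and one HImode pair for the last 2.
   The store insns are buffered in STORES[] and flushed every MAX_MOVE_REG
   moves, so several loads are issued before their matching stores.  */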
12807 /* Return a string to perform a load_multiple operation.
12808 operands[0] is the vector.
12809 operands[1] is the source address.
12810 operands[2] is the first destination register. */
const char *
rs6000_output_load_multiple (rtx operands[3])
{
12815 /* We have to handle the case where the pseudo used to contain the address
12816 is assigned to one of the output registers. */
12818 int words = XVECLEN (operands[0], 0);
12821 if (XVECLEN (operands[0], 0) == 1)
12822 return "{l|lwz} %2,0(%1)";
12824 for (i = 0; i < words; i++)
12825 if (refers_to_regno_p (REGNO (operands[2]) + i,
12826 REGNO (operands[2]) + i + 1, operands[1], 0))
12830 xop[0] = GEN_INT (4 * (words-1));
12831 xop[1] = operands[1];
12832 xop[2] = operands[2];
12833 output_asm_insn ("{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,%0(%1)", xop);
12838 xop[0] = GEN_INT (4 * (words-1));
12839 xop[1] = operands[1];
12840 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
12841 output_asm_insn ("{cal %1,4(%1)|addi %1,%1,4}\n\t{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,-4(%1)", xop);
12846 for (j = 0; j < words; j++)
12849 xop[0] = GEN_INT (j * 4);
12850 xop[1] = operands[1];
12851 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
12852 output_asm_insn ("{l|lwz} %2,%0(%1)", xop);
12854 xop[0] = GEN_INT (i * 4);
12855 xop[1] = operands[1];
12856 output_asm_insn ("{l|lwz} %1,%0(%1)", xop);
12861 return "{lsi|lswi} %2,%1,%N0";
12865 /* A validation routine: say whether CODE, a condition code, and MODE
12866 match. The other alternatives either don't make sense or should
12867 never be generated. */
12870 validate_condition_mode (enum rtx_code code, enum machine_mode mode)
12872 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
12873 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
12874 && GET_MODE_CLASS (mode) == MODE_CC);
12876 /* These don't make sense. */
12877 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
12878 || mode != CCUNSmode);
12880 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
12881 || mode == CCUNSmode);
12883 gcc_assert (mode == CCFPmode
12884 || (code != ORDERED && code != UNORDERED
12885 && code != UNEQ && code != LTGT
12886 && code != UNGT && code != UNLT
12887 && code != UNGE && code != UNLE));
12889 /* These should never be generated except for
12890 flag_finite_math_only. */
12891 gcc_assert (mode != CCFPmode
12892 || flag_finite_math_only
12893 || (code != LE && code != GE
12894 && code != UNEQ && code != LTGT
12895 && code != UNGT && code != UNLT));
12897 /* These are invalid; the information is not there. */
12898 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
12902 /* Return 1 if ANDOP is a mask that has no bits on that are not in the
12903 mask required to convert the result of a rotate insn into a shift
12904 left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
12907 includes_lshift_p (rtx shiftop, rtx andop)
12909 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
12911 shift_mask <<= INTVAL (shiftop);
12913 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
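/* As an illustration: with SHIFTOP = 4 the shift mask is 0xfffffff0, so ANDOP
   values such as 0xfffffff0 or 0x0000ff00 qualify (no bits outside the mask)
   and the rotate-and-mask is equivalent to a shift left by 4, while
   0xffffffff fails because its low four bits fall outside the mask.  */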
12916 /* Similar, but for right shift. */
12919 includes_rshift_p (rtx shiftop, rtx andop)
12921 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
12923 shift_mask >>= INTVAL (shiftop);
12925 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
12928 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
12929 to perform a left shift. It must have exactly SHIFTOP least
12930 significant 0's, then one or more 1's, then zero or more 0's. */
12933 includes_rldic_lshift_p (rtx shiftop, rtx andop)
12935 if (GET_CODE (andop) == CONST_INT)
12937 HOST_WIDE_INT c, lsb, shift_mask;
12939 c = INTVAL (andop);
12940 if (c == 0 || c == ~0)
12944 shift_mask <<= INTVAL (shiftop);
12946 /* Find the least significant one bit. */
12949 /* It must coincide with the LSB of the shift mask. */
12950 if (-lsb != shift_mask)
12953 /* Invert to look for the next transition (if any). */
12956 /* Remove the low group of ones (originally low group of zeros). */
12959 /* Again find the lsb, and check we have all 1's above. */
12963 else if (GET_CODE (andop) == CONST_DOUBLE
12964 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
12966 HOST_WIDE_INT low, high, lsb;
12967 HOST_WIDE_INT shift_mask_low, shift_mask_high;
12969 low = CONST_DOUBLE_LOW (andop);
12970 if (HOST_BITS_PER_WIDE_INT < 64)
12971 high = CONST_DOUBLE_HIGH (andop);
12973 if ((low == 0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == 0))
12974 || (low == ~0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0)))
12977 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
12979 shift_mask_high = ~0;
12980 if (INTVAL (shiftop) > 32)
12981 shift_mask_high <<= INTVAL (shiftop) - 32;
12983 lsb = high & -high;
12985 if (-lsb != shift_mask_high || INTVAL (shiftop) < 32)
12991 lsb = high & -high;
12992 return high == -lsb;
12995 shift_mask_low = ~0;
12996 shift_mask_low <<= INTVAL (shiftop);
13000 if (-lsb != shift_mask_low)
13003 if (HOST_BITS_PER_WIDE_INT < 64)
13008 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
13010 lsb = high & -high;
13011 return high == -lsb;
13015 return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
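/* As an illustration: with SHIFTOP = 8, the mask 0x0000ff00 qualifies (exactly
   eight low zeros, one contiguous run of ones, then zeros), 0x0000fe00 fails
   because it has nine low zeros, and 0x00ff0f00 fails because its ones are
   not contiguous.  */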
13021 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
13022 to perform a left shift. It must have SHIFTOP or more least
13023 significant 0's, with the remainder of the word 1's. */
13026 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
13028 if (GET_CODE (andop) == CONST_INT)
13030 HOST_WIDE_INT c, lsb, shift_mask;
13033 shift_mask <<= INTVAL (shiftop);
13034 c = INTVAL (andop);
13036 /* Find the least significant one bit. */
13039 /* It must be covered by the shift mask.
13040 This test also rejects c == 0. */
13041 if ((lsb & shift_mask) == 0)
13044 /* Check we have all 1's above the transition, and reject all 1's. */
13045 return c == -lsb && lsb != 1;
13047 else if (GET_CODE (andop) == CONST_DOUBLE
13048 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
13050 HOST_WIDE_INT low, lsb, shift_mask_low;
13052 low = CONST_DOUBLE_LOW (andop);
13054 if (HOST_BITS_PER_WIDE_INT < 64)
13056 HOST_WIDE_INT high, shift_mask_high;
13058 high = CONST_DOUBLE_HIGH (andop);
13062 shift_mask_high = ~0;
13063 if (INTVAL (shiftop) > 32)
13064 shift_mask_high <<= INTVAL (shiftop) - 32;
13066 lsb = high & -high;
13068 if ((lsb & shift_mask_high) == 0)
13071 return high == -lsb;
13077 shift_mask_low = ~0;
13078 shift_mask_low <<= INTVAL (shiftop);
13082 if ((lsb & shift_mask_low) == 0)
13085 return low == -lsb && lsb != 1;
/* Return 1 if the operands will generate valid arguments to the rlwimi
   instruction for an insert with right shift in 64-bit mode.  The mask may
   not start on the first bit or stop on the last bit because the wrap-around
   effects of the instruction do not correspond to the semantics of the RTL
   insn.  */
13097 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
13099 if (INTVAL (startop) > 32
13100 && INTVAL (startop) < 64
13101 && INTVAL (sizeop) > 1
13102 && INTVAL (sizeop) + INTVAL (startop) < 64
13103 && INTVAL (shiftop) > 0
13104 && INTVAL (sizeop) + INTVAL (shiftop) < 32
13105 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
13111 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
13112 for lfq and stfq insns iff the registers are hard registers. */
13115 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
13117 /* We might have been passed a SUBREG. */
13118 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
13121 /* We might have been passed non floating point registers. */
13122 if (!FP_REGNO_P (REGNO (reg1))
13123 || !FP_REGNO_P (REGNO (reg2)))
13126 return (REGNO (reg1) == REGNO (reg2) - 1);
13129 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
13130 addr1 and addr2 must be in consecutive memory locations
13131 (addr2 == addr1 + 8). */
13134 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
13137 unsigned int reg1, reg2;
13138 int offset1, offset2;
13140 /* The mems cannot be volatile. */
13141 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
13144 addr1 = XEXP (mem1, 0);
13145 addr2 = XEXP (mem2, 0);
13147 /* Extract an offset (if used) from the first addr. */
13148 if (GET_CODE (addr1) == PLUS)
13150 /* If not a REG, return zero. */
13151 if (GET_CODE (XEXP (addr1, 0)) != REG)
13155 reg1 = REGNO (XEXP (addr1, 0));
13156 /* The offset must be constant! */
13157 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
13159 offset1 = INTVAL (XEXP (addr1, 1));
13162 else if (GET_CODE (addr1) != REG)
13166 reg1 = REGNO (addr1);
13167 /* This was a simple (mem (reg)) expression. Offset is 0. */
13171 /* And now for the second addr. */
13172 if (GET_CODE (addr2) == PLUS)
13174 /* If not a REG, return zero. */
13175 if (GET_CODE (XEXP (addr2, 0)) != REG)
13179 reg2 = REGNO (XEXP (addr2, 0));
13180 /* The offset must be constant. */
13181 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
13183 offset2 = INTVAL (XEXP (addr2, 1));
13186 else if (GET_CODE (addr2) != REG)
13190 reg2 = REGNO (addr2);
13191 /* This was a simple (mem (reg)) expression. Offset is 0. */
13195 /* Both of these must have the same base register. */
13199 /* The offset for the second addr must be 8 more than the first addr. */
13200 if (offset2 != offset1 + 8)
  /* All the tests passed.  addr1 and addr2 are valid for lfq or stfq
     insns.  */
  return 1;
}
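/* As an illustration: (mem:DF (plus (reg 31) (const_int 16))) followed by
   (mem:DF (plus (reg 31) (const_int 24))) is accepted: both are non-volatile,
   share the base register, and the second offset is exactly 8 beyond the
   first, so the pair can be combined into a single lfq or stfq.  */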
13210 rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
13212 static bool eliminated = false;
13215 if (mode != SDmode)
13216 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
13219 rtx mem = cfun->machine->sdmode_stack_slot;
13220 gcc_assert (mem != NULL_RTX);
13224 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
13225 cfun->machine->sdmode_stack_slot = mem;
13231 if (TARGET_DEBUG_ADDR)
13233 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
13234 GET_MODE_NAME (mode));
13236 fprintf (stderr, "\tNULL_RTX\n");
13245 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
13247 /* Don't walk into types. */
13248 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
13250 *walk_subtrees = 0;
13254 switch (TREE_CODE (*tp))
13263 case ALIGN_INDIRECT_REF:
13264 case MISALIGNED_INDIRECT_REF:
13265 case VIEW_CONVERT_EXPR:
13266 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
13276 enum reload_reg_type {
  GPR_REGISTER_TYPE,
  VECTOR_REGISTER_TYPE,
13279 OTHER_REGISTER_TYPE
13282 static enum reload_reg_type
13283 rs6000_reload_register_type (enum reg_class rclass)
13289 return GPR_REGISTER_TYPE;
13294 return VECTOR_REGISTER_TYPE;
13297 return OTHER_REGISTER_TYPE;
13301 /* Inform reload about cases where moving X with a mode MODE to a register in
13302 RCLASS requires an extra scratch or immediate register. Return the class
13303 needed for the immediate register.
   For VSX and Altivec, we may need a register to convert sp+offset into
   reg+reg addressing.  */
13308 static enum reg_class
13309 rs6000_secondary_reload (bool in_p,
13311 enum reg_class rclass,
13312 enum machine_mode mode,
13313 secondary_reload_info *sri)
13315 enum reg_class ret = ALL_REGS;
13316 enum insn_code icode;
13317 bool default_p = false;
13319 sri->icode = CODE_FOR_nothing;
  /* Convert vector loads and stores into gprs to use an additional base
     register.  */
13323 icode = rs6000_vector_reload[mode][in_p != false];
13324 if (icode != CODE_FOR_nothing)
13327 sri->icode = CODE_FOR_nothing;
13328 sri->extra_cost = 0;
13330 if (GET_CODE (x) == MEM)
13332 rtx addr = XEXP (x, 0);
13334 /* Loads to and stores from gprs can do reg+offset, and wouldn't need
13335 an extra register in that case, but it would need an extra
13336 register if the addressing is reg+reg or (reg+reg)&(-16). */
13337 if (rclass == GENERAL_REGS || rclass == BASE_REGS)
13339 if (!legitimate_indirect_address_p (addr, false)
13340 && !rs6000_legitimate_offset_address_p (TImode, addr, false))
13342 sri->icode = icode;
13343 /* account for splitting the loads, and converting the
13344 address from reg+reg to reg. */
13345 sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
13346 + ((GET_CODE (addr) == AND) ? 1 : 0));
13349 /* Loads to and stores from vector registers can only do reg+reg
13350 addressing. Altivec registers can also do (reg+reg)&(-16). */
13351 else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
13352 || rclass == FLOAT_REGS || rclass == NO_REGS)
13354 if (!VECTOR_MEM_ALTIVEC_P (mode)
13355 && GET_CODE (addr) == AND
13356 && GET_CODE (XEXP (addr, 1)) == CONST_INT
13357 && INTVAL (XEXP (addr, 1)) == -16
13358 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
13359 || legitimate_indexed_address_p (XEXP (addr, 0), false)))
13361 sri->icode = icode;
13362 sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
13365 else if (!legitimate_indirect_address_p (addr, false)
13366 && (rclass == NO_REGS
13367 || !legitimate_indexed_address_p (addr, false)))
13369 sri->icode = icode;
13370 sri->extra_cost = 1;
13373 icode = CODE_FOR_nothing;
13375 /* Any other loads, including to pseudo registers which haven't been
	     assigned to a register yet, default to require a scratch
	     register.  */
13380 sri->icode = icode;
13381 sri->extra_cost = 2;
13384 else if (REG_P (x))
13386 int regno = true_regnum (x);
13388 icode = CODE_FOR_nothing;
13389 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
13393 enum reg_class xclass = REGNO_REG_CLASS (regno);
13394 enum reload_reg_type rtype1 = rs6000_reload_register_type (rclass);
13395 enum reload_reg_type rtype2 = rs6000_reload_register_type (xclass);
13397 /* If memory is needed, use default_secondary_reload to create the
13399 if (rtype1 != rtype2 || rtype1 == OTHER_REGISTER_TYPE)
13412 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
13414 gcc_assert (ret != ALL_REGS);
13416 if (TARGET_DEBUG_ADDR)
13419 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
13421 reg_class_names[ret],
13422 in_p ? "true" : "false",
13423 reg_class_names[rclass],
13424 GET_MODE_NAME (mode));
13427 fprintf (stderr, ", default secondary reload");
13429 if (sri->icode != CODE_FOR_nothing)
13430 fprintf (stderr, ", reload func = %s, extra cost = %d\n",
13431 insn_data[sri->icode].name, sri->extra_cost);
13433 fprintf (stderr, "\n");
13441 /* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
13442 to SP+reg addressing. */
13445 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
13447 int regno = true_regnum (reg);
13448 enum machine_mode mode = GET_MODE (reg);
13449 enum reg_class rclass;
13451 rtx and_op2 = NULL_RTX;
13454 rtx scratch_or_premodify = scratch;
13458 if (TARGET_DEBUG_ADDR)
13460 fprintf (stderr, "\nrs6000_secondary_reload_inner, type = %s\n",
13461 store_p ? "store" : "load");
13462 fprintf (stderr, "reg:\n");
13464 fprintf (stderr, "mem:\n");
13466 fprintf (stderr, "scratch:\n");
13467 debug_rtx (scratch);
13470 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
13471 gcc_assert (GET_CODE (mem) == MEM);
13472 rclass = REGNO_REG_CLASS (regno);
13473 addr = XEXP (mem, 0);
13477 /* GPRs can handle reg + small constant, all other addresses need to use
13478 the scratch register. */
13481 if (GET_CODE (addr) == AND)
13483 and_op2 = XEXP (addr, 1);
13484 addr = XEXP (addr, 0);
13487 if (GET_CODE (addr) == PRE_MODIFY)
13489 scratch_or_premodify = XEXP (addr, 0);
13490 gcc_assert (REG_P (scratch_or_premodify));
13491 gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
13492 addr = XEXP (addr, 1);
13495 if (GET_CODE (addr) == PLUS
13496 && (!rs6000_legitimate_offset_address_p (TImode, addr, false)
13497 || and_op2 != NULL_RTX))
13499 addr_op1 = XEXP (addr, 0);
13500 addr_op2 = XEXP (addr, 1);
13501 gcc_assert (legitimate_indirect_address_p (addr_op1, false));
13503 if (!REG_P (addr_op2)
13504 && (GET_CODE (addr_op2) != CONST_INT
13505 || !satisfies_constraint_I (addr_op2)))
13507 if (TARGET_DEBUG_ADDR)
13510 "\nMove plus addr to register %s, mode = %s: ",
13511 rs6000_reg_names[REGNO (scratch)],
13512 GET_MODE_NAME (mode));
13513 debug_rtx (addr_op2);
13515 rs6000_emit_move (scratch, addr_op2, Pmode);
13516 addr_op2 = scratch;
13519 emit_insn (gen_rtx_SET (VOIDmode,
13520 scratch_or_premodify,
13521 gen_rtx_PLUS (Pmode,
13525 addr = scratch_or_premodify;
13526 scratch_or_premodify = scratch;
13528 else if (!legitimate_indirect_address_p (addr, false)
13529 && !rs6000_legitimate_offset_address_p (TImode, addr, false))
13531 if (TARGET_DEBUG_ADDR)
13533 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
13534 rs6000_reg_names[REGNO (scratch_or_premodify)],
13535 GET_MODE_NAME (mode));
13538 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
13539 addr = scratch_or_premodify;
13540 scratch_or_premodify = scratch;
13544 /* Float/Altivec registers can only handle reg+reg addressing. Move
13545 other addresses into a scratch register. */
13550 /* With float regs, we need to handle the AND ourselves, since we can't
13551 use the Altivec instruction with an implicit AND -16. Allow scalar
13552 loads to float registers to use reg+offset even if VSX. */
13553 if (GET_CODE (addr) == AND
13554 && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
13555 || GET_CODE (XEXP (addr, 1)) != CONST_INT
13556 || INTVAL (XEXP (addr, 1)) != -16
13557 || !VECTOR_MEM_ALTIVEC_P (mode)))
13559 and_op2 = XEXP (addr, 1);
13560 addr = XEXP (addr, 0);
13563 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
13564 as the address later. */
13565 if (GET_CODE (addr) == PRE_MODIFY
13566 && (!VECTOR_MEM_VSX_P (mode)
13567 || and_op2 != NULL_RTX
13568 || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
13570 scratch_or_premodify = XEXP (addr, 0);
13571 gcc_assert (legitimate_indirect_address_p (scratch_or_premodify,
13573 gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
13574 addr = XEXP (addr, 1);
13577 if (legitimate_indirect_address_p (addr, false) /* reg */
13578 || legitimate_indexed_address_p (addr, false) /* reg+reg */
13579 || GET_CODE (addr) == PRE_MODIFY /* VSX pre-modify */
13580 || (GET_CODE (addr) == AND /* Altivec memory */
13581 && GET_CODE (XEXP (addr, 1)) == CONST_INT
13582 && INTVAL (XEXP (addr, 1)) == -16
13583 && VECTOR_MEM_ALTIVEC_P (mode))
13584 || (rclass == FLOAT_REGS /* legacy float mem */
13585 && GET_MODE_SIZE (mode) == 8
13586 && and_op2 == NULL_RTX
13587 && scratch_or_premodify == scratch
13588 && rs6000_legitimate_offset_address_p (mode, addr, false)))
13591 else if (GET_CODE (addr) == PLUS)
13593 addr_op1 = XEXP (addr, 0);
13594 addr_op2 = XEXP (addr, 1);
13595 gcc_assert (REG_P (addr_op1));
13597 if (TARGET_DEBUG_ADDR)
13599 fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
13600 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
13601 debug_rtx (addr_op2);
13603 rs6000_emit_move (scratch, addr_op2, Pmode);
13604 emit_insn (gen_rtx_SET (VOIDmode,
13605 scratch_or_premodify,
13606 gen_rtx_PLUS (Pmode,
13609 addr = scratch_or_premodify;
13610 scratch_or_premodify = scratch;
13613 else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
13614 || GET_CODE (addr) == CONST_INT || REG_P (addr))
13616 if (TARGET_DEBUG_ADDR)
13618 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
13619 rs6000_reg_names[REGNO (scratch_or_premodify)],
13620 GET_MODE_NAME (mode));
13624 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
13625 addr = scratch_or_premodify;
13626 scratch_or_premodify = scratch;
13630 gcc_unreachable ();
13635 gcc_unreachable ();
13638 /* If the original address involved a pre-modify that we couldn't use the VSX
13639 memory instruction with update, and we haven't taken care of already,
     store the address in the pre-modify register and use that as the
     address.  */
13642 if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
13644 emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
13645 addr = scratch_or_premodify;
13648 /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
13649 memory instruction, recreate the AND now, including the clobber which is
13650 generated by the general ANDSI3/ANDDI3 patterns for the
13651 andi. instruction. */
13652 if (and_op2 != NULL_RTX)
13654 if (! legitimate_indirect_address_p (addr, false))
13656 emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
13660 if (TARGET_DEBUG_ADDR)
13662 fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
13663 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
13664 debug_rtx (and_op2);
13667 and_rtx = gen_rtx_SET (VOIDmode,
13669 gen_rtx_AND (Pmode,
13673 cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
13674 emit_insn (gen_rtx_PARALLEL (VOIDmode,
13675 gen_rtvec (2, and_rtx, cc_clobber)));
13679 /* Adjust the address if it changed. */
13680 if (addr != XEXP (mem, 0))
13682 mem = change_address (mem, mode, addr);
13683 if (TARGET_DEBUG_ADDR)
13684 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
13687 /* Now create the move. */
13689 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
13691 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
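/* As an illustration: reloading a vector register from
   (mem (plus (reg sp) (const_int 32))) cannot use reg+offset addressing, so
   the code above adds the offset to the stack pointer in the scratch register
   and performs the access through (mem (reg scratch)) instead; an AND with
   -16 that the instruction cannot do implicitly is likewise re-emitted on the
   scratch register before the move.  */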
/* Target hook to return the cover classes for the Integrated Register
   Allocator.  Cover classes are a set of non-intersecting register classes
   covering all the hard registers used for register allocation purposes.  Any
   move between two registers of a cover class should be cheaper than a load
   or store of the registers.  The value is an array of register classes with
   LIM_REG_CLASSES used as the end marker.

13703 We need two IRA_COVER_CLASSES, one for pre-VSX, and the other for VSX to
13704 account for the Altivec and Floating registers being subsets of the VSX
13705 register set under VSX, but distinct register sets on pre-VSX machines. */
13707 static const enum reg_class *
13708 rs6000_ira_cover_classes (void)
13710 static const enum reg_class cover_pre_vsx[] = IRA_COVER_CLASSES_PRE_VSX;
13711 static const enum reg_class cover_vsx[] = IRA_COVER_CLASSES_VSX;
13713 return (TARGET_VSX) ? cover_vsx : cover_pre_vsx;
13716 /* Allocate a 64-bit stack slot to be used for copying SDmode
13717 values through if this function has any SDmode references. */
13720 rs6000_alloc_sdmode_stack_slot (void)
13724 gimple_stmt_iterator gsi;
13726 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
13729 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
13731 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
13734 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
13735 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
13741 /* Check for any SDmode parameters of the function. */
13742 for (t = DECL_ARGUMENTS (cfun->decl); t; t = TREE_CHAIN (t))
13744 if (TREE_TYPE (t) == error_mark_node)
13747 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
13748 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
13750 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
13751 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
13759 rs6000_instantiate_decls (void)
13761 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
13762 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
13765 /* Given an rtx X being reloaded into a reg required to be
13766 in class CLASS, return the class of reg to actually use.
13767 In general this is just CLASS; but on some machines
13768 in some cases it is preferable to use a more restrictive class.
13770 On the RS/6000, we have to return NO_REGS when we want to reload a
13771 floating-point CONST_DOUBLE to force it to be copied to memory.
13773 We also don't want to reload integer values into floating-point
13774 registers if we can at all help it. In fact, this can
13775 cause reload to die, if it tries to generate a reload of CTR
13776 into a FP register and discovers it doesn't have the memory location
13779 ??? Would it be a good idea to have reload do the converse, that is
13780 try to reload floating modes into FP registers if possible?
13783 static enum reg_class
13784 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
13786 enum machine_mode mode = GET_MODE (x);
13788 if (VECTOR_UNIT_VSX_P (mode)
13789 && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
13792 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
13793 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
13794 && easy_vector_constant (x, mode))
13795 return ALTIVEC_REGS;
13797 if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
13800 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
13801 return GENERAL_REGS;
13803 /* For VSX, prefer the traditional registers for DF if the address is of the
13804 form reg+offset because we can use the non-VSX loads. Prefer the Altivec
     registers if Altivec is handling the vector operations (i.e. V16QI, V8HI,
     and V4SI).  */
13807 if (rclass == VSX_REGS && VECTOR_MEM_VSX_P (mode))
13809 if (mode == DFmode && GET_CODE (x) == MEM)
13811 rtx addr = XEXP (x, 0);
13813 if (legitimate_indirect_address_p (addr, false)) /* reg */
13816 if (legitimate_indexed_address_p (addr, false)) /* reg+reg */
13819 if (GET_CODE (addr) == PRE_MODIFY
13820 && legitimate_indexed_address_p (XEXP (addr, 0), false))
13826 if (VECTOR_UNIT_ALTIVEC_P (mode))
13827 return ALTIVEC_REGS;
13835 /* Debug version of rs6000_preferred_reload_class. */
13836 static enum reg_class
13837 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
13839 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
13842 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
13844 reg_class_names[ret], reg_class_names[rclass],
13845 GET_MODE_NAME (GET_MODE (x)));
13851 /* If we are copying between FP or AltiVec registers and anything else, we need
13852 a memory location. The exception is when we are targeting ppc64 and the
13853 move to/from fpr to gpr instructions are available. Also, under VSX, you
13854 can copy vector registers from the FP register set to the Altivec register
13855 set and vice versa. */
13858 rs6000_secondary_memory_needed (enum reg_class class1,
13859 enum reg_class class2,
13860 enum machine_mode mode)
13862 if (class1 == class2)
13865 /* Under VSX, there are 3 register classes that values could be in (VSX_REGS,
13866 ALTIVEC_REGS, and FLOAT_REGS). We don't need to use memory to copy
13867 between these classes. But we need memory for other things that can go in
13868 FLOAT_REGS like SFmode. */
  if (TARGET_VSX
      && (VECTOR_MEM_VSX_P (mode) || VECTOR_UNIT_VSX_P (mode))
13871 && (class1 == VSX_REGS || class1 == ALTIVEC_REGS
13872 || class1 == FLOAT_REGS))
13873 return (class2 != VSX_REGS && class2 != ALTIVEC_REGS
13874 && class2 != FLOAT_REGS);
13876 if (class1 == VSX_REGS || class2 == VSX_REGS)
13879 if (class1 == FLOAT_REGS
13880 && (!TARGET_MFPGPR || !TARGET_POWERPC64
13881 || ((mode != DFmode)
13882 && (mode != DDmode)
13883 && (mode != DImode))))
13886 if (class2 == FLOAT_REGS
13887 && (!TARGET_MFPGPR || !TARGET_POWERPC64
13888 || ((mode != DFmode)
13889 && (mode != DDmode)
13890 && (mode != DImode))))
13893 if (class1 == ALTIVEC_REGS || class2 == ALTIVEC_REGS)
13899 /* Debug version of rs6000_secondary_memory_needed. */
13901 rs6000_debug_secondary_memory_needed (enum reg_class class1,
13902 enum reg_class class2,
13903 enum machine_mode mode)
13905 bool ret = rs6000_secondary_memory_needed (class1, class2, mode);
13908 "rs6000_secondary_memory_needed, return: %s, class1 = %s, "
13909 "class2 = %s, mode = %s\n",
13910 ret ? "true" : "false", reg_class_names[class1],
13911 reg_class_names[class2], GET_MODE_NAME (mode));
13916 /* Return the register class of a scratch register needed to copy IN into
13917 or out of a register in RCLASS in MODE. If it can be done directly,
13918 NO_REGS is returned. */
13920 static enum reg_class
13921 rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
13926 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
13928 && MACHOPIC_INDIRECT
13932 /* We cannot copy a symbolic operand directly into anything
13933 other than BASE_REGS for TARGET_ELF. So indicate that a
	 register from BASE_REGS is needed as an intermediate
	 register.
13937 On Darwin, pic addresses require a load from memory, which
13938 needs a base register. */
13939 if (rclass != BASE_REGS
13940 && (GET_CODE (in) == SYMBOL_REF
13941 || GET_CODE (in) == HIGH
13942 || GET_CODE (in) == LABEL_REF
13943 || GET_CODE (in) == CONST))
13947 if (GET_CODE (in) == REG)
13949 regno = REGNO (in);
13950 if (regno >= FIRST_PSEUDO_REGISTER)
13952 regno = true_regnum (in);
13953 if (regno >= FIRST_PSEUDO_REGISTER)
13957 else if (GET_CODE (in) == SUBREG)
13959 regno = true_regnum (in);
13960 if (regno >= FIRST_PSEUDO_REGISTER)
  /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
     into anything.  */
13968 if (rclass == GENERAL_REGS || rclass == BASE_REGS
13969 || (regno >= 0 && INT_REGNO_P (regno)))
13972 /* Constants, memory, and FP registers can go into FP registers. */
13973 if ((regno == -1 || FP_REGNO_P (regno))
13974 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
13975 return (mode != SDmode) ? NO_REGS : GENERAL_REGS;
  /* Memory, and FP/altivec registers can go into fp/altivec registers under
     VSX.  */
13980 && (regno == -1 || VSX_REGNO_P (regno))
13981 && VSX_REG_CLASS_P (rclass))
13984 /* Memory, and AltiVec registers can go into AltiVec registers. */
13985 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
13986 && rclass == ALTIVEC_REGS)
13989 /* We can copy among the CR registers. */
13990 if ((rclass == CR_REGS || rclass == CR0_REGS)
13991 && regno >= 0 && CR_REGNO_P (regno))
13994 /* Otherwise, we need GENERAL_REGS. */
13995 return GENERAL_REGS;
13998 /* Debug version of rs6000_secondary_reload_class. */
13999 static enum reg_class
14000 rs6000_debug_secondary_reload_class (enum reg_class rclass,
14001 enum machine_mode mode, rtx in)
14003 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
14005 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
14006 "mode = %s, input rtx:\n",
14007 reg_class_names[ret], reg_class_names[rclass],
14008 GET_MODE_NAME (mode));
14014 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
14017 rs6000_cannot_change_mode_class (enum machine_mode from,
14018 enum machine_mode to,
14019 enum reg_class rclass)
14021 unsigned from_size = GET_MODE_SIZE (from);
14022 unsigned to_size = GET_MODE_SIZE (to);
14024 if (from_size != to_size)
14026 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
14027 return ((from_size < 8 || to_size < 8 || TARGET_IEEEQUAD)
14028 && reg_classes_intersect_p (xclass, rclass));
14031 if (TARGET_E500_DOUBLE
14032 && ((((to) == DFmode) + ((from) == DFmode)) == 1
14033 || (((to) == TFmode) + ((from) == TFmode)) == 1
14034 || (((to) == DDmode) + ((from) == DDmode)) == 1
14035 || (((to) == TDmode) + ((from) == TDmode)) == 1
14036 || (((to) == DImode) + ((from) == DImode)) == 1))
14039 /* Since the VSX register set includes traditional floating point registers
14040 and altivec registers, just check for the size being different instead of
14041 trying to check whether the modes are vector modes. Otherwise it won't
14042 allow say DF and DI to change classes. */
14043 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
14044 return (from_size != 8 && from_size != 16);
14046 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
14047 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
14050 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
14051 && reg_classes_intersect_p (GENERAL_REGS, rclass))
14057 /* Debug version of rs6000_cannot_change_mode_class. */
14059 rs6000_debug_cannot_change_mode_class (enum machine_mode from,
14060 enum machine_mode to,
14061 enum reg_class rclass)
14063 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
14066 "rs6000_cannot_change_mode_class, return %s, from = %s, "
14067 "to = %s, rclass = %s\n",
14068 ret ? "true" : "false",
14069 GET_MODE_NAME (from), GET_MODE_NAME (to),
14070 reg_class_names[rclass]);
14075 /* Given a comparison operation, return the bit number in CCR to test. We
14076 know this is a valid comparison.
14078 SCC_P is 1 if this is for an scc. That means that %D will have been
14079 used instead of %C, so the bits will be in different places.
14081 Return -1 if OP isn't a valid comparison for some reason. */
14084 ccr_bit (rtx op, int scc_p)
14086 enum rtx_code code = GET_CODE (op);
14087 enum machine_mode cc_mode;
14092 if (!COMPARISON_P (op))
14095 reg = XEXP (op, 0);
14097 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
14099 cc_mode = GET_MODE (reg);
14100 cc_regnum = REGNO (reg);
14101 base_bit = 4 * (cc_regnum - CR0_REGNO);
14103 validate_condition_mode (code, cc_mode);
  /* When generating a sCOND operation, only positive conditions are
     allowed.  */
  gcc_assert (!scc_p
	      || code == EQ || code == GT || code == LT || code == UNORDERED
14109 || code == GTU || code == LTU);
14114 return scc_p ? base_bit + 3 : base_bit + 2;
14116 return base_bit + 2;
14117 case GT: case GTU: case UNLE:
14118 return base_bit + 1;
14119 case LT: case LTU: case UNGE:
14121 case ORDERED: case UNORDERED:
14122 return base_bit + 3;
14125 /* If scc, we will have done a cror to put the bit in the
14126 unordered position. So test that bit. For integer, this is ! LT
14127 unless this is an scc insn. */
14128 return scc_p ? base_bit + 3 : base_bit;
14131 return scc_p ? base_bit + 3 : base_bit + 1;
14134 gcc_unreachable ();
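/* As an illustration: a GT test against CR field 2 gives base_bit = 8 and
   returns bit 9, the GT bit of that field.  For an scc sequence, conditions
   such as NE return base_bit + 3 instead, because the cror emitted for scc
   has copied the result into the unordered bit of the field.  */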
14138 /* Return the GOT register. */
14141 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
14143 /* The second flow pass currently (June 1999) can't update
14144 regs_ever_live without disturbing other parts of the compiler, so
14145 update it here to make the prolog/epilogue code happy. */
14146 if (!can_create_pseudo_p ()
14147 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
14148 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
14150 crtl->uses_pic_offset_table = 1;
14152 return pic_offset_table_rtx;
14155 /* Function to init struct machine_function.
14156 This will be called, via a pointer variable,
14157 from push_function_context. */
14159 static struct machine_function *
14160 rs6000_init_machine_status (void)
14162 return GGC_CNEW (machine_function);
14165 /* These macros test for integers and extract the low-order bits. */
#define INT_P(X)  \
((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE)	\
14168 && GET_MODE (X) == VOIDmode)
14170 #define INT_LOWPART(X) \
14171 (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))
14174 extract_MB (rtx op)
14177 unsigned long val = INT_LOWPART (op);
14179 /* If the high bit is zero, the value is the first 1 bit we find
14181 if ((val & 0x80000000) == 0)
14183 gcc_assert (val & 0xffffffff);
14186 while (((val <<= 1) & 0x80000000) == 0)
14191 /* If the high bit is set and the low bit is not, or the mask is all
14192 1's, the value is zero. */
14193 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
14196 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
14199 while (((val >>= 1) & 1) != 0)
14206 extract_ME (rtx op)
14209 unsigned long val = INT_LOWPART (op);
14211 /* If the low bit is zero, the value is the first 1 bit we find from
14213 if ((val & 1) == 0)
14215 gcc_assert (val & 0xffffffff);
14218 while (((val >>= 1) & 1) == 0)
14224 /* If the low bit is set and the high bit is not, or the mask is all
14225 1's, the value is 31. */
14226 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
14229 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
14232 while (((val <<= 1) & 0x80000000) != 0)
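/* As an illustration: for the mask 0x0ffffff0, extract_MB returns 4 and
   extract_ME returns 27, i.e. the mask covers IBM-numbered bits 4 through 27;
   for the wrap-around mask 0xf000000f, extract_MB returns 28 and extract_ME
   returns 3.  */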
14238 /* Locate some local-dynamic symbol still in use by this function
14239 so that we can print its name in some tls_ld pattern. */
14241 static const char *
14242 rs6000_get_some_local_dynamic_name (void)
14246 if (cfun->machine->some_ld_name)
14247 return cfun->machine->some_ld_name;
14249 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& for_each_rtx (&PATTERN (insn),
14252 rs6000_get_some_local_dynamic_name_1, 0))
14253 return cfun->machine->some_ld_name;
14255 gcc_unreachable ();
14258 /* Helper function for rs6000_get_some_local_dynamic_name. */
14261 rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
14265 if (GET_CODE (x) == SYMBOL_REF)
14267 const char *str = XSTR (x, 0);
14268 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
14270 cfun->machine->some_ld_name = str;
14278 /* Write out a function code label. */
14281 rs6000_output_function_entry (FILE *file, const char *fname)
14283 if (fname[0] != '.')
14285 switch (DEFAULT_ABI)
14288 gcc_unreachable ();
14294 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
14303 RS6000_OUTPUT_BASENAME (file, fname);
14306 /* Print an operand. Recognize special options, documented below. */
14309 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
14310 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
14312 #define SMALL_DATA_RELOC "sda21"
14313 #define SMALL_DATA_REG 0
14317 print_operand (FILE *file, rtx x, int code)
14321 unsigned HOST_WIDE_INT uval;
14326 /* Write out an instruction after the call which may be replaced
14327 with glue code by the loader. This depends on the AIX version. */
14328 asm_fprintf (file, RS6000_CALL_GLUE);
14331 /* %a is output_address. */
14334 /* If X is a constant integer whose low-order 5 bits are zero,
14335 write 'l'. Otherwise, write 'r'. This is a kludge to fix a bug
14336 in the AIX assembler where "sri" with a zero shift count
14337 writes a trash instruction. */
14338 if (GET_CODE (x) == CONST_INT && (INTVAL (x) & 31) == 0)
14345 /* If constant, low-order 16 bits of constant, unsigned.
14346 Otherwise, write normally. */
14348 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 0xffff);
14350 print_operand (file, x, 0);
14354 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
14355 for 64-bit mask direction. */
14356 putc (((INT_LOWPART (x) & 1) == 0 ? 'r' : 'l'), file);
14359 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
14363 /* X is a CR register. Print the number of the GT bit of the CR. */
14364 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14365 output_operand_lossage ("invalid %%c value");
14367 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 1);
14371 /* Like 'J' but get to the GT bit only. */
14372 gcc_assert (GET_CODE (x) == REG);
14374 /* Bit 1 is GT bit. */
14375 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
14377 /* Add one for shift count in rlinm for scc. */
14378 fprintf (file, "%d", i + 1);
14382 /* X is a CR register. Print the number of the EQ bit of the CR */
14383 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14384 output_operand_lossage ("invalid %%E value");
14386 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
14390 /* X is a CR register. Print the shift count needed to move it
14391 to the high-order four bits. */
14392 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14393 output_operand_lossage ("invalid %%f value");
14395 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
14399 /* Similar, but print the count for the rotate in the opposite
14401 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14402 output_operand_lossage ("invalid %%F value");
14404 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
14408 /* X is a constant integer. If it is negative, print "m",
14409 otherwise print "z". This is to make an aze or ame insn. */
14410 if (GET_CODE (x) != CONST_INT)
14411 output_operand_lossage ("invalid %%G value");
14412 else if (INTVAL (x) >= 0)
14419 /* If constant, output low-order five bits. Otherwise, write
14422 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 31);
14424 print_operand (file, x, 0);
14428 /* If constant, output low-order six bits. Otherwise, write
14431 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 63);
14433 print_operand (file, x, 0);
14437 /* Print `i' if this is a constant, else nothing. */
14443 /* Write the bit number in CCR for jump. */
14444 i = ccr_bit (x, 0);
14446 output_operand_lossage ("invalid %%j code");
14448 fprintf (file, "%d", i);
14452 /* Similar, but add one for shift count in rlinm for scc and pass
14453 scc flag to `ccr_bit'. */
14454 i = ccr_bit (x, 1);
14456 output_operand_lossage ("invalid %%J code");
14458 /* If we want bit 31, write a shift count of zero, not 32. */
14459 fprintf (file, "%d", i == 31 ? 0 : i + 1);
14463 /* X must be a constant. Write the 1's complement of the
14466 output_operand_lossage ("invalid %%k value");
14468 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INT_LOWPART (x));
14472 /* X must be a symbolic constant on ELF. Write an
14473 expression suitable for an 'addi' that adds in the low 16
14474 bits of the MEM. */
14475 if (GET_CODE (x) != CONST)
14477 print_operand_address (file, x);
14478 fputs ("@l", file);
14482 if (GET_CODE (XEXP (x, 0)) != PLUS
14483 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
14484 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
14485 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
14486 output_operand_lossage ("invalid %%K value");
14487 print_operand_address (file, XEXP (XEXP (x, 0), 0));
14488 fputs ("@l", file);
14489 /* For GNU as, there must be a non-alphanumeric character
14490 between 'l' and the number. The '-' is added by
14491 print_operand() already. */
14492 if (INTVAL (XEXP (XEXP (x, 0), 1)) >= 0)
14494 print_operand (file, XEXP (XEXP (x, 0), 1), 0);
14498 /* %l is output_asm_label. */
14501 /* Write second word of DImode or DFmode reference. Works on register
14502 or non-indexed memory only. */
14503 if (GET_CODE (x) == REG)
14504 fputs (reg_names[REGNO (x) + 1], file);
14505 else if (GET_CODE (x) == MEM)
14507 /* Handle possible auto-increment. Since it is pre-increment and
14508 we have already done it, we can just use an offset of one word. */
14509 if (GET_CODE (XEXP (x, 0)) == PRE_INC
14510 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
14511 output_address (plus_constant (XEXP (XEXP (x, 0), 0),
14513 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
14514 output_address (plus_constant (XEXP (XEXP (x, 0), 0),
14517 output_address (XEXP (adjust_address_nv (x, SImode,
14521 if (small_data_operand (x, GET_MODE (x)))
14522 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
14523 reg_names[SMALL_DATA_REG]);
14528 /* MB value for a mask operand. */
14529 if (! mask_operand (x, SImode))
14530 output_operand_lossage ("invalid %%m value");
14532 fprintf (file, "%d", extract_MB (x));
14536 /* ME value for a mask operand. */
14537 if (! mask_operand (x, SImode))
14538 output_operand_lossage ("invalid %%M value");
14540 fprintf (file, "%d", extract_ME (x));
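/* A minimal standalone sketch of what MB and ME mean for a contiguous,
   non-wrapping rlwinm mask, counting bit 0 as the most significant bit.
   extract_MB/extract_ME above are the real implementations; this is not
   compiled as part of this file and exists only to show sample values.  */
#if 0
#include <stdio.h>

static int
example_mb (unsigned int mask)
{
  int i;
  for (i = 0; i < 32; i++)
    if (mask & (0x80000000u >> i))
      return i;
  return -1;
}

static int
example_me (unsigned int mask)
{
  int i;
  for (i = 31; i >= 0; i--)
    if (mask & (0x80000000u >> i))
      return i;
  return -1;
}

int
main (void)
{
  unsigned int mask = 0x0ffffff0u;
  /* Prints "mask 0x0ffffff0: MB=4 ME=27".  */
  printf ("mask 0x%08x: MB=%d ME=%d\n", mask,
	  example_mb (mask), example_me (mask));
  return 0;
}
#endif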
14543 /* %n outputs the negative of its operand. */
14546 /* Write the number of elements in the vector times 4. */
14547 if (GET_CODE (x) != PARALLEL)
14548 output_operand_lossage ("invalid %%N value");
14550 fprintf (file, "%d", XVECLEN (x, 0) * 4);
14554 /* Similar, but subtract 1 first. */
14555 if (GET_CODE (x) != PARALLEL)
14556 output_operand_lossage ("invalid %%O value");
14558 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
14562 /* X is a CONST_INT that is a power of two. Output the logarithm. */
14564 || INT_LOWPART (x) < 0
14565 || (i = exact_log2 (INT_LOWPART (x))) < 0)
14566 output_operand_lossage ("invalid %%p value");
14568 fprintf (file, "%d", i);
14572 /* The operand must be an indirect memory reference. The result
14573 is the register name. */
14574 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
14575 || REGNO (XEXP (x, 0)) >= 32)
14576 output_operand_lossage ("invalid %%P value");
14578 fputs (reg_names[REGNO (XEXP (x, 0))], file);
14582 /* This outputs the logical code corresponding to a boolean
14583 expression. The expression may have one or both operands
14584 negated (if one, only the first one). For condition register
14585 logical operations, it will also treat the negated
14586 CR codes as NOTs, but not handle NOTs of them. */
14588 const char *const *t = 0;
14590 enum rtx_code code = GET_CODE (x);
14591 static const char * const tbl[3][3] = {
14592 { "and", "andc", "nor" },
14593 { "or", "orc", "nand" },
14594 { "xor", "eqv", "xor" } };
14598 else if (code == IOR)
14600 else if (code == XOR)
14603 output_operand_lossage ("invalid %%q value");
14605 if (GET_CODE (XEXP (x, 0)) != NOT)
14609 if (GET_CODE (XEXP (x, 1)) == NOT)
14627 /* X is a CR register. Print the mask for `mtcrf'. */
14628 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14629 output_operand_lossage ("invalid %%R value");
14631 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
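/* For example, cr0 gives an mtcrf mask of 128 (0x80), cr2 gives 32 (0x20)
   and cr7 gives 1: a single bit selecting which 4-bit CR field to move.  */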
14635 /* Low 5 bits of 32 - value */
14637 output_operand_lossage ("invalid %%s value");
14639 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INT_LOWPART (x)) & 31);
14643 /* PowerPC64 mask position. All 0's is excluded.
14644 CONST_INT 32-bit mask is considered sign-extended so any
14645 transition must occur within the CONST_INT, not on the boundary. */
14646 if (! mask64_operand (x, DImode))
14647 output_operand_lossage ("invalid %%S value");
14649 uval = INT_LOWPART (x);
14651 if (uval & 1) /* Clear Left */
14653 #if HOST_BITS_PER_WIDE_INT > 64
14654 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
14658 else /* Clear Right */
14661 #if HOST_BITS_PER_WIDE_INT > 64
14662 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
14668 gcc_assert (i >= 0);
14669 fprintf (file, "%d", i);
14673 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
14674 gcc_assert (GET_CODE (x) == REG && GET_MODE (x) == CCmode);
14676 /* Bit 3 is OV bit. */
14677 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
14679 /* If we want bit 31, write a shift count of zero, not 32. */
14680 fprintf (file, "%d", i == 31 ? 0 : i + 1);
14684 /* Print the symbolic name of a branch target register. */
14685 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
14686 && REGNO (x) != CTR_REGNO))
14687 output_operand_lossage ("invalid %%T value");
14688 else if (REGNO (x) == LR_REGNO)
14689 fputs (TARGET_NEW_MNEMONICS ? "lr" : "r", file);
14691 fputs ("ctr", file);
14695 /* High-order 16 bits of constant for use in unsigned operand. */
14697 output_operand_lossage ("invalid %%u value");
14699 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
14700 (INT_LOWPART (x) >> 16) & 0xffff);
14704 /* High-order 16 bits of constant for use in signed operand. */
14706 output_operand_lossage ("invalid %%v value");
14708 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
14709 (INT_LOWPART (x) >> 16) & 0xffff);
14713 /* Print `u' if this has an auto-increment or auto-decrement. */
14714 if (GET_CODE (x) == MEM
14715 && (GET_CODE (XEXP (x, 0)) == PRE_INC
14716 || GET_CODE (XEXP (x, 0)) == PRE_DEC
14717 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
14722 /* Print the trap code for this operand. */
14723 switch (GET_CODE (x))
14726 fputs ("eq", file); /* 4 */
14729 fputs ("ne", file); /* 24 */
14732 fputs ("lt", file); /* 16 */
14735 fputs ("le", file); /* 20 */
14738 fputs ("gt", file); /* 8 */
14741 fputs ("ge", file); /* 12 */
14744 fputs ("llt", file); /* 2 */
14747 fputs ("lle", file); /* 6 */
14750 fputs ("lgt", file); /* 1 */
14753 fputs ("lge", file); /* 5 */
14756 gcc_unreachable ();
14761 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
14764 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
14765 ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
14767 print_operand (file, x, 0);
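/* The xor/subtract above is the usual branch-free way to sign-extend the
   low 16 bits: 0x7fff stays 32767, 0x8000 becomes -32768 and 0x1ffff
   becomes -1.  */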
14771 /* MB value for a PowerPC64 rldic operand. */
14772 val = (GET_CODE (x) == CONST_INT
14773 ? INTVAL (x) : CONST_DOUBLE_HIGH (x));
14778 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i++)
14779 if ((val <<= 1) < 0)
14782 #if HOST_BITS_PER_WIDE_INT == 32
14783 if (GET_CODE (x) == CONST_INT && i >= 0)
14784 i += 32; /* zero-extend high-part was all 0's */
14785 else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
14787 val = CONST_DOUBLE_LOW (x);
14793 for ( ; i < 64; i++)
14794 if ((val <<= 1) < 0)
14799 fprintf (file, "%d", i + 1);
14803 /* X is a FPR or Altivec register used in a VSX context. */
14804 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
14805 output_operand_lossage ("invalid %%x value");
14808 int reg = REGNO (x);
14809 int vsx_reg = (FP_REGNO_P (reg)
14811 : reg - FIRST_ALTIVEC_REGNO + 32);
14813 #ifdef TARGET_REGNAMES
14814 if (TARGET_REGNAMES)
14815 fprintf (file, "%%vs%d", vsx_reg);
14818 fprintf (file, "%d", vsx_reg);
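/* The VSX register file overlays the FPRs followed by the AltiVec
   registers, so, for instance, FPR 5 is printed as VSX register 5 while
   AltiVec register 3 is printed as VSX register 35.  */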
14823 if (GET_CODE (x) == MEM
14824 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
14825 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
14826 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
14831 /* Like 'L', for third word of TImode */
14832 if (GET_CODE (x) == REG)
14833 fputs (reg_names[REGNO (x) + 2], file);
14834 else if (GET_CODE (x) == MEM)
14836 if (GET_CODE (XEXP (x, 0)) == PRE_INC
14837 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
14838 output_address (plus_constant (XEXP (XEXP (x, 0), 0), 8));
14839 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
14840 output_address (plus_constant (XEXP (XEXP (x, 0), 0), 8));
14842 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
14843 if (small_data_operand (x, GET_MODE (x)))
14844 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
14845 reg_names[SMALL_DATA_REG]);
14850 /* X is a SYMBOL_REF. Write out the name preceded by a
14851 period and without any trailing data in brackets. Used for function
14852 names. If we are configured for System V (or the embedded ABI) on
14853 the PowerPC, do not emit the period, since those systems do not use
14854 TOCs and the like. */
14855 gcc_assert (GET_CODE (x) == SYMBOL_REF);
14857 /* Mark the decl as referenced so that cgraph will output the
14859 if (SYMBOL_REF_DECL (x))
14860 mark_decl_referenced (SYMBOL_REF_DECL (x));
14862 /* For macho, check to see if we need a stub. */
14865 const char *name = XSTR (x, 0);
14867 if (MACHOPIC_INDIRECT
14868 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
14869 name = machopic_indirection_name (x, /*stub_p=*/true);
14871 assemble_name (file, name);
14873 else if (!DOT_SYMBOLS)
14874 assemble_name (file, XSTR (x, 0));
14876 rs6000_output_function_entry (file, XSTR (x, 0));
14880 /* Like 'L', for last word of TImode. */
14881 if (GET_CODE (x) == REG)
14882 fputs (reg_names[REGNO (x) + 3], file);
14883 else if (GET_CODE (x) == MEM)
14885 if (GET_CODE (XEXP (x, 0)) == PRE_INC
14886 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
14887 output_address (plus_constant (XEXP (XEXP (x, 0), 0), 12));
14888 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
14889 output_address (plus_constant (XEXP (XEXP (x, 0), 0), 12));
14891 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
14892 if (small_data_operand (x, GET_MODE (x)))
14893 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
14894 reg_names[SMALL_DATA_REG]);
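/* Between them, these two cases select the third and last SImode words of
   a TImode value: register number plus 2 or byte offset 8, and register
   number plus 3 or byte offset 12 respectively.  */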
14898 /* Print AltiVec or SPE memory operand. */
14903 gcc_assert (GET_CODE (x) == MEM);
14907 /* Ugly hack because %y is overloaded. */
14908 if ((TARGET_SPE || TARGET_E500_DOUBLE)
14909 && (GET_MODE_SIZE (GET_MODE (x)) == 8
14910 || GET_MODE (x) == TFmode
14911 || GET_MODE (x) == TImode))
14913 /* Handle [reg]. */
14914 if (GET_CODE (tmp) == REG)
14916 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
14919 /* Handle [reg+UIMM]. */
14920 else if (GET_CODE (tmp) == PLUS
14921 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
14925 gcc_assert (GET_CODE (XEXP (tmp, 0)) == REG);
14927 x = INTVAL (XEXP (tmp, 1));
14928 fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
14932 /* Fall through. Must be [reg+reg]. */
14934 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
14935 && GET_CODE (tmp) == AND
14936 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
14937 && INTVAL (XEXP (tmp, 1)) == -16)
14938 tmp = XEXP (tmp, 0);
14939 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
14940 && GET_CODE (tmp) == PRE_MODIFY)
14941 tmp = XEXP (tmp, 1);
14942 if (GET_CODE (tmp) == REG)
14943 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
14946 if (GET_CODE (tmp) != PLUS
14947 || !REG_P (XEXP (tmp, 0))
14948 || !REG_P (XEXP (tmp, 1)))
14950 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
14954 if (REGNO (XEXP (tmp, 0)) == 0)
14955 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
14956 reg_names[ REGNO (XEXP (tmp, 0)) ]);
14958 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
14959 reg_names[ REGNO (XEXP (tmp, 1)) ]);
14965 if (GET_CODE (x) == REG)
14966 fprintf (file, "%s", reg_names[REGNO (x)]);
14967 else if (GET_CODE (x) == MEM)
14969 /* We need to handle PRE_INC and PRE_DEC here, since we need to
14970 know the width from the mode. */
14971 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
14972 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
14973 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
14974 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
14975 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
14976 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
14977 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
14978 output_address (XEXP (XEXP (x, 0), 1));
14980 output_address (XEXP (x, 0));
14983 output_addr_const (file, x);
14987 assemble_name (file, rs6000_get_some_local_dynamic_name ());
14991 output_operand_lossage ("invalid %%xn code");
14995 /* Print the address of an operand. */
14998 print_operand_address (FILE *file, rtx x)
15000 if (GET_CODE (x) == REG)
15001 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
15002 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
15003 || GET_CODE (x) == LABEL_REF)
15005 output_addr_const (file, x);
15006 if (small_data_operand (x, GET_MODE (x)))
15007 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15008 reg_names[SMALL_DATA_REG]);
15010 gcc_assert (!TARGET_TOC);
15012 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == REG)
15014 gcc_assert (REG_P (XEXP (x, 0)));
15015 if (REGNO (XEXP (x, 0)) == 0)
15016 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
15017 reg_names[ REGNO (XEXP (x, 0)) ]);
15019 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
15020 reg_names[ REGNO (XEXP (x, 1)) ]);
15022 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
15023 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
15024 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
15026 else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
15027 && CONSTANT_P (XEXP (x, 1)))
15029 output_addr_const (file, XEXP (x, 1));
15030 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
15034 else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
15035 && CONSTANT_P (XEXP (x, 1)))
15037 fprintf (file, "lo16(");
15038 output_addr_const (file, XEXP (x, 1));
15039 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
15042 else if (legitimate_constant_pool_address_p (x))
15044 output_addr_const (file, XEXP (x, 1));
15045 fprintf (file, "(%s)", reg_names[REGNO (XEXP (x, 0))]);
15048 gcc_unreachable ();
15051 /* Implement OUTPUT_ADDR_CONST_EXTRA for address X. */
15054 rs6000_output_addr_const_extra (FILE *file, rtx x)
15056 if (GET_CODE (x) == UNSPEC)
15057 switch (XINT (x, 1))
15059 case UNSPEC_TOCREL:
15060 x = XVECEXP (x, 0, 0);
15061 gcc_assert (GET_CODE (x) == SYMBOL_REF);
15062 output_addr_const (file, x);
15063 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
15066 assemble_name (file, toc_label_name);
15068 else if (TARGET_ELF)
15069 fputs ("@toc", file);
15073 case UNSPEC_MACHOPIC_OFFSET:
15074 output_addr_const (file, XVECEXP (x, 0, 0));
15076 machopic_output_function_base_name (file);
15083 /* Target hook for assembling integer objects. The PowerPC version has
15084 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
15085 is defined. It also needs to handle DI-mode objects on 64-bit
15089 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
15091 #ifdef RELOCATABLE_NEEDS_FIXUP
15092 /* Special handling for SI values. */
15093 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
15095 static int recurse = 0;
15097 /* For -mrelocatable, we mark all addresses that need to be fixed up
15098 in the .fixup section. */
15099 if (TARGET_RELOCATABLE
15100 && in_section != toc_section
15101 && in_section != text_section
15102 && !unlikely_text_section_p (in_section)
15104 && GET_CODE (x) != CONST_INT
15105 && GET_CODE (x) != CONST_DOUBLE
15111 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
15113 ASM_OUTPUT_LABEL (asm_out_file, buf);
15114 fprintf (asm_out_file, "\t.long\t(");
15115 output_addr_const (asm_out_file, x);
15116 fprintf (asm_out_file, ")@fixup\n");
15117 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
15118 ASM_OUTPUT_ALIGN (asm_out_file, 2);
15119 fprintf (asm_out_file, "\t.long\t");
15120 assemble_name (asm_out_file, buf);
15121 fprintf (asm_out_file, "\n\t.previous\n");
15125 /* Remove initial .'s to turn a -mcall-aixdesc function
15126 address into the address of the descriptor, not the function
15128 else if (GET_CODE (x) == SYMBOL_REF
15129 && XSTR (x, 0)[0] == '.'
15130 && DEFAULT_ABI == ABI_AIX)
15132 const char *name = XSTR (x, 0);
15133 while (*name == '.')
15136 fprintf (asm_out_file, "\t.long\t%s\n", name);
15140 #endif /* RELOCATABLE_NEEDS_FIXUP */
15141 return default_assemble_integer (x, size, aligned_p);
15144 #ifdef HAVE_GAS_HIDDEN
15145 /* Emit an assembler directive to set symbol visibility for DECL to
15146 VISIBILITY_TYPE. */
15149 rs6000_assemble_visibility (tree decl, int vis)
15151 /* Functions need to have their entry point symbol visibility set as
15152 well as their descriptor symbol visibility. */
15153 if (DEFAULT_ABI == ABI_AIX
15155 && TREE_CODE (decl) == FUNCTION_DECL)
15157 static const char * const visibility_types[] = {
15158 NULL, "internal", "hidden", "protected"
15161 const char *name, *type;
15163 name = ((* targetm.strip_name_encoding)
15164 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
15165 type = visibility_types[vis];
15167 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
15168 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
15171 default_assemble_visibility (decl, vis);
15176 rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
15178 /* Reversal of FP compares needs care -- an ordered compare
15179 becomes an unordered compare and vice versa. */
15180 if (mode == CCFPmode
15181 && (!flag_finite_math_only
15182 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
15183 || code == UNEQ || code == LTGT))
15184 return reverse_condition_maybe_unordered (code);
15186 return reverse_condition (code);
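/* For example, when NaNs are possible the reverse of LT is UNGE rather
   than GE, so that an unordered result makes the reversed test succeed;
   with flag_finite_math_only the plain GE reversal suffices.  */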
15189 /* Generate a compare for CODE. Return a brand-new rtx that
15190 represents the result of the compare. */
15193 rs6000_generate_compare (rtx cmp, enum machine_mode mode)
15195 enum machine_mode comp_mode;
15196 rtx compare_result;
15197 enum rtx_code code = GET_CODE (cmp);
15198 rtx op0 = XEXP (cmp, 0);
15199 rtx op1 = XEXP (cmp, 1);
15201 if (FLOAT_MODE_P (mode))
15202 comp_mode = CCFPmode;
15203 else if (code == GTU || code == LTU
15204 || code == GEU || code == LEU)
15205 comp_mode = CCUNSmode;
15206 else if ((code == EQ || code == NE)
15207 && GET_CODE (op0) == SUBREG
15208 && GET_CODE (op1) == SUBREG
15209 && SUBREG_PROMOTED_UNSIGNED_P (op0)
15210 && SUBREG_PROMOTED_UNSIGNED_P (op1))
15211 /* These are unsigned values; perhaps there will be a later
15212 ordering compare that can be shared with this one.
15213 Unfortunately we cannot detect the signedness of the operands
15214 for non-subregs. */
15215 comp_mode = CCUNSmode;
15217 comp_mode = CCmode;
15219 /* First, the compare. */
15220 compare_result = gen_reg_rtx (comp_mode);
15222 /* E500 FP compare instructions on the GPRs. Yuck! */
15223 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
15224 && FLOAT_MODE_P (mode))
15226 rtx cmp, or_result, compare_result2;
15227 enum machine_mode op_mode = GET_MODE (op0);
15229 if (op_mode == VOIDmode)
15230 op_mode = GET_MODE (op1);
15232 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
15233 This explains the following mess. */
15237 case EQ: case UNEQ: case NE: case LTGT:
15241 cmp = (flag_finite_math_only && !flag_trapping_math)
15242 ? gen_tstsfeq_gpr (compare_result, op0, op1)
15243 : gen_cmpsfeq_gpr (compare_result, op0, op1);
15247 cmp = (flag_finite_math_only && !flag_trapping_math)
15248 ? gen_tstdfeq_gpr (compare_result, op0, op1)
15249 : gen_cmpdfeq_gpr (compare_result, op0, op1);
15253 cmp = (flag_finite_math_only && !flag_trapping_math)
15254 ? gen_tsttfeq_gpr (compare_result, op0, op1)
15255 : gen_cmptfeq_gpr (compare_result, op0, op1);
15259 gcc_unreachable ();
15263 case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
15267 cmp = (flag_finite_math_only && !flag_trapping_math)
15268 ? gen_tstsfgt_gpr (compare_result, op0, op1)
15269 : gen_cmpsfgt_gpr (compare_result, op0, op1);
15273 cmp = (flag_finite_math_only && !flag_trapping_math)
15274 ? gen_tstdfgt_gpr (compare_result, op0, op1)
15275 : gen_cmpdfgt_gpr (compare_result, op0, op1);
15279 cmp = (flag_finite_math_only && !flag_trapping_math)
15280 ? gen_tsttfgt_gpr (compare_result, op0, op1)
15281 : gen_cmptfgt_gpr (compare_result, op0, op1);
15285 gcc_unreachable ();
15289 case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
15293 cmp = (flag_finite_math_only && !flag_trapping_math)
15294 ? gen_tstsflt_gpr (compare_result, op0, op1)
15295 : gen_cmpsflt_gpr (compare_result, op0, op1);
15299 cmp = (flag_finite_math_only && !flag_trapping_math)
15300 ? gen_tstdflt_gpr (compare_result, op0, op1)
15301 : gen_cmpdflt_gpr (compare_result, op0, op1);
15305 cmp = (flag_finite_math_only && !flag_trapping_math)
15306 ? gen_tsttflt_gpr (compare_result, op0, op1)
15307 : gen_cmptflt_gpr (compare_result, op0, op1);
15311 gcc_unreachable ();
15315 gcc_unreachable ();
15318 /* Synthesize LE and GE from LT/GT || EQ. */
15319 if (code == LE || code == GE || code == LEU || code == GEU)
15325 case LE: code = LT; break;
15326 case GE: code = GT; break;
15327 case LEU: code = LT; break;
15328 case GEU: code = GT; break;
15329 default: gcc_unreachable ();
15332 compare_result2 = gen_reg_rtx (CCFPmode);
15338 cmp = (flag_finite_math_only && !flag_trapping_math)
15339 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
15340 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
15344 cmp = (flag_finite_math_only && !flag_trapping_math)
15345 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
15346 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
15350 cmp = (flag_finite_math_only && !flag_trapping_math)
15351 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
15352 : gen_cmptfeq_gpr (compare_result2, op0, op1);
15356 gcc_unreachable ();
15360 /* OR them together. */
15361 or_result = gen_reg_rtx (CCFPmode);
15362 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
15364 compare_result = or_result;
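/* So an E500 "a <= b", for instance, ends up as two GPR compares,
   "a < b" and "a == b", whose CR results are then combined by the
   cr_ior pattern above.  */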
15369 if (code == NE || code == LTGT)
15379 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
15380 CLOBBERs to match cmptf_internal2 pattern. */
15381 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
15382 && GET_MODE (op0) == TFmode
15383 && !TARGET_IEEEQUAD
15384 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
15385 emit_insn (gen_rtx_PARALLEL (VOIDmode,
15387 gen_rtx_SET (VOIDmode,
15389 gen_rtx_COMPARE (comp_mode, op0, op1)),
15390 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15391 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15392 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15393 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15394 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15395 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15396 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15397 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)))));
15398 else if (GET_CODE (op1) == UNSPEC
15399 && XINT (op1, 1) == UNSPEC_SP_TEST)
15401 rtx op1b = XVECEXP (op1, 0, 0);
15402 comp_mode = CCEQmode;
15403 compare_result = gen_reg_rtx (CCEQmode);
15405 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
15407 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
15410 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
15411 gen_rtx_COMPARE (comp_mode, op0, op1)));
15414 /* Some kinds of FP comparisons need an OR operation;
15415 under flag_finite_math_only we don't bother. */
15416 if (FLOAT_MODE_P (mode)
15417 && !flag_finite_math_only
15418 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
15419 && (code == LE || code == GE
15420 || code == UNEQ || code == LTGT
15421 || code == UNGT || code == UNLT))
15423 enum rtx_code or1, or2;
15424 rtx or1_rtx, or2_rtx, compare2_rtx;
15425 rtx or_result = gen_reg_rtx (CCEQmode);
15429 case LE: or1 = LT; or2 = EQ; break;
15430 case GE: or1 = GT; or2 = EQ; break;
15431 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
15432 case LTGT: or1 = LT; or2 = GT; break;
15433 case UNGT: or1 = UNORDERED; or2 = GT; break;
15434 case UNLT: or1 = UNORDERED; or2 = LT; break;
15435 default: gcc_unreachable ();
15437 validate_condition_mode (or1, comp_mode);
15438 validate_condition_mode (or2, comp_mode);
15439 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
15440 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
15441 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
15442 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
15444 emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
15446 compare_result = or_result;
15450 validate_condition_mode (code, GET_MODE (compare_result));
15452 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
15456 /* Emit the RTL for an sCOND pattern. */
15459 rs6000_emit_sISEL (enum machine_mode mode, rtx operands[])
15462 enum machine_mode op_mode;
15463 enum rtx_code cond_code;
15464 rtx result = operands[0];
15466 condition_rtx = rs6000_generate_compare (operands[1], mode);
15467 cond_code = GET_CODE (condition_rtx);
15469 op_mode = GET_MODE (XEXP (operands[1], 0));
15470 if (op_mode == VOIDmode)
15471 op_mode = GET_MODE (XEXP (operands[1], 1));
15473 if (TARGET_POWERPC64 && GET_MODE (result) == DImode)
15475 PUT_MODE (condition_rtx, DImode);
15476 if (cond_code == GEU || cond_code == GTU || cond_code == LEU
15477 || cond_code == LTU)
15478 emit_insn (gen_isel_unsigned_di (result, condition_rtx,
15479 force_reg (DImode, const1_rtx),
15480 force_reg (DImode, const0_rtx),
15481 XEXP (condition_rtx, 0)));
15483 emit_insn (gen_isel_signed_di (result, condition_rtx,
15484 force_reg (DImode, const1_rtx),
15485 force_reg (DImode, const0_rtx),
15486 XEXP (condition_rtx, 0)));
15490 PUT_MODE (condition_rtx, SImode);
15491 if (cond_code == GEU || cond_code == GTU || cond_code == LEU
15492 || cond_code == LTU)
15493 emit_insn (gen_isel_unsigned_si (result, condition_rtx,
15494 force_reg (SImode, const1_rtx),
15495 force_reg (SImode, const0_rtx),
15496 XEXP (condition_rtx, 0)));
15498 emit_insn (gen_isel_signed_si (result, condition_rtx,
15499 force_reg (SImode, const1_rtx),
15500 force_reg (SImode, const0_rtx),
15501 XEXP (condition_rtx, 0)));
15506 rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
15509 enum machine_mode op_mode;
15510 enum rtx_code cond_code;
15511 rtx result = operands[0];
15513 if (TARGET_ISEL && (mode == SImode || mode == DImode))
15515 rs6000_emit_sISEL (mode, operands);
15519 condition_rtx = rs6000_generate_compare (operands[1], mode);
15520 cond_code = GET_CODE (condition_rtx);
15522 if (FLOAT_MODE_P (mode)
15523 && !TARGET_FPRS && TARGET_HARD_FLOAT)
15527 PUT_MODE (condition_rtx, SImode);
15528 t = XEXP (condition_rtx, 0);
15530 gcc_assert (cond_code == NE || cond_code == EQ);
15532 if (cond_code == NE)
15533 emit_insn (gen_e500_flip_gt_bit (t, t));
15535 emit_insn (gen_move_from_CR_gt_bit (result, t));
15539 if (cond_code == NE
15540 || cond_code == GE || cond_code == LE
15541 || cond_code == GEU || cond_code == LEU
15542 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
15544 rtx not_result = gen_reg_rtx (CCEQmode);
15545 rtx not_op, rev_cond_rtx;
15546 enum machine_mode cc_mode;
15548 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
15550 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
15551 SImode, XEXP (condition_rtx, 0), const0_rtx);
15552 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
15553 emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
15554 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
15557 op_mode = GET_MODE (XEXP (operands[1], 0));
15558 if (op_mode == VOIDmode)
15559 op_mode = GET_MODE (XEXP (operands[1], 1));
15561 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
15563 PUT_MODE (condition_rtx, DImode);
15564 convert_move (result, condition_rtx, 0);
15568 PUT_MODE (condition_rtx, SImode);
15569 emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
15573 /* Emit a branch of kind CODE to location LOC. */
15576 rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
15578 rtx condition_rtx, loc_ref;
15580 condition_rtx = rs6000_generate_compare (operands[0], mode);
15581 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
15582 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
15583 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
15584 loc_ref, pc_rtx)));
15587 /* Return the string to output a conditional branch to LABEL, which is
15588 the operand number of the label, or -1 if the branch is really a
15589 conditional return.
15591 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
15592 condition code register and its mode specifies what kind of
15593 comparison we made.
15595 REVERSED is nonzero if we should reverse the sense of the comparison.
15597 INSN is the insn. */
15600 output_cbranch (rtx op, const char *label, int reversed, rtx insn)
15602 static char string[64];
15603 enum rtx_code code = GET_CODE (op);
15604 rtx cc_reg = XEXP (op, 0);
15605 enum machine_mode mode = GET_MODE (cc_reg);
15606 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
15607 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
15608 int really_reversed = reversed ^ need_longbranch;
15614 validate_condition_mode (code, mode);
15616 /* Work out which way this really branches. We could use
15617 reverse_condition_maybe_unordered here always but this
15618 makes the resulting assembler clearer. */
15619 if (really_reversed)
15621 /* Reversal of FP compares needs care -- an ordered compare
15622 becomes an unordered compare and vice versa. */
15623 if (mode == CCFPmode)
15624 code = reverse_condition_maybe_unordered (code);
15626 code = reverse_condition (code);
15629 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
15631 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
15636 /* Opposite of GT. */
15645 gcc_unreachable ();
15651 /* Not all of these are actually distinct opcodes, but
15652 we distinguish them for clarity of the resulting assembler. */
15653 case NE: case LTGT:
15654 ccode = "ne"; break;
15655 case EQ: case UNEQ:
15656 ccode = "eq"; break;
15658 ccode = "ge"; break;
15659 case GT: case GTU: case UNGT:
15660 ccode = "gt"; break;
15662 ccode = "le"; break;
15663 case LT: case LTU: case UNLT:
15664 ccode = "lt"; break;
15665 case UNORDERED: ccode = "un"; break;
15666 case ORDERED: ccode = "nu"; break;
15667 case UNGE: ccode = "nl"; break;
15668 case UNLE: ccode = "ng"; break;
15670 gcc_unreachable ();
15673 /* Maybe we have a guess as to how likely the branch is.
15674 The old mnemonics don't have a way to specify this information. */
15676 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
15677 if (note != NULL_RTX)
15679 /* PROB is the difference from 50%. */
15680 int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;
15682 /* Only hint for highly probable/improbable branches on newer
15683 cpus as static prediction overrides processor dynamic
15684 prediction. For older cpus we may as well always hint, but
15685 assume not taken for branches that are very close to 50% as a
15686 mispredicted taken branch is more expensive than a
15687 mispredicted not-taken branch. */
15688 if (rs6000_always_hint
15689 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
15690 && br_prob_note_reliable_p (note)))
15692 if (abs (prob) > REG_BR_PROB_BASE / 20
15693 && ((prob > 0) ^ need_longbranch))
15701 s += sprintf (s, "{b%sr|b%slr%s} ", ccode, ccode, pred);
15703 s += sprintf (s, "{b%s|b%s%s} ", ccode, ccode, pred);
15705 /* We need to escape any '%' characters in the reg_names string.
15706 Assume they'd only be the first character.... */
15707 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
15709 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
15713 /* If the branch distance was too far, we may have to use an
15714 unconditional branch to go the distance. */
15715 if (need_longbranch)
15716 s += sprintf (s, ",$+8\n\tb %s", label);
15718 s += sprintf (s, ",%s", label);
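/* A rough standalone sketch of the hint decision above, assuming
   REG_BR_PROB_BASE is 10000 (as it is here) and ignoring the
   br_prob_note_reliable_p test; "+" is a predicted-taken hint, "-"
   predicted not taken and "" no hint.  Not compiled as part of this
   file.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

#define EXAMPLE_PROB_BASE 10000

static const char *
example_hint (int note_prob, int always_hint, int need_longbranch)
{
  int prob = note_prob - EXAMPLE_PROB_BASE / 2;	/* difference from 50%  */

  if (!always_hint && abs (prob) <= EXAMPLE_PROB_BASE / 100 * 48)
    return "";
  if (abs (prob) > EXAMPLE_PROB_BASE / 20 && ((prob > 0) ^ need_longbranch))
    return "+";
  return "-";
}

int
main (void)
{
  /* A 99% taken short branch gets "+", the same branch needing the long
     (reversed) form gets "-", and a 60% branch gets no hint.  */
  printf ("'%s' '%s' '%s'\n",
	  example_hint (9900, 0, 0),
	  example_hint (9900, 0, 1),
	  example_hint (6000, 0, 0));
  return 0;
}
#endif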
15724 /* Return the string to flip the GT bit on a CR. */
15726 output_e500_flip_gt_bit (rtx dst, rtx src)
15728 static char string[64];
15731 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
15732 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
15735 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
15736 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
15738 sprintf (string, "crnot %d,%d", a, b);
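/* For instance, flipping cr2's GT bit into cr1 produces "crnot 5,9":
   bit 4*1 + 1 = 5 of the destination and bit 4*2 + 1 = 9 of the source.  */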
15742 /* Return insn for VSX or Altivec comparisons. */
15745 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
15748 enum machine_mode mode = GET_MODE (op0);
15756 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
15762 mask = gen_reg_rtx (mode);
15763 emit_insn (gen_rtx_SET (VOIDmode,
15765 gen_rtx_fmt_ee (code, mode, op0, op1)));
15772 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
15773 DMODE is expected destination mode. This is a recursive function. */
15776 rs6000_emit_vector_compare (enum rtx_code rcode,
15778 enum machine_mode dmode)
15781 bool swap_operands = false;
15782 bool try_again = false;
15784 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
15785 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
15787 /* See if the comparison works as is. */
15788 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
15796 swap_operands = true;
15801 swap_operands = true;
15809 /* Invert condition and try again.
15810 e.g., A != B becomes ~(A==B). */
15812 enum rtx_code rev_code;
15813 enum insn_code nor_code;
15816 rev_code = reverse_condition_maybe_unordered (rcode);
15817 if (rev_code == UNKNOWN)
15820 nor_code = optab_handler (one_cmpl_optab, (int)dmode)->insn_code;
15821 if (nor_code == CODE_FOR_nothing)
15824 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
15828 mask = gen_reg_rtx (dmode);
15829 emit_insn (GEN_FCN (nor_code) (mask, mask2));
15837 /* Try GT/GTU/LT/LTU OR EQ */
15840 enum insn_code ior_code;
15841 enum rtx_code new_code;
15862 gcc_unreachable ();
15865 ior_code = optab_handler (ior_optab, (int)dmode)->insn_code;
15866 if (ior_code == CODE_FOR_nothing)
15869 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
15873 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
15877 mask = gen_reg_rtx (dmode);
15878 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
15896 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
15901 /* You only get two chances. */
15905 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
15906 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
15907 operands for the relation operation COND. */
15910 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
15911 rtx cond, rtx cc_op0, rtx cc_op1)
15913 enum machine_mode dest_mode = GET_MODE (dest);
15914 enum rtx_code rcode = GET_CODE (cond);
15915 enum machine_mode cc_mode = CCmode;
15919 bool invert_move = false;
15921 if (VECTOR_UNIT_NONE_P (dest_mode))
15926 /* Swap operands if we can, and fall back to doing the operation as
15927 specified, and doing a NOR to invert the test. */
15933 /* Invert condition and try again.
15934 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
15935 invert_move = true;
15936 rcode = reverse_condition_maybe_unordered (rcode);
15937 if (rcode == UNKNOWN)
15941 /* Mark unsigned tests with CCUNSmode. */
15946 cc_mode = CCUNSmode;
15953 /* Get the vector mask for the given relational operations. */
15954 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, dest_mode);
15962 op_true = op_false;
15966 cond2 = gen_rtx_fmt_ee (NE, cc_mode, mask, const0_rtx);
15967 emit_insn (gen_rtx_SET (VOIDmode,
15969 gen_rtx_IF_THEN_ELSE (dest_mode,
15976 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
15977 operands of the last comparison is nonzero/true, FALSE_COND if it
15978 is zero/false. Return 0 if the hardware has no such operation. */
15981 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
15983 enum rtx_code code = GET_CODE (op);
15984 rtx op0 = XEXP (op, 0);
15985 rtx op1 = XEXP (op, 1);
15986 REAL_VALUE_TYPE c1;
15987 enum machine_mode compare_mode = GET_MODE (op0);
15988 enum machine_mode result_mode = GET_MODE (dest);
15990 bool is_against_zero;
15992 /* These modes should always match. */
15993 if (GET_MODE (op1) != compare_mode
15994 /* In the isel case however, we can use a compare immediate, so
15995 op1 may be a small constant. */
15996 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
15998 if (GET_MODE (true_cond) != result_mode)
16000 if (GET_MODE (false_cond) != result_mode)
16003 /* First, work out if the hardware can do this at all, or
16004 if it's too slow.... */
16005 if (!FLOAT_MODE_P (compare_mode))
16008 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
16011 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
16012 && SCALAR_FLOAT_MODE_P (compare_mode))
16015 is_against_zero = op1 == CONST0_RTX (compare_mode);
16017 /* A floating-point subtract might overflow, underflow, or produce
16018 an inexact result, thus changing the floating-point flags, so it
16019 can't be generated if we care about that. It's safe if one side
16020 of the construct is zero, since then no subtract will be
16022 if (SCALAR_FLOAT_MODE_P (compare_mode)
16023 && flag_trapping_math && ! is_against_zero)
16026 /* Eliminate half of the comparisons by switching operands; this
16027 makes the remaining code simpler. */
16028 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
16029 || code == LTGT || code == LT || code == UNLE)
16031 code = reverse_condition_maybe_unordered (code);
16033 true_cond = false_cond;
16037 /* UNEQ and LTGT take four instructions for a comparison with zero, so
16038 it'll probably be faster to use a branch here too. */
16039 if (code == UNEQ && HONOR_NANS (compare_mode))
16042 if (GET_CODE (op1) == CONST_DOUBLE)
16043 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
16045 /* We're going to try to implement comparisons by performing
16046 a subtract, then comparing against zero. Unfortunately,
16047 Inf - Inf is NaN which is not zero, and so if we don't
16048 know that the operand is finite and the comparison
16049 would treat EQ differently from UNORDERED, we can't do it. */
16050 if (HONOR_INFINITIES (compare_mode)
16051 && code != GT && code != UNGE
16052 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
16053 /* Constructs of the form (a OP b ? a : b) are safe. */
16054 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
16055 || (! rtx_equal_p (op0, true_cond)
16056 && ! rtx_equal_p (op1, true_cond))))
16059 /* At this point we know we can use fsel. */
16061 /* Reduce the comparison to a comparison against zero. */
16062 if (! is_against_zero)
16064 temp = gen_reg_rtx (compare_mode);
16065 emit_insn (gen_rtx_SET (VOIDmode, temp,
16066 gen_rtx_MINUS (compare_mode, op0, op1)));
16068 op1 = CONST0_RTX (compare_mode);
16071 /* If we don't care about NaNs we can reduce some of the comparisons
16072 down to faster ones. */
16073 if (! HONOR_NANS (compare_mode))
16079 true_cond = false_cond;
16092 /* Now, reduce everything down to a GE. */
16099 temp = gen_reg_rtx (compare_mode);
16100 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16105 temp = gen_reg_rtx (compare_mode);
16106 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
16111 temp = gen_reg_rtx (compare_mode);
16112 emit_insn (gen_rtx_SET (VOIDmode, temp,
16113 gen_rtx_NEG (compare_mode,
16114 gen_rtx_ABS (compare_mode, op0))));
16119 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
16120 temp = gen_reg_rtx (result_mode);
16121 emit_insn (gen_rtx_SET (VOIDmode, temp,
16122 gen_rtx_IF_THEN_ELSE (result_mode,
16123 gen_rtx_GE (VOIDmode,
16125 true_cond, false_cond)));
16126 false_cond = true_cond;
16129 temp = gen_reg_rtx (compare_mode);
16130 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16135 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
16136 temp = gen_reg_rtx (result_mode);
16137 emit_insn (gen_rtx_SET (VOIDmode, temp,
16138 gen_rtx_IF_THEN_ELSE (result_mode,
16139 gen_rtx_GE (VOIDmode,
16141 true_cond, false_cond)));
16142 true_cond = false_cond;
16145 temp = gen_reg_rtx (compare_mode);
16146 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16151 gcc_unreachable ();
16154 emit_insn (gen_rtx_SET (VOIDmode, dest,
16155 gen_rtx_IF_THEN_ELSE (result_mode,
16156 gen_rtx_GE (VOIDmode,
16158 true_cond, false_cond)));
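/* Every case above funnels into this single "op0 >= 0 ? true : false"
   select, which is exactly what the fsel instruction provides; the
   earlier NEG/ABS rewrites and the UNGE/GT double selects exist only to
   massage the comparison into that >= 0 form.  */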
16162 /* Same as above, but for ints (isel). */
16165 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
16167 rtx condition_rtx, cr;
16168 enum machine_mode mode = GET_MODE (dest);
16170 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
16173 /* We still have to do the compare, because isel doesn't do a
16174 compare, it just looks at the CRx bits set by a previous compare
16176 condition_rtx = rs6000_generate_compare (op, mode);
16177 cr = XEXP (condition_rtx, 0);
16179 if (mode == SImode)
16181 if (GET_MODE (cr) == CCmode)
16182 emit_insn (gen_isel_signed_si (dest, condition_rtx,
16183 true_cond, false_cond, cr));
16185 emit_insn (gen_isel_unsigned_si (dest, condition_rtx,
16186 true_cond, false_cond, cr));
16190 if (GET_MODE (cr) == CCmode)
16191 emit_insn (gen_isel_signed_di (dest, condition_rtx,
16192 true_cond, false_cond, cr));
16194 emit_insn (gen_isel_unsigned_di (dest, condition_rtx,
16195 true_cond, false_cond, cr));
16202 output_isel (rtx *operands)
16204 enum rtx_code code;
16206 code = GET_CODE (operands[1]);
16207 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
16209 PUT_CODE (operands[1], reverse_condition (code));
16210 return "isel %0,%3,%2,%j1";
16213 return "isel %0,%2,%3,%j1";
16217 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
16219 enum machine_mode mode = GET_MODE (op0);
16223 /* VSX/altivec have direct min/max insns. */
16224 if ((code == SMAX || code == SMIN) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode))
16226 emit_insn (gen_rtx_SET (VOIDmode,
16228 gen_rtx_fmt_ee (code, mode, op0, op1)));
16232 if (code == SMAX || code == SMIN)
16237 if (code == SMAX || code == UMAX)
16238 target = emit_conditional_move (dest, c, op0, op1, mode,
16239 op0, op1, mode, 0);
16241 target = emit_conditional_move (dest, c, op0, op1, mode,
16242 op1, op0, mode, 0);
16243 gcc_assert (target);
16244 if (target != dest)
16245 emit_move_insn (dest, target);
16248 /* Emit instructions to perform a load-reserved/store-conditional operation.
16249 The operation performed is an atomic
16250 (set M (CODE:MODE M OP))
16251 If not NULL, BEFORE is atomically set to M before the operation, and
16252 AFTER is set to M after the operation (that is, (CODE:MODE M OP)).
16253 If SYNC_P then a memory barrier is emitted before the operation.
16254 Either OP or M may be wrapped in a NOT operation. */
16257 rs6000_emit_sync (enum rtx_code code, enum machine_mode mode,
16258 rtx m, rtx op, rtx before_param, rtx after_param,
16261 enum machine_mode used_mode;
16262 rtx the_op, set_before, set_after, set_atomic, cc_scratch, before, after;
16265 HOST_WIDE_INT imask = GET_MODE_MASK (mode);
16266 rtx shift = NULL_RTX;
16269 emit_insn (gen_lwsync ());
16273 /* If this is smaller than SImode, we'll have to use SImode with
16275 if (mode == QImode || mode == HImode)
16279 if (MEM_ALIGN (used_m) >= 32)
16282 if (BYTES_BIG_ENDIAN)
16283 ishift = GET_MODE_BITSIZE (SImode) - GET_MODE_BITSIZE (mode);
16285 shift = GEN_INT (ishift);
16286 used_m = change_address (used_m, SImode, 0);
16290 rtx addrSI, aligned_addr;
16291 int shift_mask = mode == QImode ? 0x18 : 0x10;
16293 addrSI = gen_lowpart_common (SImode,
16294 force_reg (Pmode, XEXP (used_m, 0)));
16295 addrSI = force_reg (SImode, addrSI);
16296 shift = gen_reg_rtx (SImode);
16298 emit_insn (gen_rlwinm (shift, addrSI, GEN_INT (3),
16299 GEN_INT (shift_mask)));
16300 emit_insn (gen_xorsi3 (shift, shift, GEN_INT (shift_mask)));
16302 aligned_addr = expand_binop (Pmode, and_optab,
16304 GEN_INT (-4), NULL_RTX,
16305 1, OPTAB_LIB_WIDEN);
16306 used_m = change_address (used_m, SImode, aligned_addr);
16307 set_mem_align (used_m, 32);
16309 /* It's safe to keep the old alias set of USED_M, because
16310 the operation is atomic and only affects the original
16314 if (GET_CODE (op) == NOT)
16316 oldop = lowpart_subreg (SImode, XEXP (op, 0), mode);
16317 oldop = gen_rtx_NOT (SImode, oldop);
16320 oldop = lowpart_subreg (SImode, op, mode);
16326 newop = expand_binop (SImode, and_optab,
16327 oldop, GEN_INT (imask), NULL_RTX,
16328 1, OPTAB_LIB_WIDEN);
16329 emit_insn (gen_ashlsi3 (newop, newop, shift));
16332 case NOT: /* NAND */
16333 newop = expand_binop (SImode, ior_optab,
16334 oldop, GEN_INT (~imask), NULL_RTX,
16335 1, OPTAB_LIB_WIDEN);
16336 emit_insn (gen_rotlsi3 (newop, newop, shift));
16340 newop = expand_binop (SImode, ior_optab,
16341 oldop, GEN_INT (~imask), NULL_RTX,
16342 1, OPTAB_LIB_WIDEN);
16343 emit_insn (gen_rotlsi3 (newop, newop, shift));
16351 newop = expand_binop (SImode, and_optab,
16352 oldop, GEN_INT (imask), NULL_RTX,
16353 1, OPTAB_LIB_WIDEN);
16354 emit_insn (gen_ashlsi3 (newop, newop, shift));
16356 mask = gen_reg_rtx (SImode);
16357 emit_move_insn (mask, GEN_INT (imask));
16358 emit_insn (gen_ashlsi3 (mask, mask, shift));
16361 newop = gen_rtx_PLUS (SImode, m, newop);
16363 newop = gen_rtx_MINUS (SImode, m, newop);
16364 newop = gen_rtx_AND (SImode, newop, mask);
16365 newop = gen_rtx_IOR (SImode, newop,
16366 gen_rtx_AND (SImode,
16367 gen_rtx_NOT (SImode, mask),
16373 gcc_unreachable ();
16377 used_mode = SImode;
16378 before = gen_reg_rtx (used_mode);
16379 after = gen_reg_rtx (used_mode);
16384 before = before_param;
16385 after = after_param;
16387 if (before == NULL_RTX)
16388 before = gen_reg_rtx (used_mode);
16389 if (after == NULL_RTX)
16390 after = gen_reg_rtx (used_mode);
16393 if ((code == PLUS || code == MINUS)
16394 && used_mode != mode)
16395 the_op = op; /* Computed above. */
16396 else if (GET_CODE (op) == NOT && GET_CODE (m) != NOT)
16397 the_op = gen_rtx_fmt_ee (code, used_mode, op, m);
16398 else if (code == NOT)
16399 the_op = gen_rtx_fmt_ee (IOR, used_mode,
16400 gen_rtx_NOT (used_mode, m),
16401 gen_rtx_NOT (used_mode, op));
16403 the_op = gen_rtx_fmt_ee (code, used_mode, m, op);
16405 set_after = gen_rtx_SET (VOIDmode, after, the_op);
16406 set_before = gen_rtx_SET (VOIDmode, before, used_m);
16407 set_atomic = gen_rtx_SET (VOIDmode, used_m,
16408 gen_rtx_UNSPEC (used_mode,
16409 gen_rtvec (1, the_op),
16411 cc_scratch = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
16413 if ((code == PLUS || code == MINUS) && used_mode != mode)
16414 vec = gen_rtvec (5, set_after, set_before, set_atomic, cc_scratch,
16415 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)));
16417 vec = gen_rtvec (4, set_after, set_before, set_atomic, cc_scratch);
16418 emit_insn (gen_rtx_PARALLEL (VOIDmode, vec));
16420 /* Shift and mask the return values properly. */
16421 if (used_mode != mode && before_param)
16423 emit_insn (gen_lshrsi3 (before, before, shift));
16424 convert_move (before_param, before, 1);
16427 if (used_mode != mode && after_param)
16429 emit_insn (gen_lshrsi3 (after, after, shift));
16430 convert_move (after_param, after, 1);
16433 /* The previous sequence will end with a branch that's dependent on
16434 the conditional store, so placing an isync will ensure that no
16435 other instructions (especially, no load or store instructions)
16436 can start before the atomic operation completes. */
16438 emit_insn (gen_isync ());
16441 /* A subroutine of the atomic operation splitters. Jump to LABEL if
16442 COND is true. Mark the jump as unlikely to be taken. */
16445 emit_unlikely_jump (rtx cond, rtx label)
16447 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
16450 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
16451 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
16452 add_reg_note (x, REG_BR_PROB, very_unlikely);
16455 /* A subroutine of the atomic operation splitters. Emit a load-locked
16456 instruction in MODE. */
16459 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
16461 rtx (*fn) (rtx, rtx) = NULL;
16462 if (mode == SImode)
16463 fn = gen_load_locked_si;
16464 else if (mode == DImode)
16465 fn = gen_load_locked_di;
16466 emit_insn (fn (reg, mem));
16469 /* A subroutine of the atomic operation splitters. Emit a store-conditional
16470 instruction in MODE. */
16473 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
16475 rtx (*fn) (rtx, rtx, rtx) = NULL;
16476 if (mode == SImode)
16477 fn = gen_store_conditional_si;
16478 else if (mode == DImode)
16479 fn = gen_store_conditional_di;
16481 /* Emit sync before stwcx. to address PPC405 Erratum. */
16482 if (PPC405_ERRATUM77)
16483 emit_insn (gen_memory_barrier ());
16485 emit_insn (fn (res, mem, val));
16488 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
16489 to perform. MEM is the memory on which to operate. VAL is the second
16490 operand of the binary operator. BEFORE and AFTER are optional locations to
16491 return the value of MEM either before or after the operation. SCRATCH is
16492 a scratch register. */
16495 rs6000_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
16496 rtx before, rtx after, rtx scratch)
16498 enum machine_mode mode = GET_MODE (mem);
16499 rtx label, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
16501 emit_insn (gen_lwsync ());
16503 label = gen_label_rtx ();
16504 emit_label (label);
16505 label = gen_rtx_LABEL_REF (VOIDmode, label);
16507 if (before == NULL_RTX)
16509 emit_load_locked (mode, before, mem);
16512 x = gen_rtx_IOR (mode,
16513 gen_rtx_NOT (mode, before),
16514 gen_rtx_NOT (mode, val));
16515 else if (code == AND)
16516 x = gen_rtx_UNSPEC (mode, gen_rtvec (2, before, val), UNSPEC_AND);
16518 x = gen_rtx_fmt_ee (code, mode, before, val);
16520 if (after != NULL_RTX)
16521 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
16522 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
16524 emit_store_conditional (mode, cond, mem, scratch);
16526 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16527 emit_unlikely_jump (x, label);
16529 emit_insn (gen_isync ());
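/* The emitted sequence is the classic load-reserved/store-conditional
   retry loop; for a word-sized add it looks roughly like this (register
   numbers are illustrative):

	lwsync
     1:	lwarx   rT,0,rM
	add     rS,rT,rV
	stwcx.  rS,0,rM
	bne-    1b
	isync
*/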
16532 /* Expand an atomic compare and swap operation. MEM is the memory on which
16533 to operate. OLDVAL is the old value to be compared. NEWVAL is the new
16534 value to be stored. SCRATCH is a scratch GPR. */
16537 rs6000_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
16540 enum machine_mode mode = GET_MODE (mem);
16541 rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
16543 emit_insn (gen_lwsync ());
16545 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16546 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16547 emit_label (XEXP (label1, 0));
16549 emit_load_locked (mode, retval, mem);
16551 x = gen_rtx_COMPARE (CCmode, retval, oldval);
16552 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
16554 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16555 emit_unlikely_jump (x, label2);
16557 emit_move_insn (scratch, newval);
16558 emit_store_conditional (mode, cond, mem, scratch);
16560 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16561 emit_unlikely_jump (x, label1);
16563 emit_insn (gen_isync ());
16564 emit_label (XEXP (label2, 0));
16567 /* Expand an atomic test and set operation. MEM is the memory on which
16568 to operate. VAL is the value set. SCRATCH is a scratch GPR. */
16571 rs6000_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
16573 enum machine_mode mode = GET_MODE (mem);
16574 rtx label, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
16576 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16577 emit_label (XEXP (label, 0));
16579 emit_load_locked (mode, retval, mem);
16580 emit_move_insn (scratch, val);
16581 emit_store_conditional (mode, cond, mem, scratch);
16583 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16584 emit_unlikely_jump (x, label);
16586 emit_insn (gen_isync ());
16590 rs6000_expand_compare_and_swapqhi (rtx dst, rtx mem, rtx oldval, rtx newval)
16592 enum machine_mode mode = GET_MODE (mem);
16593 rtx addrSI, align, wdst, shift, mask;
16594 HOST_WIDE_INT shift_mask = mode == QImode ? 0x18 : 0x10;
16595 HOST_WIDE_INT imask = GET_MODE_MASK (mode);
16597 /* Shift amount for subword relative to aligned word. */
16598 addrSI = force_reg (GET_MODE (XEXP (mem, 0)), XEXP (mem, 0));
16599 addrSI = force_reg (SImode, gen_lowpart_common (SImode, addrSI));
16600 shift = gen_reg_rtx (SImode);
16601 emit_insn (gen_rlwinm (shift, addrSI, GEN_INT (3),
16602 GEN_INT (shift_mask)));
16603 emit_insn (gen_xorsi3 (shift, shift, GEN_INT (shift_mask)));
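/* A standalone sketch of that shift computation, assuming a big-endian
   word: the rlwinm extracts the low address bits scaled to a bit count
   and the xor converts them into the left shift that puts the subword
   in place.  Not compiled as part of this file.  */
#if 0
#include <stdio.h>

static int
example_subword_shift (unsigned int addr, int qimode_p)
{
  int shift_mask = qimode_p ? 0x18 : 0x10;
  int shift = (addr << 3) & shift_mask;	/* rlwinm shift,addr,3,mask  */
  return shift ^ shift_mask;		/* big-endian bit position  */
}

int
main (void)
{
  /* Prints "24 0 16 0": byte 0 of a word shifts by 24, byte 3 by 0,
     halfword 0 by 16 and halfword 1 by 0.  */
  printf ("%d %d %d %d\n",
	  example_subword_shift (0x1000, 1), example_subword_shift (0x1003, 1),
	  example_subword_shift (0x1000, 0), example_subword_shift (0x1002, 0));
  return 0;
}
#endif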
16605 /* Shift and mask old value into position within word. */
16606 oldval = convert_modes (SImode, mode, oldval, 1);
16607 oldval = expand_binop (SImode, and_optab,
16608 oldval, GEN_INT (imask), NULL_RTX,
16609 1, OPTAB_LIB_WIDEN);
16610 emit_insn (gen_ashlsi3 (oldval, oldval, shift));
16612 /* Shift and mask new value into position within word. */
16613 newval = convert_modes (SImode, mode, newval, 1);
16614 newval = expand_binop (SImode, and_optab,
16615 newval, GEN_INT (imask), NULL_RTX,
16616 1, OPTAB_LIB_WIDEN);
16617 emit_insn (gen_ashlsi3 (newval, newval, shift));
16619 /* Mask for insertion. */
16620 mask = gen_reg_rtx (SImode);
16621 emit_move_insn (mask, GEN_INT (imask));
16622 emit_insn (gen_ashlsi3 (mask, mask, shift));
16624 /* Address of aligned word containing subword. */
16625 align = expand_binop (Pmode, and_optab, XEXP (mem, 0), GEN_INT (-4),
16626 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16627 mem = change_address (mem, SImode, align);
16628 set_mem_align (mem, 32);
16629 MEM_VOLATILE_P (mem) = 1;
16631 wdst = gen_reg_rtx (SImode);
16632 emit_insn (gen_sync_compare_and_swapqhi_internal (wdst, mask,
16633 oldval, newval, mem));
16635 /* Shift the result back. */
16636 emit_insn (gen_lshrsi3 (wdst, wdst, shift));
16638 emit_move_insn (dst, gen_lowpart (mode, wdst));
16642 rs6000_split_compare_and_swapqhi (rtx dest, rtx mask,
16643 rtx oldval, rtx newval, rtx mem,
16646 rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
16648 emit_insn (gen_lwsync ());
16649 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16650 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16651 emit_label (XEXP (label1, 0));
16653 emit_load_locked (SImode, scratch, mem);
16655 /* Mask subword within loaded value for comparison with oldval.
16656 Use UNSPEC_AND to avoid clobber.  */
16657 emit_insn (gen_rtx_SET (SImode, dest,
16658 gen_rtx_UNSPEC (SImode,
16659 gen_rtvec (2, scratch, mask),
16662 x = gen_rtx_COMPARE (CCmode, dest, oldval);
16663 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
16665 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16666 emit_unlikely_jump (x, label2);
16668 /* Clear subword within loaded value for insertion of new value. */
16669 emit_insn (gen_rtx_SET (SImode, scratch,
16670 gen_rtx_AND (SImode,
16671 gen_rtx_NOT (SImode, mask), scratch)));
16672 emit_insn (gen_iorsi3 (scratch, scratch, newval));
16673 emit_store_conditional (SImode, cond, mem, scratch);
16675 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16676 emit_unlikely_jump (x, label1);
16678 emit_insn (gen_isync ());
16679 emit_label (XEXP (label2, 0));
16683 /* Emit instructions to move SRC to DST. Called by splitters for
16684 multi-register moves. It will emit at most one instruction for
16685 each register that is accessed; that is, it won't emit li/lis pairs
16686 (or equivalent for 64-bit code). One of SRC or DST must be a hard
16690 rs6000_split_multireg_move (rtx dst, rtx src)
16692 /* The register number of the first register being moved. */
16694 /* The mode that is to be moved. */
16695 enum machine_mode mode;
16696 /* The mode that the move is being done in, and its size. */
16697 enum machine_mode reg_mode;
16699 /* The number of registers that will be moved. */
16702 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
16703 mode = GET_MODE (dst);
16704 nregs = hard_regno_nregs[reg][mode];
16705 if (FP_REGNO_P (reg))
16706 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
16707 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
16708 else if (ALTIVEC_REGNO_P (reg))
16709 reg_mode = V16QImode;
16710 else if (TARGET_E500_DOUBLE && mode == TFmode)
16713 reg_mode = word_mode;
16714 reg_mode_size = GET_MODE_SIZE (reg_mode);
16716 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
16718 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
16720 /* Move register range backwards, if we might have destructive overlap.  */
16723 for (i = nregs - 1; i >= 0; i--)
16724 emit_insn (gen_rtx_SET (VOIDmode,
16725 simplify_gen_subreg (reg_mode, dst, mode,
16726 i * reg_mode_size),
16727 simplify_gen_subreg (reg_mode, src, mode,
16728 i * reg_mode_size)));
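/* With REGNO (src) < REGNO (dst) the two ranges can overlap, and each
   destination subword aliases a higher-numbered source subword; copying
   from the last subword down to the first therefore never overwrites a
   source register before it has been read.  */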
16734 bool used_update = false;
16735 rtx restore_basereg = NULL_RTX;
16737 if (MEM_P (src) && INT_REGNO_P (reg))
16741 if (GET_CODE (XEXP (src, 0)) == PRE_INC
16742 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
16745 breg = XEXP (XEXP (src, 0), 0);
16746 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
16747 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
16748 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
16749 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
16750 src = replace_equiv_address (src, breg);
16752 else if (! rs6000_offsettable_memref_p (src))
16754 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
16756 rtx basereg = XEXP (XEXP (src, 0), 0);
16759 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
16760 emit_insn (gen_rtx_SET (VOIDmode, ndst,
16761 gen_rtx_MEM (reg_mode, XEXP (src, 0))));
16762 used_update = true;
16765 emit_insn (gen_rtx_SET (VOIDmode, basereg,
16766 XEXP (XEXP (src, 0), 1)));
16767 src = replace_equiv_address (src, basereg);
16771 rtx basereg = gen_rtx_REG (Pmode, reg);
16772 emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
16773 src = replace_equiv_address (src, basereg);
16777 breg = XEXP (src, 0);
16778 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
16779 breg = XEXP (breg, 0);
16781 /* If the base register we are using to address memory is
16782 also a destination reg, then change that register last. */
16784 && REGNO (breg) >= REGNO (dst)
16785 && REGNO (breg) < REGNO (dst) + nregs)
16786 j = REGNO (breg) - REGNO (dst);
16788 else if (MEM_P (dst) && INT_REGNO_P (reg))
16792 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
16793 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
16796 breg = XEXP (XEXP (dst, 0), 0);
16797 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
16798 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
16799 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
16801 /* We have to update the breg before doing the store.
16802 Use store with update, if available. */
16806 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
16807 emit_insn (TARGET_32BIT
16808 ? (TARGET_POWERPC64
16809 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
16810 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
16811 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
16812 used_update = true;
16815 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
16816 dst = replace_equiv_address (dst, breg);
16818 else if (!rs6000_offsettable_memref_p (dst)
16819 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
16821 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
16823 rtx basereg = XEXP (XEXP (dst, 0), 0);
16826 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
16827 emit_insn (gen_rtx_SET (VOIDmode,
16828 gen_rtx_MEM (reg_mode, XEXP (dst, 0)), nsrc));
16829 used_update = true;
16832 emit_insn (gen_rtx_SET (VOIDmode, basereg,
16833 XEXP (XEXP (dst, 0), 1)));
16834 dst = replace_equiv_address (dst, basereg);
16838 rtx basereg = XEXP (XEXP (dst, 0), 0);
16839 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
16840 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
16842 && REG_P (offsetreg)
16843 && REGNO (basereg) != REGNO (offsetreg));
16844 if (REGNO (basereg) == 0)
16846 rtx tmp = offsetreg;
16847 offsetreg = basereg;
16850 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
16851 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
16852 dst = replace_equiv_address (dst, basereg);
16855 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
16856 gcc_assert (rs6000_offsettable_memref_p (dst));
16859 for (i = 0; i < nregs; i++)
16861 /* Calculate index to next subword. */
16866 /* If compiler already emitted move of first word by
16867 store with update, no need to do anything. */
16868 if (j == 0 && used_update)
16871 emit_insn (gen_rtx_SET (VOIDmode,
16872 simplify_gen_subreg (reg_mode, dst, mode,
16873 j * reg_mode_size),
16874 simplify_gen_subreg (reg_mode, src, mode,
16875 j * reg_mode_size)));
16877 if (restore_basereg != NULL_RTX)
16878 emit_insn (restore_basereg);
16883 /* This page contains routines that are used to determine what the
16884 function prologue and epilogue code will do and write them out. */
16886 /* Return the first fixed-point register that is required to be
16887 saved. 32 if none. */
16890 first_reg_to_save (void)
16894 /* Find lowest numbered live register. */
16895 for (first_reg = 13; first_reg <= 31; first_reg++)
16896 if (df_regs_ever_live_p (first_reg)
16897 && (! call_used_regs[first_reg]
16898 || (first_reg == RS6000_PIC_OFFSET_TABLE_REGNUM
16899 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
16900 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
16901 || (TARGET_TOC && TARGET_MINIMAL_TOC)))))
16906 && crtl->uses_pic_offset_table
16907 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
16908 return RS6000_PIC_OFFSET_TABLE_REGNUM;
16914 /* Similar, for FP regs. */
16917 first_fp_reg_to_save (void)
16921 /* Find lowest numbered live register. */
16922 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
16923 if (df_regs_ever_live_p (first_reg))
16929 /* Similar, for AltiVec regs. */
16932 first_altivec_reg_to_save (void)
16936 /* Stack frame remains as is unless we are in AltiVec ABI. */
16937 if (! TARGET_ALTIVEC_ABI)
16938 return LAST_ALTIVEC_REGNO + 1;
16940 /* On Darwin, the unwind routines are compiled without
16941 TARGET_ALTIVEC, and use save_world to save/restore the
16942 altivec registers when necessary. */
16943 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
16944 && ! TARGET_ALTIVEC)
16945 return FIRST_ALTIVEC_REGNO + 20;
16947 /* Find lowest numbered live register. */
16948 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
16949 if (df_regs_ever_live_p (i))
16955 /* Return a 32-bit mask of the AltiVec registers we need to set in
16956 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
16957 the 32-bit word is 0. */
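/* For example, with only V20 and V31 live the mask would be
   (0x80000000 >> 20) | (0x80000000 >> 31) == 0x00000801, i.e. bits 20
   and 31 counted from the most significant end (illustrative, using the
   bit numbering described above).  */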
16959 static unsigned int
16960 compute_vrsave_mask (void)
16962 unsigned int i, mask = 0;
16964 /* On Darwin, the unwind routines are compiled without
16965 TARGET_ALTIVEC, and use save_world to save/restore the
16966 call-saved altivec registers when necessary. */
16967 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
16968 && ! TARGET_ALTIVEC)
16971 /* First, find out if we use _any_ altivec registers. */
16972 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
16973 if (df_regs_ever_live_p (i))
16974 mask |= ALTIVEC_REG_BIT (i);
16979 /* Next, remove the argument registers from the set. These must
16980 be in the VRSAVE mask set by the caller, so we don't need to add
16981 them in again. More importantly, the mask we compute here is
16982 used to generate CLOBBERs in the set_vrsave insn, and we do not
16983 wish the argument registers to die. */
16984 for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
16985 mask &= ~ALTIVEC_REG_BIT (i);
16987 /* Similarly, remove the return value from the set. */
16990 diddle_return_value (is_altivec_return_reg, &yes);
16992 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
16998 /* For a very restricted set of circumstances, we can cut down the
16999 size of prologues/epilogues by calling our own save/restore-the-world routines.  */
17003 compute_save_world_info (rs6000_stack_t *info_ptr)
17005 info_ptr->world_save_p = 1;
17006 info_ptr->world_save_p
17007 = (WORLD_SAVE_P (info_ptr)
17008 && DEFAULT_ABI == ABI_DARWIN
17009 && ! (cfun->calls_setjmp && flag_exceptions)
17010 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
17011 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
17012 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
17013 && info_ptr->cr_save_p);
17015 /* This will not work in conjunction with sibcalls. Make sure there
17016 are none. (This check is expensive, but seldom executed.) */
17017 if (WORLD_SAVE_P (info_ptr))
17020 for ( insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
17021 if ( GET_CODE (insn) == CALL_INSN
17022 && SIBLING_CALL_P (insn))
17024 info_ptr->world_save_p = 0;
17029 if (WORLD_SAVE_P (info_ptr))
17031 /* Even if we're not touching VRsave, make sure there's room on the
17032 stack for it, if it looks like we're calling SAVE_WORLD, which
17033 will attempt to save it. */
17034 info_ptr->vrsave_size = 4;
17036 /* If we are going to save the world, we need to save the link register too. */
17037 info_ptr->lr_save_p = 1;
17039 /* "Save" the VRsave register too if we're saving the world. */
17040 if (info_ptr->vrsave_mask == 0)
17041 info_ptr->vrsave_mask = compute_vrsave_mask ();
17043 /* Because the Darwin register save/restore routines only handle
17044 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency check.  */
17046 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
17047 && (info_ptr->first_altivec_reg_save
17048 >= FIRST_SAVED_ALTIVEC_REGNO));
17055 is_altivec_return_reg (rtx reg, void *xyes)
17057 bool *yes = (bool *) xyes;
17058 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
17063 /* Calculate the stack information for the current function. This is
17064 complicated by having two separate calling sequences, the AIX calling
17065 sequence and the V.4 calling sequence.
17067 AIX (and Darwin/Mac OS X) stack frames look like:
17069 SP----> +---------------------------------------+
17070 | back chain to caller | 0 0
17071 +---------------------------------------+
17072 | saved CR | 4 8 (8-11)
17073 +---------------------------------------+
17075 +---------------------------------------+
17076 | reserved for compilers | 12 24
17077 +---------------------------------------+
17078 | reserved for binders | 16 32
17079 +---------------------------------------+
17080 | saved TOC pointer | 20 40
17081 +---------------------------------------+
17082 | Parameter save area (P) | 24 48
17083 +---------------------------------------+
17084 | Alloca space (A) | 24+P etc.
17085 +---------------------------------------+
17086 | Local variable space (L) | 24+P+A
17087 +---------------------------------------+
17088 | Float/int conversion temporary (X) | 24+P+A+L
17089 +---------------------------------------+
17090 | Save area for AltiVec registers (W) | 24+P+A+L+X
17091 +---------------------------------------+
17092 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
17093 +---------------------------------------+
17094 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
17095 +---------------------------------------+
17096 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
17097 +---------------------------------------+
17098 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
17099 +---------------------------------------+
17100 old SP->| back chain to caller's caller |
17101 +---------------------------------------+
17103 The required alignment for AIX configurations is two words (i.e., 8 bytes).
17107 V.4 stack frames look like:
17109 SP----> +---------------------------------------+
17110 | back chain to caller | 0
17111 +---------------------------------------+
17112 | caller's saved LR | 4
17113 +---------------------------------------+
17114 | Parameter save area (P) | 8
17115 +---------------------------------------+
17116 | Alloca space (A) | 8+P
17117 +---------------------------------------+
17118 | Varargs save area (V) | 8+P+A
17119 +---------------------------------------+
17120 | Local variable space (L) | 8+P+A+V
17121 +---------------------------------------+
17122 | Float/int conversion temporary (X) | 8+P+A+V+L
17123 +---------------------------------------+
17124 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
17125 +---------------------------------------+
17126 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
17127 +---------------------------------------+
17128 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
17129 +---------------------------------------+
17130 | SPE: area for 64-bit GP registers |
17131 +---------------------------------------+
17132 | SPE alignment padding |
17133 +---------------------------------------+
17134 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
17135 +---------------------------------------+
17136 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
17137 +---------------------------------------+
17138 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
17139 +---------------------------------------+
17140 old SP->| back chain to caller's caller |
17141 +---------------------------------------+
17143 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
17144 given. (But note below and in sysv4.h that we require only 8 and
17145 may round up the size of our stack frame anyway.  The historical
17146 reason is that early versions of powerpc-linux didn't properly
17147 align the stack at program startup.  A happy side-effect is that
17148 -mno-eabi libraries can be used with -meabi programs.)
17150 The EABI configuration defaults to the V.4 layout. However,
17151 the stack alignment requirements may differ. If -mno-eabi is not
17152 given, the required stack alignment is 8 bytes; if -mno-eabi is
17153 given, the required alignment is 16 bytes.  (But see the V.4 comment above.)  */
17156 #ifndef ABI_STACK_BOUNDARY
17157 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
17160 static rs6000_stack_t *
17161 rs6000_stack_info (void)
17163 static rs6000_stack_t info;
17164 rs6000_stack_t *info_ptr = &info;
17165 int reg_size = TARGET_32BIT ? 4 : 8;
17169 HOST_WIDE_INT non_fixed_size;
17171 memset (&info, 0, sizeof (info));
17175 /* Cache value so we don't rescan instruction chain over and over. */
17176 if (cfun->machine->insn_chain_scanned_p == 0)
17177 cfun->machine->insn_chain_scanned_p
17178 = spe_func_has_64bit_regs_p () + 1;
17179 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
17182 /* Select which calling sequence. */
17183 info_ptr->abi = DEFAULT_ABI;
17185 /* Calculate which registers need to be saved & save area size. */
17186 info_ptr->first_gp_reg_save = first_reg_to_save ();
17187 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
17188 even if it currently looks like we won't. Reload may need it to
17189 get at a constant; if so, it will have already created a constant
17190 pool entry for it. */
17191 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
17192 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
17193 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
17194 && crtl->uses_const_pool
17195 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
17196 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
17198 first_gp = info_ptr->first_gp_reg_save;
17200 info_ptr->gp_size = reg_size * (32 - first_gp);
17202 /* For the SPE, we have an additional upper 32-bits on each GPR.
17203 Ideally we should save the entire 64-bits only when the upper
17204 half is used in SIMD instructions. Since we only record
17205 registers live (not the size they are used in), this proves
17206 difficult because we'd have to traverse the instruction chain at
17207 the right time, taking reload into account. This is a real pain,
17208 so we opt to save all the GPRs in 64 bits whenever even one register
17209 is used in 64 bits.  Otherwise, all the registers in the frame
17210 get saved in 32 bits.
17212 So, when we save all GPRs (except the SP) in 64 bits, the
17213 traditional GP save area will be empty.
17214 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17215 info_ptr->gp_size = 0;
17217 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
17218 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
17220 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
17221 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
17222 - info_ptr->first_altivec_reg_save);
17224 /* Does this function call anything? */
17225 info_ptr->calls_p = (! current_function_is_leaf
17226 || cfun->machine->ra_needs_full_frame);
17228 /* Determine if we need to save the link register. */
17229 if ((DEFAULT_ABI == ABI_AIX
17231 && !TARGET_PROFILE_KERNEL)
17232 #ifdef TARGET_RELOCATABLE
17233 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
17235 || (info_ptr->first_fp_reg_save != 64
17236 && !FP_SAVE_INLINE (info_ptr->first_fp_reg_save))
17237 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
17238 || info_ptr->calls_p
17239 || rs6000_ra_ever_killed ())
17241 info_ptr->lr_save_p = 1;
17242 df_set_regs_ever_live (LR_REGNO, true);
17245 /* Determine if we need to save the condition code registers. */
17246 if (df_regs_ever_live_p (CR2_REGNO)
17247 || df_regs_ever_live_p (CR3_REGNO)
17248 || df_regs_ever_live_p (CR4_REGNO))
17250 info_ptr->cr_save_p = 1;
17251 if (DEFAULT_ABI == ABI_V4)
17252 info_ptr->cr_size = reg_size;
17255 /* If the current function calls __builtin_eh_return, then we need
17256 to allocate stack space for registers that will hold data for
17257 the exception handler. */
17258 if (crtl->calls_eh_return)
17261 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
17264 /* SPE saves EH registers in 64-bits. */
17265 ehrd_size = i * (TARGET_SPE_ABI
17266 && info_ptr->spe_64bit_regs_used != 0
17267 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
17272 /* Determine various sizes. */
17273 info_ptr->reg_size = reg_size;
17274 info_ptr->fixed_size = RS6000_SAVE_AREA;
17275 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
17276 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
17277 TARGET_ALTIVEC ? 16 : 8);
17278 if (FRAME_GROWS_DOWNWARD)
17279 info_ptr->vars_size
17280 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
17281 + info_ptr->parm_size,
17282 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
17283 - (info_ptr->fixed_size + info_ptr->vars_size
17284 + info_ptr->parm_size);
17286 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17287 info_ptr->spe_gp_size = 8 * (32 - first_gp);
17289 info_ptr->spe_gp_size = 0;
17291 if (TARGET_ALTIVEC_ABI)
17292 info_ptr->vrsave_mask = compute_vrsave_mask ();
17294 info_ptr->vrsave_mask = 0;
17296 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
17297 info_ptr->vrsave_size = 4;
17299 info_ptr->vrsave_size = 0;
17301 compute_save_world_info (info_ptr);
17303 /* Calculate the offsets. */
17304 switch (DEFAULT_ABI)
17308 gcc_unreachable ();
17312 info_ptr->fp_save_offset = - info_ptr->fp_size;
17313 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
17315 if (TARGET_ALTIVEC_ABI)
17317 info_ptr->vrsave_save_offset
17318 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
17320 /* Align stack so vector save area is on a quadword boundary.
17321 The padding goes above the vectors. */
17322 if (info_ptr->altivec_size != 0)
17323 info_ptr->altivec_padding_size
17324 = info_ptr->vrsave_save_offset & 0xF;
17326 info_ptr->altivec_padding_size = 0;
17328 info_ptr->altivec_save_offset
17329 = info_ptr->vrsave_save_offset
17330 - info_ptr->altivec_padding_size
17331 - info_ptr->altivec_size;
17332 gcc_assert (info_ptr->altivec_size == 0
17333 || info_ptr->altivec_save_offset % 16 == 0);
17335 /* Adjust for AltiVec case. */
17336 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
17339 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
17340 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
17341 info_ptr->lr_save_offset = 2*reg_size;
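/* So under the AIX/Darwin layout CR and LR are kept in the caller's
   reserved words just above the back chain: for example, offsets 8 and
   16 from the incoming stack pointer in 64-bit mode (4 and 8 in 32-bit
   mode), matching the frame diagram above.  */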
17345 info_ptr->fp_save_offset = - info_ptr->fp_size;
17346 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
17347 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
17349 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17351 /* Align stack so SPE GPR save area is aligned on a
17352 double-word boundary. */
17353 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
17354 info_ptr->spe_padding_size
17355 = 8 - (-info_ptr->cr_save_offset % 8);
17357 info_ptr->spe_padding_size = 0;
17359 info_ptr->spe_gp_save_offset
17360 = info_ptr->cr_save_offset
17361 - info_ptr->spe_padding_size
17362 - info_ptr->spe_gp_size;
17364 /* Adjust for SPE case. */
17365 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
17367 else if (TARGET_ALTIVEC_ABI)
17369 info_ptr->vrsave_save_offset
17370 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
17372 /* Align stack so vector save area is on a quadword boundary. */
17373 if (info_ptr->altivec_size != 0)
17374 info_ptr->altivec_padding_size
17375 = 16 - (-info_ptr->vrsave_save_offset % 16);
17377 info_ptr->altivec_padding_size = 0;
17379 info_ptr->altivec_save_offset
17380 = info_ptr->vrsave_save_offset
17381 - info_ptr->altivec_padding_size
17382 - info_ptr->altivec_size;
17384 /* Adjust for AltiVec case. */
17385 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
17388 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
17389 info_ptr->ehrd_offset -= ehrd_size;
17390 info_ptr->lr_save_offset = reg_size;
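/* Worked example (a hypothetical 32-bit V.4 function with no AltiVec or
   SPE saves): saving f28-f31 (fp_size = 32), r28-r31 (gp_size = 16) and
   CR (cr_size = 4) gives fp_save_offset = -32, gp_save_offset = -48,
   cr_save_offset = -52, and lr_save_offset = 4, LR being stored in the
   caller's frame header.  */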
17394 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
17395 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
17396 + info_ptr->gp_size
17397 + info_ptr->altivec_size
17398 + info_ptr->altivec_padding_size
17399 + info_ptr->spe_gp_size
17400 + info_ptr->spe_padding_size
17402 + info_ptr->cr_size
17403 + info_ptr->vrsave_size,
17406 non_fixed_size = (info_ptr->vars_size
17407 + info_ptr->parm_size
17408 + info_ptr->save_size);
17410 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
17411 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
17413 /* Determine if we need to allocate any stack frame:
17415 For AIX we need to push the stack if a frame pointer is needed
17416 (because the stack might be dynamically adjusted), if we are
17417 debugging, if we make calls, or if the sum of fp_save, gp_save,
17418 and local variables are more than the space needed to save all
17419 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
17420 + 18*8 = 288 (GPR13 reserved).
17422 For V.4 we don't have the stack cushion that AIX uses, but assume
17423 that the debugger can handle stackless frames. */
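/* The 220/288 cushion above is 18 FPRs (f14-f31) at 8 bytes plus 19 GPRs
   (r13-r31) at 4 bytes = 220 for 32-bit, and 18 FPRs at 8 bytes plus
   18 GPRs (r14-r31, r13 being reserved) at 8 bytes = 288 for 64-bit.  */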
17425 if (info_ptr->calls_p)
17426 info_ptr->push_p = 1;
17428 else if (DEFAULT_ABI == ABI_V4)
17429 info_ptr->push_p = non_fixed_size != 0;
17431 else if (frame_pointer_needed)
17432 info_ptr->push_p = 1;
17434 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
17435 info_ptr->push_p = 1;
17438 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
17440 /* Zero offsets if we're not saving those registers. */
17441 if (info_ptr->fp_size == 0)
17442 info_ptr->fp_save_offset = 0;
17444 if (info_ptr->gp_size == 0)
17445 info_ptr->gp_save_offset = 0;
17447 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
17448 info_ptr->altivec_save_offset = 0;
17450 if (! TARGET_ALTIVEC_ABI || info_ptr->vrsave_mask == 0)
17451 info_ptr->vrsave_save_offset = 0;
17453 if (! TARGET_SPE_ABI
17454 || info_ptr->spe_64bit_regs_used == 0
17455 || info_ptr->spe_gp_size == 0)
17456 info_ptr->spe_gp_save_offset = 0;
17458 if (! info_ptr->lr_save_p)
17459 info_ptr->lr_save_offset = 0;
17461 if (! info_ptr->cr_save_p)
17462 info_ptr->cr_save_offset = 0;
17467 /* Return true if the current function uses any GPRs in 64-bit SIMD
17471 spe_func_has_64bit_regs_p (void)
17475 /* Functions that save and restore all the call-saved registers will
17476 need to save/restore the registers in 64-bits. */
17477 if (crtl->calls_eh_return
17478 || cfun->calls_setjmp
17479 || crtl->has_nonlocal_goto)
17482 insns = get_insns ();
17484 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
17490 /* FIXME: This should be implemented with attributes...
17492 (set_attr "spe64" "true")....then,
17493 if (get_spe64(insn)) return true;
17495 It's the only reliable way to do the stuff below. */
17497 i = PATTERN (insn);
17498 if (GET_CODE (i) == SET)
17500 enum machine_mode mode = GET_MODE (SET_SRC (i));
17502 if (SPE_VECTOR_MODE (mode))
17504 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
17514 debug_stack_info (rs6000_stack_t *info)
17516 const char *abi_string;
17519 info = rs6000_stack_info ();
17521 fprintf (stderr, "\nStack information for function %s:\n",
17522 ((current_function_decl && DECL_NAME (current_function_decl))
17523 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
17528 default: abi_string = "Unknown"; break;
17529 case ABI_NONE: abi_string = "NONE"; break;
17530 case ABI_AIX: abi_string = "AIX"; break;
17531 case ABI_DARWIN: abi_string = "Darwin"; break;
17532 case ABI_V4: abi_string = "V.4"; break;
17535 fprintf (stderr, "\tABI = %5s\n", abi_string);
17537 if (TARGET_ALTIVEC_ABI)
17538 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
17540 if (TARGET_SPE_ABI)
17541 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
17543 if (info->first_gp_reg_save != 32)
17544 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
17546 if (info->first_fp_reg_save != 64)
17547 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
17549 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
17550 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
17551 info->first_altivec_reg_save);
17553 if (info->lr_save_p)
17554 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
17556 if (info->cr_save_p)
17557 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
17559 if (info->vrsave_mask)
17560 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
17563 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
17566 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
17568 if (info->gp_save_offset)
17569 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
17571 if (info->fp_save_offset)
17572 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
17574 if (info->altivec_save_offset)
17575 fprintf (stderr, "\taltivec_save_offset = %5d\n",
17576 info->altivec_save_offset);
17578 if (info->spe_gp_save_offset)
17579 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
17580 info->spe_gp_save_offset);
17582 if (info->vrsave_save_offset)
17583 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
17584 info->vrsave_save_offset);
17586 if (info->lr_save_offset)
17587 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
17589 if (info->cr_save_offset)
17590 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
17592 if (info->varargs_save_offset)
17593 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
17595 if (info->total_size)
17596 fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
17599 if (info->vars_size)
17600 fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
17603 if (info->parm_size)
17604 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
17606 if (info->fixed_size)
17607 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
17610 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
17612 if (info->spe_gp_size)
17613 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
17616 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
17618 if (info->altivec_size)
17619 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
17621 if (info->vrsave_size)
17622 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
17624 if (info->altivec_padding_size)
17625 fprintf (stderr, "\taltivec_padding_size= %5d\n",
17626 info->altivec_padding_size);
17628 if (info->spe_padding_size)
17629 fprintf (stderr, "\tspe_padding_size = %5d\n",
17630 info->spe_padding_size);
17633 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
17635 if (info->save_size)
17636 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
17638 if (info->reg_size != 4)
17639 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
17641 fprintf (stderr, "\n");
17645 rs6000_return_addr (int count, rtx frame)
17647 /* Currently we don't optimize very well between prologue and body
17648 code, and for PIC code the result can be quite bad, so
17649 don't try to be too clever here.  */
17650 if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
17652 cfun->machine->ra_needs_full_frame = 1;
17659 plus_constant (copy_to_reg
17660 (gen_rtx_MEM (Pmode,
17661 memory_address (Pmode, frame))),
17662 RETURN_ADDRESS_OFFSET)));
17665 cfun->machine->ra_need_lr = 1;
17666 return get_hard_reg_initial_val (Pmode, LR_REGNO);
17669 /* Say whether a function is a candidate for sibcall handling or not.
17670 We do not allow indirect calls to be optimized into sibling calls.
17671 Also, we can't do it if there are any vector parameters; there's
17672 nowhere to put the VRsave code so it works; note that functions with
17673 vector parameters are required to have a prototype, so the argument
17674 type info must be available here. (The tail recursion case can work
17675 with vector parameters, but there's no way to distinguish here.) */
17677 rs6000_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
17682 if (TARGET_ALTIVEC_VRSAVE)
17684 for (type = TYPE_ARG_TYPES (TREE_TYPE (decl));
17685 type; type = TREE_CHAIN (type))
17687 if (TREE_CODE (TREE_VALUE (type)) == VECTOR_TYPE)
17691 if (DEFAULT_ABI == ABI_DARWIN
17692 || ((*targetm.binds_local_p) (decl)
17693 && (DEFAULT_ABI != ABI_AIX || !DECL_EXTERNAL (decl))))
17695 tree attr_list = TYPE_ATTRIBUTES (TREE_TYPE (decl));
17697 if (!lookup_attribute ("longcall", attr_list)
17698 || lookup_attribute ("shortcall", attr_list))
17705 /* Return NULL if INSN is valid within a low-overhead loop.
17706 Otherwise return why doloop cannot be applied.
17707 PowerPC uses the COUNT register for branch on table instructions. */
17709 static const char *
17710 rs6000_invalid_within_doloop (const_rtx insn)
17713 return "Function call in the loop.";
17716 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
17717 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
17718 return "Computed branch in the loop.";
17724 rs6000_ra_ever_killed (void)
17730 if (cfun->is_thunk)
17733 if (cfun->machine->lr_save_state)
17734 return cfun->machine->lr_save_state - 1;
17736 /* regs_ever_live has LR marked as used if any sibcalls are present,
17737 but this should not force saving and restoring in the
17738 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
17739 clobbers LR, so that is inappropriate. */
17741 /* Also, the prologue can generate a store into LR that
17742 doesn't really count, like this:
17745 bcl to set PIC register
17749 When we're called from the epilogue, we need to avoid counting
17750 this as a store. */
17752 push_topmost_sequence ();
17753 top = get_insns ();
17754 pop_topmost_sequence ();
17755 reg = gen_rtx_REG (Pmode, LR_REGNO);
17757 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
17763 if (!SIBLING_CALL_P (insn))
17766 else if (find_regno_note (insn, REG_INC, LR_REGNO))
17768 else if (set_of (reg, insn) != NULL_RTX
17769 && !prologue_epilogue_contains (insn))
17776 /* Emit instructions needed to load the TOC register.
17777 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
17778 a constant pool; or for SVR4 -fpic. */
17781 rs6000_emit_load_toc_table (int fromprolog)
17784 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
17786 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
17789 rtx lab, tmp1, tmp2, got;
17791 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
17792 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
17794 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
17796 got = rs6000_got_sym ();
17797 tmp1 = tmp2 = dest;
17800 tmp1 = gen_reg_rtx (Pmode);
17801 tmp2 = gen_reg_rtx (Pmode);
17803 emit_insn (gen_load_toc_v4_PIC_1 (lab));
17804 emit_move_insn (tmp1,
17805 gen_rtx_REG (Pmode, LR_REGNO));
17806 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
17807 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
17809 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
17811 emit_insn (gen_load_toc_v4_pic_si ());
17812 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
17814 else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
17817 rtx temp0 = (fromprolog
17818 ? gen_rtx_REG (Pmode, 0)
17819 : gen_reg_rtx (Pmode));
17825 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
17826 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
17828 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
17829 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
17831 emit_insn (gen_load_toc_v4_PIC_1 (symF));
17832 emit_move_insn (dest,
17833 gen_rtx_REG (Pmode, LR_REGNO));
17834 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
17840 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
17841 emit_insn (gen_load_toc_v4_PIC_1b (tocsym));
17842 emit_move_insn (dest,
17843 gen_rtx_REG (Pmode, LR_REGNO));
17844 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
17846 emit_insn (gen_addsi3 (dest, temp0, dest));
17848 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
17850 /* This is for AIX code running in non-PIC ELF32. */
17853 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
17854 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
17856 emit_insn (gen_elf_high (dest, realsym));
17857 emit_insn (gen_elf_low (dest, dest, realsym));
17861 gcc_assert (DEFAULT_ABI == ABI_AIX);
17864 emit_insn (gen_load_toc_aix_si (dest));
17866 emit_insn (gen_load_toc_aix_di (dest));
17870 /* Emit instructions to restore the link register after determining where
17871 its value has been stored. */
17874 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
17876 rs6000_stack_t *info = rs6000_stack_info ();
17879 operands[0] = source;
17880 operands[1] = scratch;
17882 if (info->lr_save_p)
17884 rtx frame_rtx = stack_pointer_rtx;
17885 HOST_WIDE_INT sp_offset = 0;
17888 if (frame_pointer_needed
17889 || cfun->calls_alloca
17890 || info->total_size > 32767)
17892 tmp = gen_frame_mem (Pmode, frame_rtx);
17893 emit_move_insn (operands[1], tmp);
17894 frame_rtx = operands[1];
17896 else if (info->push_p)
17897 sp_offset = info->total_size;
17899 tmp = plus_constant (frame_rtx, info->lr_save_offset + sp_offset);
17900 tmp = gen_frame_mem (Pmode, tmp);
17901 emit_move_insn (tmp, operands[0]);
17904 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
17906 /* Freeze lr_save_p. We've just emitted rtl that depends on the
17907 state of lr_save_p so any change from here on would be a bug. In
17908 particular, stop rs6000_ra_ever_killed from considering the SET
17909 of lr we may have added just above. */
17910 cfun->machine->lr_save_state = info->lr_save_p + 1;
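/* lr_save_state therefore encodes "not yet decided" as 0 and the frozen
   lr_save_p value as lr_save_state - 1, which is what
   rs6000_ra_ever_killed reads back.  */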
17913 static GTY(()) alias_set_type set = -1;
17916 get_TOC_alias_set (void)
17919 set = new_alias_set ();
17923 /* This returns nonzero if the current function uses the TOC. This is
17924 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
17925 is generated by the ABI_V4 load_toc_* patterns. */
17932 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
17935 rtx pat = PATTERN (insn);
17938 if (GET_CODE (pat) == PARALLEL)
17939 for (i = 0; i < XVECLEN (pat, 0); i++)
17941 rtx sub = XVECEXP (pat, 0, i);
17942 if (GET_CODE (sub) == USE)
17944 sub = XEXP (sub, 0);
17945 if (GET_CODE (sub) == UNSPEC
17946 && XINT (sub, 1) == UNSPEC_TOC)
17956 create_TOC_reference (rtx symbol)
17958 if (TARGET_DEBUG_ADDR)
17960 if (GET_CODE (symbol) == SYMBOL_REF)
17961 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
17965 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
17966 GET_RTX_NAME (GET_CODE (symbol)));
17967 debug_rtx (symbol);
17971 if (!can_create_pseudo_p ())
17972 df_set_regs_ever_live (TOC_REGISTER, true);
17973 return gen_rtx_PLUS (Pmode,
17974 gen_rtx_REG (Pmode, TOC_REGISTER),
17975 gen_rtx_CONST (Pmode,
17976 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_TOCREL)));
17979 /* Issue assembly directives that create a reference to the given DWARF
17980 FRAME_TABLE_LABEL from the current function section. */
17982 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
17984 fprintf (asm_out_file, "\t.ref %s\n",
17985 TARGET_STRIP_NAME_ENCODING (frame_table_label));
17988 /* If _Unwind_* has been called from within the same module,
17989 the TOC register is not guaranteed to be saved to 40(1) on function
17990 entry. Save it there in that case. */
17993 rs6000_aix_emit_builtin_unwind_init (void)
17996 rtx stack_top = gen_reg_rtx (Pmode);
17997 rtx opcode_addr = gen_reg_rtx (Pmode);
17998 rtx opcode = gen_reg_rtx (SImode);
17999 rtx tocompare = gen_reg_rtx (SImode);
18000 rtx no_toc_save_needed = gen_label_rtx ();
18002 mem = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
18003 emit_move_insn (stack_top, mem);
18005 mem = gen_frame_mem (Pmode,
18006 gen_rtx_PLUS (Pmode, stack_top,
18007 GEN_INT (2 * GET_MODE_SIZE (Pmode))));
18008 emit_move_insn (opcode_addr, mem);
18009 emit_move_insn (opcode, gen_rtx_MEM (SImode, opcode_addr));
18010 emit_move_insn (tocompare, gen_int_mode (TARGET_32BIT ? 0x80410014
18011 : 0xE8410028, SImode));
18013 do_compare_rtx_and_jump (opcode, tocompare, EQ, 1,
18014 SImode, NULL_RTX, NULL_RTX,
18015 no_toc_save_needed, -1);
18017 mem = gen_frame_mem (Pmode,
18018 gen_rtx_PLUS (Pmode, stack_top,
18019 GEN_INT (5 * GET_MODE_SIZE (Pmode))));
18020 emit_move_insn (mem, gen_rtx_REG (Pmode, 2));
18021 emit_label (no_toc_save_needed);
18024 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
18025 and the change to the stack pointer. */
18028 rs6000_emit_stack_tie (void)
18030 rtx mem = gen_frame_mem (BLKmode,
18031 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
18033 emit_insn (gen_stack_tie (mem));
18036 /* Emit the correct code for allocating stack space, as insns.
18037 If COPY_REG, make sure a copy of the old frame is left there.
18038 The generated code may use hard register 0 as a temporary. */
18041 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg)
18044 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
18045 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
18046 rtx todec = gen_int_mode (-size, Pmode);
18049 if (INTVAL (todec) != -size)
18051 warning (0, "stack frame too large");
18052 emit_insn (gen_trap ());
18056 if (crtl->limit_stack)
18058 if (REG_P (stack_limit_rtx)
18059 && REGNO (stack_limit_rtx) > 1
18060 && REGNO (stack_limit_rtx) <= 31)
18062 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
18063 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
18066 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
18068 && DEFAULT_ABI == ABI_V4)
18070 rtx toload = gen_rtx_CONST (VOIDmode,
18071 gen_rtx_PLUS (Pmode,
18075 emit_insn (gen_elf_high (tmp_reg, toload));
18076 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
18077 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
18081 warning (0, "stack limit expression is not supported");
18085 emit_move_insn (copy_reg, stack_reg);
18089 /* Need a note here so that try_split doesn't get confused. */
18090 if (get_last_insn () == NULL_RTX)
18091 emit_note (NOTE_INSN_DELETED);
18092 insn = emit_move_insn (tmp_reg, todec);
18093 try_split (PATTERN (insn), insn, 0);
18097 insn = emit_insn (TARGET_32BIT
18098 ? gen_movsi_update_stack (stack_reg, stack_reg,
18100 : gen_movdi_di_update_stack (stack_reg, stack_reg,
18101 todec, stack_reg));
18102 /* Since we didn't use gen_frame_mem to generate the MEM, grab
18103 it now and set the alias set/attributes. The above gen_*_update
18104 calls will generate a PARALLEL with the MEM set being the first
18106 par = PATTERN (insn);
18107 gcc_assert (GET_CODE (par) == PARALLEL);
18108 set = XVECEXP (par, 0, 0);
18109 gcc_assert (GET_CODE (set) == SET);
18110 mem = SET_DEST (set);
18111 gcc_assert (MEM_P (mem));
18112 MEM_NOTRAP_P (mem) = 1;
18113 set_mem_alias_set (mem, get_frame_alias_set ());
18115 RTX_FRAME_RELATED_P (insn) = 1;
18116 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
18117 gen_rtx_SET (VOIDmode, stack_reg,
18118 gen_rtx_PLUS (Pmode, stack_reg,
18119 GEN_INT (-size))));
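/* The update-form move emitted above amounts to a single
   "stwu r1,-SIZE(r1)" (or "stdu" in 64-bit mode), so the back chain is
   written and the stack pointer adjusted by one instruction.  (A sketch
   of the intent; the exact insn comes from the *_update_stack
   patterns.)  */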
18122 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
18123 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
18124 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
18125 deduce these equivalences by itself so it wasn't necessary to hold
18126 its hand so much. */
18129 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
18130 rtx reg2, rtx rreg)
18134 /* copy_rtx will not make unique copies of registers, so we need to
18135 ensure we don't have unwanted sharing here. */
18137 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
18140 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
18142 real = copy_rtx (PATTERN (insn));
18144 if (reg2 != NULL_RTX)
18145 real = replace_rtx (real, reg2, rreg);
18147 real = replace_rtx (real, reg,
18148 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
18149 STACK_POINTER_REGNUM),
18152 /* We expect that 'real' is either a SET or a PARALLEL containing
18153 SETs (and possibly other stuff). In a PARALLEL, all the SETs
18154 are important so they all have to be marked RTX_FRAME_RELATED_P. */
18156 if (GET_CODE (real) == SET)
18160 temp = simplify_rtx (SET_SRC (set));
18162 SET_SRC (set) = temp;
18163 temp = simplify_rtx (SET_DEST (set));
18165 SET_DEST (set) = temp;
18166 if (GET_CODE (SET_DEST (set)) == MEM)
18168 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
18170 XEXP (SET_DEST (set), 0) = temp;
18177 gcc_assert (GET_CODE (real) == PARALLEL);
18178 for (i = 0; i < XVECLEN (real, 0); i++)
18179 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
18181 rtx set = XVECEXP (real, 0, i);
18183 temp = simplify_rtx (SET_SRC (set));
18185 SET_SRC (set) = temp;
18186 temp = simplify_rtx (SET_DEST (set));
18188 SET_DEST (set) = temp;
18189 if (GET_CODE (SET_DEST (set)) == MEM)
18191 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
18193 XEXP (SET_DEST (set), 0) = temp;
18195 RTX_FRAME_RELATED_P (set) = 1;
18199 RTX_FRAME_RELATED_P (insn) = 1;
18200 add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
18203 /* Returns an insn that has a vrsave set operation with the
18204 appropriate CLOBBERs. */
18207 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
18210 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
18211 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
18214 = gen_rtx_SET (VOIDmode,
18216 gen_rtx_UNSPEC_VOLATILE (SImode,
18217 gen_rtvec (2, reg, vrsave),
18218 UNSPECV_SET_VRSAVE));
18222 /* We need to clobber the registers in the mask so the scheduler
18223 does not move sets to VRSAVE before sets of AltiVec registers.
18225 However, if the function receives nonlocal gotos, reload will set
18226 all call saved registers live. We will end up with:
18228 (set (reg 999) (mem))
18229 (parallel [ (set (reg vrsave) (unspec blah))
18230 (clobber (reg 999))])
18232 The clobber will cause the store into reg 999 to be dead, and
18233 flow will attempt to delete an epilogue insn. In this case, we
18234 need an unspec use/set of the register. */
18236 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
18237 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
18239 if (!epiloguep || call_used_regs [i])
18240 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
18241 gen_rtx_REG (V4SImode, i));
18244 rtx reg = gen_rtx_REG (V4SImode, i);
18247 = gen_rtx_SET (VOIDmode,
18249 gen_rtx_UNSPEC (V4SImode,
18250 gen_rtvec (1, reg), 27));
18254 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
18256 for (i = 0; i < nclobs; ++i)
18257 XVECEXP (insn, 0, i) = clobs[i];
18262 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
18263 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
18266 emit_frame_save (rtx frame_reg, rtx frame_ptr, enum machine_mode mode,
18267 unsigned int regno, int offset, HOST_WIDE_INT total_size)
18269 rtx reg, offset_rtx, insn, mem, addr, int_rtx;
18270 rtx replacea, replaceb;
18272 int_rtx = GEN_INT (offset);
18274 /* Some cases that need register indexed addressing. */
18275 if ((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
18276 || (TARGET_VSX && VSX_VECTOR_MODE (mode))
18277 || (TARGET_E500_DOUBLE && mode == DFmode)
18279 && SPE_VECTOR_MODE (mode)
18280 && !SPE_CONST_OFFSET_OK (offset)))
18282 /* Whoever calls us must make sure r11 is available in the
18283 flow path of instructions in the prologue. */
18284 offset_rtx = gen_rtx_REG (Pmode, 11);
18285 emit_move_insn (offset_rtx, int_rtx);
18287 replacea = offset_rtx;
18288 replaceb = int_rtx;
18292 offset_rtx = int_rtx;
18293 replacea = NULL_RTX;
18294 replaceb = NULL_RTX;
18297 reg = gen_rtx_REG (mode, regno);
18298 addr = gen_rtx_PLUS (Pmode, frame_reg, offset_rtx);
18299 mem = gen_frame_mem (mode, addr);
18301 insn = emit_move_insn (mem, reg);
18303 rs6000_frame_related (insn, frame_ptr, total_size, replacea, replaceb);
18306 /* Emit an offset memory reference suitable for a frame store, while
18307 converting to a valid addressing mode. */
18310 gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
18312 rtx int_rtx, offset_rtx;
18314 int_rtx = GEN_INT (offset);
18316 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode))
18317 || (TARGET_E500_DOUBLE && mode == DFmode))
18319 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
18320 emit_move_insn (offset_rtx, int_rtx);
18323 offset_rtx = int_rtx;
18325 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
18328 /* Look for user-defined global regs. We should not save and restore these,
18329 and cannot use stmw/lmw if any fall within the range being saved or restored.  */
18332 no_global_regs_above (int first, bool gpr)
18335 int last = gpr ? 32 : 64;
18336 for (i = first; i < last; i++)
18337 if (global_regs[i])
18342 #ifndef TARGET_FIX_AND_CONTINUE
18343 #define TARGET_FIX_AND_CONTINUE 0
18346 /* It's really GPR 13 and FPR 14, but we need the smaller of the two. */
18347 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
18348 #define LAST_SAVRES_REGISTER 31
18349 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
18351 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][8];
18353 /* Temporary holding space for an out-of-line register save/restore
18355 static char savres_routine_name[30];
18357 /* Return the name for an out-of-line register save/restore routine.
18358 We are saving/restoring GPRs if GPR is true. */
18361 rs6000_savres_routine_name (rs6000_stack_t *info, int regno,
18362 bool savep, bool gpr, bool lr)
18364 const char *prefix = "";
18365 const char *suffix = "";
18367 /* Different targets are supposed to define
18368 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
18369 routine name could be defined with:
18371 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
18373 This is a nice idea in theory, but in reality, things are
18374 complicated in several ways:
18376 - ELF targets have save/restore routines for GPRs.
18378 - SPE targets use different prefixes for 32/64-bit registers, and
18379 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
18381 - PPC64 ELF targets have routines for save/restore of GPRs that
18382 differ in what they do with the link register, so having a set
18383 prefix doesn't work. (We only use one of the save routines at
18384 the moment, though.)
18386 - PPC32 elf targets have "exit" versions of the restore routines
18387 that restore the link register and can save some extra space.
18388 These require an extra suffix. (There are also "tail" versions
18389 of the restore routines and "GOT" versions of the save routines,
18390 but we don't generate those at present.  Same problems apply, though.)
18393 We deal with all this by synthesizing our own prefix/suffix and
18394 using that for the simple sprintf call shown above. */
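/* For instance, with first_gp_reg_save == 28 the sprintf below yields
   "_savegpr_28" / "_restgpr_28" for 32-bit SVR4, "_savegpr0_28" or
   "_savegpr1_28" for the 64-bit ELF flavour depending on LR handling,
   and "_save64gpr_28" on SPE targets using 64-bit GPRs (examples put
   together from the prefixes chosen below).  */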
18397 /* No floating point saves on the SPE. */
18401 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
18403 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
18408 else if (DEFAULT_ABI == ABI_V4)
18414 prefix = savep ? "_savegpr_" : "_restgpr_";
18416 prefix = savep ? "_savefpr_" : "_restfpr_";
18421 else if (DEFAULT_ABI == ABI_AIX)
18423 #ifndef POWERPC_LINUX
18424 /* No out-of-line save/restore routines for GPRs on AIX. */
18425 gcc_assert (!TARGET_AIX || !gpr);
18431 ? (lr ? "_savegpr0_" : "_savegpr1_")
18432 : (lr ? "_restgpr0_" : "_restgpr1_"));
18433 #ifdef POWERPC_LINUX
18435 prefix = (savep ? "_savefpr_" : "_restfpr_");
18439 prefix = savep ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
18440 suffix = savep ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
18443 else if (DEFAULT_ABI == ABI_DARWIN)
18444 sorry ("Out-of-line save/restore routines not supported on Darwin");
18446 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
18448 return savres_routine_name;
18451 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
18452 We are saving/restoring GPRs if GPR is true. */
18455 rs6000_savres_routine_sym (rs6000_stack_t *info, bool savep,
18458 int regno = gpr ? info->first_gp_reg_save : (info->first_fp_reg_save - 32);
18460 int select = ((savep ? 1 : 0) << 2
18462 /* On the SPE, we never have any FPRs, but we do have
18463 32/64-bit versions of the routines. */
18464 ? (info->spe_64bit_regs_used ? 1 : 0)
18465 : (gpr ? 1 : 0)) << 1)
18468 /* Don't generate bogus routine names. */
18469 gcc_assert (FIRST_SAVRES_REGISTER <= regno
18470 && regno <= LAST_SAVRES_REGISTER);
18472 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
18478 name = rs6000_savres_routine_name (info, regno, savep, gpr, lr);
18480 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
18481 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
18482 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
18488 /* Emit a sequence of insns, including a stack tie if needed, for
18489 resetting the stack pointer. If SAVRES is true, then don't reset the
18490 stack pointer, but move the base of the frame into r11 for use by
18491 out-of-line register restore routines. */
18494 rs6000_emit_stack_reset (rs6000_stack_t *info,
18495 rtx sp_reg_rtx, rtx frame_reg_rtx,
18496 int sp_offset, bool savres)
18498 /* This blockage is needed so that sched doesn't decide to move
18499 the sp change before the register restores. */
18500 if (frame_reg_rtx != sp_reg_rtx
18502 && info->spe_64bit_regs_used != 0
18503 && info->first_gp_reg_save != 32))
18504 rs6000_emit_stack_tie ();
18506 if (frame_reg_rtx != sp_reg_rtx)
18508 if (sp_offset != 0)
18510 rtx dest_reg = savres ? gen_rtx_REG (Pmode, 11) : sp_reg_rtx;
18511 return emit_insn (gen_add3_insn (dest_reg, frame_reg_rtx,
18512 GEN_INT (sp_offset)));
18515 return emit_move_insn (sp_reg_rtx, frame_reg_rtx);
18517 else if (sp_offset != 0)
18519 /* If we are restoring registers out-of-line, we will be using the
18520 "exit" variants of the restore routines, which will reset the
18521 stack for us. But we do need to point r11 into the right place
18522 for those routines. */
18523 rtx dest_reg = (savres
18524 ? gen_rtx_REG (Pmode, 11)
18527 rtx insn = emit_insn (gen_add3_insn (dest_reg, sp_reg_rtx,
18528 GEN_INT (sp_offset)));
18535 /* Construct a parallel rtx describing the effect of a call to an
18536 out-of-line register save/restore routine. */
18539 rs6000_make_savres_rtx (rs6000_stack_t *info,
18540 rtx frame_reg_rtx, int save_area_offset,
18541 enum machine_mode reg_mode,
18542 bool savep, bool gpr, bool lr)
18545 int offset, start_reg, end_reg, n_regs;
18546 int reg_size = GET_MODE_SIZE (reg_mode);
18552 ? info->first_gp_reg_save
18553 : info->first_fp_reg_save);
18554 end_reg = gpr ? 32 : 64;
18555 n_regs = end_reg - start_reg;
18556 p = rtvec_alloc ((lr ? 4 : 3) + n_regs);
18559 RTVEC_ELT (p, offset++) = gen_rtx_RETURN (VOIDmode);
18561 RTVEC_ELT (p, offset++)
18562 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 65));
18564 sym = rs6000_savres_routine_sym (info, savep, gpr, lr);
18565 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
18566 RTVEC_ELT (p, offset++)
18567 = gen_rtx_USE (VOIDmode,
18568 gen_rtx_REG (Pmode, DEFAULT_ABI != ABI_AIX ? 11
18572 for (i = 0; i < end_reg - start_reg; i++)
18574 rtx addr, reg, mem;
18575 reg = gen_rtx_REG (reg_mode, start_reg + i);
18576 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
18577 GEN_INT (save_area_offset + reg_size*i));
18578 mem = gen_frame_mem (reg_mode, addr);
18580 RTVEC_ELT (p, i + offset) = gen_rtx_SET (VOIDmode,
18582 savep ? reg : mem);
18587 rtx addr, reg, mem;
18588 reg = gen_rtx_REG (Pmode, 0);
18589 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
18590 GEN_INT (info->lr_save_offset));
18591 mem = gen_frame_mem (Pmode, addr);
18592 RTVEC_ELT (p, i + offset) = gen_rtx_SET (VOIDmode, mem, reg);
18595 return gen_rtx_PARALLEL (VOIDmode, p);
18598 /* Determine whether the gp REG is really used. */
18601 rs6000_reg_live_or_pic_offset_p (int reg)
18603 return ((df_regs_ever_live_p (reg)
18604 && (!call_used_regs[reg]
18605 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
18606 && TARGET_TOC && TARGET_MINIMAL_TOC)))
18607 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
18608 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
18609 || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
18613 SAVRES_MULTIPLE = 0x1,
18614 SAVRES_INLINE_FPRS = 0x2,
18615 SAVRES_INLINE_GPRS = 0x4,
18616 SAVRES_NOINLINE_GPRS_SAVES_LR = 0x8,
18617 SAVRES_NOINLINE_FPRS_SAVES_LR = 0x10,
18618 SAVRES_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x20
18621 /* Determine the strategy for savings/restoring registers. */
18624 rs6000_savres_strategy (rs6000_stack_t *info, bool savep,
18625 int using_static_chain_p, int sibcall)
18627 bool using_multiple_p;
18629 bool savres_fprs_inline;
18630 bool savres_gprs_inline;
18631 bool noclobber_global_gprs
18632 = no_global_regs_above (info->first_gp_reg_save, /*gpr=*/true);
18635 using_multiple_p = (TARGET_MULTIPLE && ! TARGET_POWERPC64
18636 && (!TARGET_SPE_ABI
18637 || info->spe_64bit_regs_used == 0)
18638 && info->first_gp_reg_save < 31
18639 && noclobber_global_gprs);
18640 /* Don't bother to try to save things out-of-line if r11 is occupied
18641 by the static chain. It would require too much fiddling and the
18642 static chain is rarely used anyway. */
18643 common = (using_static_chain_p
18645 || crtl->calls_eh_return
18646 || !info->lr_save_p
18647 || cfun->machine->ra_need_lr
18648 || info->total_size > 32767);
18649 savres_fprs_inline = (common
18650 || info->first_fp_reg_save == 64
18651 || !no_global_regs_above (info->first_fp_reg_save,
18653 /* The out-of-line FP routines use
18654 double-precision stores; we can't use those
18655 routines if we don't have such stores. */
18656 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
18657 || FP_SAVE_INLINE (info->first_fp_reg_save));
18658 savres_gprs_inline = (common
18659 /* Saving CR interferes with the exit routines
18660 used on the SPE, so just punt here. */
18663 && info->spe_64bit_regs_used != 0
18664 && info->cr_save_p != 0)
18665 || info->first_gp_reg_save == 32
18666 || !noclobber_global_gprs
18667 || GP_SAVE_INLINE (info->first_gp_reg_save));
18670 /* If we are going to use store multiple, then don't even bother
18671 with the out-of-line routines, since the store-multiple instruction
18672 will always be smaller. */
18673 savres_gprs_inline = savres_gprs_inline || using_multiple_p;
18676 /* The situation is more complicated with load multiple. We'd
18677 prefer to use the out-of-line routines for restores, since the
18678 "exit" out-of-line routines can handle the restore of LR and
18679 the frame teardown. But we can only use the out-of-line
18680 routines if we know that we've used store multiple or
18681 out-of-line routines in the prologue, i.e. if we've saved all
18682 the registers from first_gp_reg_save. Otherwise, we risk
18683 loading garbage from the stack. Furthermore, we can only use
18684 the "exit" out-of-line gpr restore if we haven't saved any
18686 bool saved_all = !savres_gprs_inline || using_multiple_p;
18688 if (saved_all && info->first_fp_reg_save != 64)
18689 /* We can't use the exit routine; use load multiple if it's available.  */
18691 savres_gprs_inline = savres_gprs_inline || using_multiple_p;
18694 strategy = (using_multiple_p
18695 | (savres_fprs_inline << 1)
18696 | (savres_gprs_inline << 2));
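/* This packs the flags so that bit 0 is SAVRES_MULTIPLE, bit 1 is
   SAVRES_INLINE_FPRS and bit 2 is SAVRES_INLINE_GPRS; e.g. inline FPR
   and GPR saves with no store-multiple gives strategy == 0x6.  */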
18697 #ifdef POWERPC_LINUX
18700 if (!savres_fprs_inline)
18701 strategy |= SAVRES_NOINLINE_FPRS_SAVES_LR;
18702 else if (!savres_gprs_inline && info->first_fp_reg_save == 64)
18703 strategy |= SAVRES_NOINLINE_GPRS_SAVES_LR;
18706 if (TARGET_AIX && !savres_fprs_inline)
18707 strategy |= SAVRES_NOINLINE_FPRS_DOESNT_RESTORE_LR;
18712 /* Emit function prologue as insns. */
18715 rs6000_emit_prologue (void)
18717 rs6000_stack_t *info = rs6000_stack_info ();
18718 enum machine_mode reg_mode = Pmode;
18719 int reg_size = TARGET_32BIT ? 4 : 8;
18720 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
18721 rtx frame_ptr_rtx = gen_rtx_REG (Pmode, 12);
18722 rtx frame_reg_rtx = sp_reg_rtx;
18723 rtx cr_save_rtx = NULL_RTX;
18726 int saving_FPRs_inline;
18727 int saving_GPRs_inline;
18728 int using_store_multiple;
18729 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
18730 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
18731 && call_used_regs[STATIC_CHAIN_REGNUM]);
18732 HOST_WIDE_INT sp_offset = 0;
18734 if (TARGET_FIX_AND_CONTINUE)
18736 /* gdb on darwin arranges to forward a function from the old
18737 address by modifying the first 5 instructions of the function
18738 to branch to the overriding function. This is necessary to
18739 permit function pointers that point to the old function to
18740 actually forward to the new function. */
18741 emit_insn (gen_nop ());
18742 emit_insn (gen_nop ());
18743 emit_insn (gen_nop ());
18744 emit_insn (gen_nop ());
18745 emit_insn (gen_nop ());
18748 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
18750 reg_mode = V2SImode;
18754 strategy = rs6000_savres_strategy (info, /*savep=*/true,
18755 /*static_chain_p=*/using_static_chain_p,
18757 using_store_multiple = strategy & SAVRES_MULTIPLE;
18758 saving_FPRs_inline = strategy & SAVRES_INLINE_FPRS;
18759 saving_GPRs_inline = strategy & SAVRES_INLINE_GPRS;
18761 /* For V.4, update stack before we do any saving and set back pointer. */
18762 if (! WORLD_SAVE_P (info) && info->push_p
18764 && (DEFAULT_ABI == ABI_V4
18765 || crtl->calls_eh_return))
18767 bool need_r11 = (TARGET_SPE
18768 ? (!saving_GPRs_inline
18769 && info->spe_64bit_regs_used == 0)
18770 : (!saving_FPRs_inline || !saving_GPRs_inline));
18771 rtx copy_reg = need_r11 ? gen_rtx_REG (Pmode, 11) : NULL;
18773 if (info->total_size < 32767)
18774 sp_offset = info->total_size;
18776 else if (need_r11) frame_reg_rtx = copy_reg;
18777 else if (info->cr_save_p
18779 || info->first_fp_reg_save < 64
18780 || info->first_gp_reg_save < 32
18781 || info->altivec_size != 0
18782 || info->vrsave_mask != 0
18783 || crtl->calls_eh_return)
18785 copy_reg = frame_ptr_rtx;
18786 frame_reg_rtx = copy_reg;
18790 /* The prologue won't be saving any regs so there is no need
18791 to set up a frame register to access any frame save area.
18792 We also won't be using sp_offset anywhere below, but set
18793 the correct value anyway to protect against future
18794 changes to this function. */
18795 sp_offset = info->total_size;
18797 rs6000_emit_allocate_stack (info->total_size, copy_reg);
18798 if (frame_reg_rtx != sp_reg_rtx)
18799 rs6000_emit_stack_tie ();
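/* The stack tie emitted above keeps the scheduler from moving the
   register saves that follow across the stack pointer update.  */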
18802 /* Handle world saves specially here. */
18803 if (WORLD_SAVE_P (info))
18810 /* save_world expects lr in r0. */
18811 reg0 = gen_rtx_REG (Pmode, 0);
18812 if (info->lr_save_p)
18814 insn = emit_move_insn (reg0,
18815 gen_rtx_REG (Pmode, LR_REGNO));
18816 RTX_FRAME_RELATED_P (insn) = 1;
18819 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
18820 assumptions about the offsets of various bits of the stack frame. */
18822 gcc_assert (info->gp_save_offset == -220
18823 && info->fp_save_offset == -144
18824 && info->lr_save_offset == 8
18825 && info->cr_save_offset == 4
18828 && (!crtl->calls_eh_return
18829 || info->ehrd_offset == -432)
18830 && info->vrsave_save_offset == -224
18831 && info->altivec_save_offset == -416);
18833 treg = gen_rtx_REG (SImode, 11);
18834 emit_move_insn (treg, GEN_INT (-info->total_size));
18836 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
18837 in R11. It also clobbers R12, so beware! */
18839 /* Preserve CR2 for save_world prologues. */
18841 sz += 32 - info->first_gp_reg_save;
18842 sz += 64 - info->first_fp_reg_save;
18843 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
18844 p = rtvec_alloc (sz);
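/* Build one big PARALLEL for the save_world call: a clobber and the
   routine's symbol, followed by a SET for every FPR, AltiVec register
   and GPR slot, then CR2, LR (via r0), and finally the stack pointer
   update.  */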
18846 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
18847 gen_rtx_REG (SImode,
18849 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
18850 gen_rtx_SYMBOL_REF (Pmode,
18852 /* We do floats first so that the instruction pattern matches properly. */
18854 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
18856 rtx reg = gen_rtx_REG (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
18857 ? DFmode : SFmode),
18858 info->first_fp_reg_save + i);
18859 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
18860 GEN_INT (info->fp_save_offset
18861 + sp_offset + 8 * i));
18862 rtx mem = gen_frame_mem (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
18863 ? DFmode : SFmode), addr);
18865 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
18867 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
18869 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
18870 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
18871 GEN_INT (info->altivec_save_offset
18872 + sp_offset + 16 * i));
18873 rtx mem = gen_frame_mem (V4SImode, addr);
18875 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
18877 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
18879 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
18880 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
18881 GEN_INT (info->gp_save_offset
18882 + sp_offset + reg_size * i));
18883 rtx mem = gen_frame_mem (reg_mode, addr);
18885 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
18889 /* CR register traditionally saved as CR2. */
18890 rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
18891 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
18892 GEN_INT (info->cr_save_offset
18894 rtx mem = gen_frame_mem (reg_mode, addr);
18896 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
18898 /* Explain about use of R0. */
18899 if (info->lr_save_p)
18901 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
18902 GEN_INT (info->lr_save_offset
18904 rtx mem = gen_frame_mem (reg_mode, addr);
18906 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg0);
18908 /* Explain what happens to the stack pointer. */
18910 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
18911 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
18914 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
18915 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
18916 treg, GEN_INT (-info->total_size));
18917 sp_offset = info->total_size;
18920 /* If we use the link register, get it into r0. */
18921 if (!WORLD_SAVE_P (info) && info->lr_save_p)
18923 rtx addr, reg, mem;
18925 insn = emit_move_insn (gen_rtx_REG (Pmode, 0),
18926 gen_rtx_REG (Pmode, LR_REGNO));
18927 RTX_FRAME_RELATED_P (insn) = 1;
18929 if (!(strategy & (SAVRES_NOINLINE_GPRS_SAVES_LR
18930 | SAVRES_NOINLINE_FPRS_SAVES_LR)))
18932 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
18933 GEN_INT (info->lr_save_offset + sp_offset));
18934 reg = gen_rtx_REG (Pmode, 0);
18935 mem = gen_rtx_MEM (Pmode, addr);
18936 /* This store should not use rs6000_sr_alias_set, because of
18937 __builtin_return_address. */
18939 insn = emit_move_insn (mem, reg);
18940 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
18941 NULL_RTX, NULL_RTX);
18945 /* If we need to save CR, put it into r12 or r11. */
18946 if (!WORLD_SAVE_P (info) && info->cr_save_p && frame_reg_rtx != frame_ptr_rtx)
18951 = gen_rtx_REG (SImode, DEFAULT_ABI == ABI_AIX && !saving_GPRs_inline ? 11 : 12);
18953 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
18954 RTX_FRAME_RELATED_P (insn) = 1;
18955 /* Now, there's no way that dwarf2out_frame_debug_expr is going
18956 to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
18957 But that's OK. All we have to do is specify that _one_ condition
18958 code register is saved in this stack slot. The thrower's epilogue
18959 will then restore all the call-saved registers.
18960 We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
18961 set = gen_rtx_SET (VOIDmode, cr_save_rtx,
18962 gen_rtx_REG (SImode, CR2_REGNO));
18963 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
18966 /* Do any required saving of fpr's. If only one or two to save, do
18967 it ourselves. Otherwise, call function. */
18968 if (!WORLD_SAVE_P (info) && saving_FPRs_inline)
18971 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
18972 if ((df_regs_ever_live_p (info->first_fp_reg_save+i)
18973 && ! call_used_regs[info->first_fp_reg_save+i]))
18974 emit_frame_save (frame_reg_rtx, frame_ptr_rtx,
18975 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
18977 info->first_fp_reg_save + i,
18978 info->fp_save_offset + sp_offset + 8 * i,
18981 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
18985 par = rs6000_make_savres_rtx (info, frame_reg_rtx,
18986 info->fp_save_offset + sp_offset,
18988 /*savep=*/true, /*gpr=*/false,
18990 & SAVRES_NOINLINE_FPRS_SAVES_LR)
18992 insn = emit_insn (par);
18993 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
18994 NULL_RTX, NULL_RTX);
18997 /* Save GPRs. This is done as a PARALLEL if we are using
18998 the store-multiple instructions. */
18999 if (!WORLD_SAVE_P (info)
19001 && info->spe_64bit_regs_used != 0
19002 && info->first_gp_reg_save != 32)
19005 rtx spe_save_area_ptr;
19007 /* Determine whether we can address all of the registers that need
19008 to be saved with an offset from the stack pointer that fits in
19009 the small const field for SPE memory instructions. */
19010 int spe_regs_addressable_via_sp
19011 = (SPE_CONST_OFFSET_OK(info->spe_gp_save_offset + sp_offset
19012 + (32 - info->first_gp_reg_save - 1) * reg_size)
19013 && saving_GPRs_inline);
19016 if (spe_regs_addressable_via_sp)
19018 spe_save_area_ptr = frame_reg_rtx;
19019 spe_offset = info->spe_gp_save_offset + sp_offset;
19023 /* Make r11 point to the start of the SPE save area. We need
19024 to be careful here if r11 is holding the static chain. If
19025 it is, then temporarily save it in r0. We would use r0 as
19026 our base register here, but using r0 as a base register in
19027 loads and stores means something different from what we intended. */
19029 int ool_adjust = (saving_GPRs_inline ? 0
19031 : (info->first_gp_reg_save
19032 - (FIRST_SAVRES_REGISTER+1))*8);
19033 HOST_WIDE_INT offset = (info->spe_gp_save_offset
19034 + sp_offset - ool_adjust);
19036 if (using_static_chain_p)
19038 rtx r0 = gen_rtx_REG (Pmode, 0);
19039 gcc_assert (info->first_gp_reg_save > 11);
19041 emit_move_insn (r0, gen_rtx_REG (Pmode, 11));
19044 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
19045 insn = emit_insn (gen_addsi3 (spe_save_area_ptr,
19047 GEN_INT (offset)));
19048 /* We need to make sure the move to r11 gets noted for
19049 properly outputting unwind information. */
19050 if (!saving_GPRs_inline)
19051 rs6000_frame_related (insn, frame_reg_rtx, offset,
19052 NULL_RTX, NULL_RTX);
19056 if (saving_GPRs_inline)
19058 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19059 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
19061 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
19062 rtx offset, addr, mem;
19064 /* We're doing all this to ensure that the offset fits into
19065 the immediate offset of 'evstdd'. */
19066 gcc_assert (SPE_CONST_OFFSET_OK (reg_size * i + spe_offset));
19068 offset = GEN_INT (reg_size * i + spe_offset);
19069 addr = gen_rtx_PLUS (Pmode, spe_save_area_ptr, offset);
19070 mem = gen_rtx_MEM (V2SImode, addr);
19072 insn = emit_move_insn (mem, reg);
19074 rs6000_frame_related (insn, spe_save_area_ptr,
19075 info->spe_gp_save_offset
19076 + sp_offset + reg_size * i,
19077 offset, const0_rtx);
19084 par = rs6000_make_savres_rtx (info, gen_rtx_REG (Pmode, 11),
19086 /*savep=*/true, /*gpr=*/true,
19088 insn = emit_insn (par);
19089 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
19090 NULL_RTX, NULL_RTX);
19094 /* Move the static chain pointer back. */
19095 if (using_static_chain_p && !spe_regs_addressable_via_sp)
19096 emit_move_insn (gen_rtx_REG (Pmode, 11), gen_rtx_REG (Pmode, 0));
19098 else if (!WORLD_SAVE_P (info) && !saving_GPRs_inline)
19102 /* Need to adjust r11 (r12) if we saved any FPRs. */
19103 if (info->first_fp_reg_save != 64)
19105 rtx dest_reg = gen_rtx_REG (reg_mode, DEFAULT_ABI == ABI_AIX ? 12 : 11);
19107 rtx offset = GEN_INT (sp_offset
19108 + (-8 * (64-info->first_fp_reg_save)));
19109 emit_insn (gen_add3_insn (dest_reg, frame_reg_rtx, offset));
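/* The out-of-line GPR save routine addresses the GPR save slots relative
   to this register, so when FPRs are saved as well the base apparently
   has to sit below the FPR save area; that is what the
   -8 * (64 - first_fp_reg_save) term above accounts for.  */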
19112 par = rs6000_make_savres_rtx (info, frame_reg_rtx,
19113 info->gp_save_offset + sp_offset,
19115 /*savep=*/true, /*gpr=*/true,
19117 & SAVRES_NOINLINE_GPRS_SAVES_LR)
19119 insn = emit_insn (par);
19120 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
19121 NULL_RTX, NULL_RTX);
19123 else if (!WORLD_SAVE_P (info) && using_store_multiple)
19127 p = rtvec_alloc (32 - info->first_gp_reg_save);
19128 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19130 rtx addr, reg, mem;
19131 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
19132 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19133 GEN_INT (info->gp_save_offset
19136 mem = gen_frame_mem (reg_mode, addr);
19138 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, reg);
19140 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
19141 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
19142 NULL_RTX, NULL_RTX);
19144 else if (!WORLD_SAVE_P (info))
19147 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19148 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
19150 rtx addr, reg, mem;
19151 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
19153 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19154 GEN_INT (info->gp_save_offset
19157 mem = gen_frame_mem (reg_mode, addr);
19159 insn = emit_move_insn (mem, reg);
19160 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
19161 NULL_RTX, NULL_RTX);
19165 /* ??? There's no need to emit actual instructions here, but it's the
19166 easiest way to get the frame unwind information emitted. */
19167 if (crtl->calls_eh_return)
19169 unsigned int i, regno;
19171 /* In AIX ABI we need to pretend we save r2 here. */
19174 rtx addr, reg, mem;
19176 reg = gen_rtx_REG (reg_mode, 2);
19177 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19178 GEN_INT (sp_offset + 5 * reg_size));
19179 mem = gen_frame_mem (reg_mode, addr);
19181 insn = emit_move_insn (mem, reg);
19182 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
19183 NULL_RTX, NULL_RTX);
19184 PATTERN (insn) = gen_blockage ();
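/* The store above exists only so the unwinder records r2 as saved in its
   reserved slot; swapping its pattern for a blockage keeps the
   frame-related note while emitting no actual instruction.  */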
19189 regno = EH_RETURN_DATA_REGNO (i);
19190 if (regno == INVALID_REGNUM)
19193 emit_frame_save (frame_reg_rtx, frame_ptr_rtx, reg_mode, regno,
19194 info->ehrd_offset + sp_offset
19195 + reg_size * (int) i,
19200 /* Save CR if we use any that must be preserved. */
19201 if (!WORLD_SAVE_P (info) && info->cr_save_p)
19203 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19204 GEN_INT (info->cr_save_offset + sp_offset));
19205 rtx mem = gen_frame_mem (SImode, addr);
19206 /* See the large comment above about why CR2_REGNO is used. */
19207 rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);
19209 /* If r12 was used to hold the original sp, copy cr into r0 now that it is free. */
19211 if (REGNO (frame_reg_rtx) == 12)
19215 cr_save_rtx = gen_rtx_REG (SImode, 0);
19216 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
19217 RTX_FRAME_RELATED_P (insn) = 1;
19218 set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
19219 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
19221 insn = emit_move_insn (mem, cr_save_rtx);
19223 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
19224 NULL_RTX, NULL_RTX);
19227 /* Update stack and set back pointer unless this is V.4,
19228 for which it was done previously. */
19229 if (!WORLD_SAVE_P (info) && info->push_p
19230 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
19232 rtx copy_reg = NULL;
19234 if (info->total_size < 32767)
19235 sp_offset = info->total_size;
19236 else if (info->altivec_size != 0
19237 || info->vrsave_mask != 0)
19239 copy_reg = frame_ptr_rtx;
19240 frame_reg_rtx = copy_reg;
19243 sp_offset = info->total_size;
19244 rs6000_emit_allocate_stack (info->total_size, copy_reg);
19245 if (frame_reg_rtx != sp_reg_rtx)
19246 rs6000_emit_stack_tie ();
19249 /* Set frame pointer, if needed. */
19250 if (frame_pointer_needed)
19252 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
19254 RTX_FRAME_RELATED_P (insn) = 1;
19257 /* Save AltiVec registers if needed. Save here because the red zone does
19258 not include AltiVec registers. */
19259 if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI && info->altivec_size != 0)
19263 /* There should be a non-inline version of this, for when we
19264 are saving lots of vector registers. */
19265 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
19266 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
19268 rtx areg, savereg, mem;
19271 offset = info->altivec_save_offset + sp_offset
19272 + 16 * (i - info->first_altivec_reg_save);
19274 savereg = gen_rtx_REG (V4SImode, i);
19276 areg = gen_rtx_REG (Pmode, 0);
19277 emit_move_insn (areg, GEN_INT (offset));
19279 /* AltiVec addressing mode is [reg+reg]. */
19280 mem = gen_frame_mem (V4SImode,
19281 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
19283 insn = emit_move_insn (mem, savereg);
19285 rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
19286 areg, GEN_INT (offset));
19290 /* VRSAVE is a bit vector representing which AltiVec registers
19291 are used. The OS uses this to determine which vector
19292 registers to save on a context switch. We need to save
19293 VRSAVE on the stack frame, add whatever AltiVec registers we
19294 used in this function, and do the corresponding magic in the epilogue. */
19297 if (TARGET_ALTIVEC && TARGET_ALTIVEC_VRSAVE
19298 && info->vrsave_mask != 0)
19300 rtx reg, mem, vrsave;
19303 /* Get VRSAVE onto a GPR. Note that ABI_V4 might be using r12
19304 as frame_reg_rtx and r11 as the static chain pointer for
19305 nested functions. */
19306 reg = gen_rtx_REG (SImode, 0);
19307 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
19309 emit_insn (gen_get_vrsave_internal (reg));
19311 emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
19313 if (!WORLD_SAVE_P (info))
19316 offset = info->vrsave_save_offset + sp_offset;
19317 mem = gen_frame_mem (SImode,
19318 gen_rtx_PLUS (Pmode, frame_reg_rtx,
19319 GEN_INT (offset)));
19320 insn = emit_move_insn (mem, reg);
19323 /* Include the registers in the mask. */
19324 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
19326 insn = emit_insn (generate_set_vrsave (reg, info, 0));
19329 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
19330 if ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
19331 || (DEFAULT_ABI == ABI_V4
19332 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
19333 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM)))
19335 /* If emit_load_toc_table will use the link register, we need to save
19336 it. We use R12 for this purpose because emit_load_toc_table
19337 can use register 0. This allows us to use a plain 'blr' to return
19338 from the procedure more often. */
19339 int save_LR_around_toc_setup = (TARGET_ELF
19340 && DEFAULT_ABI != ABI_AIX
19342 && ! info->lr_save_p
19343 && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
19344 if (save_LR_around_toc_setup)
19346 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
19348 insn = emit_move_insn (frame_ptr_rtx, lr);
19349 RTX_FRAME_RELATED_P (insn) = 1;
19351 rs6000_emit_load_toc_table (TRUE);
19353 insn = emit_move_insn (lr, frame_ptr_rtx);
19354 RTX_FRAME_RELATED_P (insn) = 1;
19357 rs6000_emit_load_toc_table (TRUE);
19361 if (DEFAULT_ABI == ABI_DARWIN
19362 && flag_pic && crtl->uses_pic_offset_table)
19364 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
19365 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
19367 /* Save and restore LR locally around this call (in R0). */
19368 if (!info->lr_save_p)
19369 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
19371 emit_insn (gen_load_macho_picbase (src));
19373 emit_move_insn (gen_rtx_REG (Pmode,
19374 RS6000_PIC_OFFSET_TABLE_REGNUM),
19377 if (!info->lr_save_p)
19378 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
19383 /* Write function prologue. */
19386 rs6000_output_function_prologue (FILE *file,
19387 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
19389 rs6000_stack_t *info = rs6000_stack_info ();
19391 if (TARGET_DEBUG_STACK)
19392 debug_stack_info (info);
19394 /* Write .extern for any function we will call to save and restore fp values. */
19396 if (info->first_fp_reg_save < 64
19397 && !FP_SAVE_INLINE (info->first_fp_reg_save))
19400 int regno = info->first_fp_reg_save - 32;
19402 name = rs6000_savres_routine_name (info, regno, /*savep=*/true,
19403 /*gpr=*/false, /*lr=*/false);
19404 fprintf (file, "\t.extern %s\n", name);
19406 name = rs6000_savres_routine_name (info, regno, /*savep=*/false,
19407 /*gpr=*/false, /*lr=*/true);
19408 fprintf (file, "\t.extern %s\n", name);
19411 /* Write .extern for AIX common mode routines, if needed. */
19412 if (! TARGET_POWER && ! TARGET_POWERPC && ! common_mode_defined)
19414 fputs ("\t.extern __mulh\n", file);
19415 fputs ("\t.extern __mull\n", file);
19416 fputs ("\t.extern __divss\n", file);
19417 fputs ("\t.extern __divus\n", file);
19418 fputs ("\t.extern __quoss\n", file);
19419 fputs ("\t.extern __quous\n", file);
19420 common_mode_defined = 1;
19423 if (! HAVE_prologue)
19429 /* A NOTE_INSN_DELETED is supposed to be at the start and end of
19430 the "toplevel" insn chain. */
19431 emit_note (NOTE_INSN_DELETED);
19432 rs6000_emit_prologue ();
19433 emit_note (NOTE_INSN_DELETED);
19435 /* Expand INSN_ADDRESSES so final() doesn't crash. */
19439 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
19441 INSN_ADDRESSES_NEW (insn, addr);
19446 prologue = get_insns ();
19449 if (TARGET_DEBUG_STACK)
19450 debug_rtx_list (prologue, 100);
19452 emit_insn_before_noloc (prologue, BB_HEAD (ENTRY_BLOCK_PTR->next_bb),
19456 rs6000_pic_labelno++;
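/* Each function gets its own local PIC base label; the counter is bumped
   here so the label emitted for the next function stays distinct.  */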
19459 /* Non-zero if vmx regs are restored before the frame pop, zero if
19460 we restore after the pop when possible. */
19461 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
19463 /* Reload CR from REG. */
19466 rs6000_restore_saved_cr (rtx reg, int using_mfcr_multiple)
19471 if (using_mfcr_multiple)
19473 for (i = 0; i < 8; i++)
19474 if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
19476 gcc_assert (count);
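/* When mtcrf can update several fields at once and more than one CR
   field is live, restore them all with a single PARALLEL below;
   otherwise fall back to one movsi_to_cr_one per field.  */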
19479 if (using_mfcr_multiple && count > 1)
19484 p = rtvec_alloc (count);
19487 for (i = 0; i < 8; i++)
19488 if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
19490 rtvec r = rtvec_alloc (2);
19491 RTVEC_ELT (r, 0) = reg;
19492 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
19493 RTVEC_ELT (p, ndx) =
19494 gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO+i),
19495 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
19498 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
19499 gcc_assert (ndx == count);
19502 for (i = 0; i < 8; i++)
19503 if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
19505 emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode,
19511 /* Return true if OFFSET from stack pointer can be clobbered by signals.
19512 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
19513 below the stack pointer not clobbered by signals. */
19516 offset_below_red_zone_p (HOST_WIDE_INT offset)
19518 return offset < (DEFAULT_ABI == ABI_V4
19520 : TARGET_32BIT ? -220 : -288);
19523 /* Emit function epilogue as insns. */
19526 rs6000_emit_epilogue (int sibcall)
19528 rs6000_stack_t *info;
19529 int restoring_GPRs_inline;
19530 int restoring_FPRs_inline;
19531 int using_load_multiple;
19532 int using_mtcr_multiple;
19533 int use_backchain_to_restore_sp;
19537 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
19538 rtx frame_reg_rtx = sp_reg_rtx;
19539 rtx cfa_restores = NULL_RTX;
19541 rtx cr_save_reg = NULL_RTX;
19542 enum machine_mode reg_mode = Pmode;
19543 int reg_size = TARGET_32BIT ? 4 : 8;
19546 info = rs6000_stack_info ();
19548 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
19550 reg_mode = V2SImode;
19554 strategy = rs6000_savres_strategy (info, /*savep=*/false,
19555 /*static_chain_p=*/0, sibcall);
19556 using_load_multiple = strategy & SAVRES_MULTIPLE;
19557 restoring_FPRs_inline = strategy & SAVRES_INLINE_FPRS;
19558 restoring_GPRs_inline = strategy & SAVRES_INLINE_GPRS;
19559 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
19560 || rs6000_cpu == PROCESSOR_PPC603
19561 || rs6000_cpu == PROCESSOR_PPC750
19563 /* Restore via the backchain when we have a large frame, since this
19564 is more efficient than an addis, addi pair. The second condition
19565 here will not trigger at the moment; we don't actually need a
19566 frame pointer for alloca, but the generic parts of the compiler
19567 give us one anyway. */
19568 use_backchain_to_restore_sp = (info->total_size > 32767
19569 || info->total_size
19570 + (info->lr_save_p ? info->lr_save_offset : 0)
19572 || (cfun->calls_alloca
19573 && !frame_pointer_needed));
19574 restore_lr = (info->lr_save_p
19575 && (restoring_FPRs_inline
19576 || (strategy & SAVRES_NOINLINE_FPRS_DOESNT_RESTORE_LR))
19577 && (restoring_GPRs_inline
19578 || info->first_fp_reg_save < 64));
19580 if (WORLD_SAVE_P (info))
19584 const char *alloc_rname;
19587 /* eh_rest_world_r10 will return to the location saved in the LR
19588 stack slot (which is not likely to be our caller).
19589 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
19590 rest_world is similar, except any R10 parameter is ignored.
19591 The exception-handling stuff that was here in 2.95 is no
19592 longer necessary. */
19596 + 32 - info->first_gp_reg_save
19597 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
19598 + 63 + 1 - info->first_fp_reg_save);
19600 strcpy (rname, ((crtl->calls_eh_return) ?
19601 "*eh_rest_world_r10" : "*rest_world"));
19602 alloc_rname = ggc_strdup (rname);
19605 RTVEC_ELT (p, j++) = gen_rtx_RETURN (VOIDmode);
19606 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
19607 gen_rtx_REG (Pmode,
19610 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
19611 /* The instruction pattern requires a clobber here;
19612 it is shared with the restVEC helper. */
19614 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
19617 /* CR register traditionally saved as CR2. */
19618 rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
19619 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19620 GEN_INT (info->cr_save_offset));
19621 rtx mem = gen_frame_mem (reg_mode, addr);
19623 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
19626 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19628 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
19629 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19630 GEN_INT (info->gp_save_offset
19632 rtx mem = gen_frame_mem (reg_mode, addr);
19634 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
19636 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
19638 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
19639 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19640 GEN_INT (info->altivec_save_offset
19642 rtx mem = gen_frame_mem (V4SImode, addr);
19644 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
19646 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
19648 rtx reg = gen_rtx_REG (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
19649 ? DFmode : SFmode),
19650 info->first_fp_reg_save + i);
19651 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19652 GEN_INT (info->fp_save_offset
19654 rtx mem = gen_frame_mem (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
19655 ? DFmode : SFmode), addr);
19657 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
19660 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
19662 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
19664 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
19666 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
19668 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
19669 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
19674 /* frame_reg_rtx + sp_offset points to the top of this stack frame. */
19676 sp_offset = info->total_size;
19678 /* Restore AltiVec registers if we must do so before adjusting the stack. */
19680 if (TARGET_ALTIVEC_ABI
19681 && info->altivec_size != 0
19682 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
19683 || (DEFAULT_ABI != ABI_V4
19684 && offset_below_red_zone_p (info->altivec_save_offset))))
19688 if (use_backchain_to_restore_sp)
19690 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
19691 emit_move_insn (frame_reg_rtx,
19692 gen_rtx_MEM (Pmode, sp_reg_rtx));
19695 else if (frame_pointer_needed)
19696 frame_reg_rtx = hard_frame_pointer_rtx;
19698 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
19699 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
19701 rtx addr, areg, mem, reg;
19703 areg = gen_rtx_REG (Pmode, 0);
19705 (areg, GEN_INT (info->altivec_save_offset
19707 + 16 * (i - info->first_altivec_reg_save)));
19709 /* AltiVec addressing mode is [reg+reg]. */
19710 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
19711 mem = gen_frame_mem (V4SImode, addr);
19713 reg = gen_rtx_REG (V4SImode, i);
19714 emit_move_insn (reg, mem);
19715 if (offset_below_red_zone_p (info->altivec_save_offset
19716 + (i - info->first_altivec_reg_save)
19718 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
19723 /* Restore VRSAVE if we must do so before adjusting the stack. */
19725 && TARGET_ALTIVEC_VRSAVE
19726 && info->vrsave_mask != 0
19727 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
19728 || (DEFAULT_ABI != ABI_V4
19729 && offset_below_red_zone_p (info->vrsave_save_offset))))
19731 rtx addr, mem, reg;
19733 if (frame_reg_rtx == sp_reg_rtx)
19735 if (use_backchain_to_restore_sp)
19737 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
19738 emit_move_insn (frame_reg_rtx,
19739 gen_rtx_MEM (Pmode, sp_reg_rtx));
19742 else if (frame_pointer_needed)
19743 frame_reg_rtx = hard_frame_pointer_rtx;
19746 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19747 GEN_INT (info->vrsave_save_offset + sp_offset));
19748 mem = gen_frame_mem (SImode, addr);
19749 reg = gen_rtx_REG (SImode, 12);
19750 emit_move_insn (reg, mem);
19752 emit_insn (generate_set_vrsave (reg, info, 1));
19756 /* If we have a large stack frame, restore the old stack pointer
19757 using the backchain. */
19758 if (use_backchain_to_restore_sp)
19760 if (frame_reg_rtx == sp_reg_rtx)
19762 /* Under V.4, don't reset the stack pointer until after we're done
19763 loading the saved registers. */
19764 if (DEFAULT_ABI == ABI_V4)
19765 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
19767 insn = emit_move_insn (frame_reg_rtx,
19768 gen_rtx_MEM (Pmode, sp_reg_rtx));
19771 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
19772 && DEFAULT_ABI == ABI_V4)
19773 /* frame_reg_rtx has been set up by the altivec restore. */
19777 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
19778 frame_reg_rtx = sp_reg_rtx;
19781 /* If we have a frame pointer, we can restore the old stack pointer from it. */
19783 else if (frame_pointer_needed)
19785 frame_reg_rtx = sp_reg_rtx;
19786 if (DEFAULT_ABI == ABI_V4)
19787 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
19788 /* Prevent reordering memory accesses against stack pointer restore. */
19789 else if (cfun->calls_alloca
19790 || offset_below_red_zone_p (-info->total_size))
19792 rtx mem1 = gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx);
19793 rtx mem2 = gen_rtx_MEM (BLKmode, sp_reg_rtx);
19794 MEM_NOTRAP_P (mem1) = 1;
19795 MEM_NOTRAP_P (mem2) = 1;
19796 emit_insn (gen_frame_tie (mem1, mem2));
19799 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
19800 GEN_INT (info->total_size)));
19803 else if (info->push_p
19804 && DEFAULT_ABI != ABI_V4
19805 && !crtl->calls_eh_return)
19807 /* Prevent reordering memory accesses against stack pointer restore. */
19808 if (cfun->calls_alloca
19809 || offset_below_red_zone_p (-info->total_size))
19811 rtx mem = gen_rtx_MEM (BLKmode, sp_reg_rtx);
19812 MEM_NOTRAP_P (mem) = 1;
19813 emit_insn (gen_stack_tie (mem));
19815 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
19816 GEN_INT (info->total_size)));
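/* If the stack pointer itself was just restored, this is where the CFA
   returns to sp: attach any queued REG_CFA_RESTORE notes and record sp
   as the new CFA.  */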
19819 if (insn && frame_reg_rtx == sp_reg_rtx)
19823 REG_NOTES (insn) = cfa_restores;
19824 cfa_restores = NULL_RTX;
19826 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
19827 RTX_FRAME_RELATED_P (insn) = 1;
19830 /* Restore AltiVec registers if we have not done so already. */
19831 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
19832 && TARGET_ALTIVEC_ABI
19833 && info->altivec_size != 0
19834 && (DEFAULT_ABI == ABI_V4
19835 || !offset_below_red_zone_p (info->altivec_save_offset)))
19839 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
19840 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
19842 rtx addr, areg, mem, reg;
19844 areg = gen_rtx_REG (Pmode, 0);
19846 (areg, GEN_INT (info->altivec_save_offset
19848 + 16 * (i - info->first_altivec_reg_save)));
19850 /* AltiVec addressing mode is [reg+reg]. */
19851 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
19852 mem = gen_frame_mem (V4SImode, addr);
19854 reg = gen_rtx_REG (V4SImode, i);
19855 emit_move_insn (reg, mem);
19856 if (DEFAULT_ABI == ABI_V4)
19857 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
19862 /* Restore VRSAVE if we have not done so already. */
19863 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
19865 && TARGET_ALTIVEC_VRSAVE
19866 && info->vrsave_mask != 0
19867 && (DEFAULT_ABI == ABI_V4
19868 || !offset_below_red_zone_p (info->vrsave_save_offset)))
19870 rtx addr, mem, reg;
19872 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19873 GEN_INT (info->vrsave_save_offset + sp_offset));
19874 mem = gen_frame_mem (SImode, addr);
19875 reg = gen_rtx_REG (SImode, 12);
19876 emit_move_insn (reg, mem);
19878 emit_insn (generate_set_vrsave (reg, info, 1));
19881 /* Get the old lr if we saved it. If we are restoring registers
19882 out-of-line, then the out-of-line routines can do this for us. */
19883 if (restore_lr && restoring_GPRs_inline)
19885 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx,
19886 info->lr_save_offset + sp_offset);
19888 emit_move_insn (gen_rtx_REG (Pmode, 0), mem);
19891 /* Get the old cr if we saved it. */
19892 if (info->cr_save_p)
19894 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19895 GEN_INT (info->cr_save_offset + sp_offset));
19896 rtx mem = gen_frame_mem (SImode, addr);
19898 cr_save_reg = gen_rtx_REG (SImode,
19899 DEFAULT_ABI == ABI_AIX
19900 && !restoring_GPRs_inline
19901 && info->first_fp_reg_save < 64
19903 emit_move_insn (cr_save_reg, mem);
19906 /* Set LR here to try to overlap restores below. LR is always saved
19907 above incoming stack, so it never needs REG_CFA_RESTORE. */
19908 if (restore_lr && restoring_GPRs_inline)
19909 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO),
19910 gen_rtx_REG (Pmode, 0));
19912 /* Load exception handler data registers, if needed. */
19913 if (crtl->calls_eh_return)
19915 unsigned int i, regno;
19919 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19920 GEN_INT (sp_offset + 5 * reg_size));
19921 rtx mem = gen_frame_mem (reg_mode, addr);
19923 emit_move_insn (gen_rtx_REG (reg_mode, 2), mem);
19930 regno = EH_RETURN_DATA_REGNO (i);
19931 if (regno == INVALID_REGNUM)
19934 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
19935 info->ehrd_offset + sp_offset
19936 + reg_size * (int) i);
19938 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
19942 /* Restore GPRs. This is done as a PARALLEL if we are using
19943 the load-multiple instructions. */
19945 && info->spe_64bit_regs_used != 0
19946 && info->first_gp_reg_save != 32)
19948 /* Determine whether we can address all of the registers that need
19949 to be saved with an offset from the stack pointer that fits in
19950 the small const field for SPE memory instructions. */
19951 int spe_regs_addressable_via_sp
19952 = (SPE_CONST_OFFSET_OK(info->spe_gp_save_offset + sp_offset
19953 + (32 - info->first_gp_reg_save - 1) * reg_size)
19954 && restoring_GPRs_inline);
19957 if (spe_regs_addressable_via_sp)
19958 spe_offset = info->spe_gp_save_offset + sp_offset;
19961 rtx old_frame_reg_rtx = frame_reg_rtx;
19962 /* Make r11 point to the start of the SPE save area. We worried about
19963 not clobbering it when we were saving registers in the prologue.
19964 There's no need to worry here because the static chain is passed
19965 anew to every function. */
19966 int ool_adjust = (restoring_GPRs_inline
19968 : (info->first_gp_reg_save
19969 - (FIRST_SAVRES_REGISTER+1))*8);
19971 if (frame_reg_rtx == sp_reg_rtx)
19972 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
19973 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
19974 GEN_INT (info->spe_gp_save_offset
19977 /* Keep the invariant that frame_reg_rtx + sp_offset points
19978 at the top of the stack frame. */
19979 sp_offset = -info->spe_gp_save_offset;
19984 if (restoring_GPRs_inline)
19986 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19987 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
19989 rtx offset, addr, mem, reg;
19991 /* We're doing all this to ensure that the immediate offset
19992 fits into the immediate field of 'evldd'. */
19993 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
19995 offset = GEN_INT (spe_offset + reg_size * i);
19996 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
19997 mem = gen_rtx_MEM (V2SImode, addr);
19998 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
20000 insn = emit_move_insn (reg, mem);
20001 if (DEFAULT_ABI == ABI_V4)
20003 if (frame_pointer_needed
20004 && info->first_gp_reg_save + i
20005 == HARD_FRAME_POINTER_REGNUM)
20007 add_reg_note (insn, REG_CFA_DEF_CFA,
20008 plus_constant (frame_reg_rtx,
20010 RTX_FRAME_RELATED_P (insn) = 1;
20013 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
20022 par = rs6000_make_savres_rtx (info, gen_rtx_REG (Pmode, 11),
20024 /*savep=*/false, /*gpr=*/true,
20026 emit_jump_insn (par);
20027 /* We don't want anybody else emitting things after we jumped back. */
20032 else if (!restoring_GPRs_inline)
20034 /* We are jumping to an out-of-line function. */
20035 bool can_use_exit = info->first_fp_reg_save == 64;
20038 /* Emit stack reset code if we need it. */
20040 rs6000_emit_stack_reset (info, sp_reg_rtx, frame_reg_rtx,
20041 sp_offset, can_use_exit);
20044 emit_insn (gen_add3_insn (gen_rtx_REG (Pmode, DEFAULT_ABI == ABI_AIX
20047 GEN_INT (sp_offset - info->fp_size)));
20048 if (REGNO (frame_reg_rtx) == 11)
20049 sp_offset += info->fp_size;
20052 par = rs6000_make_savres_rtx (info, frame_reg_rtx,
20053 info->gp_save_offset, reg_mode,
20054 /*savep=*/false, /*gpr=*/true,
20055 /*lr=*/can_use_exit);
20059 if (info->cr_save_p)
20061 rs6000_restore_saved_cr (cr_save_reg, using_mtcr_multiple);
20062 if (DEFAULT_ABI == ABI_V4)
20064 = alloc_reg_note (REG_CFA_RESTORE,
20065 gen_rtx_REG (SImode, CR2_REGNO),
20069 emit_jump_insn (par);
20071 /* We don't want anybody else emitting things after we jumped back. */
20076 insn = emit_insn (par);
20077 if (DEFAULT_ABI == ABI_V4)
20079 if (frame_pointer_needed)
20081 add_reg_note (insn, REG_CFA_DEF_CFA,
20082 plus_constant (frame_reg_rtx, sp_offset));
20083 RTX_FRAME_RELATED_P (insn) = 1;
20086 for (i = info->first_gp_reg_save; i < 32; i++)
20088 = alloc_reg_note (REG_CFA_RESTORE,
20089 gen_rtx_REG (reg_mode, i), cfa_restores);
20092 else if (using_load_multiple)
20095 p = rtvec_alloc (32 - info->first_gp_reg_save);
20096 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
20098 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
20099 GEN_INT (info->gp_save_offset
20102 rtx mem = gen_frame_mem (reg_mode, addr);
20103 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
20105 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, reg, mem);
20106 if (DEFAULT_ABI == ABI_V4)
20107 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
20110 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
20111 if (DEFAULT_ABI == ABI_V4 && frame_pointer_needed)
20113 add_reg_note (insn, REG_CFA_DEF_CFA,
20114 plus_constant (frame_reg_rtx, sp_offset));
20115 RTX_FRAME_RELATED_P (insn) = 1;
20120 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
20121 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
20123 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
20124 GEN_INT (info->gp_save_offset
20127 rtx mem = gen_frame_mem (reg_mode, addr);
20128 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
20130 insn = emit_move_insn (reg, mem);
20131 if (DEFAULT_ABI == ABI_V4)
20133 if (frame_pointer_needed
20134 && info->first_gp_reg_save + i
20135 == HARD_FRAME_POINTER_REGNUM)
20137 add_reg_note (insn, REG_CFA_DEF_CFA,
20138 plus_constant (frame_reg_rtx, sp_offset));
20139 RTX_FRAME_RELATED_P (insn) = 1;
20142 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
20148 if (restore_lr && !restoring_GPRs_inline)
20150 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx,
20151 info->lr_save_offset + sp_offset);
20153 emit_move_insn (gen_rtx_REG (Pmode, 0), mem);
20154 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO),
20155 gen_rtx_REG (Pmode, 0));
20158 /* Restore fpr's if we need to do it without calling a function. */
20159 if (restoring_FPRs_inline)
20160 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
20161 if ((df_regs_ever_live_p (info->first_fp_reg_save+i)
20162 && ! call_used_regs[info->first_fp_reg_save+i]))
20164 rtx addr, mem, reg;
20165 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
20166 GEN_INT (info->fp_save_offset
20169 mem = gen_frame_mem (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
20170 ? DFmode : SFmode), addr);
20171 reg = gen_rtx_REG (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
20172 ? DFmode : SFmode),
20173 info->first_fp_reg_save + i);
20175 emit_move_insn (reg, mem);
20176 if (DEFAULT_ABI == ABI_V4)
20177 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
20181 /* If we saved cr, restore it here. Just those that were used. */
20182 if (info->cr_save_p)
20184 rs6000_restore_saved_cr (cr_save_reg, using_mtcr_multiple);
20185 if (DEFAULT_ABI == ABI_V4)
20187 = alloc_reg_note (REG_CFA_RESTORE, gen_rtx_REG (SImode, CR2_REGNO),
20191 /* If this is V.4, unwind the stack pointer after all of the loads have been done. */
20193 insn = rs6000_emit_stack_reset (info, sp_reg_rtx, frame_reg_rtx,
20194 sp_offset, !restoring_FPRs_inline);
20199 REG_NOTES (insn) = cfa_restores;
20200 cfa_restores = NULL_RTX;
20202 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
20203 RTX_FRAME_RELATED_P (insn) = 1;
20206 if (crtl->calls_eh_return)
20208 rtx sa = EH_RETURN_STACKADJ_RTX;
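/* EH_RETURN_STACKADJ_RTX holds the extra stack adjustment requested by
   __builtin_eh_return; apply it on top of the normal frame pop.  */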
20209 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
20215 bool lr = (strategy & SAVRES_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
20216 if (! restoring_FPRs_inline)
20217 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
20219 p = rtvec_alloc (2);
20221 RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
20222 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
20223 ? gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 65))
20224 : gen_rtx_CLOBBER (VOIDmode,
20225 gen_rtx_REG (Pmode, 65)));
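/* Hard register 65 is the link register: the return uses it when FPRs
   are restored inline (or the out-of-line routine leaves LR alone), and
   it is clobbered when the out-of-line routine restores LR itself.  */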
20227 /* If we have to restore more than two FP registers, branch to the
20228 restore function. It will return to our caller. */
20229 if (! restoring_FPRs_inline)
20234 sym = rs6000_savres_routine_sym (info,
20238 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
20239 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
20240 gen_rtx_REG (Pmode,
20241 DEFAULT_ABI == ABI_AIX
20243 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
20246 addr = gen_rtx_PLUS (Pmode, sp_reg_rtx,
20247 GEN_INT (info->fp_save_offset + 8*i));
20248 mem = gen_frame_mem (DFmode, addr);
20250 RTVEC_ELT (p, i+4) =
20251 gen_rtx_SET (VOIDmode,
20252 gen_rtx_REG (DFmode, info->first_fp_reg_save + i),
20257 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
20261 /* Write function epilogue. */
20264 rs6000_output_function_epilogue (FILE *file,
20265 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
20267 if (! HAVE_epilogue)
20269 rtx insn = get_last_insn ();
20270 /* If the last insn was a BARRIER, we don't have to write anything except
20271 the trace table. */
20272 if (GET_CODE (insn) == NOTE)
20273 insn = prev_nonnote_insn (insn);
20274 if (insn == 0 || GET_CODE (insn) != BARRIER)
20276 /* This is slightly ugly, but at least we don't have two
20277 copies of the epilogue-emitting code. */
20280 /* A NOTE_INSN_DELETED is supposed to be at the start
20281 and end of the "toplevel" insn chain. */
20282 emit_note (NOTE_INSN_DELETED);
20283 rs6000_emit_epilogue (FALSE);
20284 emit_note (NOTE_INSN_DELETED);
20286 /* Expand INSN_ADDRESSES so final() doesn't crash. */
20290 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
20292 INSN_ADDRESSES_NEW (insn, addr);
20297 if (TARGET_DEBUG_STACK)
20298 debug_rtx_list (get_insns (), 100);
20299 final (get_insns (), file, FALSE);
20305 macho_branch_islands ();
20306 /* Mach-O doesn't support labels at the end of objects, so if
20307 it looks like we might want one, insert a NOP. */
20309 rtx insn = get_last_insn ();
20312 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
20313 insn = PREV_INSN (insn);
20317 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
20318 fputs ("\tnop\n", file);
20322 /* Output a traceback table here. See /usr/include/sys/debug.h for info on its format.
20325 We don't output a traceback table if -finhibit-size-directive was
20326 used. The documentation for -finhibit-size-directive reads
20327 ``don't output a @code{.size} assembler directive, or anything
20328 else that would cause trouble if the function is split in the
20329 middle, and the two halves are placed at locations far apart in
20330 memory.'' The traceback table has this property, since it
20331 includes the offset from the start of the function to the
20332 traceback table itself.
20334 System V.4 PowerPC systems (and the embedded ABI derived from them) use a
20335 different traceback table. */
20336 if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
20337 && rs6000_traceback != traceback_none && !cfun->is_thunk)
20339 const char *fname = NULL;
20340 const char *language_string = lang_hooks.name;
20341 int fixed_parms = 0, float_parms = 0, parm_info = 0;
20343 int optional_tbtab;
20344 rs6000_stack_t *info = rs6000_stack_info ();
20346 if (rs6000_traceback == traceback_full)
20347 optional_tbtab = 1;
20348 else if (rs6000_traceback == traceback_part)
20349 optional_tbtab = 0;
20351 optional_tbtab = !optimize_size && !TARGET_ELF;
20353 if (optional_tbtab)
20355 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
20356 while (*fname == '.') fname++; /* V.4 encodes . in the name */
20359 /* Need label immediately before tbtab, so we can compute
20360 its offset from the function start. */
20361 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
20362 ASM_OUTPUT_LABEL (file, fname);
20365 /* The .tbtab pseudo-op can only be used for the first eight
20366 expressions, since it can't handle the possibly variable
20367 length fields that follow. However, if you omit the optional
20368 fields, the assembler outputs zeros for all optional fields
20369 anyway, giving each variable-length field its minimum length
20370 (as defined in sys/debug.h). Thus we cannot use the .tbtab
20371 pseudo-op at all. */
20373 /* An all-zero word flags the start of the tbtab, for debuggers
20374 that have to find it by searching forward from the entry
20375 point or from the current pc. */
20376 fputs ("\t.long 0\n", file);
20378 /* Tbtab format type. Use format type 0. */
20379 fputs ("\t.byte 0,", file);
20381 /* Language type. Unfortunately, there does not seem to be any
20382 official way to discover the language being compiled, so we
20383 use language_string.
20384 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
20385 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
20386 a number, so for now use 9. LTO isn't assigned a number either,
20387 so for now use 0. */
20388 if (! strcmp (language_string, "GNU C")
20389 || ! strcmp (language_string, "GNU GIMPLE"))
20391 else if (! strcmp (language_string, "GNU F77")
20392 || ! strcmp (language_string, "GNU Fortran"))
20394 else if (! strcmp (language_string, "GNU Pascal"))
20396 else if (! strcmp (language_string, "GNU Ada"))
20398 else if (! strcmp (language_string, "GNU C++")
20399 || ! strcmp (language_string, "GNU Objective-C++"))
20401 else if (! strcmp (language_string, "GNU Java"))
20403 else if (! strcmp (language_string, "GNU Objective-C"))
20406 gcc_unreachable ();
20407 fprintf (file, "%d,", i);
20409 /* 8 single bit fields: global linkage (not set for C extern linkage,
20410 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
20411 from start of procedure stored in tbtab, internal function, function
20412 has controlled storage, function has no toc, function uses fp,
20413 function logs/aborts fp operations. */
20414 /* Assume that fp operations are used if any fp reg must be saved. */
20415 fprintf (file, "%d,",
20416 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
20418 /* 6 bitfields: function is interrupt handler, name present in
20419 proc table, function calls alloca, on condition directives
20421 (controls stack walks, 3 bits), saves condition reg, saves link reg. */
20422 /* The `function calls alloca' bit seems to be set whenever reg 31 is
20423 set up as a frame pointer, even when there is no alloca call. */
20424 fprintf (file, "%d,",
20425 ((optional_tbtab << 6)
20426 | ((optional_tbtab & frame_pointer_needed) << 5)
20427 | (info->cr_save_p << 1)
20428 | (info->lr_save_p)));
20430 /* 3 bitfields: saves backchain, fixup code, number of fpr saved (6 bits). */
20432 fprintf (file, "%d,",
20433 (info->push_p << 7) | (64 - info->first_fp_reg_save));
20435 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
20436 fprintf (file, "%d,", (32 - first_reg_to_save ()));
20438 if (optional_tbtab)
20440 /* Compute the parameter info from the function decl argument list. */
20443 int next_parm_info_bit = 31;
20445 for (decl = DECL_ARGUMENTS (current_function_decl);
20446 decl; decl = TREE_CHAIN (decl))
20448 rtx parameter = DECL_INCOMING_RTL (decl);
20449 enum machine_mode mode = GET_MODE (parameter);
20451 if (GET_CODE (parameter) == REG)
20453 if (SCALAR_FLOAT_MODE_P (mode))
20474 gcc_unreachable ();
20477 /* If only one bit will fit, don't or in this entry. */
20478 if (next_parm_info_bit > 0)
20479 parm_info |= (bits << (next_parm_info_bit - 1));
20480 next_parm_info_bit -= 2;
20484 fixed_parms += ((GET_MODE_SIZE (mode)
20485 + (UNITS_PER_WORD - 1))
20487 next_parm_info_bit -= 1;
20493 /* Number of fixed point parameters. */
20494 /* This is actually the number of words of fixed point parameters; thus
20495 an 8 byte struct counts as 2; and thus the maximum value is 8. */
20496 fprintf (file, "%d,", fixed_parms);
20498 /* 2 bitfields: number of floating point parameters (7 bits), parameters on stack (1 bit). */
20500 /* This is actually the number of fp registers that hold parameters;
20501 and thus the maximum value is 13. */
20502 /* Set parameters on stack bit if parameters are not in their original
20503 registers, regardless of whether they are on the stack? Xlc
20504 seems to set the bit when not optimizing. */
20505 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
20507 if (! optional_tbtab)
20510 /* Optional fields follow. Some are variable length. */
20512 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
20513 11 double float. */
20514 /* There is an entry for each parameter in a register, in the order that
20515 they occur in the parameter list. Any intervening arguments on the
20516 stack are ignored. If the list overflows a long (max possible length
20517 34 bits) then completely leave off all elements that don't fit. */
20518 /* Only emit this long if there was at least one parameter. */
20519 if (fixed_parms || float_parms)
20520 fprintf (file, "\t.long %d\n", parm_info);
20522 /* Offset from start of code to tb table. */
20523 fputs ("\t.long ", file);
20524 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
20525 RS6000_OUTPUT_BASENAME (file, fname);
20527 rs6000_output_function_entry (file, fname);
20530 /* Interrupt handler mask. */
20531 /* Omit this long, since we never set the interrupt handler bit above. */
20534 /* Number of CTL (controlled storage) anchors. */
20535 /* Omit this long, since the has_ctl bit is never set above. */
20537 /* Displacement into stack of each CTL anchor. */
20538 /* Omit this list of longs, because there are no CTL anchors. */
20540 /* Length of function name. */
20543 fprintf (file, "\t.short %d\n", (int) strlen (fname));
20545 /* Function name. */
20546 assemble_string (fname, strlen (fname));
20548 /* Register for alloca automatic storage; this is always reg 31.
20549 Only emit this if the alloca bit was set above. */
20550 if (frame_pointer_needed)
20551 fputs ("\t.byte 31\n", file);
20553 fputs ("\t.align 2\n", file);
20557 /* A C compound statement that outputs the assembler code for a thunk
20558 function, used to implement C++ virtual function calls with
20559 multiple inheritance. The thunk acts as a wrapper around a virtual
20560 function, adjusting the implicit object parameter before handing
20561 control off to the real function.
20563 First, emit code to add the integer DELTA to the location that
20564 contains the incoming first argument. Assume that this argument
20565 contains a pointer, and is the one used to pass the `this' pointer
20566 in C++. This is the incoming argument *before* the function
20567 prologue, e.g. `%o0' on a sparc. The addition must preserve the
20568 values of all other incoming arguments.
20570 After the addition, emit code to jump to FUNCTION, which is a
20571 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
20572 not touch the return address. Hence returning from FUNCTION will
20573 return to whoever called the current `thunk'.
20575 The effect must be as if FUNCTION had been called directly with the
20576 adjusted first argument. This macro is responsible for emitting
20577 all of the code for a thunk function; output_function_prologue()
20578 and output_function_epilogue() are not invoked.
20580 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
20581 been extracted from it.) It might possibly be useful on some
20582 targets, but probably not.
20584 If you do not define this macro, the target-independent code in the
20585 C++ frontend will generate a less efficient heavyweight thunk that
20586 calls FUNCTION instead of jumping to it. The generic approach does
20587 not support varargs. */
20590 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
20591 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
20594 rtx this_rtx, insn, funexp;
20596 reload_completed = 1;
20597 epilogue_completed = 1;
20599 /* Mark the end of the (empty) prologue. */
20600 emit_note (NOTE_INSN_PROLOGUE_END);
20602 /* Find the "this" pointer. If the function returns a structure,
20603 the structure return pointer is in r3. */
20604 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
20605 this_rtx = gen_rtx_REG (Pmode, 4);
20607 this_rtx = gen_rtx_REG (Pmode, 3);
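/* When the target function returns an aggregate in memory, r3 carries
   the hidden return-slot pointer, so the incoming `this' pointer arrives
   in r4 instead of r3.  */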
20609 /* Apply the constant offset, if required. */
20611 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
20613 /* Apply the offset from the vtable, if required. */
20616 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
20617 rtx tmp = gen_rtx_REG (Pmode, 12);
20619 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
20620 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
20622 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
20623 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
20627 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
20629 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
20631 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
20634 /* Generate a tail call to the target function. */
20635 if (!TREE_USED (function))
20637 assemble_external (function);
20638 TREE_USED (function) = 1;
20640 funexp = XEXP (DECL_RTL (function), 0);
20641 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
20644 if (MACHOPIC_INDIRECT)
20645 funexp = machopic_indirect_call_target (funexp);
20648 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
20649 generate sibcall RTL explicitly. */
20650 insn = emit_call_insn (
20651 gen_rtx_PARALLEL (VOIDmode,
20653 gen_rtx_CALL (VOIDmode,
20654 funexp, const0_rtx),
20655 gen_rtx_USE (VOIDmode, const0_rtx),
20656 gen_rtx_USE (VOIDmode,
20657 gen_rtx_REG (SImode,
20659 gen_rtx_RETURN (VOIDmode))));
20660 SIBLING_CALL_P (insn) = 1;
20663 /* Run just enough of rest_of_compilation to get the insns emitted.
20664 There's not really enough bulk here to make other passes such as
20665 instruction scheduling worthwhile. Note that use_thunk calls
20666 assemble_start_function and assemble_end_function. */
20667 insn = get_insns ();
20668 insn_locators_alloc ();
20669 shorten_branches (insn);
20670 final_start_function (insn, file, 1);
20671 final (insn, file, 1);
20672 final_end_function ();
20674 reload_completed = 0;
20675 epilogue_completed = 0;
20678 /* A quick summary of the various types of 'constant-pool tables'
20681 Target         Flags            Name              One table per
20682 AIX            (none)           AIX TOC           object file
20683 AIX            -mfull-toc       AIX TOC           object file
20684 AIX            -mminimal-toc    AIX minimal TOC   translation unit
20685 SVR4/EABI      (none)           SVR4 SDATA        object file
20686 SVR4/EABI      -fpic            SVR4 pic          object file
20687 SVR4/EABI      -fPIC            SVR4 PIC          translation unit
20688 SVR4/EABI      -mrelocatable    EABI TOC          function
20689 SVR4/EABI      -maix            AIX TOC           object file
20690 SVR4/EABI      -maix -mminimal-toc
20691                                 AIX minimal TOC   translation unit
20693 Name              Reg.  Set by  entries  contains:
20694                                 made by  addrs?   fp?      sum?
20696 AIX TOC           2     crt0    as       Y        option   option
20697 AIX minimal TOC   30    prolog  gcc      Y        Y        option
20698 SVR4 SDATA        13    crt0    gcc      N        Y        N
20699 SVR4 pic          30    prolog  ld       Y        not yet  N
20700 SVR4 PIC          30    prolog  gcc      Y        option   option
20701 EABI TOC          30    prolog  gcc      Y        option   option
20705 /* Hash functions for the hash table. */
20708 rs6000_hash_constant (rtx k)
20710 enum rtx_code code = GET_CODE (k);
20711 enum machine_mode mode = GET_MODE (k);
20712 unsigned result = (code << 3) ^ mode;
20713 const char *format;
20716 format = GET_RTX_FORMAT (code);
20717 flen = strlen (format);
20723 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
20726 if (mode != VOIDmode)
20727 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
20739 for (; fidx < flen; fidx++)
20740 switch (format[fidx])
20745 const char *str = XSTR (k, fidx);
20746 len = strlen (str);
20747 result = result * 613 + len;
20748 for (i = 0; i < len; i++)
20749 result = result * 613 + (unsigned) str[i];
20754 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
20758 result = result * 613 + (unsigned) XINT (k, fidx);
20761 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
20762 result = result * 613 + (unsigned) XWINT (k, fidx);
20766 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
20767 result = result * 613 + (unsigned) (XWINT (k, fidx)
20774 gcc_unreachable ();
20781 toc_hash_function (const void *hash_entry)
20783 const struct toc_hash_struct *thc =
20784 (const struct toc_hash_struct *) hash_entry;
20785 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
20788 /* Compare H1 and H2 for equivalence. */
20791 toc_hash_eq (const void *h1, const void *h2)
20793 rtx r1 = ((const struct toc_hash_struct *) h1)->key;
20794 rtx r2 = ((const struct toc_hash_struct *) h2)->key;
20796 if (((const struct toc_hash_struct *) h1)->key_mode
20797 != ((const struct toc_hash_struct *) h2)->key_mode)
20800 return rtx_equal_p (r1, r2);
20803 /* These are the names given by the C++ front-end to vtables, and
20804 vtable-like objects. Ideally, this logic should not be here;
20805 instead, there should be some programmatic way of inquiring as
20806 to whether or not an object is a vtable. */
20808 #define VTABLE_NAME_P(NAME) \
20809 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
20810 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
20811 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
20812 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
20813 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
20815 #ifdef NO_DOLLAR_IN_LABEL
20816 /* Return a GGC-allocated character string translating dollar signs in
20817 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
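/* For example (illustrative): an input of "foo$bar$baz" would come back as
   "foo_bar_baz"; names with no '$', or with '$' as the very first
   character, are returned unchanged.  */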
20820 rs6000_xcoff_strip_dollar (const char *name)
20825 p = strchr (name, '$');
20827 if (p == 0 || p == name)
20830 len = strlen (name);
20831 strip = (char *) alloca (len + 1);
20832 strcpy (strip, name);
20833 p = strchr (strip, '$');
20837 p = strchr (p + 1, '$');
20840 return ggc_alloc_string (strip, len);
20845 rs6000_output_symbol_ref (FILE *file, rtx x)
20847 /* Currently C++ toc references to vtables can be emitted before it
20848 is decided whether the vtable is public or private. If this is
20849 the case, then the linker will eventually complain that there is
20850 a reference to an unknown section. Thus, for vtables only,
20851 we emit the TOC reference to reference the symbol and not the
20853 const char *name = XSTR (x, 0);
20855 if (VTABLE_NAME_P (name))
20857 RS6000_OUTPUT_BASENAME (file, name);
20860 assemble_name (file, name);
20863 /* Output a TOC entry. We derive the entry name from what is being
20867 output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
20870 const char *name = buf;
20872 HOST_WIDE_INT offset = 0;
20874 gcc_assert (!TARGET_NO_TOC);
20876 /* When the linker won't eliminate them, don't output duplicate
20877 TOC entries (this happens on AIX if there is any kind of TOC,
20878 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
20880 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
20882 struct toc_hash_struct *h;
20885 /* Create toc_hash_table. This can't be done at OVERRIDE_OPTIONS
20886 time because GGC is not initialized at that point. */
20887 if (toc_hash_table == NULL)
20888 toc_hash_table = htab_create_ggc (1021, toc_hash_function,
20889 toc_hash_eq, NULL);
20891 h = GGC_NEW (struct toc_hash_struct);
20893 h->key_mode = mode;
20894 h->labelno = labelno;
20896 found = htab_find_slot (toc_hash_table, h, INSERT);
20897 if (*found == NULL)
20899 else /* This is indeed a duplicate.
20900 Set this label equal to that label. */
20902 fputs ("\t.set ", file);
20903 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
20904 fprintf (file, "%d,", labelno);
20905 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
20906 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
20912 /* If we're going to put a double constant in the TOC, make sure it's
20913 aligned properly when strict alignment is on. */
20914 if (GET_CODE (x) == CONST_DOUBLE
20915 && STRICT_ALIGNMENT
20916 && GET_MODE_BITSIZE (mode) >= 64
20917 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
20918 ASM_OUTPUT_ALIGN (file, 3);
20921 (*targetm.asm_out.internal_label) (file, "LC", labelno);
20923 /* Handle FP constants specially. Note that if we have a minimal
20924 TOC, things we put here aren't actually in the TOC, so we can allow
20926 if (GET_CODE (x) == CONST_DOUBLE
20927     && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
20929 REAL_VALUE_TYPE rv;
20932 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
20933 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
20934 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
20936 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
20940 if (TARGET_MINIMAL_TOC)
20941 fputs (DOUBLE_INT_ASM_OP, file);
20943 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
20944 k[0] & 0xffffffff, k[1] & 0xffffffff,
20945 k[2] & 0xffffffff, k[3] & 0xffffffff);
20946 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
20947 k[0] & 0xffffffff, k[1] & 0xffffffff,
20948 k[2] & 0xffffffff, k[3] & 0xffffffff);
20953 if (TARGET_MINIMAL_TOC)
20954 fputs ("\t.long ", file);
20956 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
20957 k[0] & 0xffffffff, k[1] & 0xffffffff,
20958 k[2] & 0xffffffff, k[3] & 0xffffffff);
20959 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
20960 k[0] & 0xffffffff, k[1] & 0xffffffff,
20961 k[2] & 0xffffffff, k[3] & 0xffffffff);
20965 else if (GET_CODE (x) == CONST_DOUBLE
20966          && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
20968 REAL_VALUE_TYPE rv;
20971 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
20973 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
20974 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
20976 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
20980 if (TARGET_MINIMAL_TOC)
20981 fputs (DOUBLE_INT_ASM_OP, file);
20983 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
20984 k[0] & 0xffffffff, k[1] & 0xffffffff);
20985 fprintf (file, "0x%lx%08lx\n",
20986 k[0] & 0xffffffff, k[1] & 0xffffffff);
20991 if (TARGET_MINIMAL_TOC)
20992 fputs ("\t.long ", file);
20994 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
20995 k[0] & 0xffffffff, k[1] & 0xffffffff);
20996 fprintf (file, "0x%lx,0x%lx\n",
20997 k[0] & 0xffffffff, k[1] & 0xffffffff);
21001 else if (GET_CODE (x) == CONST_DOUBLE
21002          && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
21004 REAL_VALUE_TYPE rv;
21007 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
21008 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
21009 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
21011 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
21015 if (TARGET_MINIMAL_TOC)
21016 fputs (DOUBLE_INT_ASM_OP, file);
21018 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
21019 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
21024 if (TARGET_MINIMAL_TOC)
21025 fputs ("\t.long ", file);
21027 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
21028 fprintf (file, "0x%lx\n", l & 0xffffffff);
21032 else if (GET_MODE (x) == VOIDmode
21033 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
21035 unsigned HOST_WIDE_INT low;
21036 HOST_WIDE_INT high;
21038 if (GET_CODE (x) == CONST_DOUBLE)
21040 low = CONST_DOUBLE_LOW (x);
21041 high = CONST_DOUBLE_HIGH (x);
21044 #if HOST_BITS_PER_WIDE_INT == 32
21047 high = (low & 0x80000000) ? ~0 : 0;
21051 low = INTVAL (x) & 0xffffffff;
21052 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
21056 /* TOC entries are always Pmode-sized, but since this
21057 is a big-endian machine, if we're putting smaller
21058 integer constants in the TOC we have to pad them.
21059 (This is still a win over putting the constants in
21060 a separate constant pool, because then we'd have
21061 to have both a TOC entry _and_ the actual constant.)
21063 For a 32-bit target, CONST_INT values are loaded and shifted
21064 entirely within `low' and can be stored in one TOC entry. */
21066 /* It would be easy to make this work, but it doesn't now. */
21067 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
21069 if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
21071 #if HOST_BITS_PER_WIDE_INT == 32
21072 lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
21073 POINTER_SIZE, &low, &high, 0);
21076 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
21077 high = (HOST_WIDE_INT) low >> 32;
21084 if (TARGET_MINIMAL_TOC)
21085 fputs (DOUBLE_INT_ASM_OP, file);
21087 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
21088 (long) high & 0xffffffff, (long) low & 0xffffffff);
21089 fprintf (file, "0x%lx%08lx\n",
21090 (long) high & 0xffffffff, (long) low & 0xffffffff);
21095 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
21097 if (TARGET_MINIMAL_TOC)
21098 fputs ("\t.long ", file);
21100 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
21101 (long) high & 0xffffffff, (long) low & 0xffffffff);
21102 fprintf (file, "0x%lx,0x%lx\n",
21103 (long) high & 0xffffffff, (long) low & 0xffffffff);
21107 if (TARGET_MINIMAL_TOC)
21108 fputs ("\t.long ", file);
21110 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
21111 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
21117 if (GET_CODE (x) == CONST)
21119 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
21120 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
21122 base = XEXP (XEXP (x, 0), 0);
21123 offset = INTVAL (XEXP (XEXP (x, 0), 1));
21126 switch (GET_CODE (base))
21129 name = XSTR (base, 0);
21133 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
21134 CODE_LABEL_NUMBER (XEXP (base, 0)));
21138 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
21142 gcc_unreachable ();
21145 if (TARGET_MINIMAL_TOC)
21146 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
21149 fputs ("\t.tc ", file);
21150 RS6000_OUTPUT_BASENAME (file, name);
21153 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
21155 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
21157 fputs ("[TC],", file);
21160 /* Currently C++ toc references to vtables can be emitted before it
21161 is decided whether the vtable is public or private. If this is
21162 the case, then the linker will eventually complain that there is
21163 a TOC reference to an unknown section. Thus, for vtables only,
21164 we emit the TOC reference to reference the symbol and not the
21166 if (VTABLE_NAME_P (name))
21168 RS6000_OUTPUT_BASENAME (file, name);
21170 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
21171 else if (offset > 0)
21172 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
21175 output_addr_const (file, x);
21179 /* Output an assembler pseudo-op to write an ASCII string of N characters
21180 starting at P to FILE.
21182 On the RS/6000, we have to do this using the .byte operation and
21183 write out special characters outside the quoted string.
21184 Also, the assembler is broken; very long strings are truncated,
21185 so we must artificially break them up early. */
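/* As an illustration (not byte-for-byte output), the three characters
   'H', 'i' and '\n' would be emitted roughly as

	.byte "Hi"
	.byte 10

   printable characters are accumulated inside a quoted .byte string,
   while anything outside the printable range is written as a separate
   decimal .byte value.  */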
21188 output_ascii (FILE *file, const char *p, int n)
21191 int i, count_string;
21192 const char *for_string = "\t.byte \"";
21193 const char *for_decimal = "\t.byte ";
21194 const char *to_close = NULL;
21197 for (i = 0; i < n; i++)
21200 if (c >= ' ' && c < 0177)
21203 fputs (for_string, file);
21206 /* Write two quotes to get one. */
21214 for_decimal = "\"\n\t.byte ";
21218 if (count_string >= 512)
21220 fputs (to_close, file);
21222 for_string = "\t.byte \"";
21223 for_decimal = "\t.byte ";
21231 fputs (for_decimal, file);
21232 fprintf (file, "%d", c);
21234 for_string = "\n\t.byte \"";
21235 for_decimal = ", ";
21241 /* Now close the string if we have written one. Then end the line. */
21243 fputs (to_close, file);
21246 /* Generate a unique section name for FILENAME for a section type
21247 represented by SECTION_DESC. Output goes into BUF.
21249 SECTION_DESC can be any string, as long as it is different for each
21250 possible section type.
21252 We name the section in the same manner as xlc. The name begins with an
21253 underscore followed by the filename (after stripping any leading directory
21254 names) with the last period replaced by the string SECTION_DESC. If
21255 FILENAME does not contain a period, SECTION_DESC is appended to the end of
21259 rs6000_gen_section_name (char **buf, const char *filename,
21260 const char *section_desc)
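/* Illustrative example (the SECTION_DESC value here is hypothetical):
   for FILENAME "src/foo.c" and SECTION_DESC "ro_", the generated name
   would be "_fooro_" - a leading underscore, the directory prefix and
   non-alphanumeric characters stripped, and the last period replaced by
   SECTION_DESC.  */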
21262 const char *q, *after_last_slash, *last_period = 0;
21266 after_last_slash = filename;
21267 for (q = filename; *q; q++)
21270 after_last_slash = q + 1;
21271 else if (*q == '.')
21275 len = strlen (after_last_slash) + strlen (section_desc) + 2;
21276 *buf = (char *) xmalloc (len);
21281 for (q = after_last_slash; *q; q++)
21283 if (q == last_period)
21285 strcpy (p, section_desc);
21286 p += strlen (section_desc);
21290 else if (ISALNUM (*q))
21294 if (last_period == 0)
21295 strcpy (p, section_desc);
21300 /* Emit profile function. */
21303 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
21305 /* Non-standard profiling for kernels, which just saves LR then calls
21306 _mcount without worrying about arg saves. The idea is to change
21307 the function prologue as little as possible as it isn't easy to
21308 account for arg save/restore code added just for _mcount. */
21309 if (TARGET_PROFILE_KERNEL)
21312 if (DEFAULT_ABI == ABI_AIX)
21314 #ifndef NO_PROFILE_COUNTERS
21315 # define NO_PROFILE_COUNTERS 0
21317 if (NO_PROFILE_COUNTERS)
21318 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
21319 LCT_NORMAL, VOIDmode, 0);
21323 const char *label_name;
21326 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
21327 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
21328 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
21330 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
21331 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
21334 else if (DEFAULT_ABI == ABI_DARWIN)
21336 const char *mcount_name = RS6000_MCOUNT;
21337 int caller_addr_regno = LR_REGNO;
21339 /* Be conservative and always set this, at least for now. */
21340 crtl->uses_pic_offset_table = 1;
21343 /* For PIC code, set up a stub and collect the caller's address
21344 from r0, which is where the prologue puts it. */
21345 if (MACHOPIC_INDIRECT
21346 && crtl->uses_pic_offset_table)
21347 caller_addr_regno = 0;
21349 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
21350 LCT_NORMAL, VOIDmode, 1,
21351 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
21355 /* Write function profiler code. */
21358 output_function_profiler (FILE *file, int labelno)
21362 switch (DEFAULT_ABI)
21365 gcc_unreachable ();
21370 warning (0, "no profiling of 64-bit code for this ABI");
21373 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
21374 fprintf (file, "\tmflr %s\n", reg_names[0]);
21375 if (NO_PROFILE_COUNTERS)
21377 asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
21378 reg_names[0], reg_names[1]);
21380 else if (TARGET_SECURE_PLT && flag_pic)
21382 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n\t{st|stw} %s,4(%s)\n",
21383 reg_names[0], reg_names[1]);
21384 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
21385 asm_fprintf (file, "\t{cau|addis} %s,%s,",
21386 reg_names[12], reg_names[12]);
21387 assemble_name (file, buf);
21388 asm_fprintf (file, "-1b@ha\n\t{cal|la} %s,", reg_names[0]);
21389 assemble_name (file, buf);
21390 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
21392 else if (flag_pic == 1)
21394 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
21395 asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
21396 reg_names[0], reg_names[1]);
21397 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
21398 asm_fprintf (file, "\t{l|lwz} %s,", reg_names[0]);
21399 assemble_name (file, buf);
21400 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
21402 else if (flag_pic > 1)
21404 asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
21405 reg_names[0], reg_names[1]);
21406 /* Now, we need to get the address of the label. */
21407 fputs ("\tbcl 20,31,1f\n\t.long ", file);
21408 assemble_name (file, buf);
21409 fputs ("-.\n1:", file);
21410 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
21411 asm_fprintf (file, "\t{l|lwz} %s,0(%s)\n",
21412 reg_names[0], reg_names[11]);
21413 asm_fprintf (file, "\t{cax|add} %s,%s,%s\n",
21414 reg_names[0], reg_names[0], reg_names[11]);
21418 asm_fprintf (file, "\t{liu|lis} %s,", reg_names[12]);
21419 assemble_name (file, buf);
21420 fputs ("@ha\n", file);
21421 asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
21422 reg_names[0], reg_names[1]);
21423 asm_fprintf (file, "\t{cal|la} %s,", reg_names[0]);
21424 assemble_name (file, buf);
21425 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
21428 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
21429 fprintf (file, "\tbl %s%s\n",
21430 RS6000_MCOUNT, flag_pic ? "@plt" : "");
21435 if (!TARGET_PROFILE_KERNEL)
21437 /* Don't do anything, done in output_profile_hook (). */
21441 gcc_assert (!TARGET_32BIT);
21443 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
21444 asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
21446 if (cfun->static_chain_decl != NULL)
21448 asm_fprintf (file, "\tstd %s,24(%s)\n",
21449 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
21450 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
21451 asm_fprintf (file, "\tld %s,24(%s)\n",
21452 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
21455 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
21463 /* The following variable value is the last issued insn. */
21465 static rtx last_scheduled_insn;
21467 /* The following variable helps to balance issuing of load and
21468 store instructions */
21470 static int load_store_pendulum;
21472 /* Power4 load update and store update instructions are cracked into a
21473 load or store and an integer insn which are executed in the same cycle.
21474 Branches have their own dispatch slot which does not count against the
21475 GCC issue rate, but it changes the program flow so there are no other
21476 instructions to issue in this cycle. */
21479 rs6000_variable_issue_1 (rtx insn, int more)
21481 last_scheduled_insn = insn;
21482 if (GET_CODE (PATTERN (insn)) == USE
21483 || GET_CODE (PATTERN (insn)) == CLOBBER)
21485 cached_can_issue_more = more;
21486 return cached_can_issue_more;
21489 if (insn_terminates_group_p (insn, current_group))
21491 cached_can_issue_more = 0;
21492 return cached_can_issue_more;
21495 /* If the insn has no reservation, but we reach here anyway. */
21496 if (recog_memoized (insn) < 0)
21499 if (rs6000_sched_groups)
21501 if (is_microcoded_insn (insn))
21502 cached_can_issue_more = 0;
21503 else if (is_cracked_insn (insn))
21504 cached_can_issue_more = more > 2 ? more - 2 : 0;
21506 cached_can_issue_more = more - 1;
21508 return cached_can_issue_more;
21511 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
21514 cached_can_issue_more = more - 1;
21515 return cached_can_issue_more;
21519 rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
21521 int r = rs6000_variable_issue_1 (insn, more);
21523 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
21527 /* Adjust the cost of a scheduling dependency. Return the new cost of
21528 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
21531 rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
21533 enum attr_type attr_type;
21535 if (! recog_memoized (insn))
21538 switch (REG_NOTE_KIND (link))
21542 /* Data dependency; DEP_INSN writes a register that INSN reads
21543 some cycles later. */
21545 /* Separate a load from a narrower, dependent store. */
21546 if (rs6000_sched_groups
21547 && GET_CODE (PATTERN (insn)) == SET
21548 && GET_CODE (PATTERN (dep_insn)) == SET
21549 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
21550 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
21551 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
21552 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
21555 attr_type = get_attr_type (insn);
21560 /* Tell the first scheduling pass about the latency between
21561 a mtctr and bctr (and mtlr and br/blr). The first
21562 scheduling pass will not know about this latency since
21563 the mtctr instruction, which has the latency associated
21564 to it, will be generated by reload. */
21565 return TARGET_POWER ? 5 : 4;
21567 /* Leave some extra cycles between a compare and its
21568 dependent branch, to inhibit expensive mispredicts. */
21569 if ((rs6000_cpu_attr == CPU_PPC603
21570 || rs6000_cpu_attr == CPU_PPC604
21571 || rs6000_cpu_attr == CPU_PPC604E
21572 || rs6000_cpu_attr == CPU_PPC620
21573 || rs6000_cpu_attr == CPU_PPC630
21574 || rs6000_cpu_attr == CPU_PPC750
21575 || rs6000_cpu_attr == CPU_PPC7400
21576 || rs6000_cpu_attr == CPU_PPC7450
21577 || rs6000_cpu_attr == CPU_POWER4
21578 || rs6000_cpu_attr == CPU_POWER5
21579 || rs6000_cpu_attr == CPU_POWER7
21580 || rs6000_cpu_attr == CPU_CELL)
21581 && recog_memoized (dep_insn)
21582 && (INSN_CODE (dep_insn) >= 0))
21584 switch (get_attr_type (dep_insn))
21588 case TYPE_DELAYED_COMPARE:
21589 case TYPE_IMUL_COMPARE:
21590 case TYPE_LMUL_COMPARE:
21591 case TYPE_FPCOMPARE:
21592 case TYPE_CR_LOGICAL:
21593 case TYPE_DELAYED_CR:
21602 case TYPE_STORE_UX:
21604 case TYPE_FPSTORE_U:
21605 case TYPE_FPSTORE_UX:
21606 if ((rs6000_cpu == PROCESSOR_POWER6)
21607 && recog_memoized (dep_insn)
21608 && (INSN_CODE (dep_insn) >= 0))
21611 if (GET_CODE (PATTERN (insn)) != SET)
21612 /* If this happens, we have to extend this to schedule
21613 optimally. Return default for now. */
21616 /* Adjust the cost for the case where the value written
21617 by a fixed point operation is used as the address
21618 gen value on a store. */
21619 switch (get_attr_type (dep_insn))
21626 if (! store_data_bypass_p (dep_insn, insn))
21630 case TYPE_LOAD_EXT:
21631 case TYPE_LOAD_EXT_U:
21632 case TYPE_LOAD_EXT_UX:
21633 case TYPE_VAR_SHIFT_ROTATE:
21634 case TYPE_VAR_DELAYED_COMPARE:
21636 if (! store_data_bypass_p (dep_insn, insn))
21642 case TYPE_FAST_COMPARE:
21645 case TYPE_INSERT_WORD:
21646 case TYPE_INSERT_DWORD:
21647 case TYPE_FPLOAD_U:
21648 case TYPE_FPLOAD_UX:
21650 case TYPE_STORE_UX:
21651 case TYPE_FPSTORE_U:
21652 case TYPE_FPSTORE_UX:
21654 if (! store_data_bypass_p (dep_insn, insn))
21662 case TYPE_IMUL_COMPARE:
21663 case TYPE_LMUL_COMPARE:
21665 if (! store_data_bypass_p (dep_insn, insn))
21671 if (! store_data_bypass_p (dep_insn, insn))
21677 if (! store_data_bypass_p (dep_insn, insn))
21690 case TYPE_LOAD_EXT:
21691 case TYPE_LOAD_EXT_U:
21692 case TYPE_LOAD_EXT_UX:
21693 if ((rs6000_cpu == PROCESSOR_POWER6)
21694 && recog_memoized (dep_insn)
21695 && (INSN_CODE (dep_insn) >= 0))
21698 /* Adjust the cost for the case where the value written
21699 by a fixed point instruction is used within the address
21700 gen portion of a subsequent load(u)(x) */
21701 switch (get_attr_type (dep_insn))
21708 if (set_to_load_agen (dep_insn, insn))
21712 case TYPE_LOAD_EXT:
21713 case TYPE_LOAD_EXT_U:
21714 case TYPE_LOAD_EXT_UX:
21715 case TYPE_VAR_SHIFT_ROTATE:
21716 case TYPE_VAR_DELAYED_COMPARE:
21718 if (set_to_load_agen (dep_insn, insn))
21724 case TYPE_FAST_COMPARE:
21727 case TYPE_INSERT_WORD:
21728 case TYPE_INSERT_DWORD:
21729 case TYPE_FPLOAD_U:
21730 case TYPE_FPLOAD_UX:
21732 case TYPE_STORE_UX:
21733 case TYPE_FPSTORE_U:
21734 case TYPE_FPSTORE_UX:
21736 if (set_to_load_agen (dep_insn, insn))
21744 case TYPE_IMUL_COMPARE:
21745 case TYPE_LMUL_COMPARE:
21747 if (set_to_load_agen (dep_insn, insn))
21753 if (set_to_load_agen (dep_insn, insn))
21759 if (set_to_load_agen (dep_insn, insn))
21770 if ((rs6000_cpu == PROCESSOR_POWER6)
21771 && recog_memoized (dep_insn)
21772 && (INSN_CODE (dep_insn) >= 0)
21773 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
21780 /* Fall out to return default cost. */
21784 case REG_DEP_OUTPUT:
21785 /* Output dependency; DEP_INSN writes a register that INSN writes some
21787 if ((rs6000_cpu == PROCESSOR_POWER6)
21788 && recog_memoized (dep_insn)
21789 && (INSN_CODE (dep_insn) >= 0))
21791 attr_type = get_attr_type (insn);
21796 if (get_attr_type (dep_insn) == TYPE_FP)
21800 if (get_attr_type (dep_insn) == TYPE_MFFGPR)
21808 /* Anti dependency; DEP_INSN reads a register that INSN writes some
21813 gcc_unreachable ();
21819 /* Debug version of rs6000_adjust_cost. */
21822 rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
21824 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
21830 switch (REG_NOTE_KIND (link))
21832 default: dep = "unknown dependency"; break;
21833 case REG_DEP_TRUE: dep = "data dependency"; break;
21834 case REG_DEP_OUTPUT: dep = "output dependency"; break;
21835 case REG_DEP_ANTI: dep = "anti dependency"; break;
21839 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
21840 "%s, insn:\n", ret, cost, dep);
21848 /* The function returns true if INSN is microcoded.
21849 Return false otherwise. */
21852 is_microcoded_insn (rtx insn)
21854 if (!insn || !NONDEBUG_INSN_P (insn)
21855 || GET_CODE (PATTERN (insn)) == USE
21856 || GET_CODE (PATTERN (insn)) == CLOBBER)
21859 if (rs6000_cpu_attr == CPU_CELL)
21860 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
21862 if (rs6000_sched_groups)
21864 enum attr_type type = get_attr_type (insn);
21865 if (type == TYPE_LOAD_EXT_U
21866 || type == TYPE_LOAD_EXT_UX
21867 || type == TYPE_LOAD_UX
21868 || type == TYPE_STORE_UX
21869 || type == TYPE_MFCR)
21876 /* The function returns true if INSN is cracked into 2 instructions
21877 by the processor (and therefore occupies 2 issue slots). */
21880 is_cracked_insn (rtx insn)
21882 if (!insn || !NONDEBUG_INSN_P (insn)
21883 || GET_CODE (PATTERN (insn)) == USE
21884 || GET_CODE (PATTERN (insn)) == CLOBBER)
21887 if (rs6000_sched_groups)
21889 enum attr_type type = get_attr_type (insn);
21890 if (type == TYPE_LOAD_U || type == TYPE_STORE_U
21891 || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
21892 || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
21893 || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
21894 || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
21895 || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
21896 || type == TYPE_IDIV || type == TYPE_LDIV
21897 || type == TYPE_INSERT_WORD)
21904 /* The function returns true if INSN can be issued only from
21905 the branch slot. */
21908 is_branch_slot_insn (rtx insn)
21910 if (!insn || !NONDEBUG_INSN_P (insn)
21911 || GET_CODE (PATTERN (insn)) == USE
21912 || GET_CODE (PATTERN (insn)) == CLOBBER)
21915 if (rs6000_sched_groups)
21917 enum attr_type type = get_attr_type (insn);
21918 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
21926 /* The function returns true if out_insn sets a value that is
21927 used in the address generation computation of in_insn. */
21929 set_to_load_agen (rtx out_insn, rtx in_insn)
21931 rtx out_set, in_set;
21933 /* For performance reasons, only handle the simple case where
21934 both loads are a single_set. */
21935 out_set = single_set (out_insn);
21938 in_set = single_set (in_insn);
21940 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
21946 /* The function returns true if the target storage location of
21947 out_insn is adjacent to the target storage location of in_insn */
21948 /* Return 1 if memory locations are adjacent. */
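/* For example (illustrative), a 4-byte store to 4(r9) followed by a store
   to 8(r9) counts as adjacent: both addresses use the same base register
   and the difference between the offsets equals the size of the first
   store.  */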
21951 adjacent_mem_locations (rtx insn1, rtx insn2)
21954 rtx a = get_store_dest (PATTERN (insn1));
21955 rtx b = get_store_dest (PATTERN (insn2));
21957 if ((GET_CODE (XEXP (a, 0)) == REG
21958 || (GET_CODE (XEXP (a, 0)) == PLUS
21959 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
21960 && (GET_CODE (XEXP (b, 0)) == REG
21961 || (GET_CODE (XEXP (b, 0)) == PLUS
21962 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
21964 HOST_WIDE_INT val0 = 0, val1 = 0, val_diff;
21967 if (GET_CODE (XEXP (a, 0)) == PLUS)
21969 reg0 = XEXP (XEXP (a, 0), 0);
21970 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
21973 reg0 = XEXP (a, 0);
21975 if (GET_CODE (XEXP (b, 0)) == PLUS)
21977 reg1 = XEXP (XEXP (b, 0), 0);
21978 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
21981 reg1 = XEXP (b, 0);
21983 val_diff = val1 - val0;
21985 return ((REGNO (reg0) == REGNO (reg1))
21986 && ((MEM_SIZE (a) && val_diff == INTVAL (MEM_SIZE (a)))
21987 || (MEM_SIZE (b) && val_diff == -INTVAL (MEM_SIZE (b)))));
21993 /* A C statement (sans semicolon) to update the integer scheduling
21994 priority INSN_PRIORITY (INSN). Increase the priority to execute the
21995 INSN earlier, reduce the priority to execute INSN later. Do not
21996 define this macro if you do not need to adjust the scheduling
21997 priorities of insns. */
22000 rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
22002 /* On machines (like the 750) which have asymmetric integer units,
22003 where one integer unit can do multiply and divides and the other
22004 can't, reduce the priority of multiply/divide so it is scheduled
22005 before other integer operations. */
22008 if (! INSN_P (insn))
22011 if (GET_CODE (PATTERN (insn)) == USE)
22014 switch (rs6000_cpu_attr) {
22016 switch (get_attr_type (insn))
22023 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
22024 priority, priority);
22025 if (priority >= 0 && priority < 0x01000000)
22032 if (insn_must_be_first_in_group (insn)
22033 && reload_completed
22034 && current_sched_info->sched_max_insns_priority
22035 && rs6000_sched_restricted_insns_priority)
22038 /* Prioritize insns that can be dispatched only in the first
22040 if (rs6000_sched_restricted_insns_priority == 1)
22041 /* Attach highest priority to insn. This means that in
22042 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
22043 precede 'priority' (critical path) considerations. */
22044 return current_sched_info->sched_max_insns_priority;
22045 else if (rs6000_sched_restricted_insns_priority == 2)
22046 /* Increase priority of insn by a minimal amount. This means that in
22047 haifa-sched.c:ready_sort(), only 'priority' (critical path)
22048 considerations precede dispatch-slot restriction considerations. */
22049 return (priority + 1);
22052 if (rs6000_cpu == PROCESSOR_POWER6
22053 && ((load_store_pendulum == -2 && is_load_insn (insn))
22054 || (load_store_pendulum == 2 && is_store_insn (insn))))
22055 /* Attach highest priority to insn if the scheduler has just issued two
22056 stores and this instruction is a load, or two loads and this instruction
22057 is a store. Power6 wants loads and stores scheduled alternately
22059 return current_sched_info->sched_max_insns_priority;
22064 /* Return true if the instruction is nonpipelined on the Cell. */
22066 is_nonpipeline_insn (rtx insn)
22068 enum attr_type type;
22069 if (!insn || !NONDEBUG_INSN_P (insn)
22070 || GET_CODE (PATTERN (insn)) == USE
22071 || GET_CODE (PATTERN (insn)) == CLOBBER)
22074 type = get_attr_type (insn);
22075 if (type == TYPE_IMUL
22076 || type == TYPE_IMUL2
22077 || type == TYPE_IMUL3
22078 || type == TYPE_LMUL
22079 || type == TYPE_IDIV
22080 || type == TYPE_LDIV
22081 || type == TYPE_SDIV
22082 || type == TYPE_DDIV
22083 || type == TYPE_SSQRT
22084 || type == TYPE_DSQRT
22085 || type == TYPE_MFCR
22086 || type == TYPE_MFCRF
22087 || type == TYPE_MFJMPR)
22095 /* Return how many instructions the machine can issue per cycle. */
22098 rs6000_issue_rate (void)
22100 /* Unless scheduling for register pressure, use issue rate of 1 for
22101 first scheduling pass to decrease degradation. */
22102 if (!reload_completed && !flag_sched_pressure)
22105 switch (rs6000_cpu_attr) {
22106 case CPU_RIOS1: /* ? */
22108 case CPU_PPC601: /* ? */
22117 case CPU_PPCE300C2:
22118 case CPU_PPCE300C3:
22119 case CPU_PPCE500MC:
22120 case CPU_PPCE500MC64:
22139 /* Return how many instructions to look ahead for better insn
22143 rs6000_use_sched_lookahead (void)
22145 if (rs6000_cpu_attr == CPU_PPC8540)
22147 if (rs6000_cpu_attr == CPU_CELL)
22148 return (reload_completed ? 8 : 0);
22152 /* We are choosing an insn from the ready queue. Return nonzero if INSN can be chosen. */
22154 rs6000_use_sched_lookahead_guard (rtx insn)
22156 if (rs6000_cpu_attr != CPU_CELL)
22159 if (insn == NULL_RTX || !INSN_P (insn))
22162 if (!reload_completed
22163 || is_nonpipeline_insn (insn)
22164 || is_microcoded_insn (insn))
22170 /* Determine if PAT refers to memory. */
22173 is_mem_ref (rtx pat)
22179 /* stack_tie does not produce any real memory traffic. */
22180 if (GET_CODE (pat) == UNSPEC
22181 && XINT (pat, 1) == UNSPEC_TIE)
22184 if (GET_CODE (pat) == MEM)
22187 /* Recursively process the pattern. */
22188 fmt = GET_RTX_FORMAT (GET_CODE (pat));
22190 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0 && !ret; i--)
22193 ret |= is_mem_ref (XEXP (pat, i));
22194 else if (fmt[i] == 'E')
22195 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
22196 ret |= is_mem_ref (XVECEXP (pat, i, j));
22202 /* Determine if PAT is a PATTERN of a load insn. */
22205 is_load_insn1 (rtx pat)
22207 if (!pat || pat == NULL_RTX)
22210 if (GET_CODE (pat) == SET)
22211 return is_mem_ref (SET_SRC (pat));
22213 if (GET_CODE (pat) == PARALLEL)
22217 for (i = 0; i < XVECLEN (pat, 0); i++)
22218 if (is_load_insn1 (XVECEXP (pat, 0, i)))
22225 /* Determine if INSN loads from memory. */
22228 is_load_insn (rtx insn)
22230 if (!insn || !INSN_P (insn))
22233 if (GET_CODE (insn) == CALL_INSN)
22236 return is_load_insn1 (PATTERN (insn));
22239 /* Determine if PAT is a PATTERN of a store insn. */
22242 is_store_insn1 (rtx pat)
22244 if (!pat || pat == NULL_RTX)
22247 if (GET_CODE (pat) == SET)
22248 return is_mem_ref (SET_DEST (pat));
22250 if (GET_CODE (pat) == PARALLEL)
22254 for (i = 0; i < XVECLEN (pat, 0); i++)
22255 if (is_store_insn1 (XVECEXP (pat, 0, i)))
22262 /* Determine if INSN stores to memory. */
22265 is_store_insn (rtx insn)
22267 if (!insn || !INSN_P (insn))
22270 return is_store_insn1 (PATTERN (insn));
22273 /* Return the dest of a store insn. */
22276 get_store_dest (rtx pat)
22278 gcc_assert (is_store_insn1 (pat));
22280 if (GET_CODE (pat) == SET)
22281 return SET_DEST (pat);
22282 else if (GET_CODE (pat) == PARALLEL)
22286 for (i = 0; i < XVECLEN (pat, 0); i++)
22288 rtx inner_pat = XVECEXP (pat, 0, i);
22289 if (GET_CODE (inner_pat) == SET
22290 && is_mem_ref (SET_DEST (inner_pat)))
22294 /* We shouldn't get here, because we should have either a simple
22295 store insn or a store with update which are covered above. */
22299 /* Returns whether the dependence between INSN and NEXT is considered
22300 costly by the given target. */
22303 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
22308 /* If the flag is not enabled - no dependence is considered costly;
22309 allow all dependent insns in the same group.
22310 This is the most aggressive option. */
22311 if (rs6000_sched_costly_dep == no_dep_costly)
22314 /* If the flag is set to 1 - a dependence is always considered costly;
22315 do not allow dependent instructions in the same group.
22316 This is the most conservative option. */
22317 if (rs6000_sched_costly_dep == all_deps_costly)
22320 insn = DEP_PRO (dep);
22321 next = DEP_CON (dep);
22323 if (rs6000_sched_costly_dep == store_to_load_dep_costly
22324 && is_load_insn (next)
22325 && is_store_insn (insn))
22326 /* Prevent load after store in the same group. */
22329 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
22330 && is_load_insn (next)
22331 && is_store_insn (insn)
22332 && DEP_TYPE (dep) == REG_DEP_TRUE)
22333 /* Prevent load after store in the same group if it is a true
22337 /* The flag is set to X; dependences with latency >= X are considered costly,
22338 and will not be scheduled in the same group. */
22339 if (rs6000_sched_costly_dep <= max_dep_latency
22340 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
22346 /* Return the next insn after INSN that is found before TAIL is reached,
22347 skipping any "non-active" insns - insns that will not actually occupy
22348 an issue slot. Return NULL_RTX if such an insn is not found. */
22351 get_next_active_insn (rtx insn, rtx tail)
22353 if (insn == NULL_RTX || insn == tail)
22358 insn = NEXT_INSN (insn);
22359 if (insn == NULL_RTX || insn == tail)
22364 || (NONJUMP_INSN_P (insn)
22365 && GET_CODE (PATTERN (insn)) != USE
22366 && GET_CODE (PATTERN (insn)) != CLOBBER
22367 && INSN_CODE (insn) != CODE_FOR_stack_tie))
22373 /* We are about to begin issuing insns for this clock cycle. */
22376 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
22377 rtx *ready ATTRIBUTE_UNUSED,
22378 int *pn_ready ATTRIBUTE_UNUSED,
22379 int clock_var ATTRIBUTE_UNUSED)
22381 int n_ready = *pn_ready;
22384 fprintf (dump, "// rs6000_sched_reorder :\n");
22386 /* Reorder the ready list, if the second-to-last ready insn
22387 is a non-pipelined insn. */
22388 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
22390 if (is_nonpipeline_insn (ready[n_ready - 1])
22391 && (recog_memoized (ready[n_ready - 2]) > 0))
22392 /* Simply swap first two insns. */
22394 rtx tmp = ready[n_ready - 1];
22395 ready[n_ready - 1] = ready[n_ready - 2];
22396 ready[n_ready - 2] = tmp;
22400 if (rs6000_cpu == PROCESSOR_POWER6)
22401 load_store_pendulum = 0;
22403 return rs6000_issue_rate ();
22406 /* Like rs6000_sched_reorder, but called after issuing each insn. */
22409 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
22410 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
22413 fprintf (dump, "// rs6000_sched_reorder2 :\n");
22415 /* For Power6, we need to handle some special cases to try and keep the
22416 store queue from overflowing and triggering expensive flushes.
22418 This code monitors how load and store instructions are being issued
22419 and skews the ready list one way or the other to increase the likelihood
22420 that a desired instruction is issued at the proper time.
22422 A couple of things are done. First, we maintain a "load_store_pendulum"
22423 to track the current state of load/store issue.
22425 - If the pendulum is at zero, then no loads or stores have been
22426 issued in the current cycle so we do nothing.
22428 - If the pendulum is 1, then a single load has been issued in this
22429 cycle and we attempt to locate another load in the ready list to
22432 - If the pendulum is -2, then two stores have already been
22433 issued in this cycle, so we increase the priority of the first load
22434 in the ready list to increase its likelihood of being chosen first
22437 - If the pendulum is -1, then a single store has been issued in this
22438 cycle and we attempt to locate another store in the ready list to
22439 issue with it, preferring a store to an adjacent memory location to
22440 facilitate store pairing in the store queue.
22442 - If the pendulum is 2, then two loads have already been
22443 issued in this cycle, so we increase the priority of the first store
22444 in the ready list to increase its likelihood of being chosen first
22447 - If the pendulum < -2 or > 2, then do nothing.
22449 Note: This code covers the most common scenarios. There exist
22450 non-load/store instructions which make use of the LSU and which
22451 would need to be accounted for to strictly model the behavior
22452 of the machine. Those instructions are currently unaccounted
22453 for to help minimize compile time overhead of this code.
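     As a small worked illustration of the rules above (purely
     illustrative): if two loads have already been issued in the current
     cycle, the pendulum reads +2, so the first store found on the ready
     list gets a one-point priority boost (and the pendulum is bumped past
     the tracked range so that only one store is boosted), encouraging a
     store to issue next.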
22455 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
22461 if (is_store_insn (last_scheduled_insn))
22462 /* Issuing a store, swing the load_store_pendulum to the left */
22463 load_store_pendulum--;
22464 else if (is_load_insn (last_scheduled_insn))
22465 /* Issuing a load, swing the load_store_pendulum to the right */
22466 load_store_pendulum++;
22468 return cached_can_issue_more;
22470 /* If the pendulum is balanced, or there is only one instruction on
22471 the ready list, then all is well, so return. */
22472 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
22473 return cached_can_issue_more;
22475 if (load_store_pendulum == 1)
22477 /* A load has been issued in this cycle. Scan the ready list
22478 for another load to issue with it */
22483 if (is_load_insn (ready[pos]))
22485 /* Found a load. Move it to the head of the ready list,
22486 and adjust its priority so that it is more likely to
22489 for (i=pos; i<*pn_ready-1; i++)
22490 ready[i] = ready[i + 1];
22491 ready[*pn_ready-1] = tmp;
22493 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
22494 INSN_PRIORITY (tmp)++;
22500 else if (load_store_pendulum == -2)
22502 /* Two stores have been issued in this cycle. Increase the
22503 priority of the first load in the ready list to favor it for
22504 issuing in the next cycle. */
22509 if (is_load_insn (ready[pos])
22511 && INSN_PRIORITY_KNOWN (ready[pos]))
22513 INSN_PRIORITY (ready[pos])++;
22515 /* Adjust the pendulum to account for the fact that a load
22516 was found and increased in priority. This is to prevent
22517 increasing the priority of multiple loads */
22518 load_store_pendulum--;
22525 else if (load_store_pendulum == -1)
22527 /* A store has been issued in this cycle. Scan the ready list for
22528 another store to issue with it, preferring a store to an adjacent
22530 int first_store_pos = -1;
22536 if (is_store_insn (ready[pos]))
22538 /* Maintain the index of the first store found on the
22540 if (first_store_pos == -1)
22541 first_store_pos = pos;
22543 if (is_store_insn (last_scheduled_insn)
22544 && adjacent_mem_locations (last_scheduled_insn,ready[pos]))
22546 /* Found an adjacent store. Move it to the head of the
22547 ready list, and adjust its priority so that it is
22548 more likely to stay there */
22550 for (i=pos; i<*pn_ready-1; i++)
22551 ready[i] = ready[i + 1];
22552 ready[*pn_ready-1] = tmp;
22554 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
22555 INSN_PRIORITY (tmp)++;
22557 first_store_pos = -1;
22565 if (first_store_pos >= 0)
22567 /* An adjacent store wasn't found, but a non-adjacent store was,
22568 so move the non-adjacent store to the front of the ready
22569 list, and adjust its priority so that it is more likely to
22571 tmp = ready[first_store_pos];
22572 for (i=first_store_pos; i<*pn_ready-1; i++)
22573 ready[i] = ready[i + 1];
22574 ready[*pn_ready-1] = tmp;
22575 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
22576 INSN_PRIORITY (tmp)++;
22579 else if (load_store_pendulum == 2)
22581 /* Two loads have been issued in this cycle. Increase the priority
22582 of the first store in the ready list to favor it for issuing in
22588 if (is_store_insn (ready[pos])
22590 && INSN_PRIORITY_KNOWN (ready[pos]))
22592 INSN_PRIORITY (ready[pos])++;
22594 /* Adjust the pendulum to account for the fact that a store
22595 was found and increased in priority. This is to prevent
22596 increasing the priority of multiple stores */
22597 load_store_pendulum++;
22606 return cached_can_issue_more;
22609 /* Return whether the presence of INSN causes a dispatch group termination
22610 of group WHICH_GROUP.
22612 If WHICH_GROUP == current_group, this function will return true if INSN
22613 causes the termination of the current group (i.e., the dispatch group to
22614 which INSN belongs). This means that INSN will be the last insn in the
22615 group it belongs to.
22617 If WHICH_GROUP == previous_group, this function will return true if INSN
22618 causes the termination of the previous group (i.e., the dispatch group that
22619 precedes the group to which INSN belongs). This means that INSN will be
22620 the first insn in the group it belongs to. */
22623 insn_terminates_group_p (rtx insn, enum group_termination which_group)
22630 first = insn_must_be_first_in_group (insn);
22631 last = insn_must_be_last_in_group (insn);
22636 if (which_group == current_group)
22638 else if (which_group == previous_group)
22646 insn_must_be_first_in_group (rtx insn)
22648 enum attr_type type;
22651 || GET_CODE (insn) == NOTE
22652 || DEBUG_INSN_P (insn)
22653 || GET_CODE (PATTERN (insn)) == USE
22654 || GET_CODE (PATTERN (insn)) == CLOBBER)
22657 switch (rs6000_cpu)
22659 case PROCESSOR_POWER5:
22660 if (is_cracked_insn (insn))
22662 case PROCESSOR_POWER4:
22663 if (is_microcoded_insn (insn))
22666 if (!rs6000_sched_groups)
22669 type = get_attr_type (insn);
22676 case TYPE_DELAYED_CR:
22677 case TYPE_CR_LOGICAL:
22691 case PROCESSOR_POWER6:
22692 type = get_attr_type (insn);
22696 case TYPE_INSERT_DWORD:
22700 case TYPE_VAR_SHIFT_ROTATE:
22707 case TYPE_INSERT_WORD:
22708 case TYPE_DELAYED_COMPARE:
22709 case TYPE_IMUL_COMPARE:
22710 case TYPE_LMUL_COMPARE:
22711 case TYPE_FPCOMPARE:
22722 case TYPE_LOAD_EXT_UX:
22724 case TYPE_STORE_UX:
22725 case TYPE_FPLOAD_U:
22726 case TYPE_FPLOAD_UX:
22727 case TYPE_FPSTORE_U:
22728 case TYPE_FPSTORE_UX:
22734 case PROCESSOR_POWER7:
22735 type = get_attr_type (insn);
22739 case TYPE_CR_LOGICAL:
22746 case TYPE_DELAYED_COMPARE:
22747 case TYPE_VAR_DELAYED_COMPARE:
22753 case TYPE_LOAD_EXT:
22754 case TYPE_LOAD_EXT_U:
22755 case TYPE_LOAD_EXT_UX:
22757 case TYPE_STORE_UX:
22758 case TYPE_FPLOAD_U:
22759 case TYPE_FPLOAD_UX:
22760 case TYPE_FPSTORE_U:
22761 case TYPE_FPSTORE_UX:
22777 insn_must_be_last_in_group (rtx insn)
22779 enum attr_type type;
22782 || GET_CODE (insn) == NOTE
22783 || DEBUG_INSN_P (insn)
22784 || GET_CODE (PATTERN (insn)) == USE
22785 || GET_CODE (PATTERN (insn)) == CLOBBER)
22788 switch (rs6000_cpu) {
22789 case PROCESSOR_POWER4:
22790 case PROCESSOR_POWER5:
22791 if (is_microcoded_insn (insn))
22794 if (is_branch_slot_insn (insn))
22798 case PROCESSOR_POWER6:
22799 type = get_attr_type (insn);
22806 case TYPE_VAR_SHIFT_ROTATE:
22813 case TYPE_DELAYED_COMPARE:
22814 case TYPE_IMUL_COMPARE:
22815 case TYPE_LMUL_COMPARE:
22816 case TYPE_FPCOMPARE:
22830 case PROCESSOR_POWER7:
22831 type = get_attr_type (insn);
22839 case TYPE_LOAD_EXT_U:
22840 case TYPE_LOAD_EXT_UX:
22841 case TYPE_STORE_UX:
22854 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
22855 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
22858 is_costly_group (rtx *group_insns, rtx next_insn)
22861 int issue_rate = rs6000_issue_rate ();
22863 for (i = 0; i < issue_rate; i++)
22865 sd_iterator_def sd_it;
22867 rtx insn = group_insns[i];
22872 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
22874 rtx next = DEP_CON (dep);
22876 if (next == next_insn
22877 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
22885 /* Utility of the function redefine_groups.
22886 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
22887 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
22888 to keep it "far" (in a separate group) from GROUP_INSNS, following
22889 one of the following schemes, depending on the value of the flag
22890 -minsert-sched-nops = X:
22891 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
22892 in order to force NEXT_INSN into a separate group.
22893 (2) X < sched_finish_regroup_exact: insert exactly X nops.
22894 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
22895 insertion (has a group just ended, how many vacant issue slots remain in the
22896 last group, and how many dispatch groups were encountered so far). */
22899 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
22900 rtx next_insn, bool *group_end, int can_issue_more,
22905 int issue_rate = rs6000_issue_rate ();
22906 bool end = *group_end;
22909 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
22910 return can_issue_more;
22912 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
22913 return can_issue_more;
22915 force = is_costly_group (group_insns, next_insn);
22917 return can_issue_more;
22919 if (sched_verbose > 6)
22920 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
22921 *group_count ,can_issue_more);
22923 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
22926 can_issue_more = 0;
22928 /* Since only a branch can be issued in the last issue_slot, it is
22929 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
22930 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
22931 in this case the last nop will start a new group and the branch
22932 will be forced to the new group. */
22933 if (can_issue_more && !is_branch_slot_insn (next_insn))
22936 while (can_issue_more > 0)
22939 emit_insn_before (nop, next_insn);
22947 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
22949 int n_nops = rs6000_sched_insert_nops;
22951 /* Nops can't be issued from the branch slot, so the effective
22952 issue_rate for nops is 'issue_rate - 1'. */
22953 if (can_issue_more == 0)
22954 can_issue_more = issue_rate;
22956 if (can_issue_more == 0)
22958 can_issue_more = issue_rate - 1;
22961 for (i = 0; i < issue_rate; i++)
22963 group_insns[i] = 0;
22970 emit_insn_before (nop, next_insn);
22971 if (can_issue_more == issue_rate - 1) /* new group begins */
22974 if (can_issue_more == 0)
22976 can_issue_more = issue_rate - 1;
22979 for (i = 0; i < issue_rate; i++)
22981 group_insns[i] = 0;
22987 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
22990 /* Is next_insn going to start a new group? */
22993 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
22994 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
22995 || (can_issue_more < issue_rate
22996     && insn_terminates_group_p (next_insn, previous_group)));
22997 if (*group_end && end)
23000 if (sched_verbose > 6)
23001 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
23002 *group_count, can_issue_more);
23003 return can_issue_more;
23006 return can_issue_more;
23009 /* This function tries to synch the dispatch groups that the compiler "sees"
23010 with the dispatch groups that the processor dispatcher is expected to
23011 form in practice. It tries to achieve this synchronization by forcing the
23012 estimated processor grouping on the compiler (as opposed to the function
23013 'pad_groups' which tries to force the scheduler's grouping on the processor).
23015 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
23016 examines the (estimated) dispatch groups that will be formed by the processor
23017 dispatcher. It marks these group boundaries to reflect the estimated
23018 processor grouping, overriding the grouping that the scheduler had marked.
23019 Depending on the value of the flag '-minsert-sched-nops' this function can
23020 force certain insns into separate groups or force a certain distance between
23021 them by inserting nops, for example, if there exists a "costly dependence"
23024 The function estimates the group boundaries that the processor will form as
23025 follows: It keeps track of how many vacant issue slots are available after
23026 each insn. A subsequent insn will start a new group if one of the following
23028 - no more vacant issue slots remain in the current dispatch group.
23029 - only the last issue slot, which is the branch slot, is vacant, but the next
23030 insn is not a branch.
23031 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
23032 which means that a cracked insn (which occupies two issue slots) can't be
23033 issued in this group.
23034 - less than 'issue_rate' slots are vacant, and the next insn always needs to
23035 start a new group. */
23038 redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
23040 rtx insn, next_insn;
23042 int can_issue_more;
23045 int group_count = 0;
23049 issue_rate = rs6000_issue_rate ();
23050 group_insns = XALLOCAVEC (rtx, issue_rate);
23051 for (i = 0; i < issue_rate; i++)
23053 group_insns[i] = 0;
23055 can_issue_more = issue_rate;
23057 insn = get_next_active_insn (prev_head_insn, tail);
23060 while (insn != NULL_RTX)
23062 slot = (issue_rate - can_issue_more);
23063 group_insns[slot] = insn;
23065 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
23066 if (insn_terminates_group_p (insn, current_group))
23067 can_issue_more = 0;
23069 next_insn = get_next_active_insn (insn, tail);
23070 if (next_insn == NULL_RTX)
23071 return group_count + 1;
23073 /* Is next_insn going to start a new group? */
23075 = (can_issue_more == 0
23076 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
23077 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
23078 || (can_issue_more < issue_rate
23079     && insn_terminates_group_p (next_insn, previous_group)));
23081 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
23082 next_insn, &group_end, can_issue_more,
23088 can_issue_more = 0;
23089 for (i = 0; i < issue_rate; i++)
23091 group_insns[i] = 0;
23095 if (GET_MODE (next_insn) == TImode && can_issue_more)
23096 PUT_MODE (next_insn, VOIDmode);
23097 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
23098 PUT_MODE (next_insn, TImode);
23101 if (can_issue_more == 0)
23102 can_issue_more = issue_rate;
23105 return group_count;
23108 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
23109 dispatch group boundaries that the scheduler had marked. Pad with nops
23110 any dispatch groups which have vacant issue slots, in order to force the
23111 scheduler's grouping on the processor dispatcher. The function
23112 returns the number of dispatch groups found. */
23115 pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
23117 rtx insn, next_insn;
23120 int can_issue_more;
23122 int group_count = 0;
23124 /* Initialize issue_rate. */
23125 issue_rate = rs6000_issue_rate ();
23126 can_issue_more = issue_rate;
23128 insn = get_next_active_insn (prev_head_insn, tail);
23129 next_insn = get_next_active_insn (insn, tail);
23131 while (insn != NULL_RTX)
23134 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
23136 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
23138 if (next_insn == NULL_RTX)
23143 /* If the scheduler had marked group termination at this location
23144 (between insn and next_insn), and neither insn nor next_insn will
23145 force group termination, pad the group with nops to force group
23148 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
23149 && !insn_terminates_group_p (insn, current_group)
23150 && !insn_terminates_group_p (next_insn, previous_group))
23152 if (!is_branch_slot_insn (next_insn))
23155 while (can_issue_more)
23158 emit_insn_before (nop, next_insn);
23163 can_issue_more = issue_rate;
23168 next_insn = get_next_active_insn (insn, tail);
23171 return group_count;
23174 /* We're beginning a new block. Initialize data structures as necessary. */
23177 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
23178 int sched_verbose ATTRIBUTE_UNUSED,
23179 int max_ready ATTRIBUTE_UNUSED)
23181 last_scheduled_insn = NULL_RTX;
23182 load_store_pendulum = 0;
23185 /* The following function is called at the end of scheduling a BB.
23186 After reload, it inserts nops to enforce insn group bundling. */
23189 rs6000_sched_finish (FILE *dump, int sched_verbose)
23194 fprintf (dump, "=== Finishing schedule.\n");
23196 if (reload_completed && rs6000_sched_groups)
23198 /* Do not run sched_finish hook when selective scheduling enabled. */
23199 if (sel_sched_p ())
23202 if (rs6000_sched_insert_nops == sched_finish_none)
23205 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
23206 n_groups = pad_groups (dump, sched_verbose,
23207 current_sched_info->prev_head,
23208 current_sched_info->next_tail);
23210 n_groups = redefine_groups (dump, sched_verbose,
23211 current_sched_info->prev_head,
23212 current_sched_info->next_tail);
23214 if (sched_verbose >= 6)
23216 fprintf (dump, "ngroups = %d\n", n_groups);
23217 print_rtl (dump, current_sched_info->prev_head);
23218 fprintf (dump, "Done finish_sched\n");
23223 struct _rs6000_sched_context
23225 short cached_can_issue_more;
23226 rtx last_scheduled_insn;
23227 int load_store_pendulum;
23230 typedef struct _rs6000_sched_context rs6000_sched_context_def;
23231 typedef rs6000_sched_context_def *rs6000_sched_context_t;
23233 /* Allocate storage for a new scheduling context. */
23235 rs6000_alloc_sched_context (void)
23237 return xmalloc (sizeof (rs6000_sched_context_def));
23240 /* If CLEAN_P is true, initialize _SC with clean data;
23241 otherwise initialize it from the global context. */
23243 rs6000_init_sched_context (void *_sc, bool clean_p)
23245 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
23249 sc->cached_can_issue_more = 0;
23250 sc->last_scheduled_insn = NULL_RTX;
23251 sc->load_store_pendulum = 0;
23255 sc->cached_can_issue_more = cached_can_issue_more;
23256 sc->last_scheduled_insn = last_scheduled_insn;
23257 sc->load_store_pendulum = load_store_pendulum;
23261 /* Sets the global scheduling context to the one pointed to by _SC. */
23263 rs6000_set_sched_context (void *_sc)
23265 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
23267 gcc_assert (sc != NULL);
23269 cached_can_issue_more = sc->cached_can_issue_more;
23270 last_scheduled_insn = sc->last_scheduled_insn;
23271 load_store_pendulum = sc->load_store_pendulum;
23276 rs6000_free_sched_context (void *_sc)
23278 gcc_assert (_sc != NULL);
23284 /* Length in units of the trampoline for entering a nested function. */
23287 rs6000_trampoline_size (void)
23291 switch (DEFAULT_ABI)
23294 gcc_unreachable ();
23297 ret = (TARGET_32BIT) ? 12 : 24;
23302 ret = (TARGET_32BIT) ? 40 : 48;
23309 /* Emit RTL insns to initialize the variable parts of a trampoline.
23310 FNADDR is an RTX for the address of the function's pure code.
23311 CXT is an RTX for the static chain value for the function. */
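/* Sketch of the AIX-style descriptor built below (illustrative; the
   emit_move_insn calls are authoritative).  With REGSIZE = 4 or 8:

	trampoline + 0*regsize:  code address, copied from FNADDR's descriptor
	trampoline + 1*regsize:  TOC pointer, copied from FNADDR's descriptor
	trampoline + 2*regsize:  CXT, the static chain value  */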
23314 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
23316 int regsize = (TARGET_32BIT) ? 4 : 8;
23317 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
23318 rtx ctx_reg = force_reg (Pmode, cxt);
23319 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
23321 switch (DEFAULT_ABI)
23324 gcc_unreachable ();
23326 /* Under AIX, just build the 3 word function descriptor */
23329 rtx fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
23330 rtx fn_reg = gen_reg_rtx (Pmode);
23331 rtx toc_reg = gen_reg_rtx (Pmode);
23333 /* Macro to shorten the code expansions below. */
23334 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
23336 m_tramp = replace_equiv_address (m_tramp, addr);
23338 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
23339 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
23340 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
23341 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
23342 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
23348 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
23351 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
23352 LCT_NORMAL, VOIDmode, 4,
23354 GEN_INT (rs6000_trampoline_size ()), SImode,
23362 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
23363 identifier as an argument, so the front end shouldn't look it up. */
23366 rs6000_attribute_takes_identifier_p (const_tree attr_id)
23368 return is_attribute_p ("altivec", attr_id);
23371 /* Handle the "altivec" attribute. The attribute may have
23372 arguments as follows:
23374 __attribute__((altivec(vector__)))
23375 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
23376 __attribute__((altivec(bool__))) (always followed by 'unsigned')
23378 and may appear more than once (e.g., 'vector bool char') in a
23379 given declaration. */
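/* Illustrative only: 'vector unsigned int' reaches this handler
   roughly as __attribute__ ((altivec (vector__))) unsigned int, and
   'vector bool int' additionally carries altivec (bool__); the exact
   expansion is performed by the front end.  */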
23382 rs6000_handle_altivec_attribute (tree *node,
23383 tree name ATTRIBUTE_UNUSED,
23385 int flags ATTRIBUTE_UNUSED,
23386 bool *no_add_attrs)
23388 tree type = *node, result = NULL_TREE;
23389 enum machine_mode mode;
23392 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
23393 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
23394 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
23397 while (POINTER_TYPE_P (type)
23398 || TREE_CODE (type) == FUNCTION_TYPE
23399 || TREE_CODE (type) == METHOD_TYPE
23400 || TREE_CODE (type) == ARRAY_TYPE)
23401 type = TREE_TYPE (type);
23403 mode = TYPE_MODE (type);
23405 /* Check for invalid AltiVec type qualifiers. */
23406 if (type == long_double_type_node)
23407 error ("use of %<long double%> in AltiVec types is invalid");
23408 else if (type == boolean_type_node)
23409 error ("use of boolean types in AltiVec types is invalid");
23410 else if (TREE_CODE (type) == COMPLEX_TYPE)
23411 error ("use of %<complex%> in AltiVec types is invalid");
23412 else if (DECIMAL_FLOAT_MODE_P (mode))
23413 error ("use of decimal floating point types in AltiVec types is invalid");
23414 else if (!TARGET_VSX)
23416 if (type == long_unsigned_type_node || type == long_integer_type_node)
23419 error ("use of %<long%> in AltiVec types is invalid for "
23420 "64-bit code without -mvsx");
23421 else if (rs6000_warn_altivec_long)
23422 warning (0, "use of %<long%> in AltiVec types is deprecated; "
23425 else if (type == long_long_unsigned_type_node
23426 || type == long_long_integer_type_node)
23427 error ("use of %<long long%> in AltiVec types is invalid without "
23429 else if (type == double_type_node)
23430 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
23433 switch (altivec_type)
23436 unsigned_p = TYPE_UNSIGNED (type);
23440 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
23443 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
23446 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
23449 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
23451 case SFmode: result = V4SF_type_node; break;
23452 case DFmode: result = V2DF_type_node; break;
23453 /* If the user says 'vector int bool', we may be handed the 'bool'
23454 attribute _before_ the 'vector' attribute, and so select the
23455 proper type in the 'b' case below. */
23456 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
23457 case V2DImode: case V2DFmode:
23465 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
23466 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
23467 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
23468 case QImode: case V16QImode: result = bool_V16QI_type_node;
23475 case V8HImode: result = pixel_V8HI_type_node;
23481 /* Propagate qualifiers attached to the element type
23482 onto the vector type. */
23483 if (result && result != type && TYPE_QUALS (type))
23484 result = build_qualified_type (result, TYPE_QUALS (type));
23486 *no_add_attrs = true; /* No need to hang on to the attribute. */
23489 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
23494 /* AltiVec defines four built-in scalar types that serve as vector
23495 elements; we must teach the compiler how to mangle them. */
23497 static const char *
23498 rs6000_mangle_type (const_tree type)
23500 type = TYPE_MAIN_VARIANT (type);
23502 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
23503 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
23506 if (type == bool_char_type_node) return "U6__boolc";
23507 if (type == bool_short_type_node) return "U6__bools";
23508 if (type == pixel_type_node) return "u7__pixel";
23509 if (type == bool_int_type_node) return "U6__booli";
23510 if (type == bool_long_type_node) return "U6__booll";
23512 /* Mangle IBM extended float long double as `g' (__float128) on
23513 powerpc*-linux where long-double-64 previously was the default. */
23514 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
23516 && TARGET_LONG_DOUBLE_128
23517 && !TARGET_IEEEQUAD)
23520 /* For all other types, use normal C++ mangling. */
23524 /* Handle a "longcall" or "shortcall" attribute; arguments as in
23525 struct attribute_spec.handler. */
23528 rs6000_handle_longcall_attribute (tree *node, tree name,
23529 tree args ATTRIBUTE_UNUSED,
23530 int flags ATTRIBUTE_UNUSED,
23531 bool *no_add_attrs)
23533 if (TREE_CODE (*node) != FUNCTION_TYPE
23534 && TREE_CODE (*node) != FIELD_DECL
23535 && TREE_CODE (*node) != TYPE_DECL)
23537 warning (OPT_Wattributes, "%qE attribute only applies to functions",
23539 *no_add_attrs = true;
23545 /* Set longcall attributes on all functions declared when
23546 rs6000_default_long_calls is true. */
23548 rs6000_set_default_type_attributes (tree type)
23550 if (rs6000_default_long_calls
23551 && (TREE_CODE (type) == FUNCTION_TYPE
23552 || TREE_CODE (type) == METHOD_TYPE))
23553 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
23555 TYPE_ATTRIBUTES (type));
23558 darwin_set_default_type_attributes (type);
23562 /* Return a reference suitable for calling a function with the
23563 longcall attribute. */
23566 rs6000_longcall_ref (rtx call_ref)
23568 const char *call_name;
23571 if (GET_CODE (call_ref) != SYMBOL_REF)
23574 /* System V adds '.' to the internal name, so skip the leading dots. */
23575 call_name = XSTR (call_ref, 0);
23576 if (*call_name == '.')
23578 while (*call_name == '.')
23581 node = get_identifier (call_name);
23582 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
23585 return force_reg (Pmode, call_ref);
23588 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
23589 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
23592 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
23593 struct attribute_spec.handler. */
23595 rs6000_handle_struct_attribute (tree *node, tree name,
23596 tree args ATTRIBUTE_UNUSED,
23597 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
23600 if (DECL_P (*node))
23602 if (TREE_CODE (*node) == TYPE_DECL)
23603 type = &TREE_TYPE (*node);
23608 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
23609 || TREE_CODE (*type) == UNION_TYPE)))
23611 warning (OPT_Wattributes, "%qE attribute ignored", name);
23612 *no_add_attrs = true;
23615 else if ((is_attribute_p ("ms_struct", name)
23616 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
23617 || ((is_attribute_p ("gcc_struct", name)
23618 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
23620 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
23622 *no_add_attrs = true;
23629 rs6000_ms_bitfield_layout_p (const_tree record_type)
23631 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
23632 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
23633 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
23636 #ifdef USING_ELFOS_H
23638 /* A get_unnamed_section callback, used for switching to toc_section. */
23641 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
23643 if (DEFAULT_ABI == ABI_AIX
23644 && TARGET_MINIMAL_TOC
23645 && !TARGET_RELOCATABLE)
23647 if (!toc_initialized)
23649 toc_initialized = 1;
23650 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
23651 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
23652 fprintf (asm_out_file, "\t.tc ");
23653 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
23654 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
23655 fprintf (asm_out_file, "\n");
23657 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
23658 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
23659 fprintf (asm_out_file, " = .+32768\n");
23662 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
23664 else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
23665 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
23668 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
23669 if (!toc_initialized)
23671 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
23672 fprintf (asm_out_file, " = .+32768\n");
23673 toc_initialized = 1;
23678 /* Implement TARGET_ASM_INIT_SECTIONS. */
23681 rs6000_elf_asm_init_sections (void)
23684 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
23687 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
23688 SDATA2_SECTION_ASM_OP);
23691 /* Implement TARGET_SELECT_RTX_SECTION. */
23694 rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
23695 unsigned HOST_WIDE_INT align)
23697 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
23698 return toc_section;
23700 return default_elf_select_rtx_section (mode, x, align);
23703 /* For a SYMBOL_REF, set generic flags and then perform some
23704 target-specific processing.
23706 When the AIX ABI is requested on a non-AIX system, replace the
23707 function name with the real name (with a leading .) rather than the
23708 function descriptor name. This saves a lot of overriding code to
23709 read the prefixes. */
23712 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
23714 default_encode_section_info (decl, rtl, first);
23717 && TREE_CODE (decl) == FUNCTION_DECL
23719 && DEFAULT_ABI == ABI_AIX)
23721 rtx sym_ref = XEXP (rtl, 0);
23722 size_t len = strlen (XSTR (sym_ref, 0));
23723 char *str = XALLOCAVEC (char, len + 2);
23725 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
23726 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
23731 compare_section_name (const char *section, const char *templ)
23735 len = strlen (templ);
23736 return (strncmp (section, templ, len) == 0
23737 && (section[len] == 0 || section[len] == '.'));
23741 rs6000_elf_in_small_data_p (const_tree decl)
23743 if (rs6000_sdata == SDATA_NONE)
23746 /* We want to merge strings, so we never consider them small data. */
23747 if (TREE_CODE (decl) == STRING_CST)
23750 /* Functions are never in the small data area. */
23751 if (TREE_CODE (decl) == FUNCTION_DECL)
23754 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
23756 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
23757 if (compare_section_name (section, ".sdata")
23758 || compare_section_name (section, ".sdata2")
23759 || compare_section_name (section, ".gnu.linkonce.s")
23760 || compare_section_name (section, ".sbss")
23761 || compare_section_name (section, ".sbss2")
23762 || compare_section_name (section, ".gnu.linkonce.sb")
23763 || strcmp (section, ".PPC.EMB.sdata0") == 0
23764 || strcmp (section, ".PPC.EMB.sbss0") == 0)
23769 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
23772 && (unsigned HOST_WIDE_INT) size <= g_switch_value
23773 /* If it's not public, and we're not going to reference it there,
23774 there's no need to put it in the small data section. */
23775 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
23782 #endif /* USING_ELFOS_H */
23784 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
23787 rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
23789 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
23792 /* Return a REG that occurs in ADDR with coefficient 1.
23793 ADDR can be effectively incremented by incrementing REG.
23795 r0 is special and we must not select it as an address
23796 register by this routine since our caller will try to
23797 increment the returned register via an "la" instruction. */
23800 find_addr_reg (rtx addr)
23802 while (GET_CODE (addr) == PLUS)
23804 if (GET_CODE (XEXP (addr, 0)) == REG
23805 && REGNO (XEXP (addr, 0)) != 0)
23806 addr = XEXP (addr, 0);
23807 else if (GET_CODE (XEXP (addr, 1)) == REG
23808 && REGNO (XEXP (addr, 1)) != 0)
23809 addr = XEXP (addr, 1);
23810 else if (CONSTANT_P (XEXP (addr, 0)))
23811 addr = XEXP (addr, 1);
23812 else if (CONSTANT_P (XEXP (addr, 1)))
23813 addr = XEXP (addr, 0);
23815 gcc_unreachable ();
23817 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
23822 rs6000_fatal_bad_address (rtx op)
23824 fatal_insn ("bad address", op);
23829 static tree branch_island_list = 0;
23831 /* Remember to generate a branch island for far calls to the given
23835 add_compiler_branch_island (tree label_name, tree function_name,
23838 tree branch_island = build_tree_list (function_name, label_name);
23839 TREE_TYPE (branch_island) = build_int_cst (NULL_TREE, line_number);
23840 TREE_CHAIN (branch_island) = branch_island_list;
23841 branch_island_list = branch_island;
23844 #define BRANCH_ISLAND_LABEL_NAME(BRANCH_ISLAND) TREE_VALUE (BRANCH_ISLAND)
23845 #define BRANCH_ISLAND_FUNCTION_NAME(BRANCH_ISLAND) TREE_PURPOSE (BRANCH_ISLAND)
23846 #define BRANCH_ISLAND_LINE_NUMBER(BRANCH_ISLAND) \
23847 TREE_INT_CST_LOW (TREE_TYPE (BRANCH_ISLAND))
23849 /* Generate far-jump branch islands for everything on the
23850 branch_island_list. Invoked immediately after the last instruction
23851 of the epilogue has been emitted; the branch-islands must be
23852 appended to, and contiguous with, the function body. Mach-O stubs
23853 are generated in machopic_output_stub(). */
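/* The PIC-flavoured island assembled by the strcat calls below looks
   roughly like this (label and target names are illustrative):

	L42:      mflr  r0
		  bcl   20,31,L42_pic
	L42_pic:  mflr  r11
		  addis r11,r11,ha16(foo - L42_pic)
		  mtlr  r0
		  addi  r12,r11,lo16(foo - L42_pic)
		  mtctr r12
		  bctr  */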
23856 macho_branch_islands (void)
23859 tree branch_island;
23861 for (branch_island = branch_island_list;
23863 branch_island = TREE_CHAIN (branch_island))
23865 const char *label =
23866 IDENTIFIER_POINTER (BRANCH_ISLAND_LABEL_NAME (branch_island));
23868 IDENTIFIER_POINTER (BRANCH_ISLAND_FUNCTION_NAME (branch_island));
23869 char name_buf[512];
23870 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
23871 if (name[0] == '*' || name[0] == '&')
23872 strcpy (name_buf, name+1);
23876 strcpy (name_buf+1, name);
23878 strcpy (tmp_buf, "\n");
23879 strcat (tmp_buf, label);
23880 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
23881 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
23882 dbxout_stabd (N_SLINE, BRANCH_ISLAND_LINE_NUMBER (branch_island));
23883 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
23886 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
23887 strcat (tmp_buf, label);
23888 strcat (tmp_buf, "_pic\n");
23889 strcat (tmp_buf, label);
23890 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
23892 strcat (tmp_buf, "\taddis r11,r11,ha16(");
23893 strcat (tmp_buf, name_buf);
23894 strcat (tmp_buf, " - ");
23895 strcat (tmp_buf, label);
23896 strcat (tmp_buf, "_pic)\n");
23898 strcat (tmp_buf, "\tmtlr r0\n");
23900 strcat (tmp_buf, "\taddi r12,r11,lo16(");
23901 strcat (tmp_buf, name_buf);
23902 strcat (tmp_buf, " - ");
23903 strcat (tmp_buf, label);
23904 strcat (tmp_buf, "_pic)\n");
23906 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
23910 strcat (tmp_buf, ":\nlis r12,hi16(");
23911 strcat (tmp_buf, name_buf);
23912 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
23913 strcat (tmp_buf, name_buf);
23914 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
23916 output_asm_insn (tmp_buf, 0);
23917 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
23918 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
23919 dbxout_stabd (N_SLINE, BRANCH_ISLAND_LINE_NUMBER (branch_island));
23920 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
23923 branch_island_list = 0;
23926 /* NO_PREVIOUS_DEF checks the linked list to see whether the function name
23927 is already there or not. */
23930 no_previous_def (tree function_name)
23932 tree branch_island;
23933 for (branch_island = branch_island_list;
23935 branch_island = TREE_CHAIN (branch_island))
23936 if (function_name == BRANCH_ISLAND_FUNCTION_NAME (branch_island))
23941 /* GET_PREV_LABEL gets the label name from the previous definition of
23945 get_prev_label (tree function_name)
23947 tree branch_island;
23948 for (branch_island = branch_island_list;
23950 branch_island = TREE_CHAIN (branch_island))
23951 if (function_name == BRANCH_ISLAND_FUNCTION_NAME (branch_island))
23952 return BRANCH_ISLAND_LABEL_NAME (branch_island);
23956 #ifndef DARWIN_LINKER_GENERATES_ISLANDS
23957 #define DARWIN_LINKER_GENERATES_ISLANDS 0
23960 /* KEXTs still need branch islands. */
23961 #define DARWIN_GENERATE_ISLANDS (!DARWIN_LINKER_GENERATES_ISLANDS \
23962 || flag_mkernel || flag_apple_kext)
23964 /* INSN is either a function call or a millicode call. It may have an
23965 unconditional jump in its delay slot.
23967 CALL_DEST is the routine we are calling. */
23970 output_call (rtx insn, rtx *operands, int dest_operand_number,
23971 int cookie_operand_number)
23973 static char buf[256];
23974 if (DARWIN_GENERATE_ISLANDS
23975 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
23976 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
23979 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
23981 if (no_previous_def (funname))
23983 rtx label_rtx = gen_label_rtx ();
23984 char *label_buf, temp_buf[256];
23985 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
23986 CODE_LABEL_NUMBER (label_rtx));
23987 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
23988 labelname = get_identifier (label_buf);
23989 add_compiler_branch_island (labelname, funname, insn_line (insn));
23992 labelname = get_prev_label (funname);
23994 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
23995 instruction will reach 'foo', otherwise link as 'bl L42'".
23996 "L42" should be a 'branch island', that will do a far jump to
23997 'foo'. Branch islands are generated in
23998 macho_branch_islands(). */
23999 sprintf (buf, "jbsr %%z%d,%.246s",
24000 dest_operand_number, IDENTIFIER_POINTER (labelname));
24003 sprintf (buf, "bl %%z%d", dest_operand_number);
24007 /* Generate PIC and indirect symbol stubs. */
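/* For the PIC case the fprintf calls below emit a stub of roughly
   this shape (32-bit shown; names and label spelling are
   illustrative, the code is authoritative):

	stub:   .indirect_symbol _foo
		mflr  r0
		bcl   20,31,"L00000000001$spb"
	"L00000000001$spb":
		mflr  r11
		addis r11,r11,ha16(lazy_ptr - "L00000000001$spb")
		mtlr  r0
		lwzu  r12,lo16(lazy_ptr - "L00000000001$spb")(r11)
		mtctr r12
		bctr  */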
24010 machopic_output_stub (FILE *file, const char *symb, const char *stub)
24012 unsigned int length;
24013 char *symbol_name, *lazy_ptr_name;
24014 char *local_label_0;
24015 static int label = 0;
24017 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
24018 symb = (*targetm.strip_name_encoding) (symb);
24021 length = strlen (symb);
24022 symbol_name = XALLOCAVEC (char, length + 32);
24023 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
24025 lazy_ptr_name = XALLOCAVEC (char, length + 32);
24026 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
24029 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
24031 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
24035 fprintf (file, "\t.align 5\n");
24037 fprintf (file, "%s:\n", stub);
24038 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
24041 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
24042 sprintf (local_label_0, "\"L%011d$spb\"", label);
24044 fprintf (file, "\tmflr r0\n");
24045 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
24046 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
24047 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
24048 lazy_ptr_name, local_label_0);
24049 fprintf (file, "\tmtlr r0\n");
24050 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
24051 (TARGET_64BIT ? "ldu" : "lwzu"),
24052 lazy_ptr_name, local_label_0);
24053 fprintf (file, "\tmtctr r12\n");
24054 fprintf (file, "\tbctr\n");
24058 fprintf (file, "\t.align 4\n");
24060 fprintf (file, "%s:\n", stub);
24061 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
24063 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
24064 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
24065 (TARGET_64BIT ? "ldu" : "lwzu"),
24067 fprintf (file, "\tmtctr r12\n");
24068 fprintf (file, "\tbctr\n");
24071 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
24072 fprintf (file, "%s:\n", lazy_ptr_name);
24073 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
24074 fprintf (file, "%sdyld_stub_binding_helper\n",
24075 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
24078 /* Legitimize PIC addresses. If the address is already
24079 position-independent, we return ORIG. Newly generated
24080 position-independent addresses go into a reg. This is REG if
24081 nonzero; otherwise we allocate register(s) as necessary. */
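/* Illustrative example (not from the original source): legitimizing
   (const (plus (symbol_ref "x") (const_int 8))) first legitimizes the
   symbol part and, because 8 satisfies SMALL_INT, returns
   plus_constant (base, 8); an offset outside that range is forced
   into a register, or the whole constant is pushed to the literal
   pool when no pseudo can be created.  */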
24083 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
24086 rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
24091 if (reg == NULL && ! reload_in_progress && ! reload_completed)
24092 reg = gen_reg_rtx (Pmode);
24094 if (GET_CODE (orig) == CONST)
24098 if (GET_CODE (XEXP (orig, 0)) == PLUS
24099 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
24102 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
24104 /* Use a different reg for the intermediate value, as
24105 it will be marked UNCHANGING. */
24106 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
24107 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
24110 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
24113 if (GET_CODE (offset) == CONST_INT)
24115 if (SMALL_INT (offset))
24116 return plus_constant (base, INTVAL (offset));
24117 else if (! reload_in_progress && ! reload_completed)
24118 offset = force_reg (Pmode, offset);
24121 rtx mem = force_const_mem (Pmode, orig);
24122 return machopic_legitimize_pic_address (mem, Pmode, reg);
24125 return gen_rtx_PLUS (Pmode, base, offset);
24128 /* Fall back on generic machopic code. */
24129 return machopic_legitimize_pic_address (orig, mode, reg);
24132 /* Output a .machine directive for the Darwin assembler, and call
24133 the generic start_file routine. */
24136 rs6000_darwin_file_start (void)
24138 static const struct
24144 { "ppc64", "ppc64", MASK_64BIT },
24145 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
24146 { "power4", "ppc970", 0 },
24147 { "G5", "ppc970", 0 },
24148 { "7450", "ppc7450", 0 },
24149 { "7400", "ppc7400", MASK_ALTIVEC },
24150 { "G4", "ppc7400", 0 },
24151 { "750", "ppc750", 0 },
24152 { "740", "ppc750", 0 },
24153 { "G3", "ppc750", 0 },
24154 { "604e", "ppc604e", 0 },
24155 { "604", "ppc604", 0 },
24156 { "603e", "ppc603", 0 },
24157 { "603", "ppc603", 0 },
24158 { "601", "ppc601", 0 },
24159 { NULL, "ppc", 0 } };
24160 const char *cpu_id = "";
24163 rs6000_file_start ();
24164 darwin_file_start ();
24166 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
24167 for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
24168 if (rs6000_select[i].set_arch_p && rs6000_select[i].string
24169 && rs6000_select[i].string[0] != '\0')
24170 cpu_id = rs6000_select[i].string;
24172 /* Look through the mapping array. Pick the first name that either
24173 matches the argument, has a bit set in IF_SET that is also set
24174 in the target flags, or has a NULL name. */
24177 while (mapping[i].arg != NULL
24178 && strcmp (mapping[i].arg, cpu_id) != 0
24179 && (mapping[i].if_set & target_flags) == 0)
24182 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
24185 #endif /* TARGET_MACHO */
24189 rs6000_elf_reloc_rw_mask (void)
24193 else if (DEFAULT_ABI == ABI_AIX)
24199 /* Record an element in the table of global constructors. SYMBOL is
24200 a SYMBOL_REF of the function to be called; PRIORITY is a number
24201 between 0 and MAX_INIT_PRIORITY.
24203 This differs from default_named_section_asm_out_constructor in
24204 that we have special handling for -mrelocatable. */
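/* Worked example (assuming MAX_INIT_PRIORITY is 65535): a constructor
   with priority 65400 goes into section ".ctors.00135", since
   65535 - 65400 = 135 and "%.5u" zero-pads the result to five
   digits.  */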
24207 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
24209 const char *section = ".ctors";
24212 if (priority != DEFAULT_INIT_PRIORITY)
24214 sprintf (buf, ".ctors.%.5u",
24215 /* Invert the numbering so the linker puts us in the proper
24216 order; constructors are run from right to left, and the
24217 linker sorts in increasing order. */
24218 MAX_INIT_PRIORITY - priority);
24222 switch_to_section (get_section (section, SECTION_WRITE, NULL));
24223 assemble_align (POINTER_SIZE);
24225 if (TARGET_RELOCATABLE)
24227 fputs ("\t.long (", asm_out_file);
24228 output_addr_const (asm_out_file, symbol);
24229 fputs (")@fixup\n", asm_out_file);
24232 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
24236 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
24238 const char *section = ".dtors";
24241 if (priority != DEFAULT_INIT_PRIORITY)
24243 sprintf (buf, ".dtors.%.5u",
24244 /* Invert the numbering so the linker puts us in the proper
24245 order; constructors are run from right to left, and the
24246 linker sorts in increasing order. */
24247 MAX_INIT_PRIORITY - priority);
24251 switch_to_section (get_section (section, SECTION_WRITE, NULL));
24252 assemble_align (POINTER_SIZE);
24254 if (TARGET_RELOCATABLE)
24256 fputs ("\t.long (", asm_out_file);
24257 output_addr_const (asm_out_file, symbol);
24258 fputs (")@fixup\n", asm_out_file);
24261 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
24265 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
24269 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
24270 ASM_OUTPUT_LABEL (file, name);
24271 fputs (DOUBLE_INT_ASM_OP, file);
24272 rs6000_output_function_entry (file, name);
24273 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
24276 fputs ("\t.size\t", file);
24277 assemble_name (file, name);
24278 fputs (",24\n\t.type\t.", file);
24279 assemble_name (file, name);
24280 fputs (",@function\n", file);
24281 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
24283 fputs ("\t.globl\t.", file);
24284 assemble_name (file, name);
24289 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
24290 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
24291 rs6000_output_function_entry (file, name);
24292 fputs (":\n", file);
24296 if (TARGET_RELOCATABLE
24297 && !TARGET_SECURE_PLT
24298 && (get_pool_size () != 0 || crtl->profile)
24303 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
24305 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
24306 fprintf (file, "\t.long ");
24307 assemble_name (file, buf);
24309 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
24310 assemble_name (file, buf);
24314 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
24315 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
24317 if (DEFAULT_ABI == ABI_AIX)
24319 const char *desc_name, *orig_name;
24321 orig_name = (*targetm.strip_name_encoding) (name);
24322 desc_name = orig_name;
24323 while (*desc_name == '.')
24326 if (TREE_PUBLIC (decl))
24327 fprintf (file, "\t.globl %s\n", desc_name);
24329 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24330 fprintf (file, "%s:\n", desc_name);
24331 fprintf (file, "\t.long %s\n", orig_name);
24332 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
24333 if (DEFAULT_ABI == ABI_AIX)
24334 fputs ("\t.long 0\n", file);
24335 fprintf (file, "\t.previous\n");
24337 ASM_OUTPUT_LABEL (file, name);
24341 rs6000_elf_end_indicate_exec_stack (void)
24344 file_end_indicate_exec_stack ();
24350 rs6000_xcoff_asm_output_anchor (rtx symbol)
24354 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
24355 SYMBOL_REF_BLOCK_OFFSET (symbol));
24356 ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
24360 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
24362 fputs (GLOBAL_ASM_OP, stream);
24363 RS6000_OUTPUT_BASENAME (stream, name);
24364 putc ('\n', stream);
24367 /* A get_unnamed_section callback, used for read-only sections. DIRECTIVE
24368 points to the section string variable. */
24371 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
24373 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
24374 *(const char *const *) directive,
24375 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
24378 /* Likewise for read-write sections. */
24381 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
24383 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
24384 *(const char *const *) directive,
24385 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
24388 /* A get_unnamed_section callback, used for switching to toc_section. */
24391 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
24393 if (TARGET_MINIMAL_TOC)
24395 /* toc_section is always selected at least once from
24396 rs6000_xcoff_file_start, so this is guaranteed to
24397 always be defined once and only once in each file. */
24398 if (!toc_initialized)
24400 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
24401 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
24402 toc_initialized = 1;
24404 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
24405 (TARGET_32BIT ? "" : ",3"));
24408 fputs ("\t.toc\n", asm_out_file);
24411 /* Implement TARGET_ASM_INIT_SECTIONS. */
24414 rs6000_xcoff_asm_init_sections (void)
24416 read_only_data_section
24417 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
24418 &xcoff_read_only_section_name);
24420 private_data_section
24421 = get_unnamed_section (SECTION_WRITE,
24422 rs6000_xcoff_output_readwrite_section_asm_op,
24423 &xcoff_private_data_section_name);
24425 read_only_private_data_section
24426 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
24427 &xcoff_private_data_section_name);
24430 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
24432 readonly_data_section = read_only_data_section;
24433 exception_section = data_section;
24437 rs6000_xcoff_reloc_rw_mask (void)
24443 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
24444 tree decl ATTRIBUTE_UNUSED)
24447 static const char * const suffix[3] = { "PR", "RO", "RW" };
24449 if (flags & SECTION_CODE)
24451 else if (flags & SECTION_WRITE)
24456 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
24457 (flags & SECTION_CODE) ? "." : "",
24458 name, suffix[smclass], flags & SECTION_ENTSIZE);
24462 rs6000_xcoff_select_section (tree decl, int reloc,
24463 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
24465 if (decl_readonly_section (decl, reloc))
24467 if (TREE_PUBLIC (decl))
24468 return read_only_data_section;
24470 return read_only_private_data_section;
24474 if (TREE_PUBLIC (decl))
24475 return data_section;
24477 return private_data_section;
24482 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
24486 /* Use select_section for private and uninitialized data. */
24487 if (!TREE_PUBLIC (decl)
24488 || DECL_COMMON (decl)
24489 || DECL_INITIAL (decl) == NULL_TREE
24490 || DECL_INITIAL (decl) == error_mark_node
24491 || (flag_zero_initialized_in_bss
24492 && initializer_zerop (DECL_INITIAL (decl))))
24495 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
24496 name = (*targetm.strip_name_encoding) (name);
24497 DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
24500 /* Select section for constant in constant pool.
24502 On RS/6000, all constants are in the private read-only data area.
24503 However, if this is being placed in the TOC it must be output as a
24507 rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
24508 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
24510 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
24511 return toc_section;
24513 return read_only_private_data_section;
24516 /* Remove any trailing [DS] or the like from the symbol name. */
24518 static const char *
24519 rs6000_xcoff_strip_name_encoding (const char *name)
24524 len = strlen (name);
24525 if (name[len - 1] == ']')
24526 return ggc_alloc_string (name, len - 4);
24531 /* Section attributes. AIX is always PIC. */
24533 static unsigned int
24534 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
24536 unsigned int align;
24537 unsigned int flags = default_section_type_flags (decl, name, reloc);
24539 /* Align to at least UNIT size. */
24540 if (flags & SECTION_CODE)
24541 align = MIN_UNITS_PER_WORD;
24543 /* Increase alignment of large objects if not already stricter. */
24544 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
24545 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
24546 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
24548 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
24551 /* Output at beginning of assembler file.
24553 Initialize the section names for the RS/6000 at this point.
24555 Specify filename, including full path, to assembler.
24557 We want to go into the TOC section so at least one .toc will be emitted.
24558 Also, in order to output proper .bs/.es pairs, we need at least one static
24559 [RW] section emitted.
24561 Finally, declare mcount when profiling to make the assembler happy. */
24564 rs6000_xcoff_file_start (void)
24566 rs6000_gen_section_name (&xcoff_bss_section_name,
24567 main_input_filename, ".bss_");
24568 rs6000_gen_section_name (&xcoff_private_data_section_name,
24569 main_input_filename, ".rw_");
24570 rs6000_gen_section_name (&xcoff_read_only_section_name,
24571 main_input_filename, ".ro_");
24573 fputs ("\t.file\t", asm_out_file);
24574 output_quoted_string (asm_out_file, main_input_filename);
24575 fputc ('\n', asm_out_file);
24576 if (write_symbols != NO_DEBUG)
24577 switch_to_section (private_data_section);
24578 switch_to_section (text_section);
24580 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
24581 rs6000_file_start ();
24584 /* Output at end of assembler file.
24585 On the RS/6000, referencing data should automatically pull in text. */
24588 rs6000_xcoff_file_end (void)
24590 switch_to_section (text_section);
24591 fputs ("_section_.text:\n", asm_out_file);
24592 switch_to_section (data_section);
24593 fputs (TARGET_32BIT
24594 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
24597 #endif /* TARGET_XCOFF */
24599 /* Compute a (partial) cost for rtx X. Return true if the complete
24600 cost has been computed, and false if subexpressions should be
24601 scanned. In either case, *TOTAL contains the cost result. */
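/* For example (a sketch of the convention, not an exhaustive list):
   a CONST_INT that satisfies the 'I' constraint and appears as the
   second operand of a PLUS is costed as free, since it folds into an
   add-immediate instruction, while constants that must be built
   separately are charged one or more insns below.  */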
24604 rs6000_rtx_costs (rtx x, int code, int outer_code, int *total,
24607 enum machine_mode mode = GET_MODE (x);
24611 /* On the RS/6000, if it is valid in the insn, it is free. */
24613 if (((outer_code == SET
24614 || outer_code == PLUS
24615 || outer_code == MINUS)
24616 && (satisfies_constraint_I (x)
24617 || satisfies_constraint_L (x)))
24618 || (outer_code == AND
24619 && (satisfies_constraint_K (x)
24621 ? satisfies_constraint_L (x)
24622 : satisfies_constraint_J (x))
24623 || mask_operand (x, mode)
24625 && mask64_operand (x, DImode))))
24626 || ((outer_code == IOR || outer_code == XOR)
24627 && (satisfies_constraint_K (x)
24629 ? satisfies_constraint_L (x)
24630 : satisfies_constraint_J (x))))
24631 || outer_code == ASHIFT
24632 || outer_code == ASHIFTRT
24633 || outer_code == LSHIFTRT
24634 || outer_code == ROTATE
24635 || outer_code == ROTATERT
24636 || outer_code == ZERO_EXTRACT
24637 || (outer_code == MULT
24638 && satisfies_constraint_I (x))
24639 || ((outer_code == DIV || outer_code == UDIV
24640 || outer_code == MOD || outer_code == UMOD)
24641 && exact_log2 (INTVAL (x)) >= 0)
24642 || (outer_code == COMPARE
24643 && (satisfies_constraint_I (x)
24644 || satisfies_constraint_K (x)))
24645 || (outer_code == EQ
24646 && (satisfies_constraint_I (x)
24647 || satisfies_constraint_K (x)
24649 ? satisfies_constraint_L (x)
24650 : satisfies_constraint_J (x))))
24651 || (outer_code == GTU
24652 && satisfies_constraint_I (x))
24653 || (outer_code == LTU
24654 && satisfies_constraint_P (x)))
24659 else if ((outer_code == PLUS
24660 && reg_or_add_cint_operand (x, VOIDmode))
24661 || (outer_code == MINUS
24662 && reg_or_sub_cint_operand (x, VOIDmode))
24663 || ((outer_code == SET
24664 || outer_code == IOR
24665 || outer_code == XOR)
24667 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
24669 *total = COSTS_N_INSNS (1);
24675 if (mode == DImode && code == CONST_DOUBLE)
24677 if ((outer_code == IOR || outer_code == XOR)
24678 && CONST_DOUBLE_HIGH (x) == 0
24679 && (CONST_DOUBLE_LOW (x)
24680 & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)
24685 else if ((outer_code == AND && and64_2_operand (x, DImode))
24686 || ((outer_code == SET
24687 || outer_code == IOR
24688 || outer_code == XOR)
24689 && CONST_DOUBLE_HIGH (x) == 0))
24691 *total = COSTS_N_INSNS (1);
24701 /* When optimizing for size, MEM should be slightly more expensive
24702 than generating an address, e.g., (plus (reg) (const)).
24703 L1 cache latency is about two instructions. */
24704 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
24712 if (mode == DFmode)
24714 if (GET_CODE (XEXP (x, 0)) == MULT)
24716 /* FNMA accounted in outer NEG. */
24717 if (outer_code == NEG)
24718 *total = rs6000_cost->dmul - rs6000_cost->fp;
24720 *total = rs6000_cost->dmul;
24723 *total = rs6000_cost->fp;
24725 else if (mode == SFmode)
24727 /* FNMA accounted in outer NEG. */
24728 if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
24731 *total = rs6000_cost->fp;
24734 *total = COSTS_N_INSNS (1);
24738 if (mode == DFmode)
24740 if (GET_CODE (XEXP (x, 0)) == MULT
24741 || GET_CODE (XEXP (x, 1)) == MULT)
24743 /* FNMA accounted in outer NEG. */
24744 if (outer_code == NEG)
24745 *total = rs6000_cost->dmul - rs6000_cost->fp;
24747 *total = rs6000_cost->dmul;
24750 *total = rs6000_cost->fp;
24752 else if (mode == SFmode)
24754 /* FNMA accounted in outer NEG. */
24755 if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
24758 *total = rs6000_cost->fp;
24761 *total = COSTS_N_INSNS (1);
24765 if (GET_CODE (XEXP (x, 1)) == CONST_INT
24766 && satisfies_constraint_I (XEXP (x, 1)))
24768 if (INTVAL (XEXP (x, 1)) >= -256
24769 && INTVAL (XEXP (x, 1)) <= 255)
24770 *total = rs6000_cost->mulsi_const9;
24772 *total = rs6000_cost->mulsi_const;
24774 /* FMA accounted in outer PLUS/MINUS. */
24775 else if ((mode == DFmode || mode == SFmode)
24776 && (outer_code == PLUS || outer_code == MINUS))
24778 else if (mode == DFmode)
24779 *total = rs6000_cost->dmul;
24780 else if (mode == SFmode)
24781 *total = rs6000_cost->fp;
24782 else if (mode == DImode)
24783 *total = rs6000_cost->muldi;
24785 *total = rs6000_cost->mulsi;
24790 if (FLOAT_MODE_P (mode))
24792 *total = mode == DFmode ? rs6000_cost->ddiv
24793 : rs6000_cost->sdiv;
24800 if (GET_CODE (XEXP (x, 1)) == CONST_INT
24801 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
24803 if (code == DIV || code == MOD)
24805 *total = COSTS_N_INSNS (2);
24808 *total = COSTS_N_INSNS (1);
24812 if (GET_MODE (XEXP (x, 1)) == DImode)
24813 *total = rs6000_cost->divdi;
24815 *total = rs6000_cost->divsi;
24817 /* Add in shift and subtract for MOD. */
24818 if (code == MOD || code == UMOD)
24819 *total += COSTS_N_INSNS (2);
24824 *total = COSTS_N_INSNS (4);
24828 *total = COSTS_N_INSNS (6);
24832 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
24844 *total = COSTS_N_INSNS (1);
24852 /* Handle mul_highpart. */
24853 if (outer_code == TRUNCATE
24854 && GET_CODE (XEXP (x, 0)) == MULT)
24856 if (mode == DImode)
24857 *total = rs6000_cost->muldi;
24859 *total = rs6000_cost->mulsi;
24862 else if (outer_code == AND)
24865 *total = COSTS_N_INSNS (1);
24870 if (GET_CODE (XEXP (x, 0)) == MEM)
24873 *total = COSTS_N_INSNS (1);
24879 if (!FLOAT_MODE_P (mode))
24881 *total = COSTS_N_INSNS (1);
24887 case UNSIGNED_FLOAT:
24890 case FLOAT_TRUNCATE:
24891 *total = rs6000_cost->fp;
24895 if (mode == DFmode)
24898 *total = rs6000_cost->fp;
24902 switch (XINT (x, 1))
24905 *total = rs6000_cost->fp;
24917 *total = COSTS_N_INSNS (1);
24920 else if (FLOAT_MODE_P (mode)
24921 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
24923 *total = rs6000_cost->fp;
24931 /* Carry bit requires mode == Pmode.
24932 NEG or PLUS already counted so only add one. */
24934 && (outer_code == NEG || outer_code == PLUS))
24936 *total = COSTS_N_INSNS (1);
24939 if (outer_code == SET)
24941 if (XEXP (x, 1) == const0_rtx)
24943 if (TARGET_ISEL && !TARGET_MFCRF)
24944 *total = COSTS_N_INSNS (8);
24946 *total = COSTS_N_INSNS (2);
24949 else if (mode == Pmode)
24951 *total = COSTS_N_INSNS (3);
24960 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
24962 if (TARGET_ISEL && !TARGET_MFCRF)
24963 *total = COSTS_N_INSNS (8);
24965 *total = COSTS_N_INSNS (2);
24969 if (outer_code == COMPARE)
24983 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
24986 rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int *total,
24989 bool ret = rs6000_rtx_costs (x, code, outer_code, total, speed);
24992 "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
24993 "total = %d, speed = %s, x:\n",
24994 ret ? "complete" : "scan inner",
24995 GET_RTX_NAME (code),
24996 GET_RTX_NAME (outer_code),
24998 speed ? "true" : "false");
25005 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
25008 rs6000_debug_address_cost (rtx x, bool speed)
25010 int ret = TARGET_ADDRESS_COST (x, speed);
25012 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
25013 ret, speed ? "true" : "false");
25020 /* A C expression returning the cost of moving data from a register of class
25021 CLASS1 to one of CLASS2. */
25024 rs6000_register_move_cost (enum machine_mode mode,
25025 enum reg_class from, enum reg_class to)
25029 /* Moves from/to GENERAL_REGS. */
25030 if (reg_classes_intersect_p (to, GENERAL_REGS)
25031 || reg_classes_intersect_p (from, GENERAL_REGS))
25033 if (! reg_classes_intersect_p (to, GENERAL_REGS))
25036 if (from == FLOAT_REGS || from == ALTIVEC_REGS || from == VSX_REGS)
25037 ret = (rs6000_memory_move_cost (mode, from, 0)
25038 + rs6000_memory_move_cost (mode, GENERAL_REGS, 0));
25040 /* It's more expensive to move CR_REGS than CR0_REGS because of the
25042 else if (from == CR_REGS)
25045 /* Power6 has slower LR/CTR moves so make them more expensive than
25046 memory in order to bias spills to memory.  */
25047 else if (rs6000_cpu == PROCESSOR_POWER6
25048 && reg_classes_intersect_p (from, LINK_OR_CTR_REGS))
25049 ret = 6 * hard_regno_nregs[0][mode];
25052 /* A move will cost one instruction per GPR moved. */
25053 ret = 2 * hard_regno_nregs[0][mode];
25056 /* If we have VSX, we can easily move between FPR or Altivec registers. */
25057 else if (VECTOR_UNIT_VSX_P (mode)
25058 && reg_classes_intersect_p (to, VSX_REGS)
25059 && reg_classes_intersect_p (from, VSX_REGS))
25060 ret = 2 * hard_regno_nregs[32][mode];
25062 /* Moving between two similar registers is just one instruction. */
25063 else if (reg_classes_intersect_p (to, from))
25064 ret = (mode == TFmode || mode == TDmode) ? 4 : 2;
25066 /* Everything else has to go through GENERAL_REGS. */
25068 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
25069 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
25071 if (TARGET_DEBUG_COST)
25073 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
25074 ret, GET_MODE_NAME (mode), reg_class_names[from],
25075 reg_class_names[to]);
25080 /* A C expression returning the cost of moving data of MODE from a register to
25084 rs6000_memory_move_cost (enum machine_mode mode, enum reg_class rclass,
25085 int in ATTRIBUTE_UNUSED)
25089 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
25090 ret = 4 * hard_regno_nregs[0][mode];
25091 else if (reg_classes_intersect_p (rclass, FLOAT_REGS))
25092 ret = 4 * hard_regno_nregs[32][mode];
25093 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
25094 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
25096 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
25098 if (TARGET_DEBUG_COST)
25100 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
25101 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
25106 /* Returns a code for a target-specific builtin that implements
25107 the reciprocal of the function, or NULL_TREE if not available. */
25110 rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
25111 bool sqrt ATTRIBUTE_UNUSED)
25113 if (! (TARGET_RECIP && TARGET_PPC_GFXOPT && !optimize_size
25114 && flag_finite_math_only && !flag_trapping_math
25115 && flag_unsafe_math_optimizations))
25123 case BUILT_IN_SQRTF:
25124 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];
25131 /* Newton-Raphson approximation of single-precision floating point divide n/d.
25132 Assumes no trapping math and finite arguments. */
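/* Sketch of the math (the emitted RTL below is authoritative):
   starting from the hardware estimate x0 ~ 1/d, compute
	e0 = 1 - d*x0,  e1 = e0 + e0*e0,  y1 = x0 + e1*x0,
   so y1 approximates 1/d to single precision; then form the quotient
	u0 = n*y1,  v0 = n - d*u0,  dst = u0 + v0*y1,
   where v0 corrects the residual error of u0.  */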
25135 rs6000_emit_swdivsf (rtx dst, rtx n, rtx d)
25137 rtx x0, e0, e1, y1, u0, v0, one;
25139 x0 = gen_reg_rtx (SFmode);
25140 e0 = gen_reg_rtx (SFmode);
25141 e1 = gen_reg_rtx (SFmode);
25142 y1 = gen_reg_rtx (SFmode);
25143 u0 = gen_reg_rtx (SFmode);
25144 v0 = gen_reg_rtx (SFmode);
25145 one = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, SFmode));
25147 /* x0 = 1./d estimate */
25148 emit_insn (gen_rtx_SET (VOIDmode, x0,
25149 gen_rtx_UNSPEC (SFmode, gen_rtvec (1, d),
25151 /* e0 = 1. - d * x0 */
25152 emit_insn (gen_rtx_SET (VOIDmode, e0,
25153 gen_rtx_MINUS (SFmode, one,
25154 gen_rtx_MULT (SFmode, d, x0))));
25155 /* e1 = e0 + e0 * e0 */
25156 emit_insn (gen_rtx_SET (VOIDmode, e1,
25157 gen_rtx_PLUS (SFmode,
25158 gen_rtx_MULT (SFmode, e0, e0), e0)));
25159 /* y1 = x0 + e1 * x0 */
25160 emit_insn (gen_rtx_SET (VOIDmode, y1,
25161 gen_rtx_PLUS (SFmode,
25162 gen_rtx_MULT (SFmode, e1, x0), x0)));
25164 emit_insn (gen_rtx_SET (VOIDmode, u0,
25165 gen_rtx_MULT (SFmode, n, y1)));
25166 /* v0 = n - d * u0 */
25167 emit_insn (gen_rtx_SET (VOIDmode, v0,
25168 gen_rtx_MINUS (SFmode, n,
25169 gen_rtx_MULT (SFmode, d, u0))));
25170 /* dst = u0 + v0 * y1 */
25171 emit_insn (gen_rtx_SET (VOIDmode, dst,
25172 gen_rtx_PLUS (SFmode,
25173 gen_rtx_MULT (SFmode, v0, y1), u0)));
25176 /* Newton-Raphson approximation of double-precision floating point divide n/d.
25177 Assumes no trapping math and finite arguments. */
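/* Same scheme as rs6000_emit_swdivsf above, but with three
   refinement steps so the reciprocal reaches double precision:
	e0 = 1 - d*x0,  y1 = x0 + e0*x0,
	e1 = e0*e0,     y2 = y1 + e1*y1,
	e2 = e1*e1,     y3 = y2 + e2*y2,
   then u0 = n*y3, v0 = n - d*u0 and dst = u0 + v0*y3.  */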
25180 rs6000_emit_swdivdf (rtx dst, rtx n, rtx d)
25182 rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;
25184 x0 = gen_reg_rtx (DFmode);
25185 e0 = gen_reg_rtx (DFmode);
25186 e1 = gen_reg_rtx (DFmode);
25187 e2 = gen_reg_rtx (DFmode);
25188 y1 = gen_reg_rtx (DFmode);
25189 y2 = gen_reg_rtx (DFmode);
25190 y3 = gen_reg_rtx (DFmode);
25191 u0 = gen_reg_rtx (DFmode);
25192 v0 = gen_reg_rtx (DFmode);
25193 one = force_reg (DFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, DFmode));
25195 /* x0 = 1./d estimate */
25196 emit_insn (gen_rtx_SET (VOIDmode, x0,
25197 gen_rtx_UNSPEC (DFmode, gen_rtvec (1, d),
25199 /* e0 = 1. - d * x0 */
25200 emit_insn (gen_rtx_SET (VOIDmode, e0,
25201 gen_rtx_MINUS (DFmode, one,
25202 gen_rtx_MULT (DFmode, d, x0))));
25203 /* y1 = x0 + e0 * x0 */
25204 emit_insn (gen_rtx_SET (VOIDmode, y1,
25205 gen_rtx_PLUS (DFmode,
25206 gen_rtx_MULT (DFmode, e0, x0), x0)));
25208 emit_insn (gen_rtx_SET (VOIDmode, e1,
25209 gen_rtx_MULT (DFmode, e0, e0)));
25210 /* y2 = y1 + e1 * y1 */
25211 emit_insn (gen_rtx_SET (VOIDmode, y2,
25212 gen_rtx_PLUS (DFmode,
25213 gen_rtx_MULT (DFmode, e1, y1), y1)));
25215 emit_insn (gen_rtx_SET (VOIDmode, e2,
25216 gen_rtx_MULT (DFmode, e1, e1)));
25217 /* y3 = y2 + e2 * y2 */
25218 emit_insn (gen_rtx_SET (VOIDmode, y3,
25219 gen_rtx_PLUS (DFmode,
25220 gen_rtx_MULT (DFmode, e2, y2), y2)));
25222 emit_insn (gen_rtx_SET (VOIDmode, u0,
25223 gen_rtx_MULT (DFmode, n, y3)));
25224 /* v0 = n - d * u0 */
25225 emit_insn (gen_rtx_SET (VOIDmode, v0,
25226 gen_rtx_MINUS (DFmode, n,
25227 gen_rtx_MULT (DFmode, d, u0))));
25228 /* dst = u0 + v0 * y3 */
25229 emit_insn (gen_rtx_SET (VOIDmode, dst,
25230 gen_rtx_PLUS (DFmode,
25231 gen_rtx_MULT (DFmode, v0, y3), u0)));
25235 /* Newton-Raphson approximation of single-precision floating point rsqrt.
25236 Assumes no trapping math and finite arguments. */
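/* Sketch of the math (the emitted RTL below is authoritative): with
   y1 = 0.5*src, each Newton-Raphson step for 1/sqrt(src) computes
	x_{i+1} = x_i * (1.5 - y1 * x_i * x_i);
   three such steps refine the hardware estimate x0 to single
   precision.  The initial src*src == src test branches around the
   refinement for the special inputs 0.0, 1.0, NaN and Inf.  */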
25239 rs6000_emit_swrsqrtsf (rtx dst, rtx src)
25241 rtx x0, x1, x2, y1, u0, u1, u2, v0, v1, v2, t0,
25242 half, one, halfthree, c1, cond, label;
25244 x0 = gen_reg_rtx (SFmode);
25245 x1 = gen_reg_rtx (SFmode);
25246 x2 = gen_reg_rtx (SFmode);
25247 y1 = gen_reg_rtx (SFmode);
25248 u0 = gen_reg_rtx (SFmode);
25249 u1 = gen_reg_rtx (SFmode);
25250 u2 = gen_reg_rtx (SFmode);
25251 v0 = gen_reg_rtx (SFmode);
25252 v1 = gen_reg_rtx (SFmode);
25253 v2 = gen_reg_rtx (SFmode);
25254 t0 = gen_reg_rtx (SFmode);
25255 halfthree = gen_reg_rtx (SFmode);
25256 cond = gen_rtx_REG (CCFPmode, CR1_REGNO);
25257 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
25259 /* check 0.0, 1.0, NaN, Inf by testing src * src = src */
25260 emit_insn (gen_rtx_SET (VOIDmode, t0,
25261 gen_rtx_MULT (SFmode, src, src)));
25263 emit_insn (gen_rtx_SET (VOIDmode, cond,
25264 gen_rtx_COMPARE (CCFPmode, t0, src)));
25265 c1 = gen_rtx_EQ (VOIDmode, cond, const0_rtx);
25266 emit_unlikely_jump (c1, label);
25268 half = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconsthalf, SFmode));
25269 one = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, SFmode));
25271 /* halfthree = 1.5 = 1.0 + 0.5 */
25272 emit_insn (gen_rtx_SET (VOIDmode, halfthree,
25273 gen_rtx_PLUS (SFmode, one, half)));
25275 /* x0 = rsqrt estimate */
25276 emit_insn (gen_rtx_SET (VOIDmode, x0,
25277 gen_rtx_UNSPEC (SFmode, gen_rtvec (1, src),
25280 /* y1 = 0.5 * src = 1.5 * src - src -> fewer constants */
25281 emit_insn (gen_rtx_SET (VOIDmode, y1,
25282 gen_rtx_MINUS (SFmode,
25283 gen_rtx_MULT (SFmode, src, halfthree),
25286 /* x1 = x0 * (1.5 - y1 * (x0 * x0)) */
25287 emit_insn (gen_rtx_SET (VOIDmode, u0,
25288 gen_rtx_MULT (SFmode, x0, x0)));
25289 emit_insn (gen_rtx_SET (VOIDmode, v0,
25290 gen_rtx_MINUS (SFmode,
25292 gen_rtx_MULT (SFmode, y1, u0))));
25293 emit_insn (gen_rtx_SET (VOIDmode, x1,
25294 gen_rtx_MULT (SFmode, x0, v0)));
25296 /* x2 = x1 * (1.5 - y1 * (x1 * x1)) */
25297 emit_insn (gen_rtx_SET (VOIDmode, u1,
25298 gen_rtx_MULT (SFmode, x1, x1)));
25299 emit_insn (gen_rtx_SET (VOIDmode, v1,
25300 gen_rtx_MINUS (SFmode,
25302 gen_rtx_MULT (SFmode, y1, u1))));
25303 emit_insn (gen_rtx_SET (VOIDmode, x2,
25304 gen_rtx_MULT (SFmode, x1, v1)));
25306 /* dst = x2 * (1.5 - y1 * (x2 * x2)) */
25307 emit_insn (gen_rtx_SET (VOIDmode, u2,
25308 gen_rtx_MULT (SFmode, x2, x2)));
25309 emit_insn (gen_rtx_SET (VOIDmode, v2,
25310 gen_rtx_MINUS (SFmode,
25312 gen_rtx_MULT (SFmode, y1, u2))));
25313 emit_insn (gen_rtx_SET (VOIDmode, dst,
25314 gen_rtx_MULT (SFmode, x2, v2)));
25316 emit_label (XEXP (label, 0));
25319 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
25320 (Power7) targets. DST is the target, and SRC is the argument operand. */
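/* Worked example for the fallback path (illustrative only): for
   src = 0x01020304, popcntb yields the per-byte counts 0x01010201;
   multiplying by 0x01010101 accumulates all byte counts into the
   most significant byte (1+1+2+1 = 5 here), and the final right
   shift by 24 (56 for DImode) extracts that sum as the result.  */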
25323 rs6000_emit_popcount (rtx dst, rtx src)
25325 enum machine_mode mode = GET_MODE (dst);
25328 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
25329 if (TARGET_POPCNTD)
25331 if (mode == SImode)
25332 emit_insn (gen_popcntwsi2 (dst, src));
25334 emit_insn (gen_popcntddi2 (dst, src));
25338 tmp1 = gen_reg_rtx (mode);
25340 if (mode == SImode)
25342 emit_insn (gen_popcntbsi2 (tmp1, src));
25343 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
25345 tmp2 = force_reg (SImode, tmp2);
25346 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
25350 emit_insn (gen_popcntbdi2 (tmp1, src));
25351 tmp2 = expand_mult (DImode, tmp1,
25352 GEN_INT ((HOST_WIDE_INT)
25353 0x01010101 << 32 | 0x01010101),
25355 tmp2 = force_reg (DImode, tmp2);
25356 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
25361 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
25362 target, and SRC is the argument operand. */
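/* Sketch of the shift/xor fallback (illustrative only): after
   popcntb, the per-byte counts are xor-folded in halves (by 16 and
   then 8 bits for SImode; 32, 16 and 8 for DImode), so the low byte
   ends up holding the xor of all byte counts; its low bit, extracted
   by the final AND with 1, is the parity of SRC.  */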
25365 rs6000_emit_parity (rtx dst, rtx src)
25367 enum machine_mode mode = GET_MODE (dst);
25370 tmp = gen_reg_rtx (mode);
25371 if (mode == SImode)
25373 /* Is mult+shift >= shift+xor+shift+xor? */
25374 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
25376 rtx tmp1, tmp2, tmp3, tmp4;
25378 tmp1 = gen_reg_rtx (SImode);
25379 emit_insn (gen_popcntbsi2 (tmp1, src));
25381 tmp2 = gen_reg_rtx (SImode);
25382 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
25383 tmp3 = gen_reg_rtx (SImode);
25384 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
25386 tmp4 = gen_reg_rtx (SImode);
25387 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
25388 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
25391 rs6000_emit_popcount (tmp, src);
25392 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
25396 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
25397 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
25399 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
25401 tmp1 = gen_reg_rtx (DImode);
25402 emit_insn (gen_popcntbdi2 (tmp1, src));
25404 tmp2 = gen_reg_rtx (DImode);
25405 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
25406 tmp3 = gen_reg_rtx (DImode);
25407 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
25409 tmp4 = gen_reg_rtx (DImode);
25410 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
25411 tmp5 = gen_reg_rtx (DImode);
25412 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
25414 tmp6 = gen_reg_rtx (DImode);
25415 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
25416 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
25419 rs6000_emit_popcount (tmp, src);
25420 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
25424 /* Return an RTX representing where to find the function value of a
25425 function returning MODE. */
25427 rs6000_complex_function_value (enum machine_mode mode)
25429 unsigned int regno;
25431 enum machine_mode inner = GET_MODE_INNER (mode);
25432 unsigned int inner_bytes = GET_MODE_SIZE (inner);
25434 if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
25435 regno = FP_ARG_RETURN;
25438 regno = GP_ARG_RETURN;
25440 /* 32-bit is OK since it'll go in r3/r4. */
25441 if (TARGET_32BIT && inner_bytes >= 4)
25442 return gen_rtx_REG (mode, regno);
25445 if (inner_bytes >= 8)
25446 return gen_rtx_REG (mode, regno);
25448 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
25450 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
25451 GEN_INT (inner_bytes));
25452 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
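/* For example (illustrative, following the code above): under the 64-bit
   hard-float ABI a complex float (SCmode) return value is described by a
   PARALLEL of two SFmode registers, f1 carrying the real part at byte
   offset 0 and f2 the imaginary part at byte offset 4.  */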
25455 /* Target hook for TARGET_FUNCTION_VALUE.
25457 On the SPE, both FPs and vectors are returned in r3.
25459 On RS/6000 an integer value is in r3 and a floating-point value is in
25460 fp1, unless -msoft-float. */
25463 rs6000_function_value (const_tree valtype,
25464 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
25465 bool outgoing ATTRIBUTE_UNUSED)
25467 enum machine_mode mode;
25468 unsigned int regno;
25470 /* Special handling for structs in darwin64. */
25471 if (rs6000_darwin64_abi
25472 && TYPE_MODE (valtype) == BLKmode
25473 && TREE_CODE (valtype) == RECORD_TYPE
25474 && int_size_in_bytes (valtype) > 0)
25476 CUMULATIVE_ARGS valcum;
25480 valcum.fregno = FP_ARG_MIN_REG;
25481 valcum.vregno = ALTIVEC_ARG_MIN_REG;
25482 /* Do a trial code generation as if this were going to be passed as
25483 an argument; if any part goes in memory, we return NULL. */
25484 valret = rs6000_darwin64_record_arg (&valcum, valtype, 1, true);
25487 /* Otherwise fall through to standard ABI rules. */
25490 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
25492 /* A long long return value needs to be split under the 32-bit ABI with -mpowerpc64.  */
25493 return gen_rtx_PARALLEL (DImode,
25495 gen_rtx_EXPR_LIST (VOIDmode,
25496 gen_rtx_REG (SImode, GP_ARG_RETURN),
25498 gen_rtx_EXPR_LIST (VOIDmode,
25499 gen_rtx_REG (SImode,
25500 GP_ARG_RETURN + 1),
25503 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
25505 return gen_rtx_PARALLEL (DCmode,
25507 gen_rtx_EXPR_LIST (VOIDmode,
25508 gen_rtx_REG (SImode, GP_ARG_RETURN),
25510 gen_rtx_EXPR_LIST (VOIDmode,
25511 gen_rtx_REG (SImode,
25512 GP_ARG_RETURN + 1),
25514 gen_rtx_EXPR_LIST (VOIDmode,
25515 gen_rtx_REG (SImode,
25516 GP_ARG_RETURN + 2),
25518 gen_rtx_EXPR_LIST (VOIDmode,
25519 gen_rtx_REG (SImode,
25520 GP_ARG_RETURN + 3),
25524 mode = TYPE_MODE (valtype);
25525 if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
25526 || POINTER_TYPE_P (valtype))
25527 mode = TARGET_32BIT ? SImode : DImode;
25529 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
25530 /* _Decimal128 must use an even/odd register pair. */
25531 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
25532 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
25533 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
25534 regno = FP_ARG_RETURN;
25535 else if (TREE_CODE (valtype) == COMPLEX_TYPE
25536 && targetm.calls.split_complex_arg)
25537 return rs6000_complex_function_value (mode);
25538 else if (TREE_CODE (valtype) == VECTOR_TYPE
25539 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
25540 && ALTIVEC_VECTOR_MODE (mode))
25541 regno = ALTIVEC_ARG_RETURN;
25542 else if (TREE_CODE (valtype) == VECTOR_TYPE
25543 && TARGET_VSX && TARGET_ALTIVEC_ABI
25544 && VSX_VECTOR_MODE (mode))
25545 regno = ALTIVEC_ARG_RETURN;
25546 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
25547 && (mode == DFmode || mode == DCmode
25548 || mode == TFmode || mode == TCmode))
25549 return spe_build_register_parallel (mode, GP_ARG_RETURN);
25551 regno = GP_ARG_RETURN;
25553 return gen_rtx_REG (mode, regno);
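/* For example (illustrative, following the code above): on a 64-bit
   hard-float target a function returning "double" yields (reg:DF f1),
   while one returning "int" has its mode widened to DImode and yields
   (reg:DI r3).  */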
25556 /* Define how to find the value returned by a library function
25557 assuming the value has mode MODE. */
25559 rs6000_libcall_value (enum machine_mode mode)
25561 unsigned int regno;
25563 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
25565 /* A long long return value needs to be split under the 32-bit ABI with -mpowerpc64.  */
25566 return gen_rtx_PARALLEL (DImode,
25568 gen_rtx_EXPR_LIST (VOIDmode,
25569 gen_rtx_REG (SImode, GP_ARG_RETURN),
25571 gen_rtx_EXPR_LIST (VOIDmode,
25572 gen_rtx_REG (SImode,
25573 GP_ARG_RETURN + 1),
25577 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
25578 /* _Decimal128 must use an even/odd register pair. */
25579 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
25580 else if (SCALAR_FLOAT_MODE_P (mode)
25581 && TARGET_HARD_FLOAT && TARGET_FPRS
25582 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
25583 regno = FP_ARG_RETURN;
25584 else if (ALTIVEC_VECTOR_MODE (mode)
25585 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
25586 regno = ALTIVEC_ARG_RETURN;
25587 else if (VSX_VECTOR_MODE (mode)
25588 && TARGET_VSX && TARGET_ALTIVEC_ABI)
25589 regno = ALTIVEC_ARG_RETURN;
25590 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
25591 return rs6000_complex_function_value (mode);
25592 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
25593 && (mode == DFmode || mode == DCmode
25594 || mode == TFmode || mode == TCmode))
25595 return spe_build_register_parallel (mode, GP_ARG_RETURN);
25597 regno = GP_ARG_RETURN;
25599 return gen_rtx_REG (mode, regno);
25603 /* Given FROM and TO register numbers, say whether this elimination is allowed.
25604 Frame pointer elimination is automatically handled.
25606 For the RS/6000, if frame pointer elimination is being done, we would like
25607 to convert ap into fp, not sp.
25609 We need r30 if -mminimal-toc was specified, and there are constant pool references.  */
25613 rs6000_can_eliminate (const int from, const int to)
25615 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
25616 ? ! frame_pointer_needed
25617 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
25618 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
25622 /* Define the offset between two registers, FROM to be eliminated and its
25623 replacement TO, at the start of a routine. */
25625 rs6000_initial_elimination_offset (int from, int to)
25627 rs6000_stack_t *info = rs6000_stack_info ();
25628 HOST_WIDE_INT offset;
25630 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
25631 offset = info->push_p ? 0 : -info->total_size;
25632 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
25634 offset = info->push_p ? 0 : -info->total_size;
25635 if (FRAME_GROWS_DOWNWARD)
25636 offset += info->fixed_size + info->vars_size + info->parm_size;
25638 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
25639 offset = FRAME_GROWS_DOWNWARD
25640 ? info->fixed_size + info->vars_size + info->parm_size
25642 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
25643 offset = info->total_size;
25644 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
25645 offset = info->push_p ? info->total_size : 0;
25646 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
25649 gcc_unreachable ();
25655 rs6000_dwarf_register_span (rtx reg)
25659 unsigned regno = REGNO (reg);
25660 enum machine_mode mode = GET_MODE (reg);
25664 && (SPE_VECTOR_MODE (GET_MODE (reg))
25665 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
25666 && mode != SFmode && mode != SDmode && mode != SCmode)))
25671 regno = REGNO (reg);
25673 /* The duality of the SPE register size wreaks all kinds of havoc.
25674 This is a way of distinguishing r0 in 32-bits from r0 in 64-bits.  */
25676 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
25677 gcc_assert (words <= 4);
25678 for (i = 0; i < words; i++, regno++)
25680 if (BYTES_BIG_ENDIAN)
25682 parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
25683 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
25687 parts[2 * i] = gen_rtx_REG (SImode, regno);
25688 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
25692 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
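/* For example (illustrative, following the code above): a DFmode value
   living in r5 on a big-endian E500 double target is described by
   (parallel [(reg:SI 1205) (reg:SI 5)]); the fictitious register number
   1200 + 5 stands for the upper 32 bits of the 64-bit GPR, and r5 itself
   for the lower 32 bits.  */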
25695 /* Fill in sizes for SPE register high parts in table used by unwinder. */
25698 rs6000_init_dwarf_reg_sizes_extra (tree address)
25703 enum machine_mode mode = TYPE_MODE (char_type_node);
25704 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
25705 rtx mem = gen_rtx_MEM (BLKmode, addr);
25706 rtx value = gen_int_mode (4, mode);
25708 for (i = 1201; i < 1232; i++)
25710 int column = DWARF_REG_TO_UNWIND_COLUMN (i);
25711 HOST_WIDE_INT offset
25712 = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);
25714 emit_move_insn (adjust_address (mem, mode, offset), value);
25719 /* Map internal GCC register numbers to DWARF2 register numbers.  */
25722 rs6000_dbx_register_number (unsigned int regno)
25724 if (regno <= 63 || write_symbols != DWARF2_DEBUG)
25726 if (regno == MQ_REGNO)
25728 if (regno == LR_REGNO)
25730 if (regno == CTR_REGNO)
25732 if (CR_REGNO_P (regno))
25733 return regno - CR0_REGNO + 86;
25734 if (regno == CA_REGNO)
25735 return 101; /* XER */
25736 if (ALTIVEC_REGNO_P (regno))
25737 return regno - FIRST_ALTIVEC_REGNO + 1124;
25738 if (regno == VRSAVE_REGNO)
25740 if (regno == VSCR_REGNO)
25742 if (regno == SPE_ACC_REGNO)
25744 if (regno == SPEFSCR_REGNO)
25746 /* SPE high reg number. We get these values of regno from
25747 rs6000_dwarf_register_span. */
25748 gcc_assert (regno >= 1200 && regno < 1232);
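/* For example (illustrative, following the mapping above): cr2 is
   CR0_REGNO + 2, so it maps to DWARF column 86 + 2 = 88, and v0 is
   FIRST_ALTIVEC_REGNO, so it maps to 1124.  */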
25752 /* Target hook for eh_return_filter_mode.  */
25753 static enum machine_mode
25754 rs6000_eh_return_filter_mode (void)
25756 return TARGET_32BIT ? SImode : word_mode;
25759 /* Target hook for scalar_mode_supported_p. */
25761 rs6000_scalar_mode_supported_p (enum machine_mode mode)
25763 if (DECIMAL_FLOAT_MODE_P (mode))
25764 return default_decimal_float_supported_p ();
25766 return default_scalar_mode_supported_p (mode);
25769 /* Target hook for vector_mode_supported_p. */
25771 rs6000_vector_mode_supported_p (enum machine_mode mode)
25774 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
25777 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
25780 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
25787 /* Target hook for invalid_arg_for_unprototyped_fn. */
25788 static const char *
25789 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
25791 return (!rs6000_darwin64_abi
25793 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
25794 && (funcdecl == NULL_TREE
25795 || (TREE_CODE (funcdecl) == FUNCTION_DECL
25796 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
25797 ? N_("AltiVec argument passed to unprototyped function")
25801 /* For TARGET_SECURE_PLT 32-bit PIC code we can avoid the PIC register
25802 setup by calling the hidden function __stack_chk_fail_local instead
25803 of __stack_chk_fail.  Otherwise it is better to call
25804 __stack_chk_fail directly.  */
25807 rs6000_stack_protect_fail (void)
25809 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
25810 ? default_hidden_stack_protect_fail ()
25811 : default_external_stack_protect_fail ();
25815 rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
25816 int num_operands ATTRIBUTE_UNUSED)
25818 if (rs6000_warn_cell_microcode)
25821 int insn_code_number = recog_memoized (insn);
25822 location_t location = locator_location (INSN_LOCATOR (insn));
25824 /* Punt on insns we cannot recognize. */
25825 if (insn_code_number < 0)
25828 temp = get_insn_template (insn_code_number, insn);
25830 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
25831 warning_at (location, OPT_mwarn_cell_microcode,
25832 "emitting microcode insn %s\t[%s] #%d",
25833 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
25834 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
25835 warning_at (location, OPT_mwarn_cell_microcode,
25836 "emitting conditional microcode insn %s\t[%s] #%d",
25837 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
25841 #include "gt-rs6000.h"