1 /* Subroutines for insn-output.c for SPARC.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com)
6 64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
27 #include "coretypes.h"
32 #include "hard-reg-set.h"
34 #include "insn-config.h"
35 #include "insn-codes.h"
36 #include "conditions.h"
38 #include "insn-attr.h"
49 #include "target-def.h"
50 #include "cfglayout.h"
52 #include "langhooks.h"
58 struct processor_costs cypress_costs = {
59 COSTS_N_INSNS (2), /* int load */
60 COSTS_N_INSNS (2), /* int signed load */
61 COSTS_N_INSNS (2), /* int zeroed load */
62 COSTS_N_INSNS (2), /* float load */
63 COSTS_N_INSNS (5), /* fmov, fneg, fabs */
64 COSTS_N_INSNS (5), /* fadd, fsub */
65 COSTS_N_INSNS (1), /* fcmp */
66 COSTS_N_INSNS (1), /* fmov, fmovr */
67 COSTS_N_INSNS (7), /* fmul */
68 COSTS_N_INSNS (37), /* fdivs */
69 COSTS_N_INSNS (37), /* fdivd */
70 COSTS_N_INSNS (63), /* fsqrts */
71 COSTS_N_INSNS (63), /* fsqrtd */
72 COSTS_N_INSNS (1), /* imul */
73 COSTS_N_INSNS (1), /* imulX */
74 0, /* imul bit factor */
75 COSTS_N_INSNS (1), /* idiv */
76 COSTS_N_INSNS (1), /* idivX */
77 COSTS_N_INSNS (1), /* movcc/movr */
78 0, /* shift penalty */
82 struct processor_costs supersparc_costs = {
83 COSTS_N_INSNS (1), /* int load */
84 COSTS_N_INSNS (1), /* int signed load */
85 COSTS_N_INSNS (1), /* int zeroed load */
86 COSTS_N_INSNS (0), /* float load */
87 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
88 COSTS_N_INSNS (3), /* fadd, fsub */
89 COSTS_N_INSNS (3), /* fcmp */
90 COSTS_N_INSNS (1), /* fmov, fmovr */
91 COSTS_N_INSNS (3), /* fmul */
92 COSTS_N_INSNS (6), /* fdivs */
93 COSTS_N_INSNS (9), /* fdivd */
94 COSTS_N_INSNS (12), /* fsqrts */
95 COSTS_N_INSNS (12), /* fsqrtd */
96 COSTS_N_INSNS (4), /* imul */
97 COSTS_N_INSNS (4), /* imulX */
98 0, /* imul bit factor */
99 COSTS_N_INSNS (4), /* idiv */
100 COSTS_N_INSNS (4), /* idivX */
101 COSTS_N_INSNS (1), /* movcc/movr */
102 1, /* shift penalty */
106 struct processor_costs hypersparc_costs = {
107 COSTS_N_INSNS (1), /* int load */
108 COSTS_N_INSNS (1), /* int signed load */
109 COSTS_N_INSNS (1), /* int zeroed load */
110 COSTS_N_INSNS (1), /* float load */
111 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
112 COSTS_N_INSNS (1), /* fadd, fsub */
113 COSTS_N_INSNS (1), /* fcmp */
114 COSTS_N_INSNS (1), /* fmov, fmovr */
115 COSTS_N_INSNS (1), /* fmul */
116 COSTS_N_INSNS (8), /* fdivs */
117 COSTS_N_INSNS (12), /* fdivd */
118 COSTS_N_INSNS (17), /* fsqrts */
119 COSTS_N_INSNS (17), /* fsqrtd */
120 COSTS_N_INSNS (17), /* imul */
121 COSTS_N_INSNS (17), /* imulX */
122 0, /* imul bit factor */
123 COSTS_N_INSNS (17), /* idiv */
124 COSTS_N_INSNS (17), /* idivX */
125 COSTS_N_INSNS (1), /* movcc/movr */
126 0, /* shift penalty */
130 struct processor_costs sparclet_costs = {
131 COSTS_N_INSNS (3), /* int load */
132 COSTS_N_INSNS (3), /* int signed load */
133 COSTS_N_INSNS (1), /* int zeroed load */
134 COSTS_N_INSNS (1), /* float load */
135 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
136 COSTS_N_INSNS (1), /* fadd, fsub */
137 COSTS_N_INSNS (1), /* fcmp */
138 COSTS_N_INSNS (1), /* fmov, fmovr */
139 COSTS_N_INSNS (1), /* fmul */
140 COSTS_N_INSNS (1), /* fdivs */
141 COSTS_N_INSNS (1), /* fdivd */
142 COSTS_N_INSNS (1), /* fsqrts */
143 COSTS_N_INSNS (1), /* fsqrtd */
144 COSTS_N_INSNS (5), /* imul */
145 COSTS_N_INSNS (5), /* imulX */
146 0, /* imul bit factor */
147 COSTS_N_INSNS (5), /* idiv */
148 COSTS_N_INSNS (5), /* idivX */
149 COSTS_N_INSNS (1), /* movcc/movr */
150 0, /* shift penalty */
154 struct processor_costs ultrasparc_costs = {
155 COSTS_N_INSNS (2), /* int load */
156 COSTS_N_INSNS (3), /* int signed load */
157 COSTS_N_INSNS (2), /* int zeroed load */
158 COSTS_N_INSNS (2), /* float load */
159 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
160 COSTS_N_INSNS (4), /* fadd, fsub */
161 COSTS_N_INSNS (1), /* fcmp */
162 COSTS_N_INSNS (2), /* fmov, fmovr */
163 COSTS_N_INSNS (4), /* fmul */
164 COSTS_N_INSNS (13), /* fdivs */
165 COSTS_N_INSNS (23), /* fdivd */
166 COSTS_N_INSNS (13), /* fsqrts */
167 COSTS_N_INSNS (23), /* fsqrtd */
168 COSTS_N_INSNS (4), /* imul */
169 COSTS_N_INSNS (4), /* imulX */
170 2, /* imul bit factor */
171 COSTS_N_INSNS (37), /* idiv */
172 COSTS_N_INSNS (68), /* idivX */
173 COSTS_N_INSNS (2), /* movcc/movr */
174 2, /* shift penalty */
178 struct processor_costs ultrasparc3_costs = {
179 COSTS_N_INSNS (2), /* int load */
180 COSTS_N_INSNS (3), /* int signed load */
181 COSTS_N_INSNS (3), /* int zeroed load */
182 COSTS_N_INSNS (2), /* float load */
183 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
184 COSTS_N_INSNS (4), /* fadd, fsub */
185 COSTS_N_INSNS (5), /* fcmp */
186 COSTS_N_INSNS (3), /* fmov, fmovr */
187 COSTS_N_INSNS (4), /* fmul */
188 COSTS_N_INSNS (17), /* fdivs */
189 COSTS_N_INSNS (20), /* fdivd */
190 COSTS_N_INSNS (20), /* fsqrts */
191 COSTS_N_INSNS (29), /* fsqrtd */
192 COSTS_N_INSNS (6), /* imul */
193 COSTS_N_INSNS (6), /* imulX */
194 0, /* imul bit factor */
195 COSTS_N_INSNS (40), /* idiv */
196 COSTS_N_INSNS (71), /* idivX */
197 COSTS_N_INSNS (2), /* movcc/movr */
198 0, /* shift penalty */
202 struct processor_costs niagara_costs = {
203 COSTS_N_INSNS (3), /* int load */
204 COSTS_N_INSNS (3), /* int signed load */
205 COSTS_N_INSNS (3), /* int zeroed load */
206 COSTS_N_INSNS (9), /* float load */
207 COSTS_N_INSNS (8), /* fmov, fneg, fabs */
208 COSTS_N_INSNS (8), /* fadd, fsub */
209 COSTS_N_INSNS (26), /* fcmp */
210 COSTS_N_INSNS (8), /* fmov, fmovr */
211 COSTS_N_INSNS (29), /* fmul */
212 COSTS_N_INSNS (54), /* fdivs */
213 COSTS_N_INSNS (83), /* fdivd */
214 COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
215 COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
216 COSTS_N_INSNS (11), /* imul */
217 COSTS_N_INSNS (11), /* imulX */
218 0, /* imul bit factor */
219 COSTS_N_INSNS (72), /* idiv */
220 COSTS_N_INSNS (72), /* idivX */
221 COSTS_N_INSNS (1), /* movcc/movr */
222 0, /* shift penalty */
226 struct processor_costs niagara2_costs = {
227 COSTS_N_INSNS (3), /* int load */
228 COSTS_N_INSNS (3), /* int signed load */
229 COSTS_N_INSNS (3), /* int zeroed load */
230 COSTS_N_INSNS (3), /* float load */
231 COSTS_N_INSNS (6), /* fmov, fneg, fabs */
232 COSTS_N_INSNS (6), /* fadd, fsub */
233 COSTS_N_INSNS (6), /* fcmp */
234 COSTS_N_INSNS (6), /* fmov, fmovr */
235 COSTS_N_INSNS (6), /* fmul */
236 COSTS_N_INSNS (19), /* fdivs */
237 COSTS_N_INSNS (33), /* fdivd */
238 COSTS_N_INSNS (19), /* fsqrts */
239 COSTS_N_INSNS (33), /* fsqrtd */
240 COSTS_N_INSNS (5), /* imul */
241 COSTS_N_INSNS (5), /* imulX */
242 0, /* imul bit factor */
243 COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
244 COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
245 COSTS_N_INSNS (1), /* movcc/movr */
246 0, /* shift penalty */
249 const struct processor_costs *sparc_costs = &cypress_costs;
251 #ifdef HAVE_AS_RELAX_OPTION
252 /* If 'as' and 'ld' are relaxing tail call insns into branch always, use
253 "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
254 With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
255 somebody branches between the sethi and the jmp. */
256 #define LEAF_SIBCALL_SLOT_RESERVED_P 1
258 #define LEAF_SIBCALL_SLOT_RESERVED_P \
259 ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
262 /* Global variables for machine-dependent things. */
264 /* Size of frame. Need to know this to emit return insns from leaf procedures.
265 ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
266 reload pass. This is important as the value is later used for scheduling
267 (to see what can go in a delay slot).
268 APPARENT_FSIZE is the size of the stack less the register save area and less
269 the outgoing argument area. It is used when saving call preserved regs. */
270 static HOST_WIDE_INT apparent_fsize;
271 static HOST_WIDE_INT actual_fsize;
273 /* Number of live general or floating point registers needed to be
274 saved (as 4-byte quantities). */
275 static int num_gfregs;
277 /* The alias set for prologue/epilogue register save/restore. */
278 static GTY(()) alias_set_type sparc_sr_alias_set;
280 /* The alias set for the structure return value. */
281 static GTY(()) alias_set_type struct_value_alias_set;
283 /* Vector to say how input registers are mapped to output registers.
284 HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
285 eliminate it. You must use -fomit-frame-pointer to get that. */
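/* In a leaf function no new register window is allocated, so the incoming
   registers %i0-%i5 and %i7 (hard regs 24-29 and 31) are remapped onto the
   caller's %o0-%o5 and %o7 (hard regs 8-13 and 15), %sp (hard reg 14) keeps
   its place, and entries of -1 mark registers that may not appear in a
   function treated as leaf.  */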
286 char leaf_reg_remap[] =
287 { 0, 1, 2, 3, 4, 5, 6, 7,
288 -1, -1, -1, -1, -1, -1, 14, -1,
289 -1, -1, -1, -1, -1, -1, -1, -1,
290 8, 9, 10, 11, 12, 13, -1, 15,
292 32, 33, 34, 35, 36, 37, 38, 39,
293 40, 41, 42, 43, 44, 45, 46, 47,
294 48, 49, 50, 51, 52, 53, 54, 55,
295 56, 57, 58, 59, 60, 61, 62, 63,
296 64, 65, 66, 67, 68, 69, 70, 71,
297 72, 73, 74, 75, 76, 77, 78, 79,
298 80, 81, 82, 83, 84, 85, 86, 87,
299 88, 89, 90, 91, 92, 93, 94, 95,
300 96, 97, 98, 99, 100};
302 /* Vector, indexed by hard register number, which contains 1
303 for a register that is allowable in a candidate for leaf
304 function treatment. */
305 char sparc_leaf_regs[] =
306 { 1, 1, 1, 1, 1, 1, 1, 1,
307 0, 0, 0, 0, 0, 0, 1, 0,
308 0, 0, 0, 0, 0, 0, 0, 0,
309 1, 1, 1, 1, 1, 1, 0, 1,
310 1, 1, 1, 1, 1, 1, 1, 1,
311 1, 1, 1, 1, 1, 1, 1, 1,
312 1, 1, 1, 1, 1, 1, 1, 1,
313 1, 1, 1, 1, 1, 1, 1, 1,
314 1, 1, 1, 1, 1, 1, 1, 1,
315 1, 1, 1, 1, 1, 1, 1, 1,
316 1, 1, 1, 1, 1, 1, 1, 1,
317 1, 1, 1, 1, 1, 1, 1, 1,
320 struct GTY(()) machine_function
322 /* Some local-dynamic TLS symbol name. */
323 const char *some_ld_name;
325 /* True if the current function is leaf and uses only leaf regs,
326 so that the SPARC leaf function optimization can be applied.
327 Private version of current_function_uses_only_leaf_regs, see
328 sparc_expand_prologue for the rationale. */
331 /* True if the data calculated by sparc_expand_prologue are valid. */
332 bool prologue_data_valid_p;
335 #define sparc_leaf_function_p cfun->machine->leaf_function_p
336 #define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
338 /* Register we pretend to think the frame pointer is allocated to.
339 Normally, this is %fp, but if we are in a leaf procedure, this
340 is %sp+"something". We record "something" separately as it may
341 be too big for reg+constant addressing. */
342 static rtx frame_base_reg;
343 static HOST_WIDE_INT frame_base_offset;
345 /* 1 if the next opcode is to be specially indented. */
346 int sparc_indent_opcode = 0;
348 static bool sparc_handle_option (size_t, const char *, int);
349 static void sparc_init_modes (void);
350 static void scan_record_type (tree, int *, int *, int *);
351 static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
352 tree, int, int, int *, int *);
354 static int supersparc_adjust_cost (rtx, rtx, rtx, int);
355 static int hypersparc_adjust_cost (rtx, rtx, rtx, int);
357 static void sparc_output_addr_vec (rtx);
358 static void sparc_output_addr_diff_vec (rtx);
359 static void sparc_output_deferred_case_vectors (void);
360 static rtx sparc_builtin_saveregs (void);
361 static int epilogue_renumber (rtx *, int);
362 static bool sparc_assemble_integer (rtx, unsigned int, int);
363 static int set_extends (rtx);
364 static void emit_pic_helper (void);
365 static void load_pic_register (bool);
366 static int save_or_restore_regs (int, int, rtx, int, int);
367 static void emit_save_or_restore_regs (int);
368 static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
369 static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
370 #ifdef OBJECT_FORMAT_ELF
371 static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
374 static int sparc_adjust_cost (rtx, rtx, rtx, int);
375 static int sparc_issue_rate (void);
376 static void sparc_sched_init (FILE *, int, int);
377 static int sparc_use_sched_lookahead (void);
379 static void emit_soft_tfmode_libcall (const char *, int, rtx *);
380 static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
381 static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
382 static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
383 static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
385 static bool sparc_function_ok_for_sibcall (tree, tree);
386 static void sparc_init_libfuncs (void);
387 static void sparc_init_builtins (void);
388 static void sparc_vis_init_builtins (void);
389 static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
390 static tree sparc_fold_builtin (tree, tree, bool);
391 static int sparc_vis_mul8x16 (int, int);
392 static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
393 static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
394 HOST_WIDE_INT, tree);
395 static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
396 HOST_WIDE_INT, const_tree);
397 static struct machine_function * sparc_init_machine_status (void);
398 static bool sparc_cannot_force_const_mem (rtx);
399 static rtx sparc_tls_get_addr (void);
400 static rtx sparc_tls_got (void);
401 static const char *get_some_local_dynamic_name (void);
402 static int get_some_local_dynamic_name_1 (rtx *, void *);
403 static bool sparc_rtx_costs (rtx, int, int, int *, bool);
404 static bool sparc_promote_prototypes (const_tree);
405 static rtx sparc_struct_value_rtx (tree, int);
406 static bool sparc_return_in_memory (const_tree, const_tree);
407 static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
408 static void sparc_va_start (tree, rtx);
409 static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
410 static bool sparc_vector_mode_supported_p (enum machine_mode);
411 static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
412 static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
413 enum machine_mode, const_tree, bool);
414 static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
415 enum machine_mode, tree, bool);
416 static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
417 static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
418 static void sparc_file_end (void);
419 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
420 static const char *sparc_mangle_type (const_tree);
422 #ifdef SUBTARGET_ATTRIBUTE_TABLE
423 const struct attribute_spec sparc_attribute_table[];
426 /* Option handling. */
429 enum cmodel sparc_cmodel;
431 char sparc_hard_reg_printed[8];
433 struct sparc_cpu_select sparc_select[] =
435 /* switch name, tune arch */
436 { (char *)0, "default", 1, 1 },
437 { (char *)0, "-mcpu=", 1, 1 },
438 { (char *)0, "-mtune=", 1, 0 },
442 /* CPU type. This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx. */
443 enum processor_type sparc_cpu;
445 /* Whether an FPU option was specified.  */
446 static bool fpu_option_set = false;
448 /* Initialize the GCC target structure. */
450 /* The sparc default is to use .half rather than .short for aligned
451 HI objects. Use .word instead of .long on non-ELF systems. */
452 #undef TARGET_ASM_ALIGNED_HI_OP
453 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
454 #ifndef OBJECT_FORMAT_ELF
455 #undef TARGET_ASM_ALIGNED_SI_OP
456 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
459 #undef TARGET_ASM_UNALIGNED_HI_OP
460 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
461 #undef TARGET_ASM_UNALIGNED_SI_OP
462 #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
463 #undef TARGET_ASM_UNALIGNED_DI_OP
464 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
466 /* The target hook has to handle DI-mode values. */
467 #undef TARGET_ASM_INTEGER
468 #define TARGET_ASM_INTEGER sparc_assemble_integer
470 #undef TARGET_ASM_FUNCTION_PROLOGUE
471 #define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
472 #undef TARGET_ASM_FUNCTION_EPILOGUE
473 #define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
475 #undef TARGET_SCHED_ADJUST_COST
476 #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
477 #undef TARGET_SCHED_ISSUE_RATE
478 #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
479 #undef TARGET_SCHED_INIT
480 #define TARGET_SCHED_INIT sparc_sched_init
481 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
482 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
484 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
485 #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
487 #undef TARGET_INIT_LIBFUNCS
488 #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
489 #undef TARGET_INIT_BUILTINS
490 #define TARGET_INIT_BUILTINS sparc_init_builtins
492 #undef TARGET_LEGITIMIZE_ADDRESS
493 #define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
495 #undef TARGET_EXPAND_BUILTIN
496 #define TARGET_EXPAND_BUILTIN sparc_expand_builtin
497 #undef TARGET_FOLD_BUILTIN
498 #define TARGET_FOLD_BUILTIN sparc_fold_builtin
501 #undef TARGET_HAVE_TLS
502 #define TARGET_HAVE_TLS true
505 #undef TARGET_CANNOT_FORCE_CONST_MEM
506 #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
508 #undef TARGET_ASM_OUTPUT_MI_THUNK
509 #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
510 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
511 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
513 #undef TARGET_RTX_COSTS
514 #define TARGET_RTX_COSTS sparc_rtx_costs
515 #undef TARGET_ADDRESS_COST
516 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
518 /* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
519 no-op for TARGET_ARCH32 this is ok. Otherwise we'd need to add a runtime
520 test for this value. */
521 #undef TARGET_PROMOTE_FUNCTION_ARGS
522 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
524 /* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
525 no-op for TARGET_ARCH32 this is ok. Otherwise we'd need to add a runtime
526 test for this value. */
527 #undef TARGET_PROMOTE_FUNCTION_RETURN
528 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
530 #undef TARGET_PROMOTE_PROTOTYPES
531 #define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes
533 #undef TARGET_STRUCT_VALUE_RTX
534 #define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
535 #undef TARGET_RETURN_IN_MEMORY
536 #define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
537 #undef TARGET_MUST_PASS_IN_STACK
538 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
539 #undef TARGET_PASS_BY_REFERENCE
540 #define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
541 #undef TARGET_ARG_PARTIAL_BYTES
542 #define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
544 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
545 #define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
546 #undef TARGET_STRICT_ARGUMENT_NAMING
547 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
549 #undef TARGET_EXPAND_BUILTIN_VA_START
550 #define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
551 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
552 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
554 #undef TARGET_VECTOR_MODE_SUPPORTED_P
555 #define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p
557 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
558 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec
560 #ifdef SUBTARGET_INSERT_ATTRIBUTES
561 #undef TARGET_INSERT_ATTRIBUTES
562 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
565 #ifdef SUBTARGET_ATTRIBUTE_TABLE
566 #undef TARGET_ATTRIBUTE_TABLE
567 #define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
570 #undef TARGET_RELAXED_ORDERING
571 #define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING
573 #undef TARGET_DEFAULT_TARGET_FLAGS
574 #define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
575 #undef TARGET_HANDLE_OPTION
576 #define TARGET_HANDLE_OPTION sparc_handle_option
578 #if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
579 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
580 #define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
583 #undef TARGET_ASM_FILE_END
584 #define TARGET_ASM_FILE_END sparc_file_end
586 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
587 #undef TARGET_MANGLE_TYPE
588 #define TARGET_MANGLE_TYPE sparc_mangle_type
591 struct gcc_target targetm = TARGET_INITIALIZER;
593 /* Implement TARGET_HANDLE_OPTION. */
596 sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
601 case OPT_mhard_float:
602 case OPT_msoft_float:
603 fpu_option_set = true;
607 sparc_select[1].string = arg;
611 sparc_select[2].string = arg;
618 /* Validate and override various options, and do some machine dependent
622 sparc_override_options (void)
624 static struct code_model {
625 const char *const name;
626 const enum cmodel value;
627 } const cmodels[] = {
629 { "medlow", CM_MEDLOW },
630 { "medmid", CM_MEDMID },
631 { "medany", CM_MEDANY },
632 { "embmedany", CM_EMBMEDANY },
633 { NULL, (enum cmodel) 0 }
635 const struct code_model *cmodel;
636 /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=. */
637 static struct cpu_default {
639 const char *const name;
640 } const cpu_default[] = {
641 /* There must be one entry here for each TARGET_CPU value. */
642 { TARGET_CPU_sparc, "cypress" },
643 { TARGET_CPU_sparclet, "tsc701" },
644 { TARGET_CPU_sparclite, "f930" },
645 { TARGET_CPU_v8, "v8" },
646 { TARGET_CPU_hypersparc, "hypersparc" },
647 { TARGET_CPU_sparclite86x, "sparclite86x" },
648 { TARGET_CPU_supersparc, "supersparc" },
649 { TARGET_CPU_v9, "v9" },
650 { TARGET_CPU_ultrasparc, "ultrasparc" },
651 { TARGET_CPU_ultrasparc3, "ultrasparc3" },
652 { TARGET_CPU_niagara, "niagara" },
653 { TARGET_CPU_niagara2, "niagara2" },
656 const struct cpu_default *def;
657 /* Table of values for -m{cpu,tune}=. */
658 static struct cpu_table {
659 const char *const name;
660 const enum processor_type processor;
663 } const cpu_table[] = {
664 { "v7", PROCESSOR_V7, MASK_ISA, 0 },
665 { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
666 { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
667 /* TI TMS390Z55 supersparc */
668 { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
669 { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
670 /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
671 The Fujitsu MB86934 is the recent sparclite chip, with an fpu. */
672 { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
673 { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
674 { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
675 { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
677 { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
679 { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
680 { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
681 /* TI ultrasparc I, II, IIi */
682 { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
683 /* Although insns using %y are deprecated, it is a clear win on current
685 |MASK_DEPRECATED_V8_INSNS},
686 /* TI ultrasparc III */
687 /* ??? Check if %y issue still holds true in ultra3. */
688 { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
690 { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
691 { "niagara2", PROCESSOR_NIAGARA2, MASK_ISA, MASK_V9},
692 { 0, (enum processor_type) 0, 0, 0 }
694 const struct cpu_table *cpu;
695 const struct sparc_cpu_select *sel;
698 #ifndef SPARC_BI_ARCH
699 /* Check for unsupported architecture size. */
700 if (! TARGET_64BIT != DEFAULT_ARCH32_P)
701 error ("%s is not supported by this configuration",
702 DEFAULT_ARCH32_P ? "-m64" : "-m32");
705 /* We force all 64-bit archs to use 128-bit long double. */
706 if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
708 error ("-mlong-double-64 not allowed with -m64");
709 target_flags |= MASK_LONG_DOUBLE_128;
712 /* Code model selection. */
713 sparc_cmodel = SPARC_DEFAULT_CMODEL;
717 sparc_cmodel = CM_32;
720 if (sparc_cmodel_string != NULL)
724 for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
725 if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
727 if (cmodel->name == NULL)
728 error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
730 sparc_cmodel = cmodel->value;
733 error ("-mcmodel= is not supported on 32 bit systems");
736 fpu = target_flags & MASK_FPU; /* save current -mfpu status */
738 /* Set the default CPU. */
739 for (def = &cpu_default[0]; def->name; ++def)
740 if (def->cpu == TARGET_CPU_DEFAULT)
742 gcc_assert (def->name);
743 sparc_select[0].string = def->name;
745 for (sel = &sparc_select[0]; sel->name; ++sel)
749 for (cpu = &cpu_table[0]; cpu->name; ++cpu)
750 if (! strcmp (sel->string, cpu->name))
753 sparc_cpu = cpu->processor;
757 target_flags &= ~cpu->disable;
758 target_flags |= cpu->enable;
764 error ("bad value (%s) for %s switch", sel->string, sel->name);
768 /* If -mfpu or -mno-fpu was explicitly used, don't override with
769 the processor default. */
771 target_flags = (target_flags & ~MASK_FPU) | fpu;
773 /* Don't allow -mvis if FPU is disabled. */
775 target_flags &= ~MASK_VIS;
777 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
779 -m64 also implies v9. */
780 if (TARGET_VIS || TARGET_ARCH64)
782 target_flags |= MASK_V9;
783 target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
786 /* Use the deprecated v8 insns for sparc64 in 32 bit mode. */
787 if (TARGET_V9 && TARGET_ARCH32)
788 target_flags |= MASK_DEPRECATED_V8_INSNS;
790 /* V8PLUS requires V9, makes no sense in 64 bit mode. */
791 if (! TARGET_V9 || TARGET_ARCH64)
792 target_flags &= ~MASK_V8PLUS;
794 /* Don't use stack biasing in 32 bit mode. */
796 target_flags &= ~MASK_STACK_BIAS;
798 /* Supply a default value for align_functions. */
799 if (align_functions == 0
800 && (sparc_cpu == PROCESSOR_ULTRASPARC
801 || sparc_cpu == PROCESSOR_ULTRASPARC3
802 || sparc_cpu == PROCESSOR_NIAGARA
803 || sparc_cpu == PROCESSOR_NIAGARA2))
804 align_functions = 32;
806 /* Validate PCC_STRUCT_RETURN. */
807 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
808 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
810 /* Only use .uaxword when compiling for a 64-bit target. */
812 targetm.asm_out.unaligned_op.di = NULL;
814 /* Do various machine dependent initializations. */
817 /* Acquire unique alias sets for our private stuff. */
818 sparc_sr_alias_set = new_alias_set ();
819 struct_value_alias_set = new_alias_set ();
821 /* Set up function hooks. */
822 init_machine_status = sparc_init_machine_status;
827 case PROCESSOR_CYPRESS:
828 sparc_costs = &cypress_costs;
831 case PROCESSOR_SPARCLITE:
832 case PROCESSOR_SUPERSPARC:
833 sparc_costs = &supersparc_costs;
837 case PROCESSOR_HYPERSPARC:
838 case PROCESSOR_SPARCLITE86X:
839 sparc_costs = &hypersparc_costs;
841 case PROCESSOR_SPARCLET:
842 case PROCESSOR_TSC701:
843 sparc_costs = &sparclet_costs;
846 case PROCESSOR_ULTRASPARC:
847 sparc_costs = &ultrasparc_costs;
849 case PROCESSOR_ULTRASPARC3:
850 sparc_costs = &ultrasparc3_costs;
852 case PROCESSOR_NIAGARA:
853 sparc_costs = &niagara_costs;
855 case PROCESSOR_NIAGARA2:
856 sparc_costs = &niagara2_costs;
860 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
861 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
862 target_flags |= MASK_LONG_DOUBLE_128;
865 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
866 set_param_value ("simultaneous-prefetches",
867 ((sparc_cpu == PROCESSOR_ULTRASPARC
868 || sparc_cpu == PROCESSOR_NIAGARA
869 || sparc_cpu == PROCESSOR_NIAGARA2)
871 : (sparc_cpu == PROCESSOR_ULTRASPARC3
873 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
874 set_param_value ("l1-cache-line-size",
875 ((sparc_cpu == PROCESSOR_ULTRASPARC
876 || sparc_cpu == PROCESSOR_ULTRASPARC3
877 || sparc_cpu == PROCESSOR_NIAGARA
878 || sparc_cpu == PROCESSOR_NIAGARA2)
882 #ifdef SUBTARGET_ATTRIBUTE_TABLE
883 /* Table of valid machine attributes. */
884 const struct attribute_spec sparc_attribute_table[] =
886 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
887 SUBTARGET_ATTRIBUTE_TABLE,
888 { NULL, 0, 0, false, false, false, NULL }
892 /* Miscellaneous utilities. */
894 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
895 or branch on register contents instructions. */
898 v9_regcmp_p (enum rtx_code code)
900 return (code == EQ || code == NE || code == GE || code == LT
901 || code == LE || code == GT);
904 /* Nonzero if OP is a floating point constant which can
905 be loaded into an integer register using a single
906 sethi instruction. */
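/* For example, the SFmode constant 1.5 has the 32-bit image 0x3fc00000:
   its low 10 bits are clear and it does not fit in a simm13, so a single
   sethi can load it.  */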
911 if (GET_CODE (op) == CONST_DOUBLE)
916 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
917 REAL_VALUE_TO_TARGET_SINGLE (r, i);
918 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
924 /* Nonzero if OP is a floating point constant which can
925 be loaded into an integer register using a single
931 if (GET_CODE (op) == CONST_DOUBLE)
936 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
937 REAL_VALUE_TO_TARGET_SINGLE (r, i);
938 return SPARC_SIMM13_P (i);
944 /* Nonzero if OP is a floating point constant which can
945 be loaded into an integer register using a high/losum
946 instruction sequence. */
949 fp_high_losum_p (rtx op)
951 /* The constraints calling this should only be in
952 SFmode move insns, so any constant which cannot
953 be moved using a single insn will do. */
954 if (GET_CODE (op) == CONST_DOUBLE)
959 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
960 REAL_VALUE_TO_TARGET_SINGLE (r, i);
961 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
967 /* Expand a move instruction. Return true if all work is done. */
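/* Roughly, the cases handled below are: stores of registers (or zero) to
   memory, TLS and PIC references that must be legitimized, constants that
   have to be forced into the constant pool for FP/vector destinations, and
   multi-insn integer constants, which are split here.  */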
970 sparc_expand_move (enum machine_mode mode, rtx *operands)
972 /* Handle sets of MEM first. */
973 if (GET_CODE (operands[0]) == MEM)
975 /* 0 is a register (or a pair of registers) on SPARC. */
976 if (register_or_zero_operand (operands[1], mode))
979 if (!reload_in_progress)
981 operands[0] = validize_mem (operands[0]);
982 operands[1] = force_reg (mode, operands[1]);
986 /* Fixup TLS cases. */
988 && CONSTANT_P (operands[1])
989 && GET_CODE (operands[1]) != HIGH
990 && sparc_tls_referenced_p (operands [1]))
992 rtx sym = operands[1];
995 if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
997 addend = XEXP (XEXP (sym, 0), 1);
998 sym = XEXP (XEXP (sym, 0), 0);
1001 gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));
1003 sym = legitimize_tls_address (sym);
1006 sym = gen_rtx_PLUS (mode, sym, addend);
1007 sym = force_operand (sym, operands[0]);
1012 /* Fixup PIC cases. */
1013 if (flag_pic && CONSTANT_P (operands[1]))
1015 if (pic_address_needs_scratch (operands[1]))
1016 operands[1] = legitimize_pic_address (operands[1], mode, 0);
1018 /* VxWorks does not impose a fixed gap between segments; the run-time
1019 gap can be different from the object-file gap. We therefore can't
1020 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
1021 are absolutely sure that X is in the same segment as the GOT.
1022 Unfortunately, the flexibility of linker scripts means that we
1023 can't be sure of that in general, so assume that _G_O_T_-relative
1024 accesses are never valid on VxWorks. */
1025 if (GET_CODE (operands[1]) == LABEL_REF && !TARGET_VXWORKS_RTP)
1029 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
1035 gcc_assert (TARGET_ARCH64);
1036 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
1041 if (symbolic_operand (operands[1], mode))
1043 operands[1] = legitimize_pic_address (operands[1],
1045 (reload_in_progress ?
1052 /* If we are trying to toss an integer constant into FP registers,
1053 or loading a FP or vector constant, force it into memory. */
1054 if (CONSTANT_P (operands[1])
1055 && REG_P (operands[0])
1056 && (SPARC_FP_REG_P (REGNO (operands[0]))
1057 || SCALAR_FLOAT_MODE_P (mode)
1058 || VECTOR_MODE_P (mode)))
1060 /* emit_group_store will send such bogosity to us when it is
1061 not storing directly into memory. So fix this up to avoid
1062 crashes in output_constant_pool. */
1063 if (operands [1] == const0_rtx)
1064 operands[1] = CONST0_RTX (mode);
1066 /* We can clear FP registers if TARGET_VIS, and always other regs. */
1067 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
1068 && const_zero_operand (operands[1], mode))
1071 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
1072 /* We are able to build any SF constant in integer registers
1073 with at most 2 instructions. */
1075 /* And any DF constant in integer registers. */
1077 && (reload_completed || reload_in_progress))))
1080 operands[1] = force_const_mem (mode, operands[1]);
1081 if (!reload_in_progress)
1082 operands[1] = validize_mem (operands[1]);
1086 /* Accept non-constants and valid constants unmodified. */
1087 if (!CONSTANT_P (operands[1])
1088 || GET_CODE (operands[1]) == HIGH
1089 || input_operand (operands[1], mode))
1095 /* All QImode constants require only one insn, so proceed. */
1100 sparc_emit_set_const32 (operands[0], operands[1]);
1104 /* input_operand should have filtered out 32-bit mode. */
1105 sparc_emit_set_const64 (operands[0], operands[1]);
1115 /* Load OP1, a 32-bit constant, into OP0, a register.
1116 We know it can't be done in one insn when we get
1117 here, the move expander guarantees this. */
1120 sparc_emit_set_const32 (rtx op0, rtx op1)
1122 enum machine_mode mode = GET_MODE (op0);
1125 if (reload_in_progress || reload_completed)
1128 temp = gen_reg_rtx (mode);
1130 if (GET_CODE (op1) == CONST_INT)
1132 gcc_assert (!small_int_operand (op1, mode)
1133 && !const_high_operand (op1, mode));
1135 /* Emit them as real moves instead of a HIGH/LO_SUM,
1136 this way CSE can see everything and reuse intermediate
1137 values if it wants. */
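      /* For example, op1 == 0x12345678 becomes a move of 0x12345400
	 followed by an IOR with 0x278, i.e. a sethi/or pair.  */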
1138 emit_insn (gen_rtx_SET (VOIDmode, temp,
1139 GEN_INT (INTVAL (op1)
1140 & ~(HOST_WIDE_INT)0x3ff)));
1142 emit_insn (gen_rtx_SET (VOIDmode,
1144 gen_rtx_IOR (mode, temp,
1145 GEN_INT (INTVAL (op1) & 0x3ff))));
1149 /* A symbol, emit in the traditional way. */
1150 emit_insn (gen_rtx_SET (VOIDmode, temp,
1151 gen_rtx_HIGH (mode, op1)));
1152 emit_insn (gen_rtx_SET (VOIDmode,
1153 op0, gen_rtx_LO_SUM (mode, temp, op1)));
1157 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
1158 If TEMP is nonzero, we are forbidden to use any other scratch
1159 registers. Otherwise, we are allowed to generate them as needed.
1161 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
1162 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
1165 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
1167 rtx temp1, temp2, temp3, temp4, temp5;
1170 if (temp && GET_MODE (temp) == TImode)
1173 temp = gen_rtx_REG (DImode, REGNO (temp));
1176 /* SPARC-V9 code-model support. */
1177 switch (sparc_cmodel)
1180 /* The range spanned by all instructions in the object is less
1181 than 2^31 bytes (2GB) and the distance from any instruction
1182 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1183 than 2^31 bytes (2GB).
1185 The executable must be in the low 4GB of the virtual address
1188 sethi %hi(symbol), %temp1
1189 or %temp1, %lo(symbol), %reg */
1191 temp1 = temp; /* op0 is allowed. */
1193 temp1 = gen_reg_rtx (DImode);
1195 emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
1196 emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
1200 /* The range spanned by all instructions in the object is less
1201 than 2^31 bytes (2GB) and the distance from any instruction
1202 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1203 than 2^31 bytes (2GB).
1205 The executable must be in the low 16TB of the virtual address
1208 sethi %h44(symbol), %temp1
1209 or %temp1, %m44(symbol), %temp2
1210 sllx %temp2, 12, %temp3
1211 or %temp3, %l44(symbol), %reg */
1216 temp3 = temp; /* op0 is allowed. */
1220 temp1 = gen_reg_rtx (DImode);
1221 temp2 = gen_reg_rtx (DImode);
1222 temp3 = gen_reg_rtx (DImode);
1225 emit_insn (gen_seth44 (temp1, op1));
1226 emit_insn (gen_setm44 (temp2, temp1, op1));
1227 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1228 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
1229 emit_insn (gen_setl44 (op0, temp3, op1));
1233 /* The range spanned by all instructions in the object is less
1234 than 2^31 bytes (2GB) and the distance from any instruction
1235 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1236 than 2^31 bytes (2GB).
1238 The executable can be placed anywhere in the virtual address
1241 sethi %hh(symbol), %temp1
1242 sethi %lm(symbol), %temp2
1243 or %temp1, %hm(symbol), %temp3
1244 sllx %temp3, 32, %temp4
1245 or %temp4, %temp2, %temp5
1246 or %temp5, %lo(symbol), %reg */
1249 /* It is possible that one of the registers we got for operands[2]
1250 might coincide with that of operands[0] (which is why we made
1251 it TImode). Pick the other one to use as our scratch. */
1252 if (rtx_equal_p (temp, op0))
1254 gcc_assert (ti_temp);
1255 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1258 temp2 = temp; /* op0 is _not_ allowed, see above. */
1265 temp1 = gen_reg_rtx (DImode);
1266 temp2 = gen_reg_rtx (DImode);
1267 temp3 = gen_reg_rtx (DImode);
1268 temp4 = gen_reg_rtx (DImode);
1269 temp5 = gen_reg_rtx (DImode);
1272 emit_insn (gen_sethh (temp1, op1));
1273 emit_insn (gen_setlm (temp2, op1));
1274 emit_insn (gen_sethm (temp3, temp1, op1));
1275 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1276 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1277 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1278 gen_rtx_PLUS (DImode, temp4, temp2)));
1279 emit_insn (gen_setlo (op0, temp5, op1));
1283 /* Old old old backwards compatibility kruft here.
1284 Essentially it is MEDLOW with a fixed 64-bit
1285 virtual base added to all data segment addresses.
1286 Text-segment stuff is computed like MEDANY, we can't
1287 reuse the code above because the relocation knobs
1290 Data segment: sethi %hi(symbol), %temp1
1291 add %temp1, EMBMEDANY_BASE_REG, %temp2
1292 or %temp2, %lo(symbol), %reg */
1293 if (data_segment_operand (op1, GET_MODE (op1)))
1297 temp1 = temp; /* op0 is allowed. */
1302 temp1 = gen_reg_rtx (DImode);
1303 temp2 = gen_reg_rtx (DImode);
1306 emit_insn (gen_embmedany_sethi (temp1, op1));
1307 emit_insn (gen_embmedany_brsum (temp2, temp1));
1308 emit_insn (gen_embmedany_losum (op0, temp2, op1));
1311 /* Text segment: sethi %uhi(symbol), %temp1
1312 sethi %hi(symbol), %temp2
1313 or %temp1, %ulo(symbol), %temp3
1314 sllx %temp3, 32, %temp4
1315 or %temp4, %temp2, %temp5
1316 or %temp5, %lo(symbol), %reg */
1321 /* It is possible that one of the registers we got for operands[2]
1322 might coincide with that of operands[0] (which is why we made
1323 it TImode). Pick the other one to use as our scratch. */
1324 if (rtx_equal_p (temp, op0))
1326 gcc_assert (ti_temp);
1327 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1330 temp2 = temp; /* op0 is _not_ allowed, see above. */
1337 temp1 = gen_reg_rtx (DImode);
1338 temp2 = gen_reg_rtx (DImode);
1339 temp3 = gen_reg_rtx (DImode);
1340 temp4 = gen_reg_rtx (DImode);
1341 temp5 = gen_reg_rtx (DImode);
1344 emit_insn (gen_embmedany_textuhi (temp1, op1));
1345 emit_insn (gen_embmedany_texthi (temp2, op1));
1346 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
1347 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1348 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1349 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1350 gen_rtx_PLUS (DImode, temp4, temp2)));
1351 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
1360 #if HOST_BITS_PER_WIDE_INT == 32
1362 sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
1367 /* These avoid problems when cross compiling. If we do not
1368 go through all this hair then the optimizer will see
1369 invalid REG_EQUAL notes or in some cases none at all. */
1370 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
1371 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
1372 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
1373 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
1375 /* The optimizer is not to assume anything about exactly
1376 which bits are set for a HIGH, they are unspecified.
1377 Unfortunately this leads to many missed optimizations
1378 during CSE. We mask out the non-HIGH bits so the result matches
1379 a plain movdi pattern, to alleviate this problem. */
1381 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
1383 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
1387 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
1389 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
1393 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
1395 return gen_rtx_IOR (DImode, src, GEN_INT (val));
1399 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
1401 return gen_rtx_XOR (DImode, src, GEN_INT (val));
1404 /* Worker routines for 64-bit constant formation on arch64.
1405 One of the key things to be doing in these emissions is
1406 to create as many temp REGs as possible. This makes it
1407 possible for half-built constants to be used later when
1408 such values are similar to something required later on.
1409 Without doing this, the optimizer cannot see such
1412 static void sparc_emit_set_const64_quick1 (rtx, rtx,
1413 unsigned HOST_WIDE_INT, int);
1416 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
1417 unsigned HOST_WIDE_INT low_bits, int is_neg)
1419 unsigned HOST_WIDE_INT high_bits;
1422 high_bits = (~low_bits) & 0xffffffff;
1424 high_bits = low_bits;
1426 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1429 emit_insn (gen_rtx_SET (VOIDmode, op0,
1430 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1434 /* If we are XOR'ing with -1, then we should emit a one's complement
1435 instead. This way the combiner will notice logical operations
1436 such as ANDN later on and substitute. */
1437 if ((low_bits & 0x3ff) == 0x3ff)
1439 emit_insn (gen_rtx_SET (VOIDmode, op0,
1440 gen_rtx_NOT (DImode, temp)));
1444 emit_insn (gen_rtx_SET (VOIDmode, op0,
1445 gen_safe_XOR64 (temp,
1446 (-(HOST_WIDE_INT)0x400
1447 | (low_bits & 0x3ff)))));
1452 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
1453 unsigned HOST_WIDE_INT, int);
1456 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
1457 unsigned HOST_WIDE_INT high_bits,
1458 unsigned HOST_WIDE_INT low_immediate,
1463 if ((high_bits & 0xfffffc00) != 0)
1465 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1466 if ((high_bits & ~0xfffffc00) != 0)
1467 emit_insn (gen_rtx_SET (VOIDmode, op0,
1468 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1474 emit_insn (gen_safe_SET64 (temp, high_bits));
1478 /* Now shift it up into place. */
1479 emit_insn (gen_rtx_SET (VOIDmode, op0,
1480 gen_rtx_ASHIFT (DImode, temp2,
1481 GEN_INT (shift_count))));
1483 /* If there is a low immediate part piece, finish up by
1484 putting that in as well. */
1485 if (low_immediate != 0)
1486 emit_insn (gen_rtx_SET (VOIDmode, op0,
1487 gen_safe_OR64 (op0, low_immediate)));
1490 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
1491 unsigned HOST_WIDE_INT);
1493 /* Full 64-bit constant decomposition. Even though this is the
1494 'worst' case, we still optimize a few things away. */
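/* Outside of reload, the expansion below builds HIGH_BITS into TEMP with
   sethi/or, shifts that left by 32, builds LOW_BITS the same way into fresh
   temporaries and adds the two halves.  During reload, when no extra
   registers are available, the low bits are instead ORed in as shifted
   12-bit and 8-bit chunks.  */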
1496 sparc_emit_set_const64_longway (rtx op0, rtx temp,
1497 unsigned HOST_WIDE_INT high_bits,
1498 unsigned HOST_WIDE_INT low_bits)
1502 if (reload_in_progress || reload_completed)
1505 sub_temp = gen_reg_rtx (DImode);
1507 if ((high_bits & 0xfffffc00) != 0)
1509 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1510 if ((high_bits & ~0xfffffc00) != 0)
1511 emit_insn (gen_rtx_SET (VOIDmode,
1513 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1519 emit_insn (gen_safe_SET64 (temp, high_bits));
1523 if (!reload_in_progress && !reload_completed)
1525 rtx temp2 = gen_reg_rtx (DImode);
1526 rtx temp3 = gen_reg_rtx (DImode);
1527 rtx temp4 = gen_reg_rtx (DImode);
1529 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1530 gen_rtx_ASHIFT (DImode, sub_temp,
1533 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
1534 if ((low_bits & ~0xfffffc00) != 0)
1536 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1537 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
1538 emit_insn (gen_rtx_SET (VOIDmode, op0,
1539 gen_rtx_PLUS (DImode, temp4, temp3)));
1543 emit_insn (gen_rtx_SET (VOIDmode, op0,
1544 gen_rtx_PLUS (DImode, temp4, temp2)));
1549 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
1550 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
1551 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
1554 /* We are in the middle of reload, so this is really
1555 painful. However we do still make an attempt to
1556 avoid emitting truly stupid code. */
1557 if (low1 != const0_rtx)
1559 emit_insn (gen_rtx_SET (VOIDmode, op0,
1560 gen_rtx_ASHIFT (DImode, sub_temp,
1561 GEN_INT (to_shift))));
1562 emit_insn (gen_rtx_SET (VOIDmode, op0,
1563 gen_rtx_IOR (DImode, op0, low1)));
1571 if (low2 != const0_rtx)
1573 emit_insn (gen_rtx_SET (VOIDmode, op0,
1574 gen_rtx_ASHIFT (DImode, sub_temp,
1575 GEN_INT (to_shift))));
1576 emit_insn (gen_rtx_SET (VOIDmode, op0,
1577 gen_rtx_IOR (DImode, op0, low2)));
1585 emit_insn (gen_rtx_SET (VOIDmode, op0,
1586 gen_rtx_ASHIFT (DImode, sub_temp,
1587 GEN_INT (to_shift))));
1588 if (low3 != const0_rtx)
1589 emit_insn (gen_rtx_SET (VOIDmode, op0,
1590 gen_rtx_IOR (DImode, op0, low3)));
1595 /* Analyze a 64-bit constant for certain properties. */
1596 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
1597 unsigned HOST_WIDE_INT,
1598 int *, int *, int *);
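/* Bits are numbered 0..63, with bit 0 the least significant bit of LOW_BITS
   and bit 63 the most significant bit of HIGH_BITS.  *HBSP and *LBSP receive
   the highest and lowest set bit positions, and *ABBASP is nonzero when
   every bit in between is also set.  */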
1601 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
1602 unsigned HOST_WIDE_INT low_bits,
1603 int *hbsp, int *lbsp, int *abbasp)
1605 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
1608 lowest_bit_set = highest_bit_set = -1;
1612 if ((lowest_bit_set == -1)
1613 && ((low_bits >> i) & 1))
1615 if ((highest_bit_set == -1)
1616 && ((high_bits >> (32 - i - 1)) & 1))
1617 highest_bit_set = (64 - i - 1);
1620 && ((highest_bit_set == -1)
1621 || (lowest_bit_set == -1)));
1627 if ((lowest_bit_set == -1)
1628 && ((high_bits >> i) & 1))
1629 lowest_bit_set = i + 32;
1630 if ((highest_bit_set == -1)
1631 && ((low_bits >> (32 - i - 1)) & 1))
1632 highest_bit_set = 32 - i - 1;
1635 && ((highest_bit_set == -1)
1636 || (lowest_bit_set == -1)));
1638 /* If there are no bits set this should have gone out
1639 as one instruction! */
1640 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
1641 all_bits_between_are_set = 1;
1642 for (i = lowest_bit_set; i <= highest_bit_set; i++)
1646 if ((low_bits & (1 << i)) != 0)
1651 if ((high_bits & (1 << (i - 32))) != 0)
1654 all_bits_between_are_set = 0;
1657 *hbsp = highest_bit_set;
1658 *lbsp = lowest_bit_set;
1659 *abbasp = all_bits_between_are_set;
1662 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
1665 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
1666 unsigned HOST_WIDE_INT low_bits)
1668 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
1671 || high_bits == 0xffffffff)
1674 analyze_64bit_constant (high_bits, low_bits,
1675 &highest_bit_set, &lowest_bit_set,
1676 &all_bits_between_are_set);
1678 if ((highest_bit_set == 63
1679 || lowest_bit_set == 0)
1680 && all_bits_between_are_set != 0)
1683 if ((highest_bit_set - lowest_bit_set) < 21)
1689 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
1690 unsigned HOST_WIDE_INT,
1693 static unsigned HOST_WIDE_INT
1694 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
1695 unsigned HOST_WIDE_INT low_bits,
1696 int lowest_bit_set, int shift)
1698 HOST_WIDE_INT hi, lo;
1700 if (lowest_bit_set < 32)
1702 lo = (low_bits >> lowest_bit_set) << shift;
1703 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
1708 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
1710 gcc_assert (! (hi & lo));
1714 /* Here we are sure to be arch64 and this is an integer constant
1715 being loaded into a register. Emit the most efficient
1716 insn sequence possible. Detection of all the 1-insn cases
1717 has been done already. */
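/* Roughly, the order of attack below is: 2-insn sequences (a shifted simm13
   or a shifted sethi), sethi/or and sethi/xor forms when the high word is
   all zeros or all ones, 3-insn sethi/or/sllx sequences, the same tricks
   applied to the negated constant, and finally the full decomposition in
   sparc_emit_set_const64_longway.  */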
1719 sparc_emit_set_const64 (rtx op0, rtx op1)
1721 unsigned HOST_WIDE_INT high_bits, low_bits;
1722 int lowest_bit_set, highest_bit_set;
1723 int all_bits_between_are_set;
1726 /* Sanity check that we know what we are working with. */
1727 gcc_assert (TARGET_ARCH64
1728 && (GET_CODE (op0) == SUBREG
1729 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1731 if (reload_in_progress || reload_completed)
1734 if (GET_CODE (op1) != CONST_INT)
1736 sparc_emit_set_symbolic_const64 (op0, op1, temp);
1741 temp = gen_reg_rtx (DImode);
1743 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1744 low_bits = (INTVAL (op1) & 0xffffffff);
1746 /* low_bits bits 0 --> 31
1747 high_bits bits 32 --> 63 */
1749 analyze_64bit_constant (high_bits, low_bits,
1750 &highest_bit_set, &lowest_bit_set,
1751 &all_bits_between_are_set);
1753 /* First try for a 2-insn sequence. */
1755 /* These situations are preferred because the optimizer can
1756 * do more things with them:
1758 * sllx %reg, shift, %reg
1760 * srlx %reg, shift, %reg
1761 * 3) mov some_small_const, %reg
1762 * sllx %reg, shift, %reg
1764 if (((highest_bit_set == 63
1765 || lowest_bit_set == 0)
1766 && all_bits_between_are_set != 0)
1767 || ((highest_bit_set - lowest_bit_set) < 12))
1769 HOST_WIDE_INT the_const = -1;
1770 int shift = lowest_bit_set;
1772 if ((highest_bit_set != 63
1773 && lowest_bit_set != 0)
1774 || all_bits_between_are_set == 0)
1777 create_simple_focus_bits (high_bits, low_bits,
1780 else if (lowest_bit_set == 0)
1781 shift = -(63 - highest_bit_set);
1783 gcc_assert (SPARC_SIMM13_P (the_const));
1784 gcc_assert (shift != 0);
1786 emit_insn (gen_safe_SET64 (temp, the_const));
1788 emit_insn (gen_rtx_SET (VOIDmode,
1790 gen_rtx_ASHIFT (DImode,
1794 emit_insn (gen_rtx_SET (VOIDmode,
1796 gen_rtx_LSHIFTRT (DImode,
1798 GEN_INT (-shift))));
1802 /* Now a range of 22 or less bits set somewhere.
1803 * 1) sethi %hi(focus_bits), %reg
1804 * sllx %reg, shift, %reg
1805 * 2) sethi %hi(focus_bits), %reg
1806 * srlx %reg, shift, %reg
1808 if ((highest_bit_set - lowest_bit_set) < 21)
1810 unsigned HOST_WIDE_INT focus_bits =
1811 create_simple_focus_bits (high_bits, low_bits,
1812 lowest_bit_set, 10);
1814 gcc_assert (SPARC_SETHI_P (focus_bits));
1815 gcc_assert (lowest_bit_set != 10);
1817 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1819 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1820 if (lowest_bit_set < 10)
1821 emit_insn (gen_rtx_SET (VOIDmode,
1823 gen_rtx_LSHIFTRT (DImode, temp,
1824 GEN_INT (10 - lowest_bit_set))));
1825 else if (lowest_bit_set > 10)
1826 emit_insn (gen_rtx_SET (VOIDmode,
1828 gen_rtx_ASHIFT (DImode, temp,
1829 GEN_INT (lowest_bit_set - 10))));
1833 /* 1) sethi %hi(low_bits), %reg
1834 * or %reg, %lo(low_bits), %reg
1835 * 2) sethi %hi(~low_bits), %reg
1836 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1839 || high_bits == 0xffffffff)
1841 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1842 (high_bits == 0xffffffff));
1846 /* Now, try 3-insn sequences. */
1848 /* 1) sethi %hi(high_bits), %reg
1849 * or %reg, %lo(high_bits), %reg
1850 * sllx %reg, 32, %reg
1854 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1858 /* We may be able to do something quick
1859 when the constant is negated, so try that. */
1860 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1861 (~low_bits) & 0xfffffc00))
1863 /* NOTE: The trailing bits get XOR'd so we need the
1864 non-negated bits, not the negated ones. */
1865 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1867 if ((((~high_bits) & 0xffffffff) == 0
1868 && ((~low_bits) & 0x80000000) == 0)
1869 || (((~high_bits) & 0xffffffff) == 0xffffffff
1870 && ((~low_bits) & 0x80000000) != 0))
1872 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1874 if ((SPARC_SETHI_P (fast_int)
1875 && (~high_bits & 0xffffffff) == 0)
1876 || SPARC_SIMM13_P (fast_int))
1877 emit_insn (gen_safe_SET64 (temp, fast_int));
1879 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1884 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1885 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
1886 sparc_emit_set_const64 (temp, negated_const);
1889 /* If we are XOR'ing with -1, then we should emit a one's complement
1890 instead. This way the combiner will notice logical operations
1891 such as ANDN later on and substitute. */
1892 if (trailing_bits == 0x3ff)
1894 emit_insn (gen_rtx_SET (VOIDmode, op0,
1895 gen_rtx_NOT (DImode, temp)));
1899 emit_insn (gen_rtx_SET (VOIDmode,
1901 gen_safe_XOR64 (temp,
1902 (-0x400 | trailing_bits))));
1907 /* 1) sethi %hi(xxx), %reg
1908 * or %reg, %lo(xxx), %reg
1909 * sllx %reg, yyy, %reg
1911 * ??? This is just a generalized version of the low_bits==0
1912 * thing above, FIXME...
1914 if ((highest_bit_set - lowest_bit_set) < 32)
1916 unsigned HOST_WIDE_INT focus_bits =
1917 create_simple_focus_bits (high_bits, low_bits,
1920 /* We can't get here in this state. */
1921 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1923 /* So what we know is that the set bits straddle the
1924 middle of the 64-bit word. */
1925 sparc_emit_set_const64_quick2 (op0, temp,
1931 /* 1) sethi %hi(high_bits), %reg
1932 * or %reg, %lo(high_bits), %reg
1933 * sllx %reg, 32, %reg
1934 * or %reg, low_bits, %reg
1936 if (SPARC_SIMM13_P(low_bits)
1937 && ((int)low_bits > 0))
1939 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
1943 /* The easiest way when all else fails, is full decomposition. */
1945 printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
1946 high_bits, low_bits, ~high_bits, ~low_bits);
1948 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
1950 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
1952 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1953 return the mode to be used for the comparison. For floating-point,
1954 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
1955 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
1956 processing is needed. */
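/* The NOOV modes record that the flags were set as a by-product of an
   arithmetic operation rather than by a real comparison, so tests that rely
   on the overflow bit cannot be used with them.  */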
1959 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1961 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1987 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
1988 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
1990 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1991 return CCX_NOOVmode;
1997 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2004 /* Emit the compare insn and return the CC reg for a CODE comparison
2005 with operands X and Y. */
2008 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2010 enum machine_mode mode;
2013 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2016 mode = SELECT_CC_MODE (code, x, y);
2018 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2019 fcc regs (cse can't tell they're really call clobbered regs and will
2020 remove a duplicate comparison even if there is an intervening function
2021 call - it will then try to reload the cc reg via an int reg which is why
2022 we need the movcc patterns). It is possible to provide the movcc
2023 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2024 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2025 to tell cse that CCFPE mode registers (even pseudos) are call
2028 /* ??? This is an experiment. Rather than making changes to cse which may
2029 or may not be easy/clean, we do our own cse. This is possible because
2030 we will generate hard registers. Cse knows they're call clobbered (it
2031 doesn't know the same thing about pseudos). If we guess wrong, no big
2032 deal, but if we win, great! */
2034 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2035 #if 1 /* experiment */
2038 /* We cycle through the registers to ensure they're all exercised. */
2039 static int next_fcc_reg = 0;
2040 /* Previous x,y for each fcc reg. */
2041 static rtx prev_args[4][2];
2043 /* Scan prev_args for x,y. */
2044 for (reg = 0; reg < 4; reg++)
2045 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2050 prev_args[reg][0] = x;
2051 prev_args[reg][1] = y;
2052 next_fcc_reg = (next_fcc_reg + 1) & 3;
2054 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2057 cc_reg = gen_reg_rtx (mode);
2058 #endif /* ! experiment */
2059 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2060 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2062 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2064 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD.  If we do, this
2065 will only result in an unrecognizable insn, so there is no point in asserting.  */
2066 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2072 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2075 gen_compare_reg (rtx cmp)
2077 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2080 /* This function is used for v9 only.
2081 DEST is the target of the Scc insn.
2082 CODE is the code for an Scc's comparison.
2083 X and Y are the values we compare.
2085 This function is needed to turn
2088 (gt (reg:CCX 100 %icc)
2092 (gt:DI (reg:CCX 100 %icc)
2095 I.e., the instruction recognizer needs to see the mode of the comparison to
2096 find the right instruction. We could use "gt:DI" right in the
2097 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2100 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2103 && (GET_MODE (x) == DImode
2104 || GET_MODE (dest) == DImode))
2107 /* Try to use the movrCC insns. */
2109 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2111 && v9_regcmp_p (compare_code))
2116 /* Special case for op0 != 0.  This can be done with one instruction if dest == op0.  */
2119 if (compare_code == NE
2120 && GET_MODE (dest) == DImode
2121 && rtx_equal_p (op0, dest))
2123 emit_insn (gen_rtx_SET (VOIDmode, dest,
2124 gen_rtx_IF_THEN_ELSE (DImode,
2125 gen_rtx_fmt_ee (compare_code, DImode,
2132 if (reg_overlap_mentioned_p (dest, op0))
2134 /* Handle the case where dest == x.
2135 We "early clobber" the result. */
2136 op0 = gen_reg_rtx (GET_MODE (x));
2137 emit_move_insn (op0, x);
2140 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2141 if (GET_MODE (op0) != DImode)
2143 temp = gen_reg_rtx (DImode);
2144 convert_move (temp, op0, 0);
2148 emit_insn (gen_rtx_SET (VOIDmode, dest,
2149 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2150 gen_rtx_fmt_ee (compare_code, DImode,
2158 x = gen_compare_reg_1 (compare_code, x, y);
2161 gcc_assert (GET_MODE (x) != CC_NOOVmode
2162 && GET_MODE (x) != CCX_NOOVmode);
2164 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2165 emit_insn (gen_rtx_SET (VOIDmode, dest,
2166 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2167 gen_rtx_fmt_ee (compare_code,
2168 GET_MODE (x), x, y),
2169 const1_rtx, dest)));
2175 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2176 without jumps using the addx/subx instructions. */
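/* For illustration, the jump-free form of an unsigned "less than" is roughly

	subcc	%x, %y, %g0	! sets the carry bit on unsigned borrow
	addx	%g0, 0, %dest	! dest = carry, i.e. (x < y) unsigned

   while sgeu uses "subx %g0, -1, %dest" to produce the complement; the
   register names here are placeholders.  */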
2179 emit_scc_insn (rtx operands[])
2186 /* The quad-word fp compare library routines all return nonzero to indicate
2187 true, which is different from the equivalent libgcc routines, so we must
2188 handle them specially here. */
2189 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2191 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2192 GET_CODE (operands[1]));
2193 operands[2] = XEXP (operands[1], 0);
2194 operands[3] = XEXP (operands[1], 1);
2197 code = GET_CODE (operands[1]);
2201 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2202 more applications). The exception to this is "reg != 0" which can
2203 be done in one instruction on v9 (so we do it). */
2206 if (GET_MODE (x) == SImode)
2208 rtx pat = gen_seqsi_special (operands[0], x, y);
2212 else if (GET_MODE (x) == DImode)
2214 rtx pat = gen_seqdi_special (operands[0], x, y);
2222 if (GET_MODE (x) == SImode)
2224 rtx pat = gen_snesi_special (operands[0], x, y);
2228 else if (GET_MODE (x) == DImode)
2230 rtx pat = gen_snedi_special (operands[0], x, y);
2236 /* For the rest, on v9 we can use conditional moves. */
2240 if (gen_v9_scc (operands[0], code, x, y))
2244 /* We can do LTU and GEU using the addx/subx instructions too. And
2245 for GTU/LEU, if both operands are registers, swap them and fall
2246 back to the easy case. */
2247 if (code == GTU || code == LEU)
2249 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2250 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2255 code = swap_condition (code);
2259 if (code == LTU || code == GEU)
2261 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2262 gen_rtx_fmt_ee (code, SImode,
2263 gen_compare_reg_1 (code, x, y),
2268 /* Nope, do branches. */
2272 /* Emit a conditional jump insn for the v9 architecture using comparison code
2273 CODE and jump target LABEL.
2274 This function exists to take advantage of the v9 brxx insns. */
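/* The V9 brxx instructions (brz, brnz, brlz, brlez, brgz, brgez) branch
   directly on the contents of a 64-bit integer register compared against
   zero, so no condition-code register needs to be set up first.  */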
2277 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2279 emit_jump_insn (gen_rtx_SET (VOIDmode,
2281 gen_rtx_IF_THEN_ELSE (VOIDmode,
2282 gen_rtx_fmt_ee (code, GET_MODE (op0),
2284 gen_rtx_LABEL_REF (VOIDmode, label),
2289 emit_conditional_branch_insn (rtx operands[])
2291 /* The quad-word fp compare library routines all return nonzero to indicate
2292 true, which is different from the equivalent libgcc routines, so we must
2293 handle them specially here. */
2294 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2296 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2297 GET_CODE (operands[0]));
2298 operands[1] = XEXP (operands[0], 0);
2299 operands[2] = XEXP (operands[0], 1);
2302 if (TARGET_ARCH64 && operands[2] == const0_rtx
2303 && GET_CODE (operands[1]) == REG
2304 && GET_MODE (operands[1]) == DImode)
2306 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2310 operands[1] = gen_compare_reg (operands[0]);
2311 operands[2] = const0_rtx;
2312 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2313 operands[1], operands[2]);
2314 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2319 /* Generate a DFmode part of a hard TFmode register.
2320 REG is the TFmode hard register, LOW is 1 for the
2321 low 64 bits of the register and 0 otherwise.
2324 gen_df_reg (rtx reg, int low)
2326 int regno = REGNO (reg);
2328 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2329 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2330 return gen_rtx_REG (DFmode, regno);
2333 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2334 Unlike normal calls, TFmode operands are passed by reference. It is
2335 assumed that no more than 3 operands are required. */
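/* As a rough sketch of the convention relied upon here: the soft quad-float
   routines (e.g. _Qp_add) take pointers to their 128-bit operands and, when
   the result is itself TFmode, write it through a result pointer passed as
   the first argument; hence the stack temporaries and address arguments
   built below.  */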
2338 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2340 rtx ret_slot = NULL, arg[3], func_sym;
2343 /* We only expect to be called for conversions, unary, and binary ops. */
2344 gcc_assert (nargs == 2 || nargs == 3);
2346 for (i = 0; i < nargs; ++i)
2348 rtx this_arg = operands[i];
2351 /* TFmode arguments and return values are passed by reference. */
2352 if (GET_MODE (this_arg) == TFmode)
2354 int force_stack_temp;
2356 force_stack_temp = 0;
2357 if (TARGET_BUGGY_QP_LIB && i == 0)
2358 force_stack_temp = 1;
2360 if (GET_CODE (this_arg) == MEM
2361 && ! force_stack_temp)
2362 this_arg = XEXP (this_arg, 0);
2363 else if (CONSTANT_P (this_arg)
2364 && ! force_stack_temp)
2366 this_slot = force_const_mem (TFmode, this_arg);
2367 this_arg = XEXP (this_slot, 0);
2371 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2373 /* Operand 0 is the return value. We'll copy it out later. */
2375 emit_move_insn (this_slot, this_arg);
2377 ret_slot = this_slot;
2379 this_arg = XEXP (this_slot, 0);
2386 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2388 if (GET_MODE (operands[0]) == TFmode)
2391 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2392 arg[0], GET_MODE (arg[0]),
2393 arg[1], GET_MODE (arg[1]));
2395 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2396 arg[0], GET_MODE (arg[0]),
2397 arg[1], GET_MODE (arg[1]),
2398 arg[2], GET_MODE (arg[2]));
2401 emit_move_insn (operands[0], ret_slot);
2407 gcc_assert (nargs == 2);
2409 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2410 GET_MODE (operands[0]), 1,
2411 arg[1], GET_MODE (arg[1]));
2413 if (ret != operands[0])
2414 emit_move_insn (operands[0], ret);
2418 /* Expand soft-float TFmode calls to SPARC ABI routines.  */
2421 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2443 emit_soft_tfmode_libcall (func, 3, operands);
2447 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2451 gcc_assert (code == SQRT);
2454 emit_soft_tfmode_libcall (func, 2, operands);
2458 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2465 switch (GET_MODE (operands[1]))
2478 case FLOAT_TRUNCATE:
2479 switch (GET_MODE (operands[0]))
2493 switch (GET_MODE (operands[1]))
2498 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2508 case UNSIGNED_FLOAT:
2509 switch (GET_MODE (operands[1]))
2514 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2525 switch (GET_MODE (operands[0]))
2539 switch (GET_MODE (operands[0]))
2556 emit_soft_tfmode_libcall (func, 2, operands);
2559 /* Expand a hard-float TFmode operation.  All arguments must be in registers.  */
2563 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2567 if (GET_RTX_CLASS (code) == RTX_UNARY)
2569 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2570 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2574 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2575 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2576 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2577 operands[1], operands[2]);
2580 if (register_operand (operands[0], VOIDmode))
2583 dest = gen_reg_rtx (GET_MODE (operands[0]));
2585 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2587 if (dest != operands[0])
2588 emit_move_insn (operands[0], dest);
2592 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2594 if (TARGET_HARD_QUAD)
2595 emit_hard_tfmode_operation (code, operands);
2597 emit_soft_tfmode_binop (code, operands);
2601 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2603 if (TARGET_HARD_QUAD)
2604 emit_hard_tfmode_operation (code, operands);
2606 emit_soft_tfmode_unop (code, operands);
2610 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2612 if (TARGET_HARD_QUAD)
2613 emit_hard_tfmode_operation (code, operands);
2615 emit_soft_tfmode_cvt (code, operands);
2618 /* Return nonzero if a branch/jump/call instruction will be emitting
2619 a nop into its delay slot.  */
2622 empty_delay_slot (rtx insn)
2626 /* If there is no previous instruction (should not happen), return true.  */
2627 if (PREV_INSN (insn) == NULL)
2630 seq = NEXT_INSN (PREV_INSN (insn));
2631 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2637 /* Return nonzero if TRIAL can go into the call delay slot. */
2640 tls_call_delay (rtx trial)
2645 call __tls_get_addr, %tgd_call (foo)
2646 add %l7, %o0, %o0, %tgd_add (foo)
2647 while Sun as/ld does not. */
2648 if (TARGET_GNU_TLS || !TARGET_TLS)
2651 pat = PATTERN (trial);
2653 /* We must reject tgd_add{32|64}, i.e.
2654 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2655 and tldm_add{32|64}, i.e.
2656 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2658 if (GET_CODE (pat) == SET
2659 && GET_CODE (SET_SRC (pat)) == PLUS)
2661 rtx unspec = XEXP (SET_SRC (pat), 1);
2663 if (GET_CODE (unspec) == UNSPEC
2664 && (XINT (unspec, 1) == UNSPEC_TLSGD
2665 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2672 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2673 instruction. RETURN_P is true if the v9 variant 'return' is to be
2674 considered in the test too.
2676 TRIAL must be a SET whose destination is a REG appropriate for the
2677 'restore' instruction or, if RETURN_P is true, for the 'return' instruction.  */
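/* For illustration, a 'restore' can perform one last arithmetic operation
   while popping the register window, so e.g. "return 1;" in a non-leaf
   function can be emitted as

	ret
	restore %g0, 1, %o0	! write 1 into the caller's %o0

   which is what the patterns recognized below make possible.  */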
2681 eligible_for_restore_insn (rtx trial, bool return_p)
2683 rtx pat = PATTERN (trial);
2684 rtx src = SET_SRC (pat);
2686 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2687 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2688 && arith_operand (src, GET_MODE (src)))
2691 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2693 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2696 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2697 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2698 && arith_double_operand (src, GET_MODE (src)))
2699 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2701 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2702 else if (! TARGET_FPU && register_operand (src, SFmode))
2705 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2706 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2709 /* If we have the 'return' instruction, anything that does not use
2710 local or output registers and can go into a delay slot wins. */
2711 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2712 && (get_attr_in_uncond_branch_delay (trial)
2713 == IN_UNCOND_BRANCH_DELAY_TRUE))
2716 /* The 'restore src1,src2,dest' pattern for SImode. */
2717 else if (GET_CODE (src) == PLUS
2718 && register_operand (XEXP (src, 0), SImode)
2719 && arith_operand (XEXP (src, 1), SImode))
2722 /* The 'restore src1,src2,dest' pattern for DImode. */
2723 else if (GET_CODE (src) == PLUS
2724 && register_operand (XEXP (src, 0), DImode)
2725 && arith_double_operand (XEXP (src, 1), DImode))
2728 /* The 'restore src1,%lo(src2),dest' pattern. */
2729 else if (GET_CODE (src) == LO_SUM
2730 && ! TARGET_CM_MEDMID
2731 && ((register_operand (XEXP (src, 0), SImode)
2732 && immediate_operand (XEXP (src, 1), SImode))
2734 && register_operand (XEXP (src, 0), DImode)
2735 && immediate_operand (XEXP (src, 1), DImode))))
2738 /* The 'restore src,src,dest' pattern. */
2739 else if (GET_CODE (src) == ASHIFT
2740 && (register_operand (XEXP (src, 0), SImode)
2741 || register_operand (XEXP (src, 0), DImode))
2742 && XEXP (src, 1) == const1_rtx)
2748 /* Return nonzero if TRIAL can go into the function return's delay slot.  */
2752 eligible_for_return_delay (rtx trial)
2756 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2759 if (get_attr_length (trial) != 1)
2762 /* If there are any call-saved registers, we should scan TRIAL to check
2763 that it does not reference them.  For now, just make it easy.  */
2767 /* If the function uses __builtin_eh_return, the eh_return machinery
2768 occupies the delay slot. */
2769 if (crtl->calls_eh_return)
2772 /* In the case of a true leaf function, anything can go into the slot. */
2773 if (sparc_leaf_function_p)
2774 return get_attr_in_uncond_branch_delay (trial)
2775 == IN_UNCOND_BRANCH_DELAY_TRUE;
2777 pat = PATTERN (trial);
2779 /* Otherwise, only operations which can be done in tandem with
2780 a `restore' or `return' insn can go into the delay slot. */
2781 if (GET_CODE (SET_DEST (pat)) != REG
2782 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2785 /* If this instruction sets up a floating-point register and we have a return
2786 instruction, it can probably go in.  But a restore will not work with FP registers.  */
2788 if (REGNO (SET_DEST (pat)) >= 32)
2790 && ! epilogue_renumber (&pat, 1)
2791 && (get_attr_in_uncond_branch_delay (trial)
2792 == IN_UNCOND_BRANCH_DELAY_TRUE));
2794 return eligible_for_restore_insn (trial, true);
2797 /* Return nonzero if TRIAL can go into the sibling call's delay slot.  */
2801 eligible_for_sibcall_delay (rtx trial)
2805 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2808 if (get_attr_length (trial) != 1)
2811 pat = PATTERN (trial);
2813 if (sparc_leaf_function_p)
2815 /* If the tail call is done using the call instruction,
2816 we have to restore %o7 in the delay slot. */
2817 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2820 /* %g1 is used to build the function address */
2821 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2827 /* Otherwise, only operations which can be done in tandem with
2828 a `restore' insn can go into the delay slot. */
2829 if (GET_CODE (SET_DEST (pat)) != REG
2830 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2831 || REGNO (SET_DEST (pat)) >= 32)
2834 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2836 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2839 return eligible_for_restore_insn (trial, false);
2843 short_branch (int uid1, int uid2)
2845 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2847 /* Leave a few words of "slop". */
2848 if (delta >= -1023 && delta <= 1022)
2854 /* Return nonzero if REG is not used after INSN.
2855 We assume REG is a reload reg, and therefore does
2856 not live past labels or calls or jumps. */
2858 reg_unused_after (rtx reg, rtx insn)
2860 enum rtx_code code, prev_code = UNKNOWN;
2862 while ((insn = NEXT_INSN (insn)))
2864 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2867 code = GET_CODE (insn);
2868 if (GET_CODE (insn) == CODE_LABEL)
2873 rtx set = single_set (insn);
2874 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2877 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2879 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2887 /* Determine if it's legal to put X into the constant pool. This
2888 is not possible if X contains the address of a symbol that is
2889 not constant (TLS) or not known at final link time (PIC). */
2892 sparc_cannot_force_const_mem (rtx x)
2894 switch (GET_CODE (x))
2899 /* Accept all non-symbolic constants. */
2903 /* Labels are OK iff we are non-PIC. */
2904 return flag_pic != 0;
2907 /* 'Naked' TLS symbol references are never OK,
2908 non-TLS symbols are OK iff we are non-PIC. */
2909 if (SYMBOL_REF_TLS_MODEL (x))
2912 return flag_pic != 0;
2915 return sparc_cannot_force_const_mem (XEXP (x, 0));
2918 return sparc_cannot_force_const_mem (XEXP (x, 0))
2919 || sparc_cannot_force_const_mem (XEXP (x, 1));
2928 static GTY(()) char pic_helper_symbol_name[256];
2929 static GTY(()) rtx pic_helper_symbol;
2930 static GTY(()) bool pic_helper_emitted_p = false;
2931 static GTY(()) rtx global_offset_table;
2933 /* Ensure that we are not using patterns that are not OK with PIC. */
2941 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2942 && (GET_CODE (recog_data.operand[i]) != CONST
2943 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2944 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2945 == global_offset_table)
2946 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2954 /* Return true if X is an address which needs a temporary register when
2955 reloaded while generating PIC code. */
2958 pic_address_needs_scratch (rtx x)
2960 /* An address which is a SYMBOL_REF plus a non-SMALL_INT constant needs a temp reg.  */
2961 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2962 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2963 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2964 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2970 /* Determine if a given RTX is a valid constant. We already know this
2971 satisfies CONSTANT_P. */
2974 legitimate_constant_p (rtx x)
2978 switch (GET_CODE (x))
2981 /* TLS symbols are not constant. */
2982 if (SYMBOL_REF_TLS_MODEL (x))
2987 inner = XEXP (x, 0);
2989 /* Offsets of TLS symbols are never valid.
2990 Discourage CSE from creating them. */
2991 if (GET_CODE (inner) == PLUS
2992 && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
2997 if (GET_MODE (x) == VOIDmode)
3000 /* Floating point constants are generally not ok.
3001 The only exception is 0.0 in VIS. */
3003 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
3004 && const_zero_operand (x, GET_MODE (x)))
3010 /* Vector constants are generally not ok.
3011 The only exception is 0 in VIS. */
3013 && const_zero_operand (x, GET_MODE (x)))
3025 /* Determine if a given RTX is a valid constant address. */
3028 constant_address_p (rtx x)
3030 switch (GET_CODE (x))
3038 if (flag_pic && pic_address_needs_scratch (x))
3040 return legitimate_constant_p (x);
3043 return !flag_pic && legitimate_constant_p (x);
3050 /* Nonzero if the constant value X is a legitimate general operand
3051 when generating PIC code. It is given that flag_pic is on and
3052 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3055 legitimate_pic_operand_p (rtx x)
3057 if (pic_address_needs_scratch (x))
3059 if (SPARC_SYMBOL_REF_TLS_P (x)
3060 || (GET_CODE (x) == CONST
3061 && GET_CODE (XEXP (x, 0)) == PLUS
3062 && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
3067 /* Return nonzero if ADDR is a valid memory address.
3068 STRICT specifies whether strict register checking applies. */
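/* Roughly, the accepted forms are REG, REG + REG, REG + SIMM13,
   LO_SUM (REG, imm) and small CONST_INT addresses, with extra restrictions
   for PIC, TLS, TFmode and ARCH32 double-word accesses; see the checks
   below for the precise rules.  */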
3071 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
3073 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3075 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3077 else if (GET_CODE (addr) == PLUS)
3079 rs1 = XEXP (addr, 0);
3080 rs2 = XEXP (addr, 1);
3082 /* Canonicalize.  REG comes first; if there are no regs,
3083 LO_SUM comes first. */
3085 && GET_CODE (rs1) != SUBREG
3087 || GET_CODE (rs2) == SUBREG
3088 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3090 rs1 = XEXP (addr, 1);
3091 rs2 = XEXP (addr, 0);
3095 && rs1 == pic_offset_table_rtx
3097 && GET_CODE (rs2) != SUBREG
3098 && GET_CODE (rs2) != LO_SUM
3099 && GET_CODE (rs2) != MEM
3100 && ! SPARC_SYMBOL_REF_TLS_P (rs2)
3101 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3102 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3104 || GET_CODE (rs1) == SUBREG)
3105 && RTX_OK_FOR_OFFSET_P (rs2)))
3110 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3111 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3113 /* We prohibit REG + REG for TFmode when there are no quad move insns
3114 and we consequently need to split. We do this because REG+REG
3115 is not an offsettable address. If we get the situation in reload
3116 where source and destination of a movtf pattern are both MEMs with
3117 REG+REG address, then only one of them gets converted to an
3118 offsettable address. */
3120 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3123 /* We prohibit REG + REG on ARCH32 if not optimizing for
3124 DFmode/DImode because then mem_min_alignment is likely to be zero
3125 after reload and the forced split would lack a matching splitter pattern.  */
3127 if (TARGET_ARCH32 && !optimize
3128 && (mode == DFmode || mode == DImode))
3131 else if (USE_AS_OFFSETABLE_LO10
3132 && GET_CODE (rs1) == LO_SUM
3134 && ! TARGET_CM_MEDMID
3135 && RTX_OK_FOR_OLO10_P (rs2))
3138 imm1 = XEXP (rs1, 1);
3139 rs1 = XEXP (rs1, 0);
3140 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3144 else if (GET_CODE (addr) == LO_SUM)
3146 rs1 = XEXP (addr, 0);
3147 imm1 = XEXP (addr, 1);
3149 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3152 /* We can't allow TFmode in 32-bit mode, because an offset greater
3153 than the alignment (8) may cause the LO_SUM to overflow. */
3154 if (mode == TFmode && TARGET_ARCH32)
3157 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3162 if (GET_CODE (rs1) == SUBREG)
3163 rs1 = SUBREG_REG (rs1);
3169 if (GET_CODE (rs2) == SUBREG)
3170 rs2 = SUBREG_REG (rs2);
3177 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3178 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3183 if ((REGNO (rs1) >= 32
3184 && REGNO (rs1) != FRAME_POINTER_REGNUM
3185 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3187 && (REGNO (rs2) >= 32
3188 && REGNO (rs2) != FRAME_POINTER_REGNUM
3189 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3195 /* Construct the SYMBOL_REF for the __tls_get_addr function.  */
3197 static GTY(()) rtx sparc_tls_symbol;
3200 sparc_tls_get_addr (void)
3202 if (!sparc_tls_symbol)
3203 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3205 return sparc_tls_symbol;
3209 sparc_tls_got (void)
3214 crtl->uses_pic_offset_table = 1;
3215 return pic_offset_table_rtx;
3218 if (!global_offset_table)
3219 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3220 temp = gen_reg_rtx (Pmode);
3221 emit_move_insn (temp, global_offset_table);
3225 /* Return 1 if *X is a thread-local symbol. */
3228 sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
3230 return SPARC_SYMBOL_REF_TLS_P (*x);
3233 /* Return 1 if X contains a thread-local symbol. */
3236 sparc_tls_referenced_p (rtx x)
3238 if (!TARGET_HAVE_TLS)
3241 return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
3244 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3245 this (thread-local) address. */
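/* Roughly, the expansion follows the usual SPARC TLS models: global-dynamic
   and local-dynamic call __tls_get_addr through the tgd/tldm relocation
   sequences, initial-exec loads the offset of the variable from the GOT, and
   local-exec folds the offset directly against %g7, the thread pointer.  */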
3248 legitimize_tls_address (rtx addr)
3250 rtx temp1, temp2, temp3, ret, o0, got, insn;
3252 gcc_assert (can_create_pseudo_p ());
3254 if (GET_CODE (addr) == SYMBOL_REF)
3255 switch (SYMBOL_REF_TLS_MODEL (addr))
3257 case TLS_MODEL_GLOBAL_DYNAMIC:
3259 temp1 = gen_reg_rtx (SImode);
3260 temp2 = gen_reg_rtx (SImode);
3261 ret = gen_reg_rtx (Pmode);
3262 o0 = gen_rtx_REG (Pmode, 8);
3263 got = sparc_tls_got ();
3264 emit_insn (gen_tgd_hi22 (temp1, addr));
3265 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3268 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3269 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3274 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3275 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3278 CALL_INSN_FUNCTION_USAGE (insn)
3279 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3280 CALL_INSN_FUNCTION_USAGE (insn));
3281 insn = get_insns ();
3283 emit_libcall_block (insn, ret, o0, addr);
3286 case TLS_MODEL_LOCAL_DYNAMIC:
3288 temp1 = gen_reg_rtx (SImode);
3289 temp2 = gen_reg_rtx (SImode);
3290 temp3 = gen_reg_rtx (Pmode);
3291 ret = gen_reg_rtx (Pmode);
3292 o0 = gen_rtx_REG (Pmode, 8);
3293 got = sparc_tls_got ();
3294 emit_insn (gen_tldm_hi22 (temp1));
3295 emit_insn (gen_tldm_lo10 (temp2, temp1));
3298 emit_insn (gen_tldm_add32 (o0, got, temp2));
3299 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3304 emit_insn (gen_tldm_add64 (o0, got, temp2));
3305 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3308 CALL_INSN_FUNCTION_USAGE (insn)
3309 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3310 CALL_INSN_FUNCTION_USAGE (insn));
3311 insn = get_insns ();
3313 emit_libcall_block (insn, temp3, o0,
3314 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3315 UNSPEC_TLSLD_BASE));
3316 temp1 = gen_reg_rtx (SImode);
3317 temp2 = gen_reg_rtx (SImode);
3318 emit_insn (gen_tldo_hix22 (temp1, addr));
3319 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3321 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3323 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3326 case TLS_MODEL_INITIAL_EXEC:
3327 temp1 = gen_reg_rtx (SImode);
3328 temp2 = gen_reg_rtx (SImode);
3329 temp3 = gen_reg_rtx (Pmode);
3330 got = sparc_tls_got ();
3331 emit_insn (gen_tie_hi22 (temp1, addr));
3332 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3334 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3336 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3339 ret = gen_reg_rtx (Pmode);
3341 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3344 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3348 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3351 case TLS_MODEL_LOCAL_EXEC:
3352 temp1 = gen_reg_rtx (Pmode);
3353 temp2 = gen_reg_rtx (Pmode);
3356 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3357 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3361 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3362 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3364 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3372 gcc_unreachable (); /* for now ... */
3378 /* Legitimize PIC addresses. If the address is already position-independent,
3379 we return ORIG. Newly generated position-independent addresses go into a
3380 reg.  This is REG if nonzero, otherwise we allocate register(s) as necessary.  */
3384 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3387 if (GET_CODE (orig) == SYMBOL_REF
3388 /* See the comment in sparc_expand_move. */
3389 || (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
3391 rtx pic_ref, address;
3396 gcc_assert (! reload_in_progress && ! reload_completed);
3397 reg = gen_reg_rtx (Pmode);
3402 /* If not during reload, allocate another temp reg here for loading
3403 in the address, so that these instructions can be optimized
3405 rtx temp_reg = ((reload_in_progress || reload_completed)
3406 ? reg : gen_reg_rtx (Pmode));
3408 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3409 won't get confused into thinking that these two instructions
3410 are loading in the true address of the symbol. If in the
3411 future a PIC rtx exists, that should be used instead. */
3414 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3415 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3419 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3420 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3427 pic_ref = gen_const_mem (Pmode,
3428 gen_rtx_PLUS (Pmode,
3429 pic_offset_table_rtx, address));
3430 crtl->uses_pic_offset_table = 1;
3431 insn = emit_move_insn (reg, pic_ref);
3432 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3434 set_unique_reg_note (insn, REG_EQUAL, orig);
3437 else if (GET_CODE (orig) == CONST)
3441 if (GET_CODE (XEXP (orig, 0)) == PLUS
3442 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3447 gcc_assert (! reload_in_progress && ! reload_completed);
3448 reg = gen_reg_rtx (Pmode);
3451 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3452 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3453 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3454 base == reg ? 0 : reg);
3456 if (GET_CODE (offset) == CONST_INT)
3458 if (SMALL_INT (offset))
3459 return plus_constant (base, INTVAL (offset));
3460 else if (! reload_in_progress && ! reload_completed)
3461 offset = force_reg (Pmode, offset);
3463 /* If we reach here, then something is seriously wrong. */
3466 return gen_rtx_PLUS (Pmode, base, offset);
3468 else if (GET_CODE (orig) == LABEL_REF)
3469 /* ??? Why do we do this? */
3470 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3471 the register is live instead, in case it is eliminated. */
3472 crtl->uses_pic_offset_table = 1;
3477 /* Try machine-dependent ways of modifying an illegitimate address X
3478 to be legitimate. If we find one, return the new, valid address.
3480 OLDX is the address as it was before break_out_memory_refs was called.
3481 In some cases it is useful to look at this to decide what needs to be done.
3483 MODE is the mode of the operand pointed to by X.
3485 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
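/* For example, an access to "base + 0x12345" cannot use a single
   REG + SIMM13 address, so it is roughly rewritten as

	set	0x12345, %tmp		! force the large offset into a register
	ld	[%base + %tmp], ...	! REG + REG addressing

   via the force_operand/copy_to_mode_reg calls below; the register names
   here are placeholders.  */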
3488 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3489 enum machine_mode mode)
3493 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3494 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3495 force_operand (XEXP (x, 0), NULL_RTX));
3496 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3497 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3498 force_operand (XEXP (x, 1), NULL_RTX));
3499 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3500 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3502 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3503 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3504 force_operand (XEXP (x, 1), NULL_RTX));
3506 if (x != orig_x && legitimate_address_p (mode, x, FALSE))
3509 if (SPARC_SYMBOL_REF_TLS_P (x))
3510 x = legitimize_tls_address (x);
3512 x = legitimize_pic_address (x, mode, 0);
3513 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3514 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3515 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3516 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3517 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3518 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3519 else if (GET_CODE (x) == SYMBOL_REF
3520 || GET_CODE (x) == CONST
3521 || GET_CODE (x) == LABEL_REF)
3522 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3526 /* Emit the special PIC helper function. */
3529 emit_pic_helper (void)
3531 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3534 switch_to_section (text_section);
3536 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3538 ASM_OUTPUT_ALIGN (asm_out_file, align);
3539 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3540 if (flag_delayed_branch)
3541 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3542 pic_name, pic_name);
3544 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3545 pic_name, pic_name);
3547 pic_helper_emitted_p = true;
3550 /* Emit code to load the PIC register. */
3553 load_pic_register (bool delay_pic_helper)
3555 int orig_flag_pic = flag_pic;
3557 if (TARGET_VXWORKS_RTP)
3559 emit_insn (gen_vxworks_load_got ());
3560 emit_use (pic_offset_table_rtx);
3564 /* If we haven't initialized the special PIC symbols, do so now. */
3565 if (!pic_helper_symbol_name[0])
3567 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3568 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3569 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3572 /* If we haven't emitted the special PIC helper function, do so now unless
3573 we are requested to delay it. */
3574 if (!delay_pic_helper && !pic_helper_emitted_p)
3579 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3580 pic_helper_symbol));
3582 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3583 pic_helper_symbol));
3584 flag_pic = orig_flag_pic;
3586 /* Need to emit this whether or not we obey regdecls,
3587 since setjmp/longjmp can cause life info to screw up.
3588 ??? In the case where we don't obey regdecls, this is not sufficient
3589 since we may not fall out the bottom. */
3590 emit_use (pic_offset_table_rtx);
3593 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3594 address of the call target. */
3597 sparc_emit_call_insn (rtx pat, rtx addr)
3601 insn = emit_call_insn (pat);
3603 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3604 if (TARGET_VXWORKS_RTP
3606 && GET_CODE (addr) == SYMBOL_REF
3607 && (SYMBOL_REF_DECL (addr)
3608 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3609 : !SYMBOL_REF_LOCAL_P (addr)))
3611 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3612 crtl->uses_pic_offset_table = 1;
3616 /* Return 1 if RTX is a MEM which is known to be aligned to at
3617 least a DESIRED byte boundary. */
3620 mem_min_alignment (rtx mem, int desired)
3622 rtx addr, base, offset;
3624 /* If it's not a MEM we can't accept it. */
3625 if (GET_CODE (mem) != MEM)
3629 if (!TARGET_UNALIGNED_DOUBLES
3630 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3633 /* ??? The rest of the function predates MEM_ALIGN so
3634 there is probably a bit of redundancy. */
3635 addr = XEXP (mem, 0);
3636 base = offset = NULL_RTX;
3637 if (GET_CODE (addr) == PLUS)
3639 if (GET_CODE (XEXP (addr, 0)) == REG)
3641 base = XEXP (addr, 0);
3643 /* What we are saying here is that if the base
3644 REG is aligned properly, the compiler will make
3645 sure any REG-based index upon it will be so as well.  */
3647 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3648 offset = XEXP (addr, 1);
3650 offset = const0_rtx;
3653 else if (GET_CODE (addr) == REG)
3656 offset = const0_rtx;
3659 if (base != NULL_RTX)
3661 int regno = REGNO (base);
3663 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3665 /* Check if the compiler has recorded some information
3666 about the alignment of the base REG. If reload has
3667 completed, we already matched with proper alignments.
3668 If not running global_alloc, reload might give us
3669 an unaligned pointer to the local stack, though.  */
3671 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3672 || (optimize && reload_completed))
3673 && (INTVAL (offset) & (desired - 1)) == 0)
3678 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3682 else if (! TARGET_UNALIGNED_DOUBLES
3683 || CONSTANT_P (addr)
3684 || GET_CODE (addr) == LO_SUM)
3686 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3687 is true, in which case we can only assume that an access is aligned if
3688 it is to a constant address, or the address involves a LO_SUM. */
3692 /* An obviously unaligned address. */
3697 /* Vectors to keep interesting information about registers where it can easily
3698 be got. We used to use the actual mode value as the bit number, but there
3699 are more than 32 modes now. Instead we use two tables: one indexed by
3700 hard register number, and one indexed by mode. */
3702 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3703 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3704 mapped into one sparc_mode_class mode. */
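/* For example, QImode, HImode and SImode all collapse into S_MODE and
   DFmode into DF_MODE, so the per-register masks such as S_MODES and
   DF_MODES below fit comfortably in an int.  */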
3706 enum sparc_mode_class {
3707 S_MODE, D_MODE, T_MODE, O_MODE,
3708 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3712 /* Modes for single-word and smaller quantities. */
3713 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3715 /* Modes for double-word and smaller quantities. */
3716 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3718 /* Modes for quad-word and smaller quantities. */
3719 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3721 /* Modes for 8-word and smaller quantities. */
3722 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3724 /* Modes for single-float quantities. We must allow any single word or
3725 smaller quantity. This is because the fix/float conversion instructions
3726 take integer inputs/outputs from the float registers. */
3727 #define SF_MODES (S_MODES)
3729 /* Modes for double-float and smaller quantities. */
3730 #define DF_MODES (S_MODES | D_MODES)
3732 /* Modes for double-float only quantities. */
3733 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3735 /* Modes for quad-float only quantities. */
3736 #define TF_ONLY_MODES (1 << (int) TF_MODE)
3738 /* Modes for quad-float and smaller quantities. */
3739 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
3741 /* Modes for quad-float and double-float quantities. */
3742 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
3744 /* Modes for quad-float pair only quantities. */
3745 #define OF_ONLY_MODES (1 << (int) OF_MODE)
3747 /* Modes for quad-float pairs and smaller quantities. */
3748 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
3750 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
3752 /* Modes for condition codes. */
3753 #define CC_MODES (1 << (int) CC_MODE)
3754 #define CCFP_MODES (1 << (int) CCFP_MODE)
3756 /* Value is 1 if the register/mode pair is acceptable on SPARC.
3757 The funny mixture of D and T modes is because integer operations
3758 do not specially operate on tetra quantities, so non-quad-aligned
3759 registers can hold quadword quantities (except %o4 and %i4 because
3760 they cross fixed registers). */
3762 /* This points to either the 32 bit or the 64 bit version. */
3763 const int *hard_regno_mode_classes;
3765 static const int hard_32bit_mode_classes[] = {
3766 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3767 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3768 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3769 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3771 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3772 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3773 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3774 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3776 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3777 and none can hold SFmode/SImode values. */
3778 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3779 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3780 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3781 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3784 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3790 static const int hard_64bit_mode_classes[] = {
3791 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3792 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3793 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3794 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3796 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3797 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3798 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3799 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3801 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3802 and none can hold SFmode/SImode values. */
3803 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3804 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3805 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3806 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3809 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3815 int sparc_mode_class [NUM_MACHINE_MODES];
3817 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3820 sparc_init_modes (void)
3824 for (i = 0; i < NUM_MACHINE_MODES; i++)
3826 switch (GET_MODE_CLASS (i))
3829 case MODE_PARTIAL_INT:
3830 case MODE_COMPLEX_INT:
3831 if (GET_MODE_SIZE (i) <= 4)
3832 sparc_mode_class[i] = 1 << (int) S_MODE;
3833 else if (GET_MODE_SIZE (i) == 8)
3834 sparc_mode_class[i] = 1 << (int) D_MODE;
3835 else if (GET_MODE_SIZE (i) == 16)
3836 sparc_mode_class[i] = 1 << (int) T_MODE;
3837 else if (GET_MODE_SIZE (i) == 32)
3838 sparc_mode_class[i] = 1 << (int) O_MODE;
3840 sparc_mode_class[i] = 0;
3842 case MODE_VECTOR_INT:
3843 if (GET_MODE_SIZE (i) <= 4)
3844 sparc_mode_class[i] = 1 << (int)SF_MODE;
3845 else if (GET_MODE_SIZE (i) == 8)
3846 sparc_mode_class[i] = 1 << (int)DF_MODE;
3849 case MODE_COMPLEX_FLOAT:
3850 if (GET_MODE_SIZE (i) <= 4)
3851 sparc_mode_class[i] = 1 << (int) SF_MODE;
3852 else if (GET_MODE_SIZE (i) == 8)
3853 sparc_mode_class[i] = 1 << (int) DF_MODE;
3854 else if (GET_MODE_SIZE (i) == 16)
3855 sparc_mode_class[i] = 1 << (int) TF_MODE;
3856 else if (GET_MODE_SIZE (i) == 32)
3857 sparc_mode_class[i] = 1 << (int) OF_MODE;
3859 sparc_mode_class[i] = 0;
3862 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3863 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3865 sparc_mode_class[i] = 1 << (int) CC_MODE;
3868 sparc_mode_class[i] = 0;
3874 hard_regno_mode_classes = hard_64bit_mode_classes;
3876 hard_regno_mode_classes = hard_32bit_mode_classes;
3878 /* Initialize the array used by REGNO_REG_CLASS. */
3879 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3881 if (i < 16 && TARGET_V8PLUS)
3882 sparc_regno_reg_class[i] = I64_REGS;
3883 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3884 sparc_regno_reg_class[i] = GENERAL_REGS;
3886 sparc_regno_reg_class[i] = FP_REGS;
3888 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3890 sparc_regno_reg_class[i] = FPCC_REGS;
3892 sparc_regno_reg_class[i] = NO_REGS;
3896 /* Compute the frame size required by the function. This function is called
3897 during the reload pass and also by sparc_expand_prologue. */
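/* Roughly, the frame consists of the local variable area (SIZE), a save
   area for the call-saved global and FP registers counted into n_regs
   below, the outgoing argument area and, unless the function is a leaf
   with no stack locals, the reserved area given by FIRST_PARM_OFFSET;
   the total is then rounded up to the stack alignment.  */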
3900 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3902 int outgoing_args_size = (crtl->outgoing_args_size
3903 + REG_PARM_STACK_SPACE (current_function_decl));
3904 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3909 for (i = 0; i < 8; i++)
3910 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3915 for (i = 0; i < 8; i += 2)
3916 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3917 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3921 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3922 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3923 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3926 /* Set up values for use in prologue and epilogue. */
3927 num_gfregs = n_regs;
3932 && crtl->outgoing_args_size == 0)
3933 actual_fsize = apparent_fsize = 0;
3936 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
3937 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3938 apparent_fsize += n_regs * 4;
3939 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3942 /* Make sure nothing can clobber our register windows.
3943 If a SAVE must be done, or there is a stack-local variable,
3944 the register window area must be allocated. */
3945 if (! leaf_function_p || size > 0)
3946 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3948 return SPARC_STACK_ALIGN (actual_fsize);
3951 /* Output any necessary .register pseudo-ops. */
3954 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3956 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3962 /* Check if %g[2367] were used without
3963 .register being printed for them already. */
3964 for (i = 2; i < 8; i++)
3966 if (df_regs_ever_live_p (i)
3967 && ! sparc_hard_reg_printed [i])
3969 sparc_hard_reg_printed [i] = 1;
3970 /* %g7 is used as the TLS base register; use #ignore
3971 for it instead of #scratch. */
3972 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
3973 i == 7 ? "ignore" : "scratch");
3980 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3981 as needed. LOW should be double-word aligned for 32-bit registers.
3982 Return the new OFFSET. */
3985 #define SORR_RESTORE 1
3988 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3993 if (TARGET_ARCH64 && high <= 32)
3995 for (i = low; i < high; i++)
3997 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3999 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
4000 set_mem_alias_set (mem, sparc_sr_alias_set);
4001 if (action == SORR_SAVE)
4003 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4004 RTX_FRAME_RELATED_P (insn) = 1;
4006 else /* action == SORR_RESTORE */
4007 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4014 for (i = low; i < high; i += 2)
4016 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
4017 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
4018 enum machine_mode mode;
4023 mode = i < 32 ? DImode : DFmode;
4028 mode = i < 32 ? SImode : SFmode;
4033 mode = i < 32 ? SImode : SFmode;
4040 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
4041 set_mem_alias_set (mem, sparc_sr_alias_set);
4042 if (action == SORR_SAVE)
4044 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4045 RTX_FRAME_RELATED_P (insn) = 1;
4047 else /* action == SORR_RESTORE */
4048 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4050 /* Always preserve double-word alignment. */
4051 offset = (offset + 7) & -8;
4058 /* Emit code to save call-saved registers. */
4061 emit_save_or_restore_regs (int action)
4063 HOST_WIDE_INT offset;
4066 offset = frame_base_offset - apparent_fsize;
4068 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
4070 /* ??? This might be optimized a little as %g1 might already have a
4071 value close enough that a single add insn will do. */
4072 /* ??? Although, all of this is probably only a temporary fix
4073 because if %g1 can hold a function result, then
4074 sparc_expand_epilogue will lose (the result will be
4076 base = gen_rtx_REG (Pmode, 1);
4077 emit_move_insn (base, GEN_INT (offset));
4078 emit_insn (gen_rtx_SET (VOIDmode,
4080 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4084 base = frame_base_reg;
4086 offset = save_or_restore_regs (0, 8, base, offset, action);
4087 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
4090 /* Generate a save_register_window insn. */
4093 gen_save_register_window (rtx increment)
4096 return gen_save_register_windowdi (increment);
4098 return gen_save_register_windowsi (increment);
4101 /* Generate an increment for the stack pointer. */
4104 gen_stack_pointer_inc (rtx increment)
4106 return gen_rtx_SET (VOIDmode,
4108 gen_rtx_PLUS (Pmode,
4113 /* Generate a decrement for the stack pointer. */
4116 gen_stack_pointer_dec (rtx decrement)
4118 return gen_rtx_SET (VOIDmode,
4120 gen_rtx_MINUS (Pmode,
4125 /* Expand the function prologue. The prologue is responsible for reserving
4126 storage for the frame, saving the call-saved registers and loading the
4127 PIC register if needed. */
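/* For a non-leaf function with a small frame the whole stack adjustment
   boils down to a single

	save	%sp, -FRAMESIZE, %sp

   where FRAMESIZE stands for the computed frame size; larger frames either
   split the adjustment in two or build the displacement in %g1 first, as
   handled case by case below.  */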
4130 sparc_expand_prologue (void)
4135 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4136 on the final value of the flag means deferring the prologue/epilogue
4137 expansion until just before the second scheduling pass, which is too
4138 late to emit multiple epilogues or return insns.
4140 Of course we are making the assumption that the value of the flag
4141 will not change between now and its final value. Of the three parts
4142 of the formula, only the last one can reasonably vary. Let's take a
4143 closer look, after assuming that the first two are set to true
4144 (otherwise the last value is effectively silenced).
4146 If only_leaf_regs_used returns false, the global predicate will also
4147 be false so the actual frame size calculated below will be positive.
4148 As a consequence, the save_register_window insn will be emitted in
4149 the instruction stream; now this insn explicitly references %fp
4150 which is not a leaf register so only_leaf_regs_used will always
4151 return false subsequently.
4153 If only_leaf_regs_used returns true, we hope that the subsequent
4154 optimization passes won't cause non-leaf registers to pop up. For
4155 example, the regrename pass has special provisions to not rename to
4156 non-leaf registers in a leaf function. */
4157 sparc_leaf_function_p
4158 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4160 /* Need to use actual_fsize, since we are also allocating
4161 space for our callee (and our own register save area). */
4163 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4165 /* Advertise that the data calculated just above are now valid. */
4166 sparc_prologue_data_valid_p = true;
4168 if (sparc_leaf_function_p)
4170 frame_base_reg = stack_pointer_rtx;
4171 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4175 frame_base_reg = hard_frame_pointer_rtx;
4176 frame_base_offset = SPARC_STACK_BIAS;
4179 if (actual_fsize == 0)
4181 else if (sparc_leaf_function_p)
4183 if (actual_fsize <= 4096)
4184 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4185 else if (actual_fsize <= 8192)
4187 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4188 /* %sp is still the CFA register. */
4189 RTX_FRAME_RELATED_P (insn) = 1;
4191 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4195 rtx reg = gen_rtx_REG (Pmode, 1);
4196 emit_move_insn (reg, GEN_INT (-actual_fsize));
4197 insn = emit_insn (gen_stack_pointer_inc (reg));
4198 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4199 gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4202 RTX_FRAME_RELATED_P (insn) = 1;
4206 if (actual_fsize <= 4096)
4207 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4208 else if (actual_fsize <= 8192)
4210 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4211 /* %sp is not the CFA register anymore. */
4212 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4216 rtx reg = gen_rtx_REG (Pmode, 1);
4217 emit_move_insn (reg, GEN_INT (-actual_fsize));
4218 insn = emit_insn (gen_save_register_window (reg));
4221 RTX_FRAME_RELATED_P (insn) = 1;
4222 for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
4223 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4227 emit_save_or_restore_regs (SORR_SAVE);
4229 /* Load the PIC register if needed. */
4230 if (flag_pic && crtl->uses_pic_offset_table)
4231 load_pic_register (false);
4234 /* This function generates the assembly code for function entry, which boils
4235 down to emitting the necessary .register directives. */
4238 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4240 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4241 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4243 sparc_output_scratch_registers (file);
4246 /* Expand the function epilogue, either normal or part of a sibcall.
4247 We emit all the instructions except the return or the call. */
4250 sparc_expand_epilogue (void)
4253 emit_save_or_restore_regs (SORR_RESTORE);
4255 if (actual_fsize == 0)
4257 else if (sparc_leaf_function_p)
4259 if (actual_fsize <= 4096)
4260 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4261 else if (actual_fsize <= 8192)
4263 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4264 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4268 rtx reg = gen_rtx_REG (Pmode, 1);
4269 emit_move_insn (reg, GEN_INT (-actual_fsize));
4270 emit_insn (gen_stack_pointer_dec (reg));
4275 /* Return true if it is appropriate to emit `return' instructions in the
4276 body of a function. */
4279 sparc_can_use_return_insn_p (void)
4281 return sparc_prologue_data_valid_p
4282 && (actual_fsize == 0 || !sparc_leaf_function_p);
4285 /* This function generates the assembly code for function exit. */
4288 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4290 /* If code does not drop into the epilogue, we still have to output
4291 a dummy nop for the sake of sane backtraces. Otherwise, if the
4292 last two instructions of a function were "call foo; dslot;" this
4293 can make the return PC of foo (i.e. address of call instruction
4294 plus 8) point to the first instruction in the next function. */
4296 rtx insn, last_real_insn;
4298 insn = get_last_insn ();
4300 last_real_insn = prev_real_insn (insn);
4302 && GET_CODE (last_real_insn) == INSN
4303 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4304 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4306 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4307 fputs("\tnop\n", file);
4309 sparc_output_deferred_case_vectors ();
4312 /* Output a 'restore' instruction. */
4315 output_restore (rtx pat)
4321 fputs ("\t restore\n", asm_out_file);
4325 gcc_assert (GET_CODE (pat) == SET);
4327 operands[0] = SET_DEST (pat);
4328 pat = SET_SRC (pat);
4330 switch (GET_CODE (pat))
4333 operands[1] = XEXP (pat, 0);
4334 operands[2] = XEXP (pat, 1);
4335 output_asm_insn (" restore %r1, %2, %Y0", operands);
4338 operands[1] = XEXP (pat, 0);
4339 operands[2] = XEXP (pat, 1);
4340 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4343 operands[1] = XEXP (pat, 0);
4344 gcc_assert (XEXP (pat, 1) == const1_rtx);
4345 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4349 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4354 /* Output a return. */
4357 output_return (rtx insn)
4359 if (sparc_leaf_function_p)
4361 /* This is a leaf function so we don't have to bother restoring the
4362 register window, which frees us from dealing with the convoluted
4363 semantics of restore/return. We simply output the jump to the
4364 return address and the insn in the delay slot (if any). */
4366 gcc_assert (! crtl->calls_eh_return);
4368 return "jmp\t%%o7+%)%#";
4372 /* This is a regular function so we have to restore the register window.
4373 We may have a pending insn for the delay slot, which will be either
4374 combined with the 'restore' instruction or put in the delay slot of
4375 the 'return' instruction. */
4377 if (crtl->calls_eh_return)
4379 /* If the function uses __builtin_eh_return, the eh_return
4380 machinery occupies the delay slot. */
4381 gcc_assert (! final_sequence);
4383 if (! flag_delayed_branch)
4384 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4387 fputs ("\treturn\t%i7+8\n", asm_out_file);
4389 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4391 if (flag_delayed_branch)
4392 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4394 fputs ("\t nop\n", asm_out_file);
4396 else if (final_sequence)
4400 delay = NEXT_INSN (insn);
4403 pat = PATTERN (delay);
4405 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4407 epilogue_renumber (&pat, 0);
4408 return "return\t%%i7+%)%#";
4412 output_asm_insn ("jmp\t%%i7+%)", NULL);
4413 output_restore (pat);
4414 PATTERN (delay) = gen_blockage ();
4415 INSN_CODE (delay) = -1;
4420 /* The delay slot is empty. */
4422 return "return\t%%i7+%)\n\t nop";
4423 else if (flag_delayed_branch)
4424 return "jmp\t%%i7+%)\n\t restore";
4426 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4433 /* Output a sibling call. */
4436 output_sibcall (rtx insn, rtx call_operand)
4440 gcc_assert (flag_delayed_branch);
4442 operands[0] = call_operand;
4444 if (sparc_leaf_function_p)
4446 /* This is a leaf function so we don't have to bother restoring the
4447 register window. We simply output the jump to the function and
4448 the insn in the delay slot (if any). */
4450 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4453 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4456 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4457 it into a branch if possible. */
4458 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4463 /* This is a regular function so we have to restore the register window.
4464 We may have a pending insn for the delay slot, which will be combined
4465 with the 'restore' instruction. */
4467 output_asm_insn ("call\t%a0, 0", operands);
4471 rtx delay = NEXT_INSN (insn);
4474 output_restore (PATTERN (delay));
4476 PATTERN (delay) = gen_blockage ();
4477 INSN_CODE (delay) = -1;
4480 output_restore (NULL_RTX);
4486 /* Functions for handling argument passing.
4488 For 32-bit, the first 6 args are normally in registers and the rest are
4489 pushed. Any arg that starts within the first 6 words is at least
4490 partially passed in a register unless its data type forbids.
4492 For 64-bit, the argument registers are laid out as an array of 16 elements
4493 and arguments are added sequentially. The first 6 int args and up to the
4494 first 16 fp args (depending on size) are passed in regs.
4496 Slot    Stack      Integral   Float   Float in structure   Double   Long Double
4497 ----    -----      --------   -----   ------------------   ------   -----------
4498  15   [SP+248]                %f31        %f30,%f31         %d30
4499  14   [SP+240]                %f29        %f28,%f29         %d28        %q28
4500  13   [SP+232]                %f27        %f26,%f27         %d26
4501  12   [SP+224]                %f25        %f24,%f25         %d24        %q24
4502  11   [SP+216]                %f23        %f22,%f23         %d22
4503  10   [SP+208]                %f21        %f20,%f21         %d20        %q20
4504   9   [SP+200]                %f19        %f18,%f19         %d18
4505   8   [SP+192]                %f17        %f16,%f17         %d16        %q16
4506   7   [SP+184]                %f15        %f14,%f15         %d14
4507   6   [SP+176]                %f13        %f12,%f13         %d12        %q12
4508   5   [SP+168]      %o5       %f11        %f10,%f11         %d10
4509   4   [SP+160]      %o4       %f9         %f8,%f9           %d8         %q8
4510   3   [SP+152]      %o3       %f7         %f6,%f7           %d6
4511   2   [SP+144]      %o2       %f5         %f4,%f5           %d4         %q4
4512   1   [SP+136]      %o1       %f3         %f2,%f3           %d2
4513   0   [SP+128]      %o0       %f1         %f0,%f1           %d0         %q0
4515 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4517 Integral arguments are always passed as 64-bit quantities appropriately extended.
4520 Passing of floating point values is handled as follows.
4521 If a prototype is in scope:
4522 If the value is in a named argument (i.e. not a stdarg function or a
4523 value not part of the `...') then the value is passed in the appropriate fp reg.
4525 If the value is part of the `...' and is passed in one of the first 6
4526 slots then the value is passed in the appropriate int reg.
4527 If the value is part of the `...' and is not passed in one of the first 6
4528 slots then the value is passed in memory.
4529 If a prototype is not in scope:
4530 If the value is one of the first 6 arguments the value is passed in the
4531 appropriate integer reg and the appropriate fp reg.
4532 If the value is not one of the first 6 arguments the value is passed in
4533 the appropriate fp reg and in memory.
4536 Summary of the calling conventions implemented by GCC on SPARC:
4539    32-bit ABI:              size      argument     return value
4541    small integer              <4      int. reg.      int. reg.
4542    word                        4      int. reg.      int. reg.
4543    double word                 8      int. reg.      int. reg.
4545    _Complex small integer     <8      int. reg.      int. reg.
4546    _Complex word               8      int. reg.      int. reg.
4547    _Complex double word       16       memory        int. reg.
4549    vector integer            <=8      int. reg.       FP reg.
4550    vector integer             >8       memory         memory
4552    float                       4      int. reg.       FP reg.
4553    double                      8      int. reg.       FP reg.
4554    long double                16       memory         memory
4556    _Complex float              8       memory         FP reg.
4557    _Complex double            16       memory         FP reg.
4558    _Complex long double       32       memory         FP reg.
4560    vector float              any       memory         memory
4562    aggregate                 any       memory         memory
4567    64-bit ABI:              size      argument     return value
4569    small integer              <8      int. reg.      int. reg.
4570    word                        8      int. reg.      int. reg.
4571    double word                16      int. reg.      int. reg.
4573    _Complex small integer    <16      int. reg.      int. reg.
4574    _Complex word              16      int. reg.      int. reg.
4575    _Complex double word       32       memory        int. reg.
4577    vector integer           <=16       FP reg.        FP reg.
4578    vector integer        16<s<=32      memory         FP reg.
4579    vector integer            >32       memory         memory
4581    float                       4       FP reg.        FP reg.
4582    double                      8       FP reg.        FP reg.
4583    long double                16       FP reg.        FP reg.
4585    _Complex float              8       FP reg.        FP reg.
4586    _Complex double            16       FP reg.        FP reg.
4587    _Complex long double       32       memory         FP reg.
4589    vector float             <=16       FP reg.        FP reg.
4590    vector float          16<s<=32      memory         FP reg.
4591    vector float              >32       memory         memory
4593    aggregate                <=16        reg.           reg.
4594    aggregate             16<s<=32      memory          reg.
4595    aggregate                 >32       memory         memory
4599 Note #1: complex floating-point types follow the extended SPARC ABIs as
4600 implemented by the Sun compiler.
4602 Note #2: integral vector types follow the scalar floating-point types
4603 conventions to match what is implemented by the Sun VIS SDK.
4605 Note #3: floating-point vector types follow the aggregate types conventions. */
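/* As an illustrative example of the 64-bit conventions above (assuming a
   prototyped call and TARGET_FPU): for f (int i, double d, float s), the
   int occupies slot 0 and is passed in %o0, the double occupies slot 1
   and is passed in %d2, and the float occupies slot 2 and is passed in
   %f5, right-justified in its 8-byte slot as shown in the table.  */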
4609 /* Maximum number of int regs for args. */
4610 #define SPARC_INT_ARG_MAX 6
4611 /* Maximum number of fp regs for args. */
4612 #define SPARC_FP_ARG_MAX 16
4614 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
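/* For instance, with UNITS_PER_WORD == 8 (TARGET_ARCH64) a 20-byte object
   advances ROUND_ADVANCE (20) == (20 + 7) / 8 == 3 slots, while with
   UNITS_PER_WORD == 4 (TARGET_ARCH32) it advances (20 + 3) / 4 == 5.  */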
4616 /* Handle the INIT_CUMULATIVE_ARGS macro.
4617 Initialize a variable CUM of type CUMULATIVE_ARGS
4618 for a call to a function whose data type is FNTYPE.
4619 For a library call, FNTYPE is 0. */
4622 init_cumulative_args (struct sparc_args *cum, tree fntype,
4623 rtx libname ATTRIBUTE_UNUSED,
4624 tree fndecl ATTRIBUTE_UNUSED)
4627 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4628 cum->libcall_p = fntype == 0;
4631 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4632 When a prototype says `char' or `short', really pass an `int'. */
4635 sparc_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
4637 return TARGET_ARCH32 ? true : false;
4640 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4643 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4645 return TARGET_ARCH64 ? true : false;
4648 /* Scan the record type TYPE and return the following predicates:
4649 - INTREGS_P: the record contains at least one field or sub-field
4650 that is eligible for promotion in integer registers.
4651 - FP_REGS_P: the record contains at least one field or sub-field
4652 that is eligible for promotion in floating-point registers.
4653 - PACKED_P: the record contains at least one field that is packed.
4655 Sub-fields are not taken into account for the PACKED_P predicate. */
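/* For example, scanning

     struct { int i; float f; };

   sets both *INTREGS_P and *FP_REGS_P, while a record whose only field is
   declared with the packed attribute additionally sets *PACKED_P.  */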
4658 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4662 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4664 if (TREE_CODE (field) == FIELD_DECL)
4666 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4667 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4668 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4669 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4675 if (packed_p && DECL_PACKED (field))
4681 /* Compute the slot number to pass an argument in.
4682 Return the slot number or -1 if passing on the stack.
4684 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4685 the preceding args and about the function being called.
4686 MODE is the argument's machine mode.
4687 TYPE is the data type of the argument (as a tree).
4688 This is null for libcalls where that information may not be available.
4690 NAMED is nonzero if this argument is a named parameter
4691 (otherwise it is an extra parameter matching an ellipsis).
4692 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4693 *PREGNO records the register number to use if scalar type.
4694 *PPADDING records the amount of padding needed in words. */
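/* As an example (TARGET_ARCH64, named argument): a DFmode argument whose
   slot number is 2 yields *PREGNO = SPARC_FP_ARG_FIRST + 2 * 2, i.e. %f4
   (%d4), whereas any argument whose slot is past the relevant *_ARG_MAX
   limit makes this function return -1 and go to the stack.  */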
4697 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4698 tree type, int named, int incoming_p,
4699 int *pregno, int *ppadding)
4701 int regbase = (incoming_p
4702 ? SPARC_INCOMING_INT_ARG_FIRST
4703 : SPARC_OUTGOING_INT_ARG_FIRST);
4704 int slotno = cum->words;
4705 enum mode_class mclass;
4710 if (type && TREE_ADDRESSABLE (type))
4716 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4719 /* For SPARC64, objects requiring 16-byte alignment get it. */
4721 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4722 && (slotno & 1) != 0)
4723 slotno++, *ppadding = 1;
4725 mclass = GET_MODE_CLASS (mode);
4726 if (type && TREE_CODE (type) == VECTOR_TYPE)
4728 /* Vector types deserve special treatment because they are
4729 polymorphic wrt their mode, depending upon whether VIS
4730 instructions are enabled. */
4731 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4733 /* The SPARC port defines no floating-point vector modes. */
4734 gcc_assert (mode == BLKmode);
4738 /* Integral vector types should either have a vector
4739 mode or an integral mode, because we are guaranteed
4740 by pass_by_reference that their size is not greater
4741 than 16 bytes and TImode is 16-byte wide. */
4742 gcc_assert (mode != BLKmode);
4744 /* Vector integers are handled like floats according to the Sun VIS SDK. */
4746 mclass = MODE_FLOAT;
4753 case MODE_COMPLEX_FLOAT:
4754 case MODE_VECTOR_INT:
4755 if (TARGET_ARCH64 && TARGET_FPU && named)
4757 if (slotno >= SPARC_FP_ARG_MAX)
4759 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4760 /* Arguments filling only one single FP register are
4761 right-justified in the outer double FP register. */
4762 if (GET_MODE_SIZE (mode) <= 4)
4769 case MODE_COMPLEX_INT:
4770 if (slotno >= SPARC_INT_ARG_MAX)
4772 regno = regbase + slotno;
4776 if (mode == VOIDmode)
4777 /* MODE is VOIDmode when generating the actual call. */
4780 gcc_assert (mode == BLKmode);
4784 || (TREE_CODE (type) != VECTOR_TYPE
4785 && TREE_CODE (type) != RECORD_TYPE))
4787 if (slotno >= SPARC_INT_ARG_MAX)
4789 regno = regbase + slotno;
4791 else /* TARGET_ARCH64 && type */
4793 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4795 /* First see what kinds of registers we would need. */
4796 if (TREE_CODE (type) == VECTOR_TYPE)
4799 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4801 /* The ABI obviously doesn't specify how packed structures
4802 are passed. These are defined to be passed in int regs
4803 if possible, otherwise memory. */
4804 if (packed_p || !named)
4805 fpregs_p = 0, intregs_p = 1;
4807 /* If all arg slots are filled, then must pass on stack. */
4808 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4811 /* If there are only int args and all int arg slots are filled,
4812 then must pass on stack. */
4813 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4816 /* Note that even if all int arg slots are filled, fp members may
4817 still be passed in regs if such regs are available.
4818 *PREGNO isn't set because there may be more than one, it's up
4819 to the caller to compute them. */
4832 /* Handle recursive register counting for structure field layout. */
4834 struct function_arg_record_value_parms
4836 rtx ret; /* return expression being built. */
4837 int slotno; /* slot number of the argument. */
4838 int named; /* whether the argument is named. */
4839 int regbase; /* regno of the base register. */
4840 int stack; /* 1 if part of the argument is on the stack. */
4841 int intoffset; /* offset of the first pending integer field. */
4842 unsigned int nregs; /* number of words passed in registers. */
4845 static void function_arg_record_value_3
4846 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4847 static void function_arg_record_value_2
4848 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4849 static void function_arg_record_value_1
4850 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4851 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
4852 static rtx function_arg_union_value (int, enum machine_mode, int, int);
4854 /* A subroutine of function_arg_record_value. Traverse the structure
4855 recursively and determine how many registers will be required. */
4858 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
4859 struct function_arg_record_value_parms *parms,
4864 /* We need to compute how many registers are needed so we can
4865 allocate the PARALLEL but before we can do that we need to know
4866 whether there are any packed fields. The ABI obviously doesn't
4867 specify how structures are passed in this case, so they are
4868 defined to be passed in int regs if possible, otherwise memory,
4869 regardless of whether there are fp values present. */
4872 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4874 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4881 /* Compute how many registers we need. */
4882 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4884 if (TREE_CODE (field) == FIELD_DECL)
4886 HOST_WIDE_INT bitpos = startbitpos;
4888 if (DECL_SIZE (field) != 0)
4890 if (integer_zerop (DECL_SIZE (field)))
4893 if (host_integerp (bit_position (field), 1))
4894 bitpos += int_bit_position (field);
4897 /* ??? FIXME: else assume zero offset. */
4899 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4900 function_arg_record_value_1 (TREE_TYPE (field),
4904 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4905 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4910 if (parms->intoffset != -1)
4912 unsigned int startbit, endbit;
4913 int intslots, this_slotno;
4915 startbit = parms->intoffset & -BITS_PER_WORD;
4916 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4918 intslots = (endbit - startbit) / BITS_PER_WORD;
4919 this_slotno = parms->slotno + parms->intoffset
4922 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4924 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4925 /* We need to pass this field on the stack. */
4929 parms->nregs += intslots;
4930 parms->intoffset = -1;
4933 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4934 If it wasn't true we wouldn't be here. */
4935 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4936 && DECL_MODE (field) == BLKmode)
4937 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4938 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4945 if (parms->intoffset == -1)
4946 parms->intoffset = bitpos;
4952 /* A subroutine of function_arg_record_value. Assign the bits of the
4953 structure between parms->intoffset and bitpos to integer registers. */
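/* For instance, with BITS_PER_WORD == 64, pending integer bits starting at
   parms->intoffset == 32 and running up to bitpos == 192 give
   startbit == 0, endbit == 192, hence intslots == 3: three consecutive
   integer registers starting at parms->regbase + this_slotno.  */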
4956 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4957 struct function_arg_record_value_parms *parms)
4959 enum machine_mode mode;
4961 unsigned int startbit, endbit;
4962 int this_slotno, intslots, intoffset;
4965 if (parms->intoffset == -1)
4968 intoffset = parms->intoffset;
4969 parms->intoffset = -1;
4971 startbit = intoffset & -BITS_PER_WORD;
4972 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4973 intslots = (endbit - startbit) / BITS_PER_WORD;
4974 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
4976 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4980 /* If this is the trailing part of a word, only load that much into
4981 the register. Otherwise load the whole register. Note that in
4982 the latter case we may pick up unwanted bits. It's not a problem
4983 at the moment but we may wish to revisit it. */
4985 if (intoffset % BITS_PER_WORD != 0)
4986 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
4991 intoffset /= BITS_PER_UNIT;
4994 regno = parms->regbase + this_slotno;
4995 reg = gen_rtx_REG (mode, regno);
4996 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4997 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5000 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5005 while (intslots > 0);
5008 /* A subroutine of function_arg_record_value. Traverse the structure
5009 recursively and assign bits to floating point registers. Track which
5010 bits in between need integer registers; invoke function_arg_record_value_3
5011 to make that happen. */
5014 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
5015 struct function_arg_record_value_parms *parms,
5021 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5023 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5030 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5032 if (TREE_CODE (field) == FIELD_DECL)
5034 HOST_WIDE_INT bitpos = startbitpos;
5036 if (DECL_SIZE (field) != 0)
5038 if (integer_zerop (DECL_SIZE (field)))
5041 if (host_integerp (bit_position (field), 1))
5042 bitpos += int_bit_position (field);
5045 /* ??? FIXME: else assume zero offset. */
5047 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5048 function_arg_record_value_2 (TREE_TYPE (field),
5052 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5053 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5058 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5059 int regno, nregs, pos;
5060 enum machine_mode mode = DECL_MODE (field);
5063 function_arg_record_value_3 (bitpos, parms);
5065 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5068 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5069 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5071 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5073 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5079 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5080 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
5082 reg = gen_rtx_REG (mode, regno);
5083 pos = bitpos / BITS_PER_UNIT;
5084 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5085 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5089 regno += GET_MODE_SIZE (mode) / 4;
5090 reg = gen_rtx_REG (mode, regno);
5091 pos += GET_MODE_SIZE (mode);
5092 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5093 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5099 if (parms->intoffset == -1)
5100 parms->intoffset = bitpos;
5106 /* Used by function_arg and function_value to implement the complex
5107 conventions of the 64-bit ABI for passing and returning structures.
5108 Return an expression valid as a return value for the two macros
5109 FUNCTION_ARG and FUNCTION_VALUE.
5111 TYPE is the data type of the argument (as a tree).
5112 This is null for libcalls where that information may not be available.
5114 MODE is the argument's machine mode.
5115 SLOTNO is the index number of the argument's slot in the parameter array.
5116 NAMED is nonzero if this argument is a named parameter
5117 (otherwise it is an extra parameter matching an ellipsis).
5118 REGBASE is the regno of the base register for the parameter array. */
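/* Illustration (assuming a named 16-byte struct { double d; long l; }
   passed as the first outgoing argument): the result is roughly

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   i.e. the FP field lands in a float register and the trailing integer
   word in the next integer argument register.  */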
5121 function_arg_record_value (const_tree type, enum machine_mode mode,
5122 int slotno, int named, int regbase)
5124 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5125 struct function_arg_record_value_parms parms;
5128 parms.ret = NULL_RTX;
5129 parms.slotno = slotno;
5130 parms.named = named;
5131 parms.regbase = regbase;
5134 /* Compute how many registers we need. */
5136 parms.intoffset = 0;
5137 function_arg_record_value_1 (type, 0, &parms, false);
5139 /* Take into account pending integer fields. */
5140 if (parms.intoffset != -1)
5142 unsigned int startbit, endbit;
5143 int intslots, this_slotno;
5145 startbit = parms.intoffset & -BITS_PER_WORD;
5146 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5147 intslots = (endbit - startbit) / BITS_PER_WORD;
5148 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5150 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5152 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5153 /* We need to pass this field on the stack. */
5157 parms.nregs += intslots;
5159 nregs = parms.nregs;
5161 /* Allocate the vector and handle some annoying special cases. */
5164 /* ??? Empty structure has no value? Duh? */
5167 /* Though there's nothing really to store, return a word register
5168 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5169 leads to breakage due to the fact that there are zero bytes to
5171 return gen_rtx_REG (mode, regbase);
5175 /* ??? C++ has structures with no fields, and yet a size. Give up
5176 for now and pass everything back in integer registers. */
5177 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5179 if (nregs + slotno > SPARC_INT_ARG_MAX)
5180 nregs = SPARC_INT_ARG_MAX - slotno;
5182 gcc_assert (nregs != 0);
5184 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5186 /* If at least one field must be passed on the stack, generate
5187 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5188 also be passed on the stack. We can't do much better because the
5189 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5190 of structures for which the fields passed exclusively in registers
5191 are not at the beginning of the structure. */
5193 XVECEXP (parms.ret, 0, 0)
5194 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5196 /* Fill in the entries. */
5198 parms.intoffset = 0;
5199 function_arg_record_value_2 (type, 0, &parms, false);
5200 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5202 gcc_assert (parms.nregs == nregs);
5207 /* Used by function_arg and function_value to implement the conventions
5208 of the 64-bit ABI for passing and returning unions.
5209 Return an expression valid as a return value for the two macros
5210 FUNCTION_ARG and FUNCTION_VALUE.
5212 SIZE is the size in bytes of the union.
5213 MODE is the argument's machine mode.
5214 REGNO is the hard register the union will be passed in. */
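/* For example, a 12-byte union starting in slot 1 is described as a
   (parallel ...) of two word_mode registers, the first at byte offset 0
   and the second at byte offset UNITS_PER_WORD, i.e. %o1 and %o2 when
   passed as an outgoing argument.  */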
5217 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5220 int nwords = ROUND_ADVANCE (size), i;
5223 /* See comment in previous function for empty structures. */
5225 return gen_rtx_REG (mode, regno);
5227 if (slotno == SPARC_INT_ARG_MAX - 1)
5230 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5232 for (i = 0; i < nwords; i++)
5234 /* Unions are passed left-justified. */
5235 XVECEXP (regs, 0, i)
5236 = gen_rtx_EXPR_LIST (VOIDmode,
5237 gen_rtx_REG (word_mode, regno),
5238 GEN_INT (UNITS_PER_WORD * i));
5245 /* Used by function_arg and function_value to implement the conventions
5246 for passing and returning large (BLKmode) vectors.
5247 Return an expression valid as a return value for the two macros
5248 FUNCTION_ARG and FUNCTION_VALUE.
5250 SIZE is the size in bytes of the vector (at least 8 bytes).
5251 REGNO is the FP hard register the vector will be passed in. */
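/* For example, a 16-byte vector handed to us with REGNO = %f8 comes back
   as a (parallel ...) of two DImode registers, %d8 at byte offset 0 and
   %d10 at byte offset 8.  */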
5254 function_arg_vector_value (int size, int regno)
5256 int i, nregs = size / 8;
5259 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5261 for (i = 0; i < nregs; i++)
5263 XVECEXP (regs, 0, i)
5264 = gen_rtx_EXPR_LIST (VOIDmode,
5265 gen_rtx_REG (DImode, regno + 2*i),
5272 /* Handle the FUNCTION_ARG macro.
5273 Determine where to put an argument to a function.
5274 Value is zero to push the argument on the stack,
5275 or a hard register in which to store the argument.
5277 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5278 the preceding args and about the function being called.
5279 MODE is the argument's machine mode.
5280 TYPE is the data type of the argument (as a tree).
5281 This is null for libcalls where that information may not be available.
5283 NAMED is nonzero if this argument is a named parameter
5284 (otherwise it is an extra parameter matching an ellipsis).
5285 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5288 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5289 tree type, int named, int incoming_p)
5291 int regbase = (incoming_p
5292 ? SPARC_INCOMING_INT_ARG_FIRST
5293 : SPARC_OUTGOING_INT_ARG_FIRST);
5294 int slotno, regno, padding;
5295 enum mode_class mclass = GET_MODE_CLASS (mode);
5297 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5302 /* Vector types deserve special treatment because they are polymorphic wrt
5303 their mode, depending upon whether VIS instructions are enabled. */
5304 if (type && TREE_CODE (type) == VECTOR_TYPE)
5306 HOST_WIDE_INT size = int_size_in_bytes (type);
5307 gcc_assert ((TARGET_ARCH32 && size <= 8)
5308 || (TARGET_ARCH64 && size <= 16));
5310 if (mode == BLKmode)
5311 return function_arg_vector_value (size,
5312 SPARC_FP_ARG_FIRST + 2*slotno);
5314 mclass = MODE_FLOAT;
5318 return gen_rtx_REG (mode, regno);
5320 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5321 and are promoted to registers if possible. */
5322 if (type && TREE_CODE (type) == RECORD_TYPE)
5324 HOST_WIDE_INT size = int_size_in_bytes (type);
5325 gcc_assert (size <= 16);
5327 return function_arg_record_value (type, mode, slotno, named, regbase);
5330 /* Unions up to 16 bytes in size are passed in integer registers. */
5331 else if (type && TREE_CODE (type) == UNION_TYPE)
5333 HOST_WIDE_INT size = int_size_in_bytes (type);
5334 gcc_assert (size <= 16);
5336 return function_arg_union_value (size, mode, slotno, regno);
5339 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5340 but also have the slot allocated for them.
5341 If no prototype is in scope fp values in register slots get passed
5342 in two places, either fp regs and int regs or fp regs and memory. */
5343 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5344 && SPARC_FP_REG_P (regno))
5346 rtx reg = gen_rtx_REG (mode, regno);
5347 if (cum->prototype_p || cum->libcall_p)
5349 /* "* 2" because fp reg numbers are recorded in 4 byte
5352 /* ??? This will cause the value to be passed in the fp reg and
5353 in the stack. When a prototype exists we want to pass the
5354 value in the reg but reserve space on the stack. That's an
5355 optimization, and is deferred [for a bit]. */
5356 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5357 return gen_rtx_PARALLEL (mode,
5359 gen_rtx_EXPR_LIST (VOIDmode,
5360 NULL_RTX, const0_rtx),
5361 gen_rtx_EXPR_LIST (VOIDmode,
5365 /* ??? It seems that passing back a register even when past
5366 the area declared by REG_PARM_STACK_SPACE will allocate
5367 space appropriately, and will not copy the data onto the
5368 stack, exactly as we desire.
5370 This is due to locate_and_pad_parm being called in
5371 expand_call whenever reg_parm_stack_space > 0, which
5372 while beneficial to our example here, would seem to be
5373 in error from what had been intended. Ho hum... -- r~ */
5381 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5385 /* On incoming, we don't need to know that the value
5386 is passed in %f0 and %i0, and it confuses other parts
5387 causing needless spillage even on the simplest cases. */
5391 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5392 + (regno - SPARC_FP_ARG_FIRST) / 2);
5394 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5395 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5397 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5401 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5402 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5403 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5408 /* All other aggregate types are passed in an integer register in a mode
5409 corresponding to the size of the type. */
5410 else if (type && AGGREGATE_TYPE_P (type))
5412 HOST_WIDE_INT size = int_size_in_bytes (type);
5413 gcc_assert (size <= 16);
5415 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5418 return gen_rtx_REG (mode, regno);
5421 /* For an arg passed partly in registers and partly in memory,
5422 this is the number of bytes of registers used.
5423 For args passed entirely in registers or entirely in memory, zero.
5425 Any arg that starts in the first 6 regs but won't entirely fit in them
5426 needs partial registers on v8. On v9, structures with integer
5427 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5428 values that begin in the last fp reg [where "last fp reg" varies with the
5429 mode] will be split between that reg and memory. */
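/* Worked example for the 32-bit case: a DImode argument that starts in
   slot 5 needs slots 5 and 6, but only slot 5 is a register (%o5), so we
   return (SPARC_INT_ARG_MAX - 5) * UNITS_PER_WORD == 4 bytes.  */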
5432 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5433 tree type, bool named)
5435 int slotno, regno, padding;
5437 /* We pass 0 for incoming_p here; it doesn't matter. */
5438 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5445 if ((slotno + (mode == BLKmode
5446 ? ROUND_ADVANCE (int_size_in_bytes (type))
5447 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5448 > SPARC_INT_ARG_MAX)
5449 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5453 /* We are guaranteed by pass_by_reference that the size of the
5454 argument is not greater than 16 bytes, so we only need to return
5455 one word if the argument is partially passed in registers. */
5457 if (type && AGGREGATE_TYPE_P (type))
5459 int size = int_size_in_bytes (type);
5461 if (size > UNITS_PER_WORD
5462 && slotno == SPARC_INT_ARG_MAX - 1)
5463 return UNITS_PER_WORD;
5465 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5466 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5467 && ! (TARGET_FPU && named)))
5469 /* The complex types are passed as packed types. */
5470 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5471 && slotno == SPARC_INT_ARG_MAX - 1)
5472 return UNITS_PER_WORD;
5474 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5476 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5478 return UNITS_PER_WORD;
5485 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5486 Specify whether to pass the argument by reference. */
5489 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5490 enum machine_mode mode, const_tree type,
5491 bool named ATTRIBUTE_UNUSED)
5494 /* Original SPARC 32-bit ABI says that structures and unions,
5495 and quad-precision floats are passed by reference. For Pascal,
5496 also pass arrays by reference. All other base types are passed in registers.
5499 Extended ABI (as implemented by the Sun compiler) says that all
5500 complex floats are passed by reference. Pass complex integers
5501 in registers up to 8 bytes. More generally, enforce the 2-word
5502 cap for passing arguments in registers.
5504 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5505 integers are passed like floats of the same size, that is in
5506 registers up to 8 bytes. Pass all vector floats by reference
5507 like structure and unions. */
5508 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5510 /* Catch CDImode, TFmode, DCmode and TCmode. */
5511 || GET_MODE_SIZE (mode) > 8
5513 && TREE_CODE (type) == VECTOR_TYPE
5514 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5516 /* Original SPARC 64-bit ABI says that structures and unions
5517 smaller than 16 bytes are passed in registers, as well as
5518 all other base types.
5520 Extended ABI (as implemented by the Sun compiler) says that
5521 complex floats are passed in registers up to 16 bytes. Pass
5522 all complex integers in registers up to 16 bytes. More generally,
5523 enforce the 2-word cap for passing arguments in registers.
5525 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5526 integers are passed like floats of the same size, that is in
5527 registers (up to 16 bytes). Pass all vector floats like structure and unions. */
5530 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5531 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5532 /* Catch CTImode and TCmode. */
5533 || GET_MODE_SIZE (mode) > 16);
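/* So, for instance, a 'long double' (TFmode, 16 bytes) is passed by
   reference under the 32-bit ABI (its mode size exceeds 8) but by value
   in FP registers under the 64-bit ABI (its mode size is exactly 16).  */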
5536 /* Handle the FUNCTION_ARG_ADVANCE macro.
5537 Update the data in CUM to advance over an argument
5538 of mode MODE and data type TYPE.
5539 TYPE is null for libcalls where that information may not be available. */
5542 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5543 tree type, int named)
5545 int slotno, regno, padding;
5547 /* We pass 0 for incoming_p here; it doesn't matter. */
5548 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5550 /* If the argument required leading padding, add it. */
5552 cum->words += padding;
5556 cum->words += (mode != BLKmode
5557 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5558 : ROUND_ADVANCE (int_size_in_bytes (type)));
5562 if (type && AGGREGATE_TYPE_P (type))
5564 int size = int_size_in_bytes (type);
5568 else if (size <= 16)
5570 else /* passed by reference */
5575 cum->words += (mode != BLKmode
5576 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5577 : ROUND_ADVANCE (int_size_in_bytes (type)));
5582 /* Handle the FUNCTION_ARG_PADDING macro.
5583 For the 64 bit ABI structs are always stored left shifted in their argument slot. */
5587 function_arg_padding (enum machine_mode mode, const_tree type)
5589 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5592 /* Fall back to the default. */
5593 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5596 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5597 Specify whether to return the return value in memory. */
5600 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5603 /* Original SPARC 32-bit ABI says that structures and unions,
5604 and quad-precision floats are returned in memory. All other
5605 base types are returned in registers.
5607 Extended ABI (as implemented by the Sun compiler) says that
5608 all complex floats are returned in registers (8 FP registers
5609 at most for '_Complex long double'). Return all complex integers
5610 in registers (4 at most for '_Complex long long').
5612 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5613 integers are returned like floats of the same size, that is in
5614 registers up to 8 bytes and in memory otherwise. Return all
5615 vector floats in memory like structure and unions; note that
5616 they always have BLKmode like the latter. */
5617 return (TYPE_MODE (type) == BLKmode
5618 || TYPE_MODE (type) == TFmode
5619 || (TREE_CODE (type) == VECTOR_TYPE
5620 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5622 /* Original SPARC 64-bit ABI says that structures and unions
5623 smaller than 32 bytes are returned in registers, as well as
5624 all other base types.
5626 Extended ABI (as implemented by the Sun compiler) says that all
5627 complex floats are returned in registers (8 FP registers at most
5628 for '_Complex long double'). Return all complex integers in
5629 registers (4 at most for '_Complex TItype').
5631 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5632 integers are returned like floats of the same size, that is in
5633 registers. Return all vector floats like structure and unions;
5634 note that they always have BLKmode like the latter. */
5635 return ((TYPE_MODE (type) == BLKmode
5636 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5639 /* Handle the TARGET_STRUCT_VALUE target hook.
5640 Return where to find the structure return value address. */
5643 sparc_struct_value_rtx (tree fndecl, int incoming)
5652 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5653 STRUCT_VALUE_OFFSET));
5655 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5656 STRUCT_VALUE_OFFSET));
5658 /* Only follow the SPARC ABI for fixed-size structure returns.
5659 Variable size structure returns are handled per the normal
5660 procedures in GCC. This is enabled by -mstd-struct-return */
5662 && sparc_std_struct_return
5663 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5664 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5666 /* We must check and adjust the return address, as it is
5667 optional as to whether the return object is really there or not.
5669 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5670 rtx scratch = gen_reg_rtx (SImode);
5671 rtx endlab = gen_label_rtx ();
5673 /* Calculate the return object size */
5674 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
5675 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
5676 /* Construct a temporary return value */
5677 rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
5679 /* Implement SPARC 32-bit psABI callee struct return checking
5682 Fetch the instruction where we will return to and see if
5683 it's an unimp instruction (the most significant 10 bits will be zero). */
5685 emit_move_insn (scratch, gen_rtx_MEM (SImode,
5686 plus_constant (ret_rtx, 8)));
5687 /* Assume the size is valid and pre-adjust */
5688 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5689 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
5690 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5691 /* Assign stack temp:
5692 Write the address of the memory pointed to by temp_val into
5693 the memory pointed to by mem */
5694 emit_move_insn (mem, XEXP (temp_val, 0));
5695 emit_label (endlab);
5698 set_mem_alias_set (mem, struct_value_alias_set);
5703 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5704 For v9, function return values are subject to the same rules as arguments,
5705 except that up to 32 bytes may be returned in registers. */
5708 function_value (const_tree type, enum machine_mode mode, int incoming_p)
5710 /* Beware that the two values are swapped here wrt function_arg. */
5711 int regbase = (incoming_p
5712 ? SPARC_OUTGOING_INT_ARG_FIRST
5713 : SPARC_INCOMING_INT_ARG_FIRST);
5714 enum mode_class mclass = GET_MODE_CLASS (mode);
5717 /* Vector types deserve special treatment because they are polymorphic wrt
5718 their mode, depending upon whether VIS instructions are enabled. */
5719 if (type && TREE_CODE (type) == VECTOR_TYPE)
5721 HOST_WIDE_INT size = int_size_in_bytes (type);
5722 gcc_assert ((TARGET_ARCH32 && size <= 8)
5723 || (TARGET_ARCH64 && size <= 32));
5725 if (mode == BLKmode)
5726 return function_arg_vector_value (size,
5727 SPARC_FP_ARG_FIRST);
5729 mclass = MODE_FLOAT;
5732 if (TARGET_ARCH64 && type)
5734 /* Structures up to 32 bytes in size are returned in registers. */
5735 if (TREE_CODE (type) == RECORD_TYPE)
5737 HOST_WIDE_INT size = int_size_in_bytes (type);
5738 gcc_assert (size <= 32);
5740 return function_arg_record_value (type, mode, 0, 1, regbase);
5743 /* Unions up to 32 bytes in size are returned in integer registers. */
5744 else if (TREE_CODE (type) == UNION_TYPE)
5746 HOST_WIDE_INT size = int_size_in_bytes (type);
5747 gcc_assert (size <= 32);
5749 return function_arg_union_value (size, mode, 0, regbase);
5752 /* Objects that require it are returned in FP registers. */
5753 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5756 /* All other aggregate types are returned in an integer register in a
5757 mode corresponding to the size of the type. */
5758 else if (AGGREGATE_TYPE_P (type))
5760 /* All other aggregate types are passed in an integer register
5761 in a mode corresponding to the size of the type. */
5762 HOST_WIDE_INT size = int_size_in_bytes (type);
5763 gcc_assert (size <= 32);
5765 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5767 /* ??? We probably should have made the same ABI change in
5768 3.4.0 as the one we made for unions. The latter was
5769 required by the SCD though, while the former is not
5770 specified, so we favored compatibility and efficiency.
5772 Now we're stuck for aggregates larger than 16 bytes,
5773 because OImode vanished in the meantime. Let's not
5774 try to be unduly clever, and simply follow the ABI
5775 for unions in that case. */
5776 if (mode == BLKmode)
5777 return function_arg_union_value (size, mode, 0, regbase);
5782 /* This must match PROMOTE_FUNCTION_MODE. */
5783 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5787 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
5788 regno = SPARC_FP_ARG_FIRST;
5792 return gen_rtx_REG (mode, regno);
5795 /* Do what is necessary for `va_start'. We look at the current function
5796 to determine if stdarg or varargs is used and return the address of
5797 the first unnamed parameter. */
5800 sparc_builtin_saveregs (void)
5802 int first_reg = crtl->args.info.words;
5806 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5807 emit_move_insn (gen_rtx_MEM (word_mode,
5808 gen_rtx_PLUS (Pmode,
5810 GEN_INT (FIRST_PARM_OFFSET (0)
5813 gen_rtx_REG (word_mode,
5814 SPARC_INCOMING_INT_ARG_FIRST + regno));
5816 address = gen_rtx_PLUS (Pmode,
5818 GEN_INT (FIRST_PARM_OFFSET (0)
5819 + UNITS_PER_WORD * first_reg));
5824 /* Implement `va_start' for stdarg. */
5827 sparc_va_start (tree valist, rtx nextarg)
5829 nextarg = expand_builtin_saveregs ();
5830 std_expand_builtin_va_start (valist, nextarg);
5833 /* Implement `va_arg' for stdarg. */
5836 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
5839 HOST_WIDE_INT size, rsize, align;
5842 tree ptrtype = build_pointer_type (type);
5844 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5847 size = rsize = UNITS_PER_WORD;
5853 size = int_size_in_bytes (type);
5854 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5859 /* For SPARC64, objects requiring 16-byte alignment get it. */
5860 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5861 align = 2 * UNITS_PER_WORD;
5863 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5864 are left-justified in their slots. */
5865 if (AGGREGATE_TYPE_P (type))
5868 size = rsize = UNITS_PER_WORD;
5878 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5879 size_int (align - 1));
5880 incr = fold_convert (sizetype, incr);
5881 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
5883 incr = fold_convert (ptr_type_node, incr);
5886 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5889 if (BYTES_BIG_ENDIAN && size < rsize)
5890 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5891 size_int (rsize - size));
5895 addr = fold_convert (build_pointer_type (ptrtype), addr);
5896 addr = build_va_arg_indirect_ref (addr);
5899 /* If the address isn't aligned properly for the type, we need a temporary.
5900 FIXME: This is inefficient, usually we can do this in registers. */
5901 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
5903 tree tmp = create_tmp_var (type, "va_arg_tmp");
5904 tree dest_addr = build_fold_addr_expr (tmp);
5905 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
5906 3, dest_addr, addr, size_int (rsize));
5907 TREE_ADDRESSABLE (tmp) = 1;
5908 gimplify_and_add (copy, pre_p);
5913 addr = fold_convert (ptrtype, addr);
5916 = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
5917 gimplify_assign (valist, incr, post_p);
5919 return build_va_arg_indirect_ref (addr);
5922 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5923 Specify whether the vector mode is supported by the hardware. */
5926 sparc_vector_mode_supported_p (enum machine_mode mode)
5928 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
5931 /* Return the string to output an unconditional branch to LABEL, which is
5932 the operand number of the label.
5934 DEST is the destination insn (i.e. the label), INSN is the source. */
5937 output_ubranch (rtx dest, int label, rtx insn)
5939 static char string[64];
5940 bool v9_form = false;
5943 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5945 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5946 - INSN_ADDRESSES (INSN_UID (insn)));
5947 /* Leave some instructions for "slop". */
5948 if (delta >= -260000 && delta < 260000)
5953 strcpy (string, "ba%*,pt\t%%xcc, ");
5955 strcpy (string, "b%*\t");
5957 p = strchr (string, '\0');
5968 /* Return the string to output a conditional branch to LABEL, which is
5969 the operand number of the label. OP is the conditional expression.
5970 XEXP (OP, 0) is assumed to be a condition code register (integer or
5971 floating point) and its mode specifies what kind of comparison we made.
5973 DEST is the destination insn (i.e. the label), INSN is the source.
5975 REVERSED is nonzero if we should reverse the sense of the comparison.
5977 ANNUL is nonzero if we should generate an annulling branch. */
5980 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
5983 static char string[64];
5984 enum rtx_code code = GET_CODE (op);
5985 rtx cc_reg = XEXP (op, 0);
5986 enum machine_mode mode = GET_MODE (cc_reg);
5987 const char *labelno, *branch;
5988 int spaces = 8, far;
5991 /* v9 branches are limited to +-1MB. If it is too far away,
6004 fbne,a,pn %fcc2, .LC29
6012 far = TARGET_V9 && (get_attr_length (insn) >= 3);
6015 /* Reversal of FP compares takes care -- an ordered compare
6016 becomes an unordered compare and vice versa. */
6017 if (mode == CCFPmode || mode == CCFPEmode)
6018 code = reverse_condition_maybe_unordered (code);
6020 code = reverse_condition (code);
6023 /* Start by writing the branch condition. */
6024 if (mode == CCFPmode || mode == CCFPEmode)
6075 /* ??? !v9: FP branches cannot be preceded by another floating point
6076 insn. Because there is currently no concept of pre-delay slots,
6077 we can fix this only by always emitting a nop before a floating point branch. */
6082 strcpy (string, "nop\n\t");
6083 strcat (string, branch);
6096 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6108 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6129 strcpy (string, branch);
6131 spaces -= strlen (branch);
6132 p = strchr (string, '\0');
6134 /* Now add the annulling, the label, and a possible noop. */
6147 if (! far && insn && INSN_ADDRESSES_SET_P ())
6149 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6150 - INSN_ADDRESSES (INSN_UID (insn)));
6151 /* Leave some instructions for "slop". */
6152 if (delta < -260000 || delta >= 260000)
6156 if (mode == CCFPmode || mode == CCFPEmode)
6158 static char v9_fcc_labelno[] = "%%fccX, ";
6159 /* Set the char indicating the number of the fcc reg to use. */
6160 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6161 labelno = v9_fcc_labelno;
6164 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6168 else if (mode == CCXmode || mode == CCX_NOOVmode)
6170 labelno = "%%xcc, ";
6175 labelno = "%%icc, ";
6180 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6183 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6196 strcpy (p, labelno);
6197 p = strchr (p, '\0');
6200 strcpy (p, ".+12\n\t nop\n\tb\t");
6201 /* Skip the next insn if requested or
6202 if we know that it will be a nop. */
6203 if (annul || ! final_sequence)
6217 /* Emit a library call comparison between floating point X and Y.
6218 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
6219 Return the new operator to be used in the comparison sequence.
6221 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6222 values as arguments instead of the TFmode registers themselves;
6223 that is why we cannot call emit_float_lib_cmp. */
6226 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6229 rtx slot0, slot1, result, tem, tem2;
6230 enum machine_mode mode;
6231 enum rtx_code new_comparison;
6236 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
6240 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
6244 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
6248 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
6252 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
6256 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
6267 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
6280 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6281 emit_move_insn (slot0, x);
6288 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6289 emit_move_insn (slot1, y);
6292 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6294 XEXP (slot0, 0), Pmode,
6295 XEXP (slot1, 0), Pmode);
6300 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6302 x, TFmode, y, TFmode);
6307 /* Immediately move the result of the libcall into a pseudo
6308 register so reload doesn't clobber the value if it needs
6309 the return register for a spill reg. */
6310 result = gen_reg_rtx (mode);
6311 emit_move_insn (result, hard_libcall_value (mode));
6316 return gen_rtx_NE (VOIDmode, result, const0_rtx);
6319 new_comparison = (comparison == UNORDERED ? EQ : NE);
6320 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
6323 new_comparison = (comparison == UNGT ? GT : NE);
6324 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
6326 return gen_rtx_NE (VOIDmode, result, const2_rtx);
6328 tem = gen_reg_rtx (mode);
6330 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6332 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6333 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
6336 tem = gen_reg_rtx (mode);
6338 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6340 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6341 tem2 = gen_reg_rtx (mode);
6343 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6345 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6346 new_comparison = (comparison == UNEQ ? EQ : NE);
6347 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
6353 /* Generate an unsigned DImode to FP conversion. This is the same code
6354 optabs would emit if we didn't have TFmode patterns. */
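/* The trick below: a value with the high bit clear is converted directly;
   otherwise it is halved with its low bit folded back in
   (i0 = (in >> 1) | (in & 1)), converted as a signed number, and the
   result doubled (f0 + f0).  Folding the shifted-out bit into i0 keeps
   the final rounding correct.  */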
6357 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6359 rtx neglab, donelab, i0, i1, f0, in, out;
6362 in = force_reg (DImode, operands[1]);
6363 neglab = gen_label_rtx ();
6364 donelab = gen_label_rtx ();
6365 i0 = gen_reg_rtx (DImode);
6366 i1 = gen_reg_rtx (DImode);
6367 f0 = gen_reg_rtx (mode);
6369 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6371 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6372 emit_jump_insn (gen_jump (donelab));
6375 emit_label (neglab);
6377 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6378 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6379 emit_insn (gen_iordi3 (i0, i0, i1));
6380 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6381 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6383 emit_label (donelab);
6386 /* Generate an FP to unsigned DImode conversion. This is the same code
6387 optabs would emit if we didn't have TFmode patterns. */
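/* The trick below: values below 2^63 are converted directly; for larger
   values we subtract the constant 2^63 (the 'limit' built below), convert
   the difference as a signed number, and then set bit 63 of the integer
   result back with an XOR of 1 << 63.  */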
6390 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6392 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6395 in = force_reg (mode, operands[1]);
6396 neglab = gen_label_rtx ();
6397 donelab = gen_label_rtx ();
6398 i0 = gen_reg_rtx (DImode);
6399 i1 = gen_reg_rtx (DImode);
6400 limit = gen_reg_rtx (mode);
6401 f0 = gen_reg_rtx (mode);
6403 emit_move_insn (limit,
6404 CONST_DOUBLE_FROM_REAL_VALUE (
6405 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6406 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6408 emit_insn (gen_rtx_SET (VOIDmode,
6410 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6411 emit_jump_insn (gen_jump (donelab));
6414 emit_label (neglab);
6416 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6417 emit_insn (gen_rtx_SET (VOIDmode,
6419 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6420 emit_insn (gen_movdi (i1, const1_rtx));
6421 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6422 emit_insn (gen_xordi3 (out, i0, i1));
6424 emit_label (donelab);
6427 /* Return the string to output a conditional branch to LABEL, testing
6428 register REG. LABEL is the operand number of the label; REG is the
6429 operand number of the reg. OP is the conditional expression. The mode
6430 of REG says what kind of comparison we made.
6432 DEST is the destination insn (i.e. the label), INSN is the source.
6434 REVERSED is nonzero if we should reverse the sense of the comparison.
6436 ANNUL is nonzero if we should generate an annulling branch. */
6439 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6440 int annul, rtx insn)
6442 static char string[64];
6443 enum rtx_code code = GET_CODE (op);
6444 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6449 /* Branches on registers are limited to +-128KB. If it is too far away,
6462 brgez,a,pn %o1, .LC29
6468 ba,pt %xcc, .LC29 */
6470 far = get_attr_length (insn) >= 3;
6472 /* If not floating-point or if EQ or NE, we can just reverse the code. */
6474 code = reverse_condition (code);
6476 /* Only 64 bit versions of these instructions exist. */
6477 gcc_assert (mode == DImode);
6479 /* Start by writing the branch condition. */
6484 strcpy (string, "brnz");
6488 strcpy (string, "brz");
6492 strcpy (string, "brgez");
6496 strcpy (string, "brlz");
6500 strcpy (string, "brlez");
6504 strcpy (string, "brgz");
6511 p = strchr (string, '\0');
6513 /* Now add the annulling, reg, label, and nop. */
6520 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6523 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6528 *p = p < string + 8 ? '\t' : ' ';
6536 int veryfar = 1, delta;
6538 if (INSN_ADDRESSES_SET_P ())
6540 delta = (INSN_ADDRESSES (INSN_UID (dest))
6541 - INSN_ADDRESSES (INSN_UID (insn)));
6542 /* Leave some instructions for "slop". */
6543 if (delta >= -260000 && delta < 260000)
6547 strcpy (p, ".+12\n\t nop\n\t");
6548 /* Skip the next insn if requested or
6549 if we know that it will be a nop. */
6550 if (annul || ! final_sequence)
6560 strcpy (p, "ba,pt\t%%xcc, ");
6574 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
6575 Such instructions cannot be used in the delay slot of return insn on v9.
6576 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
6580 epilogue_renumber (register rtx *where, int test)
6582 register const char *fmt;
6584 register enum rtx_code code;
6589 code = GET_CODE (*where);
6594 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6596 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6597 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
6605 /* Do not replace the frame pointer with the stack pointer because
6606 it can cause the delayed instruction to load below the stack.
6607 This occurs when instructions like:
6609 (set (reg/i:SI 24 %i0)
6610 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6611 (const_int -20 [0xffffffec])) 0))
6613 are in the return delay slot. */
6615 if (GET_CODE (XEXP (*where, 0)) == REG
6616 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6617 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6618 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6623 if (SPARC_STACK_BIAS
6624 && GET_CODE (XEXP (*where, 0)) == REG
6625 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6633 fmt = GET_RTX_FORMAT (code);
6635 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6640 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6641 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6644 else if (fmt[i] == 'e'
6645 && epilogue_renumber (&(XEXP (*where, i)), test))
6651 /* Leaf functions and non-leaf functions have different needs. */
6654 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6657 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6659 static const int *const reg_alloc_orders[] = {
6660 reg_leaf_alloc_order,
6661 reg_nonleaf_alloc_order};
6664 order_regs_for_local_alloc (void)
6666 static int last_order_nonleaf = 1;
6668 if (df_regs_ever_live_p (15) != last_order_nonleaf)
6670 last_order_nonleaf = !last_order_nonleaf;
6671 memcpy ((char *) reg_alloc_order,
6672 (const char *) reg_alloc_orders[last_order_nonleaf],
6673 FIRST_PSEUDO_REGISTER * sizeof (int));
6677 /* Return 1 if REG and MEM are legitimate enough to allow the various
6678 mem<-->reg splits to be run. */
6681 sparc_splitdi_legitimate (rtx reg, rtx mem)
6683 /* Punt if we are here by mistake. */
6684 gcc_assert (reload_completed);
6686 /* We must have an offsettable memory reference. */
6687 if (! offsettable_memref_p (mem))
6690 /* If we have legitimate args for ldd/std, we do not want
6691 the split to happen. */
6692 if ((REGNO (reg) % 2) == 0
6693 && mem_min_alignment (mem, 8))
6700 /* Return 1 if x and y are some kind of REG and they refer to
6701 different hard registers. This test is guaranteed to be
6702 run after reload. */
6705 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6707 if (GET_CODE (x) != REG)
6709 if (GET_CODE (y) != REG)
6711 if (REGNO (x) == REGNO (y))
6716 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6717 This makes them candidates for using ldd and std insns.
6719 Note reg1 and reg2 *must* be hard registers. */
6722 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6724 /* We might have been passed a SUBREG. */
6725 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6728 if (REGNO (reg1) % 2 != 0)
6731 /* Integer ldd is deprecated in SPARC V9 */
6732 if (TARGET_V9 && REGNO (reg1) < 32)
6735 return (REGNO (reg1) == REGNO (reg2) - 1);
6738 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in an ldd or std insn.
6741 This can only happen when addr1 and addr2, the addresses in mem1
6742 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6743 addr1 must also be aligned on a 64-bit boundary.
6745 Also iff dependent_reg_rtx is not null it should not be used to
6746 compute the address for mem1, i.e. we cannot optimize a sequence
6758 But, note that the transformation from:
6763 is perfectly fine. Thus, the peephole2 patterns always pass us
6764 the destination register of the first load, never the second one.
6766 For stores we don't have a similar problem, so dependent_reg_rtx is NULL_RTX. */
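/* For instance, the pair [%o0 + 8] / [%o0 + 12] is acceptable here,
   whereas [%o0 + 4] / [%o0 + 8] is not: the first offset must be a
   multiple of 8 so that the combined access is 64-bit aligned.  */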
6770 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6774 HOST_WIDE_INT offset1;
6776 /* The mems cannot be volatile. */
6777 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6780 /* MEM1 should be aligned on a 64-bit boundary. */
6781 if (MEM_ALIGN (mem1) < 64)
6784 addr1 = XEXP (mem1, 0);
6785 addr2 = XEXP (mem2, 0);
6787 /* Extract a register number and offset (if used) from the first addr. */
6788 if (GET_CODE (addr1) == PLUS)
6790 /* If not a REG, return zero. */
6791 if (GET_CODE (XEXP (addr1, 0)) != REG)
6795 reg1 = REGNO (XEXP (addr1, 0));
6796 /* The offset must be constant! */
6797 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6799 offset1 = INTVAL (XEXP (addr1, 1));
6802 else if (GET_CODE (addr1) != REG)
6806 reg1 = REGNO (addr1);
6807 /* This was a simple (mem (reg)) expression. Offset is 0. */
6811 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
6812 if (GET_CODE (addr2) != PLUS)
6815 if (GET_CODE (XEXP (addr2, 0)) != REG
6816 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6819 if (reg1 != REGNO (XEXP (addr2, 0)))
6822 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6825 /* The first offset must be evenly divisible by 8 to ensure the
6826 address is 64 bit aligned. */
6827 if (offset1 % 8 != 0)
6830 /* The offset for the second addr must be 4 more than the first addr. */
6831 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6834 /* All the tests passed. addr1 and addr2 are valid for ldd and std insns. */
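/* For instance, with a suitably aligned base the pair
       ld  [%o2 + 8],  %o0
       ld  [%o2 + 12], %o1
   passes these checks (same base register, first offset a multiple of 8,
   second offset exactly 4 larger) and can become  ldd [%o2 + 8], %o0,
   provided the destination registers also satisfy registers_ok_for_ldd_peep.  */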
6839 /* Return 1 if reg is a pseudo, or is the first register in
6840 a hard register pair. This makes it suitable for use in
6841 ldd and std insns. */
6844 register_ok_for_ldd (rtx reg)
6846 /* We might have been passed a SUBREG. */
6850 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6851 return (REGNO (reg) % 2 == 0);
6856 /* Return 1 if OP is a memory whose address is known to be
6857 aligned to 8-byte boundary, or a pseudo during reload.
6858 This makes it suitable for use in ldd and std insns. */
6861 memory_ok_for_ldd (rtx op)
6865 /* In 64-bit mode, we assume that the address is word-aligned. */
6866 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
6869 if ((reload_in_progress || reload_completed)
6870 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
6873 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
6875 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
6884 /* Print operand X (an rtx) in assembler syntax to file FILE.
6885 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6886 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6889 print_operand (FILE *file, rtx x, int code)
6894 /* Output an insn in a delay slot. */
6896 sparc_indent_opcode = 1;
6898 fputs ("\n\t nop", file);
6901 /* Output an annul flag if there's nothing for the delay slot and we
6902 are optimizing. This is always used with '(' below.
6903 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6904 this is a dbx bug. So, we only do this when optimizing.
6905 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6906 Always emit a nop in case the next instruction is a branch. */
6907 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6911 /* Output a 'nop' if there's nothing for the delay slot and we are
6912 not optimizing. This is always used with '*' above. */
6913 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6914 fputs ("\n\t nop", file);
6915 else if (final_sequence)
6916 sparc_indent_opcode = 1;
6919 /* Output the right displacement from the saved PC on function return.
6920 The caller may have placed an "unimp" insn immediately after the call
6921 so we have to account for it. This insn is used in the 32-bit ABI
6922 when calling a function that returns a non zero-sized structure. The
6923 64-bit ABI doesn't have it. Be careful to have this test be the same
6924 as that used on the call. The exception here is that when
6925 sparc_std_struct_return is enabled, the psABI is followed exactly
6926 and the adjustment is made by the code in sparc_struct_value_rtx.
6927 The call emitted is the same when sparc_std_struct_return is enabled or not. */
6930 && cfun->returns_struct
6931 && ! sparc_std_struct_return
6932 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
6934 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6940 /* Output the Embedded Medium/Anywhere code model base register. */
6941 fputs (EMBMEDANY_BASE_REG, file);
6944 /* Print some local dynamic TLS name. */
6945 assemble_name (file, get_some_local_dynamic_name ());
6949 /* Adjust the operand to take into account a RESTORE operation. */
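/* After a RESTORE the callee's %i registers (hard regs 24-31) become the
   caller's %o registers (hard regs 8-15), which is why the code below
   remaps register numbers 24-31 by subtracting 16.  */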
6950 if (GET_CODE (x) == CONST_INT)
6952 else if (GET_CODE (x) != REG)
6953 output_operand_lossage ("invalid %%Y operand");
6954 else if (REGNO (x) < 8)
6955 fputs (reg_names[REGNO (x)], file);
6956 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6957 fputs (reg_names[REGNO (x)-16], file);
6959 output_operand_lossage ("invalid %%Y operand");
6962 /* Print out the low order register name of a register pair. */
6963 if (WORDS_BIG_ENDIAN)
6964 fputs (reg_names[REGNO (x)+1], file);
6966 fputs (reg_names[REGNO (x)], file);
6969 /* Print out the high order register name of a register pair. */
6970 if (WORDS_BIG_ENDIAN)
6971 fputs (reg_names[REGNO (x)], file);
6973 fputs (reg_names[REGNO (x)+1], file);
6976 /* Print out the second register name of a register pair or quad.
6977 I.e., R (%o0) => %o1. */
6978 fputs (reg_names[REGNO (x)+1], file);
6981 /* Print out the third register name of a register quad.
6982 I.e., S (%o0) => %o2. */
6983 fputs (reg_names[REGNO (x)+2], file);
6986 /* Print out the fourth register name of a register quad.
6987 I.e., T (%o0) => %o3. */
6988 fputs (reg_names[REGNO (x)+3], file);
6991 /* Print a condition code register. */
6992 if (REGNO (x) == SPARC_ICC_REG)
6994 /* We don't handle CC[X]_NOOVmode because they're not supposed
6996 if (GET_MODE (x) == CCmode)
6997 fputs ("%icc", file);
6998 else if (GET_MODE (x) == CCXmode)
6999 fputs ("%xcc", file);
7004 /* %fccN register */
7005 fputs (reg_names[REGNO (x)], file);
7008 /* Print the operand's address only. */
7009 output_address (XEXP (x, 0));
7012 /* In this case we need a register. Use %g0 if the
7013 operand is const0_rtx. */
7015 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
7017 fputs ("%g0", file);
7024 switch (GET_CODE (x))
7026 case IOR: fputs ("or", file); break;
7027 case AND: fputs ("and", file); break;
7028 case XOR: fputs ("xor", file); break;
7029 default: output_operand_lossage ("invalid %%A operand");
7034 switch (GET_CODE (x))
7036 case IOR: fputs ("orn", file); break;
7037 case AND: fputs ("andn", file); break;
7038 case XOR: fputs ("xnor", file); break;
7039 default: output_operand_lossage ("invalid %%B operand");
7043 /* These are used by the conditional move instructions. */
7047 enum rtx_code rc = GET_CODE (x);
7051 enum machine_mode mode = GET_MODE (XEXP (x, 0));
7052 if (mode == CCFPmode || mode == CCFPEmode)
7053 rc = reverse_condition_maybe_unordered (GET_CODE (x));
7055 rc = reverse_condition (GET_CODE (x));
7059 case NE: fputs ("ne", file); break;
7060 case EQ: fputs ("e", file); break;
7061 case GE: fputs ("ge", file); break;
7062 case GT: fputs ("g", file); break;
7063 case LE: fputs ("le", file); break;
7064 case LT: fputs ("l", file); break;
7065 case GEU: fputs ("geu", file); break;
7066 case GTU: fputs ("gu", file); break;
7067 case LEU: fputs ("leu", file); break;
7068 case LTU: fputs ("lu", file); break;
7069 case LTGT: fputs ("lg", file); break;
7070 case UNORDERED: fputs ("u", file); break;
7071 case ORDERED: fputs ("o", file); break;
7072 case UNLT: fputs ("ul", file); break;
7073 case UNLE: fputs ("ule", file); break;
7074 case UNGT: fputs ("ug", file); break;
7075 case UNGE: fputs ("uge", file); break;
7076 case UNEQ: fputs ("ue", file); break;
7077 default: output_operand_lossage (code == 'c'
7078 ? "invalid %%c operand"
7079 : "invalid %%C operand");
7084 /* These are used by the movr instruction pattern. */
7088 enum rtx_code rc = (code == 'd'
7089 ? reverse_condition (GET_CODE (x))
7093 case NE: fputs ("ne", file); break;
7094 case EQ: fputs ("e", file); break;
7095 case GE: fputs ("gez", file); break;
7096 case LT: fputs ("lz", file); break;
7097 case LE: fputs ("lez", file); break;
7098 case GT: fputs ("gz", file); break;
7099 default: output_operand_lossage (code == 'd'
7100 ? "invalid %%d operand"
7101 : "invalid %%D operand");
7108 /* Print a sign-extended character. */
7109 int i = trunc_int_for_mode (INTVAL (x), QImode);
7110 fprintf (file, "%d", i);
7115 /* Operand must be a MEM; write its address. */
7116 if (GET_CODE (x) != MEM)
7117 output_operand_lossage ("invalid %%f operand");
7118 output_address (XEXP (x, 0));
7123 /* Print a sign-extended 32-bit value. */
7125 if (GET_CODE(x) == CONST_INT)
7127 else if (GET_CODE(x) == CONST_DOUBLE)
7128 i = CONST_DOUBLE_LOW (x);
7131 output_operand_lossage ("invalid %%s operand");
7134 i = trunc_int_for_mode (i, SImode);
7135 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7140 /* Do nothing special. */
7144 /* Undocumented flag. */
7145 output_operand_lossage ("invalid operand output code");
7148 if (GET_CODE (x) == REG)
7149 fputs (reg_names[REGNO (x)], file);
7150 else if (GET_CODE (x) == MEM)
7153 /* Poor Sun assembler doesn't understand absolute addressing. */
7154 if (CONSTANT_P (XEXP (x, 0)))
7155 fputs ("%g0+", file);
7156 output_address (XEXP (x, 0));
7159 else if (GET_CODE (x) == HIGH)
7161 fputs ("%hi(", file);
7162 output_addr_const (file, XEXP (x, 0));
7165 else if (GET_CODE (x) == LO_SUM)
7167 print_operand (file, XEXP (x, 0), 0);
7168 if (TARGET_CM_MEDMID)
7169 fputs ("+%l44(", file);
7171 fputs ("+%lo(", file);
7172 output_addr_const (file, XEXP (x, 1));
7175 else if (GET_CODE (x) == CONST_DOUBLE
7176 && (GET_MODE (x) == VOIDmode
7177 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7179 if (CONST_DOUBLE_HIGH (x) == 0)
7180 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7181 else if (CONST_DOUBLE_HIGH (x) == -1
7182 && CONST_DOUBLE_LOW (x) < 0)
7183 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7185 output_operand_lossage ("long long constant not a valid immediate operand");
7187 else if (GET_CODE (x) == CONST_DOUBLE)
7188 output_operand_lossage ("floating point constant not a valid immediate operand");
7189 else { output_addr_const (file, x); }
7192 /* Target hook for assembling integer objects. The sparc version has
7193 special handling for aligned DI-mode objects. */
7196 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7198 /* ??? We only output .xword's for symbols and only then in environments
7199 where the assembler can handle them. */
7200 if (aligned_p && size == 8
7201 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7205 assemble_integer_with_op ("\t.xword\t", x);
7210 assemble_aligned_integer (4, const0_rtx);
7211 assemble_aligned_integer (4, x);
7215 return default_assemble_integer (x, size, aligned_p);
7218 /* Return the value of a code used in the .proc pseudo-op that says
7219 what kind of result this function returns. For non-C types, we pick
7220 the closest C type. */
7222 #ifndef SHORT_TYPE_SIZE
7223 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7226 #ifndef INT_TYPE_SIZE
7227 #define INT_TYPE_SIZE BITS_PER_WORD
7230 #ifndef LONG_TYPE_SIZE
7231 #define LONG_TYPE_SIZE BITS_PER_WORD
7234 #ifndef LONG_LONG_TYPE_SIZE
7235 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7238 #ifndef FLOAT_TYPE_SIZE
7239 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7242 #ifndef DOUBLE_TYPE_SIZE
7243 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7246 #ifndef LONG_DOUBLE_TYPE_SIZE
7247 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7251 sparc_type_code (register tree type)
7253 register unsigned long qualifiers = 0;
7254 register unsigned shift;
7256 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7257 setting more, since some assemblers will give an error for this. Also,
7258 we must be careful to avoid shifts of 32 bits or more to avoid getting
7259 unpredictable results. */
7261 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7263 switch (TREE_CODE (type))
7269 qualifiers |= (3 << shift);
7274 qualifiers |= (2 << shift);
7278 case REFERENCE_TYPE:
7280 qualifiers |= (1 << shift);
7284 return (qualifiers | 8);
7287 case QUAL_UNION_TYPE:
7288 return (qualifiers | 9);
7291 return (qualifiers | 10);
7294 return (qualifiers | 16);
7297 /* If this is a range type, consider it to be the underlying
7299 if (TREE_TYPE (type) != 0)
7302 /* Carefully distinguish all the standard types of C,
7303 without messing up if the language is not C. We do this by
7304 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7305 look at both the names and the above fields, but that's redundant.
7306 Any type whose size is between two C types will be considered
7307 to be the wider of the two types. Also, we do not have a
7308 special code to use for "long long", so anything wider than
7309 long is treated the same. Note that we can't distinguish
7310 between "int" and "long" in this code if they are the same
7311 size, but that's fine, since neither can the assembler. */
7313 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7314 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7316 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7317 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7319 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7320 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7323 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7326 /* If this is a range type, consider it to be the underlying
7328 if (TREE_TYPE (type) != 0)
7331 /* Carefully distinguish all the standard types of C,
7332 without messing up if the language is not C. */
7334 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7335 return (qualifiers | 6);
7338 return (qualifiers | 7);
7340 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7341 /* ??? We need to distinguish between double and float complex types,
7342 but I don't know how yet because I can't reach this code from
7343 existing front-ends. */
7344 return (qualifiers | 7); /* Who knows? */
7347 case BOOLEAN_TYPE: /* Boolean truth value type. */
7348 case LANG_TYPE: /* ? */
7352 gcc_unreachable (); /* Not a type! */
7359 /* Nested function support. */
7361 /* Emit RTL insns to initialize the variable parts of a trampoline.
7362 FNADDR is an RTX for the address of the function's pure code.
7363 CXT is an RTX for the static chain value for the function.
7365 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7366 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7367 (to store insns). This is a bit excessive. Perhaps a different
7368 mechanism would be better here.
7370 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7373 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7375 /* SPARC 32-bit trampoline:
	sethi	%hi(fn), %g1
7378 	sethi	%hi(static), %g2
	jmp	%g1+%lo(fn)
7380 	or	%g2, %lo(static), %g2
7382 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7383 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7387 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7388 expand_binop (SImode, ior_optab,
7389 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7390 size_int (10), 0, 1),
7391 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7392 NULL_RTX, 1, OPTAB_DIRECT));
7395 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7396 expand_binop (SImode, ior_optab,
7397 expand_shift (RSHIFT_EXPR, SImode, cxt,
7398 size_int (10), 0, 1),
7399 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7400 NULL_RTX, 1, OPTAB_DIRECT));
7403 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7404 expand_binop (SImode, ior_optab,
7405 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7406 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7407 NULL_RTX, 1, OPTAB_DIRECT));
7410 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7411 expand_binop (SImode, ior_optab,
7412 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7413 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7414 NULL_RTX, 1, OPTAB_DIRECT));
7416 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7417 aligned on a 16 byte boundary so one flush clears it all. */
7418 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
7419 if (sparc_cpu != PROCESSOR_ULTRASPARC
7420 && sparc_cpu != PROCESSOR_ULTRASPARC3
7421 && sparc_cpu != PROCESSOR_NIAGARA
7422 && sparc_cpu != PROCESSOR_NIAGARA2)
7423 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7424 plus_constant (tramp, 8)))));
7426 /* Call __enable_execute_stack after writing onto the stack to make sure
7427 the stack address is accessible. */
7428 #ifdef ENABLE_EXECUTE_STACK
7429 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7430 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7435 /* The 64-bit version is simpler because it makes more sense to load the
7436 values as "immediate" data out of the trampoline. It's also easier since
7437 we can read the PC without clobbering a register. */
7440 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7442 /* SPARC 64-bit trampoline:
7451 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7452 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7453 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7454 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7455 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7456 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7457 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7458 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7459 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7460 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
7461 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7463 if (sparc_cpu != PROCESSOR_ULTRASPARC
7464 && sparc_cpu != PROCESSOR_ULTRASPARC3
7465 && sparc_cpu != PROCESSOR_NIAGARA
7466 && sparc_cpu != PROCESSOR_NIAGARA2)
7467 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7469 /* Call __enable_execute_stack after writing onto the stack to make sure
7470 the stack address is accessible. */
7471 #ifdef ENABLE_EXECUTE_STACK
7472 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7473 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7477 /* Adjust the cost of a scheduling dependency. Return the new cost of
7478 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7481 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7483 enum attr_type insn_type;
7485 if (! recog_memoized (insn))
7488 insn_type = get_attr_type (insn);
7490 if (REG_NOTE_KIND (link) == 0)
7492 /* Data dependency; DEP_INSN writes a register that INSN reads some
7495 /* If a load, then the dependence must be on the memory address;
7496 add an extra "cycle". Note that the cost could be two cycles
7497 if the reg was written late in an instruction group; we cannot tell here. */
7499 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7502 /* Get the delay only if the address of the store is the dependence. */
7503 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7505 rtx pat = PATTERN(insn);
7506 rtx dep_pat = PATTERN (dep_insn);
7508 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7509 return cost; /* This should not happen! */
7511 /* The dependency between the two instructions was on the data that
7512 is being stored. Assume that this implies that the address of the
7513 store is not dependent. */
7514 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7517 return cost + 3; /* An approximation. */
7520 /* A shift instruction cannot receive its data from an instruction
7521 in the same cycle; add a one cycle penalty. */
7522 if (insn_type == TYPE_SHIFT)
7523 return cost + 3; /* Split before cascade into shift. */
7527 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7528 INSN writes some cycles later. */
7530 /* These are only significant for the fpu unit; writing a fp reg before
7531 the fpu has finished with it stalls the processor. */
7533 /* Reusing an integer register causes no problems. */
7534 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7542 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7544 enum attr_type insn_type, dep_type;
7545 rtx pat = PATTERN(insn);
7546 rtx dep_pat = PATTERN (dep_insn);
7548 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7551 insn_type = get_attr_type (insn);
7552 dep_type = get_attr_type (dep_insn);
7554 switch (REG_NOTE_KIND (link))
7557 /* Data dependency; DEP_INSN writes a register that INSN reads some
7564 /* Get the delay iff the address of the store is the dependence. */
7565 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7568 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7575 /* If a load, then the dependence must be on the memory address. If
7576 the addresses aren't equal, then it might be a false dependency */
7577 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7579 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7580 || GET_CODE (SET_DEST (dep_pat)) != MEM
7581 || GET_CODE (SET_SRC (pat)) != MEM
7582 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7583 XEXP (SET_SRC (pat), 0)))
7591 /* Compare to branch latency is 0. There is no benefit from
7592 separating compare and branch. */
7593 if (dep_type == TYPE_COMPARE)
7595 /* Floating point compare to branch latency is less than
7596 compare to conditional move. */
7597 if (dep_type == TYPE_FPCMP)
7606 /* Anti-dependencies only penalize the fpu unit. */
7607 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7619 sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
7623 case PROCESSOR_SUPERSPARC:
7624 cost = supersparc_adjust_cost (insn, link, dep, cost);
7626 case PROCESSOR_HYPERSPARC:
7627 case PROCESSOR_SPARCLITE86X:
7628 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7637 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7638 int sched_verbose ATTRIBUTE_UNUSED,
7639 int max_ready ATTRIBUTE_UNUSED)
7644 sparc_use_sched_lookahead (void)
7646 if (sparc_cpu == PROCESSOR_NIAGARA
7647 || sparc_cpu == PROCESSOR_NIAGARA2)
7649 if (sparc_cpu == PROCESSOR_ULTRASPARC
7650 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7652 if ((1 << sparc_cpu) &
7653 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7654 (1 << PROCESSOR_SPARCLITE86X)))
7660 sparc_issue_rate (void)
7664 case PROCESSOR_NIAGARA:
7665 case PROCESSOR_NIAGARA2:
7669 /* Assume V9 processors are capable of at least dual-issue. */
7671 case PROCESSOR_SUPERSPARC:
7673 case PROCESSOR_HYPERSPARC:
7674 case PROCESSOR_SPARCLITE86X:
7676 case PROCESSOR_ULTRASPARC:
7677 case PROCESSOR_ULTRASPARC3:
7683 set_extends (rtx insn)
7685 register rtx pat = PATTERN (insn);
7687 switch (GET_CODE (SET_SRC (pat)))
7689 /* Load and some shift instructions zero extend. */
7692 /* sethi clears the high bits */
7694 /* LO_SUM is used with sethi. sethi cleared the high
7695 bits and the values used with lo_sum are positive */
7697 /* Store flag stores 0 or 1 */
7707 rtx op0 = XEXP (SET_SRC (pat), 0);
7708 rtx op1 = XEXP (SET_SRC (pat), 1);
7709 if (GET_CODE (op1) == CONST_INT)
7710 return INTVAL (op1) >= 0;
7711 if (GET_CODE (op0) != REG)
7713 if (sparc_check_64 (op0, insn) == 1)
7715 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7720 rtx op0 = XEXP (SET_SRC (pat), 0);
7721 rtx op1 = XEXP (SET_SRC (pat), 1);
7722 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7724 if (GET_CODE (op1) == CONST_INT)
7725 return INTVAL (op1) >= 0;
7726 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7729 return GET_MODE (SET_SRC (pat)) == SImode;
7730 /* Positive integers leave the high bits zero. */
7732 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7734 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7737 return - (GET_MODE (SET_SRC (pat)) == SImode);
7739 return sparc_check_64 (SET_SRC (pat), insn);
7745 /* We _ought_ to have only one kind per function, but... */
7746 static GTY(()) rtx sparc_addr_diff_list;
7747 static GTY(()) rtx sparc_addr_list;
7750 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7752 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7754 sparc_addr_diff_list
7755 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7757 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7761 sparc_output_addr_vec (rtx vec)
7763 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7764 int idx, vlen = XVECLEN (body, 0);
7766 #ifdef ASM_OUTPUT_ADDR_VEC_START
7767 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7770 #ifdef ASM_OUTPUT_CASE_LABEL
7771 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7774 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7777 for (idx = 0; idx < vlen; idx++)
7779 ASM_OUTPUT_ADDR_VEC_ELT
7780 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7783 #ifdef ASM_OUTPUT_ADDR_VEC_END
7784 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7789 sparc_output_addr_diff_vec (rtx vec)
7791 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7792 rtx base = XEXP (XEXP (body, 0), 0);
7793 int idx, vlen = XVECLEN (body, 1);
7795 #ifdef ASM_OUTPUT_ADDR_VEC_START
7796 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7799 #ifdef ASM_OUTPUT_CASE_LABEL
7800 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7803 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7806 for (idx = 0; idx < vlen; idx++)
7808 ASM_OUTPUT_ADDR_DIFF_ELT
7811 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7812 CODE_LABEL_NUMBER (base));
7815 #ifdef ASM_OUTPUT_ADDR_VEC_END
7816 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7821 sparc_output_deferred_case_vectors (void)
7826 if (sparc_addr_list == NULL_RTX
7827 && sparc_addr_diff_list == NULL_RTX)
7830 /* Align to cache line in the function's code section. */
7831 switch_to_section (current_function_section ());
7833 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7835 ASM_OUTPUT_ALIGN (asm_out_file, align);
7837 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7838 sparc_output_addr_vec (XEXP (t, 0));
7839 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7840 sparc_output_addr_diff_vec (XEXP (t, 0));
7842 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7845 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7846 unknown. Return 1 if the high bits are zero, -1 if the register is sign-extended. */
7849 sparc_check_64 (rtx x, rtx insn)
7851 /* If a register is set only once it is safe to ignore insns this
7852 code does not know how to handle. The loop will either recognize
7853 the single set and return the correct value or fail to recognize it and return 0. */
7858 gcc_assert (GET_CODE (x) == REG);
7860 if (GET_MODE (x) == DImode)
7861 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7863 if (flag_expensive_optimizations
7864 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
7870 insn = get_last_insn_anywhere ();
7875 while ((insn = PREV_INSN (insn)))
7877 switch (GET_CODE (insn))
7890 rtx pat = PATTERN (insn);
7891 if (GET_CODE (pat) != SET)
7893 if (rtx_equal_p (x, SET_DEST (pat)))
7894 return set_extends (insn);
7895 if (y && rtx_equal_p (y, SET_DEST (pat)))
7896 return set_extends (insn);
7897 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7905 /* Returns assembly code to perform a DImode shift using
7906 a 64-bit global or out register on SPARC-V8+. */
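/* The idea: assemble the 32-bit high and low halves of operand 1 into a
   single 64-bit global or out register (sllx/srl/or), perform the 64-bit
   shift there, and then split the result back into a register pair.  */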
7908 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7910 static char asm_code[60];
7912 /* The scratch register is only required when the destination
7913 register is not a 64-bit global or out register. */
7914 if (which_alternative != 2)
7915 operands[3] = operands[0];
7917 /* We can only shift by constants <= 63. */
7918 if (GET_CODE (operands[2]) == CONST_INT)
7919 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7921 if (GET_CODE (operands[1]) == CONST_INT)
7923 output_asm_insn ("mov\t%1, %3", operands);
7927 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7928 if (sparc_check_64 (operands[1], insn) <= 0)
7929 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7930 output_asm_insn ("or\t%L1, %3, %3", operands);
7933 strcpy(asm_code, opcode);
7935 if (which_alternative != 2)
7936 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7938 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
7941 /* Output rtl to increment the profiler label LABELNO
7942 for profiling a function entry. */
7945 sparc_profile_hook (int labelno)
7950 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7951 if (NO_PROFILE_COUNTERS)
7953 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
7957 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7958 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7959 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7963 #ifdef OBJECT_FORMAT_ELF
7965 sparc_elf_asm_named_section (const char *name, unsigned int flags,
7968 if (flags & SECTION_MERGE)
7970 /* entsize cannot be expressed in this section attributes encoding style. */
7972 default_elf_asm_named_section (name, flags, decl);
7976 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
7978 if (!(flags & SECTION_DEBUG))
7979 fputs (",#alloc", asm_out_file);
7980 if (flags & SECTION_WRITE)
7981 fputs (",#write", asm_out_file);
7982 if (flags & SECTION_TLS)
7983 fputs (",#tls", asm_out_file);
7984 if (flags & SECTION_CODE)
7985 fputs (",#execinstr", asm_out_file);
7987 /* ??? Handle SECTION_BSS. */
7989 fputc ('\n', asm_out_file);
7991 #endif /* OBJECT_FORMAT_ELF */
7993 /* We do not allow indirect calls to be optimized into sibling calls.
7995 We cannot use sibling calls when delayed branches are disabled
7996 because they will likely require the call delay slot to be filled.
7998 Also, on SPARC 32-bit we cannot emit a sibling call when the
7999 current function returns a structure. This is because the "unimp
8000 after call" convention would cause the callee to return to the
8001 wrong place. The generic code already disallows cases where the
8002 function being called returns a structure.
8004 It may seem strange how this last case could occur. Usually there
8005 is code after the call which jumps to epilogue code which dumps the
8006 return value into the struct return area. That ought to invalidate
8007 the sibling call right? Well, in the C++ case we can end up passing
8008 the pointer to the struct return area to a constructor (which returns
8009 void) and then nothing else happens. Such a sibling call would look
8010 valid without the added check here.
8012 VxWorks PIC PLT entries require the global pointer to be initialized
8013 on entry. We therefore can't emit sibling calls to them. */
8015 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8018 && flag_delayed_branch
8019 && (TARGET_ARCH64 || ! cfun->returns_struct)
8020 && !(TARGET_VXWORKS_RTP
8022 && !targetm.binds_local_p (decl)));
8025 /* libfunc renaming. */
8026 #include "config/gofast.h"
8029 sparc_init_libfuncs (void)
8033 /* Use the subroutines that Sun's library provides for integer
8034 multiply and divide. The `*' prevents an underscore from
8035 being prepended by the compiler. .umul is a little faster than .mul. */
8037 set_optab_libfunc (smul_optab, SImode, "*.umul");
8038 set_optab_libfunc (sdiv_optab, SImode, "*.div");
8039 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
8040 set_optab_libfunc (smod_optab, SImode, "*.rem");
8041 set_optab_libfunc (umod_optab, SImode, "*.urem");
8043 /* TFmode arithmetic. These names are part of the SPARC 32bit ABI. */
8044 set_optab_libfunc (add_optab, TFmode, "_Q_add");
8045 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
8046 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
8047 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
8048 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
8050 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
8051 is because with soft-float, the SFmode and DFmode sqrt
8052 instructions will be absent, and the compiler will notice and
8053 try to use the TFmode sqrt instruction for calls to the
8054 builtin function sqrt, but this fails. */
8056 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
8058 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
8059 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
8060 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
8061 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
8062 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
8063 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
8065 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
8066 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
8067 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
8068 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
8070 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
8071 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
8072 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
8073 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
8075 if (DITF_CONVERSION_LIBFUNCS)
8077 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
8078 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
8079 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
8080 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
8083 if (SUN_CONVERSION_LIBFUNCS)
8085 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
8086 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
8087 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
8088 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
8093 /* In the SPARC 64bit ABI, SImode multiply and divide functions
8094 do not exist in the library. Make sure the compiler does not
8095 emit calls to them by accident. (It should always use the
8096 hardware instructions.) */
8097 set_optab_libfunc (smul_optab, SImode, 0);
8098 set_optab_libfunc (sdiv_optab, SImode, 0);
8099 set_optab_libfunc (udiv_optab, SImode, 0);
8100 set_optab_libfunc (smod_optab, SImode, 0);
8101 set_optab_libfunc (umod_optab, SImode, 0);
8103 if (SUN_INTEGER_MULTIPLY_64)
8105 set_optab_libfunc (smul_optab, DImode, "__mul64");
8106 set_optab_libfunc (sdiv_optab, DImode, "__div64");
8107 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
8108 set_optab_libfunc (smod_optab, DImode, "__rem64");
8109 set_optab_libfunc (umod_optab, DImode, "__urem64");
8112 if (SUN_CONVERSION_LIBFUNCS)
8114 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
8115 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
8116 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
8117 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
8121 gofast_maybe_init_libfuncs ();
8124 #define def_builtin(NAME, CODE, TYPE) \
8125 add_builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
8128 /* Implement the TARGET_INIT_BUILTINS target hook.
8129 Create builtin functions for special SPARC instructions. */
8132 sparc_init_builtins (void)
8135 sparc_vis_init_builtins ();
8138 /* Create builtin functions for VIS 1.0 instructions. */
8141 sparc_vis_init_builtins (void)
8143 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
8144 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
8145 tree v4hi = build_vector_type (intHI_type_node, 4);
8146 tree v2hi = build_vector_type (intHI_type_node, 2);
8147 tree v2si = build_vector_type (intSI_type_node, 2);
8149 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
8150 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
8151 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
8152 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
8153 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
8154 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
8155 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
8156 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
8157 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
8158 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
8159 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
8160 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
8161 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
8163 intDI_type_node, 0);
8164 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
8166 intDI_type_node, 0);
8167 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
8169 intSI_type_node, 0);
8170 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
8172 intDI_type_node, 0);
8174 /* Packing and expanding vectors. */
8175 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
8176 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
8177 v8qi_ftype_v2si_v8qi);
8178 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
8180 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
8181 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
8182 v8qi_ftype_v4qi_v4qi);
8184 /* Multiplications. */
8185 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
8186 v4hi_ftype_v4qi_v4hi);
8187 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
8188 v4hi_ftype_v4qi_v2hi);
8189 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
8190 v4hi_ftype_v4qi_v2hi);
8191 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
8192 v4hi_ftype_v8qi_v4hi);
8193 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
8194 v4hi_ftype_v8qi_v4hi);
8195 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
8196 v2si_ftype_v4qi_v2hi);
8197 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
8198 v2si_ftype_v4qi_v2hi);
8200 /* Data aligning. */
8201 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
8202 v4hi_ftype_v4hi_v4hi);
8203 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
8204 v8qi_ftype_v8qi_v8qi);
8205 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
8206 v2si_ftype_v2si_v2si);
8207 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
8210 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
8213 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
8216 /* Pixel distance. */
8217 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
8218 di_ftype_v8qi_v8qi_di);
8221 /* Handle TARGET_EXPAND_BUILTIN target hook.
8222 Expand builtin functions for sparc intrinsics. */
8225 sparc_expand_builtin (tree exp, rtx target,
8226 rtx subtarget ATTRIBUTE_UNUSED,
8227 enum machine_mode tmode ATTRIBUTE_UNUSED,
8228 int ignore ATTRIBUTE_UNUSED)
8231 call_expr_arg_iterator iter;
8232 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8233 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
8235 enum machine_mode mode[4];
8238 mode[0] = insn_data[icode].operand[0].mode;
8240 || GET_MODE (target) != mode[0]
8241 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
8242 op[0] = gen_reg_rtx (mode[0]);
8246 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8249 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8250 op[arg_count] = expand_normal (arg);
8252 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8254 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8260 pat = GEN_FCN (icode) (op[0], op[1]);
8263 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8266 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
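/* Helper for sparc_handle_vis_mul8x16 below: multiply an unsigned 8-bit
   element by a 16-bit element; adding 128 before dividing by 256 keeps
   the upper bits of the (up to) 24-bit product with rounding to nearest,
   matching what the VIS fmul8x16 family of instructions computes.  */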
8281 sparc_vis_mul8x16 (int e8, int e16)
8283 return (e8 * e16 + 128) / 256;
8286 /* Multiply the vector elements in ELTS0 to the elements in ELTS1 as specified
8287 by FNCODE. All of the elements in ELTS0 and ELTS1 lists must be integer
8288 constants. A tree list with the results of the multiplications is returned,
8289 and each element in the list is of INNER_TYPE. */
8292 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8294 tree n_elts = NULL_TREE;
8299 case CODE_FOR_fmul8x16_vis:
8300 for (; elts0 && elts1;
8301 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8304 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8305 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8306 n_elts = tree_cons (NULL_TREE,
8307 build_int_cst (inner_type, val),
8312 case CODE_FOR_fmul8x16au_vis:
8313 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8315 for (; elts0; elts0 = TREE_CHAIN (elts0))
8318 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8320 n_elts = tree_cons (NULL_TREE,
8321 build_int_cst (inner_type, val),
8326 case CODE_FOR_fmul8x16al_vis:
8327 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8329 for (; elts0; elts0 = TREE_CHAIN (elts0))
8332 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8334 n_elts = tree_cons (NULL_TREE,
8335 build_int_cst (inner_type, val),
8344 return nreverse (n_elts);
8347 /* Handle TARGET_FOLD_BUILTIN target hook.
8348 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
8349 result of the function call is ignored. NULL_TREE is returned if the
8350 function could not be folded. */
8353 sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
8355 tree arg0, arg1, arg2;
8356 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8357 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
8360 && icode != CODE_FOR_alignaddrsi_vis
8361 && icode != CODE_FOR_alignaddrdi_vis)
8362 return fold_convert (rtype, integer_zero_node);
8366 case CODE_FOR_fexpand_vis:
8367 arg0 = TREE_VALUE (arglist);
8370 if (TREE_CODE (arg0) == VECTOR_CST)
8372 tree inner_type = TREE_TYPE (rtype);
8373 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8374 tree n_elts = NULL_TREE;
8376 for (; elts; elts = TREE_CHAIN (elts))
8378 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
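/* VIS fexpand widens each unsigned 8-bit element into a 16-bit
   fixed-point value by placing it in bits 4..11, hence the shift
   left by 4 just above.  */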
8379 n_elts = tree_cons (NULL_TREE,
8380 build_int_cst (inner_type, val),
8383 return build_vector (rtype, nreverse (n_elts));
8387 case CODE_FOR_fmul8x16_vis:
8388 case CODE_FOR_fmul8x16au_vis:
8389 case CODE_FOR_fmul8x16al_vis:
8390 arg0 = TREE_VALUE (arglist);
8391 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8395 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8397 tree inner_type = TREE_TYPE (rtype);
8398 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8399 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8400 tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
8403 return build_vector (rtype, n_elts);
8407 case CODE_FOR_fpmerge_vis:
8408 arg0 = TREE_VALUE (arglist);
8409 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8413 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8415 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8416 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8417 tree n_elts = NULL_TREE;
8419 for (; elts0 && elts1;
8420 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8422 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8423 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8426 return build_vector (rtype, nreverse (n_elts));
8430 case CODE_FOR_pdist_vis:
8431 arg0 = TREE_VALUE (arglist);
8432 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8433 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8438 if (TREE_CODE (arg0) == VECTOR_CST
8439 && TREE_CODE (arg1) == VECTOR_CST
8440 && TREE_CODE (arg2) == INTEGER_CST)
8443 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8444 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8445 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8446 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8448 for (; elts0 && elts1;
8449 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8451 unsigned HOST_WIDE_INT
8452 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8453 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8454 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8455 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8457 unsigned HOST_WIDE_INT l;
8460 overflow |= neg_double (low1, high1, &l, &h);
8461 overflow |= add_double (low0, high0, l, h, &l, &h);
8463 overflow |= neg_double (l, h, &l, &h);
8465 overflow |= add_double (low, high, l, h, &low, &high);
8468 gcc_assert (overflow == 0);
8470 return build_int_cst_wide (rtype, low, high);
8480 /* ??? This duplicates information provided to the compiler by the
8481 ??? scheduler description. Some day, teach genautomata to output
8482 ??? the latencies and then CSE will just use that. */
8485 sparc_rtx_costs (rtx x, int code, int outer_code, int *total,
8486 bool speed ATTRIBUTE_UNUSED)
8488 enum machine_mode mode = GET_MODE (x);
8489 bool float_mode_p = FLOAT_MODE_P (mode);
8494 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8512 if (GET_MODE (x) == VOIDmode
8513 && ((CONST_DOUBLE_HIGH (x) == 0
8514 && CONST_DOUBLE_LOW (x) < 0x1000)
8515 || (CONST_DOUBLE_HIGH (x) == -1
8516 && CONST_DOUBLE_LOW (x) < 0
8517 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8524 /* If outer-code was a sign or zero extension, a cost
8525 of COSTS_N_INSNS (1) was already added in. This is
8526 why we are subtracting it back out. */
8527 if (outer_code == ZERO_EXTEND)
8529 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8531 else if (outer_code == SIGN_EXTEND)
8533 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8535 else if (float_mode_p)
8537 *total = sparc_costs->float_load;
8541 *total = sparc_costs->int_load;
8549 *total = sparc_costs->float_plusminus;
8551 *total = COSTS_N_INSNS (1);
8556 *total = sparc_costs->float_mul;
8557 else if (! TARGET_HARD_MUL)
8558 *total = COSTS_N_INSNS (25);
8564 if (sparc_costs->int_mul_bit_factor)
8568 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8570 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
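/* Count the 1 bits in the constant multiplier: clearing the lowest set
   bit with value &= value - 1 on each iteration is a standard population
   count, and the cost computed below grows with that bit count.  */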
8571 for (nbits = 0; value != 0; value &= value - 1)
8574 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8575 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8577 rtx x1 = XEXP (x, 1);
8578 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8579 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8581 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8583 for (; value2 != 0; value2 &= value2 - 1)
8591 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8592 bit_cost = COSTS_N_INSNS (bit_cost);
8596 *total = sparc_costs->int_mulX + bit_cost;
8598 *total = sparc_costs->int_mul + bit_cost;
8605 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8615 *total = sparc_costs->float_div_df;
8617 *total = sparc_costs->float_div_sf;
8622 *total = sparc_costs->int_divX;
8624 *total = sparc_costs->int_div;
8631 *total = COSTS_N_INSNS (1);
8638 case UNSIGNED_FLOAT:
8642 case FLOAT_TRUNCATE:
8643 *total = sparc_costs->float_move;
8648 *total = sparc_costs->float_sqrt_df;
8650 *total = sparc_costs->float_sqrt_sf;
8655 *total = sparc_costs->float_cmp;
8657 *total = COSTS_N_INSNS (1);
8662 *total = sparc_costs->float_cmove;
8664 *total = sparc_costs->int_cmove;
8668 /* Handle the NAND vector patterns. */
8669 if (sparc_vector_mode_supported_p (GET_MODE (x))
8670 && GET_CODE (XEXP (x, 0)) == NOT
8671 && GET_CODE (XEXP (x, 1)) == NOT)
8673 *total = COSTS_N_INSNS (1);
8684 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
8685 This is achieved by means of a manual dynamic stack space allocation in
8686 the current frame. We make the assumption that SEQ doesn't contain any
8687 function calls, with the possible exception of calls to the PIC helper. */
8690 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
8692 /* We must preserve the lowest 16 words for the register save area. */
8693 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
8694 /* We really need only 2 words of fresh stack space. */
8695 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
8698 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
8699 SPARC_STACK_BIAS + offset));
8701 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
8702 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8704 emit_insn (gen_rtx_SET (VOIDmode,
8705 adjust_address (slot, word_mode, UNITS_PER_WORD),
8709 emit_insn (gen_rtx_SET (VOIDmode,
8711 adjust_address (slot, word_mode, UNITS_PER_WORD)));
8712 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8713 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
8716 /* Output the assembler code for a thunk function. THUNK_DECL is the
8717 declaration for the thunk function itself, FUNCTION is the decl for
8718 the target function. DELTA is an immediate constant offset to be
8719 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8720 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8723 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8724 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8727 rtx this_rtx, insn, funexp;
8728 unsigned int int_arg_first;
8730 reload_completed = 1;
8731 epilogue_completed = 1;
8733 emit_note (NOTE_INSN_PROLOGUE_END);
8735 if (flag_delayed_branch)
8737 /* We will emit a regular sibcall below, so we need to instruct
8738 output_sibcall that we are in a leaf function. */
8739 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8741 /* This will cause final.c to invoke leaf_renumber_regs so we
8742 must behave as if we were in a not-yet-leafified function. */
8743 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8747 /* We will emit the sibcall manually below, so we will need to
8748 manually spill non-leaf registers. */
8749 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8751 /* We really are in a leaf function. */
8752 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8755 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8756 returns a structure, the structure return pointer is there instead. */
8757 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8758 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
8760 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
8762 /* Add DELTA. When possible use a plain add, otherwise load it into
8763 a register first. */
8766 rtx delta_rtx = GEN_INT (delta);
8768 if (! SPARC_SIMM13_P (delta))
8770 rtx scratch = gen_rtx_REG (Pmode, 1);
8771 emit_move_insn (scratch, delta_rtx);
8772 delta_rtx = scratch;
8775 /* THIS_RTX += DELTA. */
8776 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
8779 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
8782 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8783 rtx scratch = gen_rtx_REG (Pmode, 1);
8785 gcc_assert (vcall_offset < 0);
8787 /* SCRATCH = *THIS_RTX. */
8788 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
8790 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8791 may not have any available scratch register at this point. */
8792 if (SPARC_SIMM13_P (vcall_offset))
8794 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8795 else if (! fixed_regs[5]
8796 /* The below sequence is made up of at least 2 insns,
8797 while the default method may need only one. */
8798 && vcall_offset < -8192)
8800 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8801 emit_move_insn (scratch2, vcall_offset_rtx);
8802 vcall_offset_rtx = scratch2;
8806 rtx increment = GEN_INT (-4096);
8808 /* VCALL_OFFSET is a negative number whose typical range can be
8809 estimated as -32768..0 in 32-bit mode. In almost all cases
8810 it is therefore cheaper to emit multiple add insns than
8811 spilling and loading the constant into a register (at least
8813 while (! SPARC_SIMM13_P (vcall_offset))
8815 emit_insn (gen_add2_insn (scratch, increment));
8816 vcall_offset += 4096;
8818 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
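/* SCRATCH now holds *THIS_RTX minus the 4096-byte chunks peeled off
   above, and the remaining VCALL_OFFSET fits in a signed 13-bit
   immediate, so the memory reference below still addresses
   *THIS_RTX plus the original VCALL_OFFSET.  */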
8821 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
8822 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8823 gen_rtx_PLUS (Pmode,
8825 vcall_offset_rtx)));
8827 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
8828 emit_insn (gen_add2_insn (this_rtx, scratch));
8831 /* Generate a tail call to the target function. */
8832 if (! TREE_USED (function))
8834 assemble_external (function);
8835 TREE_USED (function) = 1;
8837 funexp = XEXP (DECL_RTL (function), 0);
8839 if (flag_delayed_branch)
8841 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8842 insn = emit_call_insn (gen_sibcall (funexp));
8843 SIBLING_CALL_P (insn) = 1;
8847 /* The hoops we have to jump through in order to generate a sibcall
8848 without using delay slots... */
8849 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
8853 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8854 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
8856 /* Delay emitting the PIC helper function because it needs to
8857 change the section and we are emitting assembly code. */
8858 load_pic_register (true); /* clobbers %o7 */
8859 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8862 emit_and_preserve (seq, spill_reg, spill_reg2);
8864 else if (TARGET_ARCH32)
8866 emit_insn (gen_rtx_SET (VOIDmode,
8868 gen_rtx_HIGH (SImode, funexp)));
8869 emit_insn (gen_rtx_SET (VOIDmode,
8871 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8873 else /* TARGET_ARCH64 */
8875 switch (sparc_cmodel)
8879 /* The destination can serve as a temporary. */
8880 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8885 /* The destination cannot serve as a temporary. */
8886 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8888 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8891 emit_and_preserve (seq, spill_reg, 0);
8899 emit_jump_insn (gen_indirect_jump (scratch));
8904 /* Run just enough of rest_of_compilation to get the insns emitted.
8905 There's not really enough bulk here to make other passes such as
8906 instruction scheduling worth while. Note that use_thunk calls
8907 assemble_start_function and assemble_end_function. */
8908 insn = get_insns ();
8909 insn_locators_alloc ();
8910 shorten_branches (insn);
8911 final_start_function (insn, file, 1);
8912 final (insn, file, 1);
8913 final_end_function ();
8914 free_after_compilation (cfun);
8916 reload_completed = 0;
8917 epilogue_completed = 0;
8920 /* Return true if sparc_output_mi_thunk would be able to output the
8921 assembler code for the thunk function specified by the arguments
8922 it is passed, and false otherwise. */
8924 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
8925 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8926 HOST_WIDE_INT vcall_offset,
8927 const_tree function ATTRIBUTE_UNUSED)
8929 /* Bound the loop used in the default method above. */
8930 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8933 /* How to allocate a 'struct machine_function'. */
8935 static struct machine_function *
8936 sparc_init_machine_status (void)
8938 return GGC_CNEW (struct machine_function);
8941 /* Locate some local-dynamic symbol still in use by this function
8942 so that we can print its name in local-dynamic base patterns. */
8945 get_some_local_dynamic_name (void)
8949 if (cfun->machine->some_ld_name)
8950 return cfun->machine->some_ld_name;
8952 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8954 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8955 return cfun->machine->some_ld_name;
8961 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8966 && GET_CODE (x) == SYMBOL_REF
8967 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8969 cfun->machine->some_ld_name = XSTR (x, 0);
8976 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
8977 This is called from dwarf2out.c to emit call frame instructions
8978 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
8980 sparc_dwarf_handle_frame_unspec (const char *label,
8981 rtx pattern ATTRIBUTE_UNUSED,
8982 int index ATTRIBUTE_UNUSED)
8984 gcc_assert (index == UNSPECV_SAVEW);
8985 dwarf2out_window_save (label);
8988 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8989 We need to emit DTP-relative relocations. */
8992 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
8997 fputs ("\t.word\t%r_tls_dtpoff32(", file);
9000 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
9005 output_addr_const (file, x);
9009 /* Do whatever processing is required at the end of a file. */
9012 sparc_file_end (void)
9014 /* If we haven't emitted the special PIC helper function, do so now. */
9015 if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
9018 if (NEED_INDICATE_EXEC_STACK)
9019 file_end_indicate_exec_stack ();
9022 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
9023 /* Implement TARGET_MANGLE_TYPE. */
9026 sparc_mangle_type (const_tree type)
9029 && TYPE_MAIN_VARIANT (type) == long_double_type_node
9030 && TARGET_LONG_DOUBLE_128)
9033 /* For all other types, use normal C++ mangling. */
9038 /* Expand code to perform a 8 or 16-bit compare and swap by doing 32-bit
9039 compare and swap on the word containing the byte or half-word. */
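/* The expansion below aligns the address down to the containing 32-bit
   word, builds a shifted mask for the byte or half-word, merges the
   shifted old and new values with the bytes that must stay unchanged,
   and retries the word-sized compare-and-swap whenever those surrounding
   bytes turn out to have been modified concurrently.  */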
9042 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
9044 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
9045 rtx addr = gen_reg_rtx (Pmode);
9046 rtx off = gen_reg_rtx (SImode);
9047 rtx oldv = gen_reg_rtx (SImode);
9048 rtx newv = gen_reg_rtx (SImode);
9049 rtx oldvalue = gen_reg_rtx (SImode);
9050 rtx newvalue = gen_reg_rtx (SImode);
9051 rtx res = gen_reg_rtx (SImode);
9052 rtx resv = gen_reg_rtx (SImode);
9053 rtx memsi, val, mask, end_label, loop_label, cc;
9055 emit_insn (gen_rtx_SET (VOIDmode, addr,
9056 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
9058 if (Pmode != SImode)
9059 addr1 = gen_lowpart (SImode, addr1);
9060 emit_insn (gen_rtx_SET (VOIDmode, off,
9061 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
9063 memsi = gen_rtx_MEM (SImode, addr);
9064 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
9065 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
9067 val = force_reg (SImode, memsi);
9069 emit_insn (gen_rtx_SET (VOIDmode, off,
9070 gen_rtx_XOR (SImode, off,
9071 GEN_INT (GET_MODE (mem) == QImode
9074 emit_insn (gen_rtx_SET (VOIDmode, off,
9075 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
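/* OFF is now the bit offset of the sub-word within the aligned word;
   the XOR above accounts for SPARC's big-endian byte numbering.  */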
9077 if (GET_MODE (mem) == QImode)
9078 mask = force_reg (SImode, GEN_INT (0xff));
9080 mask = force_reg (SImode, GEN_INT (0xffff));
9082 emit_insn (gen_rtx_SET (VOIDmode, mask,
9083 gen_rtx_ASHIFT (SImode, mask, off)));
9085 emit_insn (gen_rtx_SET (VOIDmode, val,
9086 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
9089 oldval = gen_lowpart (SImode, oldval);
9090 emit_insn (gen_rtx_SET (VOIDmode, oldv,
9091 gen_rtx_ASHIFT (SImode, oldval, off)));
9093 newval = gen_lowpart_common (SImode, newval);
9094 emit_insn (gen_rtx_SET (VOIDmode, newv,
9095 gen_rtx_ASHIFT (SImode, newval, off)));
9097 emit_insn (gen_rtx_SET (VOIDmode, oldv,
9098 gen_rtx_AND (SImode, oldv, mask)));
9100 emit_insn (gen_rtx_SET (VOIDmode, newv,
9101 gen_rtx_AND (SImode, newv, mask)));
9103 end_label = gen_label_rtx ();
9104 loop_label = gen_label_rtx ();
9105 emit_label (loop_label);
9107 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
9108 gen_rtx_IOR (SImode, oldv, val)));
9110 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
9111 gen_rtx_IOR (SImode, newv, val)));
9113 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
9115 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
9117 emit_insn (gen_rtx_SET (VOIDmode, resv,
9118 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
9121 cc = gen_compare_reg_1 (NE, resv, val);
9122 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
9124 /* Use cbranchcc4 to separate the compare and branch! */
9125 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
9126 cc, const0_rtx, loop_label));
9128 emit_label (end_label);
9130 emit_insn (gen_rtx_SET (VOIDmode, res,
9131 gen_rtx_AND (SImode, res, mask)));
9133 emit_insn (gen_rtx_SET (VOIDmode, res,
9134 gen_rtx_LSHIFTRT (SImode, res, off)));
9136 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
9139 #include "gt-sparc.h"