/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "cfglayout.h"
#include "gimple.h"
#include "langhooks.h"
#include "reload.h"
#include "params.h"
#include "df.h"
#include "dwarf2out.h"
#include "opts.h"

/* Processor costs */
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs leon_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (15), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
const struct processor_costs *sparc_costs = &cypress_costs;
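/* Illustrative note: COSTS_N_INSNS (n) expresses n in the cost units the
   middle-end charges for one simple instruction, so e.g. niagara's
   COSTS_N_INSNS (83) for fdivd tells the rtx-cost machinery that a double
   divide is worth roughly 83 plain insns, discouraging passes from
   materializing divisions on that chip.  */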
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out if
   somebody does not branch between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
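/* Illustrative sketch (not taken from the assembler sources): with the
   slot reserved, the assembler sees

	or	%o7, %g0, %g1
	call	function
	 or	%g1, %g0, %o7

   and may relax the whole sequence into a plain branch when FUNCTION ends
   up close enough, whereas a sethi/jmp pair cannot be relaxed safely
   because other code could branch in between the two insns.  */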
/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};
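/* For instance, incoming argument register %i0 (hard reg 24) is remapped
   to %o0 (hard reg 8) by the table above, which is how a function that
   never needs a register window gets the SPARC leaf treatment; %fp
   (hard reg 30) deliberately stays unmapped, per the comment above.  */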
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};
struct GTY(()) machine_function
{
  /* Size of the frame of the function.  */
  HOST_WIDE_INT frame_size;

  /* Size of the frame of the function minus the register window save area
     and the outgoing argument area.  */
  HOST_WIDE_INT apparent_frame_size;

  /* Register we pretend the frame pointer is allocated to.  Normally, this
     is %fp, but if we are in a leaf procedure, this is (%sp + offset).  We
     record "offset" separately as it may be too big for (reg + disp).  */
  rtx frame_base_reg;
  HOST_WIDE_INT frame_base_offset;

  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* Number of global or FP registers to be saved (as 4-byte quantities).  */
  int n_global_fp_regs;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  bool leaf_function_p;

  /* True if the prologue saves local or in registers.  */
  bool save_local_in_regs_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_frame_size		cfun->machine->frame_size
#define sparc_apparent_frame_size	cfun->machine->apparent_frame_size
#define sparc_frame_base_reg		cfun->machine->frame_base_reg
#define sparc_frame_base_offset		cfun->machine->frame_base_offset
#define sparc_n_global_fp_regs		cfun->machine->n_global_fp_regs
#define sparc_leaf_function_p		cfun->machine->leaf_function_p
#define sparc_save_local_in_regs_p	cfun->machine->save_local_in_regs_p
#define sparc_prologue_data_valid_p	cfun->machine->prologue_data_valid_p

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static void sparc_option_override (void);
static void sparc_init_modes (void);
static void scan_record_type (const_tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				const_tree, bool, bool, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static bool sparc_legitimate_constant_p (enum machine_mode, rtx);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef TARGET_SOLARIS
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
						 tree) ATTRIBUTE_UNUSED;
#endif
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (enum machine_mode, rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static int sparc_register_move_cost (enum machine_mode,
				     reg_class_t, reg_class_t);
static bool sparc_rtx_costs (rtx, int, int, int *, bool);
static rtx sparc_function_value (const_tree, const_tree, bool);
static rtx sparc_libcall_value (enum machine_mode, const_rtx);
static bool sparc_function_value_regno_p (const unsigned int);
static rtx sparc_struct_value_rtx (tree, int);
static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
						      int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (cumulative_args_t);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx sparc_legitimize_tls_address (rtx);
static rtx sparc_legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static rtx sparc_delegitimize_address (rtx);
static bool sparc_mode_dependent_address_p (const_rtx);
static bool sparc_pass_by_reference (cumulative_args_t,
				     enum machine_mode, const_tree, bool);
static void sparc_function_arg_advance (cumulative_args_t,
					enum machine_mode, const_tree, bool);
static rtx sparc_function_arg_1 (cumulative_args_t,
				 enum machine_mode, const_tree, bool, bool);
static rtx sparc_function_arg (cumulative_args_t,
			       enum machine_mode, const_tree, bool);
static rtx sparc_function_incoming_arg (cumulative_args_t,
					enum machine_mode, const_tree, bool);
static unsigned int sparc_function_arg_boundary (enum machine_mode,
						 const_tree);
static int sparc_arg_partial_bytes (cumulative_args_t,
				    enum machine_mode, tree, bool);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
static rtx sparc_builtin_setjmp_frame_value (void);
static void sparc_conditional_register_usage (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);
static enum machine_mode sparc_preferred_simd_mode (enum machine_mode);
static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
static bool sparc_print_operand_punct_valid_p (unsigned char);
static void sparc_print_operand (FILE *, rtx, int);
static void sparc_print_operand_address (FILE *, rtx);

#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL, false }
};
#endif
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

/* Initialize the GCC target structure.  */
/* The default is to use .half rather than .short for aligned HI objects.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sparc_register_move_cost

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sparc_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sparc_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sparc_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sparc_option_override

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE sparc_builtin_setjmp_frame_value

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND sparc_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address

struct gcc_target targetm = TARGET_INITIALIZER;
/* Validate and override various options, and do some machine dependent
   initialization.  */

static void
sparc_option_override (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const enum processor_type processor;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
    { TARGET_CPU_v8, PROCESSOR_V8 },
    { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
    { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
    { TARGET_CPU_leon, PROCESSOR_LEON },
    { TARGET_CPU_sparclite, PROCESSOR_F930 },
    { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
    { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
    { TARGET_CPU_v9, PROCESSOR_V9 },
    { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
    { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
    { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
    { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
    { -1, PROCESSOR_V7 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  This must match the order of
     the PROCESSOR_* enumeration.  */
  static struct cpu_table {
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { MASK_ISA, 0 },
    { MASK_ISA, 0 },
    { MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { MASK_ISA, MASK_V8 },
    { MASK_ISA, MASK_V8|MASK_FPU },
    /* LEON */
    { MASK_ISA, MASK_V8|MASK_FPU },
    { MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.  */
    { MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU.  */
    { MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { MASK_ISA, MASK_SPARCLET },
    { MASK_ISA, MASK_V9 },
    /* UltraSPARC I, II, IIi */
    { MASK_ISA,
    /* Although insns using %y are deprecated, it is a clear win.  */
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC III */
    /* ??? Check if %y issue still holds true.  */
    { MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC T1 */
    { MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC T2 */
    { MASK_ISA, MASK_V9 },
  };
  const struct cpu_table *cpu;
  unsigned int i;
  int fpu;
#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64-bit archs to use 128-bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }

  /* Check that -fcall-saved-REG wasn't specified for out registers.  */
  for (i = 8; i < 16; i++)
    if (!call_used_regs [i])
      {
	error ("-fcall-saved-REG is not supported for out registers");
	call_used_regs [i] = 1;
      }

  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  if (!global_options_set.x_sparc_cpu_and_features)
    {
      for (def = &cpu_default[0]; def->cpu != -1; ++def)
	if (def->cpu == TARGET_CPU_DEFAULT)
	  break;
      gcc_assert (def->cpu != -1);
      sparc_cpu_and_features = def->processor;
    }
  if (!global_options_set.x_sparc_cpu)
    sparc_cpu = sparc_cpu_and_features;

  cpu = &cpu_table[(int) sparc_cpu_and_features];
  target_flags &= ~cpu->disable;
  target_flags |= cpu->enable;

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (target_flags_explicit & MASK_FPU)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA
	  || sparc_cpu == PROCESSOR_NIAGARA2))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (! TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_LEON:
      sparc_costs = &leon_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    case PROCESSOR_NATIVE:
      gcc_unreachable ();
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
			 ((sparc_cpu == PROCESSOR_ULTRASPARC
			   || sparc_cpu == PROCESSOR_NIAGARA
			   || sparc_cpu == PROCESSOR_NIAGARA2)
			  ? 2
			  : (sparc_cpu == PROCESSOR_ULTRASPARC3
			     ? 8 : 3)),
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
			 ((sparc_cpu == PROCESSOR_ULTRASPARC
			   || sparc_cpu == PROCESSOR_ULTRASPARC3
			   || sparc_cpu == PROCESSOR_NIAGARA
			   || sparc_cpu == PROCESSOR_NIAGARA2)
			  ? 64 : 32),
			 global_options.x_param_values,
			 global_options_set.x_param_values);

  /* Disable save slot sharing for call-clobbered registers by default.
     The IRA sharing algorithm works on single registers only and this
     pessimizes for double floating-point registers.  */
  if (!global_options_set.x_flag_ira_share_save_slots)
    flag_ira_share_save_slots = 0;
}
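/* Putting the pieces above together (illustrative): `-mcpu=ultrasparc'
   selects PROCESSOR_ULTRASPARC, whose cpu_table entry enables
   MASK_V9|MASK_DEPRECATED_V8_INSNS, and the switch statement then points
   sparc_costs at ultrasparc_costs so rtx costing matches that pipeline.  */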
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}
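/* E.g. these six codes are precisely the conditions the V9
   branch-on-register-contents and movr instructions can encode against
   zero (brz/brlez/brlz/brnz/brgz/brgez), which is why the predicate
   accepts them and nothing else.  */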
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}
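/* Worked example (illustrative): 1.0f has the single-precision image
   0x3f800000, which is too wide for a 13-bit signed immediate but has
   its low 10 bits clear, so a lone `sethi %hi(0x3f800000), %reg'
   suffices and this predicate returns nonzero for it.  */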
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
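/* By contrast (illustrative), 1.1f's image 0x3f8ccccd has nonzero low
   bits and does not fit in a simm13 either, so it needs the full
   `sethi %hi(x), %reg; or %reg, %lo(x), %reg' pair guarded here.  */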
/* Return true if the address of LABEL can be loaded by means of the
   mov{si,di}_pic_label_ref patterns in PIC mode.  */

static bool
can_use_mov_pic_label_ref (rtx label)
{
  /* VxWorks does not impose a fixed gap between segments; the run-time
     gap can be different from the object-file gap.  We therefore can't
     assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
     are absolutely sure that X is in the same segment as the GOT.
     Unfortunately, the flexibility of linker scripts means that we
     can't be sure of that in general, so assume that GOT-relative
     accesses are never valid on VxWorks.  */
  if (TARGET_VXWORKS_RTP)
    return false;

  /* Similarly, if the label is non-local, it might end up being placed
     in a different section than the current one; now mov_pic_label_ref
     requires the label and the code to be in the same section.  */
  if (LABEL_REF_NONLOCAL_P (label))
    return false;

  /* Finally, if we are reordering basic blocks and partition into hot
     and cold sections, this might happen for any label.  */
  if (flag_reorder_blocks_and_partition)
    return false;

  return true;
}
/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
	return false;

      if (!reload_in_progress)
	{
	  operands[0] = validize_mem (operands[0]);
	  operands[1] = force_reg (mode, operands[1]);
	}
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && sparc_tls_referenced_p (operands [1]))
    {
      operands[1] = sparc_legitimize_tls_address (operands[1]);
      return false;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
	operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);

      /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases.  */
      if (GET_CODE (operands[1]) == LABEL_REF
	  && can_use_mov_pic_label_ref (operands[1]))
	{
	  if (mode == SImode)
	    {
	      emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }

	  if (mode == DImode)
	    {
	      gcc_assert (TARGET_ARCH64);
	      emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }
	}

      if (symbolic_operand (operands[1], mode))
	{
	  operands[1]
	    = sparc_legitimize_pic_address (operands[1],
					    reload_in_progress
					    ? operands[0] : NULL_RTX);
	  return false;
	}
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
	  || SCALAR_FLOAT_MODE_P (mode)
	  || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
	 not storing directly into memory.  So fix this up to avoid
	 crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
	operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && const_zero_operand (operands[1], mode))
	return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
	  /* We are able to build any SF constant in integer registers
	     with at most 2 instructions.  */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && (reload_completed || reload_in_progress))))
	return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
	operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}
/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

static void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
		  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      GEN_INT (INTVAL (op1)
				       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode, temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
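/* Worked example (illustrative): loading 0x12345678 lands here and becomes

	sethi	%hi(0x12345678), %reg	! bits 31..10
	or	%reg, 0x278, %reg	! 0x12345678 & 0x3ff

   with the explicit mask/IOR split above letting CSE reuse the sethi
   half for neighbouring constants.  */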
/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

static void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 4TB of the virtual address
	 space.

	 sethi	%hi(symbol), %temp1
	 or	%temp1, %lo(symbol), %reg  */
      if (temp)
	temp1 = temp;  /* op0 is allowed.  */
      else
	temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 16TB of the virtual address
	 space.

	 sethi	%h44(symbol), %temp1
	 or	%temp1, %m44(symbol), %temp2
	 sllx	%temp2, 12, %temp3
	 or	%temp3, %l44(symbol), %reg  */
      if (temp)
	{
	  temp1 = op0;
	  temp2 = op0;
	  temp3 = temp;  /* op0 is allowed.  */
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
			      gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable can be placed anywhere in the virtual address
	 space.

	 sethi	%hh(symbol), %temp1
	 sethi	%lm(symbol), %temp2
	 or	%temp1, %hm(symbol), %temp3
	 sllx	%temp3, 32, %temp4
	 or	%temp4, %temp2, %temp5
	 or	%temp5, %lo(symbol), %reg  */
      if (temp)
	{
	  /* It is possible that one of the registers we got for operands[2]
	     might coincide with that of operands[0] (which is why we made
	     it TImode).  Pick the other one to use as our scratch.  */
	  if (rtx_equal_p (temp, op0))
	    {
	      gcc_assert (ti_temp);
	      temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
	    }
	  temp1 = op0;
	  temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	  temp3 = op0;
	  temp4 = op0;
	  temp5 = op0;
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	  temp4 = gen_reg_rtx (DImode);
	  temp5 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
			      gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
	 Essentially it is MEDLOW with a fixed 64-bit
	 virtual base added to all data segment addresses.
	 Text-segment stuff is computed like MEDANY, we can't
	 reuse the code above because the relocation knobs
	 look different.

	 Data segment:	sethi	%hi(symbol), %temp1
			add	%temp1, EMBMEDANY_BASE_REG, %temp2
			or	%temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
	{
	  if (temp)
	    {
	      temp1 = temp;  /* op0 is allowed.  */
	      temp2 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_sethi (temp1, op1));
	  emit_insn (gen_embmedany_brsum (temp2, temp1));
	  emit_insn (gen_embmedany_losum (op0, temp2, op1));
	}

      /* Text segment:	sethi	%uhi(symbol), %temp1
			sethi	%hi(symbol), %temp2
			or	%temp1, %ulo(symbol), %temp3
			sllx	%temp3, 32, %temp4
			or	%temp4, %temp2, %temp5
			or	%temp5, %lo(symbol), %reg  */
      else
	{
	  if (temp)
	    {
	      /* It is possible that one of the registers we got for operands[2]
		 might coincide with that of operands[0] (which is why we made
		 it TImode).  Pick the other one to use as our scratch.  */
	      if (rtx_equal_p (temp, op0))
		{
		  gcc_assert (ti_temp);
		  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
		}
	      temp1 = op0;
	      temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	      temp3 = op0;
	      temp4 = op0;
	      temp5 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	      temp3 = gen_reg_rtx (DImode);
	      temp4 = gen_reg_rtx (DImode);
	      temp5 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_textuhi (temp1, op1));
	  emit_insn (gen_embmedany_texthi  (temp2, op1));
	  emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
	  emit_insn (gen_rtx_SET (VOIDmode, temp4,
				  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
	  emit_insn (gen_rtx_SET (VOIDmode, temp5,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	  emit_insn (gen_embmedany_textlo (op0, temp5, op1));
	}
      break;

    default:
      gcc_unreachable ();
    }
}
#if HOST_BITS_PER_WIDE_INT == 32
static void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH, they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits, so that this
   matches a plain movdi, to alleviate the problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}
/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_safe_XOR64 (temp,
						  (-(HOST_WIDE_INT)0x400
						   | (low_bits & 0x3ff)))));
	}
    }
}
static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT high_bits,
			       unsigned HOST_WIDE_INT low_immediate,
			       int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
			  gen_rtx_ASHIFT (DImode, temp2,
					  GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
			    gen_safe_OR64 (op0, low_immediate)));
}
static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
				unsigned HOST_WIDE_INT high_bits,
				unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				sub_temp,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, temp3,
				  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp3)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	}
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
	 painful.  However we do still make an attempt to
	 avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low1)));
	  sub_temp = op0;
	  to_shift = 12;
	}
      else
	{
	  to_shift += 12;
	}
      if (low2 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low2)));
	  sub_temp = op0;
	  to_shift = 8;
	}
      else
	{
	  to_shift += 8;
	}
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (to_shift))));
      if (low3 != const0_rtx)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}
/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
			unsigned HOST_WIDE_INT low_bits,
			int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
	  && ((low_bits >> i) & 1))
	lowest_bit_set = i;
      if ((highest_bit_set == -1)
	  && ((high_bits >> (32 - i - 1)) & 1))
	highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
	 && ((highest_bit_set == -1)
	     || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
	{
	  if ((lowest_bit_set == -1)
	      && ((high_bits >> i) & 1))
	    lowest_bit_set = i + 32;
	  if ((highest_bit_set == -1)
	      && ((low_bits >> (32 - i - 1)) & 1))
	    highest_bit_set = 32 - i - 1;
	}
      while (++i < 32
	     && ((highest_bit_set == -1)
		 || (lowest_bit_set == -1)));
    }

  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);

  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
	{
	  if ((low_bits & (1 << i)) != 0)
	    continue;
	}
      else
	{
	  if ((high_bits & (1 << (i - 32))) != 0)
	    continue;
	}
      all_bits_between_are_set = 0;
      break;
    }

  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
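/* Worked example (illustrative): for high_bits == 0 and
   low_bits == 0x00ff0000, the scans above yield lowest_bit_set == 16,
   highest_bit_set == 23 and all_bits_between_are_set == 1, i.e. one
   contiguous run of bits that the callers can synthesize cheaply.  */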
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
		   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
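/* E.g. the 0x00ff0000 constant from the previous example spans only
   8 bit positions, comfortably below 21, so it is classified here as a
   2-insn constant (a sethi of the focused bits plus one shift).  */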
static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
							unsigned HOST_WIDE_INT,
							int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
			  unsigned HOST_WIDE_INT low_bits,
			  int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}
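/* Illustrative trace: with high_bits == 0, low_bits == 0x00ff0000,
   lowest_bit_set == 16 and shift == 10, this returns 0xff << 10 ==
   0x3fc00, a value sethi can set directly; the caller then shifts it
   left by 6 to put the run back at bits 16..23.  */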
/* Here we are sure to be arch64 and this is an integer constant
   being loaded into a register.  Emit the most efficient
   insn sequence possible.  Detection of all the 1-insn cases
   has been done already.  */
static void
sparc_emit_set_const64 (rtx op0, rtx op1)
{
  unsigned HOST_WIDE_INT high_bits, low_bits;
  int lowest_bit_set, highest_bit_set;
  int all_bits_between_are_set;
  rtx temp = 0;

  /* Sanity check that we know what we are working with.  */
  gcc_assert (TARGET_ARCH64
	      && (GET_CODE (op0) == SUBREG
		  || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));

  if (reload_in_progress || reload_completed)
    temp = op0;

  if (GET_CODE (op1) != CONST_INT)
    {
      sparc_emit_set_symbolic_const64 (op0, op1, temp);
      return;
    }

  if (! temp)
    temp = gen_reg_rtx (DImode);

  high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
  low_bits = (INTVAL (op1) & 0xffffffff);

  /* low_bits	bits 0  --> 31
     high_bits	bits 32 --> 63  */

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  /* First try for a 2-insn sequence.  */

  /* These situations are preferred because the optimizer can
   * do more things with them:
   * 1) mov	-1, %reg
   *    sllx	%reg, shift, %reg
   * 2) mov	-1, %reg
   *    srlx	%reg, shift, %reg
   * 3) mov	some_small_const, %reg
   *    sllx	%reg, shift, %reg
   */
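  /* For instance (illustrative), 0xffffffffffff0000 has
     highest_bit_set == 63 with every intervening bit set, so it takes
     form 1 above:
	 mov	-1, %reg
	 sllx	%reg, 16, %reg  */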
1827 if (((highest_bit_set == 63
1828 || lowest_bit_set == 0)
1829 && all_bits_between_are_set != 0)
1830 || ((highest_bit_set - lowest_bit_set) < 12))
1832 HOST_WIDE_INT the_const = -1;
      int shift = lowest_bit_set;

      if ((highest_bit_set != 63
	   && lowest_bit_set != 0)
	  || all_bits_between_are_set == 0)
	{
	  the_const =
	    create_simple_focus_bits (high_bits, low_bits,
				      lowest_bit_set, 0);
	}
      else if (lowest_bit_set == 0)
	shift = -(63 - highest_bit_set);

      gcc_assert (SPARC_SIMM13_P (the_const));
      gcc_assert (shift != 0);

      emit_insn (gen_safe_SET64 (temp, the_const));
      if (shift > 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_ASHIFT (DImode,
						temp,
						GEN_INT (shift))));
      else if (shift < 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_LSHIFTRT (DImode,
						  temp,
						  GEN_INT (-shift))));
      return;
    }

  /* Now a range of 22 or less bits set somewhere.
   * 1) sethi	%hi(focus_bits), %reg
   *    sllx	%reg, shift, %reg
   * 2) sethi	%hi(focus_bits), %reg
   *    srlx	%reg, shift, %reg
   */
  if ((highest_bit_set - lowest_bit_set) < 21)
    {
      unsigned HOST_WIDE_INT focus_bits =
	create_simple_focus_bits (high_bits, low_bits,
				  lowest_bit_set, 10);

      gcc_assert (SPARC_SETHI_P (focus_bits));
      gcc_assert (lowest_bit_set != 10);

      emit_insn (gen_safe_HIGH64 (temp, focus_bits));

      /* If lowest_bit_set == 10 then a sethi alone could have done it.  */
      if (lowest_bit_set < 10)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_LSHIFTRT (DImode, temp,
						  GEN_INT (10 - lowest_bit_set))));
      else if (lowest_bit_set > 10)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_ASHIFT (DImode, temp,
						GEN_INT (lowest_bit_set - 10))));
      return;
    }

  /* 1) sethi	%hi(low_bits), %reg
   *    or	%reg, %lo(low_bits), %reg
   * 2) sethi	%hi(~low_bits), %reg
   *	xor	%reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
   */
  if (high_bits == 0
      || high_bits == 0xffffffff)
    {
      sparc_emit_set_const64_quick1 (op0, temp, low_bits,
				     (high_bits == 0xffffffff));
      return;
    }

  /* Now, try 3-insn sequences.  */

  /* 1) sethi	%hi(high_bits), %reg
   *    or	%reg, %lo(high_bits), %reg
   *    sllx	%reg, 32, %reg
   */
  if (low_bits == 0)
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
      return;
    }

  /* We may be able to do something quick
     when the constant is negated, so try that.  */
  if (const64_is_2insns ((~high_bits) & 0xffffffff,
			 (~low_bits) & 0xfffffc00))
    {
      /* NOTE: The trailing bits get XOR'd so we need the
	 non-negated bits, not the negated ones.  */
      unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;

      if ((((~high_bits) & 0xffffffff) == 0
	   && ((~low_bits) & 0x80000000) == 0)
	  || (((~high_bits) & 0xffffffff) == 0xffffffff
	      && ((~low_bits) & 0x80000000) != 0))
	{
	  unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);

	  if ((SPARC_SETHI_P (fast_int)
	       && (~high_bits & 0xffffffff) == 0)
	      || SPARC_SIMM13_P (fast_int))
	    emit_insn (gen_safe_SET64 (temp, fast_int));
	  else
	    sparc_emit_set_const64 (temp, GEN_INT (fast_int));
	}
      else
	{
	  rtx negated_const;
	  negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
				   (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
	  sparc_emit_set_const64 (temp, negated_const);
	}

      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if (trailing_bits == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode,
				  op0,
				  gen_safe_XOR64 (temp,
						  (-0x400 | trailing_bits))));
	}
      return;
    }

  /* 1) sethi	%hi(xxx), %reg
   *    or	%reg, %lo(xxx), %reg
   *	sllx	%reg, yyy, %reg
   *
   * ??? This is just a generalized version of the low_bits==0
   * thing above, FIXME...
   */
  if ((highest_bit_set - lowest_bit_set) < 32)
    {
      unsigned HOST_WIDE_INT focus_bits =
	create_simple_focus_bits (high_bits, low_bits,
				  lowest_bit_set, 0);

      /* We can't get here in this state.  */
      gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);

      /* So what we know is that the set bits straddle the
	 middle of the 64-bit word.  */
      sparc_emit_set_const64_quick2 (op0, temp,
				     focus_bits, 0,
				     lowest_bit_set);
      return;
    }

  /* 1) sethi	%hi(high_bits), %reg
   *    or	%reg, %lo(high_bits), %reg
   *    sllx	%reg, 32, %reg
   *	or	%reg, low_bits, %reg
   */
  if (SPARC_SIMM13_P (low_bits)
      && ((int)low_bits > 0))
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
      return;
    }

  /* The easiest way when all else fails, is full decomposition.  */
  sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
}
#endif /* HOST_BITS_PER_WIDE_INT == 32 */
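
/* Worked example (added commentary, not from the original sources): to
   materialize 0x1234567800000000, high_bits is 0x12345678 and low_bits
   is 0, so the 3-insn path above fires:

	sethi	%hi(0x12345678), %reg
	or	%reg, %lo(0x12345678), %reg
	sllx	%reg, 32, %reg

   A constant whose set bits fit in a narrow window instead takes the
   sethi+shift "focus bits" path, and everything else falls back to
   sparc_emit_set_const64_longway.  */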
/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
   return the mode to be used for the comparison.  For floating-point,
   CCFP[E]mode is used.  CC_NOOVmode should be used when the first operand
   is a PLUS, MINUS, NEG, or ASHIFT.  CCmode should be used when no special
   processing is needed.  */

enum machine_mode
select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    {
      switch (op)
	{
	case EQ: case NE: case UNORDERED: case ORDERED:
	case UNLT: case UNLE: case UNGT: case UNGE:
	case UNEQ: case LTGT:
	  return CCFPmode;

	case LT: case LE: case GT: case GE:
	  return CCFPEmode;

	default:
	  gcc_unreachable ();
	}
    }
  else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
	   || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
    {
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
	return CCX_NOOVmode;
      else
	return CC_NOOVmode;
    }
  else
    {
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
	return CCXmode;
      else
	return CCmode;
    }
}
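
/* Example (added commentary): for (compare (plus:SI a b) (const_int 0)),
   the PLUS operand selects CC_NOOVmode, since the result must not rely on
   the overflow bit set by addcc; a plain register comparison gets CCmode,
   and the DImode variants map to CCX[_NOOV]mode on 64-bit.  */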
/* Emit the compare insn and return the CC reg for a CODE comparison
   with operands X and Y.  */

static rtx
gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
{
  enum machine_mode mode;
  rtx cc_reg;

  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
    return x;

  mode = SELECT_CC_MODE (code, x, y);

  /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
     fcc regs (cse can't tell they're really call clobbered regs and will
     remove a duplicate comparison even if there is an intervening function
     call - it will then try to reload the cc reg via an int reg which is why
     we need the movcc patterns).  It is possible to provide the movcc
     patterns by using the ldxfsr/stxfsr v9 insns.  I tried it: you need two
     registers (say %g1,%g5) and it takes about 6 insns.  A better fix would be
     to tell cse that CCFPE mode registers (even pseudos) are call
     clobbered.  */

  /* ??? This is an experiment.  Rather than making changes to cse which may
     or may not be easy/clean, we do our own cse.  This is possible because
     we will generate hard registers.  Cse knows they're call clobbered (it
     doesn't know the same thing about pseudos).  If we guess wrong, no big
     deal, but if we win, great!  */

  if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
#if 1 /* experiment */
    {
      int reg;
      /* We cycle through the registers to ensure they're all exercised.  */
      static int next_fcc_reg = 0;
      /* Previous x,y for each fcc reg.  */
      static rtx prev_args[4][2];

      /* Scan prev_args for x,y.  */
      for (reg = 0; reg < 4; reg++)
	if (prev_args[reg][0] == x && prev_args[reg][1] == y)
	  break;
      if (reg == 4)
	{
	  reg = next_fcc_reg;
	  prev_args[reg][0] = x;
	  prev_args[reg][1] = y;
	  next_fcc_reg = (next_fcc_reg + 1) & 3;
	}
      cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
    }
#else
    cc_reg = gen_reg_rtx (mode);
#endif /* ! experiment */
  else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
  else
    cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);

  /* We shouldn't get there for TFmode if !TARGET_HARD_QUAD.  If we do, this
     will only result in an unrecognizable insn so no point in asserting.  */
  emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));

  return cc_reg;
}

/* Emit the compare insn and return the CC reg for the comparison in CMP.  */

rtx
gen_compare_reg (rtx cmp)
{
  return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
}
/* This function is used for v9 only.
   DEST is the target of the Scc insn.
   CODE is the code for an Scc's comparison.
   X and Y are the values we compare.

   This function is needed to turn

	   (set (reg:SI 110)
	       (gt (reg:CCX 100 %icc)
	           (const_int 0)))
   into
	   (set (reg:SI 110)
	       (gt:DI (reg:CCX 100 %icc)
	           (const_int 0)))

   IE: The instruction recognizer needs to see the mode of the comparison to
   find the right instruction.  We could use "gt:DI" right in the
   define_expand, but leaving it out allows us to handle DI, SI, etc.  */

static int
gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
{
  if (! TARGET_ARCH64
      && (GET_MODE (x) == DImode
	  || GET_MODE (dest) == DImode))
    return 0;

  /* Try to use the movrCC insns.  */
  if (TARGET_ARCH64
      && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
      && y == const0_rtx
      && v9_regcmp_p (compare_code))
    {
      rtx op0 = x;
      rtx temp;

      /* Special case for op0 != 0.  This can be done with one instruction if
	 dest == x.  */
      if (compare_code == NE
	  && GET_MODE (dest) == DImode
	  && rtx_equal_p (op0, dest))
	{
	  emit_insn (gen_rtx_SET (VOIDmode, dest,
			      gen_rtx_IF_THEN_ELSE (DImode,
				       gen_rtx_fmt_ee (compare_code, DImode,
						       op0, const0_rtx),
				       const1_rtx,
				       dest)));
	  return 1;
	}

      if (reg_overlap_mentioned_p (dest, op0))
	{
	  /* Handle the case where dest == x.
	     We "early clobber" the result.  */
	  op0 = gen_reg_rtx (GET_MODE (x));
	  emit_move_insn (op0, x);
	}

      emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
      if (GET_MODE (op0) != DImode)
	{
	  temp = gen_reg_rtx (DImode);
	  convert_move (temp, op0, 0);
	}
      else
	temp = op0;
      emit_insn (gen_rtx_SET (VOIDmode, dest,
			  gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
				   gen_rtx_fmt_ee (compare_code, DImode,
						   temp, const0_rtx),
				   const1_rtx,
				   dest)));
      return 1;
    }
  else
    {
      x = gen_compare_reg_1 (compare_code, x, y);
      y = const0_rtx;

      gcc_assert (GET_MODE (x) != CC_NOOVmode
		  && GET_MODE (x) != CCX_NOOVmode);

      emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
      emit_insn (gen_rtx_SET (VOIDmode, dest,
			  gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
				   gen_rtx_fmt_ee (compare_code,
						   GET_MODE (x), x, y),
				    const1_rtx, dest)));
      return 1;
    }
}
/* Emit an scc insn.  For seq, sne, sgeu, and sltu, we can do this
   without jumps using the addx/subx instructions.  */

void
emit_scc_insn (rtx operands[])
{
  rtx tem;
  rtx x = operands[2];
  rtx y = operands[3];
  enum rtx_code code;

  /* The quad-word fp compare library routines all return nonzero to indicate
     true, which is different from the equivalent libgcc routines, so we must
     handle them specially here.  */
  if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
    {
      operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
					      GET_CODE (operands[1]));
      operands[2] = XEXP (operands[1], 0);
      operands[3] = XEXP (operands[1], 1);
    }

  code = GET_CODE (operands[1]);
  x = operands[2];
  y = operands[3];

  /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
     more applications).  The exception to this is "reg != 0" which can
     be done in one instruction on v9 (so we do it).  */
  if (code == EQ)
    {
      if (GET_MODE (x) == SImode)
	{
	  rtx pat = gen_seqsi_special (operands[0], x, y);
	  emit_insn (pat);
	  return;
	}
      else if (GET_MODE (x) == DImode)
	{
	  rtx pat = gen_seqdi_special (operands[0], x, y);
	  emit_insn (pat);
	  return;
	}
    }

  if (code == NE)
    {
      if (GET_MODE (x) == SImode)
	{
	  rtx pat = gen_snesi_special (operands[0], x, y);
	  emit_insn (pat);
	  return;
	}
      else if (GET_MODE (x) == DImode)
	{
	  rtx pat = gen_snedi_special (operands[0], x, y);
	  emit_insn (pat);
	  return;
	}
    }

  /* For the rest, on v9 we can use conditional moves.  */

  if (TARGET_V9)
    {
      if (gen_v9_scc (operands[0], code, x, y))
	return;
    }

  /* We can do LTU and GEU using the addx/subx instructions too.  And
     for GTU/LEU, if both operands are registers swap them and fall
     back to the easy case.  */
  if (code == GTU || code == LEU)
    {
      if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
	  && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
	{
	  tem = x;
	  x = y;
	  y = tem;
	  code = swap_condition (code);
	}
    }

  if (code == LTU || code == GEU)
    {
      emit_insn (gen_rtx_SET (VOIDmode, operands[0],
			      gen_rtx_fmt_ee (code, SImode,
					      gen_compare_reg_1 (code, x, y),
					      const0_rtx)));
      return;
    }

  /* Nope, do branches.  */
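
/* Example (added commentary): via the LTU path above, the SImode test
   "a < b" (unsigned) expands to a subcc that leaves the borrow in the
   carry flag followed by "addx %g0, 0, dest", i.e. dest = carry, which
   yields 0 or 1 without any branch.  */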
/* Emit a conditional jump insn for the v9 architecture using comparison code
   CODE and jump target LABEL.
   This function exists to take advantage of the v9 brxx insns.  */

static void
emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
{
  emit_jump_insn (gen_rtx_SET (VOIDmode,
			   pc_rtx,
			   gen_rtx_IF_THEN_ELSE (VOIDmode,
				    gen_rtx_fmt_ee (code, GET_MODE (op0),
						    op0, const0_rtx),
				    gen_rtx_LABEL_REF (VOIDmode, label),
				    pc_rtx)));
}

void
emit_conditional_branch_insn (rtx operands[])
{
  /* The quad-word fp compare library routines all return nonzero to indicate
     true, which is different from the equivalent libgcc routines, so we must
     handle them specially here.  */
  if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
    {
      operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
					      GET_CODE (operands[0]));
      operands[1] = XEXP (operands[0], 0);
      operands[2] = XEXP (operands[0], 1);
    }

  if (TARGET_ARCH64 && operands[2] == const0_rtx
      && GET_CODE (operands[1]) == REG
      && GET_MODE (operands[1]) == DImode)
    {
      emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
      return;
    }

  operands[1] = gen_compare_reg (operands[0]);
  operands[2] = const0_rtx;
  operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
				operands[1], operands[2]);
  emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
				  operands[3]));
}
/* Generate a DFmode part of a hard TFmode register.
   REG is the TFmode hard register, LOW is 1 for the
   low 64bit of the register and 0 otherwise.
 */

rtx
gen_df_reg (rtx reg, int low)
{
  int regno = REGNO (reg);

  if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
    regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;

  return gen_rtx_REG (DFmode, regno);
}
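
/* Example (added commentary): with WORDS_BIG_ENDIAN set, a TFmode value in
   %f0 spans %f0-%f3 on 32-bit, so gen_df_reg (reg, 0) returns the high
   DFmode half %f0 and gen_df_reg (reg, 1) the low half %f2.  The
   "regno < 32" case covers TARGET_ARCH64 integer registers, whose halves
   are adjacent 8-byte registers.  */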
/* Generate a call to FUNC with OPERANDS.  Operand 0 is the return value.
   Unlike normal calls, TFmode operands are passed by reference.  It is
   assumed that no more than 3 operands are required.  */

static void
emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
{
  rtx ret_slot = NULL, arg[3], func_sym;
  int i;

  /* We only expect to be called for conversions, unary, and binary ops.  */
  gcc_assert (nargs == 2 || nargs == 3);

  for (i = 0; i < nargs; ++i)
    {
      rtx this_arg = operands[i];
      rtx this_slot;

      /* TFmode arguments and return values are passed by reference.  */
      if (GET_MODE (this_arg) == TFmode)
	{
	  int force_stack_temp;

	  force_stack_temp = 0;
	  if (TARGET_BUGGY_QP_LIB && i == 0)
	    force_stack_temp = 1;

	  if (GET_CODE (this_arg) == MEM
	      && ! force_stack_temp)
	    this_arg = XEXP (this_arg, 0);
	  else if (CONSTANT_P (this_arg)
		   && ! force_stack_temp)
	    {
	      this_slot = force_const_mem (TFmode, this_arg);
	      this_arg = XEXP (this_slot, 0);
	    }
	  else
	    {
	      this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);

	      /* Operand 0 is the return value.  We'll copy it out later.  */
	      if (i > 0)
		emit_move_insn (this_slot, this_arg);
	      else
		ret_slot = this_slot;

	      this_arg = XEXP (this_slot, 0);
	    }
	}

      arg[i] = this_arg;
    }

  func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);

  if (GET_MODE (operands[0]) == TFmode)
    {
      if (nargs == 2)
	emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
			   arg[0], GET_MODE (arg[0]),
			   arg[1], GET_MODE (arg[1]));
      else
	emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
			   arg[0], GET_MODE (arg[0]),
			   arg[1], GET_MODE (arg[1]),
			   arg[2], GET_MODE (arg[2]));

      if (ret_slot)
	emit_move_insn (operands[0], ret_slot);
    }
  else
    {
      rtx ret;

      gcc_assert (nargs == 2);

      ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
				     GET_MODE (operands[0]), 1,
				     arg[1], GET_MODE (arg[1]));

      if (ret != operands[0])
	emit_move_insn (operands[0], ret);
    }
}
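
/* Note (added commentary): with the 64-bit Sun ABI, these calls land on
   the _Qp_* routines, e.g. _Qp_add (&result, &a, &b), where all three
   TFmode values are passed by reference -- hence the address-taking above
   rather than loading the 128-bit values into registers.  */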
/* Expand soft-float TFmode calls to sparc abi routines.  */

static void
emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
{
  const char *func;

  switch (code)
    {
    case PLUS:  func = "_Qp_add"; break;
    case MINUS: func = "_Qp_sub"; break;
    case MULT:  func = "_Qp_mul"; break;
    case DIV:   func = "_Qp_div"; break;
    default:    gcc_unreachable ();
    }

  emit_soft_tfmode_libcall (func, 3, operands);
}

static void
emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
{
  const char *func;

  gcc_assert (code == SQRT);
  func = "_Qp_sqrt";

  emit_soft_tfmode_libcall (func, 2, operands);
}

static void
emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
{
  const char *func;

  switch (code)
    {
    case FLOAT_EXTEND:
      switch (GET_MODE (operands[1]))
	{
	case SFmode: func = "_Qp_stoq"; break;
	case DFmode: func = "_Qp_dtoq"; break;
	default: gcc_unreachable ();
	}
      break;

    case FLOAT_TRUNCATE:
      switch (GET_MODE (operands[0]))
	{
	case SFmode: func = "_Qp_qtos"; break;
	case DFmode: func = "_Qp_qtod"; break;
	default: gcc_unreachable ();
	}
      break;

    case FLOAT:
      switch (GET_MODE (operands[1]))
	{
	case SImode:
	  func = "_Qp_itoq";
	  if (TARGET_ARCH64)
	    operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
	  break;
	case DImode: func = "_Qp_xtoq"; break;
	default: gcc_unreachable ();
	}
      break;

    case UNSIGNED_FLOAT:
      switch (GET_MODE (operands[1]))
	{
	case SImode:
	  func = "_Qp_uitoq";
	  if (TARGET_ARCH64)
	    operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
	  break;
	case DImode: func = "_Qp_uxtoq"; break;
	default: gcc_unreachable ();
	}
      break;

    case FIX:
      switch (GET_MODE (operands[0]))
	{
	case SImode: func = "_Qp_qtoi"; break;
	case DImode: func = "_Qp_qtox"; break;
	default: gcc_unreachable ();
	}
      break;

    case UNSIGNED_FIX:
      switch (GET_MODE (operands[0]))
	{
	case SImode: func = "_Qp_qtoui"; break;
	case DImode: func = "_Qp_qtoux"; break;
	default: gcc_unreachable ();
	}
      break;

    default:
      gcc_unreachable ();
    }

  emit_soft_tfmode_libcall (func, 2, operands);
}
/* Expand a hard-float tfmode operation.  All arguments must be in
   registers.  */

static void
emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
{
  rtx op, dest;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    {
      operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
      op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
    }
  else
    {
      operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
      operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
      op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
			   operands[1], operands[2]);
    }

  if (register_operand (operands[0], VOIDmode))
    dest = operands[0];
  else
    dest = gen_reg_rtx (GET_MODE (operands[0]));

  emit_insn (gen_rtx_SET (VOIDmode, dest, op));

  if (dest != operands[0])
    emit_move_insn (operands[0], dest);
}

void
emit_tfmode_binop (enum rtx_code code, rtx *operands)
{
  if (TARGET_HARD_QUAD)
    emit_hard_tfmode_operation (code, operands);
  else
    emit_soft_tfmode_binop (code, operands);
}

void
emit_tfmode_unop (enum rtx_code code, rtx *operands)
{
  if (TARGET_HARD_QUAD)
    emit_hard_tfmode_operation (code, operands);
  else
    emit_soft_tfmode_unop (code, operands);
}

void
emit_tfmode_cvt (enum rtx_code code, rtx *operands)
{
  if (TARGET_HARD_QUAD)
    emit_hard_tfmode_operation (code, operands);
  else
    emit_soft_tfmode_cvt (code, operands);
}
/* Return nonzero if a branch/jump/call instruction will emit a nop
   into its delay slot.  */

int
empty_delay_slot (rtx insn)
{
  rtx seq;

  /* If no previous instruction (should not happen), return true.  */
  if (PREV_INSN (insn) == NULL)
    return 1;

  seq = NEXT_INSN (PREV_INSN (insn));
  if (GET_CODE (PATTERN (seq)) == SEQUENCE)
    return 0;

  return 1;
}

/* Return nonzero if TRIAL can go into the call delay slot.  */

int
tls_call_delay (rtx trial)
{
  rtx pat;

  /* Binutils allows
       call __tls_get_addr, %tgd_call (foo)
        add %l7, %o0, %o0, %tgd_add (foo)
     while Sun as/ld does not.  */
  if (TARGET_GNU_TLS || !TARGET_TLS)
    return 1;

  pat = PATTERN (trial);

  /* We must reject tgd_add{32|64}, i.e.
       (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
     and tldm_add{32|64}, i.e.
       (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM))).  */
  if (GET_CODE (pat) == SET
      && GET_CODE (SET_SRC (pat)) == PLUS)
    {
      rtx unspec = XEXP (SET_SRC (pat), 1);

      if (GET_CODE (unspec) == UNSPEC
	  && (XINT (unspec, 1) == UNSPEC_TLSGD
	      || XINT (unspec, 1) == UNSPEC_TLSLDM))
	return 0;
    }

  return 1;
}
/* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
   instruction.  RETURN_P is true if the v9 variant 'return' is to be
   considered in the test too.

   TRIAL must be a SET whose destination is a REG appropriate for the
   'restore' instruction or, if RETURN_P is true, for the 'return'
   instruction.  */

static int
eligible_for_restore_insn (rtx trial, bool return_p)
{
  rtx pat = PATTERN (trial);
  rtx src = SET_SRC (pat);

  /* The 'restore src,%g0,dest' pattern for word mode and below.  */
  if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
      && arith_operand (src, GET_MODE (src)))
    {
      if (TARGET_ARCH64)
	return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
      else
	return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
    }

  /* The 'restore src,%g0,dest' pattern for double-word mode.  */
  else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
	   && arith_double_operand (src, GET_MODE (src)))
    return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);

  /* The 'restore src,%g0,dest' pattern for float if no FPU.  */
  else if (! TARGET_FPU && register_operand (src, SFmode))
    return 1;

  /* The 'restore src,%g0,dest' pattern for double if no FPU.  */
  else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
    return 1;

  /* If we have the 'return' instruction, anything that does not use
     local or output registers and can go into a delay slot wins.  */
  else if (return_p
	   && !epilogue_renumber (&pat, 1)
	   && get_attr_in_uncond_branch_delay (trial)
	      == IN_UNCOND_BRANCH_DELAY_TRUE)
    return 1;

  /* The 'restore src1,src2,dest' pattern for SImode.  */
  else if (GET_CODE (src) == PLUS
	   && register_operand (XEXP (src, 0), SImode)
	   && arith_operand (XEXP (src, 1), SImode))
    return 1;

  /* The 'restore src1,src2,dest' pattern for DImode.  */
  else if (GET_CODE (src) == PLUS
	   && register_operand (XEXP (src, 0), DImode)
	   && arith_double_operand (XEXP (src, 1), DImode))
    return 1;

  /* The 'restore src1,%lo(src2),dest' pattern.  */
  else if (GET_CODE (src) == LO_SUM
	   && ! TARGET_CM_MEDMID
	   && ((register_operand (XEXP (src, 0), SImode)
		&& immediate_operand (XEXP (src, 1), SImode))
	       || (TARGET_ARCH64
		   && register_operand (XEXP (src, 0), DImode)
		   && immediate_operand (XEXP (src, 1), DImode))))
    return 1;

  /* The 'restore src,src,dest' pattern.  */
  else if (GET_CODE (src) == ASHIFT
	   && (register_operand (XEXP (src, 0), SImode)
	       || register_operand (XEXP (src, 0), DImode))
	   && XEXP (src, 1) == const1_rtx)
    return 1;

  return 0;
}
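
/* Example (added commentary): an insn like (set (reg:SI %i0)
   (plus:SI (reg:SI %i1) (const_int 8))) satisfies the PLUS case above,
   so the epilogue can fold it into a single "restore %i1, 8, %o0",
   performing the addition across the register window switch for free.  */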
/* Return nonzero if TRIAL can go into the function return's delay slot.  */

int
eligible_for_return_delay (rtx trial)
{
  rtx pat;

  if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
    return 0;

  if (get_attr_length (trial) != 1)
    return 0;

  /* If the function uses __builtin_eh_return, the eh_return machinery
     occupies the delay slot.  */
  if (crtl->calls_eh_return)
    return 0;

  /* In the case of a leaf or flat function, anything can go into the slot.  */
  if (sparc_leaf_function_p || TARGET_FLAT)
    return
      get_attr_in_uncond_branch_delay (trial) == IN_UNCOND_BRANCH_DELAY_TRUE;

  pat = PATTERN (trial);

  /* Otherwise, only operations which can be done in tandem with
     a `restore' or `return' insn can go into the delay slot.  */
  if (GET_CODE (SET_DEST (pat)) != REG
      || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
    return 0;

  /* If this instruction sets up a floating-point register and we have a
     return instruction, it can probably go in.  But restore will not work
     with FP_REGS.  */
  if (REGNO (SET_DEST (pat)) >= 32)
    return (TARGET_V9
	    && !epilogue_renumber (&pat, 1)
	    && get_attr_in_uncond_branch_delay (trial)
	       == IN_UNCOND_BRANCH_DELAY_TRUE);

  return eligible_for_restore_insn (trial, true);
}

/* Return nonzero if TRIAL can go into the sibling call's delay slot.  */

int
eligible_for_sibcall_delay (rtx trial)
{
  rtx pat;

  if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
    return 0;

  if (get_attr_length (trial) != 1)
    return 0;

  pat = PATTERN (trial);

  if (sparc_leaf_function_p || TARGET_FLAT)
    {
      /* If the tail call is done using the call instruction,
	 we have to restore %o7 in the delay slot.  */
      if (LEAF_SIBCALL_SLOT_RESERVED_P)
	return 0;

      /* %g1 is used to build the function address */
      if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
	return 0;

      return 1;
    }

  /* Otherwise, only operations which can be done in tandem with
     a `restore' insn can go into the delay slot.  */
  if (GET_CODE (SET_DEST (pat)) != REG
      || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
      || REGNO (SET_DEST (pat)) >= 32)
    return 0;

  /* If it mentions %o7, it can't go in, because sibcall will clobber it
     in the delay slot.  */
  if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
    return 0;

  return eligible_for_restore_insn (trial, false);
}
int
short_branch (int uid1, int uid2)
{
  int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);

  /* Leave a few words of "slop".  */
  if (delta >= -1023 && delta <= 1022)
    return 1;

  return 0;
}

/* Return nonzero if REG is not used after INSN.
   We assume REG is a reload reg, and therefore does
   not live past labels or calls or jumps.  */
int
reg_unused_after (rtx reg, rtx insn)
{
  enum rtx_code code, prev_code = UNKNOWN;

  while ((insn = NEXT_INSN (insn)))
    {
      if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
	return 1;

      code = GET_CODE (insn);
      if (GET_CODE (insn) == CODE_LABEL)
	return 1;

      if (INSN_P (insn))
	{
	  rtx set = single_set (insn);
	  int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
	  if (set && in_src)
	    return 0;
	  if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
	    return 1;
	  if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
	    return 0;
	}
      prev_code = code;
    }
  return 1;
}
/* Determine if it's legal to put X into the constant pool.  This
   is not possible if X contains the address of a symbol that is
   not constant (TLS) or not known at final link time (PIC).  */

static bool
sparc_cannot_force_const_mem (enum machine_mode mode, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
      /* Accept all non-symbolic constants.  */
      return false;

    case LABEL_REF:
      /* Labels are OK iff we are non-PIC.  */
      return flag_pic != 0;

    case SYMBOL_REF:
      /* 'Naked' TLS symbol references are never OK,
	 non-TLS symbols are OK iff we are non-PIC.  */
      if (SYMBOL_REF_TLS_MODEL (x))
	return true;
      else
	return flag_pic != 0;

    case CONST:
      return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
    case PLUS:
    case MINUS:
      return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
	     || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
    case UNSPEC:
      return true;
    default:
      gcc_unreachable ();
    }
}
/* Global Offset Table support.  */
static GTY(()) rtx got_helper_rtx = NULL_RTX;
static GTY(()) rtx global_offset_table_rtx = NULL_RTX;

/* Return the SYMBOL_REF for the Global Offset Table.  */

static GTY(()) rtx sparc_got_symbol = NULL_RTX;

static rtx
sparc_got (void)
{
  if (!sparc_got_symbol)
    sparc_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");

  return sparc_got_symbol;
}

/* Ensure that we are not using patterns that are not OK with PIC.  */

int
check_pic (int i)
{
  rtx op;

  switch (flag_pic)
    {
    case 1:
      op = recog_data.operand[i];
      gcc_assert (GET_CODE (op) != SYMBOL_REF
		  && (GET_CODE (op) != CONST
		      || (GET_CODE (XEXP (op, 0)) == MINUS
			  && XEXP (XEXP (op, 0), 0) == sparc_got ()
			  && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
    case 2:
    default:
      return 1;
    }
}

/* Return true if X is an address which needs a temporary register when
   reloaded while generating PIC code.  */

int
pic_address_needs_scratch (rtx x)
{
  /* An address which is a symbolic plus a non SMALL_INT needs a temp reg.  */
  if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
    return 1;

  return 0;
}
/* Determine if a given RTX is a valid constant.  We already know this
   satisfies CONSTANT_P.  */

static bool
sparc_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
    case SYMBOL_REF:
      if (sparc_tls_referenced_p (x))
	return false;
      break;

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode)
	return true;

      /* Floating point constants are generally not ok.
	 The only exception is 0.0 in VIS.  */
      if (TARGET_VIS
	  && SCALAR_FLOAT_MODE_P (mode)
	  && const_zero_operand (x, mode))
	return true;

      return false;

    case CONST_VECTOR:
      /* Vector constants are generally not ok.
	 The only exception is 0 in VIS.  */
      if (TARGET_VIS
	  && const_zero_operand (x, mode))
	return true;

      return false;

    default:
      break;
    }

  return true;
}

/* Determine if a given RTX is a valid constant address.  */

bool
constant_address_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case CONST_INT:
    case HIGH:
      return true;

    case CONST:
      if (flag_pic && pic_address_needs_scratch (x))
	return false;
      return sparc_legitimate_constant_p (Pmode, x);

    case SYMBOL_REF:
      return !flag_pic && sparc_legitimate_constant_p (Pmode, x);

    default:
      return false;
    }
}

/* Nonzero if the constant value X is a legitimate general operand
   when generating PIC code.  It is given that flag_pic is on and
   that X satisfies CONSTANT_P or is a CONST_DOUBLE.  */

bool
legitimate_pic_operand_p (rtx x)
{
  if (pic_address_needs_scratch (x))
    return false;
  if (sparc_tls_referenced_p (x))
    return false;
  return true;
}
#define RTX_OK_FOR_OFFSET_P(X, MODE)			\
  (CONST_INT_P (X)					\
   && INTVAL (X) >= -0x1000				\
   && INTVAL (X) < (0x1000 - GET_MODE_SIZE (MODE)))

#define RTX_OK_FOR_OLO10_P(X, MODE)			\
  (CONST_INT_P (X)					\
   && INTVAL (X) >= -0x1000				\
   && INTVAL (X) < (0xc00 - GET_MODE_SIZE (MODE)))
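
/* Note (added commentary): the 13-bit signed immediate field of SPARC
   memory instructions spans [-4096, 4095]; the upper bound above is
   shrunk by the access size so the last byte accessed still fits.  The
   OLO10 variant stops at 0xc00 because a 10-bit %lo() part, at most
   0x3ff, may still be added without overflowing the field.  */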
/* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.

   On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
   ordinarily.  This changes a bit when generating PIC.  */

static bool
sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
{
  rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;

  if (REG_P (addr) || GET_CODE (addr) == SUBREG)
    rs1 = addr;
  else if (GET_CODE (addr) == PLUS)
    {
      rs1 = XEXP (addr, 0);
      rs2 = XEXP (addr, 1);

      /* Canonicalize.  REG comes first, if there are no regs,
	 LO_SUM comes first.  */
      if (!REG_P (rs1)
	  && GET_CODE (rs1) != SUBREG
	  && (REG_P (rs2)
	      || GET_CODE (rs2) == SUBREG
	      || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
	{
	  rs1 = XEXP (addr, 1);
	  rs2 = XEXP (addr, 0);
	}

      if ((flag_pic == 1
	   && rs1 == pic_offset_table_rtx
	   && !REG_P (rs2)
	   && GET_CODE (rs2) != SUBREG
	   && GET_CODE (rs2) != LO_SUM
	   && GET_CODE (rs2) != MEM
	   && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
	   && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
	   && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
	  || ((REG_P (rs1)
	       || GET_CODE (rs1) == SUBREG)
	      && RTX_OK_FOR_OFFSET_P (rs2, mode)))
	{
	  imm1 = rs2;
	  rs2 = NULL;
	}
      else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
	       && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
	{
	  /* We prohibit REG + REG for TFmode when there are no quad move insns
	     and we consequently need to split.  We do this because REG+REG
	     is not an offsettable address.  If we get the situation in reload
	     where source and destination of a movtf pattern are both MEMs with
	     REG+REG address, then only one of them gets converted to an
	     offsettable address.  */
	  if (mode == TFmode
	      && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
	    return 0;

	  /* We prohibit REG + REG on ARCH32 if not optimizing for
	     DFmode/DImode because then mem_min_alignment is likely to be zero
	     after reload and the forced split would lack a matching splitter
	     pattern.  */
	  if (TARGET_ARCH32 && !optimize
	      && (mode == DFmode || mode == DImode))
	    return 0;
	}
      else if (USE_AS_OFFSETABLE_LO10
	       && GET_CODE (rs1) == LO_SUM
	       && TARGET_ARCH64
	       && ! TARGET_CM_MEDMID
	       && RTX_OK_FOR_OLO10_P (rs2, mode))
	{
	  rs2 = NULL;
	  imm1 = XEXP (rs1, 1);
	  rs1 = XEXP (rs1, 0);
	  if (!CONSTANT_P (imm1)
	      || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
	    return 0;
	}
    }
  else if (GET_CODE (addr) == LO_SUM)
    {
      rs1 = XEXP (addr, 0);
      imm1 = XEXP (addr, 1);

      if (!CONSTANT_P (imm1)
	  || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
	return 0;

      /* We can't allow TFmode in 32-bit mode, because an offset greater
	 than the alignment (8) may cause the LO_SUM to overflow.  */
      if (mode == TFmode && TARGET_ARCH32)
	return 0;
    }
  else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
    return 1;
  else
    return 0;

  if (GET_CODE (rs1) == SUBREG)
    rs1 = SUBREG_REG (rs1);
  if (!REG_P (rs1))
    return 0;

  if (rs2)
    {
      if (GET_CODE (rs2) == SUBREG)
	rs2 = SUBREG_REG (rs2);
      if (!REG_P (rs2))
	return 0;
    }

  if (strict)
    {
      if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
	  || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
	return 0;
    }
  else
    {
      if ((REGNO (rs1) >= 32
	   && REGNO (rs1) != FRAME_POINTER_REGNUM
	   && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
	  || (rs2
	      && (REGNO (rs2) >= 32
		  && REGNO (rs2) != FRAME_POINTER_REGNUM
		  && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
	return 0;
    }

  return 1;
}
/* Return the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx sparc_tls_symbol = NULL_RTX;

static rtx
sparc_tls_get_addr (void)
{
  if (!sparc_tls_symbol)
    sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");

  return sparc_tls_symbol;
}

/* Return the Global Offset Table to be used in TLS mode.  */

static rtx
sparc_tls_got (void)
{
  /* In PIC mode, this is just the PIC offset table.  */
  if (flag_pic)
    {
      crtl->uses_pic_offset_table = 1;
      return pic_offset_table_rtx;
    }

  /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
     the GOT symbol with the 32-bit ABI, so we reload the GOT register.  */
  if (TARGET_SUN_TLS && TARGET_ARCH32)
    {
      load_got_register ();
      return global_offset_table_rtx;
    }

  /* In all other cases, we load a new pseudo with the GOT symbol.  */
  return copy_to_reg (sparc_got ());
}

/* Return true if X contains a thread-local symbol.  */

bool
sparc_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
    x = XEXP (XEXP (x, 0), 0);

  if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
    return true;

  /* That's all we handle in sparc_legitimize_tls_address for now.  */
  return false;
}
/* ADDR contains a thread-local SYMBOL_REF.  Generate code to compute
   this (thread-local) address.  */

static rtx
sparc_legitimize_tls_address (rtx addr)
{
  rtx temp1, temp2, temp3, ret, o0, got, insn;

  gcc_assert (can_create_pseudo_p ());

  if (GET_CODE (addr) == SYMBOL_REF)
    switch (SYMBOL_REF_TLS_MODEL (addr))
      {
      case TLS_MODEL_GLOBAL_DYNAMIC:
	start_sequence ();
	temp1 = gen_reg_rtx (SImode);
	temp2 = gen_reg_rtx (SImode);
	ret = gen_reg_rtx (Pmode);
	o0 = gen_rtx_REG (Pmode, 8);
	got = sparc_tls_got ();
	emit_insn (gen_tgd_hi22 (temp1, addr));
	emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
	if (TARGET_ARCH32)
	  {
	    emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
	    insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
						   addr, const1_rtx));
	  }
	else
	  {
	    emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
	    insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
						   addr, const1_rtx));
	  }
	use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
	insn = get_insns ();
	end_sequence ();
	emit_libcall_block (insn, ret, o0, addr);
	break;

      case TLS_MODEL_LOCAL_DYNAMIC:
	start_sequence ();
	temp1 = gen_reg_rtx (SImode);
	temp2 = gen_reg_rtx (SImode);
	temp3 = gen_reg_rtx (Pmode);
	ret = gen_reg_rtx (Pmode);
	o0 = gen_rtx_REG (Pmode, 8);
	got = sparc_tls_got ();
	emit_insn (gen_tldm_hi22 (temp1));
	emit_insn (gen_tldm_lo10 (temp2, temp1));
	if (TARGET_ARCH32)
	  {
	    emit_insn (gen_tldm_add32 (o0, got, temp2));
	    insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
						    const1_rtx));
	  }
	else
	  {
	    emit_insn (gen_tldm_add64 (o0, got, temp2));
	    insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
						    const1_rtx));
	  }
	use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
	insn = get_insns ();
	end_sequence ();
	emit_libcall_block (insn, temp3, o0,
			    gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					    UNSPEC_TLSLD_BASE));
	temp1 = gen_reg_rtx (SImode);
	temp2 = gen_reg_rtx (SImode);
	emit_insn (gen_tldo_hix22 (temp1, addr));
	emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
	if (TARGET_ARCH32)
	  emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
	else
	  emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
	break;

      case TLS_MODEL_INITIAL_EXEC:
	temp1 = gen_reg_rtx (SImode);
	temp2 = gen_reg_rtx (SImode);
	temp3 = gen_reg_rtx (Pmode);
	got = sparc_tls_got ();
	emit_insn (gen_tie_hi22 (temp1, addr));
	emit_insn (gen_tie_lo10 (temp2, temp1, addr));
	if (TARGET_ARCH32)
	  emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
	else
	  emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
	if (TARGET_SUN_TLS)
	  {
	    ret = gen_reg_rtx (Pmode);
	    if (TARGET_ARCH32)
	      emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
					temp3, addr));
	    else
	      emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
					temp3, addr));
	  }
	else
	  ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
	break;

      case TLS_MODEL_LOCAL_EXEC:
	temp1 = gen_reg_rtx (Pmode);
	temp2 = gen_reg_rtx (Pmode);
	if (TARGET_ARCH32)
	  {
	    emit_insn (gen_tle_hix22_sp32 (temp1, addr));
	    emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
	  }
	else
	  {
	    emit_insn (gen_tle_hix22_sp64 (temp1, addr));
	    emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
	  }
	ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
	break;

      default:
	gcc_unreachable ();
      }

  else if (GET_CODE (addr) == CONST)
    {
      rtx base, offset;

      gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);

      base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
      offset = XEXP (XEXP (addr, 0), 1);

      base = force_operand (base, NULL_RTX);
      if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
	offset = force_reg (Pmode, offset);

      ret = gen_rtx_PLUS (Pmode, base, offset);
    }

  else
    gcc_unreachable ();  /* for now ... */

  return ret;
}
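
/* Illustration (added commentary): for the global-dynamic model on 32-bit,
   the sequence constructed above assembles roughly to

	sethi	%tgd_hi22(sym), %o1
	add	%o1, %tgd_lo10(sym), %o1
	add	%l7, %o1, %o0, %tgd_add(sym)
	call	__tls_get_addr, %tgd_call(sym)
	 nop

   which the linker may later relax to a cheaper model.  */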
/* Legitimize PIC addresses.  If the address is already position-independent,
   we return ORIG.  Newly generated position-independent addresses go into a
   reg.  This is REG if nonzero, otherwise we allocate register(s) as
   necessary.  */

static rtx
sparc_legitimize_pic_address (rtx orig, rtx reg)
{
  bool gotdata_op = false;

  if (GET_CODE (orig) == SYMBOL_REF
      /* See the comment in sparc_expand_move.  */
      || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
    {
      rtx pic_ref, address;
      rtx insn;

      if (reg == 0)
	{
	  gcc_assert (! reload_in_progress && ! reload_completed);
	  reg = gen_reg_rtx (Pmode);
	}

      if (flag_pic == 2)
	{
	  /* If not during reload, allocate another temp reg here for loading
	     in the address, so that these instructions can be optimized
	     properly.  */
	  rtx temp_reg = ((reload_in_progress || reload_completed)
			  ? reg : gen_reg_rtx (Pmode));

	  /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
	     won't get confused into thinking that these two instructions
	     are loading in the true address of the symbol.  If in the
	     future a PIC rtx exists, that should be used instead.  */
	  if (TARGET_ARCH64)
	    {
	      emit_insn (gen_movdi_high_pic (temp_reg, orig));
	      emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
	    }
	  else
	    {
	      emit_insn (gen_movsi_high_pic (temp_reg, orig));
	      emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
	    }

	  address = temp_reg;
	  gotdata_op = true;
	}
      else
	address = orig;

      crtl->uses_pic_offset_table = 1;
      if (gotdata_op)
	{
	  if (TARGET_ARCH64)
	    insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
							pic_offset_table_rtx,
							address, orig));
	  else
	    insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
							pic_offset_table_rtx,
							address, orig));
	}
      else
	{
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_PLUS (Pmode,
					   pic_offset_table_rtx, address));
	  insn = emit_move_insn (reg, pic_ref);
	}

      /* Put a REG_EQUAL note on this insn, so that it can be optimized
	 by loop.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base, offset;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      if (reg == 0)
	{
	  gcc_assert (! reload_in_progress && ! reload_completed);
	  reg = gen_reg_rtx (Pmode);
	}

      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
      base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
      offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
					     base == reg ? NULL_RTX : reg);

      if (GET_CODE (offset) == CONST_INT)
	{
	  if (SMALL_INT (offset))
	    return plus_constant (base, INTVAL (offset));
	  else if (! reload_in_progress && ! reload_completed)
	    offset = force_reg (Pmode, offset);
	  else
	    /* If we reach here, then something is seriously wrong.  */
	    gcc_unreachable ();
	}

      return gen_rtx_PLUS (Pmode, base, offset);
    }
  else if (GET_CODE (orig) == LABEL_REF)
    /* ??? We ought to be checking that the register is live instead, in case
       it is eliminated.  */
    crtl->uses_pic_offset_table = 1;

  return orig;
}
/* Try machine-dependent ways of modifying an illegitimate address X
   to be legitimate.  If we find one, return the new, valid address.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE is the mode of the operand pointed to by X.

   On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG.  */

static rtx
sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			  enum machine_mode mode)
{
  rtx orig_x = x;

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
    x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
		      force_operand (XEXP (x, 0), NULL_RTX));
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
    x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
		      force_operand (XEXP (x, 1), NULL_RTX));
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
    x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
		      XEXP (x, 1));
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
    x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
		      force_operand (XEXP (x, 1), NULL_RTX));

  if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
    return x;

  if (sparc_tls_referenced_p (x))
    x = sparc_legitimize_tls_address (x);
  else if (flag_pic)
    x = sparc_legitimize_pic_address (x, NULL_RTX);
  else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
    x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
		      copy_to_mode_reg (Pmode, XEXP (x, 1)));
  else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
    x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
		      copy_to_mode_reg (Pmode, XEXP (x, 0)));
  else if (GET_CODE (x) == SYMBOL_REF
	   || GET_CODE (x) == CONST
	   || GET_CODE (x) == LABEL_REF)
    x = copy_to_suggested_reg (x, NULL_RTX, Pmode);

  return x;
}
/* Delegitimize an address that was legitimized by the above function.  */

static rtx
sparc_delegitimize_address (rtx x)
{
  x = delegitimize_mem_from_attrs (x);

  if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 1)) == UNSPEC)
    switch (XINT (XEXP (x, 1), 1))
      {
      case UNSPEC_MOVE_PIC:
      case UNSPEC_TLSLE:
	x = XVECEXP (XEXP (x, 1), 0, 0);
	gcc_assert (GET_CODE (x) == SYMBOL_REF);
	break;
      default:
	break;
      }

  /* This is generated by mov{si,di}_pic_label_ref in PIC mode.  */
  if (GET_CODE (x) == MINUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
      && GET_CODE (XEXP (x, 1)) == LO_SUM
      && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
      && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
    {
      x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
      gcc_assert (GET_CODE (x) == LABEL_REF);
    }

  return x;
}
/* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS.  Returns a value to
   replace the input X, or the original X if no replacement is called for.
   The output parameter *WIN is 1 if the calling macro should goto WIN,
   0 if it should not.

   For SPARC, we wish to handle addresses by splitting them into
   HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
   This cuts the number of extra insns by one.

   Do nothing when generating PIC code and the address is a symbolic
   operand or requires a scratch register.  */

rtx
sparc_legitimize_reload_address (rtx x, enum machine_mode mode,
				 int opnum, int type,
				 int ind_levels ATTRIBUTE_UNUSED, int *win)
{
  /* Decompose SImode constants into HIGH+LO_SUM.  */
  if (CONSTANT_P (x)
      && (mode != TFmode || TARGET_ARCH64)
      && GET_MODE (x) == SImode
      && GET_CODE (x) != LO_SUM
      && GET_CODE (x) != HIGH
      && sparc_cmodel <= CM_MEDLOW
      && !(flag_pic
	   && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
    {
      x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type)type);
      *win = 1;
      return x;
    }

  /* We have to recognize what we have already generated above.  */
  if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type)type);
      *win = 1;
      return x;
    }

  *win = 0;
  return x;
}
/* Return true if ADDR (a legitimate address expression)
   has an effect that depends on the machine mode it is used for.

   In PIC mode,

      (mem:HI [%l7+a])

   is not equivalent to

      (mem:QI [%l7+a]) (mem:QI [%l7+a+1])

   because [%l7+a+1] is interpreted as the address of (a+1).  */

static bool
sparc_mode_dependent_address_p (const_rtx addr)
{
  if (flag_pic && GET_CODE (addr) == PLUS)
    {
      rtx op0 = XEXP (addr, 0);
      rtx op1 = XEXP (addr, 1);
      if (op0 == pic_offset_table_rtx
	  && symbolic_operand (op1, VOIDmode))
	return true;
    }

  return false;
}

#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

static void
get_pc_thunk_name (char name[32], unsigned int regno)
{
  const char *reg_name = reg_names[regno];

  /* Skip the leading '%' as that cannot be used in a
     symbol name.  */
  reg_name += 1;

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
}
/* Wrapper around the load_pcrel_sym{si,di} patterns.  */

static rtx
gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2, rtx op3)
{
  int orig_flag_pic = flag_pic;
  rtx insn;

  /* The load_pcrel_sym{si,di} patterns require absolute addressing.  */
  flag_pic = 0;
  if (TARGET_ARCH64)
    insn = gen_load_pcrel_symdi (op0, op1, op2, op3);
  else
    insn = gen_load_pcrel_symsi (op0, op1, op2, op3);
  flag_pic = orig_flag_pic;

  return insn;
}

/* Emit code to load the GOT register.  */

void
load_got_register (void)
{
  /* In PIC mode, this will retrieve pic_offset_table_rtx.  */
  if (!global_offset_table_rtx)
    global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);

  if (TARGET_VXWORKS_RTP)
    emit_insn (gen_vxworks_load_got ());
  else
    {
      /* The GOT symbol is subject to a PC-relative relocation so we need a
	 helper function to add the PC value and thus get the final value.  */
      if (!got_helper_rtx)
	{
	  char name[32];
	  get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
	  got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
	}

      emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
				     got_helper_rtx,
				     GEN_INT (GLOBAL_OFFSET_TABLE_REGNUM)));
    }

  /* Need to emit this whether or not we obey regdecls,
     since setjmp/longjmp can cause life info to screw up.
     ??? In the case where we don't obey regdecls, this is not sufficient
     since we may not fall out the bottom.  */
  emit_use (global_offset_table_rtx);
}
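
/* Illustration (added commentary): with GNU as, the emitted sequence is
   roughly

	sethi	%hi(_GLOBAL_OFFSET_TABLE_-4), %l7
	call	__sparc_get_pc_thunk.l7
	 add	%l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   where the thunk body is "retl; add %o7, %l7, %l7", folding the call's
   PC into %l7 so that it ends up pointing at the GOT.  */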
/* Emit a call instruction with the pattern given by PAT.  ADDR is the
   address of the call target.  */

void
sparc_emit_call_insn (rtx pat, rtx addr)
{
  rtx insn;

  insn = emit_call_insn (pat);

  /* The PIC register is live on entry to VxWorks PIC PLT entries.  */
  if (TARGET_VXWORKS_RTP
      && flag_pic
      && GET_CODE (addr) == SYMBOL_REF
      && (SYMBOL_REF_DECL (addr)
	  ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
	  : !SYMBOL_REF_LOCAL_P (addr)))
    {
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
      crtl->uses_pic_offset_table = 1;
    }
}
/* Return 1 if RTX is a MEM which is known to be aligned to at
   least a DESIRED byte boundary.  */

int
mem_min_alignment (rtx mem, int desired)
{
  rtx addr, base, offset;

  /* If it's not a MEM we can't accept it.  */
  if (GET_CODE (mem) != MEM)
    return 0;

  /* Obviously...  */
  if (!TARGET_UNALIGNED_DOUBLES
      && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
    return 1;

  /* ??? The rest of the function predates MEM_ALIGN so
     there is probably a bit of redundancy.  */
  addr = XEXP (mem, 0);
  base = offset = NULL_RTX;
  if (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG)
	{
	  base = XEXP (addr, 0);

	  /* What we are saying here is that if the base
	     REG is aligned properly, the compiler will make
	     sure any REG based index upon it will be so
	     as well.  */
	  if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
	    offset = XEXP (addr, 1);
	  else
	    offset = const0_rtx;
	}
    }
  else if (GET_CODE (addr) == REG)
    {
      base = addr;
      offset = const0_rtx;
    }

  if (base != NULL_RTX)
    {
      int regno = REGNO (base);

      if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
	{
	  /* Check if the compiler has recorded some information
	     about the alignment of the base REG.  If reload has
	     completed, we already matched with proper alignments.
	     If not running global_alloc, reload might give us
	     unaligned pointer to local stack though.  */
	  if (((cfun != 0
		&& REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
	       || (optimize && reload_completed))
	      && (INTVAL (offset) & (desired - 1)) == 0)
	    return 1;
	}
      else
	{
	  if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
	    return 1;
	}
    }
  else if (! TARGET_UNALIGNED_DOUBLES
	   || CONSTANT_P (addr)
	   || GET_CODE (addr) == LO_SUM)
    {
      /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
	 is true, in which case we can only assume that an access is aligned if
	 it is to a constant address, or the address involves a LO_SUM.  */
      return 1;
    }

  /* An obviously unaligned address.  */
  return 0;
}
/* Vectors to keep interesting information about registers where it can easily
   be got.  We used to use the actual mode value as the bit number, but there
   are more than 32 modes now.  Instead we use two tables: one indexed by
   hard register number, and one indexed by mode.  */

/* The purpose of sparc_mode_class is to shrink the range of modes so that
   they all fit (as bit numbers) in a 32-bit word (again).  Each real mode is
   mapped into one sparc_mode_class mode.  */

enum sparc_mode_class {
  S_MODE, D_MODE, T_MODE, O_MODE,
  SF_MODE, DF_MODE, TF_MODE, OF_MODE,
  CC_MODE, CCFP_MODE
};
/* Modes for single-word and smaller quantities.  */
#define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))

/* Modes for double-word and smaller quantities.  */
#define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))

/* Modes for quad-word and smaller quantities.  */
#define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))

/* Modes for 8-word and smaller quantities.  */
#define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))

/* Modes for single-float quantities.  We must allow any single word or
   smaller quantity.  This is because the fix/float conversion instructions
   take integer inputs/outputs from the float registers.  */
#define SF_MODES (S_MODES)

/* Modes for double-float and smaller quantities.  */
#define DF_MODES (D_MODES)

/* Modes for quad-float and smaller quantities.  */
#define TF_MODES (DF_MODES | (1 << (int) TF_MODE))

/* Modes for quad-float pairs and smaller quantities.  */
#define OF_MODES (TF_MODES | (1 << (int) OF_MODE))

/* Modes for double-float only quantities.  */
#define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))

/* Modes for quad-float and double-float only quantities.  */
#define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))

/* Modes for quad-float pairs and double-float only quantities.  */
#define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))

/* Modes for condition codes.  */
#define CC_MODES (1 << (int) CC_MODE)
#define CCFP_MODES (1 << (int) CCFP_MODE)

/* Value is 1 if register/mode pair is acceptable on sparc.
   The funny mixture of D and T modes is because integer operations
   do not specially operate on tetra quantities, so non-quad-aligned
   registers can hold quadword quantities (except %o4 and %i4 because
   they cross fixed registers).  */

/* This points to either the 32 bit or the 64 bit version.  */
const int *hard_regno_mode_classes;
static const int hard_32bit_mode_classes[] = {
  S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
  T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
  T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
  T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,

  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,

  /* FP regs f32 to f63.  Only the even numbered registers actually exist,
     and none can hold SFmode/SImode values.  */
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,

  /* %fcc[0123] */
  CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,

  /* %icc, %sfp, %gsr */
  CC_MODES, 0, D_MODES
};

static const int hard_64bit_mode_classes[] = {
  D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,

  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,

  /* FP regs f32 to f63.  Only the even numbered registers actually exist,
     and none can hold SFmode/SImode values.  */
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,

  /* %fcc[0123] */
  CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,

  /* %icc, %sfp, %gsr */
  CC_MODES, 0, D_MODES
};
int sparc_mode_class [NUM_MACHINE_MODES];

enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];

static void
sparc_init_modes (void)
{
  int i;

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      switch (GET_MODE_CLASS (i))
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	case MODE_COMPLEX_INT:
	  if (GET_MODE_SIZE (i) <= 4)
	    sparc_mode_class[i] = 1 << (int) S_MODE;
	  else if (GET_MODE_SIZE (i) == 8)
	    sparc_mode_class[i] = 1 << (int) D_MODE;
	  else if (GET_MODE_SIZE (i) == 16)
	    sparc_mode_class[i] = 1 << (int) T_MODE;
	  else if (GET_MODE_SIZE (i) == 32)
	    sparc_mode_class[i] = 1 << (int) O_MODE;
	  else
	    sparc_mode_class[i] = 0;
	  break;
	case MODE_VECTOR_INT:
	  if (GET_MODE_SIZE (i) <= 4)
	    sparc_mode_class[i] = 1 << (int)SF_MODE;
	  else if (GET_MODE_SIZE (i) == 8)
	    sparc_mode_class[i] = 1 << (int)DF_MODE;
	  break;
	case MODE_FLOAT:
	case MODE_COMPLEX_FLOAT:
	  if (GET_MODE_SIZE (i) <= 4)
	    sparc_mode_class[i] = 1 << (int) SF_MODE;
	  else if (GET_MODE_SIZE (i) == 8)
	    sparc_mode_class[i] = 1 << (int) DF_MODE;
	  else if (GET_MODE_SIZE (i) == 16)
	    sparc_mode_class[i] = 1 << (int) TF_MODE;
	  else if (GET_MODE_SIZE (i) == 32)
	    sparc_mode_class[i] = 1 << (int) OF_MODE;
	  else
	    sparc_mode_class[i] = 0;
	  break;
	case MODE_CC:
	  if (i == (int) CCFPmode || i == (int) CCFPEmode)
	    sparc_mode_class[i] = 1 << (int) CCFP_MODE;
	  else
	    sparc_mode_class[i] = 1 << (int) CC_MODE;
	  break;
	default:
	  sparc_mode_class[i] = 0;
	  break;
	}
    }

  if (TARGET_ARCH64)
    hard_regno_mode_classes = hard_64bit_mode_classes;
  else
    hard_regno_mode_classes = hard_32bit_mode_classes;

  /* Initialize the array used by REGNO_REG_CLASS.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      if (i < 16 && TARGET_V8PLUS)
	sparc_regno_reg_class[i] = I64_REGS;
      else if (i < 32 || i == FRAME_POINTER_REGNUM)
	sparc_regno_reg_class[i] = GENERAL_REGS;
      else if (i < 64)
	sparc_regno_reg_class[i] = FP_REGS;
      else if (i < 96)
	sparc_regno_reg_class[i] = EXTRA_FP_REGS;
      else if (i < 100)
	sparc_regno_reg_class[i] = FPCC_REGS;
      else
	sparc_regno_reg_class[i] = NO_REGS;
    }
}
/* Return whether REGNO, a global or FP register, must be saved/restored.  */

static bool
save_global_or_fp_reg_p (unsigned int regno,
			 int leaf_function ATTRIBUTE_UNUSED)
{
  return !call_used_regs[regno] && df_regs_ever_live_p (regno);
}

/* Return whether the return address register (%i7) is needed.  */

static bool
return_addr_reg_needed_p (int leaf_function)
{
  /* If it is live, for example because of __builtin_return_address (0).  */
  if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
    return true;

  /* Otherwise, it is needed as save register if %o7 is clobbered.  */
  if (!leaf_function
      /* Loading the GOT register clobbers %o7.  */
      || crtl->uses_pic_offset_table
      || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
    return true;

  return false;
}

/* Return whether REGNO, a local or in register, must be saved/restored.  */

static bool
save_local_or_in_reg_p (unsigned int regno, int leaf_function)
{
  /* General case: call-saved registers live at some point.  */
  if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
    return true;

  /* Frame pointer register (%fp) if needed.  */
  if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
    return true;

  /* Return address register (%i7) if needed.  */
  if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
    return true;

  /* GOT register (%l7) if needed.  */
  if (regno == PIC_OFFSET_TABLE_REGNUM && crtl->uses_pic_offset_table)
    return true;

  /* If the function accesses prior frames, the frame pointer and the return
     address of the previous frame must be saved on the stack.  */
  if (crtl->accesses_prior_frames
      && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
    return true;

  return false;
}
/* Compute the frame size required by the function.  This function is called
   during the reload pass and also by sparc_expand_prologue.  */

HOST_WIDE_INT
sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
{
  HOST_WIDE_INT frame_size, apparent_frame_size;
  int args_size, n_global_fp_regs = 0;
  bool save_local_in_regs_p = false;
  unsigned int i;

  /* If the function allocates dynamic stack space, the dynamic offset is
     computed early and contains REG_PARM_STACK_SPACE, so we need to cope.  */
  if (leaf_function && !cfun->calls_alloca)
    args_size = 0;
  else
    args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);

  /* Calculate space needed for global registers.  */
  if (TARGET_ARCH64)
    {
      for (i = 0; i < 8; i++)
	if (save_global_or_fp_reg_p (i, 0))
	  n_global_fp_regs += 2;
    }
  else
    {
      for (i = 0; i < 8; i += 2)
	if (save_global_or_fp_reg_p (i, 0)
	    || save_global_or_fp_reg_p (i + 1, 0))
	  n_global_fp_regs += 2;
    }

  /* In the flat window model, find out which local and in registers need to
     be saved.  We don't reserve space in the current frame for them as they
     will be spilled into the register window save area of the caller's frame.
     However, as soon as we use this register window save area, we must create
     that of the current frame to make it the live one.  */
  if (TARGET_FLAT)
    for (i = 16; i < 32; i++)
      if (save_local_or_in_reg_p (i, leaf_function))
	{
	  save_local_in_regs_p = true;
	  break;
	}

  /* Calculate space needed for FP registers.  */
  for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
    if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
      n_global_fp_regs += 2;

  if (size == 0
      && n_global_fp_regs == 0
      && args_size == 0
      && !save_local_in_regs_p)
    frame_size = apparent_frame_size = 0;
  else
    {
      /* We subtract STARTING_FRAME_OFFSET, remember it's negative.  */
      apparent_frame_size = (size - STARTING_FRAME_OFFSET + 7) & -8;
      apparent_frame_size += n_global_fp_regs * 4;

      /* We need to add the size of the outgoing argument area.  */
      frame_size = apparent_frame_size + ((args_size + 7) & -8);

      /* And that of the register window save area.  */
      frame_size += FIRST_PARM_OFFSET (cfun->decl);

      /* Finally, bump to the appropriate alignment.  */
      frame_size = SPARC_STACK_ALIGN (frame_size);
    }

  /* Set up values for use in prologue and epilogue.  */
  sparc_frame_size = frame_size;
  sparc_apparent_frame_size = apparent_frame_size;
  sparc_n_global_fp_regs = n_global_fp_regs;
  sparc_save_local_in_regs_p = save_local_in_regs_p;

  return frame_size;
}
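
/* Note (added commentary): a non-empty frame always includes the 16-word
   register window save area (plus the hidden struct-return slot on 32-bit),
   which is what FIRST_PARM_OFFSET contributes above; SPARC_STACK_ALIGN then
   rounds the total up to the ABI stack alignment.  */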
/* Output any necessary .register pseudo-ops.  */

void
sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
{
#ifdef HAVE_AS_REGISTER_PSEUDO_OP
  int i;

  if (TARGET_ARCH32)
    return;

  /* Check if %g[2367] were used without
     .register being printed for them already.  */
  for (i = 2; i < 8; i++)
    {
      if (df_regs_ever_live_p (i)
	  && ! sparc_hard_reg_printed [i])
	{
	  sparc_hard_reg_printed [i] = 1;
	  /* %g7 is used as TLS base register, use #ignore
	     for it instead of #scratch.  */
	  fprintf (file, "\t.register\t%%g%d, #%s\n", i,
		   i == 7 ? "ignore" : "scratch");
	}
      if (i == 3) i = 5;
    }
#endif
}

#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

#if PROBE_INTERVAL > 4096
#error Cannot use indexed addressing mode for stack probing
#endif
4278 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
4279 inclusive. These are offsets from the current stack pointer.
4281 Note that we don't use the REG+REG addressing mode for the probes because
4282 of the stack bias in 64-bit mode. And it doesn't really buy us anything
4283 so the advantages of having a single code win here. */
4286 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
4288 rtx g1 = gen_rtx_REG (Pmode, 1);
4290 /* See if we have a constant small number of probes to generate. If so,
4291 that's the easy case. */
4292 if (size <= PROBE_INTERVAL)
4294 emit_move_insn (g1, GEN_INT (first));
4295 emit_insn (gen_rtx_SET (VOIDmode, g1,
4296 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4297 emit_stack_probe (plus_constant (g1, -size));
4300 /* The run-time loop is made up of 10 insns in the generic case while the
4301 compile-time loop is made up of 4+2*(n-2) insns for n # of intervals. */
4302 else if (size <= 5 * PROBE_INTERVAL)
4306 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
4307 emit_insn (gen_rtx_SET (VOIDmode, g1,
4308 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4309 emit_stack_probe (g1);
4311 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
4312 it exceeds SIZE. If only two probes are needed, this will not
4313 generate any code. Then probe at FIRST + SIZE. */
4314 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
4316 emit_insn (gen_rtx_SET (VOIDmode, g1,
4317 plus_constant (g1, -PROBE_INTERVAL)));
4318 emit_stack_probe (g1);
4321 emit_stack_probe (plus_constant (g1, (i - PROBE_INTERVAL) - size));
4324 /* Otherwise, do the same as above, but in a loop. Note that we must be
4325 extra careful with variables wrapping around because we might be at
4326 the very top (or the very bottom) of the address space and we have
4327 to be able to handle this case properly; in particular, we use an
4328 equality test for the loop condition. */
4331 HOST_WIDE_INT rounded_size;
4332 rtx g4 = gen_rtx_REG (Pmode, 4);
4334 emit_move_insn (g1, GEN_INT (first));
4337 /* Step 1: round SIZE to the previous multiple of the interval. */
4339 rounded_size = size & -PROBE_INTERVAL;
4340 emit_move_insn (g4, GEN_INT (rounded_size));
4343 /* Step 2: compute initial and final value of the loop counter. */
4345 /* TEST_ADDR = SP + FIRST. */
4346 emit_insn (gen_rtx_SET (VOIDmode, g1,
4347 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4349 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
4350 emit_insn (gen_rtx_SET (VOIDmode, g4, gen_rtx_MINUS (Pmode, g1, g4)));
4355 /* Step 3: the loop: while (TEST_ADDR != LAST_ADDR)
4357 { TEST_ADDR = TEST_ADDR + PROBE_INTERVAL; probe at TEST_ADDR }
4361 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
4362 until it is equal to ROUNDED_SIZE. */
4365 if (TARGET_ARCH64) emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
4367 else emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
4370 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
4371 that SIZE is equal to ROUNDED_SIZE. */
4373 if (size != rounded_size)
4374 emit_stack_probe (plus_constant (g4, rounded_size - size));
4377 /* Make sure nothing is scheduled before we are done. */
4378 emit_insn (gen_blockage ());
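/* For illustration only: a standalone sketch (hypothetical helper, not part
   of this file) of the probe schedule implemented above, ignoring the
   single-probe and run-time loop cases: one probe at FIRST + N *
   PROBE_INTERVAL for each N until the offset reaches SIZE, then a final
   probe at FIRST + SIZE.  */
#if 0
#include <stdio.h>

static void
print_probe_offsets (long first, long size, long interval)
{
  long i = interval;
  printf ("probe at sp - %ld\n", first + i);		/* N = 1 */
  for (i = 2 * interval; i < size; i += interval)	/* N = 2, 3, ... */
    printf ("probe at sp - %ld\n", first + i);
  printf ("probe at sp - %ld\n", first + size);		/* final probe */
}

/* print_probe_offsets (4096, 12000, 4096) probes at sp - 8192,
   sp - 12288 and sp - 16096.  */
#endif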
4381 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
4382 absolute addresses. */
4385 output_probe_stack_range (rtx reg1, rtx reg2)
4387 static int labelno = 0;
4388 char loop_lab[32], end_lab[32]; rtx xops[2];
4391 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
4392 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
4394 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
4396 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
4399 xops[0] = reg1; xops[1] = reg2; output_asm_insn ("cmp\t%0, %1", xops);
4401 if (TARGET_ARCH64) fputs ("\tbe,pn\t%xcc,", asm_out_file);
4403 else fputs ("\tbe\t", asm_out_file);
4404 assemble_name_raw (asm_out_file, end_lab);
4405 fputc ('\n', asm_out_file);
4407 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
4408 xops[1] = GEN_INT (-PROBE_INTERVAL);
4409 output_asm_insn (" add\t%0, %1, %0", xops);
4411 /* Probe at TEST_ADDR and branch. */
4413 if (TARGET_ARCH64) fputs ("\tba,pt\t%xcc,", asm_out_file);
4415 else fputs ("\tba\t", asm_out_file);
4416 assemble_name_raw (asm_out_file, loop_lab);
4417 fputc ('\n', asm_out_file);
4418 xops[1] = GEN_INT (SPARC_STACK_BIAS);
4419 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
4421 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
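/* For illustration, the loop emitted above looks roughly like this in
   64-bit mode, with %g1 and %g4 standing for REG1 and REG2, PROBE_INTERVAL
   being 4096 and the stack bias 2047 (the exact registers and bias depend
   on the caller and the target):

	.LPSRL0:
		cmp	%g1, %g4	! TEST_ADDR == LAST_ADDR ?
		be,pn	%xcc, .LPSRE0
		 add	%g1, -4096, %g1	! delay slot: next probe address
		ba,pt	%xcc, .LPSRL0
		 st	%g0, [%g1+2047]	! delay slot: probe the word
	.LPSRE0:  */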
4426 /* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
4427 needed. LOW is supposed to be double-word aligned for 32-bit registers.
4428 SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
4429 is the action to be performed if SAVE_P returns true and ACTION_FALSE
4430 the action to be performed if it returns false. Return the new offset. */
4432 typedef bool (*sorr_pred_t) (unsigned int, int);
4433 typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
4436 emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
4437 int offset, int leaf_function, sorr_pred_t save_p,
4438 sorr_act_t action_true, sorr_act_t action_false)
4443 if (TARGET_ARCH64 && high <= 32)
4447 for (i = low; i < high; i++)
4449 if (save_p (i, leaf_function))
4451 mem = gen_frame_mem (DImode, plus_constant (base, offset));
4452 if (action_true == SORR_SAVE)
4454 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4455 RTX_FRAME_RELATED_P (insn) = 1;
4457 else /* action_true == SORR_RESTORE */
4459 /* The frame pointer must be restored last since its old
4460 value may be used as base address for the frame. This
4461 is problematic in 64-bit mode only because of the lack
4462 of double-word load instruction. */
4463 if (i == HARD_FRAME_POINTER_REGNUM) fp_offset = offset;
4466 else emit_move_insn (gen_rtx_REG (DImode, i), mem);
4470 else if (action_false == SORR_ADVANCE)
4476 mem = gen_frame_mem (DImode, plus_constant (base, fp_offset));
4477 emit_move_insn (hard_frame_pointer_rtx, mem);
4482 for (i = low; i < high; i += 2)
4484 bool reg0 = save_p (i, leaf_function);
4485 bool reg1 = save_p (i + 1, leaf_function);
4486 enum machine_mode mode; int regno;
4491 if (reg0 && reg1) { mode = i < 32 ? DImode : DFmode; regno = i; }
4496 else if (reg0) { mode = i < 32 ? SImode : SFmode; regno = i; }
4501 else if (reg1) { mode = i < 32 ? SImode : SFmode; regno = i + 1; offset += 4; }
4507 else { if (action_false == SORR_ADVANCE) offset += 8; continue; }
4512 mem = gen_frame_mem (mode, plus_constant (base, offset));
4513 if (action_true == SORR_SAVE)
4515 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4516 RTX_FRAME_RELATED_P (insn) = 1;
4520 mem = gen_frame_mem (SImode, plus_constant (base, offset));
4521 set1 = gen_rtx_SET (VOIDmode, mem,
4522 gen_rtx_REG (SImode, regno));
4523 RTX_FRAME_RELATED_P (set1) = 1;
4525 mem = gen_frame_mem (SImode, plus_constant (base, offset + 4));
4526 set2 = gen_rtx_SET (VOIDmode, mem,
4527 gen_rtx_REG (SImode, regno + 1));
4528 RTX_FRAME_RELATED_P (set2) = 1;
4529 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4530 gen_rtx_PARALLEL (VOIDmode,
4531 gen_rtvec (2, set1, set2)));
4534 else /* action_true == SORR_RESTORE */
4535 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4537 /* Always preserve double-word alignment. */
4538 offset = (offset + 8) & -8;
4545 /* Emit code to adjust BASE to OFFSET. Return the new base. */
4548 emit_adjust_base_to_offset (rtx base, int offset)
4550 /* ??? This might be optimized a little as %g1 might already have a
4551 value close enough that a single add insn will do. */
4552 /* ??? Although, all of this is probably only a temporary fix because
4553 if %g1 can hold a function result, then sparc_expand_epilogue will
4554 lose (the result will be clobbered). */
4555 rtx new_base = gen_rtx_REG (Pmode, 1);
4556 emit_move_insn (new_base, GEN_INT (offset));
4557 emit_insn (gen_rtx_SET (VOIDmode,
4558 new_base, gen_rtx_PLUS (Pmode, base, new_base))); return new_base;
4562 /* Emit code to save/restore call-saved global and FP registers. */
4565 emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
4567 if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
4569 { base = emit_adjust_base_to_offset (base, offset); offset = 0; }
4574 offset = emit_save_or_restore_regs (0, 8, base, offset, 0,
4575 save_global_or_fp_reg_p, action, SORR_NONE);
4576 emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
4577 save_global_or_fp_reg_p, action, SORR_NONE);
4580 /* Emit code to save/restore call-saved local and in registers. */
4583 emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
4585 if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
4587 { base = emit_adjust_base_to_offset (base, offset); offset = 0; }
4591 emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
4592 save_local_or_in_reg_p, action, SORR_ADVANCE);
4595 /* Emit a window_save insn. */
4598 emit_window_save (rtx increment)
4600 rtx insn = emit_insn (gen_window_save (increment));
4601 RTX_FRAME_RELATED_P (insn) = 1;
4603 /* The incoming return address (%o7) is saved in %i7. */
4604 add_reg_note (insn, REG_CFA_REGISTER,
4605 gen_rtx_SET (VOIDmode,
4606 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
4608 gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM)));
4610 /* The window save event. */
4611 add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
4613 /* The CFA is %fp, the hard frame pointer. */
4614 add_reg_note (insn, REG_CFA_DEF_CFA,
4615 plus_constant (hard_frame_pointer_rtx,
4616 INCOMING_FRAME_SP_OFFSET));
4621 /* Generate an increment for the stack pointer. */
4624 gen_stack_pointer_inc (rtx increment)
4626 return gen_rtx_SET (VOIDmode, stack_pointer_rtx,
4628 gen_rtx_PLUS (Pmode, stack_pointer_rtx, increment));
4633 /* Generate a decrement for the stack pointer. */
4636 gen_stack_pointer_dec (rtx decrement)
4638 return gen_rtx_SET (VOIDmode, stack_pointer_rtx,
4640 gen_rtx_MINUS (Pmode, stack_pointer_rtx, decrement));
4645 /* Expand the function prologue. The prologue is responsible for reserving
4646 storage for the frame, saving the call-saved registers and loading the
4647 GOT register if needed. */
4650 sparc_expand_prologue (void)
4655 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4656 on the final value of the flag means deferring the prologue/epilogue
4657 expansion until just before the second scheduling pass, which is too
4658 late to emit multiple epilogues or return insns.
4660 Of course we are making the assumption that the value of the flag
4661 will not change between now and its final value. Of the three parts
4662 of the formula, only the last one can reasonably vary. Let's take a
4663 closer look, after assuming that the first two are set to true
4664 (otherwise the last value is effectively silenced).
4666 If only_leaf_regs_used returns false, the global predicate will also
4667 be false so the actual frame size calculated below will be positive.
4668 As a consequence, the save_register_window insn will be emitted in
4669 the instruction stream; now this insn explicitly references %fp
4670 which is not a leaf register so only_leaf_regs_used will always
4671 return false subsequently.
4673 If only_leaf_regs_used returns true, we hope that the subsequent
4674 optimization passes won't cause non-leaf registers to pop up. For
4675 example, the regrename pass has special provisions to not rename to
4676 non-leaf registers in a leaf function. */
4677 sparc_leaf_function_p
4678 = optimize > 0 && current_function_is_leaf && only_leaf_regs_used ();
4680 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4682 if (flag_stack_usage_info)
4683 current_function_static_stack_size = size;
4685 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
4686 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
4690 else if (sparc_leaf_function_p)
4692 rtx size_int_rtx = GEN_INT (-size);
4695 if (size <= 4096) insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
4696 else if (size <= 8192)
4698 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4699 /* %sp is still the CFA register. */
4700 RTX_FRAME_RELATED_P (insn) = 1;
4701 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
4705 rtx size_rtx = gen_rtx_REG (Pmode, 1);
4706 emit_move_insn (size_rtx, size_int_rtx);
4707 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
4708 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4709 gen_stack_pointer_inc (size_int_rtx));
4712 RTX_FRAME_RELATED_P (insn) = 1;
4716 rtx size_int_rtx = GEN_INT (-size);
4719 if (size <= 4096) emit_window_save (size_int_rtx);
4720 else if (size <= 8192)
4722 emit_window_save (GEN_INT (-4096));
4723 /* %sp is not the CFA register anymore. */
4724 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
4728 rtx size_rtx = gen_rtx_REG (Pmode, 1);
4729 emit_move_insn (size_rtx, size_int_rtx);
4730 emit_window_save (size_rtx);
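/* For illustration only: a standalone sketch (hypothetical helper, not part
   of this file) of the three-way strategy used above.  SPARC add/save
   immediates are 13-bit signed, i.e. [-4096, 4095], so -SIZE fits directly
   for SIZE <= 4096, two adjustments of -4096 and 4096 - SIZE cover
   SIZE <= 8192, and larger sizes go through the %g1 scratch register.  */
#if 0
static int
stack_adjust_strategy (long size)
{
  if (size <= 4096)
    return 1;	/* one insn: immediate -SIZE */
  else if (size <= 8192)
    return 2;	/* two insns: -4096, then 4096 - SIZE (also in range) */
  else
    return 3;	/* load -SIZE into a scratch register, then adjust */
}
#endif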
4734 if (sparc_leaf_function_p)
4736 sparc_frame_base_reg = stack_pointer_rtx;
4737 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
4741 sparc_frame_base_reg = hard_frame_pointer_rtx;
4742 sparc_frame_base_offset = SPARC_STACK_BIAS;
4745 if (sparc_n_global_fp_regs > 0)
4746 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
4747 sparc_frame_base_offset
4748 - sparc_apparent_frame_size, SORR_SAVE);
4751 /* Load the GOT register if needed. */
4752 if (crtl->uses_pic_offset_table)
4753 load_got_register ();
4755 /* Advertise that the data calculated just above are now valid. */
4756 sparc_prologue_data_valid_p = true;
4759 /* Expand the function prologue. The prologue is responsible for reserving
4760 storage for the frame, saving the call-saved registers and loading the
4761 GOT register if needed. */
4764 sparc_flat_expand_prologue (void)
4769 sparc_leaf_function_p = optimize > 0 && current_function_is_leaf;
4771 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4773 if (flag_stack_usage_info)
4774 current_function_static_stack_size = size;
4776 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
4777 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
4779 if (sparc_save_local_in_regs_p)
4780 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS, SORR_SAVE);
4787 rtx size_int_rtx, size_rtx;
4789 size_rtx = size_int_rtx = GEN_INT (-size);
4791 /* We establish the frame (i.e. decrement the stack pointer) first, even
4792 if we use a frame pointer, because we cannot clobber any call-saved
4793 registers, including the frame pointer, if we haven't created a new
4794 register save area, for the sake of compatibility with the ABI. */
4796 if (size <= 4096) insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
4797 else if (size <= 8192 && !frame_pointer_needed)
4799 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4800 RTX_FRAME_RELATED_P (insn) = 1;
4801 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
4805 size_rtx = gen_rtx_REG (Pmode, 1);
4806 emit_move_insn (size_rtx, size_int_rtx);
4807 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
4808 add_reg_note (insn, REG_CFA_ADJUST_CFA,
4809 gen_stack_pointer_inc (size_int_rtx));
4811 RTX_FRAME_RELATED_P (insn) = 1;
4813 /* Ensure nothing is scheduled until after the frame is established. */
4814 emit_insn (gen_blockage ());
4816 if (frame_pointer_needed)
4818 insn = emit_insn (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
4819 gen_rtx_MINUS (Pmode, stack_pointer_rtx, size_rtx)));
4822 RTX_FRAME_RELATED_P (insn) = 1;
4824 add_reg_note (insn, REG_CFA_ADJUST_CFA,
4825 gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
4826 plus_constant (stack_pointer_rtx, size)));
4830 if (return_addr_reg_needed_p (sparc_leaf_function_p))
4832 rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
4833 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
4835 insn = emit_move_insn (i7, o7);
4836 RTX_FRAME_RELATED_P (insn) = 1;
4838 add_reg_note (insn, REG_CFA_REGISTER,
4839 gen_rtx_SET (VOIDmode, i7, o7));
4841 /* Prevent this instruction from ever being considered dead,
4842 even if this function has no epilogue. */
4843 emit_insn (gen_rtx_USE (VOIDmode, i7));
4847 if (frame_pointer_needed)
4849 sparc_frame_base_reg = hard_frame_pointer_rtx;
4850 sparc_frame_base_offset = SPARC_STACK_BIAS;
4854 sparc_frame_base_reg = stack_pointer_rtx;
4855 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
4858 if (sparc_n_global_fp_regs > 0)
4859 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
4860 sparc_frame_base_offset
4861 - sparc_apparent_frame_size, SORR_SAVE);
4864 /* Load the GOT register if needed. */
4865 if (crtl->uses_pic_offset_table)
4866 load_got_register ();
4868 /* Advertise that the data calculated just above are now valid. */
4869 sparc_prologue_data_valid_p = true;
4872 /* This function generates the assembly code for function entry, which boils
4873 down to emitting the necessary .register directives. */
4876 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4878 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4880 if (!TARGET_FLAT) gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4882 sparc_output_scratch_registers (file);
4885 /* Expand the function epilogue, either normal or part of a sibcall.
4886 We emit all the instructions except the return or the call. */
4889 sparc_expand_epilogue (bool for_eh)
4891 HOST_WIDE_INT size = sparc_frame_size;
4893 if (sparc_n_global_fp_regs > 0)
4894 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
4895 sparc_frame_base_offset
4896 - sparc_apparent_frame_size, SORR_RESTORE);
4899 if (size == 0 || for_eh)
4901 else if (sparc_leaf_function_p)
4904 if (size <= 4096) emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
4905 else if (size <= 8192)
4907 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4908 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
4912 rtx reg = gen_rtx_REG (Pmode, 1);
4913 emit_move_insn (reg, GEN_INT (-size));
4914 emit_insn (gen_stack_pointer_dec (reg));
4919 /* Expand the function epilogue, either normal or part of a sibcall.
4920 We emit all the instructions except the return or the call. */
4923 sparc_flat_expand_epilogue (bool for_eh)
4925 HOST_WIDE_INT size = sparc_frame_size;
4927 if (sparc_n_global_fp_regs > 0)
4928 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
4929 sparc_frame_base_offset
4930 - sparc_apparent_frame_size, SORR_RESTORE);
4933 /* If we have a frame pointer, we'll need both to restore it before the
4934 frame is destroyed and use its current value in destroying the frame.
4935 Since we don't have an atomic way to do that in the flat window model,
4936 we save the current value into a temporary register (%g1). */
4937 if (frame_pointer_needed && !for_eh)
4938 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
4940 if (return_addr_reg_needed_p (sparc_leaf_function_p))
4941 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
4942 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
4944 if (sparc_save_local_in_regs_p)
4945 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
4946 sparc_frame_base_offset, SORR_RESTORE);
4949 if (size == 0 || for_eh)
4951 else if (frame_pointer_needed)
4953 /* Make sure the frame is destroyed after everything else is done. */
4954 emit_insn (gen_blockage ());
4956 emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
4961 emit_insn (gen_blockage ());
4964 if (size <= 4096) emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
4965 else if (size <= 8192)
4967 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4968 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
4972 rtx reg = gen_rtx_REG (Pmode, 1);
4973 emit_move_insn (reg, GEN_INT (-size));
4974 emit_insn (gen_stack_pointer_dec (reg));
4979 /* Return true if it is appropriate to emit `return' instructions in the
4980 body of a function. */
4983 sparc_can_use_return_insn_p (void)
4985 return sparc_prologue_data_valid_p
4986 && sparc_n_global_fp_regs == 0
4988 && TARGET_FLAT ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
4989 : (sparc_frame_size == 0 || !sparc_leaf_function_p);
4992 /* This function generates the assembly code for function exit. */
4995 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4997 /* If the last two instructions of a function are "call foo; dslot;"
4998 the return address might point to the first instruction in the next
4999 function and we have to output a dummy nop for the sake of sane
5000 backtraces in such cases. This is pointless for sibling calls since
5001 the return address is explicitly adjusted. */
5003 rtx insn, last_real_insn;
5005 insn = get_last_insn ();
5007 last_real_insn = prev_real_insn (insn);
5009 if (last_real_insn && GET_CODE (last_real_insn) == INSN
5010 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
5011 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
5014 if (last_real_insn && CALL_P (last_real_insn)
5015 && !SIBLING_CALL_P (last_real_insn))
5016 fputs("\tnop\n", file);
5018 sparc_output_deferred_case_vectors ();
5021 /* Output a 'restore' instruction. */
5024 output_restore (rtx pat)
5030 rtx operands[3]; if (! pat) { fputs ("\t restore\n", asm_out_file); return; }
5034 gcc_assert (GET_CODE (pat) == SET);
5036 operands[0] = SET_DEST (pat);
5037 pat = SET_SRC (pat);
5039 switch (GET_CODE (pat)) {
5042 case PLUS: operands[1] = XEXP (pat, 0);
5043 operands[2] = XEXP (pat, 1);
5044 output_asm_insn (" restore %r1, %2, %Y0", operands); break;
5047 case LO_SUM: operands[1] = XEXP (pat, 0);
5048 operands[2] = XEXP (pat, 1);
5049 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands); break;
5052 case ASHIFT: operands[1] = XEXP (pat, 0);
5053 gcc_assert (XEXP (pat, 1) == const1_rtx);
5054 output_asm_insn (" restore %r1, %r1, %Y0", operands); break;
5058 default: output_asm_insn (" restore %%g0, %1, %Y0", operands); break; }
5063 /* Output a return. */
5066 output_return (rtx insn)
5068 if (crtl->calls_eh_return)
5070 /* If the function uses __builtin_eh_return, the eh_return
5071 machinery occupies the delay slot. */
5072 gcc_assert (!final_sequence);
5074 if (flag_delayed_branch)
5076 if (!TARGET_FLAT && TARGET_V9)
5077 fputs ("\treturn\t%i7+8\n", asm_out_file);
5081 fputs ("\trestore\n", asm_out_file);
5083 fputs ("\tjmp\t%o7+8\n", asm_out_file);
5086 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
5091 fputs ("\trestore\n", asm_out_file);
5093 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
5094 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
5097 else if (sparc_leaf_function_p || TARGET_FLAT)
5099 /* This is a leaf or flat function so we don't have to bother restoring
5100 the register window, which frees us from dealing with the convoluted
5101 semantics of restore/return. We simply output the jump to the
5102 return address and the insn in the delay slot (if any). */
5104 return "jmp\t%%o7+%)%#";
5108 /* This is a regular function so we have to restore the register window.
5109 We may have a pending insn for the delay slot, which will be either
5110 combined with the 'restore' instruction or put in the delay slot of
5111 the 'return' instruction. */
5117 delay = NEXT_INSN (insn);
5120 pat = PATTERN (delay);
5122 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
5124 { epilogue_renumber (&pat, 0);
5125 return "return\t%%i7+%)%#"; }
5129 else { output_asm_insn ("jmp\t%%i7+%)", NULL);
5130 output_restore (pat);
5131 PATTERN (delay) = gen_blockage ();
5132 INSN_CODE (delay) = -1; }
5137 /* The delay slot is empty. */
5139 return "return\t%%i7+%)\n\t nop";
5140 else if (flag_delayed_branch)
5141 return "jmp\t%%i7+%)\n\t restore";
5143 return "restore\n\tjmp\t%%o7+%)\n\t nop";
5150 /* Output a sibling call. */
5153 output_sibcall (rtx insn, rtx call_operand)
5157 gcc_assert (flag_delayed_branch);
5159 operands[0] = call_operand;
5161 if (sparc_leaf_function_p || TARGET_FLAT)
5163 /* This is a leaf or flat function so we don't have to bother restoring
5164 the register window. We simply output the jump to the function and
5165 the insn in the delay slot (if any). */
5167 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
5170 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
5173 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
5174 it into branch if possible. */
5175 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
5180 /* This is a regular function so we have to restore the register window.
5181 We may have a pending insn for the delay slot, which will be combined
5182 with the 'restore' instruction. */
5184 output_asm_insn ("call\t%a0, 0", operands);
5188 rtx delay = NEXT_INSN (insn);
5191 output_restore (PATTERN (delay));
5193 PATTERN (delay) = gen_blockage ();
5194 INSN_CODE (delay) = -1;
5197 else output_restore (NULL_RTX);
5203 /* Functions for handling argument passing.
5205 For 32-bit, the first 6 args are normally in registers and the rest are
5206 pushed. Any arg that starts within the first 6 words is at least
5207 partially passed in a register unless its data type forbids.
5209 For 64-bit, the argument registers are laid out as an array of 16 elements
5210 and arguments are added sequentially. The first 6 int args and up to the
5211 first 16 fp args (depending on size) are passed in regs.
5213 Slot  Stack      Integral  Float   Float in structure   Double   Long Double
5214 ----  -----      --------  -----   ------------------   ------   -----------
5215  15   [SP+248]             %f31       %f30,%f31          %d30
5216  14   [SP+240]             %f29       %f28,%f29          %d28        %q28
5217  13   [SP+232]             %f27       %f26,%f27          %d26
5218  12   [SP+224]             %f25       %f24,%f25          %d24        %q24
5219  11   [SP+216]             %f23       %f22,%f23          %d22
5220  10   [SP+208]             %f21       %f20,%f21          %d20        %q20
5221   9   [SP+200]             %f19       %f18,%f19          %d18
5222   8   [SP+192]             %f17       %f16,%f17          %d16        %q16
5223   7   [SP+184]             %f15       %f14,%f15          %d14
5224   6   [SP+176]             %f13       %f12,%f13          %d12        %q12
5225   5   [SP+168]    %o5      %f11       %f10,%f11          %d10
5226   4   [SP+160]    %o4      %f9        %f8,%f9            %d8         %q8
5227   3   [SP+152]    %o3      %f7        %f6,%f7            %d6
5228   2   [SP+144]    %o2      %f5        %f4,%f5            %d4         %q4
5229   1   [SP+136]    %o1      %f3        %f2,%f3            %d2
5230   0   [SP+128]    %o0      %f1        %f0,%f1            %d0         %q0
5232 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
5234 Integral arguments are always passed as 64-bit quantities appropriately extended.
5237 Passing of floating point values is handled as follows.
5238 If a prototype is in scope:
5239 If the value is in a named argument (i.e. not a stdarg function or a
5240 value not part of the `...') then the value is passed in the appropriate fp reg.
5242 If the value is part of the `...' and is passed in one of the first 6
5243 slots then the value is passed in the appropriate int reg.
5244 If the value is part of the `...' and is not passed in one of the first 6
5245 slots then the value is passed in memory.
5246 If a prototype is not in scope:
5247 If the value is one of the first 6 arguments the value is passed in the
5248 appropriate integer reg and the appropriate fp reg.
5249 If the value is not one of the first 6 arguments the value is passed in
5250 the appropriate fp reg and in memory.
5253 Summary of the calling conventions implemented by GCC on the SPARC:
5256 32-bit ABI:                 size      argument      return value
5258 small integer                <4       int. reg.     int. reg.
5259 word                          4       int. reg.     int. reg.
5260 double word                   8       int. reg.     int. reg.
5262 _Complex small integer       <8       int. reg.     int. reg.
5263 _Complex word                 8       int. reg.     int. reg.
5264 _Complex double word         16       memory        int. reg.
5266 vector integer              <=8       int. reg.     FP reg.
5267 vector integer               >8       memory        memory
5269 float                         4       int. reg.     FP reg.
5270 double                        8       int. reg.     FP reg.
5271 long double                  16       memory        memory
5273 _Complex float                8       memory        FP reg.
5274 _Complex double              16       memory        FP reg.
5275 _Complex long double         32       memory        FP reg.
5277 vector float                any       memory        memory
5279 aggregate                   any       memory        memory
5284 64-bit ABI:                 size      argument      return value
5286 small integer                <8       int. reg.     int. reg.
5287 word                          8       int. reg.     int. reg.
5288 double word                  16       int. reg.     int. reg.
5290 _Complex small integer      <16       int. reg.     int. reg.
5291 _Complex word                16       int. reg.     int. reg.
5292 _Complex double word         32       memory        int. reg.
5294 vector integer             <=16       FP reg.       FP reg.
5295 vector integer          16<s<=32      memory        FP reg.
5296 vector integer              >32       memory        memory
5298 float                         4       FP reg.       FP reg.
5299 double                        8       FP reg.       FP reg.
5300 long double                  16       FP reg.       FP reg.
5302 _Complex float                8       FP reg.       FP reg.
5303 _Complex double              16       FP reg.       FP reg.
5304 _Complex long double         32       memory        FP reg.
5306 vector float               <=16       FP reg.       FP reg.
5307 vector float            16<s<=32      memory        FP reg.
5308 vector float                >32       memory        memory
5310 aggregate                  <=16       reg.          reg.
5311 aggregate               16<s<=32      memory        reg.
5312 aggregate                   >32       memory        memory
5316 Note #1: complex floating-point types follow the extended SPARC ABIs as
5317 implemented by the Sun compiler.
5319 Note #2: integral vector types follow the scalar floating-point types
5320 conventions to match what is implemented by the Sun VIS SDK.
5322 Note #3: floating-point vector types follow the aggregate types conventions. */
5326 /* Maximum number of int regs for args. */
5327 #define SPARC_INT_ARG_MAX 6
5328 /* Maximum number of fp regs for args. */
5329 #define SPARC_FP_ARG_MAX 16
5331 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
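/* For illustration only: ROUND_ADVANCE counts the argument slots a value
   occupies.  A standalone sketch with UNITS_PER_WORD fixed at 8 (the
   TARGET_ARCH64 case); this helper is hypothetical, not part of this
   file.  */
#if 0
#include <assert.h>

#define WORD_BYTES 8
#define ROUND_ADVANCE_EX(SIZE) (((SIZE) + WORD_BYTES - 1) / WORD_BYTES)

static void
round_advance_example (void)
{
  assert (ROUND_ADVANCE_EX (4) == 1);	/* int: one slot */
  assert (ROUND_ADVANCE_EX (13) == 2);	/* 13-byte struct: two slots */
  assert (ROUND_ADVANCE_EX (16) == 2);	/* 16-byte struct: two slots */
}
#endif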
5333 /* Handle the INIT_CUMULATIVE_ARGS macro.
5334 Initialize a variable CUM of type CUMULATIVE_ARGS
5335 for a call to a function whose data type is FNTYPE.
5336 For a library call, FNTYPE is 0. */
5339 init_cumulative_args (struct sparc_args *cum, tree fntype,
5340 rtx libname ATTRIBUTE_UNUSED,
5341 tree fndecl ATTRIBUTE_UNUSED)
5344 cum->words = 0; cum->prototype_p = fntype && prototype_p (fntype);
5345 cum->libcall_p = fntype == 0;
5348 /* Handle promotion of pointer and integer arguments. */
5350 static enum machine_mode
5351 sparc_promote_function_mode (const_tree type,
5352 enum machine_mode mode,
5354 const_tree fntype ATTRIBUTE_UNUSED,
5355 int for_return ATTRIBUTE_UNUSED)
5357 if (type != NULL_TREE && POINTER_TYPE_P (type))
5359 *punsignedp = POINTERS_EXTEND_UNSIGNED; return Pmode;
5363 /* Integral arguments are passed as full words, as per the ABI. */
5364 if (GET_MODE_CLASS (mode) == MODE_INT
5365 && GET_MODE_SIZE (mode) < UNITS_PER_WORD) return word_mode; return mode;
5371 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
5374 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
5376 return TARGET_ARCH64 ? true : false;
5379 /* Scan the record type TYPE and return the following predicates:
5380 - INTREGS_P: the record contains at least one field or sub-field
5381 that is eligible for promotion in integer registers.
5382 - FP_REGS_P: the record contains at least one field or sub-field
5383 that is eligible for promotion in floating-point registers.
5384 - PACKED_P: the record contains at least one field that is packed.
5386 Sub-fields are not taken into account for the PACKED_P predicate. */
5389 scan_record_type (const_tree type, int *intregs_p, int *fpregs_p, int *packed_p)
5394 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5396 if (TREE_CODE (field) == FIELD_DECL)
5398 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5399 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
5400 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5401 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE) && TARGET_FPU) *fpregs_p = 1; else *intregs_p = 1;
5407 if (packed_p && DECL_PACKED (field)) *packed_p = 1;
5413 /* Compute the slot number to pass an argument in.
5414 Return the slot number or -1 if passing on the stack.
5416 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5417 the preceding args and about the function being called.
5418 MODE is the argument's machine mode.
5419 TYPE is the data type of the argument (as a tree).
5420 This is null for libcalls where that information may not be available.
5422 NAMED is nonzero if this argument is a named parameter
5423 (otherwise it is an extra parameter matching an ellipsis).
5424 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
5425 *PREGNO records the register number to use if scalar type.
5426 *PPADDING records the amount of padding needed in words. */
5429 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
5430 const_tree type, bool named, bool incoming_p,
5431 int *pregno, int *ppadding)
5433 int regbase = (incoming_p
5434 ? SPARC_INCOMING_INT_ARG_FIRST
5435 : SPARC_OUTGOING_INT_ARG_FIRST);
5436 int slotno = cum->words;
5437 enum mode_class mclass;
5442 if (type && TREE_ADDRESSABLE (type)) return -1;
5448 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0) return -1;
5451 /* For SPARC64, objects requiring 16-byte alignment get it. */
5453 if (TARGET_ARCH64 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
5454 && (slotno & 1) != 0)
5455 slotno++, *ppadding = 1;
5457 mclass = GET_MODE_CLASS (mode);
5458 if (type && TREE_CODE (type) == VECTOR_TYPE)
5460 /* Vector types deserve special treatment because they are
5461 polymorphic wrt their mode, depending upon whether VIS
5462 instructions are enabled. */
5463 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5465 /* The SPARC port defines no floating-point vector modes. */
5466 gcc_assert (mode == BLKmode);
5470 /* Integral vector types should either have a vector
5471 mode or an integral mode, because we are guaranteed
5472 by pass_by_reference that their size is not greater
5473 than 16 bytes and TImode is 16-byte wide. */
5474 gcc_assert (mode != BLKmode);
5476 /* Vector integers are handled like floats according to the Sun VIS SDK. */
5478 mclass = MODE_FLOAT;
5485 switch (mclass) { case MODE_FLOAT: case MODE_COMPLEX_FLOAT:
5486 case MODE_VECTOR_INT:
5487 if (TARGET_ARCH64 && TARGET_FPU && named)
5489 if (slotno >= SPARC_FP_ARG_MAX) return -1;
5491 regno = SPARC_FP_ARG_FIRST + slotno * 2;
5492 /* Arguments filling only one single FP register are
5493 right-justified in the outer double FP register. */
5494 if (GET_MODE_SIZE (mode) <= 4) regno++;
5501 break; case MODE_INT: case MODE_COMPLEX_INT:
5502 if (slotno >= SPARC_INT_ARG_MAX) return -1;
5504 regno = regbase + slotno;
5508 break; case MODE_RANDOM: if (mode == VOIDmode)
5509 /* MODE is VOIDmode when generating the actual call. */ return -1;
5512 gcc_assert (mode == BLKmode);
5516 if (TARGET_ARCH32 || !type || (TREE_CODE (type) != VECTOR_TYPE
5517 && TREE_CODE (type) != RECORD_TYPE))
5519 if (slotno >= SPARC_INT_ARG_MAX) return -1;
5521 regno = regbase + slotno;
5523 else /* TARGET_ARCH64 && type */
5525 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
5527 /* First see what kinds of registers we would need. */
5528 if (TREE_CODE (type) == VECTOR_TYPE) fpregs_p = 1;
5531 else scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
5533 /* The ABI obviously doesn't specify how packed structures
5534 are passed. These are defined to be passed in int regs
5535 if possible, otherwise memory. */
5536 if (packed_p || !named)
5537 fpregs_p = 0, intregs_p = 1;
5539 /* If all arg slots are filled, then must pass on stack. */
5540 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
5543 /* If there are only int args and all int arg slots are filled,
5544 then must pass on stack. */
5545 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX) return -1;
5548 /* Note that even if all int arg slots are filled, fp members may
5549 still be passed in regs if such regs are available.
5550 *PREGNO isn't set because there may be more than one, it's up
5551 to the caller to compute them. */ break; default: gcc_unreachable (); } return slotno;
5564 /* Handle recursive register counting for structure field layout. */
5566 struct function_arg_record_value_parms
5568 rtx ret; /* return expression being built. */
5569 int slotno; /* slot number of the argument. */
5570 int named; /* whether the argument is named. */
5571 int regbase; /* regno of the base register. */
5572 int stack; /* 1 if part of the argument is on the stack. */
5573 int intoffset; /* offset of the first pending integer field. */
5574 unsigned int nregs; /* number of words passed in registers. */
5577 static void function_arg_record_value_3
5578 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
5579 static void function_arg_record_value_2
5580 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5581 static void function_arg_record_value_1
5582 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5583 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
5584 static rtx function_arg_union_value (int, enum machine_mode, int, int);
5586 /* A subroutine of function_arg_record_value. Traverse the structure
5587 recursively and determine how many registers will be required. */
5590 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
5591 struct function_arg_record_value_parms *parms,
5596 /* We need to compute how many registers are needed so we can
5597 allocate the PARALLEL but before we can do that we need to know
5598 whether there are any packed fields. The ABI obviously doesn't
5599 specify how structures are passed in this case, so they are
5600 defined to be passed in int regs if possible, otherwise memory,
5601 regardless of whether there are fp values present. */
5604 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5606 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field)) { packed_p = true; break; }
5613 /* Compute how many registers we need. */
5614 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5616 if (TREE_CODE (field) == FIELD_DECL)
5618 HOST_WIDE_INT bitpos = startbitpos;
5620 if (DECL_SIZE (field) != 0)
5622 if (integer_zerop (DECL_SIZE (field))) continue;
5625 if (host_integerp (bit_position (field), 1))
5626 bitpos += int_bit_position (field);
5629 /* ??? FIXME: else assume zero offset. */
5631 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5632 function_arg_record_value_1 (TREE_TYPE (field),
5636 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5637 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5642 if (parms->intoffset != -1)
5644 unsigned int startbit, endbit;
5645 int intslots, this_slotno;
5647 startbit = parms->intoffset & -BITS_PER_WORD;
5648 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5650 intslots = (endbit - startbit) / BITS_PER_WORD;
5651 this_slotno = parms->slotno + parms->intoffset
5654 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5656 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5657 /* We need to pass this field on the stack. */ parms->stack = 1;
5661 parms->nregs += intslots;
5662 parms->intoffset = -1;
5665 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
5666 If it wasn't true we wouldn't be here. */
5667 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5668 && DECL_MODE (field) == BLKmode)
5669 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5670 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE) parms->nregs += 2; else parms->nregs += 1;
5677 if (parms->intoffset == -1)
5678 parms->intoffset = bitpos;
5684 /* A subroutine of function_arg_record_value. Assign the bits of the
5685 structure between parms->intoffset and bitpos to integer registers. */
5688 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
5689 struct function_arg_record_value_parms *parms)
5691 enum machine_mode mode; unsigned int regno; rtx reg;
5693 unsigned int startbit, endbit;
5694 int this_slotno, intslots, intoffset;
5697 if (parms->intoffset == -1) return;
5700 intoffset = parms->intoffset;
5701 parms->intoffset = -1;
5703 startbit = intoffset & -BITS_PER_WORD;
5704 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5705 intslots = (endbit - startbit) / BITS_PER_WORD;
5706 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
5708 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
5712 /* If this is the trailing part of a word, only load that much into
5713 the register. Otherwise load the whole register. Note that in
5714 the latter case we may pick up unwanted bits. It's not a problem
5715 at the moment but we may wish to revisit it. */
5717 if (intoffset % BITS_PER_WORD != 0)
5718 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD, MODE_INT); else mode = word_mode;
5723 intoffset /= BITS_PER_UNIT;
5726 do { regno = parms->regbase + this_slotno;
5727 reg = gen_rtx_REG (mode, regno);
5728 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5729 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5732 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1; this_slotno += 1; mode = word_mode; parms->nregs += 1; intslots -= 1; }
5737 while (intslots > 0);
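/* For illustration only: the slot arithmetic above, as a standalone sketch
   with BITS_PER_WORD fixed at 64 (the TARGET_ARCH64 case); hypothetical
   helper, not part of this file.  The span [intoffset, bitpos) is widened
   to whole words: intoffset is rounded down, bitpos rounded up.  */
#if 0
static int
int_slots_needed (int intoffset, int bitpos)
{
  int startbit = intoffset & -64;	/* round down to a word boundary */
  int endbit = (bitpos + 63) & -64;	/* round up to a word boundary */
  return (endbit - startbit) / 64;
  /* e.g. intoffset = 32, bitpos = 96:  (128 - 0) / 64 == 2 slots.  */
}
#endif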
5740 /* A subroutine of function_arg_record_value. Traverse the structure
5741 recursively and assign bits to floating point registers. Track which
5742 bits in between need integer registers; invoke function_arg_record_value_3
5743 to make that happen. */
5746 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
5747 struct function_arg_record_value_parms *parms,
5753 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5755 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field)) { packed_p = true; break; }
5762 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5764 if (TREE_CODE (field) == FIELD_DECL)
5766 HOST_WIDE_INT bitpos = startbitpos;
5768 if (DECL_SIZE (field) != 0)
5770 if (integer_zerop (DECL_SIZE (field))) continue;
5773 if (host_integerp (bit_position (field), 1))
5774 bitpos += int_bit_position (field);
5777 /* ??? FIXME: else assume zero offset. */
5779 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5780 function_arg_record_value_2 (TREE_TYPE (field),
5784 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5785 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5790 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5791 int regno, nregs, pos;
5792 enum machine_mode mode = DECL_MODE (field);
5795 function_arg_record_value_3 (bitpos, parms);
5797 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE && mode == BLKmode)
5800 { mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5801 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field)); }
5803 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5805 { mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field))); nregs = 2; } else nregs = 1;
5811 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5812 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0) regno++;
5814 reg = gen_rtx_REG (mode, regno);
5815 pos = bitpos / BITS_PER_UNIT;
5816 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5817 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5821 regno += GET_MODE_SIZE (mode) / 4;
5822 reg = gen_rtx_REG (mode, regno);
5823 pos += GET_MODE_SIZE (mode);
5824 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5825 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5831 if (parms->intoffset == -1)
5832 parms->intoffset = bitpos;
5838 /* Used by function_arg and sparc_function_value_1 to implement the complex
5839 conventions of the 64-bit ABI for passing and returning structures.
5840 Return an expression valid as a return value for the FUNCTION_ARG
5841 and TARGET_FUNCTION_VALUE.
5843 TYPE is the data type of the argument (as a tree).
5844 This is null for libcalls where that information may not be available.
5846 MODE is the argument's machine mode.
5847 SLOTNO is the index number of the argument's slot in the parameter array.
5848 NAMED is nonzero if this argument is a named parameter
5849 (otherwise it is an extra parameter matching an ellipsis).
5850 REGBASE is the regno of the base register for the parameter array. */
5853 function_arg_record_value (const_tree type, enum machine_mode mode,
5854 int slotno, int named, int regbase)
5856 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5857 struct function_arg_record_value_parms parms;
5860 parms.ret = NULL_RTX;
5861 parms.slotno = slotno;
5862 parms.named = named;
5863 parms.regbase = regbase; parms.stack = 0;
5866 /* Compute how many registers we need. */
5868 parms.nregs = 0; parms.intoffset = 0;
5869 function_arg_record_value_1 (type, 0, &parms, false);
5871 /* Take into account pending integer fields. */
5872 if (parms.intoffset != -1)
5874 unsigned int startbit, endbit;
5875 int intslots, this_slotno;
5877 startbit = parms.intoffset & -BITS_PER_WORD;
5878 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5879 intslots = (endbit - startbit) / BITS_PER_WORD;
5880 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5882 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5884 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5885 /* We need to pass this field on the stack. */ parms.stack = 1;
5889 parms.nregs += intslots;
5891 nregs = parms.nregs;
5893 /* Allocate the vector and handle some annoying special cases. */
5896 /* ??? Empty structure has no value? Duh? */
5899 /* Though there's nothing really to store, return a word register
5900 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5901 leads to breakage due to the fact that there are zero bytes to
5903 return gen_rtx_REG (mode, regbase);
5907 /* ??? C++ has structures with no fields, and yet a size. Give up
5908 for now and pass everything back in integer registers. */
5909 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5911 if (nregs + slotno > SPARC_INT_ARG_MAX)
5912 nregs = SPARC_INT_ARG_MAX - slotno;
5914 gcc_assert (nregs != 0);
5916 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5918 /* If at least one field must be passed on the stack, generate
5919 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5920 also be passed on the stack. We can't do much better because the
5921 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5922 of structures for which the fields passed exclusively in registers
5923 are not at the beginning of the structure. */
5925 if (parms.stack) XVECEXP (parms.ret, 0, 0)
5926 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5928 /* Fill in the entries. */
5930 parms.nregs = 0; parms.intoffset = 0;
5931 function_arg_record_value_2 (type, 0, &parms, false);
5932 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5934 gcc_assert (parms.nregs == nregs);
5939 /* Used by function_arg and sparc_function_value_1 to implement the conventions
5940 of the 64-bit ABI for passing and returning unions.
5941 Return an expression valid as a return value for the FUNCTION_ARG
5942 and TARGET_FUNCTION_VALUE.
5944 SIZE is the size in bytes of the union.
5945 MODE is the argument's machine mode.
5946 REGNO is the hard register the union will be passed in. */
5949 function_arg_union_value (int size, enum machine_mode mode, int slotno, int regno)
5952 int nwords = ROUND_ADVANCE (size), i;
5955 /* See comment in previous function for empty structures. */
5957 if (size == 0) return gen_rtx_REG (mode, regno);
5959 if (slotno == SPARC_INT_ARG_MAX - 1) nwords = 1;
5962 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5964 for (i = 0; i < nwords; i++)
5966 /* Unions are passed left-justified. */
5967 XVECEXP (regs, 0, i)
5968 = gen_rtx_EXPR_LIST (VOIDmode,
5969 gen_rtx_REG (word_mode, regno),
5970 GEN_INT (UNITS_PER_WORD * i));
5977 /* Used by function_arg and sparc_function_value_1 to implement the conventions
5978 for passing and returning large (BLKmode) vectors.
5979 Return an expression valid as a return value for the FUNCTION_ARG
5980 and TARGET_FUNCTION_VALUE.
5982 SIZE is the size in bytes of the vector (at least 8 bytes).
5983 REGNO is the FP hard register the vector will be passed in. */
5986 function_arg_vector_value (int size, int regno)
5988 int i, nregs = size / 8;
5991 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5993 for (i = 0; i < nregs; i++)
5995 XVECEXP (regs, 0, i)
5996 = gen_rtx_EXPR_LIST (VOIDmode,
5997 gen_rtx_REG (DImode, regno + 2*i), GEN_INT (i*8));
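/* For illustration: with size == 16 the function above returns

	(parallel [(expr_list (reg:DI <regno>)   (const_int 0))
		   (expr_list (reg:DI <regno>+2) (const_int 8))])

   i.e. two 8-byte pieces in FP registers; the register numbers advance
   by 2 per piece because FP register numbers count 4-byte quantities.  */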
6004 /* Determine where to put an argument to a function.
6005 Value is zero to push the argument on the stack,
6006 or a hard register in which to store the argument.
6008 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6009 the preceding args and about the function being called.
6010 MODE is the argument's machine mode.
6011 TYPE is the data type of the argument (as a tree).
6012 This is null for libcalls where that information may not be available.
6014 NAMED is true if this argument is a named parameter
6015 (otherwise it is an extra parameter matching an ellipsis).
6016 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
6017 TARGET_FUNCTION_INCOMING_ARG. */
6020 sparc_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
6021 const_tree type, bool named, bool incoming_p)
6023 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6025 int regbase = (incoming_p
6026 ? SPARC_INCOMING_INT_ARG_FIRST
6027 : SPARC_OUTGOING_INT_ARG_FIRST);
6028 int slotno, regno, padding;
6029 enum mode_class mclass = GET_MODE_CLASS (mode);
6031 slotno = function_arg_slotno (cum, mode, type, named, incoming_p, &regno, &padding); if (slotno == -1) return 0;
6036 /* Vector types deserve special treatment because they are polymorphic wrt
6037 their mode, depending upon whether VIS instructions are enabled. */
6038 if (type && TREE_CODE (type) == VECTOR_TYPE)
6040 HOST_WIDE_INT size = int_size_in_bytes (type);
6041 gcc_assert ((TARGET_ARCH32 && size <= 8)
6042 || (TARGET_ARCH64 && size <= 16));
6044 if (mode == BLKmode)
6045 return function_arg_vector_value (size,
6046 SPARC_FP_ARG_FIRST + 2*slotno);
6048 else mclass = MODE_FLOAT;
6052 if (TARGET_ARCH32) return gen_rtx_REG (mode, regno);
6054 /* Structures up to 16 bytes in size are passed in arg slots on the stack
6055 and are promoted to registers if possible. */
6056 if (type && TREE_CODE (type) == RECORD_TYPE)
6058 HOST_WIDE_INT size = int_size_in_bytes (type);
6059 gcc_assert (size <= 16);
6061 return function_arg_record_value (type, mode, slotno, named, regbase);
6064 /* Unions up to 16 bytes in size are passed in integer registers. */
6065 else if (type && TREE_CODE (type) == UNION_TYPE)
6067 HOST_WIDE_INT size = int_size_in_bytes (type);
6068 gcc_assert (size <= 16);
6070 return function_arg_union_value (size, mode, slotno, regno);
6073 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
6074 but also have the slot allocated for them.
6075 If no prototype is in scope fp values in register slots get passed
6076 in two places, either fp regs and int regs or fp regs and memory. */
6077 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6078 && SPARC_FP_REG_P (regno))
6080 rtx reg = gen_rtx_REG (mode, regno);
6081 if (cum->prototype_p || cum->libcall_p)
6083 /* "* 2" because fp reg numbers are recorded in 4 byte
6086 /* ??? This will cause the value to be passed in the fp reg and
6087 in the stack. When a prototype exists we want to pass the
6088 value in the reg but reserve space on the stack. That's an
6089 optimization, and is deferred [for a bit]. */
6090 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
6091 return gen_rtx_PARALLEL (mode, gen_rtvec (2,
6093 gen_rtx_EXPR_LIST (VOIDmode,
6094 NULL_RTX, const0_rtx),
6095 gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx)));
6099 /* ??? It seems that passing back a register even when past
6100 the area declared by REG_PARM_STACK_SPACE will allocate
6101 space appropriately, and will not copy the data onto the
6102 stack, exactly as we desire.
6104 This is due to locate_and_pad_parm being called in
6105 expand_call whenever reg_parm_stack_space > 0, which
6106 while beneficial to our example here, would seem to be
6107 in error from what had been intended. Ho hum... -- r~ */
6115 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
6119 /* On incoming, we don't need to know that the value
6120 is passed in %f0 and %i0, and it confuses other parts
6121 causing needless spillage even on the simplest cases. */
6125 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
6126 + (regno - SPARC_FP_ARG_FIRST) / 2);
6128 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6129 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
6131 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6135 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6136 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6137 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6142 /* All other aggregate types are passed in an integer register in a mode
6143 corresponding to the size of the type. */
6144 else if (type && AGGREGATE_TYPE_P (type))
6146 HOST_WIDE_INT size = int_size_in_bytes (type);
6147 gcc_assert (size <= 16);
6149 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6152 return gen_rtx_REG (mode, regno);
6155 /* Handle the TARGET_FUNCTION_ARG target hook. */
6158 sparc_function_arg (cumulative_args_t cum, enum machine_mode mode,
6159 const_tree type, bool named)
6161 return sparc_function_arg_1 (cum, mode, type, named, false);
6164 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
6167 sparc_function_incoming_arg (cumulative_args_t cum, enum machine_mode mode,
6168 const_tree type, bool named)
6170 return sparc_function_arg_1 (cum, mode, type, named, true);
6173 /* For sparc64, objects requiring 16 byte alignment are passed that way. */
6176 sparc_function_arg_boundary (enum machine_mode mode, const_tree type)
6178 return ((TARGET_ARCH64
6179 && (GET_MODE_ALIGNMENT (mode) == 128
6180 || (type && TYPE_ALIGN (type) == 128)))
6185 /* For an arg passed partly in registers and partly in memory,
6186 this is the number of bytes of registers used.
6187 For args passed entirely in registers or entirely in memory, zero.
6189 Any arg that starts in the first 6 regs but won't entirely fit in them
6190 needs partial registers on v8. On v9, structures with integer
6191 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
6192 values that begin in the last fp reg [where "last fp reg" varies with the
6193 mode] will be split between that reg and memory. */
6196 sparc_arg_partial_bytes (cumulative_args_t cum, enum machine_mode mode,
6197 tree type, bool named)
6199 int slotno, regno, padding;
6201 /* We pass false for incoming_p here, it doesn't matter. */
6202 slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
6203 false, &regno, &padding);
6210 if ((slotno + (mode == BLKmode
6211 ? ROUND_ADVANCE (int_size_in_bytes (type))
6212 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
6213 > SPARC_INT_ARG_MAX)
6214 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
6218 /* We are guaranteed by pass_by_reference that the size of the
6219 argument is not greater than 16 bytes, so we only need to return
6220 one word if the argument is partially passed in registers. */
6222 if (type && AGGREGATE_TYPE_P (type))
6224 int size = int_size_in_bytes (type);
6226 if (size > UNITS_PER_WORD
6227 && slotno == SPARC_INT_ARG_MAX - 1)
6228 return UNITS_PER_WORD;
6230 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
6231 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
6232 && ! (TARGET_FPU && named)))
6234 /* The complex types are passed as packed types. */
6235 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6236 && slotno == SPARC_INT_ARG_MAX - 1)
6237 return UNITS_PER_WORD;
6239 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6241 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
6243 return UNITS_PER_WORD;
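/* For illustration: in 32-bit mode a DImode argument starting in slot 5
   needs slots 5 and 6, but only slots 0-5 exist, so 4 bytes go in %o5 and
   4 on the stack; the function above returns (6 - 5) * 4 == 4 for it.  */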
6250 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
6251 Specify whether to pass the argument by reference. */
6254 sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
6255 enum machine_mode mode, const_tree type,
6256 bool named ATTRIBUTE_UNUSED)
6259 /* Original SPARC 32-bit ABI says that structures and unions,
6260 and quad-precision floats are passed by reference. For Pascal,
6261 also pass arrays by reference. All other base types are passed
6264 Extended ABI (as implemented by the Sun compiler) says that all
6265 complex floats are passed by reference. Pass complex integers
6266 in registers up to 8 bytes. More generally, enforce the 2-word
6267 cap for passing arguments in registers.
6269 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6270 integers are passed like floats of the same size, that is in
6271 registers up to 8 bytes. Pass all vector floats by reference
6272 like structure and unions. */
6273 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
6275 /* Catch CDImode, TFmode, DCmode and TCmode. */
6276 || GET_MODE_SIZE (mode) > 8
6278 && TREE_CODE (type) == VECTOR_TYPE
6279 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6281 /* Original SPARC 64-bit ABI says that structures and unions
6282 smaller than 16 bytes are passed in registers, as well as
6283 all other base types.
6285 Extended ABI (as implemented by the Sun compiler) says that
6286 complex floats are passed in registers up to 16 bytes. Pass
6287 all complex integers in registers up to 16 bytes. More generally,
6288 enforce the 2-word cap for passing arguments in registers.
6290 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6291 integers are passed like floats of the same size, that is in
6292 registers (up to 16 bytes). Pass all vector floats like structure and unions. */
6295 return ((type && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
6296 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
6297 /* Catch CTImode and TCmode. */
6298 || GET_MODE_SIZE (mode) > 16);
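/* For illustration (example types, following the rules above):

	struct { int a, b; } (8 bytes):   by reference in 32-bit, registers in 64-bit
	_Complex double (16 bytes):       by reference in 32-bit, registers in 64-bit
	long double / TFmode (16 bytes):  by reference in 32-bit, registers in 64-bit
	vector float:                     always by reference in 32-bit; in 64-bit
					  by reference only when larger than 16 bytes.  */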
6301 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
6302 Update the data in CUM to advance over an argument
6303 of mode MODE and data type TYPE.
6304 TYPE is null for libcalls where that information may not be available. */
6307 sparc_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
6308 const_tree type, bool named)
6310 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6313 /* We pass false for incoming_p here, it doesn't matter. */
6314 function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
6316 /* If argument requires leading padding, add it. */
6317 cum->words += padding;
6321 cum->words += (mode != BLKmode
6322 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
6323 : ROUND_ADVANCE (int_size_in_bytes (type)));
6327 if (type && AGGREGATE_TYPE_P (type))
6329 int size = int_size_in_bytes (type);
6333 if (size <= 8) ++cum->words; else if (size <= 16) cum->words += 2;
6335 else /* passed by reference */ ++cum->words;
6340 cum->words += (mode != BLKmode
6341 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
6342 : ROUND_ADVANCE (int_size_in_bytes (type)));
6347 /* Handle the FUNCTION_ARG_PADDING macro.
6348 For the 64-bit ABI structs are always stored left shifted in their argument slot. */
6352 function_arg_padding (enum machine_mode mode, const_tree type)
6354 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type)) return upward;
6357 /* Fall back to the default. */
6358 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
6361 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
6362 Specify whether to return the return value in memory. */
6365 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6368 /* Original SPARC 32-bit ABI says that structures and unions,
6369 and quad-precision floats are returned in memory. All other
6370 base types are returned in registers.
6372 Extended ABI (as implemented by the Sun compiler) says that
6373 all complex floats are returned in registers (8 FP registers
6374 at most for '_Complex long double'). Return all complex integers
6375 in registers (4 at most for '_Complex long long').
6377 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6378 integers are returned like floats of the same size, that is in
6379 registers up to 8 bytes and in memory otherwise. Return all
6380 vector floats in memory like structure and unions; note that
6381 they always have BLKmode like the latter. */
6382 return (TYPE_MODE (type) == BLKmode
6383 || TYPE_MODE (type) == TFmode
6384 || (TREE_CODE (type) == VECTOR_TYPE
6385 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6387 /* Original SPARC 64-bit ABI says that structures and unions
6388 smaller than 32 bytes are returned in registers, as well as
6389 all other base types.
6391 Extended ABI (as implemented by the Sun compiler) says that all
6392 complex floats are returned in registers (8 FP registers at most
6393 for '_Complex long double'). Return all complex integers in
6394 registers (4 at most for '_Complex TItype').
6396 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6397 integers are returned like floats of the same size, that is in
6398 registers. Return all vector floats like structure and unions;
6399 note that they always have BLKmode like the latter. */
6400 return (TYPE_MODE (type) == BLKmode
6401 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
6404 /* Handle the TARGET_STRUCT_VALUE target hook.
6405 Return where to find the structure return value address. */
6408 sparc_struct_value_rtx (tree fndecl, int incoming)
6417 mem = gen_frame_mem (Pmode, plus_constant (frame_pointer_rtx,
6418 STRUCT_VALUE_OFFSET));
6420 mem = gen_frame_mem (Pmode, plus_constant (stack_pointer_rtx,
6421 STRUCT_VALUE_OFFSET));
6423 /* Only follow the SPARC ABI for fixed-size structure returns.
6424 Variable size structure returns are handled per the normal
6425 procedures in GCC.  This is enabled by -mstd-struct-return.  */
6427 && sparc_std_struct_return
6428 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
6429 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
6431 /* We must check and adjust the return address, as it is
6432 optional as to whether the return object is really provided or not.  */
6434 rtx ret_reg = gen_rtx_REG (Pmode, 31);
6435 rtx scratch = gen_reg_rtx (SImode);
6436 rtx endlab = gen_label_rtx ();
6438 /* Calculate the return object size */
6439 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
6440 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
6441 /* Construct a temporary return value.  */
6443 rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
6445 /* Implement SPARC 32-bit psABI callee return struct checking:
6447 Fetch the instruction where we will return to and see if
6448 it's an unimp instruction (the most significant 10 bits will be zero).  */
6450 emit_move_insn (scratch, gen_rtx_MEM (SImode,
6451 plus_constant (ret_reg, 8)));
6452 /* Assume the size is valid and pre-adjust */
6453 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
6454 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
6456 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
6457 /* Write the address of the memory pointed to by temp_val into
6458 the memory pointed to by mem */
6459 emit_move_insn (mem, XEXP (temp_val, 0));
6460 emit_label (endlab);
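/* A rough C rendition of the check emitted above (sketch; ret_addr
   stands for the value of %i7 and struct_value_slot for the MEM in
   question, both hypothetical names):

     unsigned insn = *(unsigned *) (ret_addr + 8);
     ret_addr += 4;                      // assume a matching unimp
     if (insn != (size & 0xfff))         // no matching unimp after call
       {
         ret_addr -= 4;                  // undo the pre-adjustment
         *struct_value_slot = &temp_val; // redirect to the dummy object
       }
*/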
6467 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
6468 For v9, function return values are subject to the same rules as arguments,
6469 except that up to 32 bytes may be returned in registers. */
6472 sparc_function_value_1 (const_tree type, enum machine_mode mode,
6475 /* Beware that the two values are swapped here wrt function_arg. */
6476 int regbase = (outgoing
6477 ? SPARC_INCOMING_INT_ARG_FIRST
6478 : SPARC_OUTGOING_INT_ARG_FIRST);
6479 enum mode_class mclass = GET_MODE_CLASS (mode);
6482 /* Vector types deserve special treatment because they are polymorphic wrt
6483 their mode, depending upon whether VIS instructions are enabled. */
6484 if (type && TREE_CODE (type) == VECTOR_TYPE)
6486 HOST_WIDE_INT size = int_size_in_bytes (type);
6487 gcc_assert ((TARGET_ARCH32 && size <= 8)
6488 || (TARGET_ARCH64 && size <= 32));
6490 if (mode == BLKmode)
6491 return function_arg_vector_value (size,
6492 SPARC_FP_ARG_FIRST);
6494 mclass = MODE_FLOAT;
6497 if (TARGET_ARCH64 && type)
6499 /* Structures up to 32 bytes in size are returned in registers. */
6500 if (TREE_CODE (type) == RECORD_TYPE)
6502 HOST_WIDE_INT size = int_size_in_bytes (type);
6503 gcc_assert (size <= 32);
6505 return function_arg_record_value (type, mode, 0, 1, regbase);
6508 /* Unions up to 32 bytes in size are returned in integer registers. */
6509 else if (TREE_CODE (type) == UNION_TYPE)
6511 HOST_WIDE_INT size = int_size_in_bytes (type);
6512 gcc_assert (size <= 32);
6514 return function_arg_union_value (size, mode, 0, regbase);
6517 /* Objects that require it are returned in FP registers. */
6518 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6521 /* All other aggregate types are returned in an integer register in a
6522 mode corresponding to the size of the type. */
6523 else if (AGGREGATE_TYPE_P (type))
6525 /* All other aggregate types are passed in an integer register
6526 in a mode corresponding to the size of the type. */
6527 HOST_WIDE_INT size = int_size_in_bytes (type);
6528 gcc_assert (size <= 32);
6530 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6532 /* ??? We probably should have made the same ABI change in
6533 3.4.0 as the one we made for unions. The latter was
6534 required by the SCD though, while the former is not
6535 specified, so we favored compatibility and efficiency.
6537 Now we're stuck for aggregates larger than 16 bytes,
6538 because OImode vanished in the meantime. Let's not
6539 try to be unduly clever, and simply follow the ABI
6540 for unions in that case. */
6541 if (mode == BLKmode)
6542 return function_arg_union_value (size, mode, 0, regbase);
6547 /* We should only have pointer and integer types at this point. This
6548 must match sparc_promote_function_mode. */
6549 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6553 /* We should only have pointer and integer types at this point. This must
6554 match sparc_promote_function_mode. */
6555 else if (TARGET_ARCH32
6556 && mclass == MODE_INT
6557 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6560 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
6561 regno = SPARC_FP_ARG_FIRST;
6565 return gen_rtx_REG (mode, regno);
6568 /* Handle TARGET_FUNCTION_VALUE.
6569 On the SPARC, the value is found in the first "output" register, but the
6570 called function leaves it in the first "input" register. */
6573 sparc_function_value (const_tree valtype,
6574 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
6577 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
6580 /* Handle TARGET_LIBCALL_VALUE. */
6583 sparc_libcall_value (enum machine_mode mode,
6584 const_rtx fun ATTRIBUTE_UNUSED)
6586 return sparc_function_value_1 (NULL_TREE, mode, false);
6589 /* Handle FUNCTION_VALUE_REGNO_P.
6590 On the SPARC, the first "output" reg is used for integer values, and the
6591 first floating point register is used for floating point values. */
6594 sparc_function_value_regno_p (const unsigned int regno)
6596 return (regno == 8 || regno == 32);
6599 /* Do what is necessary for `va_start'. We look at the current function
6600 to determine if stdarg or varargs is used and return the address of
6601 the first unnamed parameter. */
6604 sparc_builtin_saveregs (void)
6606 int first_reg = crtl->args.info.words;
6610 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
6611 emit_move_insn (gen_rtx_MEM (word_mode,
6612 gen_rtx_PLUS (Pmode,
6614 GEN_INT (FIRST_PARM_OFFSET (0)
6617 gen_rtx_REG (word_mode,
6618 SPARC_INCOMING_INT_ARG_FIRST + regno));
6620 address = gen_rtx_PLUS (Pmode,
6622 GEN_INT (FIRST_PARM_OFFSET (0)
6623 + UNITS_PER_WORD * first_reg));
6628 /* Implement `va_start' for stdarg. */
6631 sparc_va_start (tree valist, rtx nextarg)
6633 nextarg = expand_builtin_saveregs ();
6634 std_expand_builtin_va_start (valist, nextarg);
6637 /* Implement `va_arg' for stdarg. */
6640 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6643 HOST_WIDE_INT size, rsize, align;
6646 tree ptrtype = build_pointer_type (type);
6648 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
6651 size = rsize = UNITS_PER_WORD;
6657 size = int_size_in_bytes (type);
6658 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6663 /* For SPARC64, objects requiring 16-byte alignment get it. */
6664 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
6665 align = 2 * UNITS_PER_WORD;
6667 /* SPARC-V9 ABI states that structures up to 16 bytes in size
6668 are left-justified in their slots. */
6669 if (AGGREGATE_TYPE_P (type))
6672 size = rsize = UNITS_PER_WORD;
6682 incr = fold_build_pointer_plus_hwi (incr, align - 1);
6683 incr = fold_convert (sizetype, incr);
6684 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
6686 incr = fold_convert (ptr_type_node, incr);
6689 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
6692 if (BYTES_BIG_ENDIAN && size < rsize)
6693 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
6697 addr = fold_convert (build_pointer_type (ptrtype), addr);
6698 addr = build_va_arg_indirect_ref (addr);
6701 /* If the address isn't aligned properly for the type, we need a temporary.
6702 FIXME: This is inefficient, usually we can do this in registers. */
6703 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
6705 tree tmp = create_tmp_var (type, "va_arg_tmp");
6706 tree dest_addr = build_fold_addr_expr (tmp);
6707 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
6708 3, dest_addr, addr, size_int (rsize));
6709 TREE_ADDRESSABLE (tmp) = 1;
6710 gimplify_and_add (copy, pre_p);
6715 addr = fold_convert (ptrtype, addr);
6717 incr = fold_build_pointer_plus_hwi (incr, rsize);
6718 gimplify_assign (valist, incr, post_p);
6720 return build_va_arg_indirect_ref (addr);
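/* A minimal C sketch (not from the sources) of the pointer arithmetic
   the GIMPLE built above performs at run time; ap, big_endian and
   indirect stand in for the va_list pointer, BYTES_BIG_ENDIAN and the
   pass-by-reference case:

     char *incr = ap;
     if (align)
       incr = (char *) (((uintptr_t) incr + align - 1)
                        & ~(uintptr_t) (align - 1));
     char *addr = incr;
     if (big_endian && size < rsize)
       addr += rsize - size;          // small args are right-justified
     ap = incr + rsize;               // advance past the slot
     result = indirect ? *(void **) addr : (void *) addr;
*/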
6723 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
6724 Specify whether the vector mode is supported by the hardware. */
6727 sparc_vector_mode_supported_p (enum machine_mode mode)
6729 return TARGET_VIS && VECTOR_MODE_P (mode);
6732 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
6734 static enum machine_mode
6735 sparc_preferred_simd_mode (enum machine_mode mode)
6753 /* Return the string to output an unconditional branch to LABEL, which is
6754 the operand number of the label.
6756 DEST is the destination insn (i.e. the label), INSN is the source. */
6759 output_ubranch (rtx dest, int label, rtx insn)
6761 static char string[64];
6762 bool v9_form = false;
6765 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
6767 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6768 - INSN_ADDRESSES (INSN_UID (insn)));
6769 /* Leave some instructions for "slop". */
6770 if (delta >= -260000 && delta < 260000)
6775 strcpy (string, "ba%*,pt\t%%xcc, ");
6777 strcpy (string, "b%*\t");
6779 p = strchr (string, '\0');
6790 /* Return the string to output a conditional branch to LABEL, which is
6791 the operand number of the label. OP is the conditional expression.
6792 XEXP (OP, 0) is assumed to be a condition code register (integer or
6793 floating point) and its mode specifies what kind of comparison we made.
6795 DEST is the destination insn (i.e. the label), INSN is the source.
6797 REVERSED is nonzero if we should reverse the sense of the comparison.
6799 ANNUL is nonzero if we should generate an annulling branch. */
6802 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
6805 static char string[64];
6806 enum rtx_code code = GET_CODE (op);
6807 rtx cc_reg = XEXP (op, 0);
6808 enum machine_mode mode = GET_MODE (cc_reg);
6809 const char *labelno, *branch;
6810 int spaces = 8, far;
6813 /* v9 branches are limited to +-1MB. If it is too far away,
6826 fbne,a,pn %fcc2, .LC29
6834 far = TARGET_V9 && (get_attr_length (insn) >= 3);
6837 /* Reversal of FP compares takes care -- an ordered compare
6838 becomes an unordered compare and vice versa. */
6839 if (mode == CCFPmode || mode == CCFPEmode)
6840 code = reverse_condition_maybe_unordered (code);
6842 code = reverse_condition (code);
6845 /* Start by writing the branch condition. */
6846 if (mode == CCFPmode || mode == CCFPEmode)
6897 /* ??? !v9: FP branches cannot be preceded by another floating point
6898 insn. Because there is currently no concept of pre-delay slots,
6899 we can fix this only by always emitting a nop before a floating point branch.  */
6904 strcpy (string, "nop\n\t");
6905 strcat (string, branch);
6918 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6930 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6951 strcpy (string, branch);
6953 spaces -= strlen (branch);
6954 p = strchr (string, '\0');
6956 /* Now add the annulling, the label, and a possible noop. */
6969 if (! far && insn && INSN_ADDRESSES_SET_P ())
6971 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6972 - INSN_ADDRESSES (INSN_UID (insn)));
6973 /* Leave some instructions for "slop". */
6974 if (delta < -260000 || delta >= 260000)
6978 if (mode == CCFPmode || mode == CCFPEmode)
6980 static char v9_fcc_labelno[] = "%%fccX, ";
6981 /* Set the char indicating the number of the fcc reg to use. */
6982 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6983 labelno = v9_fcc_labelno;
6986 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6990 else if (mode == CCXmode || mode == CCX_NOOVmode)
6992 labelno = "%%xcc, ";
6997 labelno = "%%icc, ";
7002 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7005 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7018 strcpy (p, labelno);
7019 p = strchr (p, '\0');
7022 strcpy (p, ".+12\n\t nop\n\tb\t");
7023 /* Skip the next insn if requested or
7024 if we know that it will be a nop. */
7025 if (annul || ! final_sequence)
7039 /* Emit a library call comparison between floating point X and Y.
7040 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
7041 Return the new operator to be used in the comparison sequence.
7043 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
7044 values as arguments instead of the TFmode registers themselves,
7045 that's why we cannot call emit_float_lib_cmp. */
7048 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
7051 rtx slot0, slot1, result, tem, tem2, libfunc;
7052 enum machine_mode mode;
7053 enum rtx_code new_comparison;
7058 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
7062 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
7066 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
7070 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
7074 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
7078 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
7089 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
7102 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
7103 emit_move_insn (slot0, x);
7110 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
7111 emit_move_insn (slot1, y);
7114 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7115 emit_library_call (libfunc, LCT_NORMAL,
7117 XEXP (slot0, 0), Pmode,
7118 XEXP (slot1, 0), Pmode);
7123 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7124 emit_library_call (libfunc, LCT_NORMAL,
7126 x, TFmode, y, TFmode);
7131 /* Immediately move the result of the libcall into a pseudo
7132 register so reload doesn't clobber the value if it needs
7133 the return register for a spill reg. */
7134 result = gen_reg_rtx (mode);
7135 emit_move_insn (result, hard_libcall_value (mode, libfunc));
7140 return gen_rtx_NE (VOIDmode, result, const0_rtx);
7143 new_comparison = (comparison == UNORDERED ? EQ : NE);
7144 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
7147 new_comparison = (comparison == UNGT ? GT : NE);
7148 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
7150 return gen_rtx_NE (VOIDmode, result, const2_rtx);
7152 tem = gen_reg_rtx (mode);
7154 emit_insn (gen_andsi3 (tem, result, const1_rtx));
7156 emit_insn (gen_anddi3 (tem, result, const1_rtx));
7157 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
7160 tem = gen_reg_rtx (mode);
7162 emit_insn (gen_addsi3 (tem, result, const1_rtx));
7164 emit_insn (gen_adddi3 (tem, result, const1_rtx));
7165 tem2 = gen_reg_rtx (mode);
7167 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
7169 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
7170 new_comparison = (comparison == UNEQ ? EQ : NE);
7171 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
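/* The return codes assumed from _Qp_cmp/_Q_cmp in the cases above are
   0 = equal, 1 = less, 2 = greater and 3 = unordered, so for example:

     UNORDERED:  result == 3
     UNLT:       (result & 1) != 0         // less or unordered
     UNEQ:       ((result + 1) & 2) == 0   // equal or unordered
     LTGT:       ((result + 1) & 2) != 0   // less or greater  */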
7177 /* Generate an unsigned DImode to FP conversion. This is the same code
7178 optabs would emit if we didn't have TFmode patterns. */
7181 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
7183 rtx neglab, donelab, i0, i1, f0, in, out;
7186 in = force_reg (DImode, operands[1]);
7187 neglab = gen_label_rtx ();
7188 donelab = gen_label_rtx ();
7189 i0 = gen_reg_rtx (DImode);
7190 i1 = gen_reg_rtx (DImode);
7191 f0 = gen_reg_rtx (mode);
7193 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
7195 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
7196 emit_jump_insn (gen_jump (donelab));
7199 emit_label (neglab);
7201 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
7202 emit_insn (gen_anddi3 (i1, in, const1_rtx));
7203 emit_insn (gen_iordi3 (i0, i0, i1));
7204 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
7205 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
7207 emit_label (donelab);
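/* In plain C the sequence generated above amounts to (sketch; double
   stands in for the target float mode):

     double u64_to_fp (unsigned long long in)
     {
       if ((long long) in >= 0)        // fits in the signed range
         return (double) (long long) in;
       // halve with a sticky low bit, convert, then double
       unsigned long long i0 = (in >> 1) | (in & 1);
       double f0 = (double) (long long) i0;
       return f0 + f0;
     }
*/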
7210 /* Generate an FP to unsigned DImode conversion. This is the same code
7211 optabs would emit if we didn't have TFmode patterns. */
7214 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
7216 rtx neglab, donelab, i0, i1, f0, in, out, limit;
7219 in = force_reg (mode, operands[1]);
7220 neglab = gen_label_rtx ();
7221 donelab = gen_label_rtx ();
7222 i0 = gen_reg_rtx (DImode);
7223 i1 = gen_reg_rtx (DImode);
7224 limit = gen_reg_rtx (mode);
7225 f0 = gen_reg_rtx (mode);
7227 emit_move_insn (limit,
7228 CONST_DOUBLE_FROM_REAL_VALUE (
7229 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
7230 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
7232 emit_insn (gen_rtx_SET (VOIDmode,
7234 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
7235 emit_jump_insn (gen_jump (donelab));
7238 emit_label (neglab);
7240 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
7241 emit_insn (gen_rtx_SET (VOIDmode,
7243 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
7244 emit_insn (gen_movdi (i1, const1_rtx));
7245 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
7246 emit_insn (gen_xordi3 (out, i0, i1));
7248 emit_label (donelab);
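/* Likewise, the reverse conversion above amounts to (sketch):

     unsigned long long fp_to_u64 (double in)
     {
       const double limit = 9223372036854775808.0;   // 2**63
       if (in < limit)                 // fits in the signed range
         return (long long) in;
       // fold into the signed range, convert, then restore the top bit
       return (unsigned long long) (long long) (in - limit)
              ^ (1ULL << 63);
     }
*/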
7251 /* Return the string to output a conditional branch to LABEL, testing
7252 register REG. LABEL is the operand number of the label; REG is the
7253 operand number of the reg. OP is the conditional expression. The mode
7254 of REG says what kind of comparison we made.
7256 DEST is the destination insn (i.e. the label), INSN is the source.
7258 REVERSED is nonzero if we should reverse the sense of the comparison.
7260 ANNUL is nonzero if we should generate an annulling branch. */
7263 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
7264 int annul, rtx insn)
7266 static char string[64];
7267 enum rtx_code code = GET_CODE (op);
7268 enum machine_mode mode = GET_MODE (XEXP (op, 0));
7273 /* Branches on a register are limited to +-128KB.  If the target is too far away,
7286 brgez,a,pn %o1, .LC29
7292 ba,pt %xcc, .LC29 */
7294 far = get_attr_length (insn) >= 3;
7296 /* If not floating-point or if EQ or NE, we can just reverse the code. */
7298 code = reverse_condition (code);
7300 /* Only 64 bit versions of these instructions exist. */
7301 gcc_assert (mode == DImode);
7303 /* Start by writing the branch condition. */
7308 strcpy (string, "brnz");
7312 strcpy (string, "brz");
7316 strcpy (string, "brgez");
7320 strcpy (string, "brlz");
7324 strcpy (string, "brlez");
7328 strcpy (string, "brgz");
7335 p = strchr (string, '\0');
7337 /* Now add the annulling, reg, label, and nop. */
7344 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7347 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7352 *p = p < string + 8 ? '\t' : ' ';
7360 int veryfar = 1, delta;
7362 if (INSN_ADDRESSES_SET_P ())
7364 delta = (INSN_ADDRESSES (INSN_UID (dest))
7365 - INSN_ADDRESSES (INSN_UID (insn)));
7366 /* Leave some instructions for "slop". */
7367 if (delta >= -260000 && delta < 260000)
7371 strcpy (p, ".+12\n\t nop\n\t");
7372 /* Skip the next insn if requested or
7373 if we know that it will be a nop. */
7374 if (annul || ! final_sequence)
7384 strcpy (p, "ba,pt\t%%xcc, ");
7398 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
7399 Such instructions cannot be used in the delay slot of a return insn on V9.
7400 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.  */
7404 epilogue_renumber (register rtx *where, int test)
7406 register const char *fmt;
7408 register enum rtx_code code;
7413 code = GET_CODE (*where);
7418 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
7420 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
7421 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
7429 /* Do not replace the frame pointer with the stack pointer because
7430 it can cause the delayed instruction to load below the stack.
7431 This occurs when instructions like:
7433 (set (reg/i:SI 24 %i0)
7434 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
7435 (const_int -20 [0xffffffec])) 0))
7437 are in the return delayed slot. */
7439 if (GET_CODE (XEXP (*where, 0)) == REG
7440 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
7441 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
7442 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
7447 if (SPARC_STACK_BIAS
7448 && GET_CODE (XEXP (*where, 0)) == REG
7449 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
7457 fmt = GET_RTX_FORMAT (code);
7459 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7464 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
7465 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
7468 else if (fmt[i] == 'e'
7469 && epilogue_renumber (&(XEXP (*where, i)), test))
7475 /* Leaf functions and non-leaf functions have different needs. */
7478 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
7481 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
7483 static const int *const reg_alloc_orders[] = {
7484 reg_leaf_alloc_order,
7485 reg_nonleaf_alloc_order};
7488 order_regs_for_local_alloc (void)
7490 static int last_order_nonleaf = 1;
7492 if (df_regs_ever_live_p (15) != last_order_nonleaf)
7494 last_order_nonleaf = !last_order_nonleaf;
7495 memcpy ((char *) reg_alloc_order,
7496 (const char *) reg_alloc_orders[last_order_nonleaf],
7497 FIRST_PSEUDO_REGISTER * sizeof (int));
7501 /* Return 1 if REG and MEM are legitimate enough to allow the various
7502 mem<-->reg splits to be run. */
7505 sparc_splitdi_legitimate (rtx reg, rtx mem)
7507 /* Punt if we are here by mistake. */
7508 gcc_assert (reload_completed);
7510 /* We must have an offsettable memory reference. */
7511 if (! offsettable_memref_p (mem))
7514 /* If we have legitimate args for ldd/std, we do not want
7515 the split to happen. */
7516 if ((REGNO (reg) % 2) == 0
7517 && mem_min_alignment (mem, 8))
7524 /* Return 1 if x and y are some kind of REG and they refer to
7525 different hard registers. This test is guaranteed to be
7526 run after reload. */
7529 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
7531 if (GET_CODE (x) != REG)
7533 if (GET_CODE (y) != REG)
7535 if (REGNO (x) == REGNO (y))
7540 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
7541 This makes them candidates for using ldd and std insns.
7543 Note reg1 and reg2 *must* be hard registers. */
7546 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
7548 /* We might have been passed a SUBREG. */
7549 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
7552 if (REGNO (reg1) % 2 != 0)
7555 /* Integer ldd is deprecated in SPARC V9 */
7556 if (TARGET_V9 && REGNO (reg1) < 32)
7559 return (REGNO (reg1) == REGNO (reg2) - 1);
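/* For example, the FP pair %f0/%f1 (hard regnos 32 and 33) qualifies,
   while %f1/%f2 does not because 33 is odd; on V9 an integer pair such
   as %o0/%o1 is rejected as well, since integer ldd is deprecated
   there.  */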
7562 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
7565 This can only happen when addr1 and addr2, the addresses in mem1
7566 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
7567 addr1 must also be aligned on a 64-bit boundary.
7569 Also iff dependent_reg_rtx is not null it should not be used to
7570 compute the address for mem1, i.e. we cannot optimize a sequence
7582 But, note that the transformation from:
7587 is perfectly fine. Thus, the peephole2 patterns always pass us
7588 the destination register of the first load, never the second one.
7590 For stores we don't have a similar problem, so dependent_reg_rtx is
7594 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
7598 HOST_WIDE_INT offset1;
7600 /* The mems cannot be volatile. */
7601 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
7604 /* MEM1 should be aligned on a 64-bit boundary. */
7605 if (MEM_ALIGN (mem1) < 64)
7608 addr1 = XEXP (mem1, 0);
7609 addr2 = XEXP (mem2, 0);
7611 /* Extract a register number and offset (if used) from the first addr. */
7612 if (GET_CODE (addr1) == PLUS)
7614 /* If not a REG, return zero. */
7615 if (GET_CODE (XEXP (addr1, 0)) != REG)
7619 reg1 = REGNO (XEXP (addr1, 0));
7620 /* The offset must be constant! */
7621 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
7623 offset1 = INTVAL (XEXP (addr1, 1));
7626 else if (GET_CODE (addr1) != REG)
7630 reg1 = REGNO (addr1);
7631 /* This was a simple (mem (reg)) expression. Offset is 0. */
7635 /* Make sure the second address is a (mem (plus (reg) (const_int))).  */
7636 if (GET_CODE (addr2) != PLUS)
7639 if (GET_CODE (XEXP (addr2, 0)) != REG
7640 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
7643 if (reg1 != REGNO (XEXP (addr2, 0)))
7646 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
7649 /* The first offset must be evenly divisible by 8 to ensure the
7650 address is 64 bit aligned. */
7651 if (offset1 % 8 != 0)
7654 /* The offset for the second addr must be 4 more than the first addr. */
7655 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
7658 /* All the tests passed.  addr1 and addr2 are valid for ldd and std insns.  */
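/* As an illustration (pre-V9 target, %o0 assumed 8-byte aligned), the
   pair

     ld	[%o0+8], %g2
     ld	[%o0+12], %g3

   passes every test above: both offsets are constant, the first is a
   multiple of 8, and the second is exactly 4 more, so the peephole may
   emit a single

     ldd	[%o0+8], %g2  */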
7663 /* Return 1 if reg is a pseudo, or is the first register in
7664 a hard register pair. This makes it suitable for use in
7665 ldd and std insns. */
7668 register_ok_for_ldd (rtx reg)
7670 /* We might have been passed a SUBREG. */
7674 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
7675 return (REGNO (reg) % 2 == 0);
7680 /* Return 1 if OP is a memory whose address is known to be
7681 aligned to 8-byte boundary, or a pseudo during reload.
7682 This makes it suitable for use in ldd and std insns. */
7685 memory_ok_for_ldd (rtx op)
7689 /* In 64-bit mode, we assume that the address is word-aligned. */
7690 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
7693 if ((reload_in_progress || reload_completed)
7694 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
7697 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
7699 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
7708 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
7711 sparc_print_operand_punct_valid_p (unsigned char code)
7724 /* Implement TARGET_PRINT_OPERAND.
7725 Print operand X (an rtx) in assembler syntax to file FILE.
7726 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
7727 For `%' followed by punctuation, CODE is the punctuation and X is null. */
7730 sparc_print_operand (FILE *file, rtx x, int code)
7735 /* Output an insn in a delay slot. */
7737 sparc_indent_opcode = 1;
7739 fputs ("\n\t nop", file);
7742 /* Output an annul flag if there's nothing for the delay slot and we
7743 are optimizing. This is always used with '(' below.
7744 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
7745 this is a dbx bug. So, we only do this when optimizing.
7746 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
7747 Always emit a nop in case the next instruction is a branch. */
7748 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
7752 /* Output a 'nop' if there's nothing for the delay slot and we are
7753 not optimizing. This is always used with '*' above. */
7754 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
7755 fputs ("\n\t nop", file);
7756 else if (final_sequence)
7757 sparc_indent_opcode = 1;
7760 /* Output the right displacement from the saved PC on function return.
7761 The caller may have placed an "unimp" insn immediately after the call
7762 so we have to account for it. This insn is used in the 32-bit ABI
7763 when calling a function that returns a non zero-sized structure. The
7764 64-bit ABI doesn't have it. Be careful to have this test be the same
7765 as that for the call. The exception is when sparc_std_struct_return
7766 is enabled, the psABI is followed exactly and the adjustment is made
7767 by the code in sparc_struct_value_rtx. The call emitted is the same
7768 when sparc_std_struct_return is enabled. */
7770 && cfun->returns_struct
7771 && !sparc_std_struct_return
7772 && DECL_SIZE (DECL_RESULT (current_function_decl))
7773 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
7775 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
7781 /* Output the Embedded Medium/Anywhere code model base register. */
7782 fputs (EMBMEDANY_BASE_REG, file);
7785 /* Print some local dynamic TLS name. */
7786 assemble_name (file, get_some_local_dynamic_name ());
7790 /* Adjust the operand to take into account a RESTORE operation. */
7791 if (GET_CODE (x) == CONST_INT)
7793 else if (GET_CODE (x) != REG)
7794 output_operand_lossage ("invalid %%Y operand");
7795 else if (REGNO (x) < 8)
7796 fputs (reg_names[REGNO (x)], file);
7797 else if (REGNO (x) >= 24 && REGNO (x) < 32)
7798 fputs (reg_names[REGNO (x)-16], file);
7800 output_operand_lossage ("invalid %%Y operand");
7803 /* Print out the low order register name of a register pair. */
7804 if (WORDS_BIG_ENDIAN)
7805 fputs (reg_names[REGNO (x)+1], file);
7807 fputs (reg_names[REGNO (x)], file);
7810 /* Print out the high order register name of a register pair. */
7811 if (WORDS_BIG_ENDIAN)
7812 fputs (reg_names[REGNO (x)], file);
7814 fputs (reg_names[REGNO (x)+1], file);
7817 /* Print out the second register name of a register pair or quad.
7818 I.e., R (%o0) => %o1. */
7819 fputs (reg_names[REGNO (x)+1], file);
7822 /* Print out the third register name of a register quad.
7823 I.e., S (%o0) => %o2. */
7824 fputs (reg_names[REGNO (x)+2], file);
7827 /* Print out the fourth register name of a register quad.
7828 I.e., T (%o0) => %o3. */
7829 fputs (reg_names[REGNO (x)+3], file);
7832 /* Print a condition code register. */
7833 if (REGNO (x) == SPARC_ICC_REG)
7835 /* We don't handle CC[X]_NOOVmode because they're not supposed to occur here.  */
7837 if (GET_MODE (x) == CCmode)
7838 fputs ("%icc", file);
7839 else if (GET_MODE (x) == CCXmode)
7840 fputs ("%xcc", file);
7845 /* %fccN register */
7846 fputs (reg_names[REGNO (x)], file);
7849 /* Print the operand's address only. */
7850 output_address (XEXP (x, 0));
7853 /* In this case we need a register. Use %g0 if the
7854 operand is const0_rtx. */
7856 if (x == const0_rtx || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
7858 fputs ("%g0", file);
7865 switch (GET_CODE (x))
7867 case IOR: fputs ("or", file); break;
7868 case AND: fputs ("and", file); break;
7869 case XOR: fputs ("xor", file); break;
7870 default: output_operand_lossage ("invalid %%A operand");
7875 switch (GET_CODE (x))
7877 case IOR: fputs ("orn", file); break;
7878 case AND: fputs ("andn", file); break;
7879 case XOR: fputs ("xnor", file); break;
7880 default: output_operand_lossage ("invalid %%B operand");
7884 /* These are used by the conditional move instructions. */
7888 enum rtx_code rc = GET_CODE (x);
7892 enum machine_mode mode = GET_MODE (XEXP (x, 0));
7893 if (mode == CCFPmode || mode == CCFPEmode)
7894 rc = reverse_condition_maybe_unordered (GET_CODE (x));
7896 rc = reverse_condition (GET_CODE (x));
7900 case NE: fputs ("ne", file); break;
7901 case EQ: fputs ("e", file); break;
7902 case GE: fputs ("ge", file); break;
7903 case GT: fputs ("g", file); break;
7904 case LE: fputs ("le", file); break;
7905 case LT: fputs ("l", file); break;
7906 case GEU: fputs ("geu", file); break;
7907 case GTU: fputs ("gu", file); break;
7908 case LEU: fputs ("leu", file); break;
7909 case LTU: fputs ("lu", file); break;
7910 case LTGT: fputs ("lg", file); break;
7911 case UNORDERED: fputs ("u", file); break;
7912 case ORDERED: fputs ("o", file); break;
7913 case UNLT: fputs ("ul", file); break;
7914 case UNLE: fputs ("ule", file); break;
7915 case UNGT: fputs ("ug", file); break;
7916 case UNGE: fputs ("uge", file); break;
7917 case UNEQ: fputs ("ue", file); break;
7918 default: output_operand_lossage (code == 'c'
7919 ? "invalid %%c operand"
7920 : "invalid %%C operand");
7925 /* These are used by the movr instruction pattern. */
7929 enum rtx_code rc = (code == 'd'
7930 ? reverse_condition (GET_CODE (x))
7934 case NE: fputs ("ne", file); break;
7935 case EQ: fputs ("e", file); break;
7936 case GE: fputs ("gez", file); break;
7937 case LT: fputs ("lz", file); break;
7938 case LE: fputs ("lez", file); break;
7939 case GT: fputs ("gz", file); break;
7940 default: output_operand_lossage (code == 'd'
7941 ? "invalid %%d operand"
7942 : "invalid %%D operand");
7949 /* Print a sign-extended character. */
7950 int i = trunc_int_for_mode (INTVAL (x), QImode);
7951 fprintf (file, "%d", i);
7956 /* Operand must be a MEM; write its address. */
7957 if (GET_CODE (x) != MEM)
7958 output_operand_lossage ("invalid %%f operand");
7959 output_address (XEXP (x, 0));
7964 /* Print a sign-extended 32-bit value. */
7966 if (GET_CODE (x) == CONST_INT) i = INTVAL (x);
7968 else if (GET_CODE (x) == CONST_DOUBLE)
7969 i = CONST_DOUBLE_LOW (x);
7972 output_operand_lossage ("invalid %%s operand");
7975 i = trunc_int_for_mode (i, SImode);
7976 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7981 /* Do nothing special. */
7985 /* Undocumented flag. */
7986 output_operand_lossage ("invalid operand output code");
7989 if (GET_CODE (x) == REG)
7990 fputs (reg_names[REGNO (x)], file);
7991 else if (GET_CODE (x) == MEM)
7994 /* Poor Sun assembler doesn't understand absolute addressing. */
7995 if (CONSTANT_P (XEXP (x, 0)))
7996 fputs ("%g0+", file);
7997 output_address (XEXP (x, 0));
8000 else if (GET_CODE (x) == HIGH)
8002 fputs ("%hi(", file);
8003 output_addr_const (file, XEXP (x, 0));
8006 else if (GET_CODE (x) == LO_SUM)
8008 sparc_print_operand (file, XEXP (x, 0), 0);
8009 if (TARGET_CM_MEDMID)
8010 fputs ("+%l44(", file);
8012 fputs ("+%lo(", file);
8013 output_addr_const (file, XEXP (x, 1));
8016 else if (GET_CODE (x) == CONST_DOUBLE
8017 && (GET_MODE (x) == VOIDmode
8018 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
8020 if (CONST_DOUBLE_HIGH (x) == 0)
8021 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
8022 else if (CONST_DOUBLE_HIGH (x) == -1
8023 && CONST_DOUBLE_LOW (x) < 0)
8024 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
8026 output_operand_lossage ("long long constant not a valid immediate operand");
8028 else if (GET_CODE (x) == CONST_DOUBLE)
8029 output_operand_lossage ("floating point constant not a valid immediate operand");
8030 else { output_addr_const (file, x); }
8033 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
8036 sparc_print_operand_address (FILE *file, rtx x)
8038 register rtx base, index = 0;
8040 register rtx addr = x;
8043 fputs (reg_names[REGNO (addr)], file);
8044 else if (GET_CODE (addr) == PLUS)
8046 if (CONST_INT_P (XEXP (addr, 0)))
8047 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
8048 else if (CONST_INT_P (XEXP (addr, 1)))
8049 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
8051 base = XEXP (addr, 0), index = XEXP (addr, 1);
8052 if (GET_CODE (base) == LO_SUM)
8054 gcc_assert (USE_AS_OFFSETABLE_LO10
8056 && ! TARGET_CM_MEDMID);
8057 output_operand (XEXP (base, 0), 0);
8058 fputs ("+%lo(", file);
8059 output_address (XEXP (base, 1));
8060 fprintf (file, ")+%d", offset);
8064 fputs (reg_names[REGNO (base)], file);
8066 fprintf (file, "%+d", offset);
8067 else if (REG_P (index))
8068 fprintf (file, "+%s", reg_names[REGNO (index)]);
8069 else if (GET_CODE (index) == SYMBOL_REF
8070 || GET_CODE (index) == LABEL_REF
8071 || GET_CODE (index) == CONST)
8072 fputc ('+', file), output_addr_const (file, index);
8073 else gcc_unreachable ();
8076 else if (GET_CODE (addr) == MINUS
8077 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
8079 output_addr_const (file, XEXP (addr, 0));
8081 output_addr_const (file, XEXP (addr, 1));
8082 fputs ("-.)", file);
8084 else if (GET_CODE (addr) == LO_SUM)
8086 output_operand (XEXP (addr, 0), 0);
8087 if (TARGET_CM_MEDMID)
8088 fputs ("+%l44(", file);
8090 fputs ("+%lo(", file);
8091 output_address (XEXP (addr, 1));
8095 && GET_CODE (addr) == CONST
8096 && GET_CODE (XEXP (addr, 0)) == MINUS
8097 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
8098 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
8099 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
8101 addr = XEXP (addr, 0);
8102 output_addr_const (file, XEXP (addr, 0));
8103 /* Group the args of the second CONST in parentheses.  */
8105 /* Skip past the second CONST--it does nothing for us. */
8106 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
8107 /* Close the parenthesis. */
8112 output_addr_const (file, addr);
8116 /* Target hook for assembling integer objects. The sparc version has
8117 special handling for aligned DI-mode objects. */
8120 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
8122 /* ??? We only output .xword's for symbols and only then in environments
8123 where the assembler can handle them. */
8124 if (aligned_p && size == 8
8125 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
8129 assemble_integer_with_op ("\t.xword\t", x);
8134 assemble_aligned_integer (4, const0_rtx);
8135 assemble_aligned_integer (4, x);
8139 return default_assemble_integer (x, size, aligned_p);
8142 /* Return the value of a code used in the .proc pseudo-op that says
8143 what kind of result this function returns. For non-C types, we pick
8144 the closest C type. */
8146 #ifndef SHORT_TYPE_SIZE
8147 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
8150 #ifndef INT_TYPE_SIZE
8151 #define INT_TYPE_SIZE BITS_PER_WORD
8154 #ifndef LONG_TYPE_SIZE
8155 #define LONG_TYPE_SIZE BITS_PER_WORD
8158 #ifndef LONG_LONG_TYPE_SIZE
8159 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
8162 #ifndef FLOAT_TYPE_SIZE
8163 #define FLOAT_TYPE_SIZE BITS_PER_WORD
8166 #ifndef DOUBLE_TYPE_SIZE
8167 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8170 #ifndef LONG_DOUBLE_TYPE_SIZE
8171 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8175 sparc_type_code (register tree type)
8177 register unsigned long qualifiers = 0;
8178 register unsigned shift;
8180 /* Only the first 30 bits of the qualifier are valid. We must refrain from
8181 setting more, since some assemblers will give an error for this. Also,
8182 we must be careful to avoid shifts of 32 bits or more to avoid getting
8183 unpredictable results. */
8185 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
8187 switch (TREE_CODE (type))
8193 qualifiers |= (3 << shift);
8198 qualifiers |= (2 << shift);
8202 case REFERENCE_TYPE:
8204 qualifiers |= (1 << shift);
8208 return (qualifiers | 8);
8211 case QUAL_UNION_TYPE:
8212 return (qualifiers | 9);
8215 return (qualifiers | 10);
8218 return (qualifiers | 16);
8221 /* If this is a range type, consider it to be the underlying type.  */
8223 if (TREE_TYPE (type) != 0)
8226 /* Carefully distinguish all the standard types of C,
8227 without messing up if the language is not C. We do this by
8228 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
8229 look at both the names and the above fields, but that's redundant.
8230 Any type whose size is between two C types will be considered
8231 to be the wider of the two types. Also, we do not have a
8232 special code to use for "long long", so anything wider than
8233 long is treated the same. Note that we can't distinguish
8234 between "int" and "long" in this code if they are the same
8235 size, but that's fine, since neither can the assembler. */
8237 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
8238 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
8240 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
8241 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
8243 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
8244 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
8247 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
8250 /* If this is a range type, consider it to be the underlying type.  */
8252 if (TREE_TYPE (type) != 0)
8255 /* Carefully distinguish all the standard types of C,
8256 without messing up if the language is not C. */
8258 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
8259 return (qualifiers | 6);
8262 return (qualifiers | 7);
8264 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
8265 /* ??? We need to distinguish between double and float complex types,
8266 but I don't know how yet because I can't reach this code from
8267 existing front-ends. */
8268 return (qualifiers | 7); /* Who knows? */
8271 case BOOLEAN_TYPE: /* Boolean truth value type. */
8277 gcc_unreachable (); /* Not a type! */
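/* A worked example (sketch): for "unsigned char **" the loop above
   runs twice over POINTER_TYPE, accumulating (1 << 6) | (1 << 8), and
   the INTEGER_TYPE case then returns qualifiers | 12 for the unsigned
   char base type, i.e. 0x140 | 12 == 0x14c.  */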
8284 /* Nested function support. */
8286 /* Emit RTL insns to initialize the variable parts of a trampoline.
8287 FNADDR is an RTX for the address of the function's pure code.
8288 CXT is an RTX for the static chain value for the function.
8290 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
8291 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
8292 (to store insns). This is a bit excessive. Perhaps a different
8293 mechanism would be better here.
8295 Emit enough FLUSH insns to synchronize the data and instruction caches. */
8298 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8300 /* SPARC 32-bit trampoline:
	sethi	%hi(fn), %g1
	sethi	%hi(static), %g2
	jmp	%g1+%lo(fn)
	or	%g2, %lo(static), %g2
   SETHI i,r  = 00rr rrr1 00ii iiii iiii iiii iiii iiii
   JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii  */
8312 (adjust_address (m_tramp, SImode, 0),
8313 expand_binop (SImode, ior_optab,
8314 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
8315 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
8316 NULL_RTX, 1, OPTAB_DIRECT));
8319 (adjust_address (m_tramp, SImode, 4),
8320 expand_binop (SImode, ior_optab,
8321 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
8322 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
8323 NULL_RTX, 1, OPTAB_DIRECT));
8326 (adjust_address (m_tramp, SImode, 8),
8327 expand_binop (SImode, ior_optab,
8328 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
8329 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
8330 NULL_RTX, 1, OPTAB_DIRECT));
8333 (adjust_address (m_tramp, SImode, 12),
8334 expand_binop (SImode, ior_optab,
8335 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
8336 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
8337 NULL_RTX, 1, OPTAB_DIRECT));
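/* Pieced together, the four words stored above encode the trampoline
   shown in the comment at the head of this function (sketch):

     sethi  %hi(fnaddr), %g1    ! 0x03000000 | (fnaddr >> 10)
     sethi  %hi(cxt), %g2       ! 0x05000000 | (cxt >> 10)
     jmp    %g1+%lo(fnaddr)     ! 0x81c06000 | (fnaddr & 0x3ff)
     or     %g2, %lo(cxt), %g2  ! 0x8410a000 | (cxt & 0x3ff)  */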
8339 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
8340 aligned on a 16 byte boundary so one flush clears it all. */
8341 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
8342 if (sparc_cpu != PROCESSOR_ULTRASPARC
8343 && sparc_cpu != PROCESSOR_ULTRASPARC3
8344 && sparc_cpu != PROCESSOR_NIAGARA
8345 && sparc_cpu != PROCESSOR_NIAGARA2)
8346 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
8348 /* Call __enable_execute_stack after writing onto the stack to make sure
8349 the stack address is accessible. */
8350 #ifdef HAVE_ENABLE_EXECUTE_STACK
8351 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8352 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8357 /* The 64-bit version is simpler because it makes more sense to load the
8358 values as "immediate" data out of the trampoline. It's also easier since
8359 we can read the PC without clobbering a register. */
8362 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8364 /* SPARC 64-bit trampoline:
	rd	%pc, %g1
	ldx	[%g1+24], %g5
	jmp	%g5
	ldx	[%g1+16], %g5
   +16 bytes data: static chain value
   +24 bytes data: function address  */
8373 emit_move_insn (adjust_address (m_tramp, SImode, 0),
8374 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
8375 emit_move_insn (adjust_address (m_tramp, SImode, 4),
8376 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
8377 emit_move_insn (adjust_address (m_tramp, SImode, 8),
8378 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
8379 emit_move_insn (adjust_address (m_tramp, SImode, 12),
8380 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
8381 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
8382 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
8383 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
8385 if (sparc_cpu != PROCESSOR_ULTRASPARC
8386 && sparc_cpu != PROCESSOR_ULTRASPARC3
8387 && sparc_cpu != PROCESSOR_NIAGARA
8388 && sparc_cpu != PROCESSOR_NIAGARA2)
8389 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
8391 /* Call __enable_execute_stack after writing onto the stack to make sure
8392 the stack address is accessible. */
8393 #ifdef HAVE_ENABLE_EXECUTE_STACK
8394 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8395 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8399 /* Worker for TARGET_TRAMPOLINE_INIT. */
8402 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
8404 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
8405 cxt = force_reg (Pmode, cxt);
8407 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
8409 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
8412 /* Adjust the cost of a scheduling dependency. Return the new cost of
8413 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
8416 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8418 enum attr_type insn_type;
8420 if (! recog_memoized (insn))
8423 insn_type = get_attr_type (insn);
8425 if (REG_NOTE_KIND (link) == 0)
8427 /* Data dependency; DEP_INSN writes a register that INSN reads some
8430 /* if a load, then the dependence must be on the memory address;
8431 add an extra "cycle". Note that the cost could be two cycles
8432 if the reg was written late in an instruction group; we cannot tell here.  */
8434 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
8437 /* Get the delay only if the address of the store is the dependence. */
8438 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
8440 rtx pat = PATTERN(insn);
8441 rtx dep_pat = PATTERN (dep_insn);
8443 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8444 return cost; /* This should not happen! */
8446 /* The dependency between the two instructions was on the data that
8447 is being stored. Assume that this implies that the address of the
8448 store is not dependent. */
8449 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8452 return cost + 3; /* An approximation. */
8455 /* A shift instruction cannot receive its data from an instruction
8456 in the same cycle; add a one cycle penalty. */
8457 if (insn_type == TYPE_SHIFT)
8458 return cost + 3; /* Split before cascade into shift. */
8462 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
8463 INSN writes some cycles later. */
8465 /* These are only significant for the fpu unit; writing a fp reg before
8466 the fpu has finished with it stalls the processor. */
8468 /* Reusing an integer register causes no problems. */
8469 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8477 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8479 enum attr_type insn_type, dep_type;
8480 rtx pat = PATTERN(insn);
8481 rtx dep_pat = PATTERN (dep_insn);
8483 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
8486 insn_type = get_attr_type (insn);
8487 dep_type = get_attr_type (dep_insn);
8489 switch (REG_NOTE_KIND (link))
8492 /* Data dependency; DEP_INSN writes a register that INSN reads some
8499 /* Get the delay iff the address of the store is the dependence. */
8500 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8503 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8510 /* If a load, then the dependence must be on the memory address. If
8511 the addresses aren't equal, then it might be a false dependency.  */
8512 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
8514 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
8515 || GET_CODE (SET_DEST (dep_pat)) != MEM
8516 || GET_CODE (SET_SRC (pat)) != MEM
8517 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
8518 XEXP (SET_SRC (pat), 0)))
8526 /* Compare to branch latency is 0. There is no benefit from
8527 separating compare and branch. */
8528 if (dep_type == TYPE_COMPARE)
8530 /* Floating point compare to branch latency is less than
8531 compare to conditional move. */
8532 if (dep_type == TYPE_FPCMP)
8541 /* Anti-dependencies only penalize the fpu unit. */
8542 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8554 sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
8558 case PROCESSOR_SUPERSPARC:
8559 cost = supersparc_adjust_cost (insn, link, dep, cost);
8561 case PROCESSOR_HYPERSPARC:
8562 case PROCESSOR_SPARCLITE86X:
8563 cost = hypersparc_adjust_cost (insn, link, dep, cost);
8572 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
8573 int sched_verbose ATTRIBUTE_UNUSED,
8574 int max_ready ATTRIBUTE_UNUSED)
8578 sparc_use_sched_lookahead (void)
8580 if (sparc_cpu == PROCESSOR_NIAGARA
8581 || sparc_cpu == PROCESSOR_NIAGARA2)
8583 if (sparc_cpu == PROCESSOR_ULTRASPARC
8584 || sparc_cpu == PROCESSOR_ULTRASPARC3)
8586 if ((1 << sparc_cpu) &
8587 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
8588 (1 << PROCESSOR_SPARCLITE86X)))
8594 sparc_issue_rate (void)
8598 case PROCESSOR_NIAGARA:
8599 case PROCESSOR_NIAGARA2:
8603 /* Assume V9 processors are capable of at least dual-issue. */
8605 case PROCESSOR_SUPERSPARC:
8607 case PROCESSOR_HYPERSPARC:
8608 case PROCESSOR_SPARCLITE86X:
8610 case PROCESSOR_ULTRASPARC:
8611 case PROCESSOR_ULTRASPARC3:
8617 set_extends (rtx insn)
8619 register rtx pat = PATTERN (insn);
8621 switch (GET_CODE (SET_SRC (pat)))
8623 /* Load and some shift instructions zero extend. */
8626 /* sethi clears the high bits */
8628 /* LO_SUM is used with sethi. sethi cleared the high
8629 bits and the values used with lo_sum are positive */
8631 /* Store flag stores 0 or 1 */
8641 rtx op0 = XEXP (SET_SRC (pat), 0);
8642 rtx op1 = XEXP (SET_SRC (pat), 1);
8643 if (GET_CODE (op1) == CONST_INT)
8644 return INTVAL (op1) >= 0;
8645 if (GET_CODE (op0) != REG)
8647 if (sparc_check_64 (op0, insn) == 1)
8649 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8654 rtx op0 = XEXP (SET_SRC (pat), 0);
8655 rtx op1 = XEXP (SET_SRC (pat), 1);
8656 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
8658 if (GET_CODE (op1) == CONST_INT)
8659 return INTVAL (op1) >= 0;
8660 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8663 return GET_MODE (SET_SRC (pat)) == SImode;
8664 /* Positive integers leave the high bits zero. */
8666 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
8668 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
8671 return - (GET_MODE (SET_SRC (pat)) == SImode);
8673 return sparc_check_64 (SET_SRC (pat), insn);
8679 /* We _ought_ to have only one kind per function, but... */
8680 static GTY(()) rtx sparc_addr_diff_list;
8681 static GTY(()) rtx sparc_addr_list;
8684 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
8686 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
8688 sparc_addr_diff_list
8689 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
8691 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
8695 sparc_output_addr_vec (rtx vec)
8697 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8698 int idx, vlen = XVECLEN (body, 0);
8700 #ifdef ASM_OUTPUT_ADDR_VEC_START
8701 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8704 #ifdef ASM_OUTPUT_CASE_LABEL
8705 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8708 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8711 for (idx = 0; idx < vlen; idx++)
8713 ASM_OUTPUT_ADDR_VEC_ELT
8714 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
8717 #ifdef ASM_OUTPUT_ADDR_VEC_END
8718 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8723 sparc_output_addr_diff_vec (rtx vec)
8725 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8726 rtx base = XEXP (XEXP (body, 0), 0);
8727 int idx, vlen = XVECLEN (body, 1);
8729 #ifdef ASM_OUTPUT_ADDR_VEC_START
8730 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8733 #ifdef ASM_OUTPUT_CASE_LABEL
8734 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8737 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8740 for (idx = 0; idx < vlen; idx++)
8742 ASM_OUTPUT_ADDR_DIFF_ELT
8745 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
8746 CODE_LABEL_NUMBER (base));
8749 #ifdef ASM_OUTPUT_ADDR_VEC_END
8750 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8755 sparc_output_deferred_case_vectors (void)
8760 if (sparc_addr_list == NULL_RTX
8761 && sparc_addr_diff_list == NULL_RTX)
8764 /* Align to cache line in the function's code section. */
8765 switch_to_section (current_function_section ());
8767 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
8769 ASM_OUTPUT_ALIGN (asm_out_file, align);
8771 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
8772 sparc_output_addr_vec (XEXP (t, 0));
8773 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
8774 sparc_output_addr_diff_vec (XEXP (t, 0));
8776 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
8779 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
8780 unknown.  Return 1 if the high bits are zero, -1 if the register is sign-extended.  */
8783 sparc_check_64 (rtx x, rtx insn)
8785 /* If a register is set only once it is safe to ignore insns this
8786 code does not know how to handle. The loop will either recognize
8787 the single set and return the correct value or fail to recognize it and return 0.  */
8792 gcc_assert (GET_CODE (x) == REG);
8794 if (GET_MODE (x) == DImode)
8795 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
8797 if (flag_expensive_optimizations
8798 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
8804 insn = get_last_insn_anywhere ();
8809 while ((insn = PREV_INSN (insn)))
8811 switch (GET_CODE (insn))
8824 rtx pat = PATTERN (insn);
8825 if (GET_CODE (pat) != SET)
8827 if (rtx_equal_p (x, SET_DEST (pat)))
8828 return set_extends (insn);
8829 if (y && rtx_equal_p (y, SET_DEST (pat)))
8830 return set_extends (insn);
8831 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
8839 /* Returns assembly code to perform a DImode shift using
8840 a 64-bit global or out register on SPARC-V8+. */
8842 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
8844 static char asm_code[60];
8846 /* The scratch register is only required when the destination
8847 register is not a 64-bit global or out register. */
8848 if (which_alternative != 2)
8849 operands[3] = operands[0];
8851 /* We can only shift by constants <= 63. */
8852 if (GET_CODE (operands[2]) == CONST_INT)
8853 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
8855 if (GET_CODE (operands[1]) == CONST_INT)
8857 output_asm_insn ("mov\t%1, %3", operands);
8861 output_asm_insn ("sllx\t%H1, 32, %3", operands);
8862 if (sparc_check_64 (operands[1], insn) <= 0)
8863 output_asm_insn ("srl\t%L1, 0, %L1", operands);
8864 output_asm_insn ("or\t%L1, %3, %3", operands);
8867 strcpy(asm_code, opcode);
8869 if (which_alternative != 2)
8870 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
8872 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
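/* For instance, with the scratch-register alternative, once the input
   has been assembled into %3 by the sllx/srl/or sequence above, a left
   shift (opcode "sllx") expands to:

     sllx	%3, %2, %3	! the 64-bit shift proper
     srlx	%3, 32, %H0	! high word of the result
     mov	%3, %L0		! low word of the result  */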
8875 /* Output rtl to increment the profiler label LABELNO
8876 for profiling a function entry. */
8879 sparc_profile_hook (int labelno)
8884 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
8885 if (NO_PROFILE_COUNTERS)
8887 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
8891 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8892 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
8893 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
8897 #ifdef TARGET_SOLARIS
8898 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
8901 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
8902 tree decl ATTRIBUTE_UNUSED)
8904 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
8906 solaris_elf_asm_comdat_section (name, flags, decl);
8910 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
8912 if (!(flags & SECTION_DEBUG))
8913 fputs (",#alloc", asm_out_file);
8914 if (flags & SECTION_WRITE)
8915 fputs (",#write", asm_out_file);
8916 if (flags & SECTION_TLS)
8917 fputs (",#tls", asm_out_file);
8918 if (flags & SECTION_CODE)
8919 fputs (",#execinstr", asm_out_file);
8921 /* ??? Handle SECTION_BSS. */
8923 fputc ('\n', asm_out_file);
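/* For example, an ordinary writable data section comes out as

     .section	".data",#alloc,#write

   while a code section gets #execinstr instead of #write:

     .section	".text",#alloc,#execinstr  */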
8925 #endif /* TARGET_SOLARIS */
8927 /* We do not allow indirect calls to be optimized into sibling calls.
8929 We cannot use sibling calls when delayed branches are disabled
8930 because they will likely require the call delay slot to be filled.
8932 Also, on SPARC 32-bit we cannot emit a sibling call when the
8933 current function returns a structure. This is because the "unimp
8934 after call" convention would cause the callee to return to the
8935 wrong place. The generic code already disallows cases where the
8936 function being called returns a structure.
8938 It may seem strange that this last case could occur. Usually there
8939 is code after the call which jumps to epilogue code which stores the
8940 return value into the struct return area. That ought to invalidate
8941 the sibling call, right? Well, in the C++ case we can end up passing
8942 the pointer to the struct return area to a constructor (which returns
8943 void), after which nothing else happens. Such a sibling call would
8944 look valid without the added check here.
8946 VxWorks PIC PLT entries require the global pointer to be initialized
8947 on entry. We therefore can't emit sibling calls to them. */
8948 static bool
8949 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8950 {
8951 return (decl
8952 && flag_delayed_branch
8953 && (TARGET_ARCH64 || ! cfun->returns_struct)
8954 && !(TARGET_VXWORKS_RTP
8955 && flag_pic
8956 && !targetm.binds_local_p (decl)));
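
/* [Editorial illustration.]  A rough C analogue of the C++ case described
   above: the struct return area's address is handed to a void-returning
   "constructor" and nothing else happens, so the call looks tail-callable
   even though the 32-bit "unimp after call" convention forbids it.  */

struct big_example { int x[4]; };
extern void ctor_example (struct big_example *);

struct big_example
returns_struct_example (void)
{
  struct big_example b;
  ctor_example (&b);   /* in C++, really ctor (struct-return pointer) */
  return b;
}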
8959 /* libfunc renaming. */
8962 sparc_init_libfuncs (void)
8966 /* Use the subroutines that Sun's library provides for integer
8967 multiply and divide. The `*' prevents an underscore from
8968 being prepended by the compiler. .umul is a little faster
8969 than .mul. */
8970 set_optab_libfunc (smul_optab, SImode, "*.umul");
8971 set_optab_libfunc (sdiv_optab, SImode, "*.div");
8972 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
8973 set_optab_libfunc (smod_optab, SImode, "*.rem");
8974 set_optab_libfunc (umod_optab, SImode, "*.urem");
8976 /* TFmode arithmetic. These names are part of the SPARC 32-bit ABI. */
8977 set_optab_libfunc (add_optab, TFmode, "_Q_add");
8978 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
8979 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
8980 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
8981 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
8983 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
8984 is because with soft-float, the SFmode and DFmode sqrt
8985 instructions will be absent, and the compiler will notice and
8986 try to use the TFmode sqrt instruction for calls to the
8987 builtin function sqrt, but this fails. */
8989 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
8991 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
8992 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
8993 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
8994 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
8995 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
8996 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
8998 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
8999 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
9000 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
9001 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
9003 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
9004 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
9005 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
9006 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
9008 if (DITF_CONVERSION_LIBFUNCS)
9010 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
9011 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
9012 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
9013 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
9016 if (SUN_CONVERSION_LIBFUNCS)
9018 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
9019 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
9020 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
9021 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
9026 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
9027 do not exist in the library. Make sure the compiler does not
9028 emit calls to them by accident. (It should always use the
9029 hardware instructions.) */
9030 set_optab_libfunc (smul_optab, SImode, 0);
9031 set_optab_libfunc (sdiv_optab, SImode, 0);
9032 set_optab_libfunc (udiv_optab, SImode, 0);
9033 set_optab_libfunc (smod_optab, SImode, 0);
9034 set_optab_libfunc (umod_optab, SImode, 0);
9036 if (SUN_INTEGER_MULTIPLY_64)
9038 set_optab_libfunc (smul_optab, DImode, "__mul64");
9039 set_optab_libfunc (sdiv_optab, DImode, "__div64");
9040 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
9041 set_optab_libfunc (smod_optab, DImode, "__rem64");
9042 set_optab_libfunc (umod_optab, DImode, "__urem64");
9045 if (SUN_CONVERSION_LIBFUNCS)
9047 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
9048 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
9049 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
9050 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
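
/* [Editorial sketch.]  With the 32-bit registrations above, a plain
   `long double' operation lowers to a _Q_* libcall.  The prototype below
   is written out only to show the C-level shape of the call; the
   ABI-level calling convention may differ.  */

long double _Q_mul (long double, long double);

long double
tf_scale_example (long double a, long double b)
{
  return a * b;   /* emitted as a call to _Q_mul under soft TFmode */
}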
9055 #define def_builtin(NAME, CODE, TYPE) \
9056 add_builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
9059 /* Implement the TARGET_INIT_BUILTINS target hook.
9060 Create builtin functions for special SPARC instructions. */
9063 sparc_init_builtins (void)
9066 sparc_vis_init_builtins ();
9069 /* Create builtin functions for VIS 1.0 instructions. */
9072 sparc_vis_init_builtins (void)
9074 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
9075 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
9076 tree v4hi = build_vector_type (intHI_type_node, 4);
9077 tree v2hi = build_vector_type (intHI_type_node, 2);
9078 tree v2si = build_vector_type (intSI_type_node, 2);
9080 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
9081 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
9082 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
9083 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
9084 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
9085 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
9086 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
9087 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
9088 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
9089 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
9090 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
9091 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
9092 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
9094 intDI_type_node, 0);
9095 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
9097 intDI_type_node, 0);
9098 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
9100 intSI_type_node, 0);
9101 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
9103 intDI_type_node, 0);
9105 /* Packing and expanding vectors. */
9106 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
9107 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
9108 v8qi_ftype_v2si_v8qi);
9109 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
9111 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
9112 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
9113 v8qi_ftype_v4qi_v4qi);
9115 /* Multiplications. */
9116 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
9117 v4hi_ftype_v4qi_v4hi);
9118 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
9119 v4hi_ftype_v4qi_v2hi);
9120 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
9121 v4hi_ftype_v4qi_v2hi);
9122 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
9123 v4hi_ftype_v8qi_v4hi);
9124 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
9125 v4hi_ftype_v8qi_v4hi);
9126 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
9127 v2si_ftype_v4qi_v2hi);
9128 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
9129 v2si_ftype_v4qi_v2hi);
9131 /* Data aligning. */
9132 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
9133 v4hi_ftype_v4hi_v4hi);
9134 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
9135 v8qi_ftype_v8qi_v8qi);
9136 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
9137 v2si_ftype_v2si_v2si);
9138 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
9141 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
9144 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
9147 /* Pixel distance. */
9148 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
9149 di_ftype_v8qi_v8qi_di);
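
/* [Editorial usage sketch.]  The builtins defined above operate on 8-byte
   vectors declared with the vector_size attribute and require -mvis.
   alignaddr latches the low address bits in %gsr and returns the
   rounded-down pointer; faligndata then extracts the 8 bytes starting at
   the original misaligned address from the HI:LO concatenation.  */

typedef unsigned char vec8qi_example __attribute__ ((vector_size (8)));

static vec8qi_example
vis_align_example (vec8qi_example hi, vec8qi_example lo, void *addr)
{
  (void) __builtin_vis_alignaddr (addr, 0);
  return __builtin_vis_faligndatav8qi (hi, lo);
}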
9152 /* Handle TARGET_EXPAND_BUILTIN target hook.
9153 Expand builtin functions for SPARC intrinsics. */
9156 sparc_expand_builtin (tree exp, rtx target,
9157 rtx subtarget ATTRIBUTE_UNUSED,
9158 enum machine_mode tmode ATTRIBUTE_UNUSED,
9159 int ignore ATTRIBUTE_UNUSED)
9162 call_expr_arg_iterator iter;
9163 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9164 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
9166 enum machine_mode mode[4];
9169 mode[0] = insn_data[icode].operand[0].mode;
9171 || GET_MODE (target) != mode[0]
9172 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
9173 op[0] = gen_reg_rtx (mode[0]);
9177 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9180 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
9181 op[arg_count] = expand_normal (arg);
9183 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
9185 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
9191 pat = GEN_FCN (icode) (op[0], op[1]);
9194 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
9197 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
9211 static int
9212 sparc_vis_mul8x16 (int e8, int e16)
9213 {
9214 return (e8 * e16 + 128) / 256;
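
/* [Editorial worked example.]  The helper above models VIS multiplies as
   an 8-bit value times a 16-bit fixed-point scale, keeping the upper bits
   with rounding: for e8 == 100 and e16 == 200,
   (100 * 200 + 128) / 256 == 20128 / 256 == 78 (truncating division).  */

static int
vis_mul8x16_example (void)
{
  return sparc_vis_mul8x16 (100, 200);   /* == 78 */
}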
9217 /* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
9218 by FNCODE. All of the elements in the ELTS0 and ELTS1 lists must be integer
9219 constants. A tree list with the results of the multiplications is returned,
9220 and each element in the list is of INNER_TYPE. */
9223 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
9225 tree n_elts = NULL_TREE;
9230 case CODE_FOR_fmul8x16_vis:
9231 for (; elts0 && elts1;
9232 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
9235 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
9236 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
9237 n_elts = tree_cons (NULL_TREE,
9238 build_int_cst (inner_type, val),
9243 case CODE_FOR_fmul8x16au_vis:
9244 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
9246 for (; elts0; elts0 = TREE_CHAIN (elts0))
9249 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
9251 n_elts = tree_cons (NULL_TREE,
9252 build_int_cst (inner_type, val),
9257 case CODE_FOR_fmul8x16al_vis:
9258 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
9260 for (; elts0; elts0 = TREE_CHAIN (elts0))
9263 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
9265 n_elts = tree_cons (NULL_TREE,
9266 build_int_cst (inner_type, val),
9275 return nreverse (n_elts);
9278 /* Handle TARGET_FOLD_BUILTIN target hook.
9279 Fold builtin functions for SPARC intrinsics. If IGNORE is true, the
9280 result of the function call is ignored. NULL_TREE is returned if the
9281 function could not be folded. */
9284 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
9285 tree *args, bool ignore)
9287 tree arg0, arg1, arg2;
9288 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
9289 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
9291 if (ignore
9292 && icode != CODE_FOR_alignaddrsi_vis
9293 && icode != CODE_FOR_alignaddrdi_vis)
9294 return build_zero_cst (rtype);
9298 case CODE_FOR_fexpand_vis:
9302 if (TREE_CODE (arg0) == VECTOR_CST)
9304 tree inner_type = TREE_TYPE (rtype);
9305 tree elts = TREE_VECTOR_CST_ELTS (arg0);
9306 tree n_elts = NULL_TREE;
9308 for (; elts; elts = TREE_CHAIN (elts))
9310 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
9311 n_elts = tree_cons (NULL_TREE,
9312 build_int_cst (inner_type, val),
9315 return build_vector (rtype, nreverse (n_elts));
9319 case CODE_FOR_fmul8x16_vis:
9320 case CODE_FOR_fmul8x16au_vis:
9321 case CODE_FOR_fmul8x16al_vis:
9327 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
9329 tree inner_type = TREE_TYPE (rtype);
9330 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
9331 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
9332 tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
9335 return build_vector (rtype, n_elts);
9339 case CODE_FOR_fpmerge_vis:
9345 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
9347 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
9348 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
9349 tree n_elts = NULL_TREE;
9351 for (; elts0 && elts1;
9352 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
9354 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
9355 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
9358 return build_vector (rtype, nreverse (n_elts));
9362 case CODE_FOR_pdist_vis:
9370 if (TREE_CODE (arg0) == VECTOR_CST
9371 && TREE_CODE (arg1) == VECTOR_CST
9372 && TREE_CODE (arg2) == INTEGER_CST)
9375 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
9376 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
9377 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
9378 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
9380 for (; elts0 && elts1;
9381 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
9383 unsigned HOST_WIDE_INT
9384 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
9385 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
9386 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
9387 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
9389 unsigned HOST_WIDE_INT l;
9392 overflow |= neg_double (low1, high1, &l, &h);
9393 overflow |= add_double (low0, high0, l, h, &l, &h);
9395 overflow |= neg_double (l, h, &l, &h);
9397 overflow |= add_double (low, high, l, h, &low, &high);
9400 gcc_assert (overflow == 0);
9402 return build_int_cst_wide (rtype, low, high);
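
/* [Editorial sketch.]  The pdist fold above, restated over plain byte
   arrays instead of tree lists: a sum of absolute differences accumulated
   into the third operand.  */

static long long
pdist_model (const unsigned char a[8], const unsigned char b[8],
             long long acc)
{
  int i;
  for (i = 0; i < 8; i++)
    acc += a[i] > b[i] ? a[i] - b[i] : b[i] - a[i];
  return acc;
}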
9412 /* ??? This duplicates information provided to the compiler by the
9413 ??? scheduler description. Some day, teach genautomata to output
9414 ??? the latencies and then CSE will just use that. */
9417 sparc_rtx_costs (rtx x, int code, int outer_code, int *total,
9418 bool speed ATTRIBUTE_UNUSED)
9420 enum machine_mode mode = GET_MODE (x);
9421 bool float_mode_p = FLOAT_MODE_P (mode);
9426 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
9444 if (GET_MODE (x) == VOIDmode
9445 && ((CONST_DOUBLE_HIGH (x) == 0
9446 && CONST_DOUBLE_LOW (x) < 0x1000)
9447 || (CONST_DOUBLE_HIGH (x) == -1
9448 && CONST_DOUBLE_LOW (x) < 0
9449 && CONST_DOUBLE_LOW (x) >= -0x1000)))
9456 /* If outer-code was a sign or zero extension, a cost
9457 of COSTS_N_INSNS (1) was already added in. This is
9458 why we are subtracting it back out. */
9459 if (outer_code == ZERO_EXTEND)
9461 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
9463 else if (outer_code == SIGN_EXTEND)
9465 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
9467 else if (float_mode_p)
9469 *total = sparc_costs->float_load;
9473 *total = sparc_costs->int_load;
9481 *total = sparc_costs->float_plusminus;
9483 *total = COSTS_N_INSNS (1);
9488 *total = sparc_costs->float_mul;
9489 else if (! TARGET_HARD_MUL)
9490 *total = COSTS_N_INSNS (25);
9496 if (sparc_costs->int_mul_bit_factor)
9500 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
9502 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
9503 for (nbits = 0; value != 0; value &= value - 1)
9506 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
9507 && GET_MODE (XEXP (x, 1)) == VOIDmode)
9509 rtx x1 = XEXP (x, 1);
9510 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
9511 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
9513 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
9515 for (; value2 != 0; value2 &= value2 - 1)
9523 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
9524 bit_cost = COSTS_N_INSNS (bit_cost);
9528 *total = sparc_costs->int_mulX + bit_cost;
9530 *total = sparc_costs->int_mul + bit_cost;
9537 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
9547 *total = sparc_costs->float_div_df;
9549 *total = sparc_costs->float_div_sf;
9554 *total = sparc_costs->int_divX;
9556 *total = sparc_costs->int_div;
9563 *total = COSTS_N_INSNS (1);
9570 case UNSIGNED_FLOAT:
9574 case FLOAT_TRUNCATE:
9575 *total = sparc_costs->float_move;
9580 *total = sparc_costs->float_sqrt_df;
9582 *total = sparc_costs->float_sqrt_sf;
9587 *total = sparc_costs->float_cmp;
9589 *total = COSTS_N_INSNS (1);
9594 *total = sparc_costs->float_cmove;
9596 *total = sparc_costs->int_cmove;
9600 /* Handle the NAND vector patterns. */
9601 if (sparc_vector_mode_supported_p (GET_MODE (x))
9602 && GET_CODE (XEXP (x, 0)) == NOT
9603 && GET_CODE (XEXP (x, 1)) == NOT)
9605 *total = COSTS_N_INSNS (1);
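
/* [Editorial sketch.]  The multiply-cost heuristic above counts set bits
   with the classic `value &= value - 1' trick (each iteration clears the
   lowest set bit), then charges extra once the population count exceeds a
   small base.  A self-contained model, assuming a nonzero BIT_FACTOR and
   leaving out the COSTS_N_INSNS scaling:  */

static int
mul_bit_cost_model (unsigned long long value, int bit_factor)
{
  int nbits = 0;
  for (; value != 0; value &= value - 1)
    nbits++;
  if (nbits < 3)
    nbits = 3;
  return (nbits - 3) / bit_factor;
}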
9616 /* Return true if CLASS is either GENERAL_REGS or I64_REGS. */
9619 general_or_i64_p (reg_class_t rclass)
9621 return (rclass == GENERAL_REGS || rclass == I64_REGS);
9624 /* Implement TARGET_REGISTER_MOVE_COST. */
9627 sparc_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
9628 reg_class_t from, reg_class_t to)
9630 if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
9631 || (general_or_i64_p (from) && FP_REG_CLASS_P (to))
9632 || from == FPCC_REGS
9635 if (sparc_cpu == PROCESSOR_ULTRASPARC
9636 || sparc_cpu == PROCESSOR_ULTRASPARC3
9637 || sparc_cpu == PROCESSOR_NIAGARA
9638 || sparc_cpu == PROCESSOR_NIAGARA2)
9647 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
9648 This is achieved by means of a manual dynamic stack space allocation in
9649 the current frame. We make the assumption that SEQ doesn't contain any
9650 function calls, with the possible exception of calls to the GOT helper. */
9653 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
9655 /* We must preserve the lowest 16 words for the register save area. */
9656 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
9657 /* We really need only 2 words of fresh stack space. */
9658 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
9661 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
9662 SPARC_STACK_BIAS + offset));
9664 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
9665 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
9666 if (reg2)
9667 emit_insn (gen_rtx_SET (VOIDmode,
9668 adjust_address (slot, word_mode, UNITS_PER_WORD),
9669 reg2));
9670 emit_insn (seq);
9671 if (reg2)
9672 emit_insn (gen_rtx_SET (VOIDmode,
9673 reg2,
9674 adjust_address (slot, word_mode, UNITS_PER_WORD)));
9675 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
9676 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
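
/* [Editorial worked example.]  Assuming 32-bit (UNITS_PER_WORD == 4) and
   8-byte stack alignment: the register save area reserves 16 * 4 == 64
   bytes, the two fresh words add 8 more, and rounding leaves 72.  */

static long
preserve_area_size_example (void)
{
  long offset = 16 * 4;                     /* register save area */
  long size = (offset + 2 * 4 + 7) & -8;    /* models SPARC_STACK_ALIGN */
  return size;                              /* == 72 */
}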
9679 /* Output the assembler code for a thunk function. THUNK_DECL is the
9680 declaration for the thunk function itself, FUNCTION is the decl for
9681 the target function. DELTA is an immediate constant offset to be
9682 added to THIS. If VCALL_OFFSET is nonzero, the word at address
9683 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
9686 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
9687 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9690 rtx this_rtx, insn, funexp;
9691 unsigned int int_arg_first;
9693 reload_completed = 1;
9694 epilogue_completed = 1;
9696 emit_note (NOTE_INSN_PROLOGUE_END);
9700 sparc_leaf_function_p = 1;
9702 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
9704 else if (flag_delayed_branch)
9706 /* We will emit a regular sibcall below, so we need to instruct
9707 output_sibcall that we are in a leaf function. */
9708 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
9710 /* This will cause final.c to invoke leaf_renumber_regs so we
9711 must behave as if we were in a not-yet-leafified function. */
9712 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
9716 /* We will emit the sibcall manually below, so we will need to
9717 manually spill non-leaf registers. */
9718 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
9720 /* We really are in a leaf function. */
9721 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
9724 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
9725 returns a structure, the structure return pointer is there instead. */
9727 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9728 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
9730 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
9732 /* Add DELTA. When possible use a plain add, otherwise load it into
9733 a register first. */
9736 rtx delta_rtx = GEN_INT (delta);
9738 if (! SPARC_SIMM13_P (delta))
9740 rtx scratch = gen_rtx_REG (Pmode, 1);
9741 emit_move_insn (scratch, delta_rtx);
9742 delta_rtx = scratch;
9745 /* THIS_RTX += DELTA. */
9746 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
9749 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
9752 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
9753 rtx scratch = gen_rtx_REG (Pmode, 1);
9755 gcc_assert (vcall_offset < 0);
9757 /* SCRATCH = *THIS_RTX. */
9758 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
9760 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
9761 may not have any available scratch register at this point. */
9762 if (SPARC_SIMM13_P (vcall_offset))
9763 ;
9764 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
9765 else if (! fixed_regs[5]
9766 /* The below sequence is made up of at least 2 insns,
9767 while the default method may need only one. */
9768 && vcall_offset < -8192)
9770 rtx scratch2 = gen_rtx_REG (Pmode, 5);
9771 emit_move_insn (scratch2, vcall_offset_rtx);
9772 vcall_offset_rtx = scratch2;
9776 rtx increment = GEN_INT (-4096);
9778 /* VCALL_OFFSET is a negative number whose typical range can be
9779 estimated as -32768..0 in 32-bit mode. In almost all cases
9780 it is therefore cheaper to emit multiple add insns than
9781 spilling and loading the constant into a register (at least
9782 3 insns). */
9783 while (! SPARC_SIMM13_P (vcall_offset))
9785 emit_insn (gen_add2_insn (scratch, increment));
9786 vcall_offset += 4096;
9788 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
9791 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
9792 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
9793 gen_rtx_PLUS (Pmode,
9795 vcall_offset_rtx)));
9797 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
9798 emit_insn (gen_add2_insn (this_rtx, scratch));
9801 /* Generate a tail call to the target function. */
9802 if (! TREE_USED (function))
9803 {
9804 assemble_external (function);
9805 TREE_USED (function) = 1;
9806 }
9807 funexp = XEXP (DECL_RTL (function), 0);
9809 if (flag_delayed_branch)
9811 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
9812 insn = emit_call_insn (gen_sibcall (funexp));
9813 SIBLING_CALL_P (insn) = 1;
9817 /* The hoops we have to jump through in order to generate a sibcall
9818 without using delay slots... */
9819 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
9823 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
9825 load_got_register (); /* clobbers %o7 */
9826 scratch = sparc_legitimize_pic_address (funexp, scratch);
9829 emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
9831 else if (TARGET_ARCH32)
9833 emit_insn (gen_rtx_SET (VOIDmode,
9835 gen_rtx_HIGH (SImode, funexp)));
9836 emit_insn (gen_rtx_SET (VOIDmode,
9838 gen_rtx_LO_SUM (SImode, scratch, funexp)));
9840 else /* TARGET_ARCH64 */
9842 switch (sparc_cmodel)
9846 /* The destination can serve as a temporary. */
9847 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
9852 /* The destination cannot serve as a temporary. */
9853 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
9855 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
9858 emit_and_preserve (seq, spill_reg, 0);
9866 emit_jump_insn (gen_indirect_jump (scratch));
9871 /* Run just enough of rest_of_compilation to get the insns emitted.
9872 There's not really enough bulk here to make other passes such as
9873 instruction scheduling worthwhile. Note that use_thunk calls
9874 assemble_start_function and assemble_end_function. */
9875 insn = get_insns ();
9876 insn_locators_alloc ();
9877 shorten_branches (insn);
9878 final_start_function (insn, file, 1);
9879 final (insn, file, 1);
9880 final_end_function ();
9882 reload_completed = 0;
9883 epilogue_completed = 0;
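
/* [Editorial sketch.]  The pointer adjustment the thunk performs before
   tail-calling FUNCTION, restated in plain C: add DELTA, then optionally
   add the entry found at *THIS + VCALL_OFFSET through the vtable.  The
   `long' table entry is a simplification for illustration.  */

static char *
thunk_adjust_model (char *this_ptr, long delta, long vcall_offset)
{
  this_ptr += delta;
  if (vcall_offset != 0)
    {
      char *vtable = *(char **) this_ptr;
      this_ptr += *(long *) (vtable + vcall_offset);
    }
  return this_ptr;
}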
9886 /* Return true if sparc_output_mi_thunk would be able to output the
9887 assembler code for the thunk function specified by the arguments
9888 it is passed, and false otherwise. */
9890 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
9891 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
9892 HOST_WIDE_INT vcall_offset,
9893 const_tree function ATTRIBUTE_UNUSED)
9895 /* Bound the loop used in the default method above. */
9896 return (vcall_offset >= -32768 || ! fixed_regs[5]);
9899 /* How to allocate a 'struct machine_function'. */
9901 static struct machine_function *
9902 sparc_init_machine_status (void)
9904 return ggc_alloc_cleared_machine_function ();
9907 /* Locate some local-dynamic symbol still in use by this function
9908 so that we can print its name in local-dynamic base patterns. */
9911 get_some_local_dynamic_name (void)
9915 if (cfun->machine->some_ld_name)
9916 return cfun->machine->some_ld_name;
9918 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
9920 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
9921 return cfun->machine->some_ld_name;
9927 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
9932 && GET_CODE (x) == SYMBOL_REF
9933 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
9935 cfun->machine->some_ld_name = XSTR (x, 0);
9942 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
9943 We need to emit DTP-relative relocations. */
9946 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
9951 fputs ("\t.word\t%r_tls_dtpoff32(", file);
9954 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
9959 output_addr_const (file, x);
9963 /* Do whatever processing is required at the end of a file. */
9966 sparc_file_end (void)
9968 /* If we need to emit the special GOT helper function, do so now. */
9971 const char *name = XSTR (got_helper_rtx, 0);
9972 const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
9973 #ifdef DWARF2_UNWIND_INFO
9977 if (USE_HIDDEN_LINKONCE)
9979 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
9980 get_identifier (name),
9981 build_function_type_list (void_type_node,
9983 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
9984 NULL_TREE, void_type_node);
9985 TREE_STATIC (decl) = 1;
9986 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
9987 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
9988 DECL_VISIBILITY_SPECIFIED (decl) = 1;
9989 resolve_unique_section (decl, 0, flag_function_sections);
9990 allocate_struct_function (decl, true);
9992 current_function_decl = decl;
9993 init_varasm_status ();
9994 assemble_start_function (decl, name);
9998 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9999 switch_to_section (text_section);
10001 ASM_OUTPUT_ALIGN (asm_out_file, align);
10002 ASM_OUTPUT_LABEL (asm_out_file, name);
10005 #ifdef DWARF2_UNWIND_INFO
10006 do_cfi = dwarf2out_do_cfi_asm ();
10008 fprintf (asm_out_file, "\t.cfi_startproc\n");
10010 if (flag_delayed_branch)
10011 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
10012 reg_name, reg_name);
10014 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
10015 reg_name, reg_name);
10016 #ifdef DWARF2_UNWIND_INFO
10018 fprintf (asm_out_file, "\t.cfi_endproc\n");
10022 if (NEED_INDICATE_EXEC_STACK)
10023 file_end_indicate_exec_stack ();
10025 #ifdef TARGET_SOLARIS
10026 solaris_file_end ();
10030 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10031 /* Implement TARGET_MANGLE_TYPE. */
10033 static const char *
10034 sparc_mangle_type (const_tree type)
10037 && TYPE_MAIN_VARIANT (type) == long_double_type_node
10038 && TARGET_LONG_DOUBLE_128)
10039 return "g";
10041 /* For all other types, use normal C++ mangling. */
10042 return NULL;
10043 }
10044 #endif
10046 /* Expand code to perform an 8-bit or 16-bit compare and swap by doing
10047 a 32-bit compare and swap on the word containing the byte or half-word. */
10050 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
10052 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
10053 rtx addr = gen_reg_rtx (Pmode);
10054 rtx off = gen_reg_rtx (SImode);
10055 rtx oldv = gen_reg_rtx (SImode);
10056 rtx newv = gen_reg_rtx (SImode);
10057 rtx oldvalue = gen_reg_rtx (SImode);
10058 rtx newvalue = gen_reg_rtx (SImode);
10059 rtx res = gen_reg_rtx (SImode);
10060 rtx resv = gen_reg_rtx (SImode);
10061 rtx memsi, val, mask, end_label, loop_label, cc;
10063 emit_insn (gen_rtx_SET (VOIDmode, addr,
10064 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
10066 if (Pmode != SImode)
10067 addr1 = gen_lowpart (SImode, addr1);
10068 emit_insn (gen_rtx_SET (VOIDmode, off,
10069 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
10071 memsi = gen_rtx_MEM (SImode, addr);
10072 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
10073 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
10075 val = force_reg (SImode, memsi);
10077 emit_insn (gen_rtx_SET (VOIDmode, off,
10078 gen_rtx_XOR (SImode, off,
10079 GEN_INT (GET_MODE (mem) == QImode
10082 emit_insn (gen_rtx_SET (VOIDmode, off,
10083 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
10085 if (GET_MODE (mem) == QImode)
10086 mask = force_reg (SImode, GEN_INT (0xff));
10088 mask = force_reg (SImode, GEN_INT (0xffff));
10090 emit_insn (gen_rtx_SET (VOIDmode, mask,
10091 gen_rtx_ASHIFT (SImode, mask, off)));
10093 emit_insn (gen_rtx_SET (VOIDmode, val,
10094 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
10097 oldval = gen_lowpart (SImode, oldval);
10098 emit_insn (gen_rtx_SET (VOIDmode, oldv,
10099 gen_rtx_ASHIFT (SImode, oldval, off)));
10101 newval = gen_lowpart_common (SImode, newval);
10102 emit_insn (gen_rtx_SET (VOIDmode, newv,
10103 gen_rtx_ASHIFT (SImode, newval, off)));
10105 emit_insn (gen_rtx_SET (VOIDmode, oldv,
10106 gen_rtx_AND (SImode, oldv, mask)));
10108 emit_insn (gen_rtx_SET (VOIDmode, newv,
10109 gen_rtx_AND (SImode, newv, mask)));
10111 end_label = gen_label_rtx ();
10112 loop_label = gen_label_rtx ();
10113 emit_label (loop_label);
10115 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
10116 gen_rtx_IOR (SImode, oldv, val)));
10118 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
10119 gen_rtx_IOR (SImode, newv, val)));
10121 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
10123 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
10125 emit_insn (gen_rtx_SET (VOIDmode, resv,
10126 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
10129 cc = gen_compare_reg_1 (NE, resv, val);
10130 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
10132 /* Use cbranchcc4 to separate the compare and branch! */
10133 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
10134 cc, const0_rtx, loop_label));
10136 emit_label (end_label);
10138 emit_insn (gen_rtx_SET (VOIDmode, res,
10139 gen_rtx_AND (SImode, res, mask)));
10141 emit_insn (gen_rtx_SET (VOIDmode, res,
10142 gen_rtx_LSHIFTRT (SImode, res, off)));
10144 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
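
/* [Editorial sketch.]  The same word-masking technique, assuming GCC's
   __sync builtins and SPARC's big-endian byte numbering (hence the XOR
   with 3 above): an 8-bit compare-and-swap built from a 32-bit one,
   retrying only when the *other* bytes of the word changed under us.  */

#include <stdint.h>

static uint8_t
cas_byte_model (uint8_t *p, uint8_t oldv, uint8_t newv)
{
  uint32_t *wp = (uint32_t *) ((uintptr_t) p & ~(uintptr_t) 3);
  unsigned int shift = (((uintptr_t) p & 3) ^ 3) * 8;
  uint32_t mask = (uint32_t) 0xff << shift;

  for (;;)
    {
      uint32_t cur = *wp;
      uint32_t ow = (cur & ~mask) | ((uint32_t) oldv << shift);
      uint32_t nw = (cur & ~mask) | ((uint32_t) newv << shift);
      uint32_t got = __sync_val_compare_and_swap (wp, ow, nw);
      if (got == ow)
        return oldv;                               /* swap succeeded */
      if ((got & mask) != ((uint32_t) oldv << shift))
        return (uint8_t) ((got & mask) >> shift);  /* byte mismatched */
      /* Only the surrounding bytes changed; loop like the cbranchcc4
         branch above and retry with the fresh contents.  */
    }
}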
10147 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
10150 sparc_frame_pointer_required (void)
10152 /* If the stack pointer is dynamically modified in the function, it cannot
10153 serve as the frame pointer. */
10154 if (cfun->calls_alloca)
10157 /* If the function receives nonlocal gotos, it needs to save the frame
10158 pointer in the nonlocal_goto_save_area object. */
10159 if (cfun->has_nonlocal_label)
10162 /* In flat mode, that's it. */
10166 /* Otherwise, the frame pointer is required if the function isn't leaf. */
10167 return !(current_function_is_leaf && only_leaf_regs_used ());
10170 /* The way this is structured, we can't eliminate SFP in favor of SP
10171 if the frame pointer is required: we want to use the SFP->HFP elimination
10172 in that case. But the test in update_eliminables doesn't know we are
10173 assuming below that we only do the former elimination. */
10176 sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
10178 return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
10181 /* Return the hard frame pointer directly to bypass the stack bias. */
10184 sparc_builtin_setjmp_frame_value (void)
10186 return hard_frame_pointer_rtx;
10189 /* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
10190 they won't be allocated. */
10193 sparc_conditional_register_usage (void)
10195 if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
10197 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10198 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10200 /* If the user has passed -f{fixed,call-{used,saved}}-g5,
10201 then honor it. */
10202 if (TARGET_ARCH32 && fixed_regs[5])
10204 else if (TARGET_ARCH64 && fixed_regs[5] == 2)
10209 for (regno = SPARC_FIRST_V9_FP_REG;
10210 regno <= SPARC_LAST_V9_FP_REG;
10212 fixed_regs[regno] = 1;
10213 /* %fcc0 is used by v8 and v9. */
10214 for (regno = SPARC_FIRST_V9_FCC_REG + 1;
10215 regno <= SPARC_LAST_V9_FCC_REG;
10217 fixed_regs[regno] = 1;
10222 for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
10223 fixed_regs[regno] = 1;
10225 /* If the user has passed -f{fixed,call-{used,saved}}-g2,
10226 then honor it. Likewise with g3 and g4. */
10227 if (fixed_regs[2] == 2)
10228 fixed_regs[2] = ! TARGET_APP_REGS;
10229 if (fixed_regs[3] == 2)
10230 fixed_regs[3] = ! TARGET_APP_REGS;
10231 if (TARGET_ARCH32 && fixed_regs[4] == 2)
10232 fixed_regs[4] = ! TARGET_APP_REGS;
10233 else if (TARGET_CM_EMBMEDANY)
10235 else if (fixed_regs[4] == 2)
10240 /* Disable leaf functions. */
10241 memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
10242 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10243 leaf_reg_remap [regno] = regno;
10247 /* Implement TARGET_PREFERRED_RELOAD_CLASS
10249 - We can't load constants into FP registers.
10250 - We can't load FP constants into integer registers when soft-float,
10251 because there is no soft-float pattern with a r/F constraint.
10252 - We can't load FP constants into integer registers for TFmode unless
10253 it is 0.0L, because there is no movtf pattern with a r/F constraint.
10254 - Try to reload integer constants (symbolic or otherwise) back into
10255 registers directly, rather than having them dumped to memory. */
10258 sparc_preferred_reload_class (rtx x, reg_class_t rclass)
10260 if (CONSTANT_P (x))
10262 if (FP_REG_CLASS_P (rclass)
10263 || rclass == GENERAL_OR_FP_REGS
10264 || rclass == GENERAL_OR_EXTRA_FP_REGS
10265 || (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT && ! TARGET_FPU)
10266 || (GET_MODE (x) == TFmode && ! const_zero_operand (x, TFmode)))
10269 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
10270 return GENERAL_REGS;
10276 #include "gt-sparc.h"